From 7f3cf0689f72105576fef8e17994000cd2b5402e Mon Sep 17 00:00:00 2001
From: sronilsson
Date: Fri, 20 Dec 2024 09:14:45 -0500
Subject: [PATCH] uml

---
 docs/nb/Untitled2.ipynb | 23 +
 simba/sandbox/CLAHE.py | 89 +
 simba/sandbox/Geometry_7.py | 99 +
 simba/sandbox/MitraFeatureExtractor.py | 194 +
 simba/sandbox/MitraFeatureExtractor.zip | Bin 0 -> 4450 bytes
 simba/sandbox/ROI_analyzer.py | 222 +
 simba/sandbox/ROI_plotter.py | 255 +
 simba/sandbox/ROI_plotter_mp.py | 352 +
 simba/sandbox/abod.py | 98 +
 simba/sandbox/add_body_part.py | 35 +
 simba/sandbox/add_body_part.py.zip | Bin 0 -> 1276 bytes
 simba/sandbox/adjusted_rand.py | 32 +
 simba/sandbox/adjusted_rand_score.py | 69 +
 simba/sandbox/advanced_interpolator.py | 199 +
 simba/sandbox/advanced_smoothing.py | 180 +
 simba/sandbox/angle_3pt.py | 64 +
 simba/sandbox/annotations_mitra.py | 40 +
 simba/sandbox/average_frm_popup.py | 180 +
 simba/sandbox/bar_chart.py | 53 +
 simba/sandbox/batch_video_to_greyscale.py | 44 +
 simba/sandbox/berger_parker.py | 38 +
 simba/sandbox/bg_remover_cuda.py | 157 +
 simba/sandbox/bg_remover_cupy.py | 89 +
 simba/sandbox/bg_remover_popup.py | 129 +
 simba/sandbox/bg_substraction.py | 318 +
 simba/sandbox/biweight_midcorrelation.py | 40 +
 simba/sandbox/blank_img.py | 54 +
 simba/sandbox/blank_vid.py | 5 +
 simba/sandbox/blurbox.py | 55 +
 simba/sandbox/bouts_df | 6 +
 simba/sandbox/brillouins_index.py | 50 +
 .../calc_N_degree_direction_switches.py | 5 +
 simba/sandbox/calinski_harabasz.py | 43 +
 simba/sandbox/cb_frame.py | 135 +
 simba/sandbox/change_img_file_format.py | 117 +
 simba/sandbox/change_n_jobs_scikit.py | 9 +
 simba/sandbox/circling_detector.py | 119 +
 simba/sandbox/circular_statistics.py | 680 +
 simba/sandbox/clean_sleap_filename.py | 28 +
 .../clip_multiple_videos_by_frame_numbers.py | 161 +
 simba/sandbox/clip_videos_by_frm.py | 63 +
 simba/sandbox/cochran_q.py | 43 +
 simba/sandbox/coco.py | 151 +
 simba/sandbox/cohens_kappa.py | 63 +
 simba/sandbox/colinear_features.py | 93 +
 simba/sandbox/color_filtering.py | 0
 simba/sandbox/concat_gpu.py | 163 +
 simba/sandbox/concordance_ratio.py | 38 +
 simba/sandbox/contrast.py | 127 +
 simba/sandbox/convert_to_bw.py | 175 +
 simba/sandbox/convert_to_mp4.py | 300 +
 simba/sandbox/convert_to_popup.py | 194 +
 simba/sandbox/convex_hull.py | 111 +
 simba/sandbox/convex_hull_area.py | 42 +
 simba/sandbox/corner_distance.py | 64 +
 simba/sandbox/count_values_in_range_gpu.py | 53 +
 simba/sandbox/create_average_cupy.py | 108 +
 simba/sandbox/create_average_frame_cuda.py | 152 +
 simba/sandbox/create_gif.py | 270 +
 simba/sandbox/create_import_pose_menu.py | 342 +
 simba/sandbox/create_shap_log.py | 162 +
 simba/sandbox/create_shap_log_3.py | 52 +
 simba/sandbox/cronbach_alpha.py | 65 +
 simba/sandbox/crossfade.py | 60 +
 simba/sandbox/cuda_median.py | 36 +
 simba/sandbox/cuda_rotate_video.py | 122 +
 .../sandbox/cuda_sliding_descriptive_stats.py | 120 +
 simba/sandbox/cuda_sort.py | 65 +
 simba/sandbox/cumcount_plot.py | 114 +
 simba/sandbox/cuml_kmeans.py | 44 +
 simba/sandbox/czenakowski.py | 110 +
 simba/sandbox/data.npy | Bin 0 -> 3328 bytes
 simba/sandbox/davis_bouldin.py | 64 +
 simba/sandbox/detect_scene_changes.py | 13 +
 simba/sandbox/directed_hausdorff.py | 66 +
 simba/sandbox/direction_two_bps.py | 45 +
 simba/sandbox/distance_velocity.py | 33 +
 simba/sandbox/distances.py | 192 +
 .../downsample_multiple_videos_popup.py | 81 +
 simba/sandbox/downsample_video_popup.py | 78 +
 simba/sandbox/dprime.py | 54 +
 simba/sandbox/dunn_index.py | 47 +
 .../sandbox/egocentri_video_rotator_numba.py | 135 +
 simba/sandbox/egocentric_alig_pose_numba.py | 92 +
 simba/sandbox/egocentric_align.py | 68 +
 simba/sandbox/egocentric_align_1025.py | 41 +
 simba/sandbox/egocentric_align_cuda.py | 139 +
 simba/sandbox/egocentric_aligner.py | 222 +
 simba/sandbox/egocentric_aligner_.py | 186 +
 simba/sandbox/egocentric_aligner_1024.py | 127 +
 simba/sandbox/egocentric_alignment.py | 49 +
 simba/sandbox/egocentric_alignment_cuda.py | 0
 simba/sandbox/egocentric_video_dali.py | 70 +
 simba/sandbox/egocentric_video_rotation.py | 161 +
 .../sandbox/egocentric_video_rotator_cuda.py | 289 +
 simba/sandbox/elliptic_envelope.py | 59 +
 .../sandbox/entropy_of_directional_changes.py | 95 +
 simba/sandbox/eta_squared.py | 48 +
 simba/sandbox/euclidan_distance_cuda.py | 52 +
 simba/sandbox/euclidean_distances_cuda.py | 38 +
 simba/sandbox/ez_path_plot.py | 109 +
 simba/sandbox/fecal_boli.py | 49 +
 simba/sandbox/ffmpeg_progress_bar.py | 68 +
 simba/sandbox/fix_clahe.py | 65 +
 simba/sandbox/fleiss_kappa.py | 35 +
 simba/sandbox/freezing_detector.py | 130 +
 simba/sandbox/frm_rotator_cuda.py | 27 +
 simba/sandbox/gantt_plotly.py | 143 +
 simba/sandbox/gantt_update.py | 26 +
 simba/sandbox/geometry_9.py | 57 +
 simba/sandbox/get_convex_hull_cuda.py | 13 +
 simba/sandbox/gibbs_sampling.py | 72 +
 simba/sandbox/github_issues.json | 15563 ++++++++++++++++
 simba/sandbox/github_issues_download.py | 102 +
 simba/sandbox/grangercausalitytests.py | 70 +
 simba/sandbox/graph.html | 164 +
 simba/sandbox/graph_flow.html | 164 +
 simba/sandbox/graph_katz.html | 164 +
 simba/sandbox/graph_pagerank.html | 164 +
 .../sandbox/grid_transition_probabilities.py | 104 +
 simba/sandbox/grubbs_test.py | 28 +
 simba/sandbox/hartley_fmax.py | 22 +
 simba/sandbox/hausdorff.py | 81 +
 simba/sandbox/heading.py | 34 +
 simba/sandbox/horizontal_videos_concat.py | 354 +
 simba/sandbox/img_conv.py | 48 +
 simba/sandbox/img_kmeans.py | 66 +
 simba/sandbox/img_stack_brightness.py | 95 +
 simba/sandbox/img_stack_mse.py | 106 +
 simba/sandbox/img_stack_to_bw.py | 62 +
 simba/sandbox/imgs_to_grayscale_cupy.py | 44 +
 simba/sandbox/imgs_to_greyscale_cuda.py | 53 +
 simba/sandbox/increase_fps.py | 227 +
 simba/sandbox/inset_overlay_video.py | 79 +
 simba/sandbox/interpolate.py | 111 +
 simba/sandbox/is_inside_circle.py | 39 +
 simba/sandbox/is_inside_polygon.py | 65 +
 simba/sandbox/is_inside_rectangle.py | 45 +
 simba/sandbox/isolation_forest.py | 82 +
 simba/sandbox/jaccard_distance.py | 33 +
 simba/sandbox/joint_plotter.py | 159 +
 simba/sandbox/kalman.py | 48 +
 simba/sandbox/keyPoi.py | 23 +
 simba/sandbox/kinetic_energy.py | 39 +
 simba/sandbox/knn.py | 86 +
 simba/sandbox/kumar.py | 137 +
 simba/sandbox/labelme_to_dlc.py | 88 +
 simba/sandbox/lbp.py | 20 +
 simba/sandbox/lcs.py | 5 +
 simba/sandbox/levenshtein.py | 39 +
 simba/sandbox/line_locate_point.py | 71 +
 simba/sandbox/line_plot.py | 43 +
 simba/sandbox/line_plot_plotly.py | 140 +
 simba/sandbox/linear_fretchet.py | 69 +
 simba/sandbox/linearity_index.py | 62 +
 simba/sandbox/linestring_path.py | 63 +
 simba/sandbox/madmedianrule.py | 59 +
 simba/sandbox/mahalanobis.py | 64 +
 simba/sandbox/make_path_plot.py | 106 +
 simba/sandbox/manhattan_distance.py | 36 +
 .../sandbox/margalef_diversification_index.py | 23 +
 simba/sandbox/mcnamar.py | 58 +
 simba/sandbox/mean_squared_jerk.py | 107 +
 simba/sandbox/menhinicks_index.py | 17 +
 simba/sandbox/mitra_appand_additional.py | 29 +
 simba/sandbox/mitra_bar_graph.py | 41 +
 simba/sandbox/mitra_bg_remover.py | 22 +
 simba/sandbox/mitra_circling_detector.py | 92 +
 simba/sandbox/mitra_correlation_checks.py | 31 +
 simba/sandbox/mitra_downsample_data.py | 48 +
 simba/sandbox/mitra_freezing_detector.py | 219 +
 simba/sandbox/mitra_frequency_grapher.py | 97 +
 simba/sandbox/mitra_frequency_grapher.zip | Bin 0 -> 1838 bytes
 simba/sandbox/mitra_laying_down_analyzer.py | 212 +
 .../mitra_style_annotation_appender.py | 75 +
 simba/sandbox/mitra_tail_analyzer.py | 166 +
 simba/sandbox/mitra_timebins.py | 63 +
 simba/sandbox/momentum.py | 46 +
 simba/sandbox/morans.py | 106 +
 simba/sandbox/mosaic.py | 35 +
 simba/sandbox/multiframe_is_shape_covered.py | 50 +
 simba/sandbox/network.py | 83 +
 .../sandbox/new_outlier_corrector_location.py | 157 +
 simba/sandbox/normalized_cross_correlation.py | 185 +
 simba/sandbox/opencv_cuda.py | 20 +
 simba/sandbox/optimal_font_scale.py | 72 +
 simba/sandbox/ordinal_clf.py | 108 +
 simba/sandbox/outliers_tietjen.py | 45 +
 simba/sandbox/path_curvature.py | 94 +
 simba/sandbox/path_geometry.py | 99 +
 simba/sandbox/path_geometry_2.py | 51 +
 simba/sandbox/path_plots.py | 163 +
 simba/sandbox/paths.py | 71 +
 simba/sandbox/pct_counts_in_top_N.py | 54 +
 simba/sandbox/piotr_120324 2.py.zip | Bin 0 -> 2243 bytes
 simba/sandbox/piotr_120324 3.py.zip | Bin 0 -> 2391 bytes
 simba/sandbox/piotr_120324 4.py.zip | Bin 0 -> 2396 bytes
 simba/sandbox/piotr_120324.py | 98 +
 simba/sandbox/piotr_120324.py.zip | Bin 0 -> 2268 bytes
 simba/sandbox/platea_video.py | 125 +
 simba/sandbox/plotly_gantt.py | 72 +
 simba/sandbox/plug_bouts.py | 181 +
 simba/sandbox/profiler.py | 9 +
 simba/sandbox/px_to_mm.py | 146 +
 simba/sandbox/pypi_sizes.py | 27 +
 simba/sandbox/querter_circle.py | 87 +
 simba/sandbox/quickselect.py | 60 +
 simba/sandbox/read_boris.py | 225 +
 simba/sandbox/read_boris_annotation_files.py | 237 +
 simba/sandbox/read_img.py | 167 +
 simba/sandbox/read_video_info.py | 78 +
 simba/sandbox/redis.py | 25 +
 simba/sandbox/reduce_features.py | 27 +
 simba/sandbox/reduce_features.zip | Bin 0 -> 1006 bytes
 simba/sandbox/reduce_imge_stack_size.py | 70 +
 simba/sandbox/relative_risk.py | 64 +
 simba/sandbox/reverse_popup.py | 124 +
 simba/sandbox/reverse_videos.py | 114 +
 simba/sandbox/roi_definition_csvs_to_h5.py | 53 +
 .../sandbox/roi_definition_csvs_to_h5.py.zip | Bin 0 -> 1016 bytes
 simba/sandbox/roi_to_yolo.py | 113 +
 simba/sandbox/rotate image.py | 39 +
 simba/sandbox/rotate.py | 353 +
 simba/sandbox/rotate_example_nb_egocentric.py | 82 +
 simba/sandbox/runs_test.py | 36 +
 simba/sandbox/seekable.py | 58 +
 simba/sandbox/segment_image_horizontal.py | 39 +
 simba/sandbox/sequential_lag_analysis.py | 77 +
 simba/sandbox/shannon_diversity_index.py | 33 +
 simba/sandbox/shap_2_nb.py | 48 +
 simba/sandbox/shift_geometries.py | 66 +
 simba/sandbox/siegel_tukey.py | 95 +
 simba/sandbox/silhouette_score.py | 28 +
 simba/sandbox/simpson_diversity_index.py | 31 +
 simba/sandbox/slic.py | 140 +
 simba/sandbox/sliding_autoc.py | 42 +
 simba/sandbox/sliding_circular_hotspots.py | 55 +
 simba/sandbox/sliding_circular_mean.py | 61 +
 simba/sandbox/sliding_circular_range.py | 60 +
 simba/sandbox/sliding_circular_range_test.py | 31 +
 simba/sandbox/sliding_circular_std.py | 60 +
 simba/sandbox/sliding_crosscorrelation.py | 65 +
 simba/sandbox/sliding_displacement.py | 40 +
 simba/sandbox/sliding_iqr.py | 39 +
 simba/sandbox/sliding_linearity_index.py | 85 +
 simba/sandbox/sliding_mean.py | 54 +
 simba/sandbox/sliding_min.py | 52 +
 simba/sandbox/sliding_rayleigh_z.py | 76 +
 .../sliding_resultant_vector_length.py | 63 +
 simba/sandbox/sliding_spatial_density.py | 137 +
 simba/sandbox/sliding_spearmans_rank.py | 67 +
 simba/sandbox/sliding_std.py | 58 +
 simba/sandbox/sliding_sum.py | 47 +
 simba/sandbox/smoothing.py | 120 +
 simba/sandbox/sorensen_dice_coefficient.py | 36 +
 .../spatial_density_trajectory_points.py | 93 +
 .../spontaneous_alternation_calculator.py | 123 +
 .../spontaneuous_alternation_plotter.py | 162 +
 simba/sandbox/spontanous_alternations.py | 225 +
 simba/sandbox/stabalize.py | 126 +
 simba/sandbox/structural_similarity_index.py | 146 +
 simba/sandbox/summetry_index.py | 49 +
 simba/sandbox/superimpose_elapsed_time.py | 98 +
 simba/sandbox/superimpose_frame_count.py | 69 +
 simba/sandbox/superimpose_frm_cnt.py | 91 +
 simba/sandbox/superimpose_popups.py | 427 +
 simba/sandbox/superimpose_text.py | 111 +
 simba/sandbox/superpixels.py | 46 +
 simba/sandbox/total_variation_distance.py | 43 +
 simba/sandbox/train_yolo_2.py | 72 +
 simba/sandbox/train_yolo_rois.py | 40 +
 .../two_fish_feature_extractor_040924.py | 154 +
 .../two_fish_feature_extractor_040924.py.zip | Bin 0 -> 3607 bytes
 .../unsupervised/clf_bout_aggregator.py | 287 +
 .../unsupervised/cluster_frequentist_stats.py | 121 +
 simba/sandbox/unsupervised/simba_hdbscan.py | 105 +
 simba/sandbox/unsupervised/simba_umap.py | 130 +
 simba/sandbox/unsupervised_lof.py | 48 +
 simba/sandbox/unsupervised_outliers.py | 329 +
 simba/sandbox/vertical_video_concatenator.py | 66 +
 simba/sandbox/violin.py | 38 +
 simba/sandbox/visualize_networks.py | 132 +
 simba/sandbox/wald_wolfowitz.py | 39 +
 simba/sandbox/warp_affine.py | 78 +
 simba/sandbox/warp_numba.py | 151 +
 simba/sandbox/watermark.py | 71 +
 simba/sandbox/wilcoxon.py | 43 +
 simba/sandbox/yolo_torch_inference.py | 75 +
 simba/sandbox/yolo_torch_train.py | 0
 simba/sandbox/youden_j.py | 37 +
 290 files changed, 41871 insertions(+)
 create mode 100644 docs/nb/Untitled2.ipynb
 create mode 100644 simba/sandbox/CLAHE.py
 create mode 100644 simba/sandbox/Geometry_7.py
 create mode 100644 simba/sandbox/MitraFeatureExtractor.py
 create mode 100644 simba/sandbox/MitraFeatureExtractor.zip
 create mode 100644 simba/sandbox/ROI_analyzer.py
 create mode 100644 simba/sandbox/ROI_plotter.py
 create mode 100644 simba/sandbox/ROI_plotter_mp.py
 create mode 100644 simba/sandbox/abod.py
 create mode 100644 simba/sandbox/add_body_part.py
 create mode 100644 simba/sandbox/add_body_part.py.zip
 create mode 100644 simba/sandbox/adjusted_rand.py
 create mode 100644 simba/sandbox/adjusted_rand_score.py
 create mode 100644 simba/sandbox/advanced_interpolator.py
 create mode 100644 simba/sandbox/advanced_smoothing.py
 create mode 100644 simba/sandbox/angle_3pt.py
 create mode 100644 simba/sandbox/annotations_mitra.py
 create mode 100644 simba/sandbox/average_frm_popup.py
 create mode 100644 simba/sandbox/bar_chart.py
 create mode 100644 simba/sandbox/batch_video_to_greyscale.py
 create mode 100644 simba/sandbox/berger_parker.py
 create mode 100644 simba/sandbox/bg_remover_cuda.py
 create mode 100644 simba/sandbox/bg_remover_cupy.py
 create mode 100644 simba/sandbox/bg_remover_popup.py
 create mode 100644 simba/sandbox/bg_substraction.py
 create mode 100644 simba/sandbox/biweight_midcorrelation.py
 create mode 100644 simba/sandbox/blank_img.py
 create mode 100644 simba/sandbox/blank_vid.py
 create mode 100644 simba/sandbox/blurbox.py
 create mode 100644 simba/sandbox/bouts_df
 create mode 100644 simba/sandbox/brillouins_index.py
 create mode 100644 simba/sandbox/calc_N_degree_direction_switches.py
 create mode 100644 simba/sandbox/calinski_harabasz.py
 create mode 100644 simba/sandbox/cb_frame.py
 create mode 100644 simba/sandbox/change_img_file_format.py
 create mode 100644 simba/sandbox/change_n_jobs_scikit.py
 create mode 100644 simba/sandbox/circling_detector.py
 create mode 100644 simba/sandbox/circular_statistics.py
 create mode 100644 simba/sandbox/clean_sleap_filename.py
 create mode 100644 simba/sandbox/clip_multiple_videos_by_frame_numbers.py
 create mode 100644 simba/sandbox/clip_videos_by_frm.py
 create mode 100644 simba/sandbox/cochran_q.py
 create mode 100644 simba/sandbox/coco.py
 create mode 100644 simba/sandbox/cohens_kappa.py
 create mode 100644 simba/sandbox/colinear_features.py
 create mode 100644 simba/sandbox/color_filtering.py
 create mode 100644 simba/sandbox/concat_gpu.py
 create mode 100644 simba/sandbox/concordance_ratio.py
 create mode 100644 simba/sandbox/contrast.py
 create mode 100644 simba/sandbox/convert_to_bw.py
 create mode 100644 simba/sandbox/convert_to_mp4.py
 create mode 100644 simba/sandbox/convert_to_popup.py
 create mode 100644 simba/sandbox/convex_hull.py
 create mode 100644 simba/sandbox/convex_hull_area.py
 create mode 100644 simba/sandbox/corner_distance.py
 create mode 100644 simba/sandbox/count_values_in_range_gpu.py
 create mode 100644 simba/sandbox/create_average_cupy.py
 create mode 100644 simba/sandbox/create_average_frame_cuda.py
 create mode 100644 simba/sandbox/create_gif.py
 create mode 100644 simba/sandbox/create_import_pose_menu.py
 create mode 100644 simba/sandbox/create_shap_log.py
 create mode 100644 simba/sandbox/create_shap_log_3.py
 create mode 100644 simba/sandbox/cronbach_alpha.py
 create mode 100644 simba/sandbox/crossfade.py
 create mode 100644 simba/sandbox/cuda_median.py
 create mode 100644 simba/sandbox/cuda_rotate_video.py
 create mode 100644 simba/sandbox/cuda_sliding_descriptive_stats.py
 create mode 100644 simba/sandbox/cuda_sort.py
 create mode 100644 simba/sandbox/cumcount_plot.py
 create mode 100644 simba/sandbox/cuml_kmeans.py
 create mode 100644 simba/sandbox/czenakowski.py
 create mode 100644 simba/sandbox/data.npy
 create mode 100644 simba/sandbox/davis_bouldin.py
 create mode 100644 simba/sandbox/detect_scene_changes.py
 create mode 100644 simba/sandbox/directed_hausdorff.py
 create mode 100644 simba/sandbox/direction_two_bps.py
 create mode 100644 simba/sandbox/distance_velocity.py
 create mode 100644 simba/sandbox/distances.py
 create mode 100644 simba/sandbox/downsample_multiple_videos_popup.py
 create mode 100644 simba/sandbox/downsample_video_popup.py
 create mode 100644 simba/sandbox/dprime.py
 create mode 100644 simba/sandbox/dunn_index.py
 create mode 100644 simba/sandbox/egocentri_video_rotator_numba.py
 create mode 100644 simba/sandbox/egocentric_alig_pose_numba.py
 create mode 100644 simba/sandbox/egocentric_align.py
 create mode 100644 simba/sandbox/egocentric_align_1025.py
 create mode 100644 simba/sandbox/egocentric_align_cuda.py
 create mode 100644 simba/sandbox/egocentric_aligner.py
 create mode 100644 simba/sandbox/egocentric_aligner_.py
 create mode 100644 simba/sandbox/egocentric_aligner_1024.py
 create mode 100644 simba/sandbox/egocentric_alignment.py
 create mode 100644 simba/sandbox/egocentric_alignment_cuda.py
 create mode 100644 simba/sandbox/egocentric_video_dali.py
 create mode 100644 simba/sandbox/egocentric_video_rotation.py
 create mode 100644 simba/sandbox/egocentric_video_rotator_cuda.py
 create mode 100644 simba/sandbox/elliptic_envelope.py
 create mode 100644 simba/sandbox/entropy_of_directional_changes.py
 create mode 100644 simba/sandbox/eta_squared.py
 create mode 100644 simba/sandbox/euclidan_distance_cuda.py
 create mode 100644 simba/sandbox/euclidean_distances_cuda.py
 create mode 100644 simba/sandbox/ez_path_plot.py
 create mode 100644 simba/sandbox/fecal_boli.py
 create mode 100644 simba/sandbox/ffmpeg_progress_bar.py
 create mode 100644 simba/sandbox/fix_clahe.py
 create mode 100644 simba/sandbox/fleiss_kappa.py
 create mode 100644 simba/sandbox/freezing_detector.py
 create mode 100644 simba/sandbox/frm_rotator_cuda.py
 create mode 100644 simba/sandbox/gantt_plotly.py
 create mode 100644 simba/sandbox/gantt_update.py
 create mode 100644 simba/sandbox/geometry_9.py
 create mode 100644 simba/sandbox/get_convex_hull_cuda.py
 create mode 100644 simba/sandbox/gibbs_sampling.py
 create mode 100644 simba/sandbox/github_issues.json
 create mode 100644 simba/sandbox/github_issues_download.py
 create mode 100644 simba/sandbox/grangercausalitytests.py
 create mode 100644 simba/sandbox/graph.html
 create mode 100644 simba/sandbox/graph_flow.html
 create mode 100644 simba/sandbox/graph_katz.html
 create mode 100644 simba/sandbox/graph_pagerank.html
 create mode 100644 simba/sandbox/grid_transition_probabilities.py
 create mode 100644 simba/sandbox/grubbs_test.py
 create mode 100644 simba/sandbox/hartley_fmax.py
 create mode 100644 simba/sandbox/hausdorff.py
 create mode 100644 simba/sandbox/heading.py
 create mode 100644 simba/sandbox/horizontal_videos_concat.py
 create mode 100644 simba/sandbox/img_conv.py
 create mode 100644 simba/sandbox/img_kmeans.py
 create mode 100644 simba/sandbox/img_stack_brightness.py
 create mode 100644 simba/sandbox/img_stack_mse.py
 create mode 100644 simba/sandbox/img_stack_to_bw.py
 create mode 100644 simba/sandbox/imgs_to_grayscale_cupy.py
 create mode 100644 simba/sandbox/imgs_to_greyscale_cuda.py
 create mode 100644 simba/sandbox/increase_fps.py
 create mode 100644 simba/sandbox/inset_overlay_video.py
 create mode 100644 simba/sandbox/interpolate.py
 create mode 100644 simba/sandbox/is_inside_circle.py
 create mode 100644 simba/sandbox/is_inside_polygon.py
 create mode 100644 simba/sandbox/is_inside_rectangle.py
 create mode 100644 simba/sandbox/isolation_forest.py
 create mode 100644 simba/sandbox/jaccard_distance.py
 create mode 100644 simba/sandbox/joint_plotter.py
 create mode 100644 simba/sandbox/kalman.py
 create mode 100644 simba/sandbox/keyPoi.py
 create mode 100644 simba/sandbox/kinetic_energy.py
 create mode 100644 simba/sandbox/knn.py
 create mode 100644 simba/sandbox/kumar.py
 create mode 100644 simba/sandbox/labelme_to_dlc.py
 create mode 100644 simba/sandbox/lbp.py
 create mode 100644 simba/sandbox/lcs.py
 create mode 100644 simba/sandbox/levenshtein.py
 create mode 100644 simba/sandbox/line_locate_point.py
 create mode 100644 simba/sandbox/line_plot.py
 create mode 100644 simba/sandbox/line_plot_plotly.py
 create mode 100644 simba/sandbox/linear_fretchet.py
 create mode 100644 simba/sandbox/linearity_index.py
 create mode 100644 simba/sandbox/linestring_path.py
 create mode 100644 simba/sandbox/madmedianrule.py
 create mode 100644 simba/sandbox/mahalanobis.py
 create mode 100644 simba/sandbox/make_path_plot.py
 create mode 100644 simba/sandbox/manhattan_distance.py
 create mode 100644 simba/sandbox/margalef_diversification_index.py
 create mode 100644 simba/sandbox/mcnamar.py
 create mode 100644 simba/sandbox/mean_squared_jerk.py
 create mode 100644 simba/sandbox/menhinicks_index.py
 create mode 100644 simba/sandbox/mitra_appand_additional.py
 create mode 100644 simba/sandbox/mitra_bar_graph.py
 create mode 100644 simba/sandbox/mitra_bg_remover.py
 create mode 100644 simba/sandbox/mitra_circling_detector.py
 create mode 100644 simba/sandbox/mitra_correlation_checks.py
 create mode 100644 simba/sandbox/mitra_downsample_data.py
 create mode 100644 simba/sandbox/mitra_freezing_detector.py
 create mode 100644 simba/sandbox/mitra_frequency_grapher.py
 create mode 100644 simba/sandbox/mitra_frequency_grapher.zip
 create mode 100644 simba/sandbox/mitra_laying_down_analyzer.py
 create mode 100644 simba/sandbox/mitra_style_annotation_appender.py
 create mode 100644 simba/sandbox/mitra_tail_analyzer.py
 create mode 100644 simba/sandbox/mitra_timebins.py
 create mode 100644 simba/sandbox/momentum.py
 create mode 100644 simba/sandbox/morans.py
 create mode 100644 simba/sandbox/mosaic.py
 create mode 100644 simba/sandbox/multiframe_is_shape_covered.py
 create mode 100644 simba/sandbox/network.py
 create mode 100644 simba/sandbox/new_outlier_corrector_location.py
 create mode 100644 simba/sandbox/normalized_cross_correlation.py
 create mode 100644 simba/sandbox/opencv_cuda.py
 create mode 100644 simba/sandbox/optimal_font_scale.py
 create mode 100644 simba/sandbox/ordinal_clf.py
 create mode 100644 simba/sandbox/outliers_tietjen.py
 create mode 100644 simba/sandbox/path_curvature.py
 create mode 100644 simba/sandbox/path_geometry.py
 create mode 100644 simba/sandbox/path_geometry_2.py
 create mode 100644 simba/sandbox/path_plots.py
 create mode 100644 simba/sandbox/paths.py
 create mode 100644 simba/sandbox/pct_counts_in_top_N.py
 create mode 100644 simba/sandbox/piotr_120324 2.py.zip
 create mode 100644 simba/sandbox/piotr_120324 3.py.zip
 create mode 100644 simba/sandbox/piotr_120324 4.py.zip
 create mode 100644 simba/sandbox/piotr_120324.py
 create mode 100644 simba/sandbox/piotr_120324.py.zip
 create mode 100644 simba/sandbox/platea_video.py
 create mode 100644 simba/sandbox/plotly_gantt.py
 create mode 100644 simba/sandbox/plug_bouts.py
 create mode 100644 simba/sandbox/profiler.py
 create mode 100644 simba/sandbox/px_to_mm.py
 create mode 100644 simba/sandbox/pypi_sizes.py
 create mode 100644 simba/sandbox/querter_circle.py
 create mode 100644 simba/sandbox/quickselect.py
 create mode 100644 simba/sandbox/read_boris.py
 create mode 100644 simba/sandbox/read_boris_annotation_files.py
 create mode 100644 simba/sandbox/read_img.py
 create mode 100644 simba/sandbox/read_video_info.py
 create mode 100644 simba/sandbox/redis.py
 create mode 100644 simba/sandbox/reduce_features.py
 create mode 100644 simba/sandbox/reduce_features.zip
 create mode 100644 simba/sandbox/reduce_imge_stack_size.py
 create mode 100644 simba/sandbox/relative_risk.py
 create mode 100644 simba/sandbox/reverse_popup.py
 create mode 100644 simba/sandbox/reverse_videos.py
 create mode 100644 simba/sandbox/roi_definition_csvs_to_h5.py
 create mode 100644 simba/sandbox/roi_definition_csvs_to_h5.py.zip
 create mode 100644 simba/sandbox/roi_to_yolo.py
 create mode 100644 simba/sandbox/rotate image.py
 create mode 100644 simba/sandbox/rotate.py
 create mode 100644 simba/sandbox/rotate_example_nb_egocentric.py
 create mode 100644 simba/sandbox/runs_test.py
 create mode 100644 simba/sandbox/seekable.py
 create mode 100644 simba/sandbox/segment_image_horizontal.py
 create mode 100644 simba/sandbox/sequential_lag_analysis.py
 create mode 100644 simba/sandbox/shannon_diversity_index.py
 create mode 100644 simba/sandbox/shap_2_nb.py
 create mode 100644 simba/sandbox/shift_geometries.py
 create mode 100644 simba/sandbox/siegel_tukey.py
 create mode 100644 simba/sandbox/silhouette_score.py
 create mode 100644 simba/sandbox/simpson_diversity_index.py
 create mode 100644 simba/sandbox/slic.py
 create mode 100644 simba/sandbox/sliding_autoc.py
 create mode 100644 simba/sandbox/sliding_circular_hotspots.py
 create mode 100644 simba/sandbox/sliding_circular_mean.py
 create mode 100644 simba/sandbox/sliding_circular_range.py
 create mode 100644 simba/sandbox/sliding_circular_range_test.py
 create mode 100644 simba/sandbox/sliding_circular_std.py
 create mode 100644 simba/sandbox/sliding_crosscorrelation.py
 create mode 100644 simba/sandbox/sliding_displacement.py
 create mode 100644 simba/sandbox/sliding_iqr.py
 create mode 100644 simba/sandbox/sliding_linearity_index.py
 create mode 100644 simba/sandbox/sliding_mean.py
 create mode 100644 simba/sandbox/sliding_min.py
 create mode 100644 simba/sandbox/sliding_rayleigh_z.py
 create mode 100644 simba/sandbox/sliding_resultant_vector_length.py
 create mode 100644 simba/sandbox/sliding_spatial_density.py
 create mode 100644 simba/sandbox/sliding_spearmans_rank.py
 create mode 100644 simba/sandbox/sliding_std.py
 create mode 100644 simba/sandbox/sliding_sum.py
 create mode 100644 simba/sandbox/smoothing.py
 create mode 100644 simba/sandbox/sorensen_dice_coefficient.py
 create mode 100644 simba/sandbox/spatial_density_trajectory_points.py
 create mode 100644 simba/sandbox/spontaneous_alternation_calculator.py
 create mode 100644 simba/sandbox/spontaneuous_alternation_plotter.py
 create mode 100644 simba/sandbox/spontanous_alternations.py
 create mode 100644 simba/sandbox/stabalize.py
 create mode 100644 simba/sandbox/structural_similarity_index.py
 create mode 100644 simba/sandbox/summetry_index.py
 create mode 100644 simba/sandbox/superimpose_elapsed_time.py
 create mode 100644 simba/sandbox/superimpose_frame_count.py
 create mode 100644 simba/sandbox/superimpose_frm_cnt.py
 create mode 100644 simba/sandbox/superimpose_popups.py
 create mode 100644 simba/sandbox/superimpose_text.py
 create mode 100644 simba/sandbox/superpixels.py
 create mode 100644 simba/sandbox/total_variation_distance.py
 create mode 100644 simba/sandbox/train_yolo_2.py
 create mode 100644 simba/sandbox/train_yolo_rois.py
 create mode 100644 simba/sandbox/two_fish_feature_extractor_040924.py
 create mode 100644 simba/sandbox/two_fish_feature_extractor_040924.py.zip
 create mode 100644 simba/sandbox/unsupervised/clf_bout_aggregator.py
 create mode 100644 simba/sandbox/unsupervised/cluster_frequentist_stats.py
 create mode 100644 simba/sandbox/unsupervised/simba_hdbscan.py
 create mode 100644 simba/sandbox/unsupervised/simba_umap.py
 create mode 100644 simba/sandbox/unsupervised_lof.py
 create mode 100644 simba/sandbox/unsupervised_outliers.py
 create mode 100644 simba/sandbox/vertical_video_concatenator.py
 create mode 100644 simba/sandbox/violin.py
 create mode 100644 simba/sandbox/visualize_networks.py
 create mode 100644 simba/sandbox/wald_wolfowitz.py
 create mode 100644 simba/sandbox/warp_affine.py
 create mode 100644 simba/sandbox/warp_numba.py
 create mode 100644 simba/sandbox/watermark.py
 create mode 100644 simba/sandbox/wilcoxon.py
 create mode 100644 simba/sandbox/yolo_torch_inference.py
 create mode 100644 simba/sandbox/yolo_torch_train.py
 create mode 100644 simba/sandbox/youden_j.py

diff --git a/docs/nb/Untitled2.ipynb b/docs/nb/Untitled2.ipynb
new file mode 100644
index 000000000..13260d46c
--- /dev/null
+++ b/docs/nb/Untitled2.ipynb
@@ -0,0 +1,23 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c0cecda1-8604-44a4-9b9c-8e8bade04950",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "",
+   "name": ""
+  },
+  "language_info": {
+   "name": ""
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/simba/sandbox/CLAHE.py b/simba/sandbox/CLAHE.py
new file mode 100644
index 000000000..bc40b5b37
--- /dev/null
+++ b/simba/sandbox/CLAHE.py
@@ -0,0 +1,89 @@
+import threading
+from tkinter import *
+
+from simba.utils.enums import Keys, Links, Options
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, FileSelect, FolderSelect
+from simba.utils.checks import check_file_exist_and_readable, check_if_dir_exists
+from simba.video_processors.video_processing import clahe_enhance_video
+from simba.utils.read_write import find_files_of_filetypes_in_directory
+from simba.utils.printing import stdout_success, SimbaTimer
+
+
+class CLAHEPopUp(PopUpMixin):
+    def __init__(self):
+        super().__init__(title="CLAHE VIDEO CONVERSION")
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - Contrast Limited Adaptive Histogram Equalization", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", file_types=[("VIDEO", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        run_single_video_btn = Button(single_video_frm, text="Apply CLAHE", command=lambda: self.run_single_video())
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - Contrast Limited Adaptive Histogram Equalization", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", lblwidth=25)
+        run_multiple_btn = Button(multiple_videos_frm, text="RUN VIDEO DIRECTORY", command=lambda: self.run_directory(), fg="blue")
+
+        single_video_frm.grid(row=0, column=0, sticky=NW)
+        self.selected_video.grid(row=0, column=0, sticky=NW)
+        run_single_video_btn.grid(row=1, column=0, sticky=NW)
+
+        multiple_videos_frm.grid(row=1, column=0, sticky=NW)
+        self.selected_dir.grid(row=0, column=0, sticky=NW)
+        run_multiple_btn.grid(row=1, column=0, sticky=NW)
+
+    def run_single_video(self):
+        selected_video = self.selected_video.file_path
+        check_file_exist_and_readable(file_path=selected_video)
+        # Pass the callable and its kwargs to Thread. The previous form,
+        # Thread(target=clahe_enhance_video(file_path=...)), called the function
+        # immediately on the UI thread and handed Thread a target of None.
+        threading.Thread(target=clahe_enhance_video, kwargs={'file_path': selected_video}).start()
+
+    def run_directory(self):
+        timer = SimbaTimer(start=True)
+        video_dir = self.selected_dir.folder_path
+        check_if_dir_exists(in_dir=video_dir, source=self.__class__.__name__)
+        self.video_paths = find_files_of_filetypes_in_directory(directory=video_dir, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True)
+        for file_path in self.video_paths:
+            # Same fix as above: hand the callable to the thread rather than calling it.
+            worker = threading.Thread(target=clahe_enhance_video, kwargs={'file_path': file_path})
+            worker.start()
+            worker.join()  # process one video at a time so the elapsed-time message below is accurate
+        timer.stop_timer()
+        stdout_success(msg=f'CLAHE enhanced {len(self.video_paths)} video(s)', elapsed_time=timer.elapsed_time_str)
+
+
+# import cv2  # required if the interactive demo below is uncommented
+#
+# # Function to update CLAHE
+# def update_clahe(x):
+#     global img, clahe
+#     clip_limit = cv2.getTrackbarPos('Clip Limit', 'CLAHE') / 10.0  # Scale the trackbar value
+#     tile_size = cv2.getTrackbarPos('Tile Size', 'CLAHE')
+#     if tile_size % 2 == 0:
+#         tile_size += 1  # Ensure tile size is odd
+#     clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=(tile_size, tile_size))
+#     img_clahe = clahe.apply(img)
+#     cv2.imshow('CLAHE', img_clahe)
+#
+# # Load an image
+# img = cv2.imread('/Users/simon/Downloads/PXL_20240429_222923838.jpg', cv2.IMREAD_GRAYSCALE)
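A note on the threading pattern fixed above: Thread(target=fn(...)) evaluates fn(...) on the calling thread and passes its return value (None) as the target, so no work happens on the new thread. A minimal, self-contained sketch of the difference (plain Python; the work() function and its argument are invented for illustration):

import threading
import time

def work(name: str) -> None:
    time.sleep(0.1)
    print(f'done: {name}')

# Wrong: work('a') runs synchronously here; the Thread receives target=None.
# threading.Thread(target=work('a')).start()

# Right: the callable and its arguments are handed to the thread separately.
t = threading.Thread(target=work, kwargs={'name': 'a'})
t.start()
t.join()  # wait for completion before, e.g., reporting elapsed time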
+# # Create a window
+# cv2.namedWindow('CLAHE', cv2.WINDOW_NORMAL)
+#
+# # Initialize the clip limit trackbar
+# cv2.createTrackbar('Clip Limit', 'CLAHE', 10, 300, update_clahe)
+#
+# # Initialize the tile size trackbar
+# cv2.createTrackbar('Tile Size', 'CLAHE', 8, 64, update_clahe)
+#
+# # Apply CLAHE with initial parameters
+# clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(8, 8))
+# img_clahe = clahe.apply(img)
+# cv2.imshow('Original', img)
+# cv2.imshow('CLAHE', img_clahe)
+#
+# cv2.waitKey(0)
+# cv2.destroyAllWindows()
diff --git a/simba/sandbox/Geometry_7.py b/simba/sandbox/Geometry_7.py
new file mode 100644
index 000000000..bc6f343e0
--- /dev/null
+++ b/simba/sandbox/Geometry_7.py
@@ -0,0 +1,99 @@
+import os
+import numpy as np
+
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.mixins.image_mixin import ImageMixin
+from simba.utils.read_write import read_df
+from simba.plotting.geometry_plotter import GeometryPlotter
+
+CONFIG_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/project_config.ini'
+VIDEO_NAME = '2022-06-20_NOB_DOT_4'
+
+cfg = ConfigReader(config_path=CONFIG_PATH, read_video_info=False)
+data = read_df(os.path.join(cfg.outlier_corrected_dir, VIDEO_NAME + f'.{cfg.file_type}'), file_type=cfg.file_type)
+video_path = os.path.join(cfg.video_dir, VIDEO_NAME + '.mp4')
+animal_df = data[[x for x in data.columns if x in cfg.animal_bp_dict['Animal_1']['X_bps'] + cfg.animal_bp_dict['Animal_1']['Y_bps']]]
+animal_data = animal_df.drop(['Tail_end_x', 'Tail_end_y'], axis=1).values.reshape(-1, 7, 2).astype(int)  # np.int was removed in NumPy >= 1.24; use the builtin int
+
+animal_polygons = GeometryMixin.bodyparts_to_polygon(data=animal_data)[:300]
+geometry_plotter = GeometryPlotter(config_path=CONFIG_PATH,
+                                   geometries=[animal_polygons],
+                                   video_name=VIDEO_NAME,
+                                   thickness=10,
+                                   bg_opacity=0.2)
+geometry_plotter.run()
+
+# animal_rectangles = GeometryMixin().multiframe_minimum_rotated_rectangle(shapes=animal_polygons)
+# geometry_plotter = GeometryPlotter(config_path=CONFIG_PATH,
+#                                    geometries=[animal_rectangles],
+#                                    video_name=VIDEO_NAME,
+#                                    thickness=10)
+# geometry_plotter.run()
+
+# animal_polygons_buffered = GeometryMixin.bodyparts_to_polygon(data=animal_data[:300], parallel_offset=100, pixels_per_mm=1.88)
+# geometry_plotter = GeometryPlotter(config_path=CONFIG_PATH,
+#                                    geometries=[animal_polygons],
+#                                    video_name=VIDEO_NAME,
+#                                    thickness=10,
+#                                    bg_opacity=1.0)
+# geometry_plotter.run()
+
+imgs = ImageMixin().read_img_batch_from_video(video_path=video_path, start_frm=0, end_frm=299)
+imgs = np.stack(list(imgs.values()))
+# animal_polygons_buffered = np.array(animal_polygons_buffered).T.reshape(-1, 1)
+# sliced_images = ImageMixin().slice_shapes_in_imgs(imgs=imgs, shapes=animal_polygons_buffered)
+# ImageMixin.img_stack_to_video(imgs=sliced_images,
+#                               save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/geometry_visualization/sliced.mp4',
+#                               fps=30,
+#                               verbose=True)
+
+# animal_polygons_tighter = np.array(animal_polygons).T.reshape(-1, 1)
+# sliced_images = ImageMixin().slice_shapes_in_imgs(imgs=imgs, shapes=animal_polygons_tighter)
+# ImageMixin.img_stack_to_video(imgs=sliced_images,
+#                               save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/geometry_visualization/sliced_tighter.mp4',
+#                               fps=30,
+#                               verbose=True)
+
+animal_head = animal_df[['Nose_x', 'Nose_y', 'Ear_left_x', 'Ear_left_y', 'Ear_right_x', 'Ear_right_y']].values.reshape(-1, 3, 2).astype(int)  # same np.int fix as above
+# animal_head_polygons = GeometryMixin.bodyparts_to_polygon(data=animal_head, parallel_offset=100, pixels_per_mm=1.88)[:300]
+# animal_head_polygons = np.array(animal_head_polygons).T.reshape(-1, 1)
+# sliced_images = ImageMixin().slice_shapes_in_imgs(imgs=imgs, shapes=animal_head_polygons)
+# ImageMixin.img_stack_to_video(imgs=sliced_images,
+#                               save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/geometry_visualization/sliced_head.mp4',
+#                               fps=30,
+#                               verbose=True)
+
+# animal_head_polygons = GeometryMixin.bodyparts_to_polygon(data=animal_head, parallel_offset=25, pixels_per_mm=1.88)[:300]
+# animal_head_polygons = np.array(animal_head_polygons).T.reshape(-1, 1)
+# sliced_images = ImageMixin().slice_shapes_in_imgs(imgs=imgs, shapes=animal_head_polygons)
+# ImageMixin.img_stack_to_video(imgs=sliced_images,
+#                               save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/geometry_visualization/sliced_head_tighter_even.mp4',
+#                               fps=30,
+#                               verbose=True)
+
+# animal_head_polygons = GeometryMixin.bodyparts_to_polygon(data=animal_head)[:300]
+# head_centers = GeometryMixin.get_center(shape=animal_head_polygons)
+# head_circles = GeometryMixin.bodyparts_to_circle(data=head_centers, parallel_offset=100, pixels_per_mm=1.88)
+# head_circles = np.array(head_circles).T.reshape(-1, 1)
+# sliced_images = ImageMixin().slice_shapes_in_imgs(imgs=imgs, shapes=head_circles)
+# ImageMixin.img_stack_to_video(imgs=sliced_images,
+#                               save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/geometry_visualization/sliced_head_circles.mp4',
+#                               fps=30,
+#                               verbose=True)
+
+# head_circles = GeometryMixin.bodyparts_to_circle(data=head_centers, parallel_offset=50, pixels_per_mm=1.88)
+# head_circles = np.array(head_circles).T.reshape(-1, 1)
+# sliced_images = ImageMixin().slice_shapes_in_imgs(imgs=imgs, shapes=head_circles)
+# ImageMixin.img_stack_to_video(imgs=sliced_images,
+#                               save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/geometry_visualization/sliced_head_circles_tighter.mp4',
+#                               fps=30,
+#                               verbose=True)
+
+# video_bg_substraction_mp(video_path=video_path, save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/geometry_visualization/bg_removed.mp4')
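For orientation, bodyparts_to_polygon above turns each frame's body-part coordinates into a polygon around the animal. A rough stand-in using shapely directly, to show the underlying geometry (the coordinates are made up and this is not SimBA's implementation):

import numpy as np
from shapely.geometry import MultiPoint

# One frame of 7 body-part (x, y) coordinates, as in animal_data above.
frame_pts = np.array([[10, 10], [20, 8], [30, 15], [25, 30], [15, 32], [8, 22], [18, 20]])
hull = MultiPoint(frame_pts).convex_hull  # convex polygon enclosing the body parts
print(hull.area, hull.length)             # area and perimeter in pixel units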
diff --git a/simba/sandbox/MitraFeatureExtractor.py b/simba/sandbox/MitraFeatureExtractor.py
new file mode 100644
index 000000000..675d43609
--- /dev/null
+++ b/simba/sandbox/MitraFeatureExtractor.py
@@ -0,0 +1,194 @@
+import argparse
+import os
+from itertools import product
+from typing import Union
+
+import numpy as np
+import pandas as pd
+from numba.typed import List
+
+from simba.mixins.abstract_classes import AbstractFeatureExtraction
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin
+from simba.mixins.statistics_mixin import Statistics
+from simba.mixins.circular_statistics import CircularStatisticsMixin
+from simba.feature_extractors.perimeter_jit import jitted_hull
+from simba.utils.checks import check_if_filepath_list_is_empty, check_all_file_names_are_represented_in_video_log
+from simba.utils.read_write import read_df, get_fn_ext, write_df
+from simba.utils.printing import SimbaTimer, stdout_success  # SimbaTimer/stdout_success live in printing, not read_write
+
+NOSE = 'nose'
+LEFT_SIDE = 'left_side'
+RIGHT_SIDE = 'right_side'
+LEFT_EAR = 'left_ear'
+RIGHT_EAR = 'right_ear'
+CENTER = 'center'
+TAIL_BASE = 'tail_base'
+TAIL_CENTER = 'tail_center'
+TAIL_TIP = 'tail_tip'
+
+TIME_WINDOWS = np.array([0.25, 0.5, 1.0, 2.0])
+GEOMETRY_FEATURES = ['HULL_LENGTH', 'HULL_WIDTH', 'HULL_AREA', 'BODY_AREA', 'LOWER_BODY_AREA', 'UPPER_BODY_AREA', 'HEAD_AREA', 'LEFT_BODY_AREA', 'RIGHT_BODY_AREA', 'TAIL_LENGTH', 'EAR_DISTANCE']
+
+
+def win_cols(prefix: str) -> list:
+    # Column names for the four TIME_WINDOWS, suffixed in milliseconds (0.25s -> 250, ..., 2.0s -> 2000).
+    return [f'{prefix}_{ms}' for ms in (250, 500, 1000, 2000)]
+
+
+class MitraFeatureExtractor(ConfigReader,
+                            AbstractFeatureExtraction):
+    def __init__(self,
+                 config_path: Union[str, os.PathLike]):
+
+        ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False)
+        check_if_filepath_list_is_empty(filepaths=self.outlier_corrected_paths, error_msg=f'No data files found in {self.outlier_corrected_dir} directory.')
+        check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.outlier_corrected_paths)
+
+    def run(self):
+        for file_cnt, file_path in enumerate(self.outlier_corrected_paths):
+            df = read_df(file_path=file_path, file_type=self.file_type)
+            results = pd.DataFrame()
+            video_timer = SimbaTimer(start=True)
+            _, video_name, _ = get_fn_ext(filepath=file_path)
+            save_path = os.path.join(self.features_dir, video_name + f'.{self.file_type}')
+            print(f'Featurizing video {video_name} ... (Video {file_cnt+1}/{len(self.outlier_corrected_paths)})')
+            _, px_per_mm, fps = self.read_video_info(video_name=video_name)
+            shifted_ = df.shift(periods=1).combine_first(df)
+            nose_arr = df[[f'{NOSE}_x', f'{NOSE}_y']].values.astype(np.float32)
+            p_arr = df[self.animal_bp_dict['Animal_1']['P_bps']].values.astype(np.float32)
+            tailbase_arr = df[[f'{TAIL_BASE}_x', f'{TAIL_BASE}_y']].values.astype(np.float32)
+            left_ear_arr = df[[f'{LEFT_EAR}_x', f'{LEFT_EAR}_y']].values.astype(np.float32)
+            right_ear_arr = df[[f'{RIGHT_EAR}_x', f'{RIGHT_EAR}_y']].values.astype(np.float32)
+            center_arr = df[[f'{CENTER}_x', f'{CENTER}_y']].values.astype(np.float32)
+            lat_left_arr = df[[f'{LEFT_SIDE}_x', f'{LEFT_SIDE}_y']].values.astype(np.float32)
+            lat_right_arr = df[[f'{RIGHT_SIDE}_x', f'{RIGHT_SIDE}_y']].values.astype(np.float32)
+            tail_center_arr = df[[f'{TAIL_CENTER}_x', f'{TAIL_CENTER}_y']].values.astype(np.float32)
+            tail_tip_arr = df[[f'{TAIL_TIP}_x', f'{TAIL_TIP}_y']].values.astype(np.float32)
+            animal_hull_arr = df[[f'{LEFT_EAR}_x', f'{LEFT_EAR}_y', f'{RIGHT_EAR}_x', f'{RIGHT_EAR}_y', f'{NOSE}_x', f'{NOSE}_y', f'{LEFT_SIDE}_x', f'{LEFT_SIDE}_y', f'{RIGHT_SIDE}_x', f'{RIGHT_SIDE}_y', f'{TAIL_BASE}_x', f'{TAIL_BASE}_y']].values.astype(np.float32).reshape(len(df), 6, 2)
+            animal_head_arr = df[[f'{LEFT_EAR}_x', f'{LEFT_EAR}_y', f'{RIGHT_EAR}_x', f'{RIGHT_EAR}_y', f'{NOSE}_x', f'{NOSE}_y']].values.astype(np.float32).reshape(len(df), 3, 2)
+            animal_body_arr = df[[f'{LEFT_EAR}_x', f'{LEFT_EAR}_y', f'{RIGHT_EAR}_x', f'{RIGHT_EAR}_y', f'{LEFT_SIDE}_x', f'{LEFT_SIDE}_y', f'{RIGHT_SIDE}_x', f'{RIGHT_SIDE}_y', f'{TAIL_BASE}_x', f'{TAIL_BASE}_y']].values.astype(np.float32).reshape(len(df), 5, 2)
+            animal_lower_body_arr = df[[f'{LEFT_SIDE}_x', f'{LEFT_SIDE}_y', f'{RIGHT_SIDE}_x', f'{RIGHT_SIDE}_y', f'{TAIL_BASE}_x', f'{TAIL_BASE}_y']].values.astype(np.float32).reshape(len(df), 3, 2)
+            animal_upper_body_arr = df[[f'{LEFT_EAR}_x', f'{LEFT_EAR}_y', f'{RIGHT_EAR}_x', f'{RIGHT_EAR}_y', f'{NOSE}_x', f'{NOSE}_y', f'{LEFT_SIDE}_x', f'{LEFT_SIDE}_y', f'{RIGHT_SIDE}_x', f'{RIGHT_SIDE}_y']].values.astype(np.float32).reshape(len(df), 5, 2)
+            left_body_arr = df[[f'{LEFT_EAR}_x', f'{LEFT_EAR}_y', f'{NOSE}_x', f'{NOSE}_y', f'{LEFT_SIDE}_x', f'{LEFT_SIDE}_y', f'{TAIL_BASE}_x', f'{TAIL_BASE}_y', f'{CENTER}_x', f'{CENTER}_y']].values.astype(np.float32).reshape(len(df), 5, 2)
+            right_body_arr = df[[f'{RIGHT_EAR}_x', f'{RIGHT_EAR}_y', f'{NOSE}_x', f'{NOSE}_y', f'{RIGHT_SIDE}_x', f'{RIGHT_SIDE}_y', f'{TAIL_BASE}_x', f'{TAIL_BASE}_y', f'{CENTER}_x', f'{CENTER}_y']].values.astype(np.float32).reshape(len(df), 5, 2)
+            direction_degrees = CircularStatisticsMixin().direction_three_bps(nose_loc=nose_arr, left_ear_loc=left_ear_arr, right_ear_loc=right_ear_arr).astype(np.float32)
+
+            # GEOMETRY FEATURES
+            print('Compute geometry features...')
+            results['GEOMETRY_FRAME_HULL_LENGTH'] = FeatureExtractionMixin.framewise_euclidean_distance(location_1=nose_arr, location_2=tailbase_arr, px_per_mm=px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_HULL_WIDTH'] = FeatureExtractionMixin.framewise_euclidean_distance(location_1=lat_left_arr, location_2=lat_right_arr, px_per_mm=px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_HULL_AREA'] = (jitted_hull(points=animal_hull_arr, target='area') / px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_BODY_AREA'] = (jitted_hull(points=animal_body_arr, target='area') / px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_LOWER_BODY_AREA'] = (jitted_hull(points=animal_lower_body_arr, target='area') / px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_UPPER_BODY_AREA'] = (jitted_hull(points=animal_upper_body_arr, target='area') / px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_HEAD_AREA'] = (jitted_hull(points=animal_head_arr, target='area') / px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_LEFT_BODY_AREA'] = (jitted_hull(points=left_body_arr, target='area') / px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_RIGHT_BODY_AREA'] = (jitted_hull(points=right_body_arr, target='area') / px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_TAIL_LENGTH'] = FeatureExtractionMixin.framewise_euclidean_distance(location_1=tailbase_arr, location_2=tail_tip_arr, px_per_mm=px_per_mm).astype(np.int32)
+            results['GEOMETRY_FRAME_EAR_DISTANCE'] = FeatureExtractionMixin.framewise_euclidean_distance(location_1=left_ear_arr, location_2=right_ear_arr, px_per_mm=px_per_mm).astype(np.int32)
+
+            for time, feature in product(TIME_WINDOWS, GEOMETRY_FEATURES):
+                results[f'GEOMETRY_MEAN_{feature}_{time}'] = results[f'GEOMETRY_FRAME_{feature}'].rolling(int(time * fps), min_periods=1).mean().fillna(0).astype(np.int32)
+                results[f'GEOMETRY_VAR_{feature}_{time}'] = results[f'GEOMETRY_FRAME_{feature}'].rolling(int(time * fps), min_periods=1).var().fillna(0).astype(np.float32)
+                results[f'GEOMETRY_SUM_{feature}_{time}'] = results[f'GEOMETRY_FRAME_{feature}'].rolling(int(time * fps), min_periods=1).sum().fillna(0).astype(np.int32)
+
+            for feature in GEOMETRY_FEATURES:
+                sliding_z = pd.DataFrame(Statistics.sliding_z_scores(data=results[f'GEOMETRY_FRAME_{feature}'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=int(fps)), columns=win_cols(f'GEOMETRY_{feature}_SLIDING_Z_SCORE'))
+                sliding_mad_median = pd.DataFrame(Statistics.sliding_mad_median_rule(data=results[f'GEOMETRY_FRAME_{feature}'].values.astype(np.float32), k=0.5, time_windows=TIME_WINDOWS, fps=fps), columns=win_cols(f'GEOMETRY_{feature}_SLIDING_MAD_MEDIAN_0.5'))
+                results = pd.concat([results, sliding_z, sliding_mad_median], axis=1)
+
+            for feature in GEOMETRY_FEATURES:
+                statistics = List(['mac', 'rms'])
+                x = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=results[f'GEOMETRY_FRAME_{feature}'].values.astype(np.float32), window_sizes=TIME_WINDOWS, sample_rate=int(fps), statistics=statistics)
+                for i in range(x.shape[0]):
+                    v = pd.DataFrame(x[i], columns=win_cols(f'GEOMETRY_{feature}_{statistics[i]}'))
+                    results = pd.concat([results, v], axis=1)
+
+            upper_lower_body_size_correlations = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['GEOMETRY_FRAME_UPPER_BODY_AREA'].values.astype(np.float32), sample_2=results['GEOMETRY_FRAME_LOWER_BODY_AREA'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=win_cols('GEOMETRY_UPPER_LOWER_BODY_SIZE_SPEARMAN')).astype(np.float32)
+            hull_head_correlations = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['GEOMETRY_FRAME_HULL_AREA'].values.astype(np.float32), sample_2=results['GEOMETRY_FRAME_HEAD_AREA'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=win_cols('GEOMETRY_HULL_HEAD_SIZE_SPEARMAN')).astype(np.float32)  # normalizes the one-off 'GEOMETRY_HULL_HEAD_BODY_SIZE_SPEARMAN_500' name
+            hull_tail_length_correlations = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['GEOMETRY_FRAME_HULL_LENGTH'].values.astype(np.float32), sample_2=results['GEOMETRY_FRAME_TAIL_LENGTH'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=win_cols('GEOMETRY_HULL_TAIL_LENGTH_SPEARMAN')).astype(np.float32)
+            left_body_right_body_correlations = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['GEOMETRY_FRAME_LEFT_BODY_AREA'].values.astype(np.float32), sample_2=results['GEOMETRY_FRAME_RIGHT_BODY_AREA'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=win_cols('GEOMETRY_LEFT_RIGHT_BODY_SPEARMAN')).astype(np.float32)
+            results = pd.concat([results, upper_lower_body_size_correlations, hull_head_correlations, hull_tail_length_correlations, left_body_right_body_correlations], axis=1)
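The geometry block above repeatedly converts a per-frame series into rolling per-window aggregates using the window sizes in TIME_WINDOWS. A toy sketch of that pattern in plain pandas (the series values and column name here are invented):

import numpy as np
import pandas as pd

fps = 30
series = pd.Series(np.random.randint(50, 100, size=300), name='GEOMETRY_FRAME_HULL_AREA')
for secs in (0.25, 0.5, 1.0, 2.0):
    win = max(1, int(secs * fps))                 # window length in frames
    roll = series.rolling(win, min_periods=1)     # min_periods=1 avoids leading NaNs
    print(secs, roll.mean().iloc[-1], roll.var().iloc[-1], roll.sum().iloc[-1])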
+            # CIRCULAR FEATURES
+            print('Compute circular features...')
+            results['CIRCULAR_FRAME_HULL_3POINT_ANGLE'] = FeatureExtractionMixin.angle3pt_serialized(data=np.hstack([nose_arr, center_arr, tailbase_arr]))
+            results['CIRCULAR_FRAME_TAIL_3POINT_ANGLE'] = FeatureExtractionMixin.angle3pt_serialized(data=np.hstack([tailbase_arr, tail_center_arr, tail_tip_arr]))
+            results['CIRCULAR_FRAME_HEAD_3POINT_ANGLE'] = FeatureExtractionMixin.angle3pt_serialized(data=np.hstack([left_ear_arr, nose_arr, right_ear_arr]))
+            results['CIRCULAR_INSTANTANEOUS_ANGULAR_VELOCITY'] = CircularStatisticsMixin.instantaneous_angular_velocity(data=direction_degrees, bin_size=1)
+            angular_difference = pd.DataFrame(CircularStatisticsMixin.sliding_angular_diff(data=direction_degrees, time_windows=TIME_WINDOWS, fps=int(fps)), columns=win_cols('CIRCULAR_HEAD_DIRECTION_ANGULAR_DIFFERENCE'))
+            rao_spacing = pd.DataFrame(CircularStatisticsMixin.sliding_rao_spacing(data=direction_degrees, time_windows=TIME_WINDOWS, fps=int(fps)), columns=win_cols('CIRCULAR_HEAD_DIRECTION_RAO_SPACING'))
+            circular_range = pd.DataFrame(CircularStatisticsMixin.sliding_circular_range(data=direction_degrees, time_windows=TIME_WINDOWS, fps=int(fps)), columns=win_cols('CIRCULAR_HEAD_DIRECTION_RANGE'))
+            circular_std = pd.DataFrame(CircularStatisticsMixin.sliding_circular_std(data=direction_degrees, time_windows=TIME_WINDOWS, fps=int(fps)), columns=win_cols('CIRCULAR_HEAD_DIRECTION_STD'))
+            head_hull_angular_corr = pd.DataFrame(CircularStatisticsMixin.sliding_circular_correlation(sample_1=results['CIRCULAR_FRAME_HULL_3POINT_ANGLE'].values.astype(np.float32), sample_2=results['CIRCULAR_FRAME_HEAD_3POINT_ANGLE'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=win_cols('CIRCULAR_HULL_HEAD_3POINT_ANGLE_CORRELATION'))
+            hull_tail_angular_corr = pd.DataFrame(CircularStatisticsMixin.sliding_circular_correlation(sample_1=results['CIRCULAR_FRAME_HULL_3POINT_ANGLE'].values.astype(np.float32), sample_2=results['CIRCULAR_FRAME_TAIL_3POINT_ANGLE'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=win_cols('CIRCULAR_HULL_TAIL_3POINT_ANGLE_CORRELATION'))
+            mean_resultant_vector_length = pd.DataFrame(CircularStatisticsMixin.sliding_mean_resultant_vector_length(data=direction_degrees, fps=int(fps), time_windows=TIME_WINDOWS), columns=win_cols('CIRCULAR_MEAN_RESULTANT_LENGTH'))
+            results = pd.concat([results, angular_difference, rao_spacing, circular_range, circular_std, head_hull_angular_corr, hull_tail_angular_corr, mean_resultant_vector_length], axis=1)
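The circular block leans on directional statistics over head-direction angles in degrees. As a reference point, a numpy-only sketch of the mean resultant vector length R, the standard quantity behind sliding_mean_resultant_vector_length (this illustrates the textbook formula, not SimBA's code):

import numpy as np

def mean_resultant_vector_length(angles_deg: np.ndarray) -> float:
    # Map each angle to a unit vector and measure how aligned the vectors are: R in [0, 1].
    rad = np.deg2rad(angles_deg)
    return float(np.hypot(np.cos(rad).mean(), np.sin(rad).mean()))

print(mean_resultant_vector_length(np.array([350.0, 5.0, 10.0])))        # close to 1: tightly clustered headings
print(mean_resultant_vector_length(np.array([0.0, 90.0, 180.0, 270.0]))) # 0.0: uniformly spread headings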
pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_CENTER_ACCELERATION_MM_S'].values.astype(np.float32), sample_2=results['MOVEMENT_TAILBASE_ACCELERATION_MM_S'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=['MOVEMENT_CENTER_TAILBASE_ACCELERATION_SPEARMAN_CORRELATION_250', 'MOVEMENT_CENTER_TAILBASE_ACCELERATION_SPEARMAN_CORRELATION_500', 'MOVEMENT_CENTER_TAILBASE_ACCELERATION_SPEARMAN_CORRELATION_1000', 'MOVEMENT_CENTER_TAILBASE_ACCELERATION_SPEARMAN_CORRELATION_2000']) + tailtip_tailbase_acceleration_spearman = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_TAILBASE_ACCELERATION_MM_S'].values.astype(np.float32), sample_2=results['MOVEMENT_TAILTIP_ACCELERATION_MM_S'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=['MOVEMENT_TAILBASE_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_250', 'MOVEMENT_TAILBASE_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_500', 'MOVEMENT_TAILBASE_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_1000', 'MOVEMENT_TAILBASE_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_2000']) + tailcenter_tailend_acceleration_spearman = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_TAILCENTER_ACCELERATION_MM_S'].values.astype(np.float32), sample_2=results['MOVEMENT_TAILTIP_ACCELERATION_MM_S'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=['MOVEMENT_TAILCENTER_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_250', 'MOVEMENT_TAILCENTER_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_500', 'MOVEMENT_TAILCENTER_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_1000', 'MOVEMENT_TAILCENTER_TAILEND_ACCELERATION_SPEARMAN_CORRELATION_2000']) + nose_center_movement_spearman = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_FRAME_NOSE'].values.astype(np.float32), sample_2=results['MOVEMENT_FRAME_CENTER'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=['MOVEMENT_NOSE_CENTER_MOVEMENT_SPEARMAN_CORRELATION_250', 'MOVEMENT_NOSE_CENTER_MOVEMENT_SPEARMAN_CORRELATION_500', 'MOVEMENT_NOSE_CENTER_MOVEMENT_SPEARMAN_CORRELATION_1000', 'MOVEMENT_NOSE_CENTER_MOVEMENT_SPEARMAN_CORRELATION_2000']) + nose_tailbase_movement_spearman = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_FRAME_NOSE'].values.astype(np.float32), sample_2=results['MOVEMENT_FRAME_TAILBASE'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=['MOVEMENT_NOSE_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_250', 'MOVEMENT_NOSE_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_500', 'MOVEMENT_NOSE_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_1000', 'MOVEMENT_NOSE_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_2000']) + center_tailbase_movement_spearman = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_FRAME_CENTER'].values.astype(np.float32), sample_2=results['MOVEMENT_FRAME_TAILBASE'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=['MOVEMENT_CENTER_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_250', 'MOVEMENT_CENTER_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_500', 'MOVEMENT_CENTER_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_1000', 'MOVEMENT_CENTER_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_2000']) + tailbase_tailend_movement_spearman = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_FRAME_TAILBASE'].values.astype(np.float32), sample_2=results['MOVEMENT_FRAME_TAILTIP'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), 
columns=['MOVEMENT_TAILTIP_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_250', 'MOVEMENT_TAILTIP_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_500', 'MOVEMENT_TAILTIP_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_1000', 'MOVEMENT_TAILTIP_TAILBASE_MOVEMENT_SPEARMAN_CORRELATION_2000']) + tailcenter_tailend_movement_spearman = pd.DataFrame(Statistics.sliding_spearman_rank_correlation(sample_1=results['MOVEMENT_FRAME_TAILCENTER'].values.astype(np.float32), sample_2=results['MOVEMENT_FRAME_TAILTIP'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=fps), columns=['MOVEMENT_TAILTIP_TAILCENTER_MOVEMENT_SPEARMAN_CORRELATION_250', 'MOVEMENT_TAILTIP_TAILCENTER_MOVEMENT_SPEARMAN_CORRELATION_500', 'MOVEMENT_TAILTIP_TAILCENTER_MOVEMENT_SPEARMAN_CORRELATION_1000', 'MOVEMENT_TAILTIP_TAILCENTER_MOVEMENT_SPEARMAN_CORRELATION_2000']) + results = pd.concat([results, nose_center_acceleration_spearman, nose_tailbase_acceleration_spearman, center_tailbase_acceleration_spearman, tailtip_tailbase_acceleration_spearman, tailcenter_tailend_acceleration_spearman, nose_center_movement_spearman, nose_tailbase_movement_spearman, center_tailbase_movement_spearman, tailbase_tailend_movement_spearman, tailcenter_tailend_movement_spearman], axis=1) + + dominant_f_nose = pd.DataFrame(Statistics.sliding_dominant_frequencies(data=results['MOVEMENT_FRAME_NOSE'].values.astype(np.float32), fps=fps, k=2, time_windows=TIME_WINDOWS), columns=['MOVEMENT_NOSE_MOVEMENT_DOMINANT_FREQUENCY_250', 'MOVEMENT_NOSE_MOVEMENT_DOMINANT_FREQUENCY_500', 'MOVEMENT_NOSE_MOVEMENT_DOMINANT_FREQUENCY_1000', 'MOVEMENT_NOSE_MOVEMENT_DOMINANT_FREQUENCY_2000']) + dominant_f_center = pd.DataFrame(Statistics.sliding_dominant_frequencies(data=results['MOVEMENT_FRAME_CENTER'].values.astype(np.float32), fps=fps, k=2, time_windows=TIME_WINDOWS), columns=['MOVEMENT_CENTER_MOVEMENT_DOMINANT_FREQUENCY_250', 'MOVEMENT_CENTER_MOVEMENT_DOMINANT_FREQUENCY_500', 'MOVEMENT_CENTER_MOVEMENT_DOMINANT_FREQUENCY_1000', 'MOVEMENT_CENTER_MOVEMENT_DOMINANT_FREQUENCY_2000']) + results = pd.concat([results, dominant_f_nose, dominant_f_center], axis=1) + results['MOVEMENT_NOSE_AUTOCORRELATION_500'] = Statistics.sliding_autocorrelation(data=results['MOVEMENT_FRAME_NOSE'].values.astype(np.float32), max_lag=0.5, time_window=1.0, fps=fps) + + for time, bp in product(TIME_WINDOWS, [NOSE, CENTER, 'TAILTIP', 'TAILCENTER', 'SUMMED']): + results[f'MOVEMENT_MEAN_{time}_{bp.upper()}'] = results[f'MOVEMENT_FRAME_{bp.upper()}'].rolling(int(time * fps), min_periods=1).mean() + results[f'MOVEMENT_VAR_{time}_{bp.upper()}'] = results[f'MOVEMENT_FRAME_{bp.upper()}'].rolling(int(time * fps), min_periods=1).var() + results[f'MOVEMENT_SUM_{time}_{bp.upper()}'] = results[f'MOVEMENT_FRAME_{bp.upper()}'].rolling(int(time * fps), min_periods=1).sum() +
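+ # The rolling aggregates above use windows of int(time * fps) frames; e.g., at an assumed 30 fps, time=0.25 s gives a 7-frame window (int(7.5) == 7), and min_periods=1 makes the first frames yield values instead of NaN.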
+ # POSE CONFIDENCE FEATURES + print('Compute probability features...') + p_df = pd.DataFrame(FeatureExtractionMixin.count_values_in_range(data=p_arr, ranges=np.array([[0.0, 0.25], [0.25, 0.50], [0.50, 0.75], [0.75, 1.0]])), columns=['PROBABILITIES_LOW_COUNT', 'PROBABILITIES_MEDIUM_LOW_COUNT', 'PROBABILITIES_MEDIUM_HIGH_COUNT', 'PROBABILITIES_HIGH_COUNT']).astype(np.int32) + sliding_z_p_low = pd.DataFrame(Statistics.sliding_z_scores(data=p_df['PROBABILITIES_LOW_COUNT'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=int(fps)), columns=['PROBABILITIES_LOW_COUNT_SLIDING_Z_SCORE_250', 'PROBABILITIES_LOW_COUNT_SLIDING_Z_SCORE_500', 'PROBABILITIES_LOW_COUNT_SLIDING_Z_SCORE_1000', 'PROBABILITIES_LOW_COUNT_SLIDING_Z_SCORE_2000']) + sliding_z_p_high = pd.DataFrame(Statistics.sliding_z_scores(data=p_df['PROBABILITIES_HIGH_COUNT'].values.astype(np.float32), time_windows=TIME_WINDOWS, fps=int(fps)), columns=['PROBABILITIES_HIGH_COUNT_SLIDING_Z_SCORE_250', 'PROBABILITIES_HIGH_COUNT_SLIDING_Z_SCORE_500', 'PROBABILITIES_HIGH_COUNT_SLIDING_Z_SCORE_1000', 'PROBABILITIES_HIGH_COUNT_SLIDING_Z_SCORE_2000']) + results = pd.concat([df, results, p_df, sliding_z_p_low, sliding_z_p_high], axis=1).fillna(-1) + self.save(df=results, save_path=save_path) + video_timer.stop_timer() + print(f'Video {video_name} complete (elapsed time: {video_timer.elapsed_time_str}s)...') + + self.timer.stop_timer() + stdout_success(msg=f'Features extracted for {len(self.outlier_corrected_paths)} file(s)', elapsed_time=self.timer.elapsed_time_str) + + def save(self, df: pd.DataFrame, save_path: os.PathLike): + write_df(df=df.astype(np.float32), file_type=self.file_type, save_path=save_path) + +# if __name__ == "__main__": +# parser = argparse.ArgumentParser(description='SimBA Custom Feature Extractor') +# parser.add_argument('--config_path', type=str, help='SimBA project config path') +# args = parser.parse_args() +# feature_extractor = MitraFeatureExtractor(config_path=args.config_path) +# feature_extractor.run() +# + + + +# feature_extractor = MitraFeatureExtractor(config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini") +# feature_extractor.run() + + + + + + + +
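+# A hedged sanity check for one saved feature file (a sketch; the file name and CSV file_type are assumptions, not part of this project): +# import pandas as pd +# df = pd.read_csv(r"project_folder/csv/features_extracted/my_video.csv", index_col=0) +# assert 'MOVEMENT_NOSE_CENTER_ACCELERATION_SPEARMAN_CORRELATION_250' in df.columns +# assert not df.isnull().any().any()  # remaining NaNs are filled with -1 before saving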
diff --git a/simba/sandbox/MitraFeatureExtractor.zip b/simba/sandbox/MitraFeatureExtractor.zip new file mode 100644 index 0000000000000000000000000000000000000000..d9b589d767d5991d8335730eca0aa736fcbe50bf GIT binary patch literal 4450 [binary payload omitted] literal 0 HcmV?d00001 diff --git a/simba/sandbox/ROI_analyzer.py b/simba/sandbox/ROI_analyzer.py new file mode 100644 index 000000000..1c26f5cb7 --- /dev/null +++ b/simba/sandbox/ROI_analyzer.py @@ -0,0 +1,222 @@ +__author__ = "Simon Nilsson" + +import os +from typing import Optional, Union, List +import numpy as np +import pandas as pd + +from simba.mixins.config_reader import ConfigReader +from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin +from simba.mixins.feature_extraction_supplement_mixin import FeatureExtractionSupplemental +from simba.utils.checks import check_file_exist_and_readable, check_float, check_valid_lst, check_all_file_names_are_represented_in_video_log, check_that_column_exist +from simba.utils.enums import Keys +from simba.utils.errors import (MissingColumnsError, CountError, ROICoordinatesNotFoundError) +from simba.utils.printing import stdout_success +from simba.utils.read_write import get_fn_ext, read_df, read_data_paths +from simba.utils.warnings import NoDataFoundWarning +from simba.utils.data import slice_roi_dict_for_video, detect_bouts + + +class ROIAnalyzer(ConfigReader, FeatureExtractionMixin): + """ + Analyze movements, entries, exits, and time spent in user-defined ROIs. Results are stored in the + 'project_folder/logs' directory of the SimBA project. + + :param str config_path: Path to SimBA project config file in Configparser format.
+ :param Optional[str] data_path: Path to folder or file holding the data used to calculate ROI aggregate statistics. If None, then defaults to the `project_folder/csv/outlier_corrected_movement_location` directory of the SimBA project. Default: None. + :param Optional[bool] calculate_distances: If True, then calculate movements aggregate statistics (distances and velocities) inside ROIs. Results are saved in ``project_folder/logs/`` directory. Default: False. + :param Optional[bool] detailed_bout_data: If True, saves a file with a row for every entry into each ROI for each animal in each video. Results are saved in ``project_folder/logs/`` directory. Default: False. + :param Optional[float] threshold: Float between 0 and 1. Body-part locations detected below this confidence threshold are filtered. Default: 0.0. + :param Optional[List[str]] body_parts: List of body-parts to perform the ROI analysis on. + + .. note:: + `ROI tutorials `__. + + :example: + >>> test = ROIAnalyzer(config_path = r"/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini", calculate_distances=True, detailed_bout_data=True, body_parts=['Nose_1', 'Nose_2'], threshold=0.0) + >>> test.run() + >>> test.save() + """ + + def __init__(self, + config_path: Union[str, os.PathLike], + data_path: Optional[Union[str, os.PathLike, List[str]]] = None, + detailed_bout_data: Optional[bool] = False, + calculate_distances: Optional[bool] = False, + threshold: Optional[float] = 0.0, + body_parts: Optional[List[str]] = None): + + check_file_exist_and_readable(file_path=config_path) + ConfigReader.__init__(self, config_path=config_path) + if not os.path.isfile(self.roi_coordinates_path): + raise ROICoordinatesNotFoundError(expected_file_path=self.roi_coordinates_path) + self.read_roi_data() + FeatureExtractionMixin.__init__(self) + if detailed_bout_data and (not os.path.exists(self.detailed_roi_data_dir)): + os.makedirs(self.detailed_roi_data_dir) + self.data_paths = read_data_paths(path=data_path, default=self.outlier_corrected_paths, default_name=self.outlier_corrected_dir, file_type=self.file_type) +
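+ # data_path may be a directory, a single file, or a list of files; None falls back to every file in project_folder/csv/outlier_corrected_movement_location (see the docstring above)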
+ check_float(name="Body-part probability threshold", value=threshold, min_value=0.0, max_value=1.0) + check_valid_lst(data=body_parts, source=f'{self.__class__.__name__} body-parts', valid_dtypes=(str,)) + if len(set(body_parts)) != len(body_parts): + raise CountError(msg=f'All body-part entries have to be unique. Got {body_parts}', source=self.__class__.__name__) + self.bp_dict, self.bp_lk = {}, {} + for bp in body_parts: + animal = self.find_animal_name_from_body_part_name(bp_name=bp, bp_dict=self.animal_bp_dict) + self.bp_dict[animal] = [f'{bp}_x', f'{bp}_y', f'{bp}_p'] + self.bp_lk[animal] = bp + self.roi_headers = [v for k, v in self.bp_dict.items()] + self.roi_headers = [item for sublist in self.roi_headers for item in sublist] + self.calculate_distances, self.threshold = calculate_distances, threshold + self.detailed_bout_data = detailed_bout_data + + def run(self): + check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths) + self.movements_df = pd.DataFrame(columns=["VIDEO", "ANIMAL", "SHAPE", "MEASUREMENT", "VALUE"]) + self.entry_results = pd.DataFrame(columns=["VIDEO", "ANIMAL", "SHAPE", "ENTRY COUNT"]) + self.time_results = pd.DataFrame(columns=["VIDEO", "ANIMAL", "SHAPE", "TIME (S)"]) + self.roi_bout_results = [] + for file_cnt, file_path in enumerate(self.data_paths): + _, video_name, _ = get_fn_ext(file_path) + print(f"Analysing ROI data for video {video_name}...") + video_settings, pix_per_mm, self.fps = self.read_video_info(video_name=video_name) + self.roi_dict, video_shape_names = slice_roi_dict_for_video(data=self.roi_dict, video_name=video_name) + if len(video_shape_names) == 0: + NoDataFoundWarning(msg=f"Skipping video {video_name}: No user-defined ROI data found for this video...") + continue + else: + self.data_df = read_df(file_path, self.file_type).reset_index(drop=True) + if len(self.bp_headers) != len(self.data_df.columns): + raise MissingColumnsError(msg=f"The data file {file_path} contains {len(self.data_df.columns)} body-part columns, but the project is made for {len(self.bp_headers)} body-part columns as suggested by the {self.body_parts_path} file", source=self.__class__.__name__) + self.data_df.columns = self.bp_headers + check_that_column_exist(df=self.data_df, column_name=self.roi_headers, file_name=file_path) + for animal_name, bp_names in self.bp_dict.items(): + animal_df = self.data_df[self.bp_dict[animal_name]].reset_index(drop=True) + animal_bout_results = {} + for _, row in self.roi_dict[Keys.ROI_RECTANGLES.value].iterrows(): + roi_coords = np.array([[row["topLeftX"], row["topLeftY"]], [row["Bottom_right_X"], row["Bottom_right_Y"]]]) + animal_df[row['Name']] = FeatureExtractionMixin.framewise_inside_rectangle_roi(bp_location=animal_df.values[:, 0:2], roi_coords=roi_coords) + animal_df.loc[animal_df[bp_names[2]] < self.threshold, row["Name"]] = 0 + roi_bouts = detect_bouts(data_df=animal_df, target_lst=[row['Name']], fps=self.fps) + roi_bouts['ANIMAL'] = animal_name; roi_bouts['VIDEO'] = video_name + self.roi_bout_results.append(roi_bouts) + animal_bout_results[row['Name']] = roi_bouts + self.entry_results.loc[len(self.entry_results)] = [video_name, animal_name, row['Name'], len(roi_bouts)] + self.time_results.loc[len(self.time_results)] = [video_name, animal_name, row['Name'], roi_bouts['Bout_time'].sum()] + for _, row in self.roi_dict[Keys.ROI_CIRCLES.value].iterrows(): + center_x, center_y = row["centerX"], row["centerY"] + animal_df[f'{row["Name"]}_distance'] = FeatureExtractionMixin.framewise_euclidean_distance_roi(location_1=animal_df.values[:, 0:2], location_2=np.array([center_x, center_y])) + animal_df[row["Name"]] = 0 + # a frame counts as inside the circle when the body-part's distance to the center is within the radius (the original compared the freshly zeroed membership column to the radius, which marked every frame as inside) + animal_df.loc[animal_df[f'{row["Name"]}_distance'] <= row["radius"], row["Name"]] = 1 + animal_df.loc[animal_df[bp_names[2]] < self.threshold, row["Name"]] = 0 + roi_bouts =
detect_bouts(data_df=animal_df, target_lst=[row['Name']], fps=self.fps) + roi_bouts['ANIMAL'] = animal_name; roi_bouts['VIDEO'] = video_name + self.roi_bout_results.append(roi_bouts) + animal_bout_results[row['Name']] = roi_bouts + self.entry_results.loc[len(self.entry_results)] = [video_name, animal_name, row['Name'], len(roi_bouts)] + self.time_results.loc[len(self.time_results)] = [video_name, animal_name, row['Name'], roi_bouts['Bout_time'].sum()] + for _, row in self.roi_dict[Keys.ROI_POLYGONS.value].iterrows(): + roi_coords = np.array(list(zip(row["vertices"][:, 0], row["vertices"][:, 1]))) + animal_df[row['Name']] = FeatureExtractionMixin.framewise_inside_polygon_roi(bp_location=animal_df.values[:, 0:2], roi_coords=roi_coords) + animal_df.loc[animal_df[bp_names[2]] < self.threshold, row["Name"]] = 0 + roi_bouts = detect_bouts(data_df=animal_df, target_lst=[row['Name']], fps=self.fps) + roi_bouts['ANIMAL'] = animal_name; roi_bouts['VIDEO'] = video_name + self.roi_bout_results.append(roi_bouts) + animal_bout_results[row['Name']] = roi_bouts + self.entry_results.loc[len(self.entry_results)] = [video_name, animal_name, row['Name'], len(roi_bouts)] + self.time_results.loc[len(self.time_results)] = [video_name, animal_name, row['Name'], roi_bouts['Bout_time'].sum()] + if self.calculate_distances: + for roi_name, roi_data in animal_bout_results.items(): + if len(roi_data) == 0: + self.movements_df.loc[len(self.movements_df)] = [video_name, animal_name, roi_name, "Movement (cm)", 0] + self.movements_df.loc[len(self.movements_df)] = [video_name, animal_name, roi_name, "Average velocity (cm/s)", "None"] + else: + distances, velocities = [], [] + roi_frames = roi_data[['Start_frame', 'End_frame']].values + for event in roi_frames: + event_pose = animal_df.loc[np.arange(event[0], event[1]+1), bp_names] + event_pose = event_pose[event_pose[bp_names[2]] > self.threshold][bp_names[:2]].values + if event_pose.shape[0] > 1: + distance, velocity = FeatureExtractionSupplemental.distance_and_velocity(x=event_pose, fps=self.fps, pixels_per_mm=pix_per_mm, centimeters=True) + distances.append(distance); velocities.append(velocity) + self.movements_df.loc[len(self.movements_df)] = [video_name, animal_name, roi_name, "Movement (cm)", sum(distances)] + self.movements_df.loc[len(self.movements_df)] = [video_name, animal_name, roi_name, "Average velocity (cm/s)", np.average(velocities)] + + self.detailed_df = pd.concat(self.roi_bout_results, axis=0) + self.detailed_df = self.detailed_df.rename(columns={"Event": "SHAPE NAME", "Start_time": "START TIME", 'End Time': 'END TIME', 'Start_frame': 'START FRAME', 'End_frame': 'END FRAME', 'Bout_time': 'DURATION (S)'}) + self.detailed_df["BODY-PART"] = self.detailed_df["ANIMAL"].map(self.bp_lk) + self.detailed_df = self.detailed_df[['VIDEO', 'ANIMAL', 'BODY-PART', 'SHAPE NAME', 'START TIME', 'END TIME', 'START FRAME', 'END FRAME', 'DURATION (S)']] + + def save(self): + self.entry_results["BODY-PART"] = self.entry_results["ANIMAL"].map(self.bp_lk) + self.time_results["BODY-PART"] = self.time_results["ANIMAL"].map(self.bp_lk) + self.entry_results = self.entry_results[['VIDEO', 'ANIMAL', 'BODY-PART', 'SHAPE', 'ENTRY COUNT']] + self.time_results = self.time_results[['VIDEO', 'ANIMAL', 'BODY-PART', 'SHAPE', 'TIME (S)']] + self.entry_results.to_csv(os.path.join(self.logs_path, f'{"ROI_entry_data"}_{self.datetime}.csv')) + self.time_results.to_csv(os.path.join(self.logs_path, f'{"ROI_time_data"}_{self.datetime}.csv')) + if self.detailed_bout_data: + detailed_path = 
os.path.join(self.logs_path, f'{"Detailed_ROI_data"}_{self.datetime}.csv') + self.detailed_df.to_csv(detailed_path) + print(f'Detailed ROI data saved at {detailed_path}...') + if self.calculate_distances: + movement_path = os.path.join(self.logs_path, f'{"ROI_movement_data"}_{self.datetime}.csv') + self.movements_df["BODY-PART"] = self.movements_df["ANIMAL"].map(self.bp_lk) + self.movements_df = self.movements_df[['VIDEO', 'ANIMAL', 'BODY-PART', 'SHAPE', 'MEASUREMENT', 'VALUE']] + self.movements_df.to_csv(movement_path) + print(f'ROI aggregate movement data saved at {movement_path}...') + stdout_success(msg=f'ROI time and ROI entry saved in the {self.logs_path} directory in CSV format.') + +# test = ROIAnalyzer(config_path = r"/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini", +# data_path=None, +# calculate_distances=True, +# detailed_bout_data=True, +# body_parts=['Nose_1', 'Nose_2'], +# threshold=0.0) +# test.run() +# test.save() + + +# +# test = ROIAnalyzer(ini_path = r"/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini", +# data_path = "outlier_corrected_movement_location", +# calculate_distances=True, +# settings={'threshold': 0.00, 'body_parts': {'Animal_1': 'Nose_1'}}) +# test.run() +# test.save() + +# test = ROIAnalyzer(ini_path = r"/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini", +# data_path = "outlier_corrected_movement_location", +# calculate_distances=True) +# test.run() + +# test = ROIAnalyzer(ini_path = r"/Users/simon/Desktop/envs/simba_dev/tests/data/test_projects/zebrafish/project_folder/project_config.ini", +# data_path = "outlier_corrected_movement_location", +# calculate_distances=True) + + +# settings = {'body_parts': {'animal_1_bp': 'Ear_left_1', 'animal_2_bp': 'Ear_left_2', 'animal_3_bp': 'Ear_right_1',}, 'threshold': 0.4} +# test = ROIAnalyzer(ini_path = r"/Users/simon/Desktop/envs/troubleshooting/two_animals_16bp_032023/project_folder/project_config.ini", +# data_path = "outlier_corrected_movement_location", +# settings=settings, +# calculate_distances=True) +# test.run() +# test.save() + + +# settings = {'body_parts': {'Simon': 'Ear_left_1', 'JJ': 'Ear_left_2'}, 'threshold': 0.4} +# test = ROIAnalyzer(ini_path = r"/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini", +# data_path = "outlier_corrected_movement_location", +# settings=settings, +# calculate_distances=True) +# test.read_roi_dfs() +# test.analyze_ROIs() +# test.save_data() + + +# settings = {'body_parts': {'animal_1_bp': 'Ear_left_1', 'animal_2_bp': 'Ear_left_2'}, 'threshold': 0.4} +# test = ROIAnalyzer(ini_path = r"/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini", +# data_path = "outlier_corrected_movement_location", +# calculate_distances=True) +# test.run() +# test.analyze_ROIs() +# test.save_data() diff --git a/simba/sandbox/ROI_plotter.py b/simba/sandbox/ROI_plotter.py new file mode 100644 index 000000000..ac7fb34b3 --- /dev/null +++ b/simba/sandbox/ROI_plotter.py @@ -0,0 +1,255 @@ +__author__ = "Simon Nilsson" + +import itertools +import os +from copy import deepcopy +from typing import Tuple, Optional, Union, Dict + +import cv2 +import numpy as np + +from simba.mixins.config_reader import ConfigReader +from simba.mixins.plotting_mixin import PlottingMixin +from simba.roi_tools.ROI_analyzer import ROIAnalyzer +from simba.utils.data 
import create_color_palettes, slice_roi_dict_for_video, detect_bouts +from simba.utils.checks import check_float, check_if_keys_exist_in_dict, check_file_exist_and_readable, check_video_and_data_frm_count_align +from simba.utils.enums import Formats, Paths, TagNames, TextOptions +from simba.utils.errors import DuplicationError, NoFilesFoundError +from simba.utils.printing import SimbaTimer, log_event, stdout_success +from simba.utils.read_write import (get_fn_ext, get_video_meta_data) +from simba.utils.warnings import DuplicateNamesWarning + +SHOW_BODY_PARTS = 'show_body_part' +SHOW_ANIMAL_NAMES = 'show_animal_name' +STYLE_KEYS = [SHOW_BODY_PARTS, SHOW_ANIMAL_NAMES] + + +class ROIPlot(ConfigReader, PlottingMixin): + """ + Visualize the ROI data (number of entries/exits, time spent in ROIs, etc.). + + .. note:: + `ROI tutorials `__. + + Use :meth:`simba.plotting.ROI_plotter_mp.ROIPlotMultiprocess` for improved run-time. + + :param str config_path: Path to SimBA project config file in Configparser format + :param str video_path: Path to the video to create ROI visualizations for + :param dict style_attr: User-defined visualization settings. + + :example: + >>> style_attr = {'show_body_part': True, 'show_animal_name': True} + >>> roi_visualizer = ROIPlot(config_path=r'MyProjectConfig', video_path="MyVideo.mp4", style_attr=style_attr) + >>> roi_visualizer.run() + """ + + def __init__(self, + config_path: Union[str, os.PathLike], + video_path: Union[str, os.PathLike], + style_attr: Dict[str, bool], + body_parts: Optional[Dict[str, str]] = None, + threshold: Optional[float] = 0.0): + + check_float(name=f'{self.__class__.__name__} threshold', value=threshold, min_value=0.0, max_value=1.0) + check_if_keys_exist_in_dict(data=style_attr, key=STYLE_KEYS, name=f'{self.__class__.__name__} style_attr') + check_file_exist_and_readable(file_path=video_path) + ConfigReader.__init__(self, config_path=config_path) + PlottingMixin.__init__(self) + log_event(logger_name=str(__class__.__name__), log_type=TagNames.CLASS_INIT.value, msg=self.create_log_msg_from_init_args(locals=locals())) + settings = None + if body_parts: settings = {"body_parts": body_parts, 'threshold': threshold} + self.roi_analyzer = ROIAnalyzer(ini_path=config_path, data_path="outlier_corrected_movement_location", settings=settings) + if not body_parts: self.animal_id_lst = self.roi_analyzer.multi_animal_id_list + else: self.animal_id_lst = list(body_parts.keys()) + _, self.video_name, _ = get_fn_ext(video_path) + self.roi_analyzer.files_found = [os.path.join(self.roi_analyzer.input_folder, f"{self.video_name}.{self.roi_analyzer.file_type}")] + if not os.path.isfile(self.roi_analyzer.files_found[0]): + raise NoFilesFoundError( msg=f"SIMBA ERROR: Could not find the file at path {self.roi_analyzer.files_found[0]}.
Please make sure you have corrected body-part outliers or indicated that you want to skip outlier correction", source=self.__class__.__name__, ) + self.roi_analyzer.run() + self.roi_entries_df = self.roi_analyzer.detailed_df + self.data_df, self.style_attr = self.roi_analyzer.data_df, style_attr + self.save_dir = os.path.join(self.project_path, Paths.ROI_ANALYSIS.value) + if not os.path.exists(self.save_dir): os.makedirs(self.save_dir) + self.video_save_path = os.path.join(self.save_dir, f"{self.video_name}.mp4") + self.read_roi_data() + self.shape_columns = [] + _, self.shape_names = slice_roi_dict_for_video(data=self.roi_dict, video_name=self.video_name) + for animal in self.animal_id_lst: + for shape_name in self.shape_names: + self.data_df[f"{animal}_{shape_name}"] = 0; self.shape_columns.append(f"{animal}_{shape_name}") + self.bp_dict = self.roi_analyzer.bp_dict + self.__insert_data() + self.video_path = video_path + self.cap = cv2.VideoCapture(self.video_path) + self.video_meta_data = get_video_meta_data(self.video_path) + self.threshold = threshold + + def __insert_data(self): + roi_entries_dict = self.roi_entries_df[["ANIMAL", "SHAPE", "ENTRY FRAMES", "EXIT FRAMES"]].to_dict(orient="records") + for entry_dict in roi_entries_dict: + entry, exit = int(entry_dict["ENTRY FRAMES"]), int(entry_dict["EXIT FRAMES"]) + entry_dict["frame_range"] = list(range(entry, exit + 1)) + col_name = entry_dict["ANIMAL"] + "_" + entry_dict["SHAPE"] + self.data_df[col_name][self.data_df.index.isin(entry_dict["frame_range"])] = 1 + + def __calc_text_locs(self) -> dict: + loc_dict = {} + line_spacer = TextOptions.FIRST_LINE_SPACING.value + for animal_cnt, animal_name in enumerate(self.animal_id_lst): + loc_dict[animal_name] = {} + for shape in self.shape_names: + loc_dict[animal_name][shape] = {} + loc_dict[animal_name][shape]["timer_text"] = f"{shape} {animal_name} timer:" + loc_dict[animal_name][shape]["entries_text"] = f"{shape} {animal_name} entries:" + loc_dict[animal_name][shape]["timer_text_loc"] = ((self.video_meta_data["width"] + TextOptions.BORDER_BUFFER_X.value), (self.video_meta_data["height"] - (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + loc_dict[animal_name][shape]["timer_data_loc"] = (int(self.border_img_w - (self.border_img_w / 8)), (self.video_meta_data["height"] - (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + line_spacer += TextOptions.LINE_SPACING.value + loc_dict[animal_name][shape]["entries_text_loc"] = ((self.video_meta_data["width"] + TextOptions.BORDER_BUFFER_X.value), (self.video_meta_data["height"] - (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + loc_dict[animal_name][shape]["entries_data_loc"] = (int(self.border_img_w - (self.border_img_w / 8)), (self.video_meta_data["height"]- (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + line_spacer += TextOptions.LINE_SPACING.value + return loc_dict + + def __insert_texts(self, shape_df): + for animal_name in self.animal_id_lst: + for _, shape in shape_df.iterrows(): + shape_name, shape_color = shape["Name"], shape["Color BGR"] + cv2.putText(self.border_img, self.loc_dict[animal_name][shape_name]["timer_text"], self.loc_dict[animal_name][shape_name]["timer_text_loc"], TextOptions.FONT.value, self.scalers["font_size"], shape_color, TextOptions.TEXT_THICKNESS.value) + 
cv2.putText(self.border_img, self.loc_dict[animal_name][shape_name]["entries_text"], self.loc_dict[animal_name][shape_name]["entries_text_loc"], TextOptions.FONT.value, self.scalers["font_size"], shape_color, TextOptions.TEXT_THICKNESS.value) + + def __create_counters(self) -> dict: + cnt_dict = {} + for animal_cnt, animal_name in enumerate(self.animal_id_lst): + cnt_dict[animal_name] = {} + for shape in self.shape_names: + cnt_dict[animal_name][shape] = {} + cnt_dict[animal_name][shape]["timer"] = 0 + cnt_dict[animal_name][shape]["entries"] = 0 + cnt_dict[animal_name][shape]["entry_status"] = False + return cnt_dict + + def __calculate_cumulative(self): + for animal_name in self.animal_id_lst: + for shape in self.shape_names: + self.data_df[f"{animal_name}_{shape}_cum_sum_time"] = (self.data_df[f"{animal_name}_{shape}"].cumsum() / self.video_meta_data['fps']) + roi_bouts = list(detect_bouts(data_df=self.data_df, target_lst=[f"{animal_name}_{shape}"], fps=self.video_meta_data['fps'])["Start_frame"]) + self.data_df[f"{animal_name}_{shape}_entry"] = 0 + self.data_df.loc[roi_bouts, f"{animal_name}_{shape}_entry"] = 1 + self.data_df[f"{animal_name}_{shape}_cum_sum_entries"] = (self.data_df[f"{animal_name}_{shape}_entry"].cumsum()) + + def __create_shape_dicts(self): + shape_dicts = {} + for df in [self.roi_analyzer.video_recs, self.roi_analyzer.video_circs, self.roi_analyzer.video_polys]: + if not df["Name"].is_unique: + df = df.drop_duplicates(subset=["Name"], keep="first") + DuplicateNamesWarning('Some of your ROIs of the same shape type share the same name (e.g., two rectangles both named "My rectangle"). SimBA requires unique ROI names and will keep one ROI per duplicated name and drop the rest.', source=self.__class__.__name__) + d = df.set_index("Name").to_dict(orient="index") + shape_dicts = {**shape_dicts, **d} + return shape_dicts + + def __get_bordered_img_size(self) -> Tuple[int, int]: + cap = cv2.VideoCapture(self.video_path) + cap.set(1, 1) + _, img = cap.read()  # read from the local capture (the original read from self.cap and left this capture unused) + self.base_img = cv2.copyMakeBorder(img, 0, 0, 0, int(self.video_meta_data["width"]), borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]) + self.base_img_h, self.base_img_w = self.base_img.shape[0], self.base_img.shape[1] + cap.release() + return self.base_img_h, self.base_img_w + + def run(self): + video_timer = SimbaTimer(start=True) + max_dim = max(self.video_meta_data["width"], self.video_meta_data["height"]) + self.scalers = {} + self.scalers["circle_size"] = int(TextOptions.RADIUS_SCALER.value / (TextOptions.RESOLUTION_SCALER.value / max_dim)) + self.scalers["font_size"] = float(TextOptions.FONT_SCALER.value / (TextOptions.RESOLUTION_SCALER.value / max_dim)) + self.scalers["space_size"] = int(TextOptions.SPACE_SCALER.value / (TextOptions.RESOLUTION_SCALER.value / max_dim)) + color_lst = create_color_palettes(self.roi_analyzer.animal_cnt, int((len(self.roi_analyzer.bp_names) / 3)))[0] + self.border_img_h, self.border_img_w = self.__get_bordered_img_size() + fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value) + writer = cv2.VideoWriter(self.video_save_path, fourcc, self.video_meta_data["fps"], (self.border_img_w, self.border_img_h)) + self.loc_dict = self.__calc_text_locs() + self.cnt_dict = self.__create_counters() + self.shape_dicts = self.__create_shape_dicts() + self.__calculate_cumulative() + check_video_and_data_frm_count_align(video=self.video_path, data=self.data_df, name=self.video_name, raise_error=False) + frame_cnt = 0 + while self.cap.isOpened(): + ret, img = self.cap.read() + if
ret: + self.border_img = cv2.copyMakeBorder(img, 0, 0, 0, int(self.video_meta_data["width"]), borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]) + self.__insert_texts(self.roi_analyzer.video_recs) + self.__insert_texts(self.roi_analyzer.video_circs) + self.__insert_texts(self.roi_analyzer.video_polys) + for _, row in self.roi_analyzer.video_recs.iterrows(): + top_left_x, top_left_y, shape_name = (row["topLeftX"], row["topLeftY"], row["Name"]) + bottom_right_x, bottom_right_y = (row["Bottom_right_X"], row["Bottom_right_Y"]) + thickness, color = row["Thickness"], row["Color BGR"] + cv2.rectangle(self.border_img, (int(top_left_x), int(top_left_y)), (int(bottom_right_x), int(bottom_right_y)), color,int(thickness)) + for _, row in self.roi_analyzer.video_circs.iterrows(): + center_x, center_y, radius, shape_name = (row["centerX"],row["centerY"],row["radius"],row["Name"]) + thickness, color = row["Thickness"], row["Color BGR"] + cv2.circle(self.border_img,(center_x, center_y),radius,color,int(thickness)) + for _, row in self.roi_analyzer.video_polys.iterrows(): + vertices, shape_name = row["vertices"], row["Name"] + thickness, color = row["Thickness"], row["Color BGR"] + cv2.polylines(self.border_img,[vertices],True,color,thickness=int(thickness)) + for animal_cnt, animal_name in enumerate(self.animal_id_lst): + bp_data = (self.data_df.loc[frame_cnt, self.bp_dict[animal_name]].fillna(0.0).values) + if self.threshold < bp_data[2]: + if self.style_attr[SHOW_BODY_PARTS]: + cv2.circle(self.border_img, (int(bp_data[0]), int(bp_data[1])), self.scalers["circle_size"], color_lst[animal_cnt], -1) + if self.style_attr[SHOW_ANIMAL_NAMES]: + cv2.putText(self.border_img, animal_name, (int(bp_data[0]), int(bp_data[1])), self.font, self.scalers["font_size"], color_lst[animal_cnt], TextOptions.TEXT_THICKNESS.value) + for animal_cnt, animal_name in enumerate(self.animal_id_lst): + for shape in self.shape_names: + time = str(round(self.data_df.loc[frame_cnt, f"{animal_name}_{shape}_cum_sum_time"], 2)) + entries = str(int(self.data_df.loc[frame_cnt, f"{animal_name}_{shape}_cum_sum_entries"])) + cv2.putText(self.border_img, time, self.loc_dict[animal_name][shape]["timer_data_loc"], self.font, self.scalers["font_size"], self.shape_dicts[shape]["Color BGR"], TextOptions.TEXT_THICKNESS.value) + cv2.putText(self.border_img, entries, self.loc_dict[animal_name][shape]["entries_data_loc"], self.font, self.scalers["font_size"], self.shape_dicts[shape]["Color BGR"], TextOptions.TEXT_THICKNESS.value) + writer.write(self.border_img) + print(f"Frame: {frame_cnt+1} / {self.video_meta_data['frame_count']}, Video: {self.video_name}.") + frame_cnt += 1 + else: + break + writer.release() + video_timer.stop_timer() + stdout_success(msg=f"Video {self.video_name} created. 
Video saved at {self.video_save_path}", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__) + + +# test = ROIPlot(config_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/project_config.ini', +# video_path="/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/videos/SI_DAY3_308_CD1_PRESENT.mp4", +# style_attr={'show_body_part': True, 'show_animal_name': True}) +# test.run() + + + + +# test = ROIPlot(ini_path=r'/Users/simon/Desktop/envs/troubleshooting/Termites_5/project_folder/project_config.ini', +# video_path="termite_test.mp4", +# style_attr={'Show_body_part': True, 'Show_animal_name': True}) +# test.insert_data() +# test.visualize_ROI_data() + + +# test = ROIPlot(ini_path=r'/Users/simon/Desktop/envs/troubleshooting/Termites_5/project_folder/project_config.ini', video_path="termite_test.mp4") +# test.insert_data() +# test.visualize_ROI_data() + +# test = ROIPlot(ini_path=r'/Users/simon/Desktop/troubleshooting/train_model_project/project_folder/project_config.ini', video_path=r"Together_1.avi") +# test.insert_data() +# test.visualize_ROI_data() + + +# test = ROIPlot(ini_path=r'/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', +# video_path="Together_1.avi", +# style_attr={'Show_body_part': True, 'Show_animal_name': False}, +# body_parts={f'Simon': 'Ear_left_1'}) +# test.insert_data() +# test.run() + +# test = ROIPlot(ini_path=r'/Users/simon/Desktop/envs/troubleshooting/Termites_5/project_folder/project_config.ini', +# video_path="termite_test.mp4", +# style_attr={'Show_body_part': True, 'Show_animal_name': True}, +# body_parts={f'Simon': 'Termite_1_Head_1'}) +# test.insert_data() +# test.run() diff --git a/simba/sandbox/ROI_plotter_mp.py b/simba/sandbox/ROI_plotter_mp.py new file mode 100644 index 000000000..21879ad60 --- /dev/null +++ b/simba/sandbox/ROI_plotter_mp.py @@ -0,0 +1,352 @@ +__author__ = "Simon Nilsson" + +import functools +import multiprocessing +import os +import platform +import shutil +from typing import Optional, Union, Dict, Tuple + +import cv2 +import numpy as np +import pandas as pd + +from simba.mixins.config_reader import ConfigReader +from simba.mixins.plotting_mixin import PlottingMixin +from simba.roi_tools.ROI_analyzer import ROIAnalyzer +from simba.utils.data import create_color_palettes, detect_bouts, slice_roi_dict_for_video +from simba.utils.enums import Paths, TagNames, TextOptions, Formats +from simba.utils.errors import NoFilesFoundError +from simba.utils.printing import SimbaTimer, log_event, stdout_success +from simba.utils.read_write import (concatenate_videos_in_folder, get_fn_ext, get_video_meta_data, find_core_cnt) +from simba.utils.warnings import DuplicateNamesWarning +from simba.utils.checks import (check_float, + check_int, + check_if_keys_exist_in_dict, + check_file_exist_and_readable, + check_video_and_data_frm_count_align) +pd.options.mode.chained_assignment = None + +SHOW_BODY_PARTS = 'show_body_part' +SHOW_ANIMAL_NAMES = 'show_animal_name' +STYLE_KEYS = [SHOW_BODY_PARTS, SHOW_ANIMAL_NAMES] + + +def _roi_plotter_mp(data: pd.DataFrame, + loc_dict: dict, + scalers: dict, + video_meta_data: dict, + save_temp_directory: str, + shape_meta_data: dict, + video_shape_names: list, + input_video_path: str, + body_part_dict: dict, + roi_analyzer_data: object, + colors: list, + style_attr: dict, + animal_ids: list, + threshold: float): + + def __insert_texts(shape_df): + for animal_name in animal_ids: + for _, shape 
in shape_df.iterrows(): + shape_name, shape_color = shape["Name"], shape["Color BGR"] + cv2.putText(border_img, loc_dict[animal_name][shape_name]["timer_text"], loc_dict[animal_name][shape_name]["timer_text_loc"], TextOptions.FONT.value, scalers["font_size"], shape_color, TextOptions.TEXT_THICKNESS.value) + cv2.putText(border_img, loc_dict[animal_name][shape_name]["entries_text"], loc_dict[animal_name][shape_name]["entries_text_loc"], TextOptions.FONT.value, scalers["font_size"], shape_color, TextOptions.TEXT_THICKNESS.value) + return border_img + + fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value) + group_cnt = int(data["group"].values[0]) + start_frm, current_frm, end_frm = data.index[0], data.index[0], data.index[-1] + save_path = os.path.join(save_temp_directory, f"{group_cnt}.mp4") + writer = cv2.VideoWriter(save_path, fourcc, video_meta_data["fps"], (video_meta_data["width"] * 2, video_meta_data["height"])) + cap = cv2.VideoCapture(input_video_path) + cap.set(1, start_frm) + + while current_frm <= end_frm: + ret, img = cap.read() + if not ret:  # guard: stop cleanly if the chunk extends past the last readable frame + break + border_img = cv2.copyMakeBorder(img, 0, 0, 0, int(video_meta_data["width"]), borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]) + border_img = __insert_texts(roi_analyzer_data.video_recs) + border_img = __insert_texts(roi_analyzer_data.video_circs) + border_img = __insert_texts(roi_analyzer_data.video_polys) + + for _, row in roi_analyzer_data.video_recs.iterrows(): + top_left_x, top_left_y, shape_name = (row["topLeftX"], row["topLeftY"], row["Name"]) + bottom_right_x, bottom_right_y = (row["Bottom_right_X"], row["Bottom_right_Y"],) + thickness, color = row["Thickness"], row["Color BGR"] + cv2.rectangle(border_img, (int(top_left_x), int(top_left_y)), (int(bottom_right_x), int(bottom_right_y)), color, int(thickness)) + + for _, row in roi_analyzer_data.video_circs.iterrows(): + center_x, center_y, radius, shape_name = (row["centerX"], row["centerY"], row["radius"], row["Name"]) + thickness, color = row["Thickness"], row["Color BGR"] + cv2.circle(border_img, (center_x, center_y), radius, color, int(thickness)) + + for _, row in roi_analyzer_data.video_polys.iterrows(): + vertices, shape_name = row["vertices"], row["Name"] + thickness, color = row["Thickness"], row["Color BGR"] + cv2.polylines(border_img, [vertices], True, color, thickness=int(thickness)) + + for animal_cnt, animal_name in enumerate(animal_ids): + if style_attr[SHOW_BODY_PARTS] or style_attr[SHOW_ANIMAL_NAMES]: + bp_data = data.loc[current_frm, body_part_dict[animal_name]].values + if threshold < bp_data[2]: + if style_attr[SHOW_BODY_PARTS]: + cv2.circle(border_img, (int(bp_data[0]), int(bp_data[1])), scalers["circle_size"], colors[animal_cnt], -1) + if style_attr[SHOW_ANIMAL_NAMES]: + cv2.putText(border_img, animal_name, (int(bp_data[0]), int(bp_data[1])), TextOptions.FONT.value, scalers["font_size"], colors[animal_cnt], TextOptions.TEXT_THICKNESS.value) + + for shape_name in video_shape_names: + timer = round(data.loc[current_frm, f"{animal_name}_{shape_name}_cum_sum_time"], 2) + entries = data.loc[current_frm, f"{animal_name}_{shape_name}_cum_sum_entries"] + cv2.putText(border_img, str(timer), loc_dict[animal_name][shape_name]["timer_data_loc"], TextOptions.FONT.value, scalers["font_size"], shape_meta_data[shape_name]["Color BGR"], TextOptions.TEXT_THICKNESS.value) + cv2.putText(border_img, str(entries), loc_dict[animal_name][shape_name]["entries_data_loc"], TextOptions.FONT.value, scalers["font_size"], shape_meta_data[shape_name]["Color BGR"], TextOptions.TEXT_THICKNESS.value) + writer.write(border_img) + current_frm += 1 + print(f"Multi-processing video frame {current_frm} on core {group_cnt}...") + cap.release() + writer.release() + + return group_cnt + +
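+# Chunking sketch (mirrors ROIPlotMultiprocess.run below; the toy dataframe and core count are illustrative, not part of the original file): +# import numpy as np, pandas as pd +# data_lst = np.array_split(pd.DataFrame({'x': range(100)}), 4) +# for cnt in range(len(data_lst)): data_lst[cnt]['group'] = cnt +# Each worker renders frames data_lst[i].index[0]..data_lst[i].index[-1] into "<i>.mp4", and concatenate_videos_in_folder() then joins the parts in numeric order.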
+class ROIPlotMultiprocess(ConfigReader, PlottingMixin): + """ + Visualize the ROI data (number of entries/exits, time spent in ROIs). + + .. note:: + `ROI tutorials `__. + + .. image:: _static/img/roi_visualize.png + :width: 400 + :align: center + + :param str config_path: Path to SimBA project config file in Configparser format + :param str video_path: Path to the video to create ROI visualizations for + :param dict style_attr: User-defined visualization settings. + :param int core_cnt: Number of cores to use. Defaults to -1, representing all available cores. + + :example: + >>> test = ROIPlotMultiprocess(config_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', + >>> video_path="/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/Together_1.avi", + >>> core_cnt=7, + >>> style_attr={'show_body_part': True, 'show_animal_name': True}, + >>> body_parts={'Animal_1': 'Nose_1', 'Animal_2': 'Nose_2'}) + >>> test.run() + """ + + def __init__(self, + config_path: Union[str, os.PathLike], + video_path: Union[str, os.PathLike], + style_attr: Dict[str, bool], + body_parts: Optional[dict] = None, + threshold: Optional[float] = 0.0, + core_cnt: Optional[int] = -1): + + # if platform.system() == "Darwin": + # multiprocessing.set_start_method("spawn", force=True) + ConfigReader.__init__(self, config_path=config_path) + PlottingMixin.__init__(self) + log_event(logger_name=str(__class__.__name__), log_type=TagNames.CLASS_INIT.value, msg=self.create_log_msg_from_init_args(locals=locals())) + check_float(name=f'{self.__class__.__name__} threshold', value=threshold, min_value=0.0, max_value=1.0) + check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1) + if core_cnt == -1: core_cnt = find_core_cnt()[0] + check_if_keys_exist_in_dict(data=style_attr, key=STYLE_KEYS, name=f'{self.__class__.__name__} style_attr') + check_file_exist_and_readable(file_path=video_path) + _, self.video_name, _ = get_fn_ext(video_path) + + settings = None + if body_parts: settings = {"body_parts": body_parts, 'threshold': threshold} + self.roi_analyzer = ROIAnalyzer(ini_path=config_path, data_path="outlier_corrected_movement_location", settings=settings) + if not body_parts: self.animal_id_lst = self.roi_analyzer.multi_animal_id_list + else: self.animal_id_lst = list(body_parts.keys()) + self.roi_analyzer.files_found = [os.path.join(self.roi_analyzer.input_folder, f"{self.video_name}.{self.roi_analyzer.file_type}")] + if not os.path.isfile(self.roi_analyzer.files_found[0]): + raise NoFilesFoundError(msg=f"SIMBA ERROR: Could not find the file at path {self.roi_analyzer.files_found[0]}.
Please make sure you have corrected body-part outliers or indicated that you want to skip outlier correction", source=self.__class__.__name__,) + self.roi_analyzer.run() + self.roi_entries_df = self.roi_analyzer.detailed_df + self.data_df, self.style_attr = self.roi_analyzer.data_df, style_attr + self.out_parent_dir = os.path.join(self.project_path, Paths.ROI_ANALYSIS.value) + if not os.path.exists(self.out_parent_dir): + os.makedirs(self.out_parent_dir) + self.video_save_path = os.path.join(self.out_parent_dir, f"{self.video_name}.mp4") + self.read_roi_data() + self.shape_columns = [] + _, self.shape_names = slice_roi_dict_for_video(data=self.roi_dict, video_name=self.video_name) + for animal in self.animal_id_lst: + for shape_name in self.shape_names: + self.data_df[f"{animal}_{shape_name}"] = 0; self.shape_columns.append(f"{animal}_{shape_name}") + self.bp_dict = self.roi_analyzer.bp_dict + self.__insert_data() + self.video_path = video_path + self.cap = cv2.VideoCapture(self.video_path) + self.video_meta_data = get_video_meta_data(self.video_path) + self.temp_folder = os.path.join(self.out_parent_dir, self.video_name, "temp") + if os.path.exists(self.temp_folder): + shutil.rmtree(self.temp_folder) + os.makedirs(self.temp_folder) + self.core_cnt, self.threshold = core_cnt, threshold + + def __insert_data(self): + roi_entries_dict = self.roi_entries_df[["ANIMAL", "SHAPE", "ENTRY FRAMES", "EXIT FRAMES"]].to_dict(orient="records") + for entry_dict in roi_entries_dict: + entry, exit = int(entry_dict["ENTRY FRAMES"]), int(entry_dict["EXIT FRAMES"]) + entry_dict["frame_range"] = list(range(entry, exit + 1)) + col_name = entry_dict["ANIMAL"] + "_" + entry_dict["SHAPE"] + self.data_df[col_name][self.data_df.index.isin(entry_dict["frame_range"])] = 1 + + def __calc_text_locs(self) -> dict: + loc_dict = {} + line_spacer = TextOptions.FIRST_LINE_SPACING.value + for animal_cnt, animal_name in enumerate(self.animal_id_lst): + loc_dict[animal_name] = {} + for shape in self.shape_names: + loc_dict[animal_name][shape] = {} + loc_dict[animal_name][shape]["timer_text"] = f"{shape} {animal_name} timer:" + loc_dict[animal_name][shape]["entries_text"] = f"{shape} {animal_name} entries:" + loc_dict[animal_name][shape]["timer_text_loc"] = ((self.video_meta_data["width"] + TextOptions.BORDER_BUFFER_X.value), (self.video_meta_data["height"] - (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + loc_dict[animal_name][shape]["timer_data_loc"] = (int(self.border_img_w - (self.border_img_w / 8)), (self.video_meta_data["height"] - (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + line_spacer += TextOptions.LINE_SPACING.value + loc_dict[animal_name][shape]["entries_text_loc"] = ((self.video_meta_data["width"] + TextOptions.BORDER_BUFFER_X.value), (self.video_meta_data["height"] - (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + loc_dict[animal_name][shape]["entries_data_loc"] = (int(self.border_img_w - (self.border_img_w / 8)), (self.video_meta_data["height"]- (self.video_meta_data["height"] + TextOptions.BORDER_BUFFER_Y.value) + self.scalers["space_size"] * line_spacer)) + line_spacer += TextOptions.LINE_SPACING.value + return loc_dict + + def __create_counters(self) -> dict: + cnt_dict = {} + for animal_cnt, animal_name in enumerate(self.animal_id_lst): + cnt_dict[animal_name] = {} + for shape in self.shape_names: + 
cnt_dict[animal_name][shape] = {} + cnt_dict[animal_name][shape]["timer"] = 0 + cnt_dict[animal_name][shape]["entries"] = 0 + cnt_dict[animal_name][shape]["entry_status"] = False + return cnt_dict + + def __calculate_cumulative(self): + for animal_name in self.animal_id_lst: + for shape in self.shape_names: + self.data_df[f"{animal_name}_{shape}_cum_sum_time"] = (self.data_df[f"{animal_name}_{shape}"].cumsum() / self.video_meta_data['fps']) + roi_bouts = list(detect_bouts(data_df=self.data_df, target_lst=[f"{animal_name}_{shape}"], fps=self.video_meta_data['fps'])["Start_frame"]) + self.data_df[f"{animal_name}_{shape}_entry"] = 0 + self.data_df.loc[roi_bouts, f"{animal_name}_{shape}_entry"] = 1 + self.data_df[f"{animal_name}_{shape}_cum_sum_entries"] = (self.data_df[f"{animal_name}_{shape}_entry"].cumsum()) + + def __create_shape_dicts(self): + shape_dicts = {} + for df in [self.roi_analyzer.video_recs, self.roi_analyzer.video_circs, self.roi_analyzer.video_polys]: + if not df["Name"].is_unique: + df = df.drop_duplicates(subset=["Name"], keep="first") + DuplicateNamesWarning('Some of your ROIs of the same shape type share the same name (e.g., two rectangles both named "My rectangle"). SimBA requires unique ROI names and will keep one ROI per duplicated name and drop the rest.', source=self.__class__.__name__) + d = df.set_index("Name").to_dict(orient="index") + shape_dicts = {**shape_dicts, **d} + return shape_dicts + + def __get_bordered_img_size(self) -> Tuple[int, int]: + cap = cv2.VideoCapture(self.video_path) + cap.set(1, 1) + _, img = cap.read()  # read from the local capture (the original read from self.cap and left this capture unused) + bordered_img = cv2.copyMakeBorder(img, 0, 0, 0, int(self.video_meta_data["width"]), borderType=cv2.BORDER_CONSTANT, value=[0, 0, 0]) + cap.release() + return bordered_img.shape[0], bordered_img.shape[1] + + def run(self): + video_timer = SimbaTimer(start=True) + max_dim = max(self.video_meta_data["width"], self.video_meta_data["height"]) + self.scalers = {} + self.scalers["circle_size"] = int(TextOptions.RADIUS_SCALER.value / (TextOptions.RESOLUTION_SCALER.value / max_dim)) + self.scalers["font_size"] = float(TextOptions.FONT_SCALER.value / (TextOptions.RESOLUTION_SCALER.value / max_dim)) + self.scalers["space_size"] = int(TextOptions.SPACE_SCALER.value / (TextOptions.RESOLUTION_SCALER.value / max_dim)) + color_lst = create_color_palettes(self.roi_analyzer.animal_cnt, int((len(self.roi_analyzer.bp_names) / 3)))[0] + self.border_img_h, self.border_img_w = self.__get_bordered_img_size() + self.loc_dict = self.__calc_text_locs() + self.cnt_dict = self.__create_counters() + self.shape_dicts = self.__create_shape_dicts() + self.__calculate_cumulative() + check_video_and_data_frm_count_align(video=self.video_path, data=self.data_df, name=self.video_name, raise_error=False) + data_lst = np.array_split(self.data_df.fillna(0), self.core_cnt) + for cnt in range(len(data_lst)): + data_lst[cnt]["group"] = cnt + + print(f"Creating ROI images, multiprocessing (determined chunksize: {self.multiprocess_chunksize}, cores: {self.core_cnt})...") + del self.roi_analyzer.logger + with multiprocessing.Pool(self.core_cnt, maxtasksperchild=self.maxtasksperchild) as pool: + constants = functools.partial(_roi_plotter_mp, + loc_dict=self.loc_dict, + scalers=self.scalers, + video_meta_data=self.video_meta_data, + save_temp_directory=self.temp_folder, + body_part_dict=self.bp_dict, + input_video_path=self.video_path, + roi_analyzer_data=self.roi_analyzer, + video_shape_names=self.shape_names, + shape_meta_data=self.shape_dicts, +
colors=color_lst, + style_attr=self.style_attr, + animal_ids=self.animal_id_lst, + threshold=self.threshold) + + for cnt, result in enumerate(pool.imap(constants, data_lst, chunksize=self.multiprocess_chunksize)): + print(f'Image batch {result+1} / {len(data_lst)} complete...') + + print(f"Joining {self.video_name} multi-processed ROI video...") + concatenate_videos_in_folder(in_folder=self.temp_folder, save_path=self.video_save_path, video_format="mp4") + video_timer.stop_timer() + pool.terminate() + pool.join() + stdout_success(msg=f"Video {self.video_name} created. ROI video saved at {self.video_save_path}", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__, ) + +# test = ROIPlotMultiprocess(config_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/project_config.ini', +# video_path="/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/videos/SI_DAY3_308_CD1_PRESENT.mp4", +# core_cnt=7, +# style_attr={'show_body_part': True, 'show_animal_name': False}, +# body_parts={'Animal_1': 'Nose'}) +# test.run() + +# test = ROIPlotMultiprocess(config_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', +# video_path="/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/Together_1.avi", +# core_cnt=7, +# style_attr={'show_body_parts': True, 'show_animal_name': True}, +# body_parts={'Animal_1': 'Nose_1', 'Animal_2': 'Nose_2'}) +# test.run() + + + +# +# test = ROIPlotMultiprocess(ini_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/project_config.ini', +# video_path="2022-06-20_NOB_DOT_4.mp4", +# core_cnt=7, +# style_attr={'Show_body_part': True, 'Show_animal_name': True}, body_parts={'Animal_1': 'Nose'}) +# test.run() +# + +# +# test = ROIPlotMultiprocess(ini_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/project_config.ini', +# video_path="F1 HAB.mp4", +# core_cnt=5, +# style_attr={'Show_body_part': True, 'Show_animal_name': True}) +# test.run() +# +# get_video_meta_data(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/ROI_analysis/2022-06-20_NOB_DOT_4.mp4') +# get_video_meta_data(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/Together_1.avi') + +# test = ROIPlot(ini_path=r'/Users/simon/Desktop/troubleshooting/train_model_project/project_folder/project_config.ini', video_path=r"Together_1.avi") +# test.insert_data() +# test.visualize_ROI_data() + +# test = ROIPlot(ini_path=r"Z:\DeepLabCut\DLC_extract\Troubleshooting\ROI_2_animals\project_folder\project_config.ini", video_path=r"Z:\DeepLabCut\DLC_extract\Troubleshooting\ROI_2_animals\project_folder\videos\Video7.mp4") +# test.insert_data() +# test.visualize_ROI_data() +# +# test = ROIPlotMultiprocess(ini_path=r'/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', +# video_path="Together_1.avi", +# style_attr={'Show_body_part': True, 'Show_animal_name': False}, +# core_cnt=5) +# test.run() + + +# test = ROIPlotMultiprocess(ini_path=r'/Users/simon/Desktop/envs/troubleshooting/DLC_2_Black_animals/project_folder/project_config.ini', +# video_path="Together_1.avi", +# style_attr={'Show_body_part': True, 'Show_animal_name': False}, +# core_cnt=5) +# test.run() diff --git a/simba/sandbox/abod.py b/simba/sandbox/abod.py new file mode 100644 index 
000000000..ef0ce89ff
--- /dev/null
+++ b/simba/sandbox/abod.py
@@ -0,0 +1,98 @@
+from typing import Union, Optional
+import numpy as np
+import pandas as pd
+from sklearn.covariance import EllipticEnvelope
+from pyod.models.abod import ABOD
+
+from simba.utils.checks import check_valid_array, check_float, check_int
+from sklearn.neighbors import LocalOutlierFactor
+from sklearn.datasets import make_blobs
+from simba.mixins.plotting_mixin import PlottingMixin
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+
+def angle_based_od(data: np.ndarray,
+                   k: Union[int, float] = 5,
+                   groupby_idx: Optional[int] = None,
+                   normalize: Optional[bool] = False) -> np.ndarray:
+    """
+    Angle-based outlier detection (ABOD). Returns the variance of the weighted cosine angles between each
+    observation and its k nearest neighbours; smaller variances indicate likely outliers.
+
+    :param np.ndarray data: 2D array of observations. If ``groupby_idx`` is passed, one column holds cluster labels.
+    :param Union[int, float] k: Number (if int) or fraction (if float) of nearest neighbours to evaluate per observation.
+    :param Optional[int] groupby_idx: Optional column index holding cluster labels. If passed, scores are computed per cluster, and rows labelled -1 are assigned the maximum observed score.
+    :param Optional[bool] normalize: If True, min-max normalize the scores.
+    :return: 1D array of angle-based outlier scores, one per observation.
+
+    Adapted from https://pyod.readthedocs.io/en/latest/_modules/pyod/models/abod.html#ABOD
+    """
+
+    def _wcos(x: np.ndarray, nn_s: np.ndarray):
+        wcos = np.full((nn_s.shape[0],), np.nan)
+        for j in range(nn_s.shape[0]):
+            wcos[j] = np.dot(x, nn_s[j]) / (np.linalg.norm(x, 2) ** 2) / (np.linalg.norm(nn_s[j], 2) ** 2)
+        return np.var(wcos)
+
+    if groupby_idx is not None:
+        check_int(name=f'{angle_based_od.__name__} groupby_idx', value=groupby_idx, min_value=0, max_value=data.shape[1]-1)
+        check_valid_array(source=f"{angle_based_od.__name__} data", data=data, accepted_sizes=[2], min_axis_1=3)
+    else:
+        check_valid_array(source=f"{angle_based_od.__name__} data", data=data, accepted_sizes=[2], min_axis_1=2)
+    check_float(name=f"{angle_based_od.__name__} k", value=k)
+    if groupby_idx is None:
+        if isinstance(k, int):
+            k = min(k, data.shape[0]-1)
+        elif isinstance(k, float):
+            k = int((data.shape[0]-1) * k)
+        distances = FeatureExtractionMixin.cdist(array_1=data, array_2=data)
+        results = np.full((data.shape[0],), np.nan)
+        for i in range(distances.shape[0]):
+            idx = np.argsort(distances[i])[1:k+1]
+            nn_s = data[idx, :]
+            results[i] = _wcos(data[i], nn_s)
+        if normalize:
+            return (results - np.min(results)) / (np.max(results) - np.min(results))
+        else:
+            return results
+    else:
+        results = []
+        data_w_idx = np.hstack((np.arange(0, data.shape[0]).reshape(-1, 1), data))
+        unique_c = np.unique(data[:, groupby_idx]).astype(np.float32)
+        if -1.0 in unique_c:
+            unique_c = unique_c[np.where(unique_c != -1)]
+            unclustered_idx = np.argwhere(data[:, groupby_idx] == -1.0).flatten()
+            unclustered = data_w_idx[unclustered_idx]
+            data_w_idx = np.delete(data_w_idx, unclustered_idx, axis=0)
+        else:
+            unclustered = None
+        for i in unique_c:
+            c_data = data_w_idx[np.argwhere(data_w_idx[:, groupby_idx + 1] == i)].reshape(-1, data_w_idx.shape[1])
+            c_data_idx = c_data[:, 0].reshape(c_data.shape[0], 1)
+            c_data = np.delete(c_data, [0, groupby_idx + 1], 1)
+            distances = FeatureExtractionMixin.cdist(array_1=c_data, array_2=c_data)
+            c_results = np.full((c_data.shape[0], 1), np.nan)
+            c_results_idx = np.full((c_data.shape[0], 1), np.nan)
+            for j in range(distances.shape[0]):
+                if isinstance(k, int):
+                    c_k = min(k, c_data.shape[0]-1)
+                elif isinstance(k, float):
+                    c_k = int((c_data.shape[0] - 1) * k)
+                if c_k < 1:
+                    c_k = 1
+                nn_idx = np.argsort(distances[j])[1:c_k + 1]
+                nn_s = c_data[nn_idx, :]
+                c_results_idx[j] = c_data_idx[j]
+                c_results[j] = _wcos(c_data[j], nn_s)
+            if normalize:
+                c_results = (c_results - np.min(c_results)) / (np.max(c_results) - np.min(c_results))
+            results.append(np.hstack((c_results_idx, c_results)))
+        results = np.concatenate(results, axis=0)
+        if unclustered is not None:
+            max_angle_od = np.full((unclustered.shape[0], 1), np.max(results[:, -1]))
+            unclustered = np.hstack((unclustered, max_angle_od))[:, [0, -1]]
+            results = np.vstack((results, unclustered))
+        return results[np.argsort(results[:, 0])][:, -1]
+
+
+data, lbls = make_blobs(n_samples=1000, n_features=2, centers=2, random_state=42)
+abod_model = ABOD(contamination=0.1, method='fast', n_neighbors=10)
+abod_model.fit(data)
+
+pred = abod_model.predict(data)
\ No newline at end of file
diff --git a/simba/sandbox/add_body_part.py b/simba/sandbox/add_body_part.py
new file mode 100644
index 000000000..0c64dfba9
--- /dev/null
+++ b/simba/sandbox/add_body_part.py
@@ -0,0 +1,35 @@
+import os.path
+import numpy as np
+import pandas as pd
+
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.read_write import read_df, write_df, get_fn_ext, read_frm_of_video
+from simba.utils.checks import check_if_dir_exists
+
+#CHANGE THIS TO THE PATH OF YOUR SIMBA PROJECT CONFIG
+CONFIG_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini'
+
+#CHANGE THIS PATH TO A NEW DIRECTORY ON YOUR COMPUTER
+SAVE_DIRECTORY = '/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/new_data'
+
+BP_1 = 'Ear_left_1' # THE NAME OF YOUR LEFT EAR BODY-PART
+BP_2 = 'Ear_right_1' # THE NAME OF YOUR RIGHT EAR BODY-PART
+NEW_BP_NAME = 'Head_1' # THE NAME OF YOUR NEW BODY-PART
+
+########################
+
+config = ConfigReader(config_path=CONFIG_PATH) # READS IN YOUR PROJECT CONFIG
+check_if_dir_exists(in_dir=SAVE_DIRECTORY) # CHECKS THAT YOUR SPECIFIED SAVE_DIRECTORY ACTUALLY EXISTS
+
+for file_path in config.outlier_corrected_paths: # LOOPS OVER EACH CSV FILE IN THE "project_folder/csv/outlier_corrected_movement_location" DIRECTORY
+    df = read_df(file_path=file_path, file_type=config.file_type) # READS THE FILE
+    file_name = get_fn_ext(filepath=file_path)[1] # GETS THE FILENAME OF THE FILE BEING READ
+    bp_1, bp_2 = df[[f'{BP_1}_x', f'{BP_1}_y']].values.astype(int), df[[f'{BP_2}_x', f'{BP_2}_y']].values.astype(int) # GETS THE COLUMNS OF THE SPECIFIED BODY-PARTS
+    results = FeatureExtractionMixin.find_midpoints(bp_1=bp_1, bp_2=bp_2, percentile=0.5) # FINDS THE MIDPOINT BETWEEN THE TWO BODY-PARTS
+    results = np.hstack((results, np.ones(results.shape[0]).reshape(-1, 1))) # THE NEW BODY-PART WILL NOT HAVE A PROBABILITY VALUE, SO WE SET THEM ALL TO 1
+    df = pd.concat([df, pd.DataFrame(results, columns=[f'{NEW_BP_NAME}_x', f'{NEW_BP_NAME}_y', f'{NEW_BP_NAME}_p'])], axis=1) # ADDS THE NEW BODY-PART TO THE DATA
+    save_path = os.path.join(SAVE_DIRECTORY, f'{file_name}.{config.file_type}') # CREATES A SAVE PATH FOR THE DATA WITH THE NEW BODY-PART
+    write_df(df=df, file_type=config.file_type, save_path=save_path) # WRITES THE DATA TO DISK
+    print(f'File saved {save_path}...')
+print(f'SimBA complete: {len(config.outlier_corrected_paths)} files saved in {SAVE_DIRECTORY}')
\ No newline at end of file
diff --git a/simba/sandbox/add_body_part.py.zip b/simba/sandbox/add_body_part.py.zip
new file mode 100644
index 0000000000000000000000000000000000000000..7aceb2877012debe0ba80ce0ee9d8fd710460d50
GIT binary patch
literal 1276
zcmWIWW@Zs#-~hsa{G12|DA40%U=UzXU`R|!iBHN;sf;g3EGp3}s0g+Sf;^%A-A97595UVTelYSVIL${i+0PmS(S;M)><^?zNmGgIozy|PXT{kF#U&(s*7>+1R_HN#-lj(b@*6QkdCy%J4gOugE%
zYKKoi!+oubTc&JgatT|Ly!b@irXUH;-q*6r1#4GU?pbLZ=JtxE@7LnPmYMy_KYi(1
z$g6$%jpid0KaKn?xvF2LzKKZ>Pv2?q882wv^^(%bMU^PKbB{xA
zPb+_;wQQ8Z_r3c&Wt-lfb$d6r>__(%S7EEatqWqlTr}1G%_q7)JM3%b&hSO0ch+0p zUR$iSt>>@lpQt-Kw)Ss>Eeu<{JSo?Mg4Z(IKQTrGcpZ2GPl?HpgP+L{R})n7-Q;d?|g_p=zqh4ZEsrPt|9 z+LF@W`Bym8a@!$g$>&GP-1Q0{ELt>8R&UM8CyXcT^BNv5>2aR*G=!1m%>(7m8|%&) zpPCpvC;4v1BE1gz7dekAKQrv9ew6I@*6_wb+nzg|6DOW{Q@eA;SGPC6>t|`-m?ySn z&Eh$^NtR0c7>>22_w3JVn6Gj)L@qiyUvbvkRpkd__lI4vY06`rG1<9DRj$)FGURw_ zOn^y^l)$NP?w9W#KggB$&f~@_^PY@v1}g8~_=N_{j0?ZGK)qg7aq^dshHv+HChd3E zmu0z?6KB18>c;-hvZ8J}DnV;9@4lV8^3J@~cj{IZHKkWiK3XDX@*-*#o4Sxj$DFR% zIPN3mwN}aBymY*$h49F7#I~i(do=0vZMl#s`5UrkXnvmkQ8uyX&dkL!{;}!rc5V*1 zl;CklRgvAQ#rr+a71k-6jJ7R0mbvh?d!qlT2v1jKgCl0^UOe6&D(=2N>UF%}b**Pb(2uGRNh zQe*P3;qtcN&$nKAW}VWhz1O!_IBj$6!p}?Jw>^0xz+8Ig!E&RxLY0N{%G@%XGu1y_ zTeVNR?c&Rm%c4_>RKkEm0vvW-6 z*>F&cg@FN7+5~trGKnxFN+M)AP)P&>OBz8eQp=?PZ&o&tK1Lvn1JWHpTNxMtXT?Z< literal 0 HcmV?d00001 diff --git a/simba/sandbox/adjusted_rand.py b/simba/sandbox/adjusted_rand.py new file mode 100644 index 000000000..646ecfa04 --- /dev/null +++ b/simba/sandbox/adjusted_rand.py @@ -0,0 +1,32 @@ +import numpy as np +from sklearn.metrics import (adjusted_mutual_info_score, adjusted_rand_score, fowlkes_mallows_score) + + +def cluster_comparison(x: np.ndarray, y: np.ndarray, method: str = 'adjusted_rand_score'): + if method == 'adjusted_mutual_info_score': + return adjusted_mutual_info_score(labels_true=x, labels_pred=y) + elif method == 'fowlkes_mallows_score': + return fowlkes_mallows_score(labels_true=x, labels_pred=y) + else: + return adjusted_rand_score(labels_true=x, labels_pred=y) + + + + +def adjusted_rand(x: np.ndarray, y: np.ndarray): + n_samples = np.int64(x.shape[0]) + classes, class_idx = np.unique(x, return_inverse=True) + clusters, cluster_idx = np.unique(y, return_inverse=True) + n_classes = classes.shape[0] + n_clusters = clusters.shape[0] + contingency = coo_matrix((np.ones(class_idx.shape[0]), (class_idx, cluster_idx)), shape=(n_classes, n_clusters), dtype=np.int64) + print(contingency) + + +x = np.array([0, 1, 0, 0, 0]) +y = np.array([1, 1, 1, 1, 1]) +adjusted_rand(x=x, y=y) + + + + #(tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred) \ No newline at end of file diff --git a/simba/sandbox/adjusted_rand_score.py b/simba/sandbox/adjusted_rand_score.py new file mode 100644 index 000000000..8c7ebed60 --- /dev/null +++ b/simba/sandbox/adjusted_rand_score.py @@ -0,0 +1,69 @@ +import numpy as np +from simba.utils.checks import check_valid_array +from sklearn.metrics import adjusted_rand_score, fowlkes_mallows_score, adjusted_mutual_info_score + +def adjusted_rand(x: np.ndarray, y: np.ndarray) -> float: + """ + Calculate the Adjusted Rand Index (ARI) between two clusterings. + + :param np.ndarray x: 1D array representing the labels of the first model. + :param np.ndarray y: 1D array representing the labels of the second model. + :return float: 1 indicates perfect clustering agreement, 0 indicates random clustering, and negative values indicate disagreement between the clusterings. 
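+
+    This wraps scikit-learn's ``adjusted_rand_score``; for reference, the score is the standard chance-corrected form of the Rand index (RI):
+
+    .. math::
+       ARI = \frac{RI - E[RI]}{\max(RI) - E[RI]}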
+
+    :example:
+    >>> x = np.array([0, 0, 0, 0, 0])
+    >>> y = np.array([1, 1, 1, 1, 1])
+    >>> adjusted_rand(x=x, y=y)
+    1.0
+    """
+
+    check_valid_array(data=x, source=adjusted_rand.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, int), min_axis_0=1)
+    check_valid_array(data=y, source=adjusted_rand.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, int), accepted_shapes=[(x.shape[0],)])
+    return adjusted_rand_score(labels_true=x, labels_pred=y)
+
+def fowlkes_mallows(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Calculate the Fowlkes-Mallows Index (FMI) between two clusterings.
+
+    :param np.ndarray x: 1D array representing the labels of the first model.
+    :param np.ndarray y: 1D array representing the labels of the second model.
+    :return float: Score between 0 and 1. 1 indicates perfect clustering agreement, 0 indicates random clustering.
+    """
+    check_valid_array(data=x, source=fowlkes_mallows.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, int), min_axis_0=1)
+    check_valid_array(data=y, source=fowlkes_mallows.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, int), accepted_shapes=[(x.shape[0],)])
+    return fowlkes_mallows_score(labels_true=x, labels_pred=y)
+
+
+def adjusted_mutual_info(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Calculate the Adjusted Mutual Information (AMI) between two clusterings as a measure of their similarity.
+
+    :param np.ndarray x: 1D array representing the labels of the first model.
+    :param np.ndarray y: 1D array representing the labels of the second model.
+    :return float: Score between 0 and 1, where 1 indicates perfect clustering agreement.
+    """
+    check_valid_array(data=x, source=adjusted_mutual_info.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, int), min_axis_0=1)
+    check_valid_array(data=y, source=adjusted_mutual_info.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, int), accepted_shapes=[(x.shape[0],)])
+    return adjusted_mutual_info_score(labels_true=x, labels_pred=y)
+
+
+# x = np.array([0, 0, 1, 1, 2])
+# y = np.array([1, 1, 2, 1, 3])
+# x = np.random.randint(0, 5, (100,))
+# adjusted_mutual_info(x=x, y=y)
diff --git a/simba/sandbox/advanced_interpolator.py b/simba/sandbox/advanced_interpolator.py
new file mode 100644
index 000000000..649341727
--- /dev/null
+++ b/simba/sandbox/advanced_interpolator.py
@@ -0,0 +1,199 @@
+__author__ = "Simon Nilsson"
+
+import os
+from typing import Any, Dict, Optional, Union
+
+import numpy as np
+import pandas as pd
+pd.options.mode.chained_assignment = None
+
+try:
+    from typing import Literal
+except ImportError:
+    from typing_extensions import Literal
+
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.checks import (check_valid_boolean, check_instance, check_that_column_exist, check_str, check_file_exist_and_readable)
+from simba.utils.errors import DataHeaderError, InvalidInputError
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import (find_files_of_filetypes_in_directory, get_fn_ext, read_df, write_df, copy_files_to_directory)
+
+
+BODY_PART_TYPE = 'body-part'
+ANIMAL_TYPE = 'animal'
+NEAREST = 'nearest'
+LINEAR = 'linear'
+QUADRATIC = 'quadratic'
+
+class AdvancedInterpolator(ConfigReader):
+    """
+    Interpolation method that allows different interpolation parameters for different animals or body-parts.
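+    Each body-part is interpolated independently over its ``_x`` and ``_y`` columns; frames where both coordinates are at or below zero are treated as missing and filled.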
+    For example, interpolate some body-parts of animals using linear interpolation, and other body-parts of animals using nearest interpolation.
+
+    .. image:: _static/img/AdvancedInterpolator.webp
+       :width: 600
+       :align: center
+
+    :parameter Union[str, os.PathLike] data_path: Path to a folder containing pose-estimation data, or to a single file with pose-estimation data.
+    :parameter Union[str, os.PathLike] config_path: Path to SimBA project config file in Configparser format.
+    :parameter Literal["animal", "body-part"] type: Type of interpolation: animal or body-part.
+    :parameter Dict settings: Interpolation rules for each animal or each animal body-part. See examples.
+    :parameter bool multi_index_data: If True, the incoming data is a multi-index columns dataframe, as in the ``project_folder/csv/input_csv`` directory. Default: False.
+    :parameter bool overwrite: If True, overwrites the input data. If False, saves the original input data in a datetime-stamped sub-directory.
+    :parameter Optional[bool] verbose: If True, prints the progress. Default: True.
+
+    :examples:
+    >>> interpolator = AdvancedInterpolator(data_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/csv/input_csv',
+    >>>                                     config_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini',
+    >>>                                     type='animal',
+    >>>                                     settings={'Animal_1': 'linear', 'Animal_2': 'quadratic'},
+    >>>                                     multi_index_data=True)
+    >>> interpolator.run()
+    >>> interpolator = AdvancedInterpolator(data_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/csv/input_csv',
+    >>>                                     config_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini',
+    >>>                                     type='body-part',
+    >>>                                     settings={'Simon': {'Ear_left_1': 'linear',
+    >>>                                                         'Ear_right_1': 'linear',
+    >>>                                                         'Nose_1': 'quadratic',
+    >>>                                                         'Lat_left_1': 'quadratic',
+    >>>                                                         'Lat_right_1': 'quadratic',
+    >>>                                                         'Center_1': 'nearest',
+    >>>                                                         'Tail_base_1': 'nearest'},
+    >>>                                               'JJ': {'Ear_left_2': 'nearest',
+    >>>                                                      'Ear_right_2': 'nearest',
+    >>>                                                      'Nose_2': 'quadratic',
+    >>>                                                      'Lat_left_2': 'quadratic',
+    >>>                                                      'Lat_right_2': 'quadratic',
+    >>>                                                      'Center_2': 'linear',
+    >>>                                                      'Tail_base_2': 'linear'}},
+    >>>                                     multi_index_data=True)
+    >>> interpolator.run()
+    """
+
+    def __init__(self,
+                 data_path: Union[str, os.PathLike],
+                 config_path: Union[str, os.PathLike],
+                 settings: Dict[str, Any],
+                 type: Optional[Literal["animal", "body-part"]] = 'body-part',
+                 verbose: Optional[bool] = True,
+                 multi_index_data: Optional[bool] = False,
+                 overwrite: Optional[bool] = True):
+
+        ConfigReader.__init__(self, config_path=config_path, read_video_info=False)
+        check_str(name=f'{self.__class__.__name__} type', value=type, options=[ANIMAL_TYPE, BODY_PART_TYPE], raise_error=True)
+        if os.path.isfile(data_path):
+            check_file_exist_and_readable(file_path=data_path)
+            self.file_paths = [data_path]
+            self.input_dir = os.path.dirname(data_path)
+            self.cpy_dir = os.path.join(os.path.dirname(data_path), f"Pre_Advanced_Interpolation_{self.datetime}")
+        elif os.path.isdir(data_path):
+            self.file_paths = find_files_of_filetypes_in_directory(directory=data_path, extensions=[f".{self.file_type}"], raise_warning=False, raise_error=True)
+            self.cpy_dir = os.path.join(data_path, f"Pre_Advanced_Interpolation_{self.datetime}")
+            self.input_dir = data_path
+        else:
+            raise InvalidInputError(msg=f'{data_path} is not a valid file path or file directory', source=self.__class__.__name__)
+        check_valid_boolean(value=[multi_index_data,
overwrite], source=self.__class__.__name__, raise_error=True) + check_instance(source=self.__class__.__name__, instance=settings, accepted_types=(dict,)) + for animal, animal_data in settings.items(): + if type == BODY_PART_TYPE: + check_instance(source=self.__class__.__name__, instance=animal_data, accepted_types=(dict,)) + for bp_name, bp_data in animal_data.items(): + check_str(name='body_part', value=bp_name, options=self.project_bps) + check_str(name='method', value=bp_data, options=[LINEAR, NEAREST, QUADRATIC]) + else: + check_str(name='method', value=animal_data, options=[LINEAR, NEAREST, QUADRATIC]) + self.settings, self.type, self.multi_index_data, self.verbose = settings, type, multi_index_data, verbose + if type == ANIMAL_TYPE: + self.__transpose_settings() + + self.overwrite = overwrite + if not overwrite and not os.path.isdir(self.cpy_dir): os.makedirs(self.cpy_dir) + + def __transpose_settings(self): + """Helper to transpose settings dict if interpolating per animal, so the same method can be used for both animal and body-part interpolation""" + transposed_settings = {} + for animal_name, body_part_data in self.animal_bp_dict.items(): + transposed_settings[animal_name] = {} + for animal_body_part in body_part_data["X_bps"]: + transposed_settings[animal_name][animal_body_part[:-2]] = self.settings[animal_name] + self.settings = transposed_settings + + def __insert_multi_index(self, df: pd.DataFrame) -> pd.DataFrame: + multi_idx_header = [] + for i in range(len(df.columns)): + multi_idx_header.append(("IMPORTED_POSE", "IMPORTED_POSE", list(df.columns)[i])) + df.columns = pd.MultiIndex.from_tuples(multi_idx_header) + return df + + def run(self): + for file_cnt, file_path in enumerate(self.file_paths): + video_timer = SimbaTimer(start=True) + df = read_df(file_path=file_path, file_type=self.file_type, check_multiindex=self.multi_index_data).fillna(0).reset_index(drop=True) + _, video_name, _ = get_fn_ext(filepath=file_path) + if self.verbose: print(f"Interpolating data in video {video_name} ({file_cnt+1}/{len(self.file_paths)}) ...") + if len(df.columns) != len(self.bp_col_names): + raise DataHeaderError(msg=f"The SimBA project suggest the data should have {len(self.bp_col_names)} columns, but the input data has {len(df.columns)} columns", source=self.__class__.__name__) + df.columns = self.bp_headers + df[df < 0] = 0 + for animal_name, animal_body_parts in self.settings.items(): + for bp, interpolation_setting in animal_body_parts.items(): + check_that_column_exist(df=df, column_name=[f"{bp}_x", f"{bp}_y"], file_name=file_path) + df[[f"{bp}_x", f"{bp}_y"]] = df[[f"{bp}_x", f"{bp}_y"]].astype(int) + idx = df.loc[(df[f"{bp}_x"] <= 0.0) & (df[f"{bp}_y"] <= 0.0)].index.tolist() + if self.verbose: print(f"Interpolating {len(idx)} {bp} body-parts in video {video_name}...") + df.loc[idx, [f"{bp}_x", f"{bp}_y"]] = np.nan + df[[f"{bp}_x", f"{bp}_y"]] = (df[[f"{bp}_x", f"{bp}_y"]].interpolate(method=interpolation_setting, axis=0).ffill().bfill().astype(int)) + df[[f"{bp}_x", f"{bp}_y"]][df[[f"{bp}_x", f"{bp}_y"]] < 0] = 0 + if self.multi_index_data: + df = self.__insert_multi_index(df=df) + if not self.overwrite: + copy_files_to_directory(file_paths=[file_path], dir=self.cpy_dir, verbose=False) + write_df(df=df, file_type=self.file_type, save_path=file_path, multi_idx_header=self.multi_index_data) + video_timer.stop_timer() + print(f'Video {video_name} complete ({file_cnt+1}/{len(self.file_paths)}). 
Elapsed time {video_timer.elapsed_time_str}s') + self.timer.stop_timer() + if self.overwrite: + msg = f"Advanced interpolation complete. Data saved in {self.input_dir}." + else: + msg = f"Advanced interpolation complete. Data saved in {self.input_dir}. Original data saved in {self.cpy_dir}." + stdout_success(msg=msg, elapsed_time=self.timer.elapsed_time_str, source=self.__class__.__name__) + + +# +# +# INTERPOLATION_SETTINGS = {'Simon': {'Ear_left_1': 'linear', +# 'Ear_right_1': 'linear', +# 'Nose_1': 'quadratic', +# 'Lat_left_1': 'quadratic', +# 'Lat_right_1': 'quadratic', +# 'Center_1': 'nearest', +# 'Tail_base_1': 'nearest'}, +# 'JJ': {'Ear_left_2': 'nearest', +# 'Ear_right_2': 'nearest', +# 'Nose_2': 'quadratic', +# 'Lat_left_2': 'quadratic', +# 'Lat_right_2': 'quadratic', +# 'Center_2': 'linear', +# 'Tail_base_2': 'linear'}} +# +# +# INTERPOLATION_SETTINGS = {'Animal_1': 'linear', 'Animal_2': 'quadratic'} +# +# advanced_interpolator = AdvancedInterpolator(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', +# data_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/new_data', +# settings=INTERPOLATION_SETTINGS, type='animal', multi_index_data=True, overwrite=False) +# +# advanced_interpolator.run() + +# for animal, animal_data in settings.items(): +# check_instance(source=self.__class__.__name__, instance=animal_data, accepted_types=(dict,)) +# if type == BODY_PART_TYPE: +# for bp_name, bp_data in animal_data.items(): +# check_if_keys_exist_in_dict(data=bp_data, key=['method', 'time_window']) +# check_str(name='method', value=bp_data['method'], options=[GAUSSIAN, SAVITZKY_GOLAY]) +# check_int(name='time_window', value=bp_data['time_window'], min_value=1) +# else: +# check_if_keys_exist_in_dict(data=animal_data, key=['method', 'time_window']) +# check_str(name='method', value=animal_data['method'], options=[GAUSSIAN, SAVITZKY_GOLAY]) +# check_int(name='time_window', value=animal_data['time_window'], min_value=1) \ No newline at end of file diff --git a/simba/sandbox/advanced_smoothing.py b/simba/sandbox/advanced_smoothing.py new file mode 100644 index 000000000..5532fdebf --- /dev/null +++ b/simba/sandbox/advanced_smoothing.py @@ -0,0 +1,180 @@ +__author__ = "Simon Nilsson" + +import os +from copy import deepcopy +from typing import Any, Dict, Optional, Union +import pandas as pd + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + +from simba.mixins.config_reader import ConfigReader +from simba.utils.checks import (check_valid_boolean, check_instance, check_str, check_int, check_that_column_exist, check_file_exist_and_readable) +from simba.utils.enums import Methods, TagNames +from simba.utils.errors import DataHeaderError, NoFilesFoundError, InvalidInputError +from simba.utils.printing import SimbaTimer, log_event, stdout_success +from simba.utils.read_write import (find_files_of_filetypes_in_directory, + find_video_of_file, get_fn_ext, + get_video_meta_data, read_df, write_df, copy_files_to_directory) +from simba.utils.data import savgol_smoother, df_smoother + +BODY_PART_TYPE = 'body-part' +ANIMAL_TYPE = 'animal' +GAUSSIAN = 'gaussian' +SAVITZKY_GOLAY = 'savitzky_golay' +TIME_WINDOW = 'time_window' + + +class AdvancedSmoother(ConfigReader): + """ + Smoothing method that allows different smoothing parameters for different animals or body-parts. 
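+    Each animal or body-part can be assigned its own method (``savitzky_golay`` or ``gaussian``) and ``time_window`` in milliseconds; the video FPS, read from the video file or from the project's ``logs/video_info.csv`` file, is used to convert each time window into frames.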
+    For example, smooth some body-parts of animals using Savitzky-Golay smoothing, and other body-parts of animals using Gaussian smoothing.
+
+    :parameter str data_path: Path to pose-estimation data in CSV or parquet format.
+    :parameter str config_path: Path to SimBA project config file in Configparser format.
+    :parameter Literal type: Level of smoothing: animal or body-part.
+    :parameter Dict settings: Smoothing rules for each animal or each animal body-part.
+    :parameter bool multi_index_data: If True, the incoming data is a multi-index columns dataframe, as in the ``project_folder/csv/input_csv`` directory. Default: False.
+    :parameter bool overwrite: If True, overwrites the input data. If False, saves a copy of the input data in a datetime-stamped sub-directory.
+    :parameter Optional[bool] verbose: If True, prints the progress. Default: True.
+
+    :examples:
+    >>> smoother = AdvancedSmoother(data_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/csv/input_csv',
+    >>>                             config_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini',
+    >>>                             type='animal',
+    >>>                             settings={'Simon': {'method': 'savitzky_golay', 'time_window': 200},
+    >>>                                       'JJ': {'method': 'savitzky_golay', 'time_window': 200}},
+    >>>                             multi_index_data=True,
+    >>>                             overwrite=False)
+    >>> smoother.run()
+    """
+
+    def __init__(self,
+                 data_path: Union[str, os.PathLike],
+                 config_path: Union[str, os.PathLike],
+                 settings: Dict[str, Any],
+                 type: Optional[Literal["animal", "body-part"]] = 'body-part',
+                 verbose: Optional[bool] = True,
+                 multi_index_data: Optional[bool] = False,
+                 overwrite: Optional[bool] = True):
+
+        ConfigReader.__init__(self, config_path=config_path, read_video_info=False)
+        log_event(logger_name=str(self.__class__.__name__), log_type=TagNames.CLASS_INIT.value, msg=f"data_path: {data_path}, type: {type}, settings: {settings}, multi_index_data: {multi_index_data}, overwrite: {overwrite}",)
+        check_str(name=f'{self.__class__.__name__} type', value=type, options=[ANIMAL_TYPE, BODY_PART_TYPE], raise_error=True)
+        if os.path.isfile(data_path):
+            check_file_exist_and_readable(file_path=data_path)
+            self.file_paths = [data_path]
+            self.input_dir = os.path.dirname(data_path)
+            self.cpy_dir = os.path.join(os.path.dirname(data_path), f"Pre_Advanced_Smoothing_{self.datetime}")
+        elif os.path.isdir(data_path):
+            self.file_paths = find_files_of_filetypes_in_directory(directory=data_path, extensions=[f".{self.file_type}"], raise_warning=False, raise_error=True)
+            self.cpy_dir = os.path.join(data_path, f"Pre_Advanced_Smoothing_{self.datetime}")
+            self.input_dir = data_path
+        else:
+            raise InvalidInputError(msg=f'{data_path} is not a valid file path or file directory', source=self.__class__.__name__)
+        check_valid_boolean(value=[multi_index_data, overwrite], source=self.__class__.__name__, raise_error=True)
+        check_instance(source=self.__class__.__name__, instance=settings, accepted_types=(dict,))
+        for animal, animal_data in settings.items():
+            if type == BODY_PART_TYPE:
+                check_instance(source=self.__class__.__name__, instance=animal_data, accepted_types=(dict,))
+                for bp_name, bp_data in animal_data.items():
+                    check_str(name='body_part', value=bp_name, options=self.project_bps)
+                    check_str(name='method', value=bp_data['method'], options=[SAVITZKY_GOLAY, GAUSSIAN])
+                    check_int(name='time_window', value=bp_data[TIME_WINDOW], min_value=1)
+            else:
+                check_str(name='method', value=animal_data['method'], options=[SAVITZKY_GOLAY, GAUSSIAN])
+                check_int(name='time_window', value=animal_data[TIME_WINDOW], min_value=1)
+        self.settings, self.type, self.multi_index_data, self.verbose = settings, type, multi_index_data, verbose
+        if type == ANIMAL_TYPE:
+            self.__transpose_settings()
+
+        self.overwrite = overwrite
+        if not overwrite and not os.path.isdir(self.cpy_dir): os.makedirs(self.cpy_dir)
+
+    def __transpose_settings(self):
+        """Helper to transpose settings dict if smoothing per animal, so the same method can be used for both animal and body-part smoothing"""
+        transposed_settings = {}
+        for animal_name, body_part_data in self.animal_bp_dict.items():
+            transposed_settings[animal_name] = {}
+            for animal_body_part in body_part_data["X_bps"]:
+                transposed_settings[animal_name][animal_body_part[:-2]] = self.settings[animal_name]
+        self.settings = transposed_settings
+
+    def __insert_multi_index(self, df: pd.DataFrame) -> pd.DataFrame:
+        multi_idx_header = []
+        for i in range(len(df.columns)):
+            multi_idx_header.append(("IMPORTED_POSE", "IMPORTED_POSE", list(df.columns)[i]))
+        df.columns = pd.MultiIndex.from_tuples(multi_idx_header)
+        return df
+
+    def run(self):
+        for file_cnt, file_path in enumerate(self.file_paths):
+            video_timer = SimbaTimer(start=True)
+            _, video_name, _ = get_fn_ext(filepath=file_path)
+            df = read_df(file_path=file_path, file_type=self.file_type, check_multiindex=self.multi_index_data).fillna(0).reset_index(drop=True)
+            if self.verbose: print(f"Smoothing data in video {video_name} ({file_cnt+1}/{len(self.file_paths)})...")
+            if len(df.columns) != len(self.bp_col_names):
+                raise DataHeaderError(msg=f"The SimBA project suggests the data should have {len(self.bp_col_names)} columns, but the input data has {len(df.columns)} columns", source=self.__class__.__name__)
+            df.columns = self.bp_headers
+            df[df < 0] = 0
+            video_path = find_video_of_file(video_dir=self.video_dir, filename=video_name, warning=False, raise_error=False)
+            if video_path is None:
+                try:
+                    video_meta_data = {}
+                    self.video_info_df = self.read_video_info_csv(file_path=self.video_info_path)
+                    _, _, fps = self.read_video_info(video_name=video_name)
+                    video_meta_data["fps"] = fps
+                except:
+                    raise NoFilesFoundError(msg=f"No video for file {video_name} found in SimBA project. Import the video before doing smoothing.
To perform smoothing, SimBA needs the video fps from the video itself OR the logs/video_info.csv file in order to read the video FPS.", source=self.__class__.__name__) + else: + video_meta_data = get_video_meta_data(video_path=video_path) + out_df = deepcopy(df) + for animal_name, animal_body_parts in self.settings.items(): + for bp, smoothing_setting in animal_body_parts.items(): + if self.verbose: print(f"Smoothing body-part {bp} in video {video_name} using method {smoothing_setting['method']} (time window: {smoothing_setting[TIME_WINDOW]}ms)...") + check_that_column_exist(df=df, column_name=[f"{bp}_x", f"{bp}_y"], file_name=file_path) + bp_df = df[[f"{bp}_x", f"{bp}_y"]] + if smoothing_setting['method'] == SAVITZKY_GOLAY: + bp_df = savgol_smoother(data=bp_df, fps=video_meta_data['fps'], time_window=smoothing_setting[TIME_WINDOW], source=f'{file_path} {bp}') + else: + bp_df = df_smoother(data=bp_df, fps=video_meta_data['fps'], time_window=smoothing_setting[TIME_WINDOW], source=f'{file_path} {bp}') + out_df[[f"{bp}_x", f"{bp}_y"]] = bp_df + if self.multi_index_data: + out_df = self.__insert_multi_index(df=out_df) + if not self.overwrite: + copy_files_to_directory(file_paths=[file_path], dir=self.cpy_dir, verbose=False) + write_df(df=out_df, file_type=self.file_type, save_path=file_path, multi_idx_header=self.multi_index_data) + video_timer.stop_timer() + print(f'Smoothing video {video_name} complete ({file_cnt+1}/{len(self.file_paths)}). Elapsed time {video_timer.elapsed_time_str}s') + self.timer.stop_timer() + if self.overwrite: + msg = f"Advanced smoothing complete. Data saved in {self.input_dir}." + else: + msg = f"Advanced smoothing complete. Data saved in {self.input_dir}. Original data saved in {self.cpy_dir}." + stdout_success(msg=msg, elapsed_time=self.timer.elapsed_time_str, source=self.__class__.__name__) + +SMOOTHING_SETTINGS = {'Simon': {'Ear_left_1': {'method': 'savitzky_golay', 'time_window': 3500}, + 'Ear_right_1': {'method': 'gaussian', 'time_window': 500}, + 'Nose_1': {'method': 'savitzky_golay', 'time_window': 2000}, + 'Lat_left_1': {'method': 'savitzky_golay', 'time_window': 2000}, + 'Lat_right_1': {'method': 'gaussian', 'time_window': 2000}, + 'Center_1': {'method': 'savitzky_golay', 'time_window': 2000}, + 'Tail_base_1': {'method': 'gaussian', 'time_window': 500}}, + 'JJ': {'Ear_left_2': {'method': 'savitzky_golay', 'time_window': 2000}, + 'Ear_right_2': {'method': 'savitzky_golay', 'time_window': 500}, + 'Nose_2': {'method': 'gaussian', 'time_window': 3500}, + 'Lat_left_2': {'method': 'savitzky_golay', 'time_window': 500}, + 'Lat_right_2': {'method': 'gaussian', 'time_window': 3500}, + 'Center_2': {'method': 'gaussian', 'time_window': 2000}, + 'Tail_base_2': {'method': 'savitzky_golay', 'time_window': 3500}}} + + +#SMOOTHING_SETTINGS = {'Animal_1': {'method': 'savitzky_golay', 'time_window': 3500}, 'Animal_2': {'method': 'savitzky_golay', 'time_window': 3500}} +advanced_smoother = AdvancedSmoother(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', + data_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/new_data', + settings=SMOOTHING_SETTINGS, type='body-part', multi_index_data=True, overwrite=False) + +advanced_smoother.run() diff --git a/simba/sandbox/angle_3pt.py b/simba/sandbox/angle_3pt.py new file mode 100644 index 000000000..a00d4ee6e --- /dev/null +++ b/simba/sandbox/angle_3pt.py @@ -0,0 +1,64 @@ +__author__ = "Simon Nilsson" +__email__ = 
"sronilsson@gmail.com" + +import math + +import numpy as np +from numba import cuda + +from simba.utils.read_write import read_df + +THREADS_PER_BLOCK = 256 +@cuda.jit +def _get_3pt_angle_kernel(x_dev, y_dev, z_dev, results): + i = cuda.grid(1) + + if i >= x_dev.shape[0]: + return + if i < x_dev.shape[0]: + x_x, x_y = x_dev[i][0], x_dev[i][1] + y_x, y_y = y_dev[i][0], y_dev[i][1] + z_x, z_y = z_dev[i][0], z_dev[i][1] + D = math.degrees(math.atan2(z_y - y_y, z_x - y_x) - math.atan2(x_y - y_y, x_x - y_x)) + if D < 0: + D += 360 + results[i] = D + +def get_3pt_angle(x: np.ndarray, y: np.ndarray, z: np.ndarray) -> np.ndarray: + """ + Computes the angle formed by three points in 2D space for each corresponding row in the input arrays using + GPU. The points x, y, and z represent the coordinates of three points in space, and the angle is calculated + at point `y` between the line segments `xy` and `yz`. + + .. image:: _static/img/get_3pt_angle_cuda.png + :width: 500 + :align: center + + :param x: A numpy array of shape (n, 2) representing the first point (e.g., nose) coordinates. + :param y: A numpy array of shape (n, 2) representing the second point (e.g., center) coordinates, where the angle is computed. + :param z: A numpy array of shape (n, 2) representing the second point (e.g., center) coordinates, where the angle is computed. + :return: A numpy array of shape (n, 1) containing the calculated angles (in degrees) for each row. + + :example: + >>> video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/501_MA142_Gi_CNO_0514.mp4" + >>> data_path = r"/mnt/c/troubleshooting/mitra/project_folder/csv/outlier_corrected_movement_location/501_MA142_Gi_CNO_0514 - test.csv" + >>> df = read_df(file_path=data_path, file_type='csv') + >>> y = df[['Center_x', 'Center_y']].values + >>> x = df[['Nose_x', 'Nose_y']].values + >>> z = df[['Tail_base_x', 'Tail_base_y']].values + >>> angle_x = get_3pt_angle(x=x, y=y, z=z) + """ + + + x = np.ascontiguousarray(x).astype(np.float32) + y = np.ascontiguousarray(y).astype(np.float32) + n, m = x.shape + x_dev = cuda.to_device(x) + y_dev = cuda.to_device(y) + z_dev = cuda.to_device(z) + results = cuda.device_array((n, m), dtype=np.int32) + bpg = (n + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK + _get_3pt_angle_kernel[bpg, THREADS_PER_BLOCK](x_dev, y_dev, z_dev, results) + results = results.copy_to_host() + cuda.current_context().memory_manager.deallocations.clear() + return results diff --git a/simba/sandbox/annotations_mitra.py b/simba/sandbox/annotations_mitra.py new file mode 100644 index 000000000..e261c3217 --- /dev/null +++ b/simba/sandbox/annotations_mitra.py @@ -0,0 +1,40 @@ +import pandas as pd +import glob +import os +from simba.utils.read_write import get_fn_ext +import shutil + +# ANNOTATED_VIDEOS_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/mitra/annotated_videos.csv' +# DATA_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/mitra/project_folder/csv/input_csv/originals' +# OUT_DIR = '/Users/simon/Desktop/envs/simba/troubleshooting/mitra/project_folder/csv/input_csv/' +# NOT_ANNOT_DIR = '/Users/simon/Desktop/envs/simba/troubleshooting/mitra/project_folder/csv/input_csv/not_annotated' +# +# +# file_paths = glob.glob(DATA_PATH + '/*.csv') +# annotated_lst = list(pd.read_csv(ANNOTATED_VIDEOS_PATH)['VIDE_NAME']) +# +# for file_path in file_paths: +# dir, file_name, ext = get_fn_ext(filepath=file_path) +# file_name = file_name.split('.', 1)[0] +# if file_name in annotated_lst: +# save_path = os.path.join(OUT_DIR, file_name + '.csv') 
+# else: +# save_path = os.path.join(NOT_ANNOT_DIR, file_name + '.csv') +# shutil.copy(src=file_path, dst=save_path) + +annot_df = pd.read_csv('/Users/simon/Desktop/envs/simba/troubleshooting/mitra/annotations.csv') +annot_df[['START','STOP']] = annot_df['START-STOP'].str.split('-',expand=True)[[0, 1]] +file_paths = glob.glob('/Users/simon/Desktop/envs/simba/troubleshooting/mitra/project_folder/csv/features_extracted' + '/*.csv') +for file_path in file_paths: + dir, file_name, ext = get_fn_ext(filepath=file_path) + video_annot = annot_df[annot_df['VIDEO'] == file_name] + if len(video_annot) > 0: + data_df = pd.read_csv(file_path) + for clf in annot_df['BEHAVIOR'].unique(): + data_df[clf] = 0 + video_clf_annot = video_annot[video_annot['BEHAVIOR'] == clf] + + annotations_idx = list(video_clf_annot.apply(lambda x: list(range(int(x["START"]), int(x["STOP"]) + 1)),1)) + print(annotations_idx) + + diff --git a/simba/sandbox/average_frm_popup.py b/simba/sandbox/average_frm_popup.py new file mode 100644 index 000000000..e00288833 --- /dev/null +++ b/simba/sandbox/average_frm_popup.py @@ -0,0 +1,180 @@ +import os +from typing import Union, Optional +import subprocess +from simba.utils.read_write import get_fn_ext, get_video_meta_data + +import threading +import functools +import glob +import multiprocessing +import os +import platform +import shutil +import subprocess +import time +from copy import deepcopy +from datetime import datetime +from tkinter import * +from typing import Any, Dict, List, Optional, Tuple, Union + +import cv2 +import numpy as np +from PIL import Image, ImageTk +from shapely.geometry import Polygon + +try: + from typing import Literal +except: + from typing_extensions import Literal + +from simba.mixins.config_reader import ConfigReader +from simba.mixins.image_mixin import ImageMixin +from simba.utils.checks import (check_ffmpeg_available, + check_file_exist_and_readable, check_float, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_instance, check_int, + check_nvidea_gpu_available, check_str, + check_that_hhmmss_start_is_before_end, + check_valid_lst, check_valid_tuple) +from simba.utils.data import find_frame_numbers_from_time_stamp +from simba.utils.enums import OS, ConfigKey, Formats, Options, Paths +from simba.utils.errors import (CountError, DirectoryExistError, + FFMPEGCodecGPUError, FFMPEGNotFoundError, + FileExistError, FrameRangeError, + InvalidFileTypeError, InvalidInputError, + InvalidVideoFileError, NoDataError, + NoFilesFoundError, NotDirectoryError) +from simba.utils.lookups import (get_ffmpeg_crossfade_methods, get_fonts, + percent_to_crf_lookup, percent_to_qv_lk) +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, find_core_cnt, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + read_config_entry, read_config_file, read_frm_of_video, timestamp_to_seconds) +from simba.utils.warnings import (FileExistWarning, InValidUserInputWarning, + SameInputAndOutputWarning, FrameRangeWarning, ResolutionWarning) +from simba.video_processors.extract_frames import video_to_frames +from simba.video_processors.roi_selector import ROISelector +from simba.video_processors.roi_selector_circle import ROISelectorCircle +from simba.video_processors.roi_selector_polygon import ROISelectorPolygon + +from tkinter import * +from typing 
import Optional, Union + +import numpy as np +from PIL import Image, ImageTk + +import simba +from simba.labelling.extract_labelled_frames import AnnotationFrameExtractor +from simba.mixins.config_reader import ConfigReader +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.plotting.frame_mergerer_ffmpeg import FrameMergererFFmpeg +from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, + CreateToolTip, DropDownMenu, Entry_Box, + FileSelect, FolderSelect) +from simba.utils.checks import (check_ffmpeg_available, + check_file_exist_and_readable, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_int, check_nvidea_gpu_available, + check_str, + check_that_hhmmss_start_is_before_end) +from simba.utils.data import convert_roi_definitions +from simba.utils.enums import Dtypes, Formats, Keys, Links, Options, Paths +from simba.utils.errors import (CountError, DuplicationError, FrameRangeError, + InvalidInputError, MixedMosaicError, + NoChoosenClassifierError, NoFilesFoundError, + NotDirectoryError, ResolutionError) +from simba.utils.lookups import get_color_dict, get_fonts +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + seconds_to_timestamp, str_2_bool) +from simba.video_processors.brightness_contrast_ui import \ + brightness_contrast_ui +from simba.video_processors.clahe_ui import interactive_clahe_ui +from simba.video_processors.extract_seqframes import extract_seq_frames +from simba.video_processors.multi_cropper import MultiCropper +from simba.video_processors.px_to_mm import get_coordinates_nilsson +from simba.video_processors.video_processing import ( + VideoRotator, batch_convert_video_format, batch_create_frames, + batch_video_to_greyscale, change_fps_of_multiple_videos, change_img_format, + change_single_video_fps, clahe_enhance_video, clip_video_in_range, + clip_videos_by_frame_ids, convert_to_avi, convert_to_bmp, convert_to_jpeg, + convert_to_mov, convert_to_mp4, convert_to_png, convert_to_tiff, + convert_to_webm, convert_to_webp, crossfade_two_videos, + convert_video_powerpoint_compatible_format, copy_img_folder, + crop_multiple_videos, crop_multiple_videos_circles, + crop_multiple_videos_polygons, crop_single_video, crop_single_video_circle, + crop_single_video_polygon, downsample_video, extract_frame_range, + extract_frames_single_video, frames_to_movie, gif_creator, + multi_split_video, remove_beginning_of_video, resize_videos_by_height, + resize_videos_by_width, roi_blurbox, superimpose_elapsed_time, + superimpose_frame_count, superimpose_freetext, superimpose_overlay_video, + superimpose_video_names, superimpose_video_progressbar, + video_bg_subtraction_mp, video_bg_subtraction, + video_to_greyscale, watermark_video, rotate_video, flip_videos, create_average_frm) +# +class CrossfadeVideosPopUp(PopUpMixin): + def __init__(self): + PopUpMixin.__init__(self, title="CROSS-FADE VIDEOS") + crossfade_methods = get_ffmpeg_crossfade_methods() + settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="NUMBER OF VIDEOS TO JOIN", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.video_path_1 = FileSelect(settings_frm, f"VIDEO PATH 1:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", 
Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)]) + self.video_path_2 = FileSelect(settings_frm, f"VIDEO PATH 2:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)]) + self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY:", list(range(10, 110, 10)), labelwidth=25) + self.out_format_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO FORMAT:", Options.ALL_VIDEO_FORMAT_OPTIONS.value, labelwidth=25) + self.fade_method_dropdown = DropDownMenu(settings_frm, "CROSS-FADE METHOD:", crossfade_methods, labelwidth=25) + self.duration_dropdown = DropDownMenu(settings_frm, "CROSS-FADE DURATION:", list(range(2, 22, 2)), labelwidth=25) + self.offset_dropdown = DropDownMenu(settings_frm, "CROSS-FADE OFFSET:", list(range(1, 1001, 1)), labelwidth=25) + + self.quality_dropdown.setChoices(60) + self.out_format_dropdown.setChoices('.mp4') + self.fade_method_dropdown.setChoices('fade') + self.duration_dropdown.setChoices(6) + self.offset_dropdown.setChoices(10) + settings_frm.grid(row=0, column=0, sticky=NW) + self.video_path_1.grid(row=0, column=0, sticky=NW) + self.video_path_2.grid(row=1, column=0, sticky=NW) + self.quality_dropdown.grid(row=2, column=0, sticky=NW) + self.out_format_dropdown.grid(row=3, column=0, sticky=NW) + self.fade_method_dropdown.grid(row=4, column=0, sticky=NW) + self.duration_dropdown.grid(row=5, column=0, sticky=NW) + self.offset_dropdown.grid(row=6, column=0, sticky=NW) + self.create_run_frm(run_function=self.run) + self.main_frm.mainloop() + + def run(self): + video_1_path, video_2_path = self.video_path_1.file_path, self.video_path_2.file_path + quality = int(self.quality_dropdown.getChoices()) + format = self.out_format_dropdown.getChoices()[1:] + fade_method = self.fade_method_dropdown.getChoices() + offset = int(self.offset_dropdown.getChoices()) + duration = int(self.duration_dropdown.getChoices()) + for video_path in [video_1_path, video_2_path]: + video_meta_data = get_video_meta_data(video_path=video_path) + if video_meta_data['video_length_s'] < duration: + raise FrameRangeError(msg=f'Video {video_meta_data["video_name"]} is shorter {video_meta_data["video_length_s"]} than the crossfade duration {duration}.', source=self.__class__.__name__) + if video_meta_data['video_length_s'] < offset: + raise FrameRangeError(msg=f'Video {video_meta_data["video_name"]} is shorter {video_meta_data["video_length_s"]} than the crossfade offset {offset}.',source=self.__class__.__name__) + threading.Thread(crossfade_two_videos(video_path_1=video_1_path, + video_path_2=video_2_path, + crossfade_duration=duration, + crossfade_method=fade_method, + crossfade_offset=offset, + out_format=format, + quality=quality)).start() + + +#CrossfadeVideosPopUp() + + + + +#crossfade_multiple_videos \ No newline at end of file diff --git a/simba/sandbox/bar_chart.py b/simba/sandbox/bar_chart.py new file mode 100644 index 000000000..c24957512 --- /dev/null +++ b/simba/sandbox/bar_chart.py @@ -0,0 +1,53 @@ +import pandas as pd +from typing import Union, List, Optional, Tuple +import os +import seaborn as sns +from simba.utils.checks import check_instance, check_str, check_valid_lst, check_if_dir_exists +from simba.utils.enums import Formats +import numpy as np +import matplotlib.pyplot as plt + + +def plot_bar_chart(df: pd.DataFrame, + x: str, + y: str, + error: Optional[str] = None, + x_label: Optional[str] = None, + y_label: Optional[str] = None, + title: Optional[str] = None, + fig_size: Optional[Tuple[int, int]] = (10, 8), + 
palette: Optional[str] = 'magma',
+                   save_path: Optional[Union[str, os.PathLike]] = None):
+
+    check_instance(source=f"{plot_bar_chart.__name__} df", instance=df, accepted_types=(pd.DataFrame,))
+    check_str(name=f"{plot_bar_chart.__name__} x", value=x, options=tuple(df.columns))
+    check_str(name=f"{plot_bar_chart.__name__} y", value=y, options=tuple(df.columns))
+    check_valid_lst(data=list(df[y]), source=f"{plot_bar_chart.__name__} y", valid_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_valid_lst(data=list(df[x]), source=f"{plot_bar_chart.__name__} x", valid_dtypes=Formats.NUMERIC_DTYPES.value)
+    fig, ax = plt.subplots(figsize=fig_size)
+    sns.barplot(x=x, y=y, data=df, palette=palette, ax=ax)
+    if error is not None:
+        check_str(name=f"{plot_bar_chart.__name__} error", value=error, options=tuple(df.columns))
+        check_valid_lst(data=list(df[error]), source=f"{plot_bar_chart.__name__} error", valid_dtypes=Formats.NUMERIC_DTYPES.value)
+        for i, (value, err) in enumerate(zip(df[y], df[error])):
+            plt.errorbar(i, value, yerr=np.array([[0], [err]]), fmt='o', color='grey', capsize=1)
+
+    if x_label is not None:
+        check_str(name=f"{plot_bar_chart.__name__} x_label", value=x_label)
+        plt.xlabel(x_label)
+    if y_label is not None:
+        check_str(name=f"{plot_bar_chart.__name__} y_label", value=y_label)
+        plt.ylabel(y_label)
+    if title is not None:
+        check_str(name=f"{plot_bar_chart.__name__} title", value=title)
+        plt.title(title, ha="center", fontsize=15)
+    if save_path is not None:
+        check_str(name=f"{plot_bar_chart.__name__} save_path", value=save_path)
+        check_if_dir_exists(in_dir=os.path.dirname(save_path))
+        fig.savefig(save_path, dpi=600, bbox_inches='tight')
+    else:
+        return fig
diff --git a/simba/sandbox/batch_video_to_greyscale.py b/simba/sandbox/batch_video_to_greyscale.py
new file mode 100644
index 000000000..f030bada4
--- /dev/null
+++ b/simba/sandbox/batch_video_to_greyscale.py
@@ -0,0 +1,44 @@
+import os
+from typing import Union, Optional
+import subprocess
+
+from simba.utils.checks import check_ffmpeg_available, check_nvidea_gpu_available, check_if_dir_exists
+from simba.utils.errors import FFMPEGCodecGPUError
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import find_all_videos_in_directory, get_fn_ext
+
+def batch_video_to_greyscale(directory: Union[str, os.PathLike], gpu: Optional[bool] = False) -> None:
+    """
+    Convert a directory of video files to greyscale mp4 format. The results are stored in the same directory as the
+    input files with the ``_grayscale.mp4`` suffix.
+
+    :parameter Union[str, os.PathLike] directory: Path to directory holding video files in color.
+    :parameter Optional[bool] gpu: If True, use NVIDIA GPU codecs. Default False.
+    :raise FFMPEGCodecGPUError: If no GPU is found and ``gpu == True``.
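+
+    .. note::
+       Under the hood this shells out to FFmpeg: ``-vf format=gray`` with the ``libx264`` encoder on the CPU path, or the
+       ``h264_cuvid`` decoder and ``h264_nvenc`` encoder when ``gpu=True``.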
+
+    :example:
+    >>> _ = batch_video_to_greyscale(directory='/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/videos/test_2')
+    """
+    check_ffmpeg_available(raise_error=True)
+    if gpu and not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidia-smi returning None)", source=batch_video_to_greyscale.__name__)
+    timer = SimbaTimer(start=True)
+    check_if_dir_exists(in_dir=directory, source=batch_video_to_greyscale.__name__)
+    video_paths = find_all_videos_in_directory(directory=directory, as_dict=True, raise_error=True)
+    for file_cnt, (file_name, file_path) in enumerate(video_paths.items()):
+        video_timer = SimbaTimer(start=True)
+        in_dir, _, _ = get_fn_ext(filepath=file_path)
+        save_name = os.path.join(in_dir, f"{file_name}_grayscale.mp4")
+        if gpu:
+            command = f'ffmpeg -hwaccel auto -c:v h264_cuvid -i "{file_path}" -vf "hwupload_cuda,hwdownload,format=nv12,format=gray" -c:v h264_nvenc -c:a copy "{save_name}" -y'
+        else:
+            command = f'ffmpeg -i "{file_path}" -vf format=gray -c:v libx264 "{save_name}" -hide_banner -loglevel error -y'
+        print(f"Converting {file_name} to greyscale (Video {file_cnt+1}/{len(list(video_paths.keys()))})... ")
+        subprocess.call(command, shell=True, stdout=subprocess.PIPE)
+        video_timer.stop_timer()
+        print(f'Video {save_name} complete, (elapsed time: {video_timer.elapsed_time_str}s)')
+    timer.stop_timer()
+    stdout_success(msg=f"{len(list(video_paths.keys()))} video(s) converted to greyscale! Saved in {directory} with the '_grayscale' suffix", elapsed_time=timer.elapsed_time_str, source=batch_video_to_greyscale.__name__,)
+
+
+#_ = batch_video_to_greyscale(directory='/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/videos/test_2')
\ No newline at end of file
diff --git a/simba/sandbox/berger_parker.py b/simba/sandbox/berger_parker.py
new file mode 100644
index 000000000..b37ebdcec
--- /dev/null
+++ b/simba/sandbox/berger_parker.py
@@ -0,0 +1,38 @@
+import time
+
+import numpy as np
+from numba import jit, typed, types
+from simba.utils.checks import check_valid_array
+from simba.utils.data import get_mode
+
+def berger_parker(x: np.ndarray) -> float:
+    """
+    Berger-Parker index for the given one-dimensional array.
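+    Computed as (standard formulation, shown for reference):
+
+    .. math::
+       BP = \frac{N_{max}}{N}
+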
+    The Berger-Parker index is a measure of category dominance, calculated as the ratio of
+    the frequency of the most abundant category to the total number of observations.
+
+    :example:
+    >>> x = np.random.randint(0, 25, (100,)).astype(np.float32)
+    >>> z = berger_parker(x=x)
+    """
+    check_valid_array(source=f'{berger_parker.__name__} x', accepted_ndims=(1,), data=x, accepted_dtypes=(np.float32, np.float64, np.int32, np.int64, np.int8))
+    return get_mode(x=x) / x.shape[0]
+
+
+x = np.random.randint(0, 25, (100,)).astype(np.float32)
+start = time.time()
+p = berger_parker(x=x)
+print(time.time() - start)
+
+# start = time.time()
+# u = mode_2(x=x)
+# print(time.time() - start)
+
diff --git a/simba/sandbox/bg_remover_cuda.py b/simba/sandbox/bg_remover_cuda.py
new file mode 100644
index 000000000..f794435a7
--- /dev/null
+++ b/simba/sandbox/bg_remover_cuda.py
@@ -0,0 +1,157 @@
+import os
+import time
+
+import numpy as np
+import math
+from typing import Union, Optional, Tuple
+from numba import cuda
+import cv2
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import get_video_meta_data, read_img_batch_from_video_gpu, get_fn_ext
+from simba.video_processors.video_processing import create_average_frm, video_bg_subtraction, video_bg_subtraction_mp
+from simba.utils.checks import is_video_color, check_if_valid_rgb_tuple, check_if_valid_img, check_int
+from simba.utils.enums import Formats
+from simba.data_processors.cuda.utils import _cuda_luminance_pixel_to_grey
+
+
+@cuda.jit()
+def _bg_subtraction_cuda_kernel(imgs, avg_img, results, is_clr, fg_clr, threshold):
+    x, y, n = cuda.grid(3)
+    if n < 0 or n > (imgs.shape[0] - 1):
+        return
+    if y < 0 or y > (imgs.shape[1] - 1):
+        return
+    if x < 0 or x > (imgs.shape[2] - 1):
+        return
+    if is_clr[0] == 1:
+        r1, g1, b1 = imgs[n][y][x][0], imgs[n][y][x][1], imgs[n][y][x][2]
+        r2, g2, b2 = avg_img[y][x][0], avg_img[y][x][1], avg_img[y][x][2]
+        r_diff, g_diff, b_diff = abs(r1-r2), abs(g1-g2), abs(b1-b2)
+        grey_diff = _cuda_luminance_pixel_to_grey(r_diff, g_diff, b_diff)
+        if grey_diff > threshold[0]:
+            if fg_clr[0] != -1:
+                r_out, g_out, b_out = fg_clr[0], fg_clr[1], fg_clr[2]
+            else:
+                r_out, g_out, b_out = r1, g1, b1
+        else:
+            r_out, g_out, b_out = results[n][y][x][0], results[n][y][x][1], results[n][y][x][2]
+        results[n][y][x][0], results[n][y][x][1], results[n][y][x][2] = r_out, g_out, b_out
+
+    else:
+        val_1, val_2 = imgs[n][y][x][0], avg_img[y][x][0]
+        grey_diff = abs(val_1-val_2)
+        if grey_diff > threshold[0]:
+            # mirror the color branch: use the passed foreground color if given, else the original pixel
+            if fg_clr[0] != -1:
+                val_out = fg_clr[0]
+            else:
+                val_out = val_1
+        else:
+            val_out = 0
+        results[n][y][x] = val_out
+
+
+
+
+
+
+
+def bg_subtraction_cuda(video_path: Union[str, os.PathLike],
+                        avg_frm: np.ndarray,
+                        save_path: Optional[Union[str, os.PathLike]] = None,
+                        bg_clr: Optional[Tuple[int, int, int]] = (0, 0, 0),
+                        fg_clr: Optional[Tuple[int, int, int]] = None,
+                        batch_size: Optional[int] = 500,
+                        threshold: Optional[int] = 50):
+    """
+    Remove background from videos using GPU acceleration.
+
+    .. note::
+       To create an `avg_frm`, use :func:`simba.video_processors.video_processing.create_average_frm`, :func:`simba.data_processors.cuda.image.create_average_frm_cupy`, or :func:`~simba.data_processors.cuda.image.create_average_frm_cuda`
+
+    .. seealso::
+       For a CPU-based alternative, see :func:`simba.video_processors.video_processing.video_bg_subtraction`. Needs work; the multi-core CPU version is currently faster.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/bg_subtraction_cuda.csv
+       :widths: 10, 45, 45
+       :align: center
+       :class: simba-table
+       :header-rows: 1
+
+    :param Union[str, os.PathLike] video_path: The path to the video to remove the background from.
+    :param np.ndarray avg_frm: Average frame of the video. Can be created with e.g., :func:`simba.video_processors.video_processing.create_average_frm`.
+    :param Optional[Union[str, os.PathLike]] save_path: Optional location to store the background removed video. If None, then saved in the same directory as the input video with the `_bg_removed` suffix.
+    :param Optional[Tuple[int, int, int]] bg_clr: Tuple representing the background color of the video. Default: (0, 0, 0).
+    :param Optional[Tuple[int, int, int]] fg_clr: Tuple representing the foreground color of the video (e.g., the animal). If None, then the original pixel colors will be used. Default: None.
+    :param Optional[int] batch_size: Number of frames to process concurrently. Use higher values if RAM memory allows. Default: 500.
+    :param Optional[int] threshold: Value between 0-255 representing the difference threshold between the average frame and each frame. Higher values mean more pixels will be considered background. Default: 50.
+
+    :example:
+    >>> video_path = "/mnt/c/troubleshooting/mitra/project_folder/videos/clipped/592_MA147_Gq_CNO_0515.mp4"
+    >>> avg_frm = create_average_frm(video_path=video_path)
+    >>> bg_subtraction_cuda(video_path=video_path, avg_frm=avg_frm, fg_clr=(255, 255, 255))
+    """
+
+    check_if_valid_img(data=avg_frm, source=f'{bg_subtraction_cuda.__name__}')
+    check_if_valid_rgb_tuple(data=bg_clr)
+    check_int(name=f'{bg_subtraction_cuda.__name__} batch_size', value=batch_size, min_value=1)
+    check_int(name=f'{bg_subtraction_cuda.__name__} threshold', value=threshold, min_value=0, max_value=255)
+    THREADS_PER_BLOCK = (32, 32, 1)
+    timer = SimbaTimer(start=True)
+    video_meta = get_video_meta_data(video_path=video_path)
+    batch_cnt = int(max(1, np.ceil(video_meta['frame_count'] / batch_size)))
+    frm_batches = np.array_split(np.arange(0, video_meta['frame_count']), batch_cnt)
+    n, w, h = video_meta['frame_count'], video_meta['width'], video_meta['height']
+    if is_video_color(video_path): is_color = np.array([1])
+    else: is_color = np.array([0])
+    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
+    if save_path is None:
+        in_dir, video_name, _ = get_fn_ext(filepath=video_path)
+        save_path = os.path.join(in_dir, f'{video_name}_bg_removed.mp4')
+    if fg_clr is not None:
+        check_if_valid_rgb_tuple(data=fg_clr)
+        fg_clr = np.array(fg_clr)
+    else:
+        fg_clr = np.array([-1])
+    threshold = np.array([threshold]).astype(np.int32)
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (w, h))
+    y_dev = cuda.to_device(avg_frm.astype(np.float32))
+    fg_clr_dev = cuda.to_device(fg_clr)
+    is_color_dev = cuda.to_device(is_color)
+    for frm_batch_cnt, frm_batch in enumerate(frm_batches):
+        print(f'Processing frame batch {frm_batch_cnt+1} / {len(frm_batches)} (complete: {round((frm_batch_cnt / len(frm_batches)) * 100, 2)}%)')
+        batch_imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=frm_batch[0], end_frm=frm_batch[-1])
+        batch_imgs = np.stack(list(batch_imgs.values()), axis=0).astype(np.float32)
+        batch_n = batch_imgs.shape[0]
+        results = np.zeros_like(batch_imgs).astype(np.uint8)
+        results[:] = bg_clr
+        results = cuda.to_device(results)
+        grid_x = math.ceil(w / THREADS_PER_BLOCK[0])
+        grid_y = math.ceil(h / THREADS_PER_BLOCK[1])
+        grid_z = math.ceil(batch_n / THREADS_PER_BLOCK[2])
+        bpg = (grid_x, grid_y, grid_z)
+        x_dev = cuda.to_device(batch_imgs)
+        _bg_subtraction_cuda_kernel[bpg, THREADS_PER_BLOCK](x_dev, y_dev, results, is_color_dev, fg_clr_dev, threshold)
+        results = results.copy_to_host()
+        for img_cnt, img in enumerate(results):
+            writer.write(img)
+    writer.release()
+    timer.stop_timer()
+    stdout_success(msg=f'Video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
+
+
+
+# video_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/clipped/03152021_NOB_IOT_8_clipped.mp4"
+# video_path = "/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/08102021_DOT_Rat7_8(2).mp4"
+# video_path = "/mnt/c/troubleshooting/mitra/project_folder/videos/clipped/592_MA147_Gq_CNO_0515.mp4"
+#
+# avg_frm = create_average_frm(video_path="/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/original/844_MA131_gq_CNO_0624.mp4")
+# video_path = "/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/844_MA131_gq_CNO_0624_7.mp4"
+# timer = []
+# for i in range(1):
+#     start = time.perf_counter()
+#     bg_subtraction_cuda(video_path=video_path, avg_frm=avg_frm, batch_size=3000)
+#     end = time.perf_counter()
+#     timer.append(end-start)
+# print(np.mean(timer), np.std(timer))
diff --git a/simba/sandbox/bg_remover_cupy.py b/simba/sandbox/bg_remover_cupy.py
new file mode 100644
index 000000000..14b5d77b9
--- /dev/null
+++ b/simba/sandbox/bg_remover_cupy.py
@@ -0,0 +1,89 @@
+import os
+import cupy as cp
+import numpy as np
+from typing import Union, Optional, Tuple
+import cv2
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import get_video_meta_data, read_img_batch_from_video_gpu, get_fn_ext
+from simba.video_processors.video_processing import create_average_frm, video_bg_subtraction, video_bg_subtraction_mp
+from simba.utils.checks import is_video_color, check_if_valid_rgb_tuple, check_if_valid_img, check_int
+from simba.utils.enums import Formats
+from simba.data_processors.cuda.utils import _cuda_luminance_pixel_to_grey, _cuda_available
+from simba.utils.errors import SimBAGPUError
+from simba.data_processors.cuda.image import img_stack_to_grayscale_cupy
+
+def bg_subtraction_cupy(video_path: Union[str, os.PathLike],
+                        avg_frm: np.ndarray,
+                        save_path: Optional[Union[str, os.PathLike]] = None,
+                        bg_clr: Optional[Tuple[int, int, int]] = (0, 0, 0),
+                        fg_clr: Optional[Tuple[int, int, int]] = None,
+                        batch_size: Optional[int] = 500,
+                        threshold: Optional[int] = 50):
+
+    """
+    Remove background from videos using GPU acceleration through CuPy.
+
+    .. seealso::
+       For a CPU-based alternative, see :func:`simba.video_processors.video_processing.video_bg_subtraction`. Needs work; the multi-core CPU version is currently faster.
+
+    :param Union[str, os.PathLike] video_path: The path to the video to remove the background from.
+    :param np.ndarray avg_frm: Average frame of the video. Can be created with e.g., :func:`simba.video_processors.video_processing.create_average_frm`.
+    :param Optional[Union[str, os.PathLike]] save_path: Optional location to store the background removed video. If None, then saved in the same directory as the input video with the `_bg_removed` suffix.
+    :param Optional[Tuple[int, int, int]] bg_clr: Tuple representing the background color of the video. Default: (0, 0, 0).
+    :param Optional[Tuple[int, int, int]] fg_clr: Tuple representing the foreground color of the video (e.g., the animal). If None, then the original pixel colors will be used. Default: None.
+    :param Optional[int] batch_size: Number of frames to process concurrently. Use higher values if RAM memory allows. Default: 500.
+    :param Optional[int] threshold: Value between 0-255 representing the difference threshold between the average frame and each frame. Higher values mean more pixels will be considered background. Default: 50.
+
+
+    :example:
+    >>> avg_frm = create_average_frm(video_path="/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/original/844_MA131_gq_CNO_0624.mp4")
+    >>> video_path = "/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/844_MA131_gq_CNO_0624_7.mp4"
+    >>> bg_subtraction_cupy(video_path=video_path, avg_frm=avg_frm, batch_size=500)
+    """
+
+    if not _cuda_available()[0]: raise SimBAGPUError('No GPU detected using numba.cuda', source=bg_subtraction_cupy.__name__)
+    check_if_valid_img(data=avg_frm, source=f'{bg_subtraction_cupy.__name__}')
+    avg_frm = cp.array(avg_frm)
+    check_if_valid_rgb_tuple(data=bg_clr)
+    check_int(name=f'{bg_subtraction_cupy.__name__} batch_size', value=batch_size, min_value=1)
+    check_int(name=f'{bg_subtraction_cupy.__name__} threshold', value=threshold, min_value=0, max_value=255)
+    timer = SimbaTimer(start=True)
+    video_meta = get_video_meta_data(video_path=video_path)
+    batch_cnt = int(max(1, np.ceil(video_meta['frame_count'] / batch_size)))
+    frm_batches = np.array_split(np.arange(0, video_meta['frame_count']), batch_cnt)
+    n, w, h = video_meta['frame_count'], video_meta['width'], video_meta['height']
+    if is_video_color(video_path): is_color = np.array([1])
+    else: is_color = np.array([0])
+    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
+    if save_path is None:
+        in_dir, video_name, _ = get_fn_ext(filepath=video_path)
+        save_path = os.path.join(in_dir, f'{video_name}_bg_removed.mp4')
+    if fg_clr is not None:
+        check_if_valid_rgb_tuple(data=fg_clr)
+        fg_clr = np.array(fg_clr)
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (w, h))
+    for frm_batch_cnt, frm_batch in enumerate(frm_batches):
+        print(f'Processing frame batch {frm_batch_cnt+1} / {len(frm_batches)} (complete: {round((frm_batch_cnt / len(frm_batches)) * 100, 2)}%)')
+        batch_imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=frm_batch[0], end_frm=frm_batch[-1])
+        batch_imgs = cp.array(np.stack(list(batch_imgs.values()), axis=0).astype(np.float32))
+        img_diff = cp.abs(batch_imgs - avg_frm)
+        if is_color:
+            img_diff = img_stack_to_grayscale_cupy(imgs=img_diff, batch_size=img_diff.shape[0])
+        mask = cp.where(img_diff > threshold, 1, 0).astype(cp.uint8)
+        batch_imgs[mask == 0] = bg_clr
+        if fg_clr is not None:
+            batch_imgs[mask == 1] = fg_clr
+        batch_imgs = batch_imgs.astype(cp.uint8).get()
+        for img_cnt, img in enumerate(batch_imgs):
+            writer.write(img)
+    writer.release()
+    timer.stop_timer()
+    stdout_success(msg=f'Video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
+
+
+
+avg_frm = create_average_frm(video_path="/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/original/844_MA131_gq_CNO_0624.mp4")
+video_path = "/mnt/c/troubleshooting/mitra/project_folder/videos/temp/temp_ex_bg_subtraction/844_MA131_gq_CNO_0624_7.mp4"
+bg_subtraction_cupy(video_path=video_path, avg_frm=avg_frm, batch_size=500)
\ No newline at end of file
diff --git a/simba/sandbox/bg_remover_popup.py b/simba/sandbox/bg_remover_popup.py
new file mode 100644
index 000000000..3b63af99d
--- /dev/null
+++ b/simba/sandbox/bg_remover_popup.py
@@ -0,0
+1,129 @@ +import os + +from typing import Union +from copy import deepcopy +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, FileSelect, FolderSelect, DropDownMenu, Entry_Box +from simba.utils.enums import Keys, Links, Options +from simba.utils.checks import check_file_exist_and_readable, check_if_dir_exists, check_str, check_int, check_that_hhmmss_start_is_before_end +from simba.utils.data import check_if_string_value_is_valid_video_timestamp +from simba.video_processors.video_processing import watermark_video, superimpose_elapsed_time, superimpose_video_progressbar, superimpose_overlay_video, superimpose_video_names, superimpose_freetext, roi_blurbox +from simba.utils.lookups import get_color_dict +from simba.utils.read_write import get_video_meta_data +import threading +from tkinter import * +import numpy as np +from simba.utils.errors import InvalidInputError, DuplicationError +from simba.utils.read_write import get_video_meta_data, str_2_bool +from simba.utils.enums import Formats +from simba.video_processors.video_processing import video_bg_subtraction, video_bg_subtraction_mp + +class BackgroundRemoverPopUp(PopUpMixin): + def __init__(self): + PopUpMixin.__init__(self, title="REMOVE BACKGROUND IN VIDEOS") + self.clr_dict = get_color_dict() + settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.video_path = FileSelect(settings_frm, "VIDEO PATH:", title="Select a video file", file_types=[("VIDEO", Options.ALL_VIDEO_FORMAT_OPTIONS.value)], lblwidth=40) + self.bg_video_path = FileSelect(settings_frm, "BACKGROUND REFERENCE VIDEO PATH:", title="Select a video file", file_types=[("VIDEO", Options.ALL_VIDEO_FORMAT_OPTIONS.value)], lblwidth=40) + self.bg_clr_dropdown = DropDownMenu(settings_frm, "BACKGROUND COLOR:", list(self.clr_dict.keys()), labelwidth=40) + self.fg_clr_dropdown = DropDownMenu(settings_frm, "FOREGROUND COLOR:", list(self.clr_dict.keys()), labelwidth=40) + self.bg_start_eb = Entry_Box(parent=settings_frm, labelwidth=40, entry_box_width=15, fileDescription='BACKGROUND VIDEO START (FRAME # OR TIME):') + self.bg_end_eb = Entry_Box(parent=settings_frm, labelwidth=40, entry_box_width=15, fileDescription='BACKGROUND VIDEO END (FRAME # OR TIME):') + self.multiprocessing_var = BooleanVar() + self.multiprocess_cb = Checkbutton(settings_frm, text="Multiprocess videos (faster)", variable=self.multiprocessing_var, command=lambda: self.enable_dropdown_from_checkbox(check_box_var=self.multiprocessing_var, dropdown_menus=[self.multiprocess_dropdown])) + self.multiprocess_dropdown = DropDownMenu(settings_frm, "CPU cores:", list(range(2, self.cpu_cnt)), "12") + self.multiprocess_dropdown.setChoices(2) + self.multiprocess_dropdown.disable() + self.bg_clr_dropdown.setChoices('Black') + self.fg_clr_dropdown.setChoices('White') + self.bg_start_eb.entry_set('00:00:00') + self.bg_end_eb.entry_set('00:00:20') + + settings_frm.grid(row=0, column=0, sticky=NW) + self.video_path.grid(row=0, column=0, sticky=NW) + self.bg_video_path.grid(row=1, column=0, sticky=NW) + self.bg_clr_dropdown.grid(row=2, column=0, sticky=NW) + self.fg_clr_dropdown.grid(row=3, column=0, sticky=NW) + self.bg_start_eb.grid(row=4, column=0, sticky=NW) + self.bg_end_eb.grid(row=5, column=0, sticky=NW) + self.multiprocess_cb.grid(row=6, column=0, sticky=NW) + self.multiprocess_dropdown.grid(row=6, column=1, sticky=NW) + 
self.create_run_frm(run_function=self.run)
+        self.main_frm.mainloop()
+
+    def run(self):
+        video_path = self.video_path.file_path
+        _ = get_video_meta_data(video_path=video_path)
+        bg_video = self.bg_video_path.file_path
+        bg_clr = self.clr_dict[self.bg_clr_dropdown.getChoices()]
+        fg_clr = self.clr_dict[self.fg_clr_dropdown.getChoices()]
+        if bg_clr == fg_clr:
+            raise DuplicationError(msg=f'The background and foreground color cannot be the same color ({fg_clr})', source=self.__class__.__name__)
+        if not os.path.isfile(bg_video):
+            bg_video = deepcopy(video_path)
+        else:
+            _ = get_video_meta_data(video_path=bg_video)
+        start, end = self.bg_start_eb.entry_get.strip(), self.bg_end_eb.entry_get.strip()
+        start_is_int, _ = check_int(name='', value=start, min_value=0, raise_error=False)
+        end_is_int, _ = check_int(name='', value=end, min_value=0, raise_error=False)
+        if start_is_int and end_is_int:
+            bg_start_time, bg_end_time = None, None
+            bg_start_frm, bg_end_frm = int(start), int(end)
+            if bg_start_frm >= bg_end_frm:
+                raise InvalidInputError(msg=f'Start frame has to be before end frame (start: {bg_start_frm}, end: {bg_end_frm})', source=self.__class__.__name__)
+        else:
+            check_if_string_value_is_valid_video_timestamp(value=start, name='START FRAME')
+            check_if_string_value_is_valid_video_timestamp(value=end, name='END FRAME')
+            check_that_hhmmss_start_is_before_end(start_time=start, end_time=end, name='START AND END TIME')
+            bg_start_frm, bg_end_frm = None, None
+            bg_start_time, bg_end_time = start, end
+
+
+        if not self.multiprocessing_var.get():
+            print(video_path, bg_video)
+            video_bg_subtraction(video_path=video_path,
+                                 bg_video_path=bg_video,
+                                 bg_start_frm=bg_start_frm,
+                                 bg_end_frm=bg_end_frm,
+                                 bg_start_time=bg_start_time,
+                                 bg_end_time=bg_end_time,
+                                 bg_color=bg_clr,
+                                 fg_color=fg_clr)
+        else:
+            core_cnt = int(self.multiprocess_dropdown.getChoices())
+            video_bg_subtraction_mp(video_path=video_path,
+                                    bg_video_path=bg_video,
+                                    bg_start_frm=bg_start_frm,
+                                    bg_end_frm=bg_end_frm,
+                                    bg_start_time=bg_start_time,
+                                    bg_end_time=bg_end_time,
+                                    bg_color=bg_clr,
+                                    fg_color=fg_clr,
+                                    core_cnt=core_cnt)
+
+
+
+
+
+
+        # start_frm, end_frm = self.bg_start_frm_eb.entry_get.strip(), self.bg_end_frm_eb.entry_get.strip()
+        # if ((start_frm is not '') or (end_frm is not '')) and ((start_time is not '') or (end_time is not '')):
+        #     raise InvalidInputError(msg=f'Provide start frame and end frame OR start time and end time', source=self.__class__.__name__)
+        # elif type(start_frm) != type(end_frm):
+        #     raise InvalidInputError(msg=f'Pass start frame and end frame', source=self.__class__.__name__)
+        # elif type(start_time) != type(end_time):
+        #     raise InvalidInputError(msg=f'Pass start time and end time', source=self.__class__.__name__)
+        # bg_clr = self.clr_dict[self.bg_clr_dropdown.getChoices()]
+        # fg_clr = self.clr_dict[self.fg_clr_dropdown.getChoices()]
+        #
+
+        #
+
+
+
+BackgroundRemoverPopUp()
+
+
+
+
+
diff --git a/simba/sandbox/bg_substraction.py b/simba/sandbox/bg_substraction.py
new file mode 100644
index 000000000..46a9fce3d
--- /dev/null
+++ b/simba/sandbox/bg_substraction.py
@@ -0,0 +1,318 @@
+
+from typing import Union, Optional, Tuple, Dict, Any
+import cv2
+import multiprocessing
+from datetime import datetime
+import functools
+import os
+import numpy as np
+from copy import deepcopy
+from simba.utils.checks import check_file_exist_and_readable, check_int, check_that_hhmmss_start_is_before_end, check_if_string_value_is_valid_video_timestamp, check_if_dir_exists
+from simba.utils.data import find_frame_numbers_from_time_stamp
+from simba.utils.read_write import get_video_meta_data, check_if_hhmmss_timestamp_is_valid_part_of_video, get_fn_ext, find_core_cnt, concatenate_videos_in_folder
+from simba.utils.errors import InvalidInputError
+from simba.utils.enums import Formats
+from simba.utils.printing import SimbaTimer, stdout_success
+
+def create_average_frm(video_path: Union[str, os.PathLike],
+                       start_frm: Optional[int] = None,
+                       end_frm: Optional[int] = None,
+                       start_time: Optional[str] = None,
+                       end_time: Optional[str] = None,
+                       save_path: Optional[Union[str, os.PathLike]] = None) -> Union[None, np.ndarray]:
+
+    """
+    Create an image representing the average frame of a segment in a video or an entire video.
+
+    .. note::
+       Either pass ``start_frm`` and ``end_frm`` OR ``start_time`` and ``end_time`` OR pass all four arguments as None.
+       If all are None, then the entire video will be used to create the average frame.
+
+    :param Union[str, os.PathLike] video_path: The path to the video to create the average frame from.
+    :param Optional[int] start_frm: The first frame in the segment to create the average frame from. Default: None.
+    :param Optional[int] end_frm: The last frame in the segment to create the average frame from. Default: None.
+    :param Optional[str] start_time: The start timestamp in `HH:MM:SS` format of the segment to create the average frame from. Default: None.
+    :param Optional[str] end_time: The end timestamp in `HH:MM:SS` format of the segment to create the average frame from. Default: None.
+    :param Optional[Union[str, os.PathLike]] save_path: The path to where to save the average image. If None, then returns the average image in np.ndarray format. Default: None.
+    :return Union[None, np.ndarray]: The average image as an np.ndarray if ``save_path`` is None, otherwise None (the image is written to ``save_path``).
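+
+    :example:
+    >>> # illustrative path; any readable video works
+    >>> avg_img = create_average_frm(video_path='project_folder/videos/Video_1.mp4', start_time='00:00:00', end_time='00:00:10')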
+ """ + + if ((start_frm is not None) or (end_frm is not None)) and ((start_time is not None) or (end_time is not None)): + raise InvalidInputError(msg=f'Pass start_frm and end_frm OR start_time and end_time', source=create_average_frm.__name__) + elif type(start_frm) != type(end_frm): + raise InvalidInputError(msg=f'Pass start frame and end frame', source=create_average_frm.__name__) + elif type(start_time) != type(end_time): + raise InvalidInputError(msg=f'Pass start time and end time', source=create_average_frm.__name__) + check_file_exist_and_readable(file_path=video_path) + video_meta_data = get_video_meta_data(video_path=video_path) + cap = cv2.VideoCapture(video_path) + if (start_frm is not None) and (end_frm is not None): + check_int(name='start_frm', value=start_frm, min_value=0, max_value=video_meta_data['frame_count']) + check_int(name='end_frm', value=end_frm, min_value=0, max_value=video_meta_data['frame_count']) + if start_frm > end_frm: + raise InvalidInputError(msg=f'Start frame ({start_frm}) has to be before end frame ({end_frm}).', source=create_average_frm.__name__) + frame_ids = list(range(start_frm, end_frm+1)) + elif (start_time is not None) and (end_time is not None): + check_if_string_value_is_valid_video_timestamp(value=start_time, name=create_average_frm.__name__) + check_if_string_value_is_valid_video_timestamp(value=end_time, name=create_average_frm.__name__) + check_that_hhmmss_start_is_before_end(start_time=start_time, end_time=end_time, name=create_average_frm.__name__) + check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=start_time, video_path=video_path) + frame_ids = find_frame_numbers_from_time_stamp(start_time=start_time, end_time=end_time, fps=video_meta_data['fps']) + else: + frame_ids = list(range(0, video_meta_data['frame_count'])) + cap.set(0, frame_ids[0]) + bg_sum, frm_cnt, frm_len = None, 0, len(frame_ids) + while frm_cnt <= frm_len: + ret, frm = cap.read() + if bg_sum is None: bg_sum = np.float32(frm) + else: cv2.accumulate(frm, bg_sum) + frm_cnt += 1 + img = cv2.convertScaleAbs(bg_sum / frm_len) + cap.release() + if save_path is not None: + check_if_dir_exists(in_dir=os.path.dirname(save_path), source=create_average_frm.__name__) + cv2.imwrite(save_path, img) + else: + return img + +def _bg_remover_mp(frm_range: Tuple[int, np.ndarray], + video_path: Union[str, os.PathLike], + bg_frm: np.ndarray, + bg_clr: Tuple[int, int, int], + fg_clr: Tuple[int, int, int], + video_meta_data: Dict[str, Any], + temp_dir: Union[str, os.PathLike]): + + batch, frm_range = frm_range[0], frm_range[1] + start_frm, current_frm, end_frm = frm_range[0], frm_range[0], frm_range[-1] + cap = cv2.VideoCapture(video_path) + fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value) + save_path = os.path.join(temp_dir, f"{batch}.mp4") + cap.set(1, start_frm) + writer = cv2.VideoWriter(save_path, fourcc, video_meta_data['fps'], (video_meta_data['width'], video_meta_data['height'])) + bg = np.full_like(bg_frm, bg_clr) + bg = bg[:, :, ::-1] + dir, video_name, ext = get_fn_ext(filepath=video_path) + while current_frm <= end_frm: + ret, frm = cap.read() + if not ret: + break + diff = cv2.absdiff(frm, bg_frm) + b, g, r = diff[:, :, 0], diff[:, :, 1], diff[:, :, 2] + gray_diff = 0.2989 * r + 0.5870 * g + 0.1140 * b + gray_diff = gray_diff.astype(np.uint8) # Ensure the type is uint8 + mask = np.where(gray_diff > 50, 255, 0).astype(np.uint8) + if fg_clr is None: + fg = cv2.bitwise_and(frm, frm, mask=mask) + result = cv2.add(bg, fg) + else: + mask_inv = cv2.bitwise_not(mask) + 
fg_clr_img = np.full_like(frm, fg_clr) + fg_clr_img = fg_clr_img[:, :, ::-1] + fg_clr_img = cv2.bitwise_and(fg_clr_img, fg_clr_img, mask=mask) + result = cv2.bitwise_and(bg, bg, mask=mask_inv) + result = cv2.add(result, fg_clr_img) + writer.write(result) + current_frm += 1 + print(f'Background subtraction frame {current_frm}/{video_meta_data["frame_count"]} (Video: {video_name})') + writer.release() + cap.release() + return batch + +def video_bg_substraction_mp(video_path: Union[str, os.PathLike], + bg_video_path: Optional[Union[str, os.PathLike]] = None, + bg_start_frm: Optional[int] = None, + bg_end_frm: Optional[int] = None, + bg_start_time: Optional[str] = None, + bg_end_time: Optional[str] = None, + bg_color: Optional[Tuple[int, int, int]] = (0, 0, 0), + fg_color: Optional[Tuple[int, int, int]] = None, + save_path: Optional[Union[str, os.PathLike]] = None, + core_cnt: Optional[int] = -1) -> None: + + """ + Subtract the background from a video using multiprocessing. + + .. video:: _static/img/video_bg_subtraction.webm + :width: 900 + :autoplay: + :nocontrols: + :loop: + + .. note:: + For single core alternative, see ``simba.video_processors.video_processing.video_bg_subtraction`` + + If ``bg_video_path`` is passed, that video will be used to parse the background. If None, ``video_path`` will be use dto parse background. + Either pass ``start_frm`` and ``end_frm`` OR ``start_time`` and ``end_time`` OR pass all four arguments as None. + Those two arguments will be used to slice the background video, and the sliced part is used to parse the background. + + For example, in the scenario where there is **no** animal in the ``video_path`` video for the first 20s, then the first 20s can be used to parse the background. + In this scenario, ``bg_video_path`` can be passed as ``None`` and bg_start_time and bg_end_time can be ``00:00:00`` and ``00:00:20``, repectively. + + In the scenario where there **is** animal(s) in the entire ``video_path`` video, pass ``bg_video_path`` as a path to a video recording the arena without the animals. + + :param Union[str, os.PathLike] video_path: The path to the video to remove the background from. + :param Optional[Union[str, os.PathLike]] bg_video_path: Path to the video which contains a segment with the background only. If None, then ``video_path`` will be used. + :param Optional[int] bg_start_frm: The first frame in the background video to use when creating a representative background image. Default: None. + :param Optional[int] bg_end_frm: The last frame in the background video to use when creating a representative background image. Default: None. + :param Optional[str] bg_start_time: The start timestamp in `HH:MM:SS` format in the background video to use to create a representative background image. Default: None. + :param Optional[str] bg_end_time: The end timestamp in `HH:MM:SS` format in the background video to use to create a representative background image. Default: None. + :param Optional[Tuple[int, int, int]] bg_color: The RGB color of the moving objects in the output video. Defaults to None, which represents the original colors of the moving objects. + :param Optional[Tuple[int, int, int]] fg_color: The RGB color of the background output video. Defaults to black (0, 0, 0). + :param Optional[Union[str, os.PathLike]] save_path: The patch to where to save the output video where the background is removed. If None, saves the output video in the same directory as the input video with the ``_bg_subtracted`` suffix. Default: None. 
+ :param Optional[int] core_cnt: The number of cores to use. Defaults to -1 representing all available cores. + :return: None. + + :example: + >>> video_bg_substraction_mp(video_path='/Users/simon/Downloads/1_LH.mp4', bg_start_time='00:00:00', bg_end_time='00:00:10', bg_color=(0, 0, 0), fg_color=(255, 255, 255)) + """ + + timer = SimbaTimer(start=True) + check_file_exist_and_readable(file_path=video_path) + if bg_video_path is None: + bg_video_path = deepcopy(video_path) + video_meta_data = get_video_meta_data(video_path=video_path) + dir, video_name, ext = get_fn_ext(filepath=video_path) + if save_path is None: + save_path = os.path.join(dir, f'{video_name}_bg_subtracted{ext}') + dt = datetime.now().strftime("%Y%m%d%H%M%S") + temp_dir = os.path.join(dir, f'temp_{video_name}_{dt}') + os.makedirs(temp_dir) + check_int(name=f'{video_bg_substraction_mp.__name__} core_cnt', value=core_cnt, min_value=-1, max_value=find_core_cnt()[0]) + if core_cnt == -1: core_cnt = find_core_cnt()[0] + bg_frm = create_average_frm(video_path=bg_video_path, start_frm=bg_start_frm, end_frm=bg_end_frm, start_time=bg_start_time, end_time=bg_end_time) + bg_frm = cv2.resize(bg_frm, (video_meta_data['width'], video_meta_data['height'])) + bg_frm = bg_frm[:, :, ::-1] + frm_list = np.array_split(list(range(0, video_meta_data['frame_count'])), core_cnt) + frm_data = [] + for c, i in enumerate(frm_list): + frm_data.append((c, i)) + with multiprocessing.Pool(core_cnt, maxtasksperchild=9000) as pool: + constants = functools.partial(_bg_remover_mp, + video_path=video_path, + bg_frm=bg_frm, + bg_clr=bg_color, + fg_clr=fg_color, + video_meta_data=video_meta_data, + temp_dir=temp_dir) + for cnt, result in enumerate(pool.imap(constants, frm_data, chunksize=1)): + print(f'Frame batch {result+1} completed...') + pool.terminate() + pool.join() + + print(f"Joining {video_name} multiprocessed video...") + concatenate_videos_in_folder(in_folder=temp_dir, save_path=save_path, video_format=ext[1:], remove_splits=False) + timer.stop_timer() + stdout_success(msg=f'Video saved at {save_path}', elapsed_time=timer.elapsed_time_str) + +# video_bg_substraction_mp(video_path='/Users/simon/Downloads/1_LH.mp4', +# bg_start_time='00:00:00', +# bg_end_time='00:00:10', +# bg_color=(0, 0, 0), +# fg_color=(255, 255, 255)) + + + + + # bg = cv2.cvtColor(np.full_like(bg_frm, bg_color), cv2.COLOR_BGR2RGB) + # cap = cv2.VideoCapture(video_path) + # frm_cnt = 0 + # while True: + # ret, frm = cap.read() + # if not ret: + # break + # diff = cv2.absdiff(frm, bg_frm) + # gray_diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY) + # _, mask = cv2.threshold(gray_diff, 50, 255, cv2.THRESH_BINARY) + # if fg_color is None: + # fg = cv2.bitwise_and(frm, frm, mask=mask) + # result = cv2.add(bg, fg) + # else: + # mask_inv = cv2.bitwise_not(mask) + # fg_clr = cv2.cvtColor(np.full_like(frm, fg_color), cv2.COLOR_BGR2RGB) + # fg_clr = cv2.bitwise_and(fg_clr, fg_clr, mask=mask) + # result = cv2.bitwise_and(bg, bg, mask=mask_inv) + # result = cv2.add(result, fg_clr) + # writer.write(result) + # frm_cnt+= 1 + # print(f'Background subtraction frame {frm_cnt}/{video_meta_data["frame_count"]} (Video: {video_name})') + # + # writer.release() + # cap.release() + # timer.stop_timer() + # stdout_success(msg=f'Background subtracted from {video_name} and saved at {save_path}', elapsed_time=timer.elapsed_time) + + + + + + + +# def bg_substraction(bg_video: Union[str, os.PathLike, cv2.VideoCapture], +# video: Union[str, os.PathLike, cv2.VideoCapture]): +# +# +# if isinstance(bg_video, str): 
bg_video = cv2.VideoCapture(bg_video) +# bg_sum, bg_meta = None, get_video_meta_data(video_path=bg_video) +# while True: +# ret, frm = bg_video.read() +# if ret: +# if bg_sum is None: +# bg_sum = np.float32(frm) +# else: +# cv2.accumulate(frm, bg_sum) +# else: break +# background_model = cv2.convertScaleAbs(bg_sum / bg_meta['frame_count']) +# bg_video.release() +# +# if isinstance(video, str): +# video_meta = get_video_meta_data(video_path=bg_video) +# video = cv2.VideoCapture(video) +# +# while True: +# ret, frame = video.read() +# +# +# +# while True: +# +# +# if not ret: +# break +# +# # Resize the background model to match the frame dimensions +# background_resized = cv2.resize(background_model, (frame.shape[1], frame.shape[0])) +# +# # Calculate the absolute difference between the frame and the background model +# diff = cv2.absdiff(frame, background_resized) +# +# # Convert the difference image to grayscale +# gray_diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY) +# +# # Threshold the grayscale difference image to create a binary mask +# _, mask = cv2.threshold(gray_diff, 50, 255, cv2.THRESH_BINARY) +# +# # Invert the mask to get foreground as white and background as black +# mask_inv = cv2.bitwise_not(mask) +# +# # Create a black background +# black_background = np.zeros_like(frame) +# +# # Composite the foreground onto the black background using the inverted mask +# foreground = cv2.bitwise_and(frame, frame, mask=mask) +# +# # Composite the black background with the foreground +# result = cv2.add(black_background, foreground) +# +# # Show the original frame and the frame with background black and foreground with original colors +# cv2.imshow('Original', frame) +# cv2.imshow('Frame with Background Black and Foreground Retaining Original Colors', result) +# +# # Break the loop if 'q' is pressed +# if cv2.waitKey(30) & 0xFF == ord('q'): +# break +# +# # Release the video capture object and close all windows +# cap.release() +# cv2.destroyAllWindows() + diff --git a/simba/sandbox/biweight_midcorrelation.py b/simba/sandbox/biweight_midcorrelation.py new file mode 100644 index 000000000..f62fd6b36 --- /dev/null +++ b/simba/sandbox/biweight_midcorrelation.py @@ -0,0 +1,40 @@ +import numpy as np +from pingouin.correlation import bicor +from numba import jit +import time + + +@jit(nopython=True) +def biweight_midcorrelation(x: np.ndarray, y: np.ndarray, c: int = 9): + x_median = np.median(x) + y_median = np.median(y) + + x_mad = np.median(np.abs(x - x_median)) + y_mad = np.median(np.abs(y - y_median)) + + if x_mad == 0 or y_mad == 0: + return -1.0, -1.0 + + u = (x - x_median) / (c * x_mad) + v = (y - y_median) / (c * y_mad) + w_x = (1 - u ** 2) ** 2 * ((1 - np.abs(u)) > 0) + w_y = (1 - v ** 2) ** 2 * ((1 - np.abs(v)) > 0) + + x_norm = (x - x_median) * w_x + y_norm = (y - y_median) * w_y + denom = np.sqrt((x_norm ** 2).sum()) * np.sqrt((y_norm ** 2).sum()) + r = (x_norm * y_norm).sum() / denom + #print(r) + + + +x = np.random.randint(0, 50, (10000000,)) +y = np.random.randint(0, 50, (10000000,)) +start = time.time() +biweight_midcorrelation(x=x, y=y) +print(time.time() - start) +start = time.time() +bicor(x=x, y=y) +print(time.time() - start) + + diff --git a/simba/sandbox/blank_img.py b/simba/sandbox/blank_img.py new file mode 100644 index 000000000..8e947c8d5 --- /dev/null +++ b/simba/sandbox/blank_img.py @@ -0,0 +1,54 @@ +import os +import cv2 +from typing import Tuple, Union, Optional +from simba.utils.checks import check_if_valid_rgb_tuple, check_valid_tuple, check_if_dir_exists, check_int 
+import numpy as np + +def create_uniform_img(size: Tuple[int, int], + color: Tuple[int, int, int], + save_path: Optional[Union[str, os.PathLike]] = None) -> Union[None, np.ndarray]: + + """ + Creates an image of specified size and color, and optionally saves it to a file. + + :param Tuple[int, int] size: A tuple of two integers representing the width and height of the image. + :param Tuple[int, int, int] color: A tuple of three integers representing the RGB color (e.g., (255, 0, 0) for red). + :param Optional[Union[str, os.PathLike]] save_path: a string representing the file path to save the image. If not provided, the function returns the image as a numpy array. + :return Union[None, np.ndarray]: If save_path is provided, the function saves the image to the specified path and returns None. f save_path is not provided, the function returns the image as a numpy ndarray. + """ + + check_valid_tuple(x=size, accepted_lengths=(2,), valid_dtypes=(int,)) + check_if_valid_rgb_tuple(data=color) + img = np.zeros((size[1], size[0], 3), dtype=np.uint8) + img[:] = color[::-1] + if save_path is not None: + check_if_dir_exists(in_dir=os.path.dirname(save_path)) + cv2.imwrite(save_path, img) + else: + return img + + +def interpolate_color_palette(start_color: Tuple[int, int, int], + end_color: Tuple[int, int, int], + n: Optional[int] = 10): + """ + Generate a list of colors interpolated between two passed RGB colors. + + :param start_color: Tuple of RGB values for the start color. + :param end_color: Tuple of RGB values for the end color. + :param n: Number of colors to generate. + :return: List of interpolated RGB colors. + + :example: + >>> red, black = (255, 0, 0), (0, 0, 0) + >>> colors = interpolate_color_palette(start_color=red, end_color=black, n = 10) + """ + + check_if_valid_rgb_tuple(data=start_color) + check_if_valid_rgb_tuple(data=end_color) + check_int(name=f'{interpolate_color_palette.__name__} n', value=n, min_value=3) + return [( + int(start_color[0] + (end_color[0] - start_color[0]) * i / (n - 1)), + int(start_color[1] + (end_color[1] - start_color[1]) * i / (n - 1)), + int(start_color[2] + (end_color[2] - start_color[2]) * i / (n - 1)) + ) for i in range(n)] diff --git a/simba/sandbox/blank_vid.py b/simba/sandbox/blank_vid.py new file mode 100644 index 000000000..5f4383958 --- /dev/null +++ b/simba/sandbox/blank_vid.py @@ -0,0 +1,5 @@ +from simba.video_processors.video_processing import create_blank_video + + + +create_blank_video(path='/Users/simon/Desktop/blank_video.mp4', length=10, width=600, height=600) \ No newline at end of file diff --git a/simba/sandbox/blurbox.py b/simba/sandbox/blurbox.py new file mode 100644 index 000000000..52db1fd12 --- /dev/null +++ b/simba/sandbox/blurbox.py @@ -0,0 +1,55 @@ +from typing import Union, Optional +import os +import subprocess +from simba.utils.read_write import get_fn_ext, find_all_videos_in_directory, get_video_meta_data +from simba.video_processors.roi_selector import ROISelector +from simba.utils.checks import check_ffmpeg_available, check_float, check_if_dir_exists, check_file_exist_and_readable +from simba.utils.printing import SimbaTimer, stdout_success + + +def roi_blurbox(video_path: Union[str, os.PathLike], + blur_level: Optional[float] = 0.99, + invert: Optional[bool] = False, + save_path: Optional[Union[str, os.PathLike]] = None) -> None: + + """ + Blurs either the selected or unselected portion of a region-of-interest according to the passed blur level. + Higher blur levels produces more opaque regions. + + .. 
video:: _static/img/roi_blurbox.webm + :loop: + + :param Union[str, os.PathLike] video_path: The path to the video on disk + :param Optional[float] blur_level: The level of the blur as a ratio 0-1.0. + :param Optional[bool] invert: If True, blurs the unselected region. If False, blurs the selected region. + :param Optional[Union[str, os.PathLike]] save_path: The location where to save the blurred video. If None, then saves the blurred video in the same directory as the input video with the ``_blurred`` suffix. + :return: None + + :example: + >>> roi_blurbox(video_path='/Users/simon/Downloads/1_LH_clipped_downsampled.mp4', blur_level=0.2, invert=True) + """ + check_ffmpeg_available(raise_error=True) + timer = SimbaTimer(start=True) + check_float(name=f'{roi_blurbox.__name__} blur_level', value=blur_level, min_value=0.001, max_value=1.0) + check_file_exist_and_readable(file_path=video_path) + dir, video_name, ext = get_fn_ext(video_path) + _ = get_video_meta_data(video_path=video_path) + if save_path is not None: + check_if_dir_exists(in_dir=os.path.dirname(save_path), source=roi_blurbox.__name__) + else: + save_path = os.path.join(dir, f'{video_name}_blurred{ext}') + roi_selector = ROISelector(path=video_path) + roi_selector.run() + w, h = roi_selector.width, roi_selector.height + top_left_x, top_left_y = roi_selector.top_left + max_blur_value = int(min(w, h) / 2) / 2 + blur_level = int(max_blur_value * blur_level) + if not invert: + cmd = f'ffmpeg -i "{video_path}" -filter_complex "[0:v]crop={w}:{h}:{top_left_x}:{top_left_y},boxblur={int(blur_level)}:10[fg]; [0:v][fg]overlay={top_left_x}:{top_left_y}[v]" -map "[v]" "{save_path}" -loglevel error -stats -hide_banner -y' + else: + cmd = f'ffmpeg -i "{video_path}" -filter_complex "[0:v]boxblur={blur_level}[bg];[0:v]crop={w}:{h}:{top_left_x}:{top_left_y}[fg];[bg][fg]overlay={top_left_x}:{top_left_y}" -c:a copy "{save_path}" -loglevel error -stats -hide_banner -y' + subprocess.call(cmd, shell=True, stdout=subprocess.PIPE) + timer.stop_timer() + stdout_success(msg=f'Blurred {video_name} video saved in {save_path}', elapsed_time=timer.elapsed_time_str) + +#roi_blurbox(video_path='/Users/simon/Downloads/1_LH_clipped_downsampled.mp4', blur_level=0.7, invert=False) diff --git a/simba/sandbox/bouts_df b/simba/sandbox/bouts_df new file mode 100644 index 000000000..05711e960 --- /dev/null +++ b/simba/sandbox/bouts_df @@ -0,0 +1,6 @@ +,Event,Start_time,End Time,Start_frame,End_frame,Bout_time +0,Nose to Nose,0.2857142857142857,0.6428571428571429,4,8,0.35714285714285715 +1,Nose to Nose,20.071428571428573,30.428571428571427,281,425,10.357142857142858 +2,Nose to Tailbase,11.428571428571429,19.428571428571427,160,271,8.0 +3,Nose to Tailbase,23.0,23.071428571428573,322,322,0.07142857142857142 +4,Nose to Tailbase,23.142857142857142,23.357142857142858,324,326,0.21428571428571427 diff --git a/simba/sandbox/brillouins_index.py b/simba/sandbox/brillouins_index.py new file mode 100644 index 000000000..5972b1985 --- /dev/null +++ b/simba/sandbox/brillouins_index.py @@ -0,0 +1,50 @@ +import numpy as np +from simba.utils.checks import check_valid_array + + +def brillouins_index(x: np.array) -> float: + """ + Calculate Brillouin's Diversity Index for a given array of values. + + Brillouin's Diversity Index is a measure of species diversity that accounts for both species richness + and evenness of distribution. + + Brillouin's Diversity Index (H) is calculated using the formula: + + .. 
math::
+
+       H = \\frac{1}{\\log(S)} \\sum_{i=1}^{S} \\frac{N_i(N_i - 1)}{n(n-1)}
+
+    where:
+       - :math:`H` is Brillouin's Diversity Index,
+       - :math:`S` is the total number of unique species,
+       - :math:`N_i` is the count of individuals in the i-th species,
+       - :math:`n` is the total number of individuals.
+
+    :param np.array x: One-dimensional numpy array containing the values for which Brillouin's Index is calculated.
+    :return float: Brillouin's Diversity Index value for the input array `x`.
+
+    :example:
+    >>> x = np.random.randint(0, 10, (100,))
+    >>> brillouins_index(x)
+    """
+
+    check_valid_array(
+        source=f"{brillouins_index.__name__} x",
+        accepted_ndims=(1,),
+        data=x,
+        accepted_dtypes=(np.float32, np.float64, np.int32, np.int64, np.int8),
+        min_axis_0=2,
+    )
+    n_total = x.shape[0]
+    n_unique = np.unique(x, return_counts=True)[1]
+    if n_unique.shape[0] == 1:
+        return 1.0
+    else:
+        S = len(n_unique)
+        h = 0
+        for count in n_unique:
+            h += count * (count - 1)
+        h /= (n_total * (n_total - 1))
+        h /= np.log(S)
+        return h
\ No newline at end of file
diff --git a/simba/sandbox/calc_N_degree_direction_switches.py b/simba/sandbox/calc_N_degree_direction_switches.py
new file mode 100644
index 000000000..c32f71980
--- /dev/null
+++ b/simba/sandbox/calc_N_degree_direction_switches.py
@@ -0,0 +1,5 @@
+import numpy as np
+
+def count_n_degree_direction_switches(x: np.ndarray):
+    pass
+
diff --git a/simba/sandbox/calinski_harabasz.py b/simba/sandbox/calinski_harabasz.py
new file mode 100644
index 000000000..e77c19f03
--- /dev/null
+++ b/simba/sandbox/calinski_harabasz.py
@@ -0,0 +1,43 @@
+import numpy as np
+from numba import jit, prange, njit
+import time
+
+from sklearn.metrics.cluster import calinski_harabasz_score
+
+@njit("(float32[:,:], int64[:])", cache=True)
+def calinski_harabasz(x: np.ndarray,
+                      y: np.ndarray) -> float:
+    """
+    Compute the Calinski-Harabasz score to evaluate clustering quality.
+
+    The Calinski-Harabasz score is a measure of cluster separation and compactness.
+    It is calculated as the ratio of the between-cluster dispersion to the
+    within-cluster dispersion. A higher score indicates better clustering.
+
+    :param x: 2D array representing the data points. Shape (n_samples, n_features/n_dimensions).
+    :param y: 1D array representing the cluster label of each data point. Shape (n_samples,).
+    :return float: Calinski-Harabasz score.
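+
+    In standard notation, the returned quantity is:
+
+    .. math::
+
+       CH = \\frac{B / (k - 1)}{W / (n - k)}
+
+    where :math:`B` is the between-cluster dispersion, :math:`W` the within-cluster dispersion, :math:`k` the number of clusters, and :math:`n` the number of samples.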
+ + :example: + >>> x = np.random.random((100, 2)).astype(np.float32) + >>> y = np.random.randint(0, 100, (100,)).astype(np.int64) + >>> calinski_harabasz(x=x, y=y) + """ + n_labels = np.unique(y).shape[0] + labels = np.unique(y) + extra_dispersion, intra_dispersion = 0.0, 0.0 + global_mean = np.full((x.shape[1]), np.nan) + for i in range(x.shape[1]): + global_mean[i] = np.mean(x[:, i].flatten()) + for k in range(n_labels): + cluster_k = x[np.argwhere(y == labels[k]).flatten(), :] + mean_k = np.full((x.shape[1]), np.nan) + for i in prange(cluster_k.shape[1]): mean_k[i] = np.mean(cluster_k[:, i].flatten()) + extra_dispersion += len(cluster_k) * np.sum((mean_k - global_mean) ** 2) + intra_dispersion += np.sum((cluster_k - mean_k) ** 2) + return extra_dispersion * (x.shape[0] - n_labels) / (intra_dispersion * (n_labels - 1.0)) + + +x = np.random.random((100, 2)).astype(np.float32) +y = np.random.randint(-1, 2, (100,)).astype(np.int64) +calinski_harabasz(x=x, y=y) \ No newline at end of file diff --git a/simba/sandbox/cb_frame.py b/simba/sandbox/cb_frame.py new file mode 100644 index 000000000..fbce0c815 --- /dev/null +++ b/simba/sandbox/cb_frame.py @@ -0,0 +1,135 @@ +__author__ = "Simon Nilsson" + +import os +from tkinter import * +from tkinter import ttk +from typing import Callable, Dict, List, Optional, Tuple, Union, Any + +import PIL.Image +from PIL import ImageTk + +from simba.mixins.config_reader import ConfigReader +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.ui.tkinter_functions import (DropDownMenu, Entry_Box, FileSelect, hxtScrollbar) +from simba.utils.checks import check_float, check_int, check_valid_lst, check_instance +from simba.utils.enums import Formats, Options +from simba.utils.errors import CountError, NoFilesFoundError +from simba.utils.lookups import (get_color_dict, get_icons_paths, get_named_colors) +from simba.utils.read_write import find_core_cnt + + +def create_cb_frame(cb_titles: List[str], + main_frm: Optional[Union[Frame, Canvas, LabelFrame, ttk.Frame]] = None, + frm_title: Optional[str] = '', + idx_row: Optional[int] = -1, + command: Optional[Callable[[str], Any]] = None) -> Dict[str, BooleanVar]: + """ + Creates a labelframe with checkboxes and inserts the labelframe into a window. + + .. image:: _static/img/create_cb_frame.png + :width: 200 + :align: center + + .. note:: + One checkbox will be created per ``cb_titles``. The checkboxes will be labeled according to the ``cb_titles``. + If checking/un-checking the box should have some effect, pass that function as ``command`` which takes the name of the checked/unchecked box. + + :param Optional[Union[Frame, Canvas, LabelFrame, ttk.Frame]] main_frm: The pop-up window to insert the labelframe into. + :param List[str] cb_titles: List of strings representing the names of the checkboxes. + :param Optional[str] frm_title: Title of the frame. + :param Optional[int] idx_row: The location in main_frm to create the LabelFrame. If -1, then at the bottom. + :param Optional[Callable[[str], Any]] frm_title: Optional function callable associated with checking/unchecking the checkboxes. + :return Dict[str, BooleanVar]: Dictionary holding the ``cb_titles`` as keys and the BooleanVar representing if the checkbox is ticked or not. 
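+
+    .. note::
+       The function does not start the Tk event loop; when ``main_frm`` is created internally, the caller is expected to run ``mainloop`` on it.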
+ + :example: + >>> create_cb_frame(cb_titles=['Attack', 'Sniffing', 'Rearing'], frm_title='My classifiers') + """ + + check_valid_lst(data=cb_titles, source=f'{create_cb_frame.__name__} cb_titles', valid_dtypes=(str,), min_len=1) + check_int(name=f'{create_cb_frame.__name__} idx_row', value=idx_row, min_value=-1) + + if main_frm is not None: + check_instance(source=f'{create_cb_frame.__name__} parent_frm', accepted_types=(Frame, Canvas, LabelFrame, ttk.Frame), instance=main_frm) + else: + main_frm = Toplevel(); main_frm.minsize(960, 720); main_frm.lift() + if idx_row == -1: + idx_row = int(len(list(main_frm.children.keys()))) + cb_frm = LabelFrame(main_frm, text=frm_title, font=Formats.LABELFRAME_HEADER_FORMAT.value) + cb_dict = {} + for cnt, title in enumerate(cb_titles): + cb_dict[title] = BooleanVar(value=False) + if command is not None: + cb = Checkbutton(cb_frm, text=title, variable=cb_dict[title], command=lambda k=cb_titles[cnt]: command(k)) + else: + cb = Checkbutton(cb_frm, text=title, variable=cb_dict[title]) + cb.grid(row=cnt, column=0, sticky=NW) + cb_frm.grid(row=idx_row, column=0, sticky=NW) + + #main_frm.mainloop() + + return cb_dict + +def create_dropdown_frame(drop_down_titles: List[str], + drop_down_options: List[str], + frm_title: Optional[str] = '', + idx_row: Optional[int] = -1, + main_frm: Optional[Union[Frame, Canvas, LabelFrame, ttk.Frame]] = None) -> Dict[str, DropDownMenu]: + + """ + Creates a labelframe with dropdowns. + + .. image:: _static/img/create_dropdown_frame.png + :width: 300 + :align: center + + :param Optional[Union[Frame, Canvas, LabelFrame, ttk.Frame]] main_frm: The pop-up window to insert the labelframe into. If None, one will be created. + :param List[str] drop_down_titles: The titles of the dropdown menus. + :param List[str] drop_down_options: The options in each dropdown. Note: All dropdowns must have the same options. + :param Optional[str] frm_title: Title of the frame. + :return Dict[str, BooleanVar]: Dictionary holding the ``drop_down_titles`` as keys and the drop-down menus as values. 
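+
+    .. note::
+       Each dropdown is initialized to the first entry in ``drop_down_options``.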
+ + :example: + >>> create_dropdown_frame(drop_down_titles=['Dropdown 1', 'Dropdown 2', 'Dropdown 2'], drop_down_options=['Option 1', 'Option 2'], frm_title='My dropdown frame') + """ + + check_valid_lst(data=drop_down_titles, source=f'{create_dropdown_frame.__name__} drop_down_titles', valid_dtypes=(str,), min_len=1) + check_valid_lst(data=drop_down_options, source=f'{create_dropdown_frame.__name__} drop_down_options', valid_dtypes=(str,), min_len=2) + check_int(name=f'{create_cb_frame.__name__} idx_row', value=idx_row, min_value=-1) + if main_frm is not None: + check_instance(source=f'{create_cb_frame.__name__} parent_frm', accepted_types=(Frame, Canvas, LabelFrame, ttk.Frame), instance=main_frm) + else: + main_frm = Toplevel(); main_frm.minsize(960, 720); main_frm.lift() + if idx_row == -1: + idx_row = int(len(list(main_frm.children.keys()))) + dropdown_frm = LabelFrame(main_frm, text=frm_title, font=Formats.LABELFRAME_HEADER_FORMAT.value) + dropdown_dict = {} + for cnt, title in enumerate(drop_down_titles): + dropdown_dict[title] = DropDownMenu(dropdown_frm, title, drop_down_options, "35") + dropdown_dict[title].setChoices(drop_down_options[0]) + dropdown_dict[title].grid(row=cnt, column=0, sticky=NW) + dropdown_frm.grid(row=idx_row, column=0, sticky=NW) + #main_frm.mainloop() + return dropdown_dict + +def create_run_frm(run_function: Callable, + title: Optional[str] = "RUN", + btn_txt_clr: Optional[str] = "black") -> None: + """ + Create a label frame with a single button with a specified callback. + + :param Callable run_function: The function/method callback of the button. + :param str title: The title of the frame. + """ + if hasattr(self, "run_frm"): + self.run_frm.destroy() + self.run_btn.destroy() + self.run_frm = LabelFrame( + self.main_frm, + text=title, + font=Formats.LABELFRAME_HEADER_FORMAT.value, + fg=btn_txt_clr, + ) + self.run_btn = Button(self.run_frm, text=title, fg="blue", command=lambda: run_function() + ) + self.run_frm.grid(row=self.children_cnt_main() + 1, column=0, sticky=NW) + self.run_btn.grid(row=0, column=0, sticky=NW) \ No newline at end of file diff --git a/simba/sandbox/change_img_file_format.py b/simba/sandbox/change_img_file_format.py new file mode 100644 index 000000000..fab16eaa3 --- /dev/null +++ b/simba/sandbox/change_img_file_format.py @@ -0,0 +1,117 @@ + +try: + from typing import Literal +except: + from typing_extensions import Literal +from typing import Union, Optional +import os +from tkinter import * +from datetime import datetime +from PIL import Image +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, DropDownMenu, FolderSelect + + +from simba.utils.checks import check_if_dir_exists, check_str, check_int +from simba.utils.enums import Options, Keys, Links +from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext +from simba.utils.printing import SimbaTimer, stdout_success +from simba.video_processors.video_processing import convert_to_jpeg + + + +# class Convert2jpegPopUp(PopUpMixin): +# def __init__(self): +# super().__init__(title="CONVERT IMAGE DIRECTORY TO JPEG") +# settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) +# self.selected_frame_dir = FolderSelect(settings_frm, "IMAGE DIRECTORY PATH:", title="Select a image directory", lblwidth=25) +# self.quality_scale = Scale(settings_frm, from_=1, to=100, orient=HORIZONTAL, length=200, label='JPEG 
QUALITY', fg='blue') +# self.quality_scale.set(95) +# run_btn = Button(settings_frm, text="RUN JPEG CONVERSION", command=lambda: self.run()) +# settings_frm.grid(row=0, column=0, sticky="NW") +# self.selected_frame_dir.grid(row=0, column=0, sticky="NW") +# self.quality_scale.grid(row=1, column=0, sticky="NW") +# run_btn.grid(row=2, column=0, sticky="NW") +# self.main_frm.mainloop() +# +# def run(self): +# folder_path = self.selected_frame_dir.folder_path +# check_if_dir_exists(in_dir=folder_path) +# _ = convert_to_jpeg(directory=folder_path, quality=int(self.quality_scale.get()), verbose=True) + + +# class Convert2bmpPopUp(PopUpMixin): +# def __init__(self): +# super().__init__(title="CONVERT IMAGE DIRECTORY TO BMP") +# settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) +# self.selected_frame_dir = FolderSelect(settings_frm, "IMAGE DIRECTORY PATH:", title="Select a image directory", lblwidth=25) +# self.bits_dropdown = DropDownMenu(settings_frm, "BMP BITS:", [1, 4, 8, 24], labelwidth=25) +# self.bits_dropdown.setChoices(24) +# self.compression_drop_down = DropDownMenu(settings_frm, "COMPRESSION LEVEL:", list(range(0, 10)), labelwidth=25) +# self.compression_drop_down.setChoices(0) + + +def convert_to_png(directory: Union[str, os.PathLike], + verbose: Optional[bool] = False) -> None: + + timer = SimbaTimer(start=True) + check_if_dir_exists(in_dir=directory, source=convert_to_png.__name__) + file_paths = find_files_of_filetypes_in_directory(directory=directory, extensions=Options.ALL_IMAGE_FORMAT_OPTIONS.value, raise_error=True) + datetime_ = datetime.now().strftime("%Y%m%d%H%M%S") + print(f"{len(file_paths)} image file(s) found in {directory}...") + save_dir = os.path.join(directory, f'png_{datetime_}') + os.makedirs(save_dir) + for file_cnt, file_path in enumerate(file_paths): + dir, file_name, _ = get_fn_ext(filepath=file_path) + save_path = os.path.join(save_dir, f'{file_name}.png') + if verbose: + print(f"Converting file {file_cnt+1}/{len(file_paths)} ...") + img = Image.open(file_path) + if img.mode in ('RGBA', 'LA'): img = img.convert('RGB') + img.save(save_path, 'PNG') + timer.stop_timer() + stdout_success(msg=f"SIMBA COMPLETE: {len(file_paths)} image file(s) in {directory} directory converted to PNG and stored in {save_dir} directory", source=convert_to_png.__name__, elapsed_time=timer.elapsed_time_str) + + +convert_to_png(directory='/Users/simon/Desktop/imgs') + +# def convert_to_jpeg(directory: Union[str, os.PathLike], +# file_type_in: Literal['.bmp', '.jpg', '.jpeg', '.png'], +# quality: Optional[int] = 95, +# verbose: Optional[bool] = False) -> None: +# +# """ +# Convert the file type of all image files within a directory to jpeg format of the passed quality. +# +# .. note:: +# Quality above 95 should be avoided; 100 disables portions of the JPEG compression algorithm, and results in large files with hardly any gain in image quality +# +# :parameter Union[str, os.PathLike] directory: Path to directory holding image files +# :parameter str file_type_in: Input file type, e.g., 'bmp' or 'png. +# :parameter str file_type_out: Output file type, e.g., 'bmp' or 'png. +# :parameter Optional[bool] verbose: If True, prints progress. Default False. 
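+#     :parameter Optional[int] quality: The JPEG output quality, an integer between 1 and 100. Default: 95.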
+# +# :example: +# >>> convert_to_jpeg(directory='/Users/simon/Desktop/imgs', file_type_in='.png', quality=15) +# """ +# timer = SimbaTimer(start=True) +# check_if_dir_exists(in_dir=directory, source=convert_to_jpeg.__name__) +# check_str(name=f'{convert_to_jpeg.__name__} file_type_in', value=file_type_in, options=Options.ALL_IMAGE_FORMAT_OPTIONS.value) +# check_int(name=f'{convert_to_jpeg.__name__} quality', value=quality, min_value=1, max_value=100) +# file_paths = find_files_of_filetypes_in_directory(directory=directory, extensions=[file_type_in], raise_error=True) +# datetime_ = datetime.now().strftime("%Y%m%d%H%M%S") +# print(f"{len(file_paths)} {file_type_in} image file(s) found in {directory}...") +# save_dir = os.path.join(directory, f'png_{datetime_}') +# os.makedirs(save_dir) +# for file_cnt, file_path in enumerate(file_paths): +# dir, file_name, _ = get_fn_ext(filepath=file_path) +# save_path = os.path.join(save_dir, f'{file_name}.jpeg') +# if verbose: +# print(f"Converting file {file_cnt+1}/{len(file_paths)} ...") +# img = Image.open(file_path) +# if img.mode in ('RGBA', 'LA'): img = img.convert('RGB') +# img.save(save_path, 'JPEG', quality=quality) +# timer.stop_timer() +# stdout_success(msg=f"SIMBA COMPLETE: {len(file_paths)} {file_type_in} image files in {directory} directory converted to jpeg and stored in {save_dir} directory", source=convert_to_jpeg.__name__, elapsed_time=timer.elapsed_time_str) + +#convert_to_jpeg(directory='/Users/simon/Desktop/imgs', file_type_in='.png', quality=15) \ No newline at end of file diff --git a/simba/sandbox/change_n_jobs_scikit.py b/simba/sandbox/change_n_jobs_scikit.py new file mode 100644 index 000000000..e29ae3e5e --- /dev/null +++ b/simba/sandbox/change_n_jobs_scikit.py @@ -0,0 +1,9 @@ +import pickle + +MODEL_PATH = r'/Users/simon/Downloads/Floor Skimming_3.sav' +NEW_MODEL_SAVE_PATH = r'/Users/simon/Downloads/Floor Skimming_3_new_n_jobs.sav' +NEW_N_JOBS = 16 + +clf = pickle.load(open(MODEL_PATH, "rb")) +clf.n_jobs = NEW_N_JOBS +pickle.dump(clf, open(NEW_MODEL_SAVE_PATH, "wb")) diff --git a/simba/sandbox/circling_detector.py b/simba/sandbox/circling_detector.py new file mode 100644 index 000000000..ff40ec66c --- /dev/null +++ b/simba/sandbox/circling_detector.py @@ -0,0 +1,119 @@ +import os +import numpy as np +import pandas as pd +from numba import typed +from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, get_fn_ext, read_video_info +from simba.mixins.circular_statistics import CircularStatisticsMixin +from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin +from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin +from simba.mixins.config_reader import ConfigReader +from simba.utils.enums import Formats +from typing import Union, Optional +from simba.utils.checks import check_if_dir_exists, check_str, check_valid_dataframe, check_int, check_all_file_names_are_represented_in_video_log +from simba.utils.data import detect_bouts, plug_holes_shortest_bout +from simba.utils.printing import stdout_success + +CIRCLING = 'CIRCLING' + +class CirclingDetector(ConfigReader): + + """ + Detect circling using heuristic rules. + + .. 
important:: + Circling is detected as :underline:`present` when **the circular range of the animal is above the ``circular_range_threshold`` within the preceding ``time_threshold``** AND + **the movement of the animal (defined as the sum of the center movement) is above the ``movement_threshold`` within the preceding ``time_threshold``.** + + :param Union[str, os.PathLike] data_dir: Path to directory containing pose-estimated body-part data in CSV format. + :param Union[str, os.PathLike] config_path: Path to SimBA project config file. + :param Optional[str] nose_name: The name of the pose-estimated nose body-part. Defaults to 'nose'. + :param Optional[str] left_ear: The name of the pose-estimated left ear body-part. Defaults to 'left_ear'. + :param Optional[str] right_ear: The name of the pose-estimated right ear body-part. Defaults to 'right_ear'. + :param Optional[str] tail_base_name: The name of the pose-estimated tail base body-part. Defaults to 'tail_base'. + :param Optional[str] center_name: The name of the pose-estimated center body-part. Defaults to 'center'. + :param Optional[int] time_threshold: The time window in preceding seconds in which to evaluate the animals circular range. Default: 10. + :param Optional[int] circular_range_threshold: A value in degrees, between 0-360. + :param Optional[int] movement_threshold: A movement threshold in millimeters. + :param Optional[Union[str, os.PathLike]] save_dir: Directory where to store the results. If None, then results are stored in the ``logs`` directory of the SimBA project. + + References + ---------- + .. [1] Sabnis et al., Visual detection of seizures in mice using supervised machine learning, `biorxiv`, doi: https://doi.org/10.1101/2024.05.29.596520. + + :example: + >>> CirclingDetector(data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location', config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini") + """ + + def __init__(self, + data_dir: Union[str, os.PathLike], + config_path: Union[str, os.PathLike], + nose_name: Optional[str] = 'nose', + left_ear_name: Optional[str] = 'left_ear', + right_ear_name: Optional[str] = 'right_ear', + tail_base_name: Optional[str] = 'tail_base', + center_name: Optional[str] = 'center', + time_threshold: Optional[int] = 10, + circular_range_threshold: Optional[int] = 320, + movement_threshold: Optional[int] = 60, + save_dir: Optional[Union[str, os.PathLike]] = None): + + check_if_dir_exists(in_dir=data_dir) + for bp_name in [nose_name, left_ear_name, right_ear_name, tail_base_name]: check_str(name='body part name', value=bp_name, allow_blank=False) + self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv']) + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + self.nose_heads = [f'{nose_name}_x'.lower(), f'{nose_name}_y'.lower()] + self.left_ear_heads = [f'{left_ear_name}_x'.lower(), f'{left_ear_name}_y'.lower()] + self.right_ear_heads = [f'{right_ear_name}_x'.lower(), f'{right_ear_name}_y'.lower()] + self.center_heads = [f'{center_name}_x'.lower(), f'{center_name}_y'.lower()] + self.required_field = self.nose_heads + self.left_ear_heads + self.right_ear_heads + self.save_dir = save_dir + if self.save_dir is None: + self.save_dir = os.path.join(self.logs_path, f'circling_data_{self.datetime}') + os.makedirs(self.save_dir) + else: + check_if_dir_exists(in_dir=self.save_dir) + self.time_threshold, self.circular_range_threshold, self.movement_threshold = 
+
+    def run(self):
+        agg_results = pd.DataFrame(columns=['VIDEO', 'CIRCLING FRAMES', 'CIRCLING TIME (S)', 'CIRCLING BOUT COUNTS', 'CIRCLING PCT OF SESSION', 'VIDEO TOTAL FRAMES', 'VIDEO TOTAL TIME (S)'])
+        agg_results_path = os.path.join(self.save_dir, 'aggregate_circling_results.csv')
+        check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths)
+        for file_cnt, file_path in enumerate(self.data_paths):
+            video_name = get_fn_ext(filepath=file_path)[1]
+            print(f'Analyzing {video_name} ({file_cnt+1}/{len(self.data_paths)})...')
+            save_file_path = os.path.join(self.save_dir, f'{video_name}.csv')
+            df = read_df(file_path=file_path, file_type='csv').reset_index(drop=True)
+            _, px_per_mm, fps = read_video_info(video_info_df=self.video_info_df, video_name=video_name)
+            df.columns = [str(x).lower() for x in df.columns]
+            check_valid_dataframe(df=df, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.required_field)
+
+            nose_arr = df[self.nose_heads].values.astype(np.float32)
+            left_ear_arr = df[self.left_ear_heads].values.astype(np.float32)
+            right_ear_arr = df[self.right_ear_heads].values.astype(np.float32)
+
+            center_shifted = FeatureExtractionMixin.create_shifted_df(df[self.center_heads])
+            center_1, center_2 = center_shifted.iloc[:, 0:2].values, center_shifted.iloc[:, 2:4].values
+
+            angle_degrees = CircularStatisticsMixin().direction_three_bps(nose_loc=nose_arr, left_ear_loc=left_ear_arr, right_ear_loc=right_ear_arr).astype(np.float32)
+            sliding_circular_range = CircularStatisticsMixin().sliding_circular_range(data=angle_degrees, time_windows=np.array([self.time_threshold], dtype=np.float64), fps=int(fps)).flatten()
+            movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=center_1[:, 0].flatten(), bp_2_x=center_2[:, 0].flatten(), bp_1_y=center_1[:, 1].flatten(), bp_2_y=center_2[:, 1].flatten(), px_per_mm=px_per_mm)
+            movement_sum = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=movement.astype(np.float32), window_sizes=np.array([self.time_threshold], dtype=np.float64), sample_rate=fps, statistics=typed.List(["sum"])).astype(np.int32)[0].flatten()
+
+            circling_idx = np.argwhere(sliding_circular_range >= self.circular_range_threshold).astype(np.int32).flatten()
+            movement_idx = np.argwhere(movement_sum >= self.movement_threshold).astype(np.int32).flatten()
+            circling_idx = np.intersect1d(circling_idx, movement_idx)
+            df[CIRCLING] = 0
+            df.loc[circling_idx, CIRCLING] = 1
+            df = plug_holes_shortest_bout(data_df=df, clf_name=CIRCLING, fps=fps, shortest_bout=100)
+            bouts = detect_bouts(data_df=df, target_lst=[CIRCLING], fps=fps)
+            circling_frm_cnt = int(df[CIRCLING].sum())
+            df.to_csv(save_file_path)
+            agg_results.loc[len(agg_results)] = [video_name, circling_frm_cnt, round(circling_frm_cnt / fps, 4), len(bouts), round((circling_frm_cnt / len(df)) * 100, 4), len(df), round(len(df)/fps, 2)]
+
+        agg_results.to_csv(agg_results_path)
+        stdout_success(msg=f'Results saved in {self.save_dir} directory.')
+
+
+# detector = CirclingDetector(data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location', config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini")
+# detector.run()
+
diff --git a/simba/sandbox/circular_statistics.py b/simba/sandbox/circular_statistics.py
new file mode 100644
index 000000000..fccd5fe8d
--- /dev/null
+++ b/simba/sandbox/circular_statistics.py
@@ -0,0 +1,680 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import math
+from typing import Optional, Tuple
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+import numpy as np
+from numba import cuda, int32
+
+try:
+    import cupy as cp
+except:
+    import numpy as cp
+
+from simba.utils.checks import check_float, check_int, check_valid_array
+from simba.utils.enums import Formats
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit()
+def _cuda_direction_from_two_bps(x, y, results):
+    i = cuda.grid(1)
+    if i >= x.shape[0]:
+        return
+    else:
+        a = math.atan2(x[i][0] - y[i][0], y[i][1] - x[i][1]) * (180 / math.pi)
+        a = int32(a + 360 if a < 0 else a)
+        results[i] = a
+
+
+def direction_from_two_bps(x: np.ndarray, y: np.ndarray) -> np.ndarray:
+    """
+    Compute the directionality in degrees from two body-parts. E.g., ``nape`` and ``nose``,
+    or ``swim_bladder`` and ``tail``, with GPU acceleration.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/direction_two_bps.csv
+       :widths: 10, 90
+       :align: center
+       :header-rows: 1
+
+    .. seealso::
+       For CPU function see :func:`~simba.mixins.circular_statistics.CircularStatisticsMixin.direction_two_bps`.
+
+    :param np.ndarray x: Size len(frames) x 2 representing x and y coordinates for first body-part.
+    :param np.ndarray y: Size len(frames) x 2 representing x and y coordinates for second body-part.
+    :return: Frame-wise directionality in degrees.
+    :rtype: np.ndarray
+    """
+    x = np.ascontiguousarray(x).astype(np.int32)
+    y = np.ascontiguousarray(y).astype(np.int32)
+    x_dev = cuda.to_device(x)
+    y_dev = cuda.to_device(y)
+    results = cuda.device_array((x.shape[0]), dtype=np.int32)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_direction_from_two_bps[bpg, THREADS_PER_BLOCK](x_dev, y_dev, results)
+    results = results.copy_to_host()
+    return results
+
+
+def sliding_circular_hotspots(x: np.ndarray,
+                              time_window: float,
+                              sample_rate: float,
+                              bins: np.ndarray,
+                              batch_size: Optional[int] = int(3.5e+7)) -> np.ndarray:
+    """
+    Calculate the proportion of data points falling within specified circular bins over a sliding time window using GPU.
+
+    This function processes time series data representing angles (in degrees) and calculates the proportion of data
+    points within specified angular bins over a sliding window. The calculations are performed in batches to
+    accommodate large datasets efficiently.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/sliding_circular_hotspots.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
+    .. seealso::
+       For CPU function see :func:`~simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_circular_hotspots`.
+
+    :param np.ndarray x: The input time series data in degrees. Should be a 1D numpy array.
+    :param float time_window: The size of the sliding window in seconds.
+    :param float sample_rate: The sample rate of the time series data (i.e., hz, fps).
+    :param np.ndarray bins: 2D array of circular bins, where each row defines [start_degree, end_degree] inclusive.
+    :param Optional[int] batch_size: The size of each batch for processing the data. Default is 3.5e+7 (35m).
+    :return: A 2D numpy array where each row corresponds to a time point in `data`, and each column represents a circular bin. The values in the array represent the proportion of data points within each bin at each time point. The first column represents the first bin.
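+
+    :example:
+    >>> # Illustrative sketch (assumes CuPy and a CUDA device; inputs are synthetic).
+    >>> x = np.random.randint(0, 361, (5000, )).astype(np.int32)
+    >>> bins = np.array([[270, 90], [91, 269]])
+    >>> results = sliding_circular_hotspots(x=x, time_window=1.0, sample_rate=10.0, bins=bins)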
+ :rtype: np.ndarray + """ + + n = x.shape[0] + x = cp.asarray(x, dtype=cp.float16) + results = cp.full((x.shape[0], bins.shape[0]), dtype=cp.float16, fill_value=-1) + window_size = int(cp.ceil(time_window * sample_rate)) + for cnt, left in enumerate(range(0, n, batch_size)): + right = int(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size + 1 + x_batch = x[left:right] + x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16) + batch_results = cp.full((x_batch.shape[0], bins.shape[0]), dtype=cp.float16, fill_value=-1) + for bin_cnt in range(bins.shape[0]): + if bins[bin_cnt][0] > bins[bin_cnt][1]: + mask = ((x_batch >= bins[bin_cnt][0]) & (x_batch <= 360)) | ((x_batch >= 0) & (x_batch <= bins[bin_cnt][1])) + else: + mask = (x_batch >= bins[bin_cnt][0]) & (x_batch <= bins[bin_cnt][1]) + count_per_row = cp.array(mask.sum(axis=1) / window_size).reshape(-1, ) + batch_results[:, bin_cnt] = count_per_row + results[left + window_size - 1:right, ] = batch_results + return results.get() + + + + +def sliding_circular_mean(x: np.ndarray, + time_window: float, + sample_rate: int, + batch_size: Optional[int] = 3e+7) -> np.ndarray: + + """ + Calculate the sliding circular mean over a time window for a series of angles. + + This function computes the circular mean of angles in the input array `x` over a specified sliding window. + The circular mean is a measure of the average direction for angles, which is especially useful for angular data + where traditional averaging would not be meaningful due to the circular nature of angles (e.g., 359° and 1° should average to 0°). + + The calculation is performed using a sliding window approach, where the circular mean is computed for each window + of angles. The function leverages GPU acceleration via CuPy for efficiency when processing large datasets. + + The circular mean :math:`\\mu` for a set of angles is calculated using the following formula: + + .. math:: + + \\mu = \\text{atan2}\\left(\\frac{1}{N} \\sum_{i=1}^{N} \\sin(\\theta_i), \\frac{1}{N} \\sum_{i=1}^{N} \\cos(\\theta_i)\\right) + + - :math:`\\theta_i` are the angles in radians within the sliding window + - :math:`N` is the number of samples in the window + + + .. csv-table:: + :header: EXPECTED RUNTIMES + :file: ../../../docs/tables/sliding_circular_mean.csv + :widths: 10, 45, 45 + :align: center + :header-rows: 1 + + .. seealso:: + For CPU function see :func:`~simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_circular_mean`. + + :param np.ndarray x: Input array containing angle values in degrees. The array should be 1-dimensional. + :param float time_window: Time duration for the sliding window, in seconds. This determines the number of samples in each window based on the `sample_rate`. + :param int sample_rate: The number of samples per second (i.e., FPS). This is used to calculate the window size in terms of array indices. + :param Optional[int] batch_size: The maximum number of elements to process in each batch. This is used to handle large arrays by processing them in chunks to avoid memory overflow. Defaults to 3e+7 (30 million elements). + :return np.ndarray: A 1D numpy array of the same length as `x`, containing the circular mean for each sliding window. Values before the window is fully populated will be set to -1. 
+
+    :example:
+    >>> x = np.random.randint(0, 361, (5000, )).astype(np.int32)
+    >>> results = sliding_circular_mean(x, 1, 10)
+    """
+
+    window_size = np.ceil(time_window * sample_rate).astype(np.int64)
+    n = x.shape[0]
+    results = cp.full(x.shape[0], -1, dtype=np.int32)
+    for cnt, left in enumerate(range(0, int(n), int(batch_size))):
+        right = np.int32(min(left + batch_size, n))
+        if cnt > 0:
+            left = left - window_size + 1
+        x_batch = cp.asarray(x[left:right])
+        x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size)
+        x_batch = np.deg2rad(x_batch)
+        cos, sin = cp.cos(x_batch).astype(np.float32), cp.sin(x_batch).astype(np.float32)
+        r = cp.rad2deg(cp.arctan2(cp.mean(sin, axis=1), cp.mean(cos, axis=1)))
+        r = cp.where(r < 0, r + 360, r)
+        results[left + window_size - 1:right] = r
+    return results.get()
+
+
+def sliding_circular_range(x: np.ndarray,
+                           time_window: float,
+                           sample_rate: float,
+                           batch_size: Optional[int] = int(5e+7)) -> np.ndarray:
+    """
+    Computes the sliding circular range of a time series data array using GPU.
+
+    This function calculates the circular range of a time series data array using a sliding window approach.
+    The input data is assumed to be in degrees, and the function handles the circular nature of the data
+    by considering the circular distance between angles.
+
+    .. math::
+       R = \\min \\left( \\text{max}(\\Delta \\theta) - \\text{min}(\\Delta \\theta), \\, 360 - \\text{max}(\\Delta \\theta) + \\text{min}(\\Delta \\theta) \\right)
+
+    where:
+
+    - :math:`\\Delta \\theta` is the difference between angles within the window,
+    - :math:`360` accounts for the circular nature of the data (i.e., wrap-around at 360 degrees).
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/sliding_circular_range.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
+    .. seealso::
+       For CPU function see :func:`~simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_circular_range`.
+
+    :param np.ndarray x: The input time series data in degrees. Should be a 1D numpy array.
+    :param float time_window: The size of the sliding window in seconds.
+    :param float sample_rate: The sample rate of the time series data (i.e., hz, fps).
+    :param Optional[int] batch_size: The size of each batch for processing the data. Default is 5e+7 (50m).
+    :return: A numpy array containing the sliding circular range values.
+    :rtype: np.ndarray
+
+    :example:
+    >>> x = np.random.randint(0, 361, (19, )).astype(np.int32)
+    >>> p = sliding_circular_range(x, 1, 10)
+    """
+
+    n = x.shape[0]
+    x = cp.asarray(x, dtype=cp.float16)
+    results = cp.zeros_like(x, dtype=cp.int16)
+    x = cp.deg2rad(x).astype(cp.float16)
+    window_size = int(cp.ceil(time_window * sample_rate))
+    for cnt, left in enumerate(range(0, n, batch_size)):
+        right = int(min(left + batch_size, n))
+        if cnt > 0:
+            left = left - window_size + 1
+        x_batch = x[left:right]
+        x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16)
+        x_batch = cp.sort(x_batch)
+        results[left + window_size - 1:right] = cp.abs(cp.rint(cp.rad2deg(cp.amin(cp.vstack([x_batch[:, -1] - x_batch[:, 0], 2 * cp.pi - cp.max(cp.diff(x_batch), axis=1)]).T, axis=1))))
+    return results.get()
+
+
+def sliding_circular_std(x: np.ndarray,
+                         time_window: float,
+                         sample_rate: float,
+                         batch_size: Optional[int] = int(5e+7)) -> np.ndarray:
+
+    """
+    Calculate the sliding circular standard deviation of time series data on GPU.
+ + This function computes the circular standard deviation over a sliding window for a given time series array. + The time series data is assumed to be in degrees, and the function converts it to radians for computation. + The sliding window approach is used to handle large datasets efficiently, processing the data in batches. + + The circular standard deviation (σ) is computed using the formula: + + .. math:: + + \sigma = \sqrt{-2 \cdot \log \left|\text{mean}\left(\exp(i \cdot x_{\text{batch}})\right)\right|} + + where :math:`x_{\text{batch}}` is the data within the current sliding window, and :math:`\text{mean}` and + :math:`\log` are computed in the circular (complex plane) domain. + + .. csv-table:: + :header: EXPECTED RUNTIMES + :file: ../../../docs/tables/sliding_circular_std.csv + :widths: 10, 45, 45 + :align: center + :header-rows: 1 + + .. seealso:: + For CPU function see :func:`~simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_circular_std`. + + :param np.ndarray x: The input time series data in degrees. Should be a 1D numpy array. + :param float time_window: The size of the sliding window in seconds. + :param float sample_rate: The sample rate of the time series data (i.e., hz, fps). + :param Optional[int] batch_size: The size of each batch for processing the data. Default is 5e+7 (50m). + + :return: A numpy array containing the sliding circular standard deviation values. + :rtype: np.ndarray + """ + + + n = x.shape[0] + x = cp.asarray(x, dtype=cp.float16) + results = cp.zeros_like(x, dtype=cp.float16) + x = np.deg2rad(x).astype(cp.float16) + window_size = int(np.ceil(time_window * sample_rate)) + for cnt, left in enumerate(range(0, n, batch_size)): + right = int(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size + 1 + x_batch = x[left:right] + x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16) + m = cp.log(cp.abs(cp.mean(cp.exp(1j * x_batch), axis=1))) + stdev = cp.rad2deg(cp.sqrt(-2 * m)) + results[left + window_size - 1:right] = stdev + + return results.get() + + +def sliding_rayleigh_z(x: np.ndarray, + time_window: float, + sample_rate: float, + batch_size: Optional[int] = int(5e+7)) -> Tuple[np.ndarray, np.ndarray]: + + """ + Computes the Rayleigh Z-statistic over a sliding window for a given time series of angles + + This function calculates the Rayleigh Z-statistic, which tests the null hypothesis that the population of angles + is uniformly distributed around the circle. The calculation is performed over a sliding window across the input + time series, and results are computed in batches for memory efficiency. + + Data is processed using GPU acceleration via CuPy, which allows for faster computation compared to a CPU-based approach. + + .. note:: + Adapted from ``pingouin.circular.circ_rayleigh`` and ``pycircstat.tests.rayleigh``. + + + **Rayleigh Z-statistic:** + + The Rayleigh Z-statistic is given by: + + .. math:: + + R = \frac{1}{n} \sqrt{\left(\sum_{i=1}^{n} \cos(\theta_i)\right)^2 + \left(\sum_{i=1}^{n} \sin(\theta_i)\right)^2} + + where: + - :math:`\theta_i` are the angles in the window. + - :math:`n` is the number of angles in the window. + + + .. csv-table:: + :header: EXPECTED RUNTIMES + :file: ../../../docs/tables/sliding_rayleigh_z.csv + :widths: 10, 45, 45 + :align: center + :header-rows: 1 + + .. seealso:: + For CPU function see :func:`~simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_rayleigh_z`. + + + :param np.ndarray x: Input array of angles in degrees. 
Should be a 1D numpy array.
+    :param float time_window: The size of the sliding window in time units (e.g., seconds).
+    :param float sample_rate: The sampling rate of the input time series in samples per time unit (e.g., Hz, fps).
+    :param Optional[int] batch_size: The number of samples to process in each batch. Default is 5e7 (50m). Reducing this value may save memory at the cost of longer computation time.
+    :return:
+       A tuple containing two numpy arrays:
+       - **z_results**: Rayleigh Z-statistics for each position in the input array where the window was fully applied.
+       - **p_results**: Corresponding p-values for the Rayleigh Z-statistics.
+    :rtype: Tuple[np.ndarray, np.ndarray]
+    """
+
+    n = x.shape[0]
+    x = cp.asarray(x, dtype=cp.float16)
+    z_results = cp.zeros_like(x, dtype=cp.float16)
+    p_results = cp.zeros_like(x, dtype=cp.float16)
+    x = np.deg2rad(x).astype(cp.float16)
+    window_size = int(np.ceil(time_window * sample_rate))
+    for cnt, left in enumerate(range(0, n, batch_size)):
+        right = int(min(left + batch_size, n))
+        if cnt > 0:
+            left = left - window_size + 1
+        x_batch = x[left:right]
+        x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16)
+        cos_sums = cp.nansum(cp.cos(x_batch), axis=1) ** 2
+        sin_sums = cp.nansum(cp.sin(x_batch), axis=1) ** 2
+        r = cp.sqrt(cos_sums + sin_sums) / window_size
+        Z = window_size * (r ** 2)
+        # The p-value approximation requires the unnormalized resultant length R = n * r (cf. pingouin.circ_rayleigh).
+        R = r * window_size
+        P = cp.exp(cp.sqrt(1 + 4 * window_size + 4 * (window_size ** 2 - R ** 2)) - (1 + 2 * window_size))
+        z_results[left + window_size - 1:right] = Z
+        p_results[left + window_size - 1:right] = P
+
+    return z_results.get(), p_results.get()
+
+
+def sliding_resultant_vector_length(x: np.ndarray,
+                                    time_window: float,
+                                    sample_rate: int,
+                                    batch_size: Optional[int] = 3e+7) -> np.ndarray:
+
+    """
+    Calculate the sliding resultant vector length over a time window for a series of angles.
+
+    This function computes the resultant vector length (R) for each window of angles in the input array `x`.
+    The resultant vector length is a measure of the concentration of angles, and it ranges from 0 to 1, where 1
+    indicates all angles point in the same direction, and 0 indicates uniform distribution of angles.
+
+    For a given sliding window of angles, the resultant vector length :math:`R` is calculated using the following formula:
+
+    .. math::
+       R = \\frac{1}{N} \\sqrt{\\left(\\sum_{i=1}^{N} \\cos(\\theta_i)\\right)^2 + \\left(\\sum_{i=1}^{N} \\sin(\\theta_i)\\right)^2}
+
+    where:
+
+    - :math:`\\theta_i` are the angles in radians within the sliding window
+    - :math:`N` is the number of samples in the window
+
+    The computation is performed in a sliding window manner over the entire array, utilizing GPU acceleration
+    with CuPy for efficiency, especially on large datasets.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/sliding_resultant_vector_length.csv
+       :widths: 10, 10, 80
+       :align: center
+       :header-rows: 1
+
+    .. seealso::
+       For CPU function see :func:`~simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_resultant_vector_length`.
+
+    :param np.ndarray x: Input array containing angle values in degrees. The array should be 1-dimensional.
+    :param float time_window: Time duration for the sliding window, in seconds. This determines the number of samples in each window based on the `sample_rate`.
+    :param int sample_rate: The number of samples per second (i.e., FPS). This is used to calculate the window size in terms of array indices.
+    :param Optional[int] batch_size: The maximum number of elements to process in each batch. This is used to handle large arrays by processing them in chunks to avoid memory overflow. Defaults to 3e+7 (30 million elements).
+    :return np.ndarray: A 1D numpy array of the same length as `x`, containing the resultant vector length for each sliding window. Values before the window is fully populated will be set to -1.
+
+    :example:
+    >>> x = np.random.randint(0, 361, (5000, )).astype(np.int32)
+    >>> results = sliding_resultant_vector_length(x, 1, 10)
+    """
+
+    window_size = np.ceil(time_window * sample_rate).astype(np.int64)
+    n = x.shape[0]
+    results = cp.full(x.shape[0], -1, dtype=np.float32)
+    for cnt, left in enumerate(range(0, int(n), int(batch_size))):
+        right = np.int32(min(left + batch_size, n))
+        if cnt > 0:
+            left = left - window_size + 1
+        x_batch = cp.asarray(x[left:right])
+        x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size)
+        x_batch = np.deg2rad(x_batch)
+        cos, sin = cp.cos(x_batch).astype(np.float32), cp.sin(x_batch).astype(np.float32)
+        cos_sum, sin_sum = cp.sum(cos, axis=1), cp.sum(sin, axis=1)
+        r = np.sqrt(cos_sum ** 2 + sin_sum ** 2) / window_size
+        results[left + window_size - 1:right] = r
+    return results.get()
+
+
+def direction_from_three_bps(x: np.ndarray,
+                             y: np.ndarray,
+                             z: np.ndarray,
+                             batch_size: Optional[int] = int(1.5e+7)) -> np.ndarray:
+
+    """
+    Calculate the direction angle based on the coordinates of three body points using GPU acceleration.
+
+    This function computes the mean direction angle (in degrees) for a batch of coordinates
+    provided in the form of NumPy arrays. The calculation is based on the arctangent of the
+    difference in x and y coordinates between pairs of points. The result is a value in
+    the range [0, 360) degrees.
+
+    .. seealso::
+       :func:`simba.mixins.circular_statistics.CircularStatisticsMixin.direction_three_bps`
+
+    :param np.ndarray x: A 2D array of shape (N, 2) containing the coordinates of the first body part (nose).
+    :param np.ndarray y: A 2D array of shape (N, 2) containing the coordinates of the second body part (left ear).
+    :param np.ndarray z: A 2D array of shape (N, 2) containing the coordinates of the third body part (right ear).
+    :param Optional[int] batch_size: The size of the batch to be processed in each iteration. Default is 15 million.
+    :return: An array of shape (N,) containing the computed direction angles in degrees.
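+
+    :example:
+    >>> # Hypothetical usage sketch (assumes CuPy and a CUDA device; coordinates are synthetic).
+    >>> nose = np.random.randint(0, 500, (1000, 2)).astype(np.int32)
+    >>> left_ear = np.random.randint(0, 500, (1000, 2)).astype(np.int32)
+    >>> right_ear = np.random.randint(0, 500, (1000, 2)).astype(np.int32)
+    >>> angles = direction_from_three_bps(x=nose, y=left_ear, z=right_ear)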
+    :rtype: np.ndarray
+    """
+
+    check_valid_array(data=x, source=direction_from_three_bps.__name__, accepted_ndims=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_valid_array(data=y, source=direction_from_three_bps.__name__, accepted_shapes=(x.shape,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_valid_array(data=z, source=direction_from_three_bps.__name__, accepted_shapes=(x.shape,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_int(value=batch_size, name=direction_from_three_bps.__name__, min_value=1)
+    results = cp.full((x.shape[0]), fill_value=-1, dtype=np.int16)
+
+    for l in range(0, x.shape[0], batch_size):
+        r = l + batch_size
+        x_batch = cp.array(x[l:r])
+        y_batch = cp.array(y[l:r])
+        z_batch = cp.array(z[l:r])
+        left_ear_to_nose = cp.arctan2(x_batch[:, 0] - y_batch[:, 0], y_batch[:, 1] - x_batch[:, 1])
+        right_ear_nose = cp.arctan2(x_batch[:, 0] - z_batch[:, 0], z_batch[:, 1] - x_batch[:, 1])
+        mean_angle_rad = cp.arctan2(cp.sin(left_ear_to_nose) + cp.sin(right_ear_nose), cp.cos(left_ear_to_nose) + cp.cos(right_ear_nose))
+        results[l:r] = (cp.degrees(mean_angle_rad) + 360) % 360
+
+    return results.get()
+
+
+@cuda.jit()
+def _instantaneous_angular_velocity(x, stride, results):
+    r = cuda.grid(1)
+    l = np.int32(r - (stride[0]))
+    if r >= results.shape[0]:
+        return
+    elif l < 0:
+        results[r] = -1
+    else:
+        d = math.pi - (abs(math.pi - abs(x[l] - x[r])))
+        results[r] = d * (180 / math.pi)
+
+
+def instantaneous_angular_velocity(x: np.ndarray, stride: Optional[int] = 1) -> np.ndarray:
+    """
+    Calculate the instantaneous angular velocity between angles in a given array.
+
+    This function uses CUDA to perform parallel computations on the GPU.
+
+    The angular velocity is computed using the difference in angles between
+    the current and previous values (with a specified stride) in the array.
+    The result is returned in degrees per unit time.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/instantaneous_angular_velocity.csv
+       :widths: 10, 90
+       :align: center
+       :header-rows: 1
+
+    .. math::
+       \\omega = \\frac{\\Delta \\theta}{\\Delta t} = \\frac{180}{\\pi} \\times \\left( \\pi - \\left| \\pi - \\left| \\theta_r - \\theta_l \\right| \\right| \\right)
+
+    where:
+
+    - :math:`\\theta_r` is the current angle.
+    - :math:`\\theta_l` is the angle at the specified stride before the current angle.
+    - :math:`\\Delta t` is the time difference between the two angles.
+
+    .. seealso::
+       :func:`simba.mixins.circular_statistics.CircularStatisticsMixin.instantaneous_angular_velocity`
+
+    :param np.ndarray x: Array of angles in degrees, for which the instantaneous angular velocity will be calculated.
+    :param Optional[int] stride: The stride or lag (in frames) to use when calculating the difference in angles. Defaults to 1.
+    :return: Array of instantaneous angular velocities corresponding to the input angles. Velocities are in degrees per unit time.
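+
+    :example:
+    >>> # Illustrative only (assumes numba.cuda and a CUDA device; angles are synthetic degrees).
+    >>> x = np.random.randint(0, 361, (1000, )).astype(np.float32)
+    >>> velocities = instantaneous_angular_velocity(x=x, stride=1)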
+    :rtype: np.ndarray
+    """
+
+    x = np.deg2rad(x).astype(np.float32)  # keep radians in float32; integer casting would destroy sub-radian precision
+    stride = np.array([stride]).astype(np.int64)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    x_dev = cuda.to_device(x)
+    stride_dev = cuda.to_device(stride)
+    results = cuda.device_array(x.shape[0], dtype=np.float32)
+    _instantaneous_angular_velocity[bpg, THREADS_PER_BLOCK](x_dev, stride_dev, results)
+    return results.copy_to_host()
+
+
+@cuda.jit(device=True)
+def _rad2deg(x):
+    return x * (180 / math.pi)
+
+
+@cuda.jit()
+def _sliding_bearing(x, stride, results):
+    r = cuda.grid(1)
+    l = np.int32(r - (stride[0]))
+    if r > results.shape[0] - 1:
+        return
+    elif l < 0:
+        results[r] = -1
+    else:
+        x1, y1 = x[l, 0], x[l, 1]
+        x2, y2 = x[r, 0], x[r, 1]
+        bearing = _rad2deg(math.atan2(x2 - x1, y2 - y1))
+        results[r] = (bearing + 360) % 360
+
+
+def sliding_bearing(x: np.ndarray,
+                    stride: Optional[float] = 1,
+                    sample_rate: Optional[float] = 1) -> np.ndarray:
+    """
+    Compute the bearing between consecutive points in a 2D coordinate array using a sliding window approach with GPU acceleration.
+
+    This function calculates the angle (bearing) in degrees between each point and a point a certain number of
+    steps ahead (defined by `stride`) in the 2D coordinate array `x`. The bearing is calculated using the
+    arctangent of the difference in coordinates, converted from radians to degrees.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/sliding_bearing.csv
+       :widths: 10, 90
+       :align: center
+       :header-rows: 1
+
+    .. seealso::
+       :func:`simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_bearing`
+
+    :param np.ndarray x: A 2D array of shape `(n, 2)` where each row represents a point with `x` and `y` coordinates. The array must be numeric.
+    :param Optional[float] stride: The time (multiplied by `sample_rate`) to look ahead when computing the bearing, in seconds. Defaults to 1.
+    :param Optional[float] sample_rate: A multiplier applied to the `stride` value to determine the actual step size for calculating the bearing. E.g., frames per second. Defaults to 1. If the resulting stride is less than 1, it is automatically set to 1.
+    :return: A 1D array of shape `(n,)` containing the calculated bearings in degrees. Values outside the valid range (i.e., where the stride exceeds array bounds) are set to -1.
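+
+    :example:
+    >>> # Hypothetical sketch (assumes numba.cuda and a CUDA device; coordinates are synthetic).
+    >>> x = np.random.randint(0, 500, (100, 2)).astype(np.float32)
+    >>> bearings = sliding_bearing(x=x, stride=1, sample_rate=10)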
+ :rtype: np.ndarray + """ + + check_valid_array(data=x, source=f'{sliding_bearing.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_float(name=f'{sliding_bearing.__name__} stride', value=stride, min_value=10e-6, max_value=x.shape[0]-1) + check_float(name=f'{sliding_bearing.__name__} sample_rate', value=sample_rate, min_value=10e-6, max_value=x.shape[0]-1) + stride = int(stride * sample_rate) + if stride < 1: + stride = 1 + stride = np.array([stride]).astype(np.int64) + bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK + x_dev = cuda.to_device(x) + stride_dev = cuda.to_device(stride) + results = cuda.device_array(x.shape[0], dtype=np.float32) + _sliding_bearing[bpg, THREADS_PER_BLOCK](x_dev, stride_dev, results) + return results.copy_to_host() + + +@cuda.jit(device=True) +def _rad2deg(x): + return x * (180 / math.pi) + + +@cuda.jit() +def _sliding_angular_diff(data, strides, results): + x, y = cuda.grid(2) + if (x > data.shape[0] - 1) or (y > strides.shape[0] - 1): + return + else: + stride = int(strides[y]) + if x - stride < 0: + return + a_2 = data[x] + a_1 = data[x - stride] + distance = math.pi - abs(math.pi - abs(a_1 - a_2)) + distance = abs(int(_rad2deg(distance)) + 1) + results[x][y] = distance + + +def sliding_angular_diff(x: np.ndarray, + time_windows: np.ndarray, + fps: float) -> np.ndarray: + """ + Calculate the sliding angular differences for a given time window using GPU acceleration. + + + This function computes the angular differences between each angle in `x` + and the corresponding angle located at a distance determined by the time window + and frame rate (fps). The results are returned as a 2D array where each row corresponds + to a position in `x`, and each column corresponds to a different time window. + + .. csv-table:: + :header: EXPECTED RUNTIMES + :file: ../../../docs/tables/sliding_angular_diff.csv + :widths: 10, 90 + :align: center + :header-rows: 1 + + + .. seealso:: + :func:`simba.mixins.circular_statistics.CircularStatisticsMixin.sliding_angular_diff` + + .. math:: + \text{difference} = \pi - |\pi - |a_1 - a_2|| + + Where: + - \( a_1 \) is the angle at position `x`. + - \( a_2 \) is the angle at position `x - \text{stride}`. + + :param np.ndarray x: 1D array of angles in degrees. + :param np.ndarray time_windows: 1D array of time windows in seconds to determine the stride (distance in frames) between angles. + :param float fps: Frame rate (frames per second) used to convert time windows to strides. + :return: 2D array of angular differences. Each row corresponds to an angle in `x`, and each column corresponds to a time window. 
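+
+    :example:
+    >>> # Hedged usage sketch (assumes numba.cuda and a CUDA device; angles are synthetic degrees).
+    >>> x = np.random.randint(0, 361, (1000, )).astype(np.float32)
+    >>> diffs = sliding_angular_diff(x=x, time_windows=np.array([0.5, 1.0]), fps=10.0)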
+    :rtype: np.ndarray
+    """
+
+    x = np.deg2rad(x)
+    strides = np.zeros(time_windows.shape[0], dtype=np.int64)
+    for i in range(time_windows.shape[0]):
+        strides[i] = np.ceil(time_windows[i] * fps).astype(np.int32)
+    x_dev = cuda.to_device(x)
+    stride_dev = cuda.to_device(strides)
+    results = cuda.device_array((x.shape[0], time_windows.shape[0]))
+    grid_x = (x.shape[0] + THREADS_PER_BLOCK - 1) // THREADS_PER_BLOCK
+    grid_y = strides.shape[0]  # one block per time-window along y, since blockDim.y is 1 when a scalar thread count is passed
+    blocks_per_grid = (grid_x, grid_y)
+    _sliding_angular_diff[blocks_per_grid, THREADS_PER_BLOCK](x_dev, stride_dev, results)
+    results = results.copy_to_host().astype(np.int32)
+    return results
+
+
diff --git a/simba/sandbox/clean_sleap_filename.py b/simba/sandbox/clean_sleap_filename.py
new file mode 100644
index 000000000..6e83bb51e
--- /dev/null
+++ b/simba/sandbox/clean_sleap_filename.py
@@ -0,0 +1,28 @@
+
+
+def clean_sleap_file_name(filename: str) -> str:
+    """
+    Clean a SLEAP output filename by removing the '.analysis' suffix and the project-name and video-number prefix, so the result matches the original video name.
+
+    .. note::
+       Modified from `vtsai881 `_.
+
+    :param str filename: The original filename to be cleaned to match the video name.
+    :returns str: The cleaned filename.
+
+    :example:
+    >>> clean_sleap_file_name("projectname.v00x.00x_videoname.analysis.csv")
+    >>> 'videoname.csv'
+    >>> clean_sleap_file_name("projectname.v00x.00x_videoname.analysis.h5")
+    >>> 'videoname.h5'
+    """
+
+    if (".analysis" in filename.lower()) and ("_" in filename) and (filename.count('.') >= 3):
+        filename_parts = filename.split('.')
+        video_num_name = filename_parts[2]
+        if '_' in video_num_name:
+            return f"{video_num_name.split('_', 1)[1]}.{filename_parts[-1]}"
+        else:
+            return filename
+    else:
+        return filename
diff --git a/simba/sandbox/clip_multiple_videos_by_frame_numbers.py b/simba/sandbox/clip_multiple_videos_by_frame_numbers.py
new file mode 100644
index 000000000..3e0a77236
--- /dev/null
+++ b/simba/sandbox/clip_multiple_videos_by_frame_numbers.py
@@ -0,0 +1,161 @@
+import os
+from typing import Union
+from tkinter import *
+
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.utils.checks import check_if_dir_exists, check_int, check_float, check_that_hhmmss_start_is_before_end
+from simba.utils.read_write import find_all_videos_in_directory, get_video_meta_data, seconds_to_timestamp, check_if_hhmmss_timestamp_is_valid_part_of_video
+from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, Entry_Box, FolderSelect)
+from simba.utils.enums import Keys, Links
+from simba.utils.errors import FrameRangeError
+from simba.video_processors.video_processing import clip_videos_by_frame_ids, clip_video_in_range
+from simba.utils.printing import stdout_success, SimbaTimer
+
+
+class ClipMultipleVideosByFrameNumbers(PopUpMixin):
+
+    def __init__(self,
+                 data_dir: Union[str, os.PathLike],
+                 save_dir: Union[str, os.PathLike]):
+
+        check_if_dir_exists(in_dir=data_dir, source=self.__class__.__name__, create_if_not_exist=False)
+        check_if_dir_exists(in_dir=save_dir, source=self.__class__.__name__, create_if_not_exist=True)
+        self.video_paths = find_all_videos_in_directory(directory=data_dir, as_dict=True, raise_error=True)
+        self.video_meta_data = [get_video_meta_data(video_path=x)['frame_count'] for x in list(self.video_paths.values())]
+        max_video_name_len = len(max(list(self.video_paths.keys()), key=len))
+        super().__init__(title="CLIP MULTIPLE VIDEOS BY FRAME NUMBERS")
+        self.save_dir = save_dir
+        data_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="VIDEO SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        data_frm.grid(row=0, column=0, sticky=NW)
+        Label(data_frm, text="VIDEO NAME", width=max_video_name_len).grid(row=0, column=0, sticky=NW)
+        Label(data_frm, text="TOTAL FRAMES", width=10).grid(row=0, column=1)
+        Label(data_frm, text="START FRAME", width=10).grid(row=0, column=2)
+        Label(data_frm, text="END FRAME", width=10).grid(row=0, column=3)
+        self.entry_boxes = {}
+        for cnt, video_name in enumerate(self.video_paths.keys()):
+            self.entry_boxes[video_name] = {}
+            Label(data_frm, text=video_name, width=max_video_name_len).grid(row=cnt+1, column=0, sticky=NW)
+            Label(data_frm, text=self.video_meta_data[cnt], width=max_video_name_len).grid(row=cnt + 1, column=1, sticky=NW)
+            self.entry_boxes[video_name]['start'] = Entry_Box(data_frm, "", 5, validation="numeric")
+            self.entry_boxes[video_name]['end'] = Entry_Box(data_frm, "", 5, validation="numeric")
+            self.entry_boxes[video_name]['start'].grid(row=cnt+1, column=2, sticky=NW)
+            self.entry_boxes[video_name]['end'].grid(row=cnt+1, column=3, sticky=NW)
+        self.create_run_frm(run_function=self.run, btn_txt_clr='blue')
+        self.main_frm.mainloop()
+
+    def run(self):
+        video_paths, frame_ids = [], []
+        for cnt, (video_name, v) in enumerate(self.entry_boxes.items()):
+            video_paths.append(self.video_paths[video_name])
+            video_frm_cnt = self.video_meta_data[cnt]
+            check_int(name=f'START {video_name}', value=v['start'].entry_get, min_value=0)
+            check_int(name=f'END {video_name}', value=v['end'].entry_get, min_value=1)
+            start, end = int(v['start'].entry_get), int(v['end'].entry_get)
+            if start >= end: raise FrameRangeError(msg=f'For video {video_name}, the start frame ({start}) is after or the same as the end frame ({end})', source=__class__.__name__)
+            if (start < 0) or (end < 1):
+                raise FrameRangeError(msg=f'For video {video_name}, the start frame has to be at least 0 and the end frame has to be at least 1', source=__class__.__name__)
+            if start > video_frm_cnt:
+                raise FrameRangeError(msg=f'The video {video_name} has {video_frm_cnt} frames, which is less than the start frame: {start}', source=__class__.__name__)
+            if end > video_frm_cnt:
+                raise FrameRangeError(msg=f'The video {video_name} has {video_frm_cnt} frames, which is less than the end frame: {end}', source=__class__.__name__)
+            frame_ids.append([start, end])
+
+        _ = clip_videos_by_frame_ids(file_paths=video_paths, frm_ids=frame_ids, save_dir=self.save_dir)
+
+class InitiateClipMultipleVideosByFrameNumbersPopUp(PopUpMixin):
+    def __init__(self):
+        super().__init__(title="CLIP MULTIPLE VIDEOS BY FRAME NUMBERS")
+        data_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SELECT DATA DIRECTORIES", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.input_folder = FolderSelect(data_frm, "Video directory:", title="Select Folder with videos", lblwidth=20)
+        self.output_folder = FolderSelect(data_frm, "Output directory:", title="Select a folder for your output videos", lblwidth=20)
+        data_frm.grid(row=0, column=0, sticky=NW)
+        self.input_folder.grid(row=0, column=0, sticky=NW)
+        self.output_folder.grid(row=1, column=0, sticky=NW)
+        self.create_run_frm(run_function=self.run)
+        self.main_frm.mainloop()
+
+    def run(self):
+        check_if_dir_exists(in_dir=self.input_folder.folder_path, source=self.__class__.__name__, create_if_not_exist=False)
+        check_if_dir_exists(in_dir=self.output_folder.folder_path, source=self.__class__.__name__, create_if_not_exist=True)
+        self.video_paths = find_all_videos_in_directory(directory=self.input_folder.folder_path, as_dict=True, raise_error=True)
+        self.root.destroy()
+        _ = ClipMultipleVideosByFrameNumbers(data_dir=self.input_folder.folder_path, save_dir=self.output_folder.folder_path)
+
+
+class ClipMultipleVideosByTimestamps(PopUpMixin):
+
+    def __init__(self,
+                 data_dir: Union[str, os.PathLike],
+                 save_dir: Union[str, os.PathLike]):
+
+        check_if_dir_exists(in_dir=data_dir, source=self.__class__.__name__, create_if_not_exist=False)
+        check_if_dir_exists(in_dir=save_dir, source=self.__class__.__name__, create_if_not_exist=True)
+        self.video_paths = find_all_videos_in_directory(directory=data_dir, as_dict=True, raise_error=True)
+        self.video_meta_data = [get_video_meta_data(video_path=x) for x in list(self.video_paths.values())]
+        max_video_name_len = len(max(list(self.video_paths.keys()), key=len))
+        super().__init__(title="CLIP MULTIPLE VIDEOS BY TIME-STAMPS")
+        self.save_dir = save_dir
+        data_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="VIDEO SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        data_frm.grid(row=0, column=0, sticky=NW)
+        Label(data_frm, text="VIDEO NAME", width=max_video_name_len).grid(row=0, column=0, sticky=NW)
+        Label(data_frm, text="VIDEO LENGTH", width=10).grid(row=0, column=1)
+        Label(data_frm, text="START TIME (HH:MM:SS)", width=18).grid(row=0, column=2)
+        Label(data_frm, text="END TIME (HH:MM:SS)", width=18).grid(row=0, column=3)
+
+        self.entry_boxes = {}
+        for cnt, video_name in enumerate(self.video_paths.keys()):
+            self.entry_boxes[video_name] = {}
+            Label(data_frm, text=video_name, width=max_video_name_len).grid(row=cnt+1, column=0, sticky=NW)
+            video_length = self.video_meta_data[cnt]['video_length_s']
+            video_length_hhmmss = seconds_to_timestamp(seconds=video_length)
+            Label(data_frm, text=video_length_hhmmss, width=max_video_name_len).grid(row=cnt + 1, column=1, sticky=NW)
+            self.entry_boxes[video_name]['start'] = Entry_Box(data_frm, "", 5)
+            self.entry_boxes[video_name]['end'] = Entry_Box(data_frm, "", 5)
+            self.entry_boxes[video_name]['start'].grid(row=cnt+1, column=2, sticky=NW)
+            self.entry_boxes[video_name]['end'].grid(row=cnt+1, column=3, sticky=NW)
+        self.create_run_frm(run_function=self.run, btn_txt_clr='blue')
+        self.main_frm.mainloop()
+
+    def run(self):
+        timer = SimbaTimer(start=True)
+        for cnt, (video_name, v) in enumerate(self.entry_boxes.items()):
+            start, end = v['start'].entry_get, v['end'].entry_get
+            check_that_hhmmss_start_is_before_end(start_time=start, end_time=end, name=video_name)
+            check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=start, video_path=self.video_paths[video_name])
+            check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=end, video_path=self.video_paths[video_name])
+            clip_video_in_range(file_path=self.video_paths[video_name], start_time=start, end_time=end, out_dir=self.save_dir, overwrite=True, include_clip_time_in_filename=False)
+        timer.stop_timer()
+        stdout_success(msg=f'{len(self.entry_boxes)} videos clipped by time-stamps and saved in {self.save_dir}', elapsed_time=timer.elapsed_time_str)
+
+
+class InitiateClipMultipleVideosByTimestampsPopUp(PopUpMixin):
+    def __init__(self):
+        super().__init__(title="CLIP MULTIPLE VIDEOS BY TIME-STAMPS")
+        data_frm = 
CreateLabelFrameWithIcon(parent=self.main_frm, header="SELECT DATA DIRECTORIES", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.input_folder = FolderSelect(data_frm, "Video directory:", title="Select Folder with videos", lblwidth=20) + self.output_folder = FolderSelect(data_frm, "Output directory:", title="Select a folder for your output videos", lblwidth=20) + data_frm.grid(row=0, column=0, sticky=NW) + self.input_folder.grid(row=0, column=0, sticky=NW) + self.output_folder.grid(row=1, column=0, sticky=NW) + self.create_run_frm(run_function=self.run) + self.main_frm.mainloop() + + def run(self): + check_if_dir_exists(in_dir=self.input_folder.folder_path, source=self.__class__.__name__, create_if_not_exist=False) + check_if_dir_exists(in_dir=self.output_folder.folder_path, source=self.__class__.__name__, create_if_not_exist=True) + self.video_paths = find_all_videos_in_directory(directory=self.input_folder.folder_path, as_dict=True, raise_error=True) + self.root.destroy() + _ = ClipMultipleVideosByTimestamps(data_dir=self.input_folder.folder_path, save_dir=self.output_folder.folder_path) + +#InitiateClipMultipleVideosByTimestampsPopUp() + +#ClipMultipleVideosByTimestamps(data_dir='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/videos/test', save_dir='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/videos/clipped') + + +#InitiateClipMultipleVideosByFrameNumbersPopUp() +#ClipMultipleVideosByFrameNumbers(data_dir='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/videos/test', save_dir='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/videos/clipped') diff --git a/simba/sandbox/clip_videos_by_frm.py b/simba/sandbox/clip_videos_by_frm.py new file mode 100644 index 000000000..4af11da07 --- /dev/null +++ b/simba/sandbox/clip_videos_by_frm.py @@ -0,0 +1,63 @@ +import os +from typing import List, Union, Tuple, Optional +import subprocess +from simba.utils.read_write import get_video_meta_data, get_fn_ext +from simba.utils.checks import check_valid_lst, check_int, check_if_dir_exists +from simba.utils.errors import FrameRangeError +from simba.utils.printing import stdout_success, SimbaTimer + + +def clip_videos_by_frame_ids(file_paths: List[Union[str, os.PathLike]], + frm_ids: List[List[int]], + save_dir: Optional[Union[str, os.PathLike]] = None): + + """ + Clip videos specified by frame IDs (numbers). + + :param List[Union[str, os.PathLike]] file_paths: List of paths to input video files. + :param List[List[int]] frm_ids: List of lists containing start and end frame IDs for each video. + :param Optional[Union[str, os.PathLike]] save_dir: Directory to save the clipped videos. If None, videos will be saved in the same directory as the input videos with frame numbers as suffix. + :return: None. 
+
+    :example:
+    >>> file_paths = ['/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10.mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10_1.mp4']
+    >>> frm_ids = [[0, 20], [20, 40]]
+    >>> clip_videos_by_frame_ids(file_paths=file_paths, frm_ids=frm_ids, save_dir='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/trial_cnt')
+    """
+
+    timer = SimbaTimer(start=True)
+    check_valid_lst(data=file_paths, source=clip_videos_by_frame_ids.__name__, valid_dtypes=(str,), min_len=1)
+    check_valid_lst(data=frm_ids, source=clip_videos_by_frame_ids.__name__, valid_dtypes=(list,), exact_len=len(file_paths))
+    for cnt, i in enumerate(frm_ids):
+        check_valid_lst(data=i, source=f'{clip_videos_by_frame_ids.__name__} frm_ids {cnt}', valid_dtypes=(int,), exact_len=2)
+        if i[0] >= i[1]: raise FrameRangeError(msg=f'Start frame for video {i} is after or the same as the end frame ({i[0]}, {i[1]})', source=clip_videos_by_frame_ids.__name__)
+        if (i[0] < 0) or (i[1] < 1): raise FrameRangeError(msg='Start frame has to be at least 0 and end frame has to be at least 1', source=clip_videos_by_frame_ids.__name__)
+    video_meta_data = [get_video_meta_data(video_path=x) for x in file_paths]
+    for i in range(len(video_meta_data)):
+        if (frm_ids[i][0] > video_meta_data[i]['frame_count']) or (frm_ids[i][1] > video_meta_data[i]['frame_count']):
+            raise FrameRangeError(msg=f'Video {i+1} has {video_meta_data[i]["frame_count"]} frames, cannot use start and end frame {frm_ids[i]}', source=clip_videos_by_frame_ids.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir, source=clip_videos_by_frame_ids.__name__, create_if_not_exist=True)
+    for cnt, file_path in enumerate(file_paths):
+        video_timer = SimbaTimer(start=True)
+        dir, video_name, ext = get_fn_ext(filepath=file_path)
+        s_f, e_f = frm_ids[cnt][0], frm_ids[cnt][1]
+        print(f'Trimming {video_name} from frame {s_f} to frame {e_f}...')
+        if save_dir is not None:
+            out_path = os.path.join(save_dir, os.path.basename(file_path))
+        else:
+            out_path = os.path.join(dir, f'{video_name}_{s_f}_{e_f}{ext}')
+        cmd = f'ffmpeg -i "{file_path}" -vf "trim=start_frame={s_f}:end_frame={e_f},setpts=PTS-STARTPTS" -c:v libx264 -c:a aac -loglevel error -stats "{out_path}" -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+        video_timer.stop_timer()
+        print(f'Video {video_name} complete (elapsed time {video_timer.elapsed_time_str}s)')
+    timer.stop_timer()
+    if save_dir is None:
+        stdout_success(msg=f'{len(file_paths)} video(s) clipped by frame', elapsed_time=timer.elapsed_time_str)
+    else:
+        stdout_success(msg=f'{len(file_paths)} video(s) clipped by frame and saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
+
+
+# file_paths = ['/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10.mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10_1.mp4']
+# frm_ids = [[0, 20], [20, 40]]
+# clip_videos_by_frame_ids(file_paths=file_paths, frm_ids=frm_ids, save_dir='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/trial_cnt')
diff --git a/simba/sandbox/cochran_q.py b/simba/sandbox/cochran_q.py
new file mode 100644
index 000000000..65d76d66a
--- /dev/null
+++ b/simba/sandbox/cochran_q.py
@@ -0,0 +1,43 @@
+from typing import Tuple
+
+import pandas as pd
+import numpy as np
+from simba.utils.checks import check_valid_array
+from simba.utils.errors import InvalidInputError
+from simba.utils.read_write import read_pickle
+from scipy import stats
+
+def cochrans_q(data: np.ndarray) -> Tuple[float, float]:
+    """
+    Compute Cochran's Q for a 2-dimensional boolean array.
+
+    Can be used to evaluate if the performance of multiple (>=2) classifiers on the same data is the same or significantly different.
+
+    .. note::
+       If two classifiers, consider ``simba.mixins.statistics.Statistics.mcnemar``.
+       `Useful background `__.
+
+    :param np.ndarray data: Two dimensional array of boolean values where axis 1 represents classifiers or features and rows represent frames.
+    :return: Cochran's Q statistic and the associated chi-square p-value.
+    :rtype: Tuple[float, float]
+
+    :example:
+    >>> data = np.random.randint(0, 2, (100000, 4))
+    >>> cochrans_q(data=data)
+    """
+    check_valid_array(data=data, source=cochrans_q.__name__, accepted_ndims=(2,))
+    additional = list(set(list(np.sort(np.unique(data)))) - {0, 1})
+    if len(additional) > 0:
+        raise InvalidInputError(msg=f"Cochran's Q requires binary input data but found {additional}", source=cochrans_q.__name__)
+    col_sums = np.sum(data, axis=0)
+    row_sum_sum = np.sum(np.sum(data, axis=1))
+    row_sum_square_sum = np.sum(np.square(np.sum(data, axis=1)))
+    k = data.shape[1]
+    g2 = np.sum(np.square(col_sums))
+    nominator = (k - 1) * ((k * g2) - np.square(np.sum(col_sums)))
+    denominator = (k * row_sum_sum) - row_sum_square_sum
+    if nominator == 0 or denominator == 0:
+        return -1.0, -1.0
+    else:
+        q = nominator / denominator
+        return q, stats.chi2.sf(q, k - 1)
+
+
+critical_values = read_pickle('/Users/simon/Desktop/envs/simba/simba/simba/assets/lookups/critical_values_05.pickle')
\ No newline at end of file
diff --git a/simba/sandbox/coco.py b/simba/sandbox/coco.py
new file mode 100644
index 000000000..aac2bec61
--- /dev/null
+++ b/simba/sandbox/coco.py
@@ -0,0 +1,151 @@
+import os
+from typing import List, Optional, Union, Dict, Any, Tuple
+from pycocotools import mask
+
+import cv2
+import json
+from simba.mixins.image_mixin import ImageMixin
+from simba.utils.read_write import read_df, get_video_meta_data, read_frm_of_video, find_core_cnt
+import numpy as np
+from simba.mixins.geometry_mixin import GeometryMixin
+from shapely.geometry import Polygon
+from datetime import datetime
+from numba import njit, prange
+from simba.utils.enums import Defaults
+import multiprocessing
+from simba.sandbox.keyPoi import _geometries_to_exterior_keypoints_helper
+from simba.utils.checks import check_int, check_valid_lst, check_instance, check_valid_array
+from simba.utils.enums import Formats
+from skimage.draw import polygon
+import base64
+
+
+def geometry_to_rle(geometry: Union[np.ndarray, Polygon], img_size: Tuple[int, int]):
+    check_instance(source=geometry_to_rle.__name__, instance=geometry, accepted_types=(Polygon, np.ndarray))
+    if isinstance(geometry, (Polygon,)):
+        geometry = np.array(geometry.exterior.coords)  # coerce the coordinate sequence to an array so it can be indexed below
+    else:
+        check_valid_array(data=geometry, source=geometry_to_rle.__name__, accepted_ndims=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    binary_mask = np.zeros(img_size, dtype=np.uint8)
+    rr, cc = polygon(geometry[:, 0].flatten(), geometry[:, 1].flatten(), img_size)
+    binary_mask[rr, cc] = 1
+    pixels = binary_mask.flatten()
+    pixels = np.concatenate([[0], pixels, [0]])
+    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
+    runs[1::2] -= runs[::2]
+    rle_counts = ' '.join(map(str, runs))
+    compressed_counts = base64.b64encode(rle_counts.encode('ascii')).decode('ascii')
+    return {'counts': compressed_counts, 'size': list(binary_mask.shape)}
+
+# data_path = r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\FRR_gq_Saline_0624.csv"
+# animal_data = read_df(file_path=data_path, file_type='csv', usecols=['Nose_x', 'Nose_y', 'Tail_base_x', 'Tail_base_y', 'Left_side_x', 'Left_side_y', 'Right_side_x', 'Right_side_y']).values.reshape(-1, 4, 2)[0:20].astype(np.int32)
+# animal_data = animal_data[0]
+# geometry_to_rle(geometry=animal_data, img_size=(1000, 1000))
+
+
+def geometries_to_coco(geometries: Dict[str, np.ndarray],
+                       video_path: Union[str, os.PathLike],
+                       save_dir: Union[str, os.PathLike],
+                       version: Optional[int] = 1,
+                       description: Optional[str] = None,
+                       licences: Optional[str] = None):
+
+    categories = []
+    for cnt, i in enumerate(geometries.keys()): categories.append({'id': i, 'name': i, 'supercategory': i})
+    results = {'info': {'year': datetime.now().year, 'version': version, 'description': description}, 'licences': licences, 'categories': categories}
+    video_data = get_video_meta_data(video_path)
+    w, h = video_data['width'], video_data['height']
+    images = []
+    annotations = []
+    img_names = []
+    if not os.path.isdir(save_dir): os.makedirs(save_dir)
+    save_img_dir = os.path.join(save_dir, 'img')
+    if not os.path.isdir(save_img_dir): os.makedirs(save_img_dir)
+    for category_cnt, (category_id, category_data) in enumerate(geometries.items()):
+        for img_cnt in range(category_data.shape[0]):
+            img_geometry = category_data[img_cnt]
+            img_name = f'{video_data["video_name"]}_{img_cnt}.png'
+            if img_name not in img_names:
+                images.append({'id': img_cnt, 'width': w, 'height': h, 'file_name': img_name})
+                img = read_frm_of_video(video_path=video_path, frame_index=img_cnt)
+                img_save_path = os.path.join(save_img_dir, img_name)
+                cv2.imwrite(img_save_path, img)
+                img_names.append(img_name)
+            annotation_id = len(annotations) + 1  # running counter keeps annotation ids unique across categories and frames
+            d = GeometryMixin().get_shape_lengths_widths(shapes=Polygon(img_geometry))
+            a_h, a_w, a_a = d['max_length'], d['max_width'], d['max_area']
+            bbox = [int(category_data[img_cnt][0][0]), int(category_data[img_cnt][0][1]), int(a_w), int(a_h)]
+            segmentation = {'size': [h, w], 'counts': geometry_to_rle(geometry=img_geometry, img_size=(h, w))['counts']}
+            annotation = {'id': annotation_id, 'image_id': img_cnt, 'category_id': category_id, 'bbox': bbox, 'area': a_a, 'iscrowd': 0, 'segmentation': segmentation}
+            annotations.append(annotation)
+    results['images'] = images
+    results['annotations'] = annotations
+    with open(os.path.join(save_dir, "annotations.json"), "w") as final:
+        json.dump(results, final)
+
+
+# Reference COCO annotation entry (from the pascal_sbd annotations) illustrating the target format:
+# {"id": 20173, "image_id": 8499, "category_id": 15, "segmentation": {"size": [333, 500],
+#  "counts": "e\\d02[:0O1N2XO2jF5S9c00N0001000O1O10M501O2N>CN1TOXG3n8@[G0KN`PX4"},
+#  "area": 528.0, "bbox": [62, 189, 19, 51], "iscrowd": 0}
+
+
+data_path = r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\FRR_gq_Saline_0624.csv"
+animal_data = read_df(file_path=data_path, file_type='csv', usecols=['Nose_x', 'Nose_y', 'Tail_base_x', 'Tail_base_y', 'Left_side_x', 'Left_side_y', 'Right_side_x', 'Right_side_y']).values.reshape(-1, 4, 2)[0:20].astype(np.int32)
+animal_polygons = GeometryMixin().bodyparts_to_polygon(data=animal_data)
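+# The steps below are a sketch: reduce each polygon to its exterior keypoints, convert those to
+# axis-aligned bounding boxes, and key the result by an arbitrary category id (0) for COCO export.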
+#animal_polygons = GeometryMixin().multiframe_minimum_rotated_rectangle(shapes=animal_polygons)
+animal_polygons = GeometryMixin().geometries_to_exterior_keypoints(geometries=animal_polygons)
+animal_polygons = GeometryMixin.keypoints_to_axis_aligned_bounding_box(keypoints=animal_polygons)
+animal_polygons = {0: animal_polygons}
+
+geometries_to_coco(geometries=animal_polygons, video_path=r'C:\troubleshooting\mitra\project_folder\videos\FRR_gq_Saline_0624.mp4', save_dir=r"C:\troubleshooting\coco_data")
+
+#geometries_to_exterior_keypoints(geometries=animal_polygons)
+# animal_polygons = [[x] for x in animal_polygons]
+
+# import json
+#
+# file_path = r"C:\Users\sroni\Downloads\sbd_coco_anns\pascal_sbd_train.json"
+#
+# with open(file_path, 'r') as file:
+#     data = json.load(file)
diff --git a/simba/sandbox/cohens_kappa.py b/simba/sandbox/cohens_kappa.py
new file mode 100644
index 000000000..06af4e940
--- /dev/null
+++ b/simba/sandbox/cohens_kappa.py
@@ -0,0 +1,63 @@
+import numpy as np
+import pandas as pd
+from numba import njit
+from sklearn.metrics import cohen_kappa_score
+import time
+
+@njit("(int64[:],int64[:])")
+def cohens_kappa(sample_1: np.ndarray, sample_2: np.ndarray):
+    """
+    Jitted compute of Cohen's Kappa coefficient for two binary samples.
+
+    Cohen's Kappa coefficient between classifications and ground truth, taking into account agreement between classifications and ground truth occurring by chance.
+
+    :example:
+    >>> sample_1 = np.random.randint(0, 2, size=(10000,))
+    >>> sample_2 = np.random.randint(0, 2, size=(10000,))
+    >>> cohens_kappa(sample_1=sample_1, sample_2=sample_2)
+    """
+    sample_1 = np.ascontiguousarray(sample_1)
+    sample_2 = np.ascontiguousarray(sample_2)
+    data = np.hstack((sample_1.reshape(-1, 1), sample_2.reshape(-1, 1)))
+    tp = len(np.argwhere((data[:, 0] == 1) & (data[:, 1] == 1)).flatten())
+    tn = len(np.argwhere((data[:, 0] == 0) & (data[:, 1] == 0)).flatten())
+    fp = len(np.argwhere((data[:, 0] == 1) & (data[:, 1] == 0)).flatten())
+    fn = len(np.argwhere((data[:, 0] == 0) & (data[:, 1] == 1)).flatten())
+    data = np.array(([tp, fp], [fn, tn]))
+    sum0 = data.sum(axis=0)
+    sum1 = data.sum(axis=1)
+    expected = np.outer(sum0, sum1) / np.sum(sum0)
+    w_mat = np.full(shape=(2, 2), fill_value=1)
+    w_mat[0, 0] = 0
+    w_mat[1, 1] = 0
+    return 1 - np.sum(w_mat * data) / np.sum(w_mat * expected)
+
+
+def cohens_kappa_one_against_all(data: pd.DataFrame, labels: np.ndarray):
+    # NOTE: relies on a `split_specific_cluster_data` helper (defined elsewhere in the sandbox) that
+    # splits the rows belonging to `label` from all remaining rows.
+    results = {}
+    for lbl in np.unique(labels):
+        cluster_data, non_cluster_data = split_specific_cluster_data(data=data, labels=labels, label=lbl)
+        results[lbl] = {}
+        for field in cluster_data.columns:
+            sample_1 = cluster_data[field].values
+            sample_2 = non_cluster_data[field].values
+            paired = np.hstack((sample_1.reshape(-1, 1), sample_2.reshape(-1, 1)))
+            tp = len(np.argwhere((paired[:, 0] == 1) & (paired[:, 1] == 1)).flatten())
+            tn = len(np.argwhere((paired[:, 0] == 0) & (paired[:, 1] == 0)).flatten())
+            fp = len(np.argwhere((paired[:, 0] == 1) & (paired[:, 1] == 0)).flatten())
+            fn = len(np.argwhere((paired[:, 0] == 0) & (paired[:, 1] == 1)).flatten())
+            counts = np.array(([tp, fp], [fn, tn]))
+            sum0 = counts.sum(axis=0)
+            sum1 = counts.sum(axis=1)
+            expected = np.outer(sum0, sum1) / np.sum(sum0)
+            w_mat = np.full(shape=(2, 2), fill_value=1)
+            w_mat[0, 0] = 0
+            w_mat[1, 1] = 0
+            results[lbl][field] = 1 - np.sum(w_mat * counts) / np.sum(w_mat * expected)
+    return results
+
+
+#confusion_matrix()
diff --git a/simba/sandbox/colinear_features.py b/simba/sandbox/colinear_features.py
new file mode 100644
index 
index 000000000..53a99d86e
--- /dev/null
+++ b/simba/sandbox/colinear_features.py
@@ -0,0 +1,93 @@
+import pandas as pd
+import numpy as np
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+from typing import List, Set
+from simba.utils.checks import check_float, check_instance, check_int, check_valid_dataframe, check_str
+from typing import Optional, Tuple, Any
+from simba.utils.errors import InvalidInputError
+from simba.mixins.statistics_mixin import Statistics
+from itertools import combinations
+from simba.utils.read_write import read_pickle
+
+def find_collinear_features(df: pd.DataFrame,
+                            threshold: float,
+                            method: Optional[Literal['pearson', 'spearman', 'kendall']] = 'pearson',
+                            verbose: Optional[bool] = False) -> Set[str]:
+    """
+    Identify collinear features in the dataframe based on the specified correlation method and threshold.
+
+    :param pd.DataFrame df: Input DataFrame containing features.
+    :param float threshold: Threshold value to determine collinearity.
+    :param Optional[Literal['pearson', 'spearman', 'kendall']] method: Method for calculating correlation. Defaults to 'pearson'.
+    :return: Set of feature names identified as collinear. Contains one feature from every feature pair with a correlation value above the specified threshold.
+
+    :example:
+    >>> x = pd.DataFrame(np.random.randint(0, 100, (100, 100)))
+    >>> names = find_collinear_features(df=x, threshold=0.2, method='pearson', verbose=True)
+    """
+
+    check_valid_dataframe(df=df, source=find_collinear_features.__name__, valid_dtypes=(float, int, np.float32, np.float64, np.int32, np.int64), min_axis_1=1, min_axis_0=1)
+    check_float(name=find_collinear_features.__name__, value=threshold, max_value=1.0, min_value=0.0)
+    check_str(name=find_collinear_features.__name__, value=method, options=('pearson', 'spearman', 'kendall'))
+    feature_names = set()
+    feature_pairs = list(combinations(list(df.columns), 2))
+
+    for cnt, i in enumerate(feature_pairs):
+        if verbose:
+            print(f'Analyzing feature pair {cnt+1}/{len(feature_pairs)}...')
+        if (i[0] not in feature_names) and (i[1] not in feature_names):
+            sample_1, sample_2 = df[i[0]].values.astype(np.float32), df[i[1]].values.astype(np.float32)
+            if method == 'pearson':
+                r = Statistics.pearsons_r(sample_1=sample_1, sample_2=sample_2)
+            elif method == 'spearman':
+                r = Statistics.spearman_rank_correlation(sample_1=sample_1, sample_2=sample_2)
+            else:
+                r = Statistics.kendall_tau(sample_1=sample_1, sample_2=sample_2)[0]
+            if abs(r) > threshold:
+                feature_names.add(i[0])
+    if verbose:
+        print('Collinear analysis complete.')
+    return feature_names
+
+
+data_path = '/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/embeddings/amazing_rubin.pickle'
+data = read_pickle(data_path)['DATA']['FRAME_FEATURES'] #['BOUTS_FEATURES']
+
+x = pd.DataFrame(np.random.randint(0, 100, (100, 100)))
+names = find_collinear_features(df=x, threshold=0.2, method='pearson', verbose=True)
+
+data['Mouse_1_poly_area'].corr(data['Low_prob_detections_0.75'])
+
+Statistics.pearsons_r(sample_1=data['Mouse_1_poly_area'].values, sample_2=data['Low_prob_detections_0.75'].values)
+
+# l = find_colinear_features(df=data, threshold=0.7, method='pearson')
+
+remain = list(set(list(data.columns)) - set(names))
+print(len(remain))
+
+#check_valid_dataframe(df=df, valid_dtypes=(float, int,),)
diff --git a/simba/sandbox/color_filtering.py b/simba/sandbox/color_filtering.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/simba/sandbox/concat_gpu.py b/simba/sandbox/concat_gpu.py
new file mode 100644
index 000000000..51ee819f9
--- /dev/null
+++ b/simba/sandbox/concat_gpu.py
@@ -0,0 +1,163 @@
+__author__ = "Simon Nilsson"
+
+import base64
+import configparser
+import glob
+import io
+import itertools
+import json
+import math
+import multiprocessing
+import os
+import pickle
+import platform
+import re
+import shutil
+import subprocess
+import webbrowser
+from configparser import ConfigParser
+from copy import deepcopy
+from datetime import datetime, timedelta
+from pathlib import Path
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
+
+from PIL import Image
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+from urllib.parse import urlparse
+
+import cv2
+import numpy as np
+import pandas as pd
+import pkg_resources
+import pyarrow as pa
+from numba import njit, prange
+from pyarrow import csv
+from shapely.geometry import (LineString, MultiLineString, MultiPolygon, Point,
+                              Polygon)
+from simba.utils.read_write import get_video_meta_data, get_fn_ext, remove_a_folder
+from simba.utils.checks import (check_file_exist_and_readable, check_float,
+                                check_if_dir_exists,
+                                check_if_filepath_list_is_empty,
+                                check_if_keys_exist_in_dict,
+                                check_if_string_value_is_valid_video_timestamp,
+                                check_if_valid_rgb_tuple, check_instance,
+                                check_int, check_nvidea_gpu_available,
+                                check_str, check_valid_array,
+                                check_valid_boolean, check_valid_dataframe,
+                                check_valid_lst, is_video_color)
+from simba.utils.enums import ConfigKey, Dtypes, Formats, Keys, Options
+from simba.utils.errors import (DataHeaderError, DuplicationError,
+                                FFMPEGCodecGPUError, FileExistError,
+                                FrameRangeError, IntegerError,
+                                InvalidFilepathError, InvalidFileTypeError,
+                                InvalidInputError, InvalidVideoFileError,
+                                MissingProjectConfigEntryError, NoDataError,
+                                NoFilesFoundError, NotDirectoryError,
+                                ParametersFileError, PermissionError)
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.warnings import (
+    FileExistWarning, FrameRangeWarning, InvalidValueWarning,
+    NoDataFoundWarning, NoFileFoundWarning,
+    ThirdPartyAnnotationsInvalidFileFormatWarning)
+
+
+def concatenate_videos_in_folder(in_folder: Union[str, os.PathLike],
+                                 save_path: Union[str, os.PathLike],
+                                 file_paths: Optional[List[Union[str, os.PathLike]]] = None,
+                                 video_format: Optional[str] = "mp4",
+                                 substring: Optional[str] = None,
+                                 remove_splits: Optional[bool] = True,
+                                 gpu: Optional[bool] = False,
+                                 fps: Optional[Union[int, str]] = None) -> None:
+    """
+    Concatenate (temporally) all video files in a folder into a single video.
+
+    .. important::
+       Input video parts are joined in alphanumeric order and should have sequentially numbered file names, e.g., ``1.mp4``, ``2.mp4``....
+
+    .. note::
+       If ``substring`` and ``file_paths`` are both not None, then ``file_paths`` will be filtered and only file paths containing ``substring`` will be retained.
+
+    :param Union[str, os.PathLike] in_folder: Path to folder holding un-concatenated video files.
+    :param Union[str, os.PathLike] save_path: Path to the output file. Note: if the path exists, it will be overwritten.
+    :param Optional[List[Union[str, os.PathLike]]] file_paths: If not None, the files that should be joined. If None, all files in ``in_folder``. Default: None.
+    :param Optional[str] video_format: Format of the input video files in ``in_folder``. Default: ``mp4``.
+    :param Optional[str] substring: If a string, then only videos in ``in_folder`` with a filename that contains the substring will be joined. If None, then all are joined. Default: None.
+    :param Optional[bool] remove_splits: If True, the input splits in ``in_folder`` will be removed following concatenation. Default: True.
+    :param Optional[bool] gpu: If True, use NVIDIA GPU codecs (``h264_nvenc``). Default: False.
+    :param Optional[Union[int, str]] fps: If None, keep the input frame rate. If an int or float, the output frame rate. If a string, the index of the input video whose frame rate should be used for the output. Default: None.
+    :rtype: None
+    """
+
+    if not check_nvidea_gpu_available() and gpu:
+        raise FFMPEGCodecGPUError(msg="No FFMpeg GPU codec found.", source=concatenate_videos_in_folder.__name__)
+    timer = SimbaTimer(start=True)
+    if file_paths is None:
+        files = glob.glob(in_folder + "/*.{}".format(video_format))
+    else:
+        for file_path in file_paths:
+            check_file_exist_and_readable(file_path=file_path)
+        files = file_paths
+    check_if_filepath_list_is_empty(filepaths=files, error_msg=f"SIMBA ERROR: Cannot join videos in directory {in_folder}. The directory contains ZERO files in format {video_format}")
+    if substring is not None:
+        sliced_paths = []
+        for file_path in files:
+            if substring in get_fn_ext(filepath=file_path)[1]:
+                sliced_paths.append(file_path)
+        check_if_filepath_list_is_empty(
+            filepaths=sliced_paths,
+            error_msg=f"SIMBA ERROR: Cannot join videos in directory {in_folder}. The directory contains ZERO files in format {video_format} with substring {substring}",
+        )
+        files = sliced_paths
+    files.sort(key=lambda f: int(re.sub(r"\D", "", f)))
+    temp_txt_path = Path(in_folder, "files.txt")
+    if os.path.isfile(temp_txt_path):
+        os.remove(temp_txt_path)
+    with open(temp_txt_path, "w") as f:
+        for file in files:
+            f.write("file '" + str(Path(file)) + "'\n")
+
+    out_fps = None
+    if fps is not None:
+        check_int(name='fps', value=fps, min_value=0)
+        if isinstance(fps, str):
+            int_fps = int(fps)
+            if int_fps > len(files):
+                raise InvalidInputError(msg=f'If FPS is a string it represents the video index ({fps}) which is more than the number of videos in the input directory ({len(files)})', source=concatenate_videos_in_folder.__name__)
+            out_fps = float(get_video_meta_data(video_path=files[int_fps])['fps'])
+        elif isinstance(fps, (int, float)):
+            out_fps = fps
+        else:
+            raise InvalidInputError(msg='FPS of the output video has to be None, a string index, a float, or an integer', source=concatenate_videos_in_folder.__name__)
+
+    if check_nvidea_gpu_available() and gpu:
+        if fps is None:
+            returned = os.system(f"ffmpeg -f concat -safe 0 -i \"{temp_txt_path}\" -c:v h264_nvenc -pix_fmt yuv420p -c:a copy -hide_banner -loglevel info \"{save_path}\" -y")
+            #returned = os.system(f'ffmpeg -hwaccel auto -c:v h264_cuvid -f concat -safe 0 -i "{temp_txt_path}" -c:v h264_nvenc -c:a copy -hide_banner -loglevel info "{save_path}" -y')
+            #returned = os.system(f"ffmpeg -hwaccel cuvid -hwaccel_device 0 -c:v h264_cuvid -f concat -safe 0 -i \"{temp_txt_path}\" -c:v h264_nvenc -c:a copy -hide_banner -loglevel info \"{save_path}\" -y")
+        else:
+            returned = os.system(f"ffmpeg -f concat -safe 0 -i \"{temp_txt_path}\" -r {out_fps} -c:v h264_nvenc -pix_fmt yuv420p -c:a copy -hide_banner -loglevel info \"{save_path}\" -y")
+            #returned = os.system(f'ffmpeg -hwaccel auto -c:v h264_cuvid -f concat -safe 0 -i "{temp_txt_path}" -r {out_fps} -c:v h264_nvenc -c:a copy -hide_banner -loglevel info "{save_path}" -y')
+            #returned = os.system(f'ffmpeg -hwaccel cuda -hwaccel_output_format cuda -c:v h264_cuvid -f concat -safe 0 -i "{temp_txt_path}" -vf scale_cuda=1280:720,format=nv12 -r {out_fps} -c:v h264_nvenc -c:a copy -hide_banner -loglevel info "{save_path}" -y')
+    else:
+        if fps is None:
+            returned = os.system(f'ffmpeg -f concat -safe 0 -i "{temp_txt_path}" "{save_path}" -c copy -hide_banner -loglevel info -y')
+        else:
+            returned = os.system(f'ffmpeg -f concat -safe 0 -i "{temp_txt_path}" -r {out_fps} -c:v libx264 -c:a copy -hide_banner -loglevel info "{save_path}" -y')
+    # a non-zero exit code means ffmpeg failed; do not delete the input splits in that case
+    if returned != 0:
+        raise FFMPEGCodecGPUError(msg=f"FFmpeg returned exit code {returned} when concatenating videos in {in_folder}.", source=concatenate_videos_in_folder.__name__)
+    if remove_splits:
+        remove_a_folder(folder_dir=Path(in_folder))
+    timer.stop_timer()
+    stdout_success(msg="Video concatenated", elapsed_time=timer.elapsed_time_str, source=concatenate_videos_in_folder.__name__)
+
+
+concatenate_videos_in_folder(in_folder=r'C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\temp - Copy (2)', save_path=r'C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated_gpu.mp4', remove_splits=False, gpu=True, fps=50)
\ No newline at end of file
diff --git a/simba/sandbox/concordance_ratio.py b/simba/sandbox/concordance_ratio.py
new file mode 100644
index 000000000..a1967500f
--- /dev/null
+++ b/simba/sandbox/concordance_ratio.py
@@ -0,0 +1,38 @@
+import time
+
+import numpy as np
+from numba import njit, prange
+
+
+@njit('(int64[:, :], bool_)')
+def concordance_ratio(x: np.ndarray, invert: bool) -> float:
+    """
+    Calculate the concordance ratio of a 2D numpy array.
+
+    :param np.ndarray x: A 2D numpy array with ordinals represented as integers.
+    :param bool invert: If True, the concordance ratio is inverted, and the disconcordance ratio is returned.
+    :return float: The concordance ratio, representing the count of rows with only one unique value divided by the total number of rows in the array.
+
+    :example:
+    >>> x = np.random.randint(0, 2, (5000, 4))
+    >>> results = concordance_ratio(x=x, invert=False)
+    """
+    conc_count = 0
+    for i in prange(x.shape[0]):
+        unique_cnt = np.unique((x[i])).shape[0]
+        if unique_cnt == 1:
+            conc_count += 1
+    if invert:
+        conc_count = x.shape[0] - conc_count
+    return conc_count / x.shape[0]
+
+# x = np.random.randint(0, 2, (5000, 4))
+# start = time.time()
+# results = concordance_ratio(x=x, invert=False)
+# print(time.time() - start)
+# print(results)
diff --git a/simba/sandbox/contrast.py b/simba/sandbox/contrast.py
new file mode 100644
index 000000000..90f2da565
--- /dev/null
+++ b/simba/sandbox/contrast.py
@@ -0,0 +1,127 @@
+from typing import Union, Tuple
+import os
+
+from datetime import datetime
+import subprocess
+
+from tkinter import Button
+from simba.utils.enums import Keys, Links, Options
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, FileSelect, FolderSelect
+from simba.utils.checks import check_file_exist_and_readable, check_if_dir_exists
+from simba.video_processors.brightness_contrast_ui import brightness_contrast_ui
+from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext
+from simba.utils.printing import stdout_success, SimbaTimer
+
+
+class BrightnessContrastPopUp(PopUpMixin):
+    def __init__(self):
+        super().__init__(title="CHANGE BRIGHTNESS / CONTRAST")
+        self.datetime = datetime.now().strftime("%Y%m%d%H%M%S")
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="CHANGE BRIGHTNESS / CONTRAST SINGLE VIDEO", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", file_types=[("VIDEO", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)], lblwidth=25)
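+        # The single-video frame above and the directory frame below both funnel into apply();
+        # run_video() and run_directory() differ only in how self.video_paths is populated.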
+        run_video_btn = Button(single_video_frm, text="RUN SINGLE VIDEO", command=lambda: self.run_video(), fg="blue")
+
+        single_video_frm.grid(row=0, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        run_video_btn.grid(row=1, column=0, sticky="NW")
+
+        video_dir_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="CHANGE BRIGHTNESS / CONTRAST MULTIPLE VIDEOS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_dir = FolderSelect(video_dir_frm, "VIDEO DIRECTORY PATH:", lblwidth=25)
+        run_dir_btn = Button(video_dir_frm, text="RUN VIDEO DIRECTORY", command=lambda: self.run_directory(), fg="blue")
+
+        video_dir_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_dir.grid(row=0, column=0, sticky="NW")
+        run_dir_btn.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run_video(self):
+        video_path = self.selected_video.file_path
+        check_file_exist_and_readable(file_path=video_path)
+        self.brightness, self.contrast = brightness_contrast_ui(video_path=video_path)
+        self.video_paths = [video_path]
+        print(self.video_paths)
+        self.apply()
+
+    def run_directory(self):
+        video_dir = self.selected_dir.folder_path
+        check_if_dir_exists(in_dir=video_dir, source=self.__class__.__name__)
+        self.video_paths = find_files_of_filetypes_in_directory(directory=video_dir, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True)
+        self.brightness, self.contrast = brightness_contrast_ui(video_path=self.video_paths[0])
+        self.apply()
+
+    def apply(self):
+        timer = SimbaTimer(start=True)
+        for file_cnt, file_path in enumerate(self.video_paths):
+            video_timer = SimbaTimer(start=True)
+            dir, video_name, ext = get_fn_ext(filepath=file_path)
+            print(f'Creating copy of {video_name}...')
+            out_path = os.path.join(dir, f'{video_name}_eq_{self.datetime}{ext}')
+            cmd = f'ffmpeg -i "{file_path}" -vf "eq=brightness={self.brightness}:contrast={self.contrast}" -loglevel error -stats "{out_path}" -y'
+            subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+            video_timer.stop_timer()
+            stdout_success(msg=f'Video {out_path} complete!', elapsed_time=video_timer.elapsed_time_str)
+        timer.stop_timer()
+        stdout_success(f'{len(self.video_paths)} video(s) converted.', elapsed_time=timer.elapsed_time_str)
+
+
+#
+# def change_contrast(video_path: Union[str, os.PathLike]) -> Tuple[float, float]:
+#     """
+#     Create a user interface using OpenCV to explore and change the brightness and contrast of a video.
+#
+#     :param Union[str, os.PathLike] video_path: Path to the video file.
+# :return Tuple: The scaled brightness and scaled contrast values on scale -1 to +1 suitable for FFmpeg conversion +# +# :example: +# >>> change_contrast(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/ROI_features/2022-06-20_NOB_DOT_4.mp4') +# """ +# def _get_trackbar_values(v): +# brightness = cv2.getTrackbarPos('Brightness', 'Contrast / Brightness') +# contrast = cv2.getTrackbarPos('Contrast', 'Contrast / Brightness') +# brightness = int((brightness - 0) * (255 - (-255)) / (510 - 0) + (-255)) +# contrast = int((contrast - 0) * (127 - (-127)) / (254 - 0) + (-127)) +# if brightness != 0: +# if brightness > 0: +# shadow, max = brightness, 255 +# else: +# shadow, max = 0, 255 + brightness +# cal = cv2.addWeighted(original_img, (max - shadow) / 255, original_img, 0, shadow) +# else: +# cal = original_img +# if contrast != 0: +# Alpha = float(131 * (contrast + 127)) / (127 * (131 - contrast)) +# Gamma = 127 * (1 - Alpha) +# cal = cv2.addWeighted(cal, Alpha, cal, 0, Gamma) +# img = np.copy(cal) +# cv2.imshow('Contrast / Brightness', img) +# +# _ = get_video_meta_data(video_path=video_path) +# original_img = read_frm_of_video(video_path=video_path, frame_index=0) +# img = np.copy(original_img) +# cv2.namedWindow('Contrast / Brightness', cv2.WINDOW_NORMAL) +# cv2.imshow('Contrast / Brightness', img) +# cv2.createTrackbar('Brightness', 'Contrast / Brightness', 255, 2 * 255, _get_trackbar_values) +# cv2.createTrackbar('Contrast', 'Contrast / Brightness', 127, 2 * 127, _get_trackbar_values) +# while True: +# k = cv2.waitKey(1) & 0xFF +# if k == 27: +# brightness = cv2.getTrackbarPos('Brightness', 'Change contrast') +# contrast = cv2.getTrackbarPos('Contrast', 'Change contrast') +# scaled_brightness = ((brightness - 0) / (510 - 0)) * (1 - -1) + -1 +# scaled_contrast= ((contrast - 0) / (254 - 0)) * (1 - -1) + -1 +# if scaled_contrast == 0.0 and scaled_brightness == 0.0: +# InValidUserInputWarning(msg=f'Both the selected brightness and contrast are the same as in the input video. 
Select different values.') +# else: +# cv2.destroyAllWindows() +# return scaled_brightness, scaled_contrast + diff --git a/simba/sandbox/convert_to_bw.py b/simba/sandbox/convert_to_bw.py new file mode 100644 index 000000000..7f8c78eaf --- /dev/null +++ b/simba/sandbox/convert_to_bw.py @@ -0,0 +1,175 @@ +from typing import Union, Optional +import os +import subprocess +from simba.utils.read_write import get_fn_ext, find_all_videos_in_directory, get_video_meta_data +from simba.video_processors.roi_selector import ROISelector +from simba.utils.checks import check_ffmpeg_available, check_float, check_if_dir_exists, check_file_exist_and_readable +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.errors import InvalidInputError +import threading + +import glob +import os +import subprocess +import sys +import threading +from copy import deepcopy +from datetime import datetime +from tkinter import * +from typing import Optional, Union + +import numpy as np +from PIL import Image, ImageTk + +import simba +from simba.labelling.extract_labelled_frames import AnnotationFrameExtractor +from simba.mixins.config_reader import ConfigReader +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.plotting.frame_mergerer_ffmpeg import FrameMergererFFmpeg +from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, + CreateToolTip, DropDownMenu, Entry_Box, + FileSelect, FolderSelect) +from simba.utils.checks import (check_ffmpeg_available, + check_file_exist_and_readable, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_int, check_nvidea_gpu_available, + check_str, + check_that_hhmmss_start_is_before_end) +from simba.utils.data import convert_roi_definitions +from simba.utils.enums import Dtypes, Formats, Keys, Links, Options, Paths +from simba.utils.errors import (CountError, DuplicationError, FrameRangeError, + InvalidInputError, MixedMosaicError, + NoChoosenClassifierError, NoFilesFoundError, + NotDirectoryError) +from simba.utils.lookups import get_color_dict, get_fonts +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + seconds_to_timestamp, str_2_bool) +from simba.video_processors.brightness_contrast_ui import \ + brightness_contrast_ui +from simba.video_processors.clahe_ui import interactive_clahe_ui +from simba.video_processors.extract_seqframes import extract_seq_frames +from simba.video_processors.multi_cropper import MultiCropper +from simba.video_processors.px_to_mm import get_coordinates_nilsson +from simba.video_processors.video_processing import ( + VideoRotator, batch_convert_video_format, batch_create_frames, + batch_video_to_greyscale, change_fps_of_multiple_videos, change_img_format, + change_single_video_fps, clahe_enhance_video, clip_video_in_range, + clip_videos_by_frame_ids, convert_to_avi, convert_to_bmp, convert_to_jpeg, + convert_to_mov, convert_to_mp4, convert_to_png, convert_to_tiff, + convert_to_webm, convert_to_webp, + convert_video_powerpoint_compatible_format, copy_img_folder, + crop_multiple_videos, crop_multiple_videos_circles, + crop_multiple_videos_polygons, crop_single_video, crop_single_video_circle, + crop_single_video_polygon, downsample_video, extract_frame_range, + extract_frames_single_video, frames_to_movie, gif_creator, + 
multi_split_video, remove_beginning_of_video, resize_videos_by_height,
+    resize_videos_by_width, roi_blurbox, superimpose_elapsed_time,
+    superimpose_frame_count, superimpose_freetext, superimpose_overlay_video,
+    superimpose_video_names, superimpose_video_progressbar,
+    video_bg_subtraction_mp, video_bg_subtraction, video_concatenator,
+    video_to_greyscale, watermark_video, rotate_video, flip_videos, upsample_fps, reverse_videos)
+
+sys.setrecursionlimit(10**7)
+
+def video_to_bw(video_path: Union[str, os.PathLike],
+                threshold: Optional[float] = 0.5,
+                save_dir: Optional[Union[str, os.PathLike]] = None) -> None:
+    """
+    Convert video to black and white using passed threshold.
+
+    .. video:: _static/img/video_to_bw.webm
+       :width: 800
+       :autoplay:
+       :loop:
+
+    :param Union[str, os.PathLike] video_path: Path to the video, or to a directory of videos.
+    :param Optional[float] threshold: Value between 0 and 1. Lower values give more white and vice versa.
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory to store the output. If None, saves next to the input video(s).
+    :return: None.
+
+    :example:
+    >>> video_to_bw(video_path='/Users/simon/Downloads/1_LH_clipped_cropped_eq_20240515135926.mp4', threshold=0.02)
+    """
+
+    check_float(name=f'{video_to_bw.__name__} threshold', value=threshold, min_value=0, max_value=1.0)
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=video_to_bw.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    check_ffmpeg_available(raise_error=True)
+    threshold = int(255 * threshold)  # convert the 0-1 threshold to the 0-255 luma scale used by ffmpeg, once, before the loop
+    timer = SimbaTimer(start=True)
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, ext = get_fn_ext(video_path)
+        print(f'Converting video {video_name} to black and white (Video {file_cnt + 1}/{len(video_paths)})...')
+        _ = get_video_meta_data(video_path=video_path)
+        save_path = os.path.join(save_dir, f'{video_name}_bw{ext}')
+        cmd = f"ffmpeg -i '{video_path}' -vf \"format=gray,geq=lum_expr='if(lt(lum(X,Y),{threshold}),0,255)'\" -pix_fmt yuv420p '{save_path}' -loglevel error -stats -hide_banner -y"
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f'{len(video_paths)} video(s) converted to black and white.', elapsed_time=timer.elapsed_time_str)
+
+
+class Convert2BlackWhitePopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="CONVERT VIDEOS TO BLACK & WHITE")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        threshold = [round(x, 2) for x in list(np.arange(0.01, 1.01, 0.01))]
+        self.threshold_dropdown = DropDownMenu(settings_frm, "BLACK THRESHOLD:", threshold, labelwidth=25)
+        self.threshold_dropdown.setChoices(0.5)
+
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.threshold_dropdown.grid(row=0, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - CONVERT TO BLACK & WHITE", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - CONVERT TO BLACK & WHITE", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        threshold = float(self.threshold_dropdown.getChoices())
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+        threading.Thread(target=video_to_bw, kwargs={'video_path': data_path, 'threshold': threshold}).start()
+
+
+#Convert2BlackWhitePopUp()
+
+#video_to_bw(video_path='/Users/simon/Desktop/Screen Recording 2024-05-08 at 10.57.59 AM_upsampled_time_superimposed.mov', threshold=0.5)
diff --git a/simba/sandbox/convert_to_mp4.py b/simba/sandbox/convert_to_mp4.py
new file mode 100644
index 000000000..3123e9183
--- /dev/null
+++ b/simba/sandbox/convert_to_mp4.py
@@ -0,0 +1,300 @@
+import os
+from datetime import datetime
+from typing import Union, Optional
+import subprocess
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+from simba.utils.checks import check_instance, check_ffmpeg_available, check_str, check_if_dir_exists, check_int, check_nvidea_gpu_available
+from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data
+from simba.utils.enums import Options
+from simba.utils.errors import InvalidInputError, FFMPEGCodecGPUError, NotDirectoryError
+from simba.utils.lookups import percent_to_crf_lookup, percent_to_qv_lk
+from simba.utils.printing import stdout_success, SimbaTimer
+import cv2
+
+def frames_to_movie(directory: Union[str, os.PathLike],
+                    fps: int,
+                    quality: int,
+                    out_format: Literal['mp4', 'avi', 'webm', 'mov'] = 'mp4',
+                    gpu: Optional[bool] = False) -> None:
+    """
+    Merge all image files in a folder to a video file. The video is saved beside the input
+    directory and named after it.
+
+    .. note::
+       The image files have to have ordered numerical names, e.g., ``1.png``, ``2.png`` etc...
+
+    :parameter str directory: Directory containing the images.
+    :parameter int fps: The frame rate of the output video.
+    :parameter int quality: Integer from 10-100 representing the quality of the output video.
+    :parameter Literal['mp4', 'avi', 'webm', 'mov'] out_format: Container format of the output video. Default: ``mp4``.
+    :parameter Optional[bool] gpu: If True, use NVIDIA GPU codecs. Default False.
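+
+    .. note::
+       The GPU path assumes an NVIDIA card and an ffmpeg build with the ``h264_nvenc`` encoder
+       enabled; availability is only checked via ``check_nvidea_gpu_available()``.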
+
+    :example:
+    >>> _ = frames_to_movie(directory='project_folder/video_img', fps=15, quality=60, out_format='mp4')
+    """
+
+    check_ffmpeg_available(raise_error=True)
+    if gpu and not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available (as evaluated by nvidia-smi returning None)", source=frames_to_movie.__name__)
+    check_if_dir_exists(in_dir=directory, source=frames_to_movie.__name__)
+    check_int(name="FPS", value=fps, min_value=1)
+    check_int(name="quality", value=quality, min_value=1)
+    crf_lk = percent_to_crf_lookup()
+    crf = crf_lk[str(quality)]
+    img_paths = find_files_of_filetypes_in_directory(directory=directory, extensions=Options.ALL_IMAGE_FORMAT_OPTIONS.value, raise_error=True)
+    _, _, ext = get_fn_ext(filepath=img_paths[0])
+    img = cv2.imread(img_paths[0])
+    img_h, img_w = int(img.shape[0]), int(img.shape[1])
+    ffmpeg_fn = os.path.join(directory, f"%d.{ext}")
+    save_path = os.path.join(os.path.dirname(directory), f"{os.path.basename(directory)}.{out_format}")
+    if not gpu:
+        # ffmpeg -s expects WIDTHxHEIGHT
+        cmd = f'ffmpeg -y -r {fps} -f image2 -s {img_w}x{img_h} -i "{ffmpeg_fn}" -c:v libx265 -crf {crf} "{save_path}" -loglevel error -stats -hide_banner -y'
+    else:
+        # h264_nvenc uses -cq (constant quality) rather than the libx26x -crf flag
+        cmd = f'ffmpeg -y -r {fps} -f image2 -s {img_w}x{img_h} -i "{ffmpeg_fn}" -c:v h264_nvenc -cq {crf} "{save_path}" -loglevel error -stats -hide_banner -y'
+    subprocess.call(cmd, shell=True)
+    stdout_success(msg=f"Video created at {save_path}", source=frames_to_movie.__name__)
+
+
+# _ = frames_to_movie(directory='/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/videos/SI_DAY3_308_CD1_PRESENT_downsampled', fps=15, quality=60, out_format='mp4')
+
+
+def convert_to_mp4(path: Union[str, os.PathLike],
+                   codec: Literal['libx265', 'libx264', 'powerpoint'] = 'libx265',
+                   save_dir: Optional[Union[str, os.PathLike]] = None,
+                   quality: Optional[int] = 60) -> None:
+    """
+    Convert a directory containing videos, or a single video, to MP4 format using passed quality and codec.
+
+    :param Union[str, os.PathLike] path: Path to directory or file.
+    :param Literal['libx265', 'libx264', 'powerpoint'] codec: Codec used for the encoding. Default: ``libx265``.
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory where to save the converted videos. If None, then creates a directory in the same directory as the input.
+    :param Optional[int] quality: Integer representing the quality: 10, 20, 30.. 100.
+    :return: None.
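+
+    .. note::
+       The percentage ``quality`` is mapped to an ffmpeg CRF value via ``percent_to_crf_lookup()``;
+       higher percentages yield higher quality and larger files.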
+
+    :example:
+    >>> convert_to_mp4(path='/Users/simon/Desktop/video_test', quality=100)
+    """
+
+    timer = SimbaTimer(start=True)
+    check_ffmpeg_available(raise_error=True)
+    check_str(name=f'{convert_to_mp4.__name__} codec', value=codec, options=('libx265', 'libx264', 'powerpoint'))
+    check_instance(source=f'{convert_to_mp4.__name__} path', instance=path, accepted_types=(str,))
+    check_int(name=f'{convert_to_mp4.__name__} quality', value=quality)
+    datetime_ = datetime.now().strftime("%Y%m%d%H%M%S")
+    crf_lk = percent_to_crf_lookup()
+    crf = crf_lk[str(quality)]
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir, source=convert_to_mp4.__name__)
+    if os.path.isfile(path):
+        file_paths = [path]
+        if save_dir is None:
+            save_dir = os.path.join(os.path.dirname(path), f'mp4_{datetime_}')
+            os.makedirs(save_dir)
+    elif os.path.isdir(path):
+        file_paths = find_files_of_filetypes_in_directory(directory=path, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True)
+        if save_dir is None:
+            save_dir = os.path.join(path, f'mp4_{datetime_}')
+            os.makedirs(save_dir)
+    else:
+        raise InvalidInputError(msg='Path is not a valid file or directory path.', source=convert_to_mp4.__name__)
+    for file_cnt, file_path in enumerate(file_paths):
+        _, video_name, _ = get_fn_ext(filepath=file_path)
+        print(f'Converting video {video_name} to MP4 (Video {file_cnt+1}/{len(file_paths)})...')
+        _ = get_video_meta_data(video_path=file_path)
+        out_path = os.path.join(save_dir, f'{video_name}.mp4')
+        if codec != 'powerpoint':
+            cmd = f'ffmpeg -i "{file_path}" -c:v {codec} -crf {crf} -an "{out_path}" -loglevel error -stats -hide_banner -y'
+        else:
+            cmd = f'ffmpeg -i "{file_path}" -c:v libx264 -preset slow -profile:v high -level:v 4.0 -pix_fmt yuv420p -crf {crf} -codec:a aac "{out_path}" -loglevel error -stats -hide_banner -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(file_paths)} video(s) converted to MP4 and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=convert_to_mp4.__name__,)
+
+
+def convert_to_avi(path: Union[str, os.PathLike],
+                   codec: Literal['xvid', 'divx', 'mjpeg'] = 'divx',
+                   save_dir: Optional[Union[str, os.PathLike]] = None,
+                   quality: Optional[int] = 60) -> None:
+
+    """
+    Convert a directory containing videos, or a single video, to AVI format using passed quality and codec.
+
+    :param Union[str, os.PathLike] path: Path to directory or file.
+    :param Literal['xvid', 'divx', 'mjpeg'] codec: Codec used to encode the AVI format. Default: ``divx``.
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory where to save the converted videos. If None, then creates a directory in the same directory as the input.
+    :param Optional[int] quality: Integer representing the quality: 10, 20, 30.. 100.
+    :return: None.
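+
+    :example:
+    >>> # illustrative call mirroring the MP4 example above; the path is a placeholder directory
+    >>> convert_to_avi(path='/Users/simon/Desktop/video_test', codec='xvid', quality=60)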
+    """
+
+    timer = SimbaTimer(start=True)
+    check_ffmpeg_available(raise_error=True)
+    check_str(name=f'{convert_to_avi.__name__} codec', value=codec, options=('xvid', 'divx', 'mjpeg'))
+    check_instance(source=f'{convert_to_avi.__name__} path', instance=path, accepted_types=(str,))
+    check_int(name=f'{convert_to_avi.__name__} quality', value=quality)
+    datetime_ = datetime.now().strftime("%Y%m%d%H%M%S")
+    cv_lk = percent_to_qv_lk()
+    qv = cv_lk[int(quality)]
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir, source=convert_to_avi.__name__)
+    if os.path.isfile(path):
+        file_paths = [path]
+        if save_dir is None:
+            save_dir = os.path.join(os.path.dirname(path), f'avi_{datetime_}')
+            os.makedirs(save_dir)
+    elif os.path.isdir(path):
+        file_paths = find_files_of_filetypes_in_directory(directory=path, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True)
+        if save_dir is None:
+            save_dir = os.path.join(path, f'avi_{datetime_}')
+            os.makedirs(save_dir)
+    else:
+        raise InvalidInputError(msg='Path is not a valid file or directory path.', source=convert_to_avi.__name__)
+    for file_cnt, file_path in enumerate(file_paths):
+        _, video_name, _ = get_fn_ext(filepath=file_path)
+        print(f'Converting video {video_name} to avi (Video {file_cnt+1}/{len(file_paths)})...')
+        _ = get_video_meta_data(video_path=file_path)
+        out_path = os.path.join(save_dir, f'{video_name}.avi')
+        # mpeg4, libxvid and mjpeg are fixed-quality (-q:v) encoders; the libx26x -crf flag does not apply
+        if codec == 'divx':
+            cmd = f'ffmpeg -i "{file_path}" -c:v mpeg4 -q:v {qv} -vtag DIVX "{out_path}" -loglevel error -stats -hide_banner -y'
+        elif codec == 'xvid':
+            cmd = f'ffmpeg -i "{file_path}" -c:v libxvid -q:v {qv} "{out_path}" -loglevel error -stats -hide_banner -y'
+        else:
+            cmd = f'ffmpeg -i "{file_path}" -c:v mjpeg -q:v {qv} "{out_path}" -loglevel error -stats -hide_banner -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(file_paths)} video(s) converted to AVI and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=convert_to_avi.__name__,)
+
+
+def convert_to_webm(path: Union[str, os.PathLike],
+                    codec: Literal['vp8', 'vp9', 'av1'] = 'vp9',
+                    save_dir: Optional[Union[str, os.PathLike]] = None,
+                    quality: Optional[int] = 60) -> None:
+
+    """
+    Convert a directory containing videos, or a single video, to WEBM format using passed quality and codec.
+
+    :param Union[str, os.PathLike] path: Path to directory or file.
+    :param Literal['vp8', 'vp9', 'av1'] codec: Codec used to encode the WEBM format. Default: ``vp9``.
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory where to save the converted videos. If None, then creates a directory in the same directory as the input.
+    :param Optional[int] quality: Integer representing the quality: 10, 20, 30.. 100.
+    :return: None.
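+
+    :example:
+    >>> # illustrative call mirroring the MP4 and AVI examples in this file
+    >>> convert_to_webm(path='/Users/simon/Desktop/video_test', codec='vp9', quality=60)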
+    """
+
+    timer = SimbaTimer(start=True)
+    check_ffmpeg_available(raise_error=True)
+    check_str(name=f'{convert_to_webm.__name__} codec', value=codec, options=('vp8', 'vp9', 'av1'))
+    check_instance(source=f'{convert_to_webm.__name__} path', instance=path, accepted_types=(str,))
+    check_int(name=f'{convert_to_webm.__name__} quality', value=quality)
+    datetime_ = datetime.now().strftime("%Y%m%d%H%M%S")
+    crf_lk = percent_to_crf_lookup()
+    crf = crf_lk[str(quality)]
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir, source=convert_to_webm.__name__)
+    if os.path.isfile(path):
+        file_paths = [path]
+        if save_dir is None:
+            save_dir = os.path.join(os.path.dirname(path), f'webm_{datetime_}')
+            os.makedirs(save_dir)
+    elif os.path.isdir(path):
+        file_paths = find_files_of_filetypes_in_directory(directory=path, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True)
+        if save_dir is None:
+            save_dir = os.path.join(path, f'webm_{datetime_}')
+            os.makedirs(save_dir)
+    else:
+        raise InvalidInputError(msg='Path is not a valid file or directory path.', source=convert_to_webm.__name__)
+    for file_cnt, file_path in enumerate(file_paths):
+        _, video_name, _ = get_fn_ext(filepath=file_path)
+        print(f'Converting video {video_name} to WEBM (Video {file_cnt+1}/{len(file_paths)})...')
+        _ = get_video_meta_data(video_path=file_path)
+        out_path = os.path.join(save_dir, f'{video_name}.webm')
+        if codec == 'vp8':
+            cmd = f'ffmpeg -i "{file_path}" -c:v libvpx -crf {crf} "{out_path}" -loglevel error -stats -hide_banner -y'
+        elif codec == 'vp9':
+            # with libvpx-vp9, -b:v 0 is required for true constant-quality CRF encoding
+            cmd = f'ffmpeg -i "{file_path}" -c:v libvpx-vp9 -crf {crf} -b:v 0 "{out_path}" -loglevel error -stats -hide_banner -y'
+        else:
+            cmd = f'ffmpeg -i "{file_path}" -c:v libaom-av1 -crf {crf} "{out_path}" -loglevel error -stats -hide_banner -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(file_paths)} video(s) converted to WEBM and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=convert_to_webm.__name__,)
+
+
+def convert_to_mov(path: Union[str, os.PathLike],
+                   codec: Literal['prores', 'animation', 'dnxhd', 'cineform'] = 'prores',
+                   save_dir: Optional[Union[str, os.PathLike]] = None,
+                   quality: Optional[int] = 60) -> None:
+    """
+    Convert a directory containing videos, or a single video, to MOV format using passed quality and codec.
+
+    :param Union[str, os.PathLike] path: Path to directory or file.
+    :param Literal['prores', 'animation', 'dnxhd', 'cineform'] codec: Codec used to encode the MOV format. Default: ``prores``.
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory where to save the converted videos. If None, then creates a directory in the same directory as the input.
+    :param Optional[int] quality: Integer representing the quality: 10, 20, 30.. 100.
+    :return: None.
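+
+    :example:
+    >>> # illustrative call; compare the commented call at the bottom of this file
+    >>> convert_to_mov(path='/Users/simon/Desktop/video_test', codec='prores', quality=60)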
+    """
+
+    timer = SimbaTimer(start=True)
+    check_ffmpeg_available(raise_error=True)
+    check_str(name=f'{convert_to_mov.__name__} codec', value=codec, options=('prores', 'animation', 'cineform', 'dnxhd'))
+    check_instance(source=f'{convert_to_mov.__name__} path', instance=path, accepted_types=(str,))
+    check_int(name=f'{convert_to_mov.__name__} quality', value=quality)
+    datetime_ = datetime.now().strftime("%Y%m%d%H%M%S")
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir, source=convert_to_mov.__name__)
+    if os.path.isfile(path):
+        file_paths = [path]
+        if save_dir is None:
+            save_dir = os.path.join(os.path.dirname(path), f'mov_{datetime_}')
+            os.makedirs(save_dir)
+    elif os.path.isdir(path):
+        file_paths = find_files_of_filetypes_in_directory(directory=path, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True)
+        if save_dir is None:
+            save_dir = os.path.join(path, f'mov_{datetime_}')
+            os.makedirs(save_dir)
+    else:
+        raise InvalidInputError(msg='Path is not a valid file or directory path.', source=convert_to_mov.__name__)
+    for file_cnt, file_path in enumerate(file_paths):
+        _, video_name, _ = get_fn_ext(filepath=file_path)
+        print(f'Converting video {video_name} to MOV (Video {file_cnt + 1}/{len(file_paths)})...')
+        _ = get_video_meta_data(video_path=file_path)
+        out_path = os.path.join(save_dir, f'{video_name}.mov')
+        if codec == 'prores':
+            cmd = f'ffmpeg -i "{file_path}" -c:v prores_ks -profile:v 1 "{out_path}" -loglevel error -stats -hide_banner -y'
+        elif codec == 'dnxhd':
+            cmd = f'ffmpeg -i "{file_path}" -c:v dnxhd -profile:v dnxhr_mq "{out_path}" -loglevel error -stats -hide_banner -y'
+        elif codec == "cineform":
+            cmd = f'ffmpeg -i "{file_path}" -c:v cfhd -compression_level 5 -q:v 3 "{out_path}" -loglevel error -stats -hide_banner -y'
+        else:
+            cmd = f'ffmpeg -i "{file_path}" -c:v qtrle "{out_path}" -loglevel error -stats -hide_banner -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(file_paths)} video(s) converted to MOV and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=convert_to_mov.__name__, )
+
+#convert_to_mov(path='/Users/simon/Desktop/video_test', quality=0)
diff --git a/simba/sandbox/convert_to_popup.py b/simba/sandbox/convert_to_popup.py
new file mode 100644
index 000000000..072bf947d
--- /dev/null
+++ b/simba/sandbox/convert_to_popup.py
@@ -0,0 +1,194 @@
+import threading
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.ui.tkinter_functions import FolderSelect, CreateLabelFrameWithIcon, DropDownMenu, FileSelect
+from simba.utils.enums import Links, Keys, Options
+from simba.utils.checks import check_file_exist_and_readable, check_if_dir_exists
+from tkinter import *
+from simba.sandbox.convert_to_mp4 import convert_to_mp4, convert_to_avi, convert_to_webm, convert_to_mov
+
+
+class Convert2MP4PopUp(PopUpMixin):
+    """
+    :example:
+    >>> Convert2MP4PopUp()
+    """
+    def __init__(self):
+        super().__init__(title="CONVERT VIDEOS TO MP4")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.MP4_CODEC_LK = {'HEVC (H.265)': 'libx265', 'H.264 (AVC)': 'libx264', 'Guaranteed PowerPoint compatible': 'powerpoint'}
+        self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY:", list(range(10, 110, 10)), labelwidth=25)
+        self.quality_dropdown.setChoices(60)
+        self.codec_dropdown = DropDownMenu(settings_frm, "COMPRESSION CODEC:",
list(self.MP4_CODEC_LK.keys()), labelwidth=25)
+        self.codec_dropdown.setChoices('HEVC (H.265)')
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.quality_dropdown.grid(row=0, column=0, sticky=NW)
+        self.codec_dropdown.grid(row=1, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+        single_video_frm.grid(row=1, column=0, sticky=NW)
+        self.selected_video.grid(row=0, column=0, sticky=NW)
+        single_video_run.grid(row=1, column=0, sticky=NW)
+
+        multiple_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="VIDEO DIRECTORY", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_video_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_video_run = Button(multiple_video_frm, text="RUN - VIDEO DIRECTORY", command=lambda: self.run(multiple=True))
+        multiple_video_frm.grid(row=2, column=0, sticky=NW)
+        self.selected_video_dir.grid(row=0, column=0, sticky=NW)
+        multiple_video_run.grid(row=1, column=0, sticky=NW)
+
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        if not multiple:
+            video_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=video_path)
+        else:
+            video_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=video_path, source=self.__class__.__name__)
+        codec = self.MP4_CODEC_LK[self.codec_dropdown.getChoices()]
+        quality = int(self.quality_dropdown.getChoices())
+        threading.Thread(target=convert_to_mp4, kwargs={'path': video_path, 'codec': codec, 'quality': quality}).start()
+
+class Convert2AVIPopUp(PopUpMixin):
+    """
+    :example:
+    >>> Convert2AVIPopUp()
+    """
+
+    def __init__(self):
+        super().__init__(title="CONVERT VIDEOS TO AVI")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.AVI_CODEC_LK = {'XviD': 'xvid', 'DivX': 'divx', 'MJPEG': 'mjpeg'}
+        self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY:", list(range(10, 110, 10)), labelwidth=25)
+        self.quality_dropdown.setChoices(60)
+        self.codec_dropdown = DropDownMenu(settings_frm, "COMPRESSION CODEC:", list(self.AVI_CODEC_LK.keys()), labelwidth=25)
+        self.codec_dropdown.setChoices('DivX')
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.quality_dropdown.grid(row=0, column=0, sticky=NW)
+        self.codec_dropdown.grid(row=1, column=0, sticky=NW)
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+        single_video_frm.grid(row=1, column=0, sticky=NW)
+        self.selected_video.grid(row=0, column=0, sticky=NW)
+        single_video_run.grid(row=1, column=0, sticky=NW)
+
+        multiple_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm,
header="VIDEO DIRECTORY", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video_dir = FolderSelect(multiple_video_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25) + multiple_video_run = Button(multiple_video_frm, text="RUN - VIDEO DIRECTORY", command=lambda: self.run(multiple=True)) + multiple_video_frm.grid(row=2, column=0, sticky=NW) + self.selected_video_dir.grid(row=0, column=0, sticky=NW) + multiple_video_run.grid(row=1, column=0, sticky=NW) + self.main_frm.mainloop() + + def run(self, multiple: bool): + if not multiple: + video_path = self.selected_video.file_path + check_file_exist_and_readable(file_path=video_path) + else: + video_path = self.selected_video_dir.folder_path + check_if_dir_exists(in_dir=video_path, source=self.__class__.__name__) + codec = self.AVI_CODEC_LK[self.codec_dropdown.getChoices()] + quality = int(self.quality_dropdown.getChoices()) + threading.Thread(target=convert_to_avi(path=video_path, codec=codec, quality=quality)) + +class Convert2WEBMPopUp(PopUpMixin): + """ + :example: + >>> Convert2WEBMPopUp() + """ + + def __init__(self): + super().__init__(title="CONVERT VIDEOS TO WEBM") + settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.WEBM_CODEC_LK = {'VP8': 'vp8', 'VP9': 'vp9', 'AV1': 'av1'} + self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY:", list(range(10, 110, 10)), labelwidth=25) + self.quality_dropdown.setChoices(60) + self.codec_dropdown = DropDownMenu(settings_frm, "COMPRESSION CODEC:", list(self.WEBM_CODEC_LK.keys()), labelwidth=25) + self.codec_dropdown.setChoices('VP9') + settings_frm.grid(row=0, column=0, sticky=NW) + self.quality_dropdown.grid(row=0, column=0, sticky=NW) + self.codec_dropdown.grid(row=1, column=0, sticky=NW) + single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)]) + single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False)) + single_video_frm.grid(row=1, column=0, sticky=NW) + self.selected_video.grid(row=0, column=0, sticky=NW) + single_video_run.grid(row=1, column=0, sticky=NW) + + multiple_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="VIDEO DIRECTORY", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video_dir = FolderSelect(multiple_video_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25) + multiple_video_run = Button(multiple_video_frm, text="RUN - VIDEO DIRECTORY", command=lambda: self.run(multiple=True)) + multiple_video_frm.grid(row=2, column=0, sticky=NW) + self.selected_video_dir.grid(row=0, column=0, sticky=NW) + multiple_video_run.grid(row=1, column=0, sticky=NW) + self.main_frm.mainloop() + + def run(self, multiple: bool): + if not multiple: + video_path = self.selected_video.file_path + check_file_exist_and_readable(file_path=video_path) + else: + video_path = self.selected_video_dir.folder_path + check_if_dir_exists(in_dir=video_path, source=self.__class__.__name__) + codec = self.WEBM_CODEC_LK[self.codec_dropdown.getChoices()] + quality = int(self.quality_dropdown.getChoices()) + 
+        threading.Thread(target=convert_to_webm, kwargs={'path': video_path, 'codec': codec, 'quality': quality}).start()
+
+class Convert2MOVPopUp(PopUpMixin):
+    """
+    :example:
+    >>> Convert2MOVPopUp()
+    """
+
+    def __init__(self):
+        super().__init__(title="CONVERT VIDEOS TO MOV")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.MOV_CODEC_LK = {'ProRes Kostya Samanta': 'prores',
+                             'Animation': 'animation',
+                             'CineForm': 'cineform',
+                             'DNxHD/DNxHR': 'dnxhd'}
+        self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY:", list(range(10, 110, 10)), labelwidth=25)
+        self.quality_dropdown.setChoices(60)
+        self.codec_dropdown = DropDownMenu(settings_frm, "COMPRESSION CODEC:", list(self.MOV_CODEC_LK.keys()), labelwidth=25)
+        self.codec_dropdown.setChoices('ProRes Kostya Samanta')
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.quality_dropdown.grid(row=0, column=0, sticky=NW)
+        self.codec_dropdown.grid(row=1, column=0, sticky=NW)
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+        single_video_frm.grid(row=1, column=0, sticky=NW)
+        self.selected_video.grid(row=0, column=0, sticky=NW)
+        single_video_run.grid(row=1, column=0, sticky=NW)
+
+        multiple_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="VIDEO DIRECTORY", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_video_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_video_run = Button(multiple_video_frm, text="RUN - VIDEO DIRECTORY", command=lambda: self.run(multiple=True))
+        multiple_video_frm.grid(row=2, column=0, sticky=NW)
+        self.selected_video_dir.grid(row=0, column=0, sticky=NW)
+        multiple_video_run.grid(row=1, column=0, sticky=NW)
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        if not multiple:
+            video_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=video_path)
+        else:
+            video_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=video_path, source=self.__class__.__name__)
+        codec = self.MOV_CODEC_LK[self.codec_dropdown.getChoices()]
+        quality = int(self.quality_dropdown.getChoices())
+        threading.Thread(target=convert_to_mov, kwargs={'path': video_path, 'codec': codec, 'quality': quality}).start()
+
+#Convert2MP4PopUp()
+
+    # self.selected_frame_dir = FolderSelect(settings_frm, "IMAGE DIRECTORY PATH:", title="Select a image directory", lblwidth=25)
+    # self.create_run_frm(run_function=self.run, title='RUN PNG CONVERSION')
+    # settings_frm.grid(row=0, column=0, sticky="NW")
+    # self.selected_frame_dir.grid(row=0, column=0, sticky="NW")
+    # self.main_frm.mainloop()
\ No newline at end of file
diff --git a/simba/sandbox/convex_hull.py b/simba/sandbox/convex_hull.py
new file mode 100644
index 000000000..ba56f8f89
--- /dev/null
+++ b/simba/sandbox/convex_hull.py
@@ -0,0 +1,111 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import numpy as np
+from numba import cuda, njit
+
+THREADS_PER_BLOCK = 128
+
+@cuda.jit(device=True)
+def _cross_test(x, y, x1, y1, x2, y2):
+    """Cross product test for determining whether a point lies left of a line."""
+    cross = (x - x1) * (y2 - y1) - (y - y1) * (x2 - x1)
+    return cross < 0
+
+@cuda.jit
+def _convex_hull_kernel(pts: np.ndarray, results: np.ndarray) -> np.ndarray:
+    """
+    CUDA kernel for the Jarvis March algorithm.
+
+    .. note::
+       `Modified from Jacob Hultman `_
+
+    :param pts: M x N x 2 array where M is the number of frames, N is the number of body-parts, and the last dimension holds the x and y coordinates of the body-parts.
+    :param results: M x N array where M is the number of frames, and N the indexes of the body-parts belonging to the hull. If -1, the body-part does not belong to the hull.
+    """
+    row = cuda.grid(1)
+    if row >= pts.shape[0]:
+        return
+
+    point_on_hull = 0
+    min_x = pts[row, 0, 0]
+    for j in range(pts.shape[1]):
+        x = pts[row, j, 0]
+        if x < min_x:
+            min_x = x
+            point_on_hull = j
+    startpoint = point_on_hull
+    count = 0
+    while True:
+        results[row, count] = point_on_hull
+        count += 1
+        endpoint = 0
+        for j in range(pts.shape[1]):
+            if endpoint == point_on_hull:
+                endpoint = j
+            elif _cross_test(
+                pts[row, j, 0],
+                pts[row, j, 1],
+                pts[row, point_on_hull, 0],
+                pts[row, point_on_hull, 1],
+                pts[row, endpoint, 0],
+                pts[row, endpoint, 1],
+            ):
+                endpoint = j
+        point_on_hull = endpoint
+        if endpoint == startpoint:
+            break
+    for j in range(count, pts.shape[1], 1):
+        results[row, j] = -1
+
+
+@njit("(int32[:, :, :], int32[:, :])")
+def _slice_hull_idx(points: np.ndarray, point_idx: np.ndarray):
+    results = np.zeros_like(points)
+    for i in range(point_idx.shape[0]):
+        results[i] = points[i][point_idx[i]]
+    return results
+
+
+def get_convex_hull(pts: np.ndarray) -> np.ndarray:
+    """
+    Compute the convex hull for each set of 2D points in parallel using CUDA and the Jarvis March algorithm.
+
+    This function processes a batch of 2D point sets (frames) and computes the convex hull for each set. The convex hull of a set of points is the smallest convex polygon that contains all the points.
+
+    The function uses a variant of the Gift Wrapping algorithm (Jarvis March) to compute the convex hull. It finds the leftmost point, then iteratively determines the next point on the hull by checking the orientation of the remaining points. The results are stored in the `results` array, where each row corresponds to a frame and contains the indices of the points forming the convex hull. Points not on the hull are marked with `-1`.
+
+    .. image:: _static/img/get_convex_hull_cuda.png
+       :width: 300
+       :align: center
+
+    .. note::
+       `Modified from Jacob Hultman `_
+
+    :param pts: A 3D numpy array of shape (M, N, 2) where:
+        - M is the number of frames.
+        - N is the number of points (body-parts) in each frame.
+        - The last dimension (2) represents the x and y coordinates of each point.
+
+    :return: An updated 3D numpy array of shape (M, N, 2) consisting of the points in the hull.
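+
+    .. note::
+       One CUDA thread processes one frame; per frame, Jarvis March runs in O(n*h), where n is the
+       number of body-parts and h the number of points on the resulting hull.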
+
+
+    :example:
+    >>> video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/501_MA142_Gi_CNO_0514.mp4"
+    >>> data_path = r"/mnt/c/troubleshooting/mitra/project_folder/csv/outlier_corrected_movement_location/501_MA142_Gi_CNO_0514 - test.csv"
+    >>> df = read_df(file_path=data_path, file_type='csv')
+    >>> frame_data = df.values.reshape(len(df), -1, 2)
+    >>> x = get_convex_hull(frame_data)
+    """
+
+    pts = np.ascontiguousarray(pts).astype(np.int32)
+    n, m, _ = pts.shape
+    bpg = (n + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    pts_dev = cuda.to_device(pts)
+    results = cuda.device_array((n, m), dtype=np.int32)
+    _convex_hull_kernel[bpg, THREADS_PER_BLOCK](pts_dev, results)
+    hull = results.copy_to_host().astype(np.int32)
+    hull = _slice_hull_idx(pts, hull)
+    return hull
diff --git a/simba/sandbox/convex_hull_area.py b/simba/sandbox/convex_hull_area.py
new file mode 100644
index 000000000..44fd775f2
--- /dev/null
+++ b/simba/sandbox/convex_hull_area.py
@@ -0,0 +1,42 @@
+from typing import Optional
+
+import cupy as cp
+import numpy as np
+
+from simba.utils.checks import check_float, check_valid_array
+from simba.utils.enums import Formats
+
+
+def poly_area(data: np.ndarray,
+              pixels_per_mm: Optional[float] = 1.0,
+              batch_size: Optional[int] = int(0.5e+7)) -> np.ndarray:
+
+    """
+    Compute the area of a polygon using GPU acceleration.
+
+    This function calculates the area of polygons defined by sets of points in a 3D array.
+    Each 2D slice along the first dimension represents a polygon, with each row corresponding
+    to a point in the polygon and each column representing the x and y coordinates.
+
+    The computation is done in batches to handle large datasets efficiently.
+
+    :param data: A 3D numpy array of shape (N, M, 2), where N is the number of polygons, M is the number of points per polygon, and 2 represents the x and y coordinates.
+    :param pixels_per_mm: Optional scaling factor to convert the area from pixels squared to square millimeters. Default is 1.0.
+    :param batch_size: Optional batch size for processing the data in chunks to fit in memory. Default is 0.5e+7.
+    :return: A 1D numpy array of shape (N,) containing the computed area of each polygon in square millimeters.
+    """
+
+    check_valid_array(data=data, source=f'{poly_area.__name__} data', accepted_ndims=(3,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_float(name=f'{poly_area.__name__} pixels_per_mm', min_value=10e-16, value=pixels_per_mm)
+    results = cp.full((data.shape[0]), fill_value=cp.nan, dtype=cp.float32)  # float32: an int32 array cannot hold the NaN fill value
+    for l in range(0, data.shape[0], batch_size):
+        r = l + batch_size
+        x = cp.asarray(data[l:r, :, 0])
+        y = cp.asarray(data[l:r, :, 1])
+        x_r = cp.roll(x, shift=1, axis=1)
+        y_r = cp.roll(y, shift=1, axis=1)
+        # Shoelace formula: area = 0.5 * |sum(x_i * y_{i-1}) - sum(y_i * x_{i-1})|
+        dot_xy_roll_y = cp.sum(x * y_r, axis=1)
+        dot_y_roll_x = cp.sum(y * x_r, axis=1)
+        results[l:r] = (0.5 * cp.abs(dot_xy_roll_y - dot_y_roll_x)) / (pixels_per_mm ** 2)  # divide by pixels_per_mm squared to convert px^2 to mm^2
+
+    return results.get()
diff --git a/simba/sandbox/corner_distance.py b/simba/sandbox/corner_distance.py
new file mode 100644
index 000000000..db9d803e9
--- /dev/null
+++ b/simba/sandbox/corner_distance.py
@@ -0,0 +1,64 @@
+import numpy as np
+from simba.utils.read_write import read_df
+
+def img_edge_distances(data: np.ndarray,
+                       pixels_per_mm: float,
+                       img_resolution: np.ndarray,
+                       time_window: float,
+                       fps: int) -> np.ndarray:
+
+    """
+    Calculate the distances from a set of points to the corners of an image over a specified time window.
+
+    This function computes the average distances from given coordinates to the four corners (top-left, top-right,
+    bottom-right, bottom-left) of an image. The distances are calculated for points within a specified time window,
+    and the results are converted to millimeters using the pixel-to-mm conversion factor.
+
+    :param np.ndarray data: 3d array of size len(frames) x N x 2 with body-part coordinates.
+    :param np.ndarray img_resolution: Resolution of video in WxH format.
+    :param float pixels_per_mm: Pixels per millimeter of recorded video.
+    :param int fps: FPS of the recorded video.
+    :param float time_window: Rolling time-window as a float in seconds. E.g., ``0.2``.
+    :returns np.ndarray: Size data.shape[0] x 4 array with millimeter distances from TOP LEFT, TOP RIGHT, BOTTOM RIGHT, BOTTOM LEFT.
+    :rtype: np.ndarray
+
+    :example I:
+    >>> data = np.array([[0, 0], [758, 540], [0, 540], [748, 540]])
+    >>> img_edge_distances(data=data, pixels_per_mm=2.13, img_resolution=np.array([748, 540]), time_window=1.0, fps=1)
+
+    :example II:
+    >>> data = read_df(file_path=FILE_PATH, file_type='csv', usecols=['Nose_x', 'Nose_y', 'Tail_base_x', 'Tail_base_y'])
+    >>> data = data.values.reshape(len(data), 2, 2)
+    >>> img_edge_distances(data=data, pixels_per_mm=2.13, img_resolution=np.array([748, 540]), time_window=1.0, fps=1)
+
+    """
+
+    results = np.full((data.shape[0], 4), np.nan)
+    window_size = int(time_window * fps)
+    for r in range(window_size, data.shape[0]+1):
+        l = r - window_size
+        w_data = data[l:r].reshape(-1, 2)
+        w_distances = np.full((4, w_data.shape[0]), np.nan)
+        for idx in range(w_data.shape[0]):
+            w_distances[0, idx] = np.linalg.norm(w_data[idx] - np.array([0, 0]))
+            w_distances[1, idx] = np.linalg.norm(w_data[idx] - np.array([img_resolution[0], 0]))
+            w_distances[2, idx] = np.linalg.norm(w_data[idx] - np.array([img_resolution[0], img_resolution[1]]))
+            w_distances[3, idx] = np.linalg.norm(w_data[idx] - np.array([0, img_resolution[1]]))
+        for i in range(4):
+            results[r-1][i] = np.mean(w_distances[i]) / pixels_per_mm
+
+    return results.astype(np.float32)
+
+
+FILE_PATH = r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\501_MA142_Gi_CNO_0514.csv"
+
+# Ad-hoc test calls commented out: they depend on a machine-specific file path and would execute on import.
+# data = read_df(file_path=FILE_PATH, file_type='csv', usecols=['Nose_x', 'Nose_y', 'Tail_base_x', 'Tail_base_y'])
+# data = data.values.reshape(len(data), 2, 2)
+# data = np.array([[0, 0], [758, 540], [0, 540], [748, 540]])
+# img_edge_distances(data=data, pixels_per_mm=2.13, img_resolution=np.array([748, 540]), time_window=1.0, fps=1)
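+# A minimal sanity check (added for illustration; the expected values are hand-computed assumptions,
+# not part of the original sandbox code). A point at the image origin (0, 0) of a 748 x 540 image lies
+# 0 px from the top-left corner, 748 px from the top-right, sqrt(748**2 + 540**2) ~= 922.55 px from the
+# bottom-right, and 540 px from the bottom-left. With a one-frame window and pixels_per_mm=1.0,
+# img_edge_distances should reproduce those values directly:
+point = np.array([[[0, 0]]])  # 1 frame x 1 body-part x (x, y)
+d = img_edge_distances(data=point, pixels_per_mm=1.0, img_resolution=np.array([748, 540]), time_window=1.0, fps=1)
+np.testing.assert_allclose(d[0], [0.0, 748.0, 922.55, 540.0], atol=0.1)  # TL, TR, BR, BL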
diff --git a/simba/sandbox/count_values_in_range_gpu.py b/simba/sandbox/count_values_in_range_gpu.py
new file mode 100644
index 000000000..c17c7e2f5
--- /dev/null
+++ b/simba/sandbox/count_values_in_range_gpu.py
@@ -0,0 +1,53 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 256
+
+@cuda.jit
+def _get_values_in_range_kernel(values, ranges, results):
+    i = cuda.grid(1)
+    if i >= values.shape[0]:
+        return
+    for j in range(ranges.shape[0]):
+        l, u = ranges[j][0], ranges[j][1]
+        cnt = 0
+        for k in range(values.shape[1]):  # indexed loop: iterate the columns explicitly inside the device kernel
+            if values[i, k] >= l and values[i, k] <= u:
+                cnt += 1
+        results[i, j] = cnt
+
+
+def count_values_in_ranges(x: np.ndarray, r: np.ndarray) -> np.ndarray:
+    """
+    Counts the number of values in each feature within specified ranges for each row in a 2D array using CUDA.
+
+    .. image:: _static/img/get_euclidean_distance_cuda.png
+       :width: 500
+       :align: center
+
+    :param np.ndarray x: 2d array with feature values.
+    :param np.ndarray r: 2d array with lower and upper boundaries.
+    :return np.ndarray: 2d array of size len(x) x len(r) with the counts of values in each feature range (inclusive).
+
+    :example:
+    >>> x = np.random.randint(1, 11, (10, 10)).astype(np.int8)
+    >>> r = np.array([[1, 6], [6, 11]])
+    >>> r_x = count_values_in_ranges(x=x, r=r)
+    """
+
+    x = np.ascontiguousarray(x).astype(np.float32)
+    r = np.ascontiguousarray(r).astype(np.float32)
+    n, m = x.shape[0], r.shape[0]
+    values_dev = cuda.to_device(x)
+    ranges_dev = cuda.to_device(r)
+    results = cuda.device_array((n, m), dtype=np.int32)
+    bpg = (n + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _get_values_in_range_kernel[bpg, THREADS_PER_BLOCK](values_dev, ranges_dev, results)
+    results = results.copy_to_host()
+    cuda.current_context().memory_manager.deallocations.clear()
+    return results
\ No newline at end of file
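+# Hypothetical CPU cross-check (added for illustration, not part of the original sandbox code; requires
+# a CUDA-capable GPU, like the function above). The GPU counts should match a plain NumPy count of the
+# values falling inside each inclusive range:
+# x = np.random.randint(1, 11, (10, 10)).astype(np.int8)
+# r = np.array([[1, 6], [6, 11]])
+# expected = np.stack([((x >= lo) & (x <= hi)).sum(axis=1) for lo, hi in r], axis=1)
+# np.testing.assert_array_equal(count_values_in_ranges(x=x, r=r), expected)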
diff --git a/simba/sandbox/create_average_cupy.py b/simba/sandbox/create_average_cupy.py
new file mode 100644
index 000000000..409473653
--- /dev/null
+++ b/simba/sandbox/create_average_cupy.py
@@ -0,0 +1,108 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+
+import os
+from typing import Optional, Union
+
+import cupy as cp
+import cv2
+import numpy as np
+
+from simba.utils.checks import (check_file_exist_and_readable,
+                                check_if_dir_exists,
+                                check_if_string_value_is_valid_video_timestamp,
+                                check_int, check_nvidea_gpu_available,
+                                check_that_hhmmss_start_is_before_end)
+from simba.utils.data import find_frame_numbers_from_time_stamp
+from simba.utils.errors import FFMPEGCodecGPUError, InvalidInputError
+from simba.utils.printing import stdout_success
+from simba.utils.read_write import (
+    check_if_hhmmss_timestamp_is_valid_part_of_video, get_fn_ext,
+    get_video_meta_data, read_img_batch_from_video_gpu)
+
+
+def create_average_frm(video_path: Union[str, os.PathLike],
+                       start_frm: Optional[int] = None,
+                       end_frm: Optional[int] = None,
+                       start_time: Optional[str] = None,
+                       end_time: Optional[str] = None,
+                       save_path: Optional[Union[str, os.PathLike]] = None,
+                       batch_size: Optional[int] = 3000,
+                       verbose: Optional[bool] = False) -> Union[None, np.ndarray]:
+
+    """
+    Computes the average frame, using GPU acceleration, from a specified range of frames or time interval in a video file.
+    This average frame is typically used for background subtraction.
+
+    The function reads frames from the video, calculates their average, and optionally saves the result
+    to a specified file. If `save_path` is provided, the average frame is saved as an image file;
+    otherwise, the average frame is returned as a NumPy array.
+
+    :param Union[str, os.PathLike] video_path: The path to the video file from which to extract frames.
+    :param Optional[int] start_frm: The starting frame number (inclusive). Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both.
+    :param Optional[int] end_frm: The ending frame number (exclusive).
+    :param Optional[str] start_time: The start time in the format 'HH:MM:SS' from which to begin extracting frames.
+    :param Optional[str] end_time: The end time in the format 'HH:MM:SS' up to which frames should be extracted.
+    :param Optional[Union[str, os.PathLike]] save_path: The path where the average frame image will be saved. If `None`, the average frame is returned as a NumPy array.
+    :param Optional[int] batch_size: The number of frames to process in each batch. Default is 3000. Increase if your RAM allows it.
+    :param Optional[bool] verbose: If `True`, prints progress and informational messages during execution.
+    :return: Returns `None` if the result is saved to `save_path`. Otherwise, returns the average frame as a NumPy array.
+
+    :example:
+    >>> create_average_frm(video_path=r"C:\troubleshooting\RAT_NOR\project_folder\videos\2022-06-20_NOB_DOT_4_downsampled.mp4", verbose=True, start_frm=0, end_frm=9000)
+    """
+
+    def average_3d_stack(image_stack: np.ndarray) -> np.ndarray:
+        num_frames, height, width, _ = image_stack.shape
+        image_stack = cp.array(image_stack).astype(cp.float32)
+        img = cp.clip(cp.sum(image_stack, axis=0) / num_frames, 0, 255).astype(cp.uint8)
+        return img.get()
+
+    if not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidia-smi returning None)", source=create_average_frm.__name__)
+
+    if ((start_frm is not None) or (end_frm is not None)) and ((start_time is not None) or (end_time is not None)):
+        raise InvalidInputError(msg='Pass start_frm and end_frm OR start_time and end_time, not both', source=create_average_frm.__name__)
+    elif type(start_frm) != type(end_frm):
+        raise InvalidInputError(msg='Pass both start_frm and end_frm, or neither', source=create_average_frm.__name__)
+    elif type(start_time) != type(end_time):
+        raise InvalidInputError(msg='Pass both start_time and end_time, or neither', source=create_average_frm.__name__)
+    if save_path is not None:
+        check_if_dir_exists(in_dir=os.path.dirname(save_path), source=create_average_frm.__name__)
+    check_file_exist_and_readable(file_path=video_path)
+    video_meta_data = get_video_meta_data(video_path=video_path)
+    video_name = get_fn_ext(filepath=video_path)[1]
+    if verbose:
+        print(f'Getting average frame from {video_name}...')
+    if (start_frm is not None) and (end_frm is not None):
+        check_int(name='start_frm', value=start_frm, min_value=0, max_value=video_meta_data['frame_count'])
+        check_int(name='end_frm', value=end_frm, min_value=0, max_value=video_meta_data['frame_count'])
+        if start_frm > end_frm:
+            raise InvalidInputError(msg=f'Start frame ({start_frm}) has to be before end frame ({end_frm}).', source=create_average_frm.__name__)
+        frame_ids = list(range(start_frm, end_frm))
+    elif (start_time is not None) and (end_time is not None):
+        check_if_string_value_is_valid_video_timestamp(value=start_time, name=create_average_frm.__name__)
+        check_if_string_value_is_valid_video_timestamp(value=end_time, name=create_average_frm.__name__)
+        check_that_hhmmss_start_is_before_end(start_time=start_time, end_time=end_time, name=create_average_frm.__name__)
+        check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=start_time, video_path=video_path)
+        frame_ids = find_frame_numbers_from_time_stamp(start_time=start_time, end_time=end_time, fps=video_meta_data['fps'])
+    else:
+        frame_ids = list(range(0, video_meta_data['frame_count']))
+    frame_ids = [frame_ids[i:i+batch_size] for i in range(0, len(frame_ids), batch_size)]
+    avg_imgs = []
+    for batch_cnt in range(len(frame_ids)):
+        start_idx, end_idx = frame_ids[batch_cnt][0], frame_ids[batch_cnt][-1]
+        if start_idx == end_idx:
+            continue
+        imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=start_idx, end_frm=end_idx, verbose=verbose)
+        avg_imgs.append(average_3d_stack(image_stack=np.stack(list(imgs.values()), axis=0)))  # read_img_batch_from_video_gpu returns a frame-index -> image dict; stack its values (as in the CUDA variant of this function)
+    avg_img = average_3d_stack(image_stack=np.stack(avg_imgs, axis=0))
+    if save_path is not None:
+        cv2.imwrite(save_path, avg_img)
+        if verbose:
+            stdout_success(msg=f'Saved average frame at {save_path}', source=create_average_frm.__name__)
+    else:
+        return avg_img
\ No newline at end of file
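+# A hedged usage sketch (added for illustration; the path and `frame` variable below are hypothetical).
+# Averaging a time interval rather than a frame range, and keeping the result in memory for later
+# background subtraction:
+# avg = create_average_frm(video_path='/path/to/video.mp4', start_time='00:00:00', end_time='00:00:20', verbose=True)
+# foreground = cv2.absdiff(frame, avg)  # e.g., subtract the average background from an incoming frame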
diff --git a/simba/sandbox/create_average_frame_cuda.py b/simba/sandbox/create_average_frame_cuda.py
new file mode 100644
index 000000000..6820841bf
--- /dev/null
+++ b/simba/sandbox/create_average_frame_cuda.py
@@ -0,0 +1,152 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import os
+from typing import Optional, Union
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+from copy import deepcopy
+
+import cupy as cp
+import cv2
+import numpy as np
+from numba import cuda
+
+from simba.utils.checks import (check_file_exist_and_readable,
+                                check_if_dir_exists,
+                                check_if_string_value_is_valid_video_timestamp,
+                                check_if_valid_img, check_instance, check_int,
+                                check_nvidea_gpu_available,
+                                check_that_hhmmss_start_is_before_end)
+from simba.utils.data import find_frame_numbers_from_time_stamp
+from simba.utils.errors import FFMPEGCodecGPUError, InvalidInputError
+from simba.utils.printing import stdout_success
+from simba.utils.read_write import (
+    check_if_hhmmss_timestamp_is_valid_part_of_video, get_fn_ext,
+    get_video_meta_data, read_img_batch_from_video_gpu)
+
+
+def average_3d_stack_cupy(image_stack: np.ndarray) -> np.ndarray:
+    num_frames, height, width, _ = image_stack.shape
+    image_stack = cp.array(image_stack).astype(cp.float32)
+    img = cp.clip(cp.sum(image_stack, axis=0) / num_frames, 0, 255).astype(cp.uint8)
+    return img.get()
+
+# Kernel renamed from _average_3d_stack_cuda: the original name collided with the host wrapper below,
+# so the wrapper's kernel launch resolved to the wrapper itself. Grid axes and bound checks are also
+# aligned so the first axis walks image rows and the second walks columns.
+@cuda.jit()
+def _average_3d_stack_kernel(data, results):
+    r, c, ch = cuda.grid(3)  # row (height), column (width), color channel
+    if r >= data.shape[1] or c >= data.shape[2] or ch >= data.shape[3]:
+        return
+    sum_value = 0.0
+    for n in range(data.shape[0]):
+        sum_value += data[n, r, c, ch]
+    results[r, c, ch] = sum_value / data.shape[0]
+
+def _average_3d_stack_cuda(image_stack: np.ndarray) -> np.ndarray:
+    check_instance(source=_average_3d_stack_cuda.__name__, instance=image_stack, accepted_types=(np.ndarray,))
+    check_if_valid_img(data=image_stack[0], source=_average_3d_stack_cuda.__name__)
+    if image_stack.ndim != 4:
+        return image_stack
+    x = np.ascontiguousarray(image_stack)
+    x_dev = cuda.to_device(x)
+    results = cuda.device_array((x.shape[1], x.shape[2], x.shape[3]), dtype=np.float32)
+    grid_r = (x.shape[1] + 16 - 1) // 16
+    grid_c = (x.shape[2] + 16 - 1) // 16
+    threads_per_block = (16, 16, 1)
+    blocks_per_grid = (grid_r, grid_c, x.shape[3])
+    _average_3d_stack_kernel[blocks_per_grid, threads_per_block](x_dev, results)
+    results = results.copy_to_host()
+    return results
+
+
+def create_average_frm_cuda(video_path: Union[str, os.PathLike],
+                            start_frm: Optional[int] = None,
+                            end_frm: Optional[int] = None,
+                            start_time: Optional[str] = None,
+                            end_time: Optional[str] = None,
+                            save_path: Optional[Union[str, os.PathLike]] = None,
+                            batch_size: Optional[int] = 6000,
+                            verbose: Optional[bool] = False) -> Union[None, np.ndarray]:
+    """
+    Computes the average frame, using GPU acceleration, from a specified range of frames or time interval in a video file.
+    This average frame is typically used for background subtraction.
+
+    The function reads frames from the video, calculates their average, and optionally saves the result
+    to a specified file. If `save_path` is provided, the average frame is saved as an image file;
+    otherwise, the average frame is returned as a NumPy array.
+
+    :param Union[str, os.PathLike] video_path: The path to the video file from which to extract frames.
+    :param Optional[int] start_frm: The starting frame number (inclusive).
Either `start_frm`/`end_frm` or `start_time`/`end_time` must be provided, but not both.
+    :param Optional[int] end_frm: The ending frame number (exclusive).
+    :param Optional[str] start_time: The start time in the format 'HH:MM:SS' from which to begin extracting frames.
+    :param Optional[str] end_time: The end time in the format 'HH:MM:SS' up to which frames should be extracted.
+    :param Optional[Union[str, os.PathLike]] save_path: The path where the average frame image will be saved. If `None`, the average frame is returned as a NumPy array.
+    :param Optional[int] batch_size: The number of frames to process in each batch. Default is 6000. Increase if your RAM allows it.
+    :param Optional[bool] verbose: If `True`, prints progress and informational messages during execution.
+    :return: Returns `None` if the result is saved to `save_path`. Otherwise, returns the average frame as a NumPy array.
+
+    :example:
+    >>> create_average_frm_cuda(video_path=r"C:\troubleshooting\RAT_NOR\project_folder\videos\2022-06-20_NOB_DOT_4_downsampled.mp4", verbose=True, start_frm=0, end_frm=9000)
+    """
+
+    if not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidia-smi returning None)",
+                                  source=create_average_frm_cuda.__name__)
+
+    if ((start_frm is not None) or (end_frm is not None)) and ((start_time is not None) or (end_time is not None)):
+        raise InvalidInputError(msg='Pass start_frm and end_frm OR start_time and end_time, not both',
+                                source=create_average_frm_cuda.__name__)
+    elif type(start_frm) != type(end_frm):
+        raise InvalidInputError(msg='Pass both start_frm and end_frm, or neither', source=create_average_frm_cuda.__name__)
+    elif type(start_time) != type(end_time):
+        raise InvalidInputError(msg='Pass both start_time and end_time, or neither', source=create_average_frm_cuda.__name__)
+    if save_path is not None:
+        check_if_dir_exists(in_dir=os.path.dirname(save_path), source=create_average_frm_cuda.__name__)
+    check_file_exist_and_readable(file_path=video_path)
+    video_meta_data = get_video_meta_data(video_path=video_path)
+    video_name = get_fn_ext(filepath=video_path)[1]
+    if verbose:
+        print(f'Getting average frame from {video_name}...')
+    if (start_frm is not None) and (end_frm is not None):
+        check_int(name='start_frm', value=start_frm, min_value=0, max_value=video_meta_data['frame_count'])
+        check_int(name='end_frm', value=end_frm, min_value=0, max_value=video_meta_data['frame_count'])
+        if start_frm > end_frm:
+            raise InvalidInputError(msg=f'Start frame ({start_frm}) has to be before end frame ({end_frm}).',
+                                    source=create_average_frm_cuda.__name__)
+        frame_ids = list(range(start_frm, end_frm))
+    elif (start_time is not None) and (end_time is not None):
+        check_if_string_value_is_valid_video_timestamp(value=start_time, name=create_average_frm_cuda.__name__)
+        check_if_string_value_is_valid_video_timestamp(value=end_time, name=create_average_frm_cuda.__name__)
+        check_that_hhmmss_start_is_before_end(start_time=start_time, end_time=end_time,
+                                              name=create_average_frm_cuda.__name__)
+        check_if_hhmmss_timestamp_is_valid_part_of_video(timestamp=start_time, video_path=video_path)
+        frame_ids = find_frame_numbers_from_time_stamp(start_time=start_time, end_time=end_time,
+                                                       fps=video_meta_data['fps'])
+    else:
+        frame_ids = list(range(0, video_meta_data['frame_count']))
+    frame_ids = [frame_ids[i:i + batch_size] for i in range(0, len(frame_ids), batch_size)]
+    avg_imgs = []
+    for batch_cnt in range(len(frame_ids)):
+        start_idx, end_idx = frame_ids[batch_cnt][0], frame_ids[batch_cnt][-1]
+        if start_idx == end_idx:
+            continue
+        imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=start_idx, end_frm=end_idx, verbose=verbose)
+        avg_imgs.append(_average_3d_stack_cuda(image_stack=np.stack(list(imgs.values()), axis=0)))
+    avg_img = _average_3d_stack_cuda(image_stack=np.stack(avg_imgs, axis=0))
+    if save_path is not None:
+        cv2.imwrite(save_path, avg_img)
+        if verbose:
+            stdout_success(msg=f'Saved average frame at {save_path}', source=create_average_frm_cuda.__name__)
+    else:
+        return avg_img
\ No newline at end of file
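+# A minimal correctness sketch (added for illustration; assumes a CUDA-capable GPU). The CUDA wrapper
+# should agree with a plain NumPy mean over the stack axis, up to float32 rounding:
+# stack = np.random.randint(0, 255, (10, 64, 48, 3)).astype(np.uint8)
+# np.testing.assert_allclose(_average_3d_stack_cuda(image_stack=stack), stack.mean(axis=0), atol=0.5)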
diff --git a/simba/sandbox/create_gif.py b/simba/sandbox/create_gif.py
new file mode 100644
index 000000000..105fb2644
--- /dev/null
+++ b/simba/sandbox/create_gif.py
@@ -0,0 +1,270 @@
+import os
+import subprocess
+import threading
+from tkinter import *
+from typing import Optional, Union
+
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon,
+                                        DropDownMenu, Entry_Box, FileSelect)
+from simba.utils.checks import (check_ffmpeg_available,
+                                check_file_exist_and_readable,
+                                check_int, check_nvidea_gpu_available)
+from simba.utils.enums import Keys, Links, Options
+from simba.utils.errors import FFMPEGCodecGPUError, FrameRangeError
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import get_fn_ext, get_video_meta_data
+from simba.utils.warnings import FrameRangeWarning
+from simba.video_processors.px_to_mm import get_coordinates_nilsson
+
+
+def gif_creator(file_path: Union[str, os.PathLike],
+                start_time: int,
+                duration: int,
+                width: Optional[int] = None,
+                quality: Optional[int] = 100,
+                fps: Optional[int] = 15,
+                gpu: Optional[bool] = False) -> None:
+    """
+    Create a sample gif from a video file. The result is stored in the same directory as the
+    input file with the ``.gif`` file-ending.
+
+    .. note::
+       The height is auto-computed to retain the aspect ratio.
+
+    :param Union[str, os.PathLike] file_path: Path to video file.
+    :param int start_time: Start time of the gif in relation to the video in seconds.
+    :param int duration: Duration of the gif in seconds.
+    :param int width: Width of the gif. If None, then retains the width and height of the input video.
+    :param Optional[int] quality: Output quality as a percentage (1-100), mapped to the gif palette size. Default 100.
+    :param int fps: Frames per second of the gif.
+    :param Optional[bool] gpu: If True, use NVIDIA GPU codecs. Default False.
+
+    :example:
+    >>> _ = gif_creator(file_path='project_folder/videos/Video_1.avi', start_time=5, duration=10, width=600)
+    """
+
+    check_ffmpeg_available(raise_error=True)
+    if gpu and not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available (as evaluated by nvidia-smi returning None)", source=gif_creator.__name__)
+    timer = SimbaTimer(start=True)
+    check_file_exist_and_readable(file_path=file_path)
+    check_int(name="Start time", value=start_time, min_value=0)
+    check_int(name="Duration", value=duration, min_value=1)
+    video_meta_data = get_video_meta_data(file_path)
+    if width is None:
+        width = video_meta_data['width']
+    check_int(name="Width", value=width, min_value=2)
+    check_int(name="FPS", value=fps, min_value=1)
+    check_int(name="QUALITY", value=quality, min_value=1, max_value=100)
+    quality = int((quality - 1) / (100 - 1) * (256 - 1) + 1)  # map the 1-100% quality scale to a 1-256 gif palette size
+    if width % 2 != 0: width -= 1  # scale filters require an even width
+    if quality == 1: quality += 1  # palettegen requires max_colors >= 2
+    if (int(start_time) + int(duration)) > video_meta_data["video_length_s"]:
+        raise FrameRangeError(msg=f'The end of the gif (start time: {start_time} + duration: {duration}) is longer than the {file_path} video: {video_meta_data["video_length_s"]}s', source=gif_creator.__name__)
+    dir, file_name, ext = get_fn_ext(filepath=file_path)
+    save_name = os.path.join(dir, f"{file_name}.gif")
+    if gpu:
+        command = f'ffmpeg -hwaccel auto -c:v h264_cuvid -ss {start_time} -i "{file_path}" -to {duration} -vf "fps={fps},scale={width}:-1" -c:v gif -pix_fmt rgb24 "{save_name}" -y'
+    else:
+        command = f'ffmpeg -ss {start_time} -t {duration} -i "{file_path}" -filter_complex "[0:v] fps={fps},scale=w={width}:h=-1:flags=lanczos,split [a][b];[a] palettegen=stats_mode=single:max_colors={quality} [p];[b][p] paletteuse=dither=bayer:bayer_scale=3" "{save_name}" -loglevel error -stats -hide_banner -y'
+    print("Creating gif sample... ")
+    subprocess.call(command, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"SIMBA COMPLETE: Video converted!
{save_name} generated!", elapsed_time=timer.elapsed_time_str, source=gif_creator.__name__) + +#gif_creator(file_path='/Users/simon/Desktop/avg_frm_test/F1 HAB.mp4', start_time=0, duration=5, width=None, quality=100) + +# +# +class CreateGIFPopUP(PopUpMixin): + def __init__(self): + PopUpMixin.__init__(self, title="CREATE GIF FROM VIDEO", size=(600, 400)) + settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video = FileSelect(settings_frm, "VIDEO PATH: ", title="Select a video file", file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)], lblwidth=40) + self.start_time_entry_box = Entry_Box(settings_frm, "START TIME (s):", "40", validation="numeric") + self.duration_entry_box = Entry_Box(settings_frm, "DURATION (s): ", "40", validation="numeric") + resolution_widths = Options.RESOLUTION_OPTIONS_2.value + self.resolution_dropdown = DropDownMenu(settings_frm, "GIF WIDTH (ASPECT RATIO RETAINED):", resolution_widths, "40") + self.quality_dropdown = DropDownMenu(settings_frm, "GIF QUALITY (%):", list(range(1, 101, 1)), "40") + fps_lst = list(range(1, 101, 1)) + fps_lst.insert(0, 'AUTO') + self.fps_dropdown = DropDownMenu(settings_frm, "GIF FPS:", fps_lst, "40") + self.gpu_var = BooleanVar() + gpu_cb = Checkbutton(settings_frm, text="USE GPU (decreased runtime)", variable=self.gpu_var) + self.quality_dropdown.setChoices(100) + self.resolution_dropdown.setChoices('AUTO') + self.fps_dropdown.setChoices('AUTO') + settings_frm.grid(row=0, sticky=NW) + self.selected_video.grid(row=0, sticky=NW, pady=5) + self.start_time_entry_box.grid(row=1, sticky=NW) + self.duration_entry_box.grid(row=2, sticky=NW) + self.resolution_dropdown.grid(row=3, sticky=NW) + self.quality_dropdown.grid(row=4, sticky=NW) + self.fps_dropdown.grid(row=5, sticky=NW) + gpu_cb.grid(row=6, column=0, sticky=NW) + self.create_run_frm(run_function=self.run) + self.main_frm.mainloop() + + def run(self): + video_path = self.selected_video.file_path + width = self.resolution_dropdown.getChoices() + start_time = self.start_time_entry_box.entry_get + duration = self.duration_entry_box.entry_get + fps = self.fps_dropdown.getChoices() + quality = int(self.quality_dropdown.getChoices()) + gpu = self.gpu_var.get() + check_ffmpeg_available() + if gpu: check_nvidea_gpu_available() + check_file_exist_and_readable(file_path=video_path) + video_meta_data = get_video_meta_data(video_path=video_path) + if width == 'AUTO': width = video_meta_data['width'] + else: width = int(width) + if fps == 'AUTO': fps = int(video_meta_data['fps']) + else: fps = int(fps) + if fps > int(video_meta_data['fps']): + FrameRangeWarning(msg=f'The chosen FPS ({fps}) is higher than the video FPS ({video_meta_data["fps"]}). 
The video FPS will be used', source=self.__class__.__name__)
+            fps = int(video_meta_data['fps'])
+        check_int(name='start_time', value=start_time, max_value=video_meta_data['video_length_s'], min_value=0)
+        max_duration = video_meta_data['video_length_s'] - int(start_time)
+        check_int(name='duration', value=duration, max_value=max_duration, min_value=1)
+
+        # Pass the callable and its kwargs to Thread: calling gif_creator() here would run it on the main thread and hand Thread a None target.
+        threading.Thread(target=gif_creator, kwargs=dict(file_path=video_path,
+                                                         start_time=int(start_time),
+                                                         duration=int(duration),
+                                                         width=width,
+                                                         gpu=gpu,
+                                                         fps=fps,
+                                                         quality=int(quality))).start()
+
+#CreateGIFPopUP()
+
+
+class CalculatePixelsPerMMInVideoPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="CALCULATE PIXELS PER MILLIMETER IN VIDEO", size=(550, 550))
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.video_path = FileSelect(settings_frm, "Select a video file: ", title="Select a video file", file_types=[("VIDEO", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)], lblwidth=30)
+        self.known_distance = Entry_Box(settings_frm, "Known real-life metric distance (mm): ", "30", validation="numeric")
+        run_btn = Button(settings_frm, text="GET PIXELS PER MILLIMETER", command=lambda: self.run())
+        settings_frm.grid(row=0, column=0, pady=10, sticky=NW)
+        self.video_path.grid(row=0, column=0, pady=10, sticky=NW)
+        self.known_distance.grid(row=1, column=0, pady=10, sticky=NW)
+        run_btn.grid(row=2, column=0, pady=10, sticky=NW)
+        self.main_frm.mainloop()
+
+    def run(self):
+        check_file_exist_and_readable(file_path=self.video_path.file_path)
+        check_int(name="Distance", value=self.known_distance.entry_get, min_value=1)
+        _ = get_video_meta_data(video_path=self.video_path.file_path)
+        mm_cnt = get_coordinates_nilsson(self.video_path.file_path, self.known_distance.entry_get)
+        print(f"ONE (1) PIXEL REPRESENTS {round(mm_cnt, 4)} MILLIMETERS IN VIDEO {os.path.basename(self.video_path.file_path)}.")
+
+#CalculatePixelsPerMMInVideoPopUp()
diff --git a/simba/sandbox/create_import_pose_menu.py b/simba/sandbox/create_import_pose_menu.py
new file mode 100644
index 000000000..bd64f244d
--- /dev/null
+++ b/simba/sandbox/create_import_pose_menu.py
@@ -0,0 +1,342 @@
+__author__ = "Simon Nilsson"
+
+import os
+from tkinter import *
+from typing import Callable, Dict, List, Optional, Tuple, Union
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.pose_importers.trk_importer import TRKImporter
+from simba.pose_importers.dlc_importer_csv import import_dlc_csv_data
+from simba.pose_importers.import_mars import MarsImporter
+from simba.pose_importers.madlc_importer import MADLCImporterH5
+from simba.pose_importers.read_DANNCE_mat import (import_DANNCE_file,
+                                                  import_DANNCE_folder)
+from simba.pose_importers.sleap_csv_importer import SLEAPImporterCSV
+from simba.pose_importers.sleap_h5_importer import SLEAPImporterH5
+from simba.pose_importers.sleap_slp_importer import SLEAPImporterSLP
+from simba.ui.tkinter_functions import (DropDownMenu, Entry_Box, FileSelect, FolderSelect)
+from simba.utils.checks import (check_int, check_str, check_instance)
+from simba.utils.enums import ConfigKey, Formats, Options, Dtypes
+from simba.utils.errors import InvalidInputError
+
+from simba.utils.read_write import read_config_file
+
+
+GAUSSIAN = 'Gaussian'
+SAVITZKY_GOLAY = 'Savitzky Golay' +INTERPOLATION_MAP = {'Animal(s)': 'animals', 'Body-parts': 'body-parts'} +SMOOTHING_MAP = {'Savitzky Golay': 'savitzky-golay', 'Gaussian': 'gaussian'} + +FRAME_DIR_IMPORT_TITLES = {'CSV (DLC/DeepPoseKit)': 'IMPORT DLC CSV DIRECTORY', 'MAT (DANNCE 3D)': 'IMPORT DANNCE MAT DIRECTORY', 'JSON (BENTO)': 'IMPORT MARS JSON DIRECTORY'} +FRAME_FILE_IMPORT_TITLES = {'CSV (DLC/DeepPoseKit)': 'IMPORT DLC CSV FILE', 'MAT (DANNCE 3D)': 'IMPORT DANNCE MAT FILE', 'JSON (BENTO)': 'IMPORT MARS JSON FILE'} +FILE_TYPES = {'CSV (DLC/DeepPoseKit)': '*.csv', 'MAT (DANNCE 3D)': '*.mat', 'JSON (BENTO)': '*.json'} + + +class ImportPoseFrame(ConfigReader, PopUpMixin): + + """ + .. image:: _static/img/ImportPoseFrame.webp + :width: 800 + :align: center + + :example: + >>> _ = ImportPoseFrame(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini') + """ + + def __init__(self, + parent_frm: Optional[Union[Frame, Canvas]] = None, + config_path: Optional[Union[str, os.PathLike]] = None, + idx_row: Optional[int] = 0, + idx_column: Optional[int] = 0): + + if parent_frm is None and config_path is None: + raise InvalidInputError(msg='If parent_frm is None, please pass config_path', source=self.__class__.__name__) + + elif parent_frm is None and config_path is not None: + PopUpMixin.__init__(self, config_path=config_path, title='IMPORT POSE ESTIMATION') + parent_frm = self.main_frm + + check_instance(source=f'{ImportPoseFrame} parent_frm', accepted_types=(Frame, Canvas), instance=parent_frm) + check_int(name=f'{ImportPoseFrame} idx_row', value=idx_row, min_value=0) + check_int(name=f'{ImportPoseFrame} idx_column', value=idx_column, min_value=0) + + self.import_tracking_frm = LabelFrame(parent_frm, text="IMPORT TRACKING DATA", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black") + self.import_tracking_frm.grid(row=idx_row, column=idx_column, sticky=NW) + if config_path is None: + Label(self.import_tracking_frm, text="Please CREATE PROJECT CONFIG before importing tracking data \n").grid(row=0, column=0, sticky=NW) + else: + ConfigReader.__init__(self, config_path=config_path, read_video_info=False) + self.data_type_dropdown = DropDownMenu(self.import_tracking_frm, "DATA TYPE:", Options.IMPORT_TYPE_OPTIONS.value, labelwidth=25, com=self.create_import_menu) + self.data_type_dropdown.setChoices(Options.IMPORT_TYPE_OPTIONS.value[0]) + self.data_type_dropdown.grid(row=0, column=0, sticky=NW) + + self.create_import_menu(data_type_choice=Options.IMPORT_TYPE_OPTIONS.value[0]) + self.import_tracking_frm.grid(row=idx_row, column=idx_column, sticky=NW) + + parent_frm.mainloop() + + def __show_smoothing_entry_box_from_dropdown(self, choice: str): + if (choice == GAUSSIAN) or (choice == SAVITZKY_GOLAY): + self.smoothing_time_eb.grid(row=0, column=1, sticky=E) + else: + self.smoothing_time_eb.grid_forget() + + + def __get_smooth_interpolation_settings(self, + interpolation_settings: str, + smoothing_setting: str, + smoothing_time: Union[str, int]): + + if interpolation_settings != Dtypes.NONE.value: + interpolation_settings = interpolation_settings.split(':') + interpolation_settings = {'type': INTERPOLATION_MAP[interpolation_settings[0]].lower().strip(), 'method': interpolation_settings[1].lower().strip()} + else: + interpolation_settings = None + if smoothing_setting != Dtypes.NONE.value: + check_int(name='SMOOTHING TIME', value=smoothing_time, min_value=1) + smoothing_setting = {'time_window': int(smoothing_time), 'method': 
SMOOTHING_MAP[smoothing_setting]}
+        else:
+            smoothing_setting = None
+
+        return interpolation_settings, smoothing_setting
+
+
+    def __import_dlc_csv_data(self,
+                              interpolation_settings: str,
+                              smoothing_setting: str,
+                              smoothing_time: Union[str, int],
+                              data_path: Union[str, os.PathLike]):
+
+        if not os.path.isfile(data_path) and not os.path.isdir(data_path):
+            raise InvalidInputError(msg=f'{data_path} is NOT a valid path', source=self.__class__.__name__)
+
+        # __get_smooth_interpolation_settings returns (interpolation, smoothing) in that order; unpack accordingly and pass the parsed smoothing dict (not the raw dropdown string) downstream.
+        interpolation_settings, smoothing_settings = self.__get_smooth_interpolation_settings(interpolation_settings, smoothing_setting, smoothing_time)
+        import_dlc_csv_data(config_path=self.config_path,
+                            data_path=data_path,
+                            interpolation_settings=interpolation_settings,
+                            smoothing_settings=smoothing_settings)
+
+    def __multi_animal_run_call(self,
+                                pose_estimation_tool: str,
+                                interpolation_settings: str,
+                                smoothing_settings: str,
+                                smoothing_window: int,
+                                animal_names: Dict[int, Entry_Box],
+                                data_path: Union[str, os.PathLike],
+                                tracking_data_type: Optional[str] = None):
+
+        # if not os.path.isfile(data_path) and not os.path.isdir(data_path):
+        #     raise InvalidInputError(msg=f'{data_path} is NOT a valid path', source=self.__class__.__name__)
+        interpolation_settings, smoothing_settings = self.__get_smooth_interpolation_settings(interpolation_settings, smoothing_settings, smoothing_window)
+        animal_ids = []
+        if len(list(animal_names.items())) == 1: animal_ids.append("Animal_1")
+        else:
+            for animal_cnt, animal_entry_box in animal_names.items():
+                check_str(name=f"ANIMAL {str(animal_cnt)} NAME", value=animal_entry_box.entry_get, allow_blank=False)
+                animal_ids.append(animal_entry_box.entry_get)
+
+        config = read_config_file(config_path=self.config_path)
+        config.set(ConfigKey.MULTI_ANIMAL_ID_SETTING.value, ConfigKey.MULTI_ANIMAL_IDS.value, ",".join(animal_ids))
+        with open(self.config_path, "w") as f: config.write(f)  # write to the config file path; `config` is a parser object, not a path
+
+        if pose_estimation_tool == "H5 (multi-animal DLC)":
+            data_importer = MADLCImporterH5(config_path=self.config_path,
+                                            data_folder=data_path,
+                                            file_type=tracking_data_type,
+                                            id_lst=animal_ids,
+                                            interpolation_settings=interpolation_settings,
+                                            smoothing_settings=smoothing_settings)
+
+        elif pose_estimation_tool == "SLP (SLEAP)":
+            data_importer = SLEAPImporterSLP(project_path=self.config_path,
+                                             data_folder=data_path,
+                                             id_lst=animal_ids,
+                                             interpolation_settings=interpolation_settings,
+                                             smoothing_settings=smoothing_settings)
+
+        elif pose_estimation_tool == "TRK (multi-animal APT)":
+            data_importer = TRKImporter(config_path=self.config_path,
+                                        data_path=data_path,
+                                        animal_id_lst=animal_ids,
+                                        interpolation_method=interpolation_settings,
+                                        smoothing_settings=smoothing_settings)
+
+        elif pose_estimation_tool == "CSV (SLEAP)":
+            data_importer = SLEAPImporterCSV(config_path=self.config_path,
+                                             data_folder=data_path,
+                                             id_lst=animal_ids,
+                                             interpolation_settings=interpolation_settings,
+                                             smoothing_settings=smoothing_settings)
+
+        elif pose_estimation_tool == "H5 (SLEAP)":
+            data_importer = SLEAPImporterH5(config_path=self.config_path,
+                                            data_folder=data_path,
+                                            id_lst=animal_ids,
+                                            interpolation_settings=interpolation_settings,
+                                            smoothing_settings=smoothing_settings)
+        else:
+            raise InvalidInputError(msg=f'pose estimation tool {pose_estimation_tool} not recognized', source=self.__class__.__name__)
+        data_importer.run()
+
+    def __create_animal_names_entry_boxes(self,
+                                          animal_cnt: str) -> None:
+        check_int(name="NUMBER OF ANIMALS", value=animal_cnt, min_value=0)
+        if hasattr(self, "animal_names_frm"):
self.animal_names_frm.destroy() + if not hasattr(self, "multi_animal_id_list"): + self.multi_animal_id_list = [] + for i in range(int(animal_cnt)): + self.multi_animal_id_list.append(f"Animal {i+1}") + self.animal_names_frm = Frame(self.animal_settings_frm, pady=5, padx=5) + self.animal_name_entry_boxes = {} + for i in range(int(animal_cnt)): + self.animal_name_entry_boxes[i + 1] = Entry_Box(self.animal_names_frm, f"Animal {str(i+1)} name: ", "25") + if i <= len(self.multi_animal_id_list) - 1: + self.animal_name_entry_boxes[i + 1].entry_set(self.multi_animal_id_list[i]) + self.animal_name_entry_boxes[i + 1].grid(row=i, column=0, sticky=NW) + self.animal_names_frm.grid(row=1, column=0, sticky=NW) + + def create_import_menu(self, data_type_choice: Literal["CSV (DLC/DeepPoseKit)", "JSON (BENTO)", "H5 (multi-animal DLC)", "SLP (SLEAP)", "CSV (SLEAP)", "H5 (SLEAP)", "TRK (multi-animal APT)", "MAT (DANNCE 3D)"]): + if hasattr(self, "choice_frm"): + self.choice_frm.destroy() + + self.choice_frm = Frame(self.import_tracking_frm) + self.choice_frm.grid(row=1, column=0, sticky=NW) + self.animal_name_entry_boxes = None + self.interpolation_frm = LabelFrame(self.choice_frm, text="INTERPOLATION METHOD", pady=5, padx=5) + self.interpolation_dropdown = DropDownMenu(self.interpolation_frm, "Interpolation method: ", Options.INTERPOLATION_OPTIONS_W_NONE.value, "25") + self.interpolation_dropdown.setChoices(Options.INTERPOLATION_OPTIONS_W_NONE.value[0]) + self.interpolation_frm.grid(row=0, column=0, sticky=NW) + self.interpolation_dropdown.grid(row=0, column=0, sticky=NW) + + self.smoothing_frm = LabelFrame(self.choice_frm, text="SMOOTHING METHOD", pady=5, padx=5) + self.smoothing_dropdown = DropDownMenu(self.smoothing_frm, "Smoothing", Options.SMOOTHING_OPTIONS_W_NONE.value, "25", com=self.__show_smoothing_entry_box_from_dropdown) + self.smoothing_dropdown.setChoices(Options.SMOOTHING_OPTIONS_W_NONE.value[0]) + self.smoothing_time_eb = Entry_Box(self.smoothing_frm, "Smoothing period (milliseconds):", labelwidth="25", width=10, validation="numeric") + self.smoothing_frm.grid(row=1, column=0, sticky=NW) + self.smoothing_dropdown.grid(row=0, column=0, sticky=NW) + + if data_type_choice in ["CSV (DLC/DeepPoseKit)", "MAT (DANNCE 3D)", "JSON (BENTO)"]: # DATA TYPES WHERE NO TRACKS HAVE TO BE SPECIFIED + self.import_directory_frm = LabelFrame(self.choice_frm, text=FRAME_DIR_IMPORT_TITLES[data_type_choice], pady=5, padx=5) + self.import_directory_select = FolderSelect(self.import_directory_frm, "Input data DIRECTORY:", lblwidth=25, initialdir=self.project_path) + self.import_single_frm = LabelFrame(self.choice_frm, text=FRAME_FILE_IMPORT_TITLES[data_type_choice], pady=5, padx=5) + self.import_file_select = FileSelect(self.import_single_frm, "Input data FILE:", lblwidth=25, file_types=[("Pose data file", FILE_TYPES[data_type_choice])]) + + if data_type_choice == "CSV (DLC/DeepPoseKit)": + self.import_dir_btn = Button(self.import_directory_frm, fg="blue", text="Import DLC CSV DIRECTORY to SimBA project", command=lambda: self.__import_dlc_csv_data(interpolation_settings=self.interpolation_dropdown.getChoices(), + smoothing_setting=self.smoothing_dropdown.getChoices(), + smoothing_time=self.smoothing_time_eb.entry_get, + data_path=self.import_directory_select.folder_path)) + self.import_file_btn = Button(self.import_single_frm, fg="blue", text="Import DLC CSV FILE to SimBA project", command=lambda: self.__import_dlc_csv_data(interpolation_settings=self.interpolation_dropdown.getChoices(), + 
smoothing_setting=self.smoothing_dropdown.getChoices(),
+                                                                                                                         smoothing_time=self.smoothing_time_eb.entry_get,
+                                                                                                                         data_path=self.import_file_select.file_path))
+            elif data_type_choice == "MAT (DANNCE 3D)":
+                self.import_dir_btn = Button(self.import_directory_frm, fg="blue", text="Import DANNCE MAT DIRECTORY to SimBA project", command=lambda: import_DANNCE_folder(config_path=self.config_path,
+                                                                                                                                                                             folder_path=self.import_directory_select.folder_path,
+                                                                                                                                                                             interpolation_method=self.interpolation_dropdown.getChoices()))
+
+                self.import_file_btn = Button(self.import_single_frm, fg="blue", text="Import DANNCE MAT FILE to SimBA project", command=lambda: import_DANNCE_file(config_path=self.config_path,
+                                                                                                                                                                    file_path=self.import_file_select.file_path,
+                                                                                                                                                                    interpolation_method=self.interpolation_dropdown.getChoices()))
+            else:
+                self.import_dir_btn = Button(self.import_directory_frm, fg="blue", text="Import BENTO JSON DIRECTORY to SimBA project", command=lambda: MarsImporter(config_path=self.config_path,
+                                                                                                                                                                     data_path=self.import_directory_select.folder_path,
+                                                                                                                                                                     interpolation_method=self.interpolation_dropdown.getChoices(),
+                                                                                                                                                                     smoothing_method={"Method": self.smoothing_dropdown.getChoices(), "Parameters": {"Time_window": self.smoothing_time_eb.entry_get}}))
+
+                # FIX: the FILE button now imports the selected file (it was previously wired to the directory selector).
+                self.import_file_btn = Button(self.import_single_frm, fg="blue", text="Import BENTO JSON FILE to SimBA project", command=lambda: MarsImporter(config_path=self.config_path, data_path=self.import_file_select.file_path, interpolation_method=self.interpolation_dropdown.getChoices(),
+                                                                                                                                                              smoothing_method={"Method": self.smoothing_dropdown.getChoices(), "Parameters": {"Time_window": self.smoothing_time_eb.entry_get}}))
+
+            self.import_directory_frm.grid(row=2, column=0, sticky=NW)
+            self.import_directory_select.grid(row=0, column=0, sticky=NW)
+            self.import_dir_btn.grid(row=1, column=0, sticky=NW)
+
+            self.import_single_frm.grid(row=3, column=0, sticky=NW)
+            self.import_file_select.grid(row=0, column=0, sticky=NW)
+            self.import_file_btn.grid(row=1, column=0, sticky=NW)
+
+        else: # DATA TYPES WHERE TRACKS HAVE TO BE SPECIFIED
+            self.animal_settings_frm = LabelFrame(self.choice_frm, text="ANIMAL SETTINGS", pady=5, padx=5)
+            animal_cnt_entry_box = Entry_Box(self.animal_settings_frm, "ANIMAL COUNT:", "25", validation="numeric")
+            animal_cnt_entry_box.entry_set(val=self.animal_cnt)
+            # Name-mangled private method: self.create_animal_names_entry_boxes does not exist and would raise AttributeError.
+            animal_cnt_confirm = Button(self.animal_settings_frm, text="CONFIRM", fg="blue", command=lambda: self.__create_animal_names_entry_boxes(animal_cnt=animal_cnt_entry_box.entry_get))
+            self.__create_animal_names_entry_boxes(animal_cnt=animal_cnt_entry_box.entry_get)
+            self.animal_settings_frm.grid(row=4, column=0, sticky=NW)
+            animal_cnt_entry_box.grid(row=0, column=0, sticky=NW)
+            animal_cnt_confirm.grid(row=0, column=1, sticky=NW)
+
+            self.data_dir_frm = LabelFrame(self.choice_frm, text="DATA DIRECTORY", pady=5, padx=5)
+            self.import_frm = LabelFrame(self.choice_frm, text="IMPORT", pady=5, padx=5)
+
+            if data_type_choice == "H5 (multi-animal DLC)":
+                self.tracking_type_frm = LabelFrame(self.choice_frm, text="TRACKING DATA TYPE", pady=5, padx=5)
+                self.dlc_data_type_option_dropdown = DropDownMenu(self.tracking_type_frm, "TRACKING_TYPE", Options.MULTI_DLC_TYPE_IMPORT_OPTION.value, labelwidth=25)
+                self.dlc_data_type_option_dropdown.setChoices(Options.MULTI_DLC_TYPE_IMPORT_OPTION.value[1])
+                self.tracking_type_frm.grid(row=5, column=0, sticky=NW)
+                self.dlc_data_type_option_dropdown.grid(row=0, column=0, sticky=NW)
+                self.data_dir_select = FolderSelect(self.data_dir_frm, "H5 DLC DIRECTORY: ",
lblwidth=25) + self.instructions_lbl = Label(self.data_dir_frm, text="Please import videos BEFORE importing the \n multi animal DLC tracking data") + self.run_btn = Button(self.import_frm, text="IMPORT DLC .H5", fg="blue", command=lambda: self.__multi_animal_run_call(pose_estimation_tool=data_type_choice, + interpolation_settings=self.interpolation_dropdown.getChoices(), + smoothing_settings=self.smoothing_dropdown.getChoices(), + smoothing_window=self.smoothing_time_eb.entry_get, + animal_names=self.animal_name_entry_boxes, + data_path=self.data_dir_select.folder_path, + tracking_data_type=self.dlc_data_type_option_dropdown.getChoices())) + elif data_type_choice == "SLP (SLEAP)": + self.data_dir_select = FolderSelect(self.data_dir_frm, "SLP SLEAP DIRECTORY: ", lblwidth=25) + self.instructions_lbl = Label(self.data_dir_frm, text="Please import videos before importing the \n multi animal SLEAP tracking data if you are tracking more than ONE animal") + self.run_btn = Button(self.import_frm, text="IMPORT SLEAP .SLP", fg="blue", command=lambda: self.__multi_animal_run_call(pose_estimation_tool=data_type_choice, + interpolation_settings=self.interpolation_dropdown.getChoices(), + smoothing_settings=self.smoothing_dropdown.getChoices(), + smoothing_window=self.smoothing_time_eb.entry_get, + animal_names=self.animal_name_entry_boxes, + data_path=self.data_dir_select.folder_path)) + + elif data_type_choice == "TRK (multi-animal APT)": + self.data_dir_select = FolderSelect(self.data_dir_frm, "TRK APT DIRECTORY: ", lblwidth=25) + self.instructions_lbl = Label(self.data_dir_frm, text="Please import videos before importing the \n multi animal TRK tracking data") + self.run_btn = Button(self.import_frm, text="IMPORT APT .TRK", fg="blue", command=lambda: self.__multi_animal_run_call(pose_estimation_tool=data_type_choice, + interpolation_settings=self.interpolation_dropdown.getChoices(), + smoothing_settings=self.smoothing_dropdown.getChoices(), + smoothing_window=self.smoothing_time_eb.entry_get, + animal_names=self.animal_name_entry_boxes, + data_path=self.data_dir_select.folder_path)) + + elif data_type_choice == "CSV (SLEAP)": + self.data_dir_select = FolderSelect(self.data_dir_frm, "CSV SLEAP DIRECTORY:", lblwidth=25) + self.instructions_lbl = Label(self.data_dir_frm, text="Please import videos before importing the SLEAP tracking data \n IF you are tracking more than ONE animal") + self.run_btn = Button(self.import_frm, text="IMPORT SLEAP .CSV", fg="blue", command=lambda: self.__multi_animal_run_call(pose_estimation_tool=data_type_choice, + interpolation_settings=self.interpolation_dropdown.getChoices(), + smoothing_settings=self.smoothing_dropdown.getChoices(), + smoothing_window=self.smoothing_time_eb.entry_get, + animal_names=self.animal_name_entry_boxes, + data_path=self.data_dir_select.folder_path)) + + elif data_type_choice == "H5 (SLEAP)": + self.data_dir_select = FolderSelect(self.data_dir_frm, "H5 SLEAP DIRECTORY", lblwidth=25) + self.instructions_lbl = Label(self.data_dir_frm,text="Please import videos before importing the SLEAP H5 tracking data \n IF you are tracking more than ONE animal") + self.run_btn = Button(self.import_frm, text="IMPORT SLEAP H5", fg="blue", command=lambda: self.__multi_animal_run_call(pose_estimation_tool=data_type_choice, + interpolation_settings=self.interpolation_dropdown.getChoices(), + smoothing_settings=self.smoothing_dropdown.getChoices(), + smoothing_window=self.smoothing_time_eb.entry_get, + animal_names=self.animal_name_entry_boxes, + 
data_path=self.data_dir_select.folder_path)) + + self.data_dir_frm.grid(row=self.frame_children(frame=self.choice_frm), column=0, sticky=NW) + self.data_dir_select.grid(row=0, column=0, sticky=NW) + self.instructions_lbl.grid(row=1, column=0, sticky=NW) + self.import_frm.grid(row=self.frame_children(frame=self.choice_frm) + 1, column=0, sticky=NW) + self.run_btn.grid(row=0, column=0, sticky=NW) + self.choice_frm.grid(row=1, column=0, sticky=NW) + +#_ = ImportPoseFrame(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini') \ No newline at end of file diff --git a/simba/sandbox/create_shap_log.py b/simba/sandbox/create_shap_log.py new file mode 100644 index 000000000..1a2b6fe32 --- /dev/null +++ b/simba/sandbox/create_shap_log.py @@ -0,0 +1,162 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +import os +from typing import List, Optional, Tuple, Union + +import numpy as np +import pandas as pd +import shap +from sklearn.ensemble import RandomForestClassifier + +from simba.mixins.train_model_mixin import TrainModelMixin +from simba.utils.checks import (check_if_dir_exists, check_instance, check_int, + check_nvidea_gpu_available, check_str, + check_valid_array, check_valid_dataframe, + check_valid_lst) +from simba.utils.enums import Formats +from simba.utils.errors import FFMPEGCodecGPUError +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import write_df +from simba.utils.warnings import NotEnoughDataWarning + + +def create_shap_log(rf_clf: Union[str, os.PathLike, RandomForestClassifier], + x: Union[pd.DataFrame, np.ndarray], + y: Union[pd.DataFrame, pd.Series, np.ndarray], + cnt_present: int, + cnt_absent: int, + x_names: Optional[List[str]] = None, + clf_name: Optional[str] = None, + save_dir: Optional[Union[str, os.PathLike]] = None, + verbose: Optional[bool] = True) -> Union[None, Tuple[pd.DataFrame, pd.DataFrame, int]]: + """ + Computes SHAP (SHapley Additive exPlanations) values using a GPU for a RandomForestClassifier, + based on specified counts of positive and negative samples, and optionally saves the results. + + .. image:: _static/img/create_shap_log_cuda.png + :width: 500 + :align: center + + + .. csv-table:: + :header: EXPECTED RUNTIMES + :file: ../../../docs/tables/cuda_shap.csv + :widths: 10, 90 + :align: center + :class: simba-table + :header-rows: 1 + + :param Union[str, os.PathLike, RandomForestClassifier] rf_clf: Trained RandomForestClassifier model or path to the saved model. Can be a string, os.PathLike object, or an instance of RandomForestClassifier. + :param Union[pd.DataFrame, np.ndarray] x: Input features used for SHAP value computation. Can be a pandas DataFrame or numpy ndarray. + :param Union[pd.DataFrame, pd.Series, np.ndarray] y: Target labels corresponding to the input features. Can be a pandas DataFrame, pandas Series, or numpy ndarray with 0 and 1 values. + :param int cnt_present: Number of positive samples (label=1) to include in the SHAP value computation. + :param int cnt_absent: Number of negative samples (label=0) to include in the SHAP value computation. + :param Optional[List[str]] x_names: Optional list of feature names corresponding to the columns in `x`. If `x` is a DataFrame, this is extracted automatically. + :param Optional[str] clf_name: Optional name for the classifier, used in naming output files. If not provided, it is extracted from the `y` labels if possible. 
+    :param Optional[Union[str, os.PathLike]] save_dir: Optional directory path where the SHAP values and corresponding raw features are saved as CSV files.
+    :param Optional[bool] verbose: Optional boolean flag indicating whether to print progress messages. Defaults to True.
+    :return Union[None, Tuple[pd.DataFrame, pd.DataFrame, int]]: If `save_dir` is None, returns a tuple containing:
+        - V: DataFrame with SHAP values, expected value, sum of SHAP values, prediction probability, and target labels.
+        - R: DataFrame containing the raw feature values for the selected samples.
+        - expected_value: The expected value from the SHAP explainer.
+        If `save_dir` is provided, the function returns None and saves the output to CSV files in the specified directory.
+
+    :example:
+    >>> x = np.random.random((1000, 501)).astype(np.float32)
+    >>> y = np.random.randint(0, 2, size=(len(x), 1)).astype(np.int32)
+    >>> clf_names = [str(x) for x in range(501)]
+    >>> results = create_shap_log(rf_clf=MODEL_PATH, x=x, y=y, cnt_present=500, cnt_absent=500, clf_name='TEST', x_names=clf_names, verbose=False)
+    """
+
+    timer = SimbaTimer(start=True)
+    if verbose:
+        print('Computing SHAP values (GPU)...')
+    if not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidia-smi returning None)", source=create_shap_log.__name__)
+    check_instance(source=f'{create_shap_log.__name__} rf_clf', instance=rf_clf, accepted_types=(str, RandomForestClassifier))
+    if isinstance(rf_clf, (str, os.PathLike)):
+        rf_clf = TrainModelMixin().read_pickle(file_path=rf_clf)
+    check_instance(source=f'{create_shap_log.__name__} x', instance=x, accepted_types=(pd.DataFrame, np.ndarray))
+    if isinstance(x, np.ndarray):
+        check_valid_lst(data=x_names, source=f'{create_shap_log.__name__} x_names', valid_dtypes=(str,), exact_len=x.shape[1])
+        check_valid_array(data=x, source=f'{create_shap_log.__name__} x', accepted_ndims=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    else:
+        check_valid_dataframe(df=x, source=f'{create_shap_log.__name__} x', valid_dtypes=Formats.NUMERIC_DTYPES.value)
+        x_names = list(x.columns)
+        x = x.values
+    check_instance(source=f'{create_shap_log.__name__} y', instance=y, accepted_types=(pd.DataFrame, np.ndarray, pd.Series))
+    if isinstance(y, np.ndarray):
+        check_str(name=f'{create_shap_log.__name__} clf_name', value=clf_name)
+        y = y.flatten()
+    elif isinstance(y, pd.Series):
+        clf_name = y.name
+        y = y.values.flatten()
+    else:
+        check_valid_dataframe(df=y, source=f'{create_shap_log.__name__} y', valid_dtypes=Formats.NUMERIC_DTYPES.value, max_axis_1=1)
+        clf_name = list(y.columns)[0]
+        y = y.values.flatten()
+    save_shap_path, save_raw_path = None, None
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+        save_shap_path = os.path.join(save_dir, f"SHAP_values_{clf_name}.csv")
+        save_raw_path = os.path.join(save_dir, f"RAW_SHAP_feature_values_{clf_name}.csv")
+    check_valid_array(data=y, source=f'{create_shap_log.__name__} y', accepted_values=[0, 1])
+    check_int(name=f'{create_shap_log.__name__} cnt_present', value=cnt_present, min_value=1)
+    check_int(name=f'{create_shap_log.__name__} cnt_absent', value=cnt_absent, min_value=1)
+    target_cnt = np.sum(y)
+    absent_cnt = y.shape[0] - target_cnt
+
+    if cnt_present > target_cnt:
+        NotEnoughDataWarning(msg=f"Data contains {target_cnt} behavior-present annotations. This is less than the number of frames you specified to calculate SHAP values for ({cnt_present}). SimBA will calculate SHAP scores for the {target_cnt} behavior-present frames available", source=create_shap_log.__name__)
+        cnt_present = target_cnt
+    if absent_cnt < cnt_absent:
+        NotEnoughDataWarning(msg=f"Data contains {absent_cnt} behavior-absent annotations. This is less than the number of frames you specified to calculate SHAP values for ({cnt_absent}). SimBA will calculate SHAP scores for the {absent_cnt} behavior-absent frames available", source=create_shap_log.__name__)
+        cnt_absent = absent_cnt
+
+    target_idx = np.argwhere(y == 1).flatten()
+    absent_idx = np.argwhere(y == 0).flatten()
+    target_idx = np.sort(np.random.choice(target_idx, cnt_present))
+    absent_idx = np.sort(np.random.choice(absent_idx, cnt_absent))
+    target_x = x[target_idx]
+    absent_x = x[absent_idx]
+    X = np.vstack([target_x, absent_x]).astype(np.float32)
+    Y = np.hstack([np.ones(target_x.shape[0]), np.zeros(absent_x.shape[0])]).astype(np.int32)
+    explainer = shap.explainers.GPUTree(model=rf_clf, data=None, model_output='raw', feature_names='tree_path_dependent')
+    shap_values = explainer.shap_values(X, check_additivity=True)
+    V = pd.DataFrame(shap_values[1], columns=x_names).astype(np.float32)
+    shap_sum = V.sum(axis=1)  # renamed from ``sum`` to avoid shadowing the built-in
+    expected_value = explainer.expected_value[1]
+    p = TrainModelMixin().clf_predict_proba(clf=rf_clf, x_df=X)
+
+    V['EXPECTED_VALUE'] = expected_value.round(4)
+    V['SUM'] = shap_sum + V['EXPECTED_VALUE']
+    V['PREDICTION_PROBABILITY'] = p.round(4)
+    V['SUM'] = V['SUM'].round(4)
+    V[clf_name] = Y
+    x_idx = np.hstack([target_idx, absent_idx])
+    R = pd.DataFrame(x[x_idx, :], columns=x_names)
+    timer.stop_timer()
+    if save_dir is None:
+        if verbose:
+            stdout_success(msg=f'SHAP value computation complete (GPU) for {len(V)} observations.', elapsed_time=timer.elapsed_time_str)
+        return (V, R, expected_value)
+    else:
+        write_df(df=V, file_type='csv', save_path=save_shap_path)
+        write_df(df=R, file_type='csv', save_path=save_raw_path)
+        if verbose:
+            stdout_success(msg=f'SHAP value computation complete (GPU) for {len(V)} observations, and saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
+
+
+ diff --git a/simba/sandbox/create_shap_log_3.py b/simba/sandbox/create_shap_log_3.py new file mode 100644 index 000000000..efd7c991e --- /dev/null +++ b/simba/sandbox/create_shap_log_3.py @@ -0,0 +1,52 @@
+# Shapley calculations: Example II (GPU)
+
+# >NOTE I: The SHAP library has to be built from git rather than pip: ``pip install git+https://github.com/slundberg/shap.git``
+# >NOTE II: The scikit-learn model cannot be built with max_depth > 31 for it to work with this code.
+
+# In this example, we have previously created a classifier. We have the data used to create this classifier, and now we want to compute SHAP explainability scores
+# for this classifier using GPU (to speed things up massively).
+
+from simba.sandbox.create_shap_log import create_shap_log
+from simba.mixins.train_model_mixin import TrainModelMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.read_write import read_df, read_config_file
+import glob
+
+
+# DEFINITIONS
+CONFIG_PATH = r"/mnt/c/troubleshooting/mitra/project_folder/project_config.ini"
+CLASSIFIER_PATH = r"/mnt/c/troubleshooting/mitra/models/generated_models/grooming.sav"
+CLASSIFIER_NAME = 'grooming'
+SAVE_DIR = r'/mnt/c/troubleshooting/mitra/models/generated_models'
+COUNT_PRESENT = 2000
+COUNT_ABSENT = 2000
+
+
+# READ IN THE CONFIG AND THE CLASSIFIER
+config = read_config_file(config_path=CONFIG_PATH)
+config_object = ConfigReader(config_path=CONFIG_PATH, create_logger=False)
+clf = read_df(file_path=CLASSIFIER_PATH, file_type='pickle')
+
+
+# READ IN THE DATA
+
+# Find the paths to all files inside the project_folder/csv/targets_inserted directory
+file_paths = glob.glob(config_object.targets_folder + '/*' + config_object.file_type)
+# Read in the data held in all files in ``file_paths`` defined above
+data, _ = TrainModelMixin().read_all_files_in_folder_mp(file_paths=file_paths, file_type=config.get('General settings', 'workflow_file_type').strip())
+# Find all behavior annotations that are NOT the target. I.e., if SHAP values for Attack are going to be calculated, we need to find which other annotations exist in the data, e.g., Escape and Defensive.
+non_target_annotations = TrainModelMixin().read_in_all_model_names_to_remove(config=config, model_cnt=config_object.clf_cnt, clf_name=CLASSIFIER_NAME)
+# Remove the body-part coordinate columns and the annotations which are not the target from the data
+data = data.drop(non_target_annotations + config_object.bp_headers, axis=1)
+# Place the target data in its own variable
+target_df = data.pop(CLASSIFIER_NAME)
+
+shap_values, raw_values, expected_value = create_shap_log(rf_clf=clf,
+                                                          x=data,
+                                                          y=target_df,
+                                                          cnt_present=COUNT_PRESENT,
+                                                          cnt_absent=COUNT_ABSENT,
+                                                          x_names=list(data.columns),
+                                                          clf_name=CLASSIFIER_NAME,
+                                                          save_dir=None,
+                                                          verbose=True) \ No newline at end of file diff --git a/simba/sandbox/cronbach_alpha.py b/simba/sandbox/cronbach_alpha.py new file mode 100644 index 000000000..f2d121731 --- /dev/null +++ b/simba/sandbox/cronbach_alpha.py @@ -0,0 +1,65 @@
+import time
+
+import numpy as np
+from numba import prange, jit, njit
+import pandas as pd
+from pingouin.reliability import cronbach_alpha
+
+
+@njit('(float32[:,:], )',)
+def cov_matrix(data: np.ndarray):
+    """
+    Jitted helper to compute the covariance matrix of the input data. Used as a helper when computing Cronbach's alpha,
+    multivariate analyses, and distance computations.
+
+    :param np.ndarray data: 2-dimensional numpy array representing the input data with shape (n, m), where n is the number of observations and m is the number of variables.
+    :return: Covariance matrix of the input data with shape (m, m). The (i, j)-th element of the matrix represents the covariance between the i-th and j-th variables in the data.
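+
+    The (i, j)-th element is computed, matching the loop below, as:
+
+    .. math::
+       \text{cov}(i, j) = \frac{1}{n - 1} \sum_{k=1}^{n} (x_{k,i} - \bar{x}_i)(x_{k,j} - \bar{x}_j)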
+ + :example: + >>> data = np.random.randint(0,2, (200, 40)).astype(np.float32) + >>> covariance_matrix = cov_matrix(data=data) + """ + n, m = data.shape + cov = np.full((m, m), 0.0) + for i in prange(m): + mean_i = np.sum(data[:, i]) / n + for j in range(m): + mean_j = np.sum(data[:, j]) / n + cov[i, j] = np.sum((data[:, i] - mean_i) * (data[:, j] - mean_j)) / (n - 1) + return cov + +@njit('(float32[:,:], )') +def cronbach_a(data: np.ndarray): + """ + Cronbach's alpha is a way of assessing reliability by comparing the amount of shared variance, or covariance, + among the items making up an instrument to the amount of overall variance. + Cronbach’s alpha can be used to assess internal consistency reliability when the variables + (e.g., survey items, measure items) analyzed are continuous (interval or ratio measurement scale); + + :example: + >>> data = np.random.randint(0,2, (200, 40)).astype(np.float32) + >>> x = cronbach_a(data=data) + """ + + cov = cov_matrix(data=data) + return (data.shape[1] / (data.shape[1] - 1)) * (1 - np.trace(cov) / np.sum(cov)) + + + + + + +data = np.random.randint(0,2, (200, 40)).astype(np.float32) +covariance_matrix = cov_matrix(data=data) +# start = time.time() +# d = cov_matrix(data=data) +# print(time.time() - start) +# +# start = time.time() +# x = cronbach_a(data=data) +# print(time.time() - start) +# start = time.time() +# y = cronbach_alpha(data=pd.DataFrame(data)) +# print(time.time() - start) + +#data.coV \ No newline at end of file diff --git a/simba/sandbox/crossfade.py b/simba/sandbox/crossfade.py new file mode 100644 index 000000000..87d6f9904 --- /dev/null +++ b/simba/sandbox/crossfade.py @@ -0,0 +1,60 @@ +from typing import Union, Optional +import os.path +import subprocess +from simba.utils.read_write import get_fn_ext, get_video_meta_data +from simba.utils.checks import check_int, check_str, check_if_dir_exists +from simba.utils.lookups import get_ffmpeg_crossfade_methods +from simba.utils.printing import stdout_success, SimbaTimer +from simba.utils.errors import InvalidInputError + + +def crossfade_two_videos(video_path_1: Union[str, os.PathLike], + video_path_2: Union[str, os.PathLike], + crossfade_duration: Optional[int] = 2, + crossfade_method: Optional[str] = 'fade', + crossfade_offset: Optional[int] = 2, + save_path: Optional[Union[str, os.PathLike]] = None): + """ + Cross-fade two videos. + + .. video:: _static/img/overlay_video_progressbar.webm + :loop: + + .. note:: + See ``simba.utils.lookups.get_ffmpeg_crossfade_methods`` for named crossfade methods. + See `https://trac.ffmpeg.org/wiki/Xfade `__. for visualizations of named crossfade methods, + + :param Union[str, os.PathLike] video_path_1: Path to the first video on disk. + :param Union[str, os.PathLike] video_path_2: Path to the second video on disk. + :param Optional[int] crossfade_duration: The duration of the crossfade. + :param Optional[str] crossfade_method: The crossfade method. For accepted methods, see ``simba.utils.lookups.get_ffmpeg_crossfade_methods``. + :param Optional[int] crossfade_offset: The time in seconds into the first video before the crossfade duration begins. + :param Optional[Union[str, os.PathLike]] save_path: The location where to save the crossfaded video. If None, then saves the video in the same directory as ``video_path_1`` with ``_crossfade`` suffix. + + :return: None. 
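+
+    .. note::
+       Assuming standard ffmpeg ``xfade`` semantics, the output duration is approximately
+       ``crossfade_offset`` plus the full duration of ``video_path_2``: the first video plays
+       until the offset, the two videos blend for ``crossfade_duration`` seconds, and the
+       remainder of the second video follows.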
+
+    :example:
+    >>> crossfade_two_videos(video_path_1='/Users/simon/Desktop/envs/simba/troubleshooting/reptile/1.mp4', video_path_2='/Users/simon/Desktop/envs/simba/troubleshooting/reptile/1.mp4', crossfade_duration=5, crossfade_method='zoomin', save_path='/Users/simon/Desktop/cross_test.mp4')
+    """
+
+    timer = SimbaTimer(start=True)
+    video_1_meta = get_video_meta_data(video_path=video_path_1)
+    video_2_meta = get_video_meta_data(video_path=video_path_2)
+    if video_1_meta['resolution_str'] != video_2_meta['resolution_str']:
+        raise InvalidInputError(msg=f'Video 1 and Video 2 need to be the same resolution, got {video_2_meta["resolution_str"]} and {video_1_meta["resolution_str"]}', source=crossfade_two_videos.__name__)
+    crossfade_offset_methods = get_ffmpeg_crossfade_methods()
+    check_str(name=f'{crossfade_method} crossfade_method', value=crossfade_method, options=crossfade_offset_methods)
+    check_int(name=f'{crossfade_two_videos.__name__} crossfade_duration', value=crossfade_duration, min_value=1, max_value=video_2_meta['video_length_s'])
+    check_int(name=f'{crossfade_two_videos.__name__} crossfade_offset', value=crossfade_offset, min_value=0, max_value=video_1_meta['video_length_s'])
+    dir_1, video_name_1, ext_1 = get_fn_ext(filepath=video_path_1)
+    dir_2, video_name_2, ext_2 = get_fn_ext(filepath=video_path_2)
+    if save_path is not None:
+        check_if_dir_exists(in_dir=os.path.dirname(save_path))
+    else:
+        save_path = os.path.join(dir_1, f'{video_name_1}_{video_name_2}_crossfade{ext_1}')
+    cmd = f'ffmpeg -i "{video_path_1}" -i "{video_path_2}" -filter_complex "xfade=transition={crossfade_method}:offset={crossfade_offset}:duration={crossfade_duration}" "{save_path}" -loglevel error -stats -hide_banner -y'
+    subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f'Cross-faded video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
+
+ diff --git a/simba/sandbox/cuda_median.py b/simba/sandbox/cuda_median.py new file mode 100644 index 000000000..b4e0da617 --- /dev/null +++ b/simba/sandbox/cuda_median.py @@ -0,0 +1,36 @@
+import numpy as np
+import numba as nb
+from numba import guvectorize
+from numba import cuda
+import time
+
+
+a = np.random.rand(1024 * 1024, 32).astype('float32')
+b = np.random.rand(1024 * 1024, 32).astype('float32')
+dist = np.zeros(a.shape[0]).astype('float32')
+
+
+@guvectorize(['void(float32[:], float32[:], float32[:])'], '(n),(n)->()',
+             target='cuda')
+def numba_dist_cuda(a, b, dist):
+    # NOTE: despite the file name, this kernel computes the element-wise sum of the two rows, not a median.
+    n = a.shape[0]  # renamed from ``len`` to avoid shadowing the built-in
+    x = 0
+    for i in range(n):
+        x += a[i] + b[i]
+    dist[0] = x
+
+
+nb.cuda.detect()
+print(nb.cuda.is_available())
+
+
+d_a = cuda.to_device(a)
+d_b = cuda.to_device(b)
+d_dist = cuda.to_device(dist)
+
+t = time.time()
+numba_dist_cuda(d_a, d_b, d_dist)
+cuda.synchronize()
+elapsed = time.time() - t
+
+print(elapsed) \ No newline at end of file diff --git a/simba/sandbox/cuda_rotate_video.py b/simba/sandbox/cuda_rotate_video.py new file mode 100644 index 000000000..844877562 --- /dev/null +++ b/simba/sandbox/cuda_rotate_video.py @@ -0,0 +1,122 @@
+import os
+from typing import Union, Optional
+from cupyx.scipy.ndimage import rotate
+import cupy as cp
+import numpy as np
+from simba.utils.read_write import read_img_batch_from_video_gpu, get_video_meta_data, get_fn_ext
+from simba.utils.checks import check_valid_array, check_int
+from simba.mixins.image_mixin import ImageMixin
+from simba.utils.enums import Formats
+from simba.utils.printing import SimbaTimer, stdout_success
+import cv2
+
+def rotate_img_stack_cupy(imgs: np.ndarray,
+                          rotation_degrees: Optional[float] = 180,
+                          batch_size: Optional[int] = 500) -> np.ndarray:
+    """
+    Rotates a stack of images by a specified number of degrees using GPU acceleration with CuPy.
+
+    Accepts a 3D (single-channel images) or 4D (multichannel images) NumPy array, rotates each image in the stack by the specified degree around the center, and returns the result as a NumPy array.
+
+    :param np.ndarray imgs: The input stack of images to be rotated. Expected to be a NumPy array with 3 or 4 dimensions. 3D shape: (num_images, height, width) - 4D shape: (num_images, height, width, channels)
+    :param Optional[float] rotation_degrees: The angle by which the images should be rotated, in degrees. Must be between 1 and 359 degrees. Defaults to 180 degrees.
+    :param Optional[int] batch_size: Number of images to process on the GPU in each batch. Decrease if the data can't fit in GPU RAM.
+    :returns: A NumPy array containing the rotated images, with the same number of images as the input. Note that with ``reshape=True``, the height and width can differ from the input for angles that are not multiples of 180.
+    :rtype: np.ndarray
+
+    :example:
+    >>> video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/F0_gq_Saline_0626_clipped.mp4"
+    >>> imgs = read_img_batch_from_video_gpu(video_path=video_path)
+    >>> imgs = np.stack(np.array(list(imgs.values())), axis=0)
+    >>> imgs = rotate_img_stack_cupy(imgs=imgs, rotation_degrees=50)
+    """
+
+    check_valid_array(data=imgs, source=f'{rotate_img_stack_cupy.__name__} imgs', accepted_ndims=(3, 4))
+    check_int(name=f'{rotate_img_stack_cupy.__name__} rotation', value=rotation_degrees, min_value=1, max_value=359)
+    results = None
+    for l in range(0, imgs.shape[0], batch_size):
+        r = l + batch_size
+        batch_imgs = cp.array(imgs[l:r])
+        batch_results = rotate(input=batch_imgs, angle=rotation_degrees, axes=(2, 1), reshape=True)
+        if results is None:
+            # Allocate the output once the rotated frame size is known: with reshape=True,
+            # rotated frames can be larger than the input frames.
+            results = cp.zeros((imgs.shape[0],) + batch_results.shape[1:], dtype=cp.uint8)
+        results[l:r] = batch_results
+    return results.get()
+
+
+def rotate_video_cupy(video_path: Union[str, os.PathLike],
+                      save_path: Optional[Union[str, os.PathLike]] = None,
+                      rotation_degrees: Optional[float] = 180,
+                      batch_cnt: Optional[int] = 1) -> None:
+    """
+    Rotates a video by a specified angle using GPU acceleration and CuPy for image processing.
+
+    :param Union[str, os.PathLike] video_path: Path to the input video file.
+    :param Optional[Union[str, os.PathLike]] save_path: Path to save the rotated video. If None, saves the video in the same directory as the input with '_rotated_' appended to the filename.
+    :param Optional[float] rotation_degrees: Degrees to rotate the video. Must be between 1 and 359 degrees. Default is 180.
+    :param Optional[int] batch_cnt: Number of batches to split the video frames into for processing. Higher values reduce memory usage. Default is 1.
+    :returns: None.
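+
+    .. note::
+       ``rotate_img_stack_cupy`` is called with ``reshape=True``, so for angles that are not
+       multiples of 180 the rotated frames are larger than the source frames. The video writer
+       is therefore sized from the first rotated batch rather than from the input video metadata.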
+ + :example: + >>> video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/F0_gq_Saline_0626_clipped.mp4" + >>> rotate_video_cupy(video_path=video_path, rotation_degrees=45) + """ + + timer = SimbaTimer(start=True) + check_int(name=f'{rotate_img_stack_cupy.__name__} rotation', value=rotation_degrees, min_value=1, max_value=359) + check_int(name=f'{rotate_img_stack_cupy.__name__} batch_cnt', value=batch_cnt, min_value=1) + if save_path is None: + video_dir, video_name, _ = get_fn_ext(filepath=video_path) + save_path = os.path.join(video_dir, f'{video_name}_rotated_{rotation_degrees}.mp4') + video_meta_data = get_video_meta_data(video_path=video_path) + fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value) + is_clr = ImageMixin.is_video_color(video=video_path) + frm_ranges = np.arange(0, video_meta_data['frame_count']) + frm_ranges = np.array_split(frm_ranges, batch_cnt) + for frm_batch, frm_range in enumerate(frm_ranges): + imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=frm_range[0], end_frm=frm_range[-1]) + imgs = np.stack(np.array(list(imgs.values())), axis=0) + imgs = rotate_img_stack_cupy(imgs=imgs, rotation_degrees=rotation_degrees) + if frm_batch == 0: + writer = cv2.VideoWriter(save_path, fourcc, video_meta_data['fps'], (imgs.shape[2], imgs.shape[1]), isColor=is_clr) + for img in imgs: writer.write(img) + writer.release() + timer.stop_timer() + stdout_success(f'Rotated video saved at {save_path}', source=rotate_video_cupy.__name__) + +# video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/F0_gq_Saline_0626_clipped.mp4" +# rotate_video_cupy(video_path=video_path, rotation_degrees=45) + +import time +its = 3 +for i in [500, 1000, 2000, 4000, 8000, 16000, 32000]: #1000, 2000, 4000, 8000, 16000, 32000] + imgs = np.random.randint(0, 255, (i, 320, 240)) + times = [] + for j in range(its): + print(j) + start_time = time.perf_counter() + _ = rotate_img_stack_cupy(imgs=imgs) + end_time = time.perf_counter() - start_time + times.append(end_time) + print(i, '\t'* 3, np.mean(times), '\t' * 3, np.std(times)) + + + + + +# imgs = read_img_batch_from_video_gpu(video_path=video_path) +# imgs = np.stack(np.array(list(imgs.values())), axis=0) +# +# imgs = rotate_img_stack_cupy(imgs=imgs, rotation=50) +# +# cv2. 
imshow('sasdasdsad', imgs[0].astype(np.uint8))
+# cv2.waitKey(5000)
+#
+
+
+
+
+
+#
+# def rotate_video_cupy(video_path: Union[str, os.PathLike], rotation: Optional[int] = 180): diff --git a/simba/sandbox/cuda_sliding_descriptive_stats.py b/simba/sandbox/cuda_sliding_descriptive_stats.py new file mode 100644 index 000000000..17cea8d3d --- /dev/null +++ b/simba/sandbox/cuda_sliding_descriptive_stats.py @@ -0,0 +1,120 @@
+import numpy as np
+from numba import cuda
+from typing import Tuple
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+from simba.utils.checks import check_valid_array, check_valid_tuple, check_float
+from simba.utils.enums import Formats
+from simba.data_processors.cuda.utils import _cuda_bubble_sort, _cuda_range, _cuda_mean, _cuda_variance, _cuda_mac, _cuda_median, _cuda_standard_deviation, _cuda_mad, _cuda_sum, _cuda_min, _cuda_max, _cuda_rms, _cuda_abs_energy
+import math
+
+THREADS_PER_BLOCK = 512
+
+
+@cuda.jit(device=True)
+def _cuda_iqr(x):
+    # Copy the window into thread-local memory before sorting: sorting the global-memory
+    # slice in place would corrupt the input array for neighbouring threads.
+    # Assumes window sizes of at most 512 samples.
+    local_x = cuda.local.array(shape=512, dtype=np.float32)
+    n = x.shape[0]
+    for idx in range(n):
+        local_x[idx] = x[idx]
+    # Simple insertion sort on the local copy (windows are short).
+    for a in range(1, n):
+        key = local_x[a]
+        b = a - 1
+        while b >= 0 and local_x[b] > key:
+            local_x[b + 1] = local_x[b]
+            b -= 1
+        local_x[b + 1] = key
+    lower_idx = n // 4
+    upper_idx = (3 * n) // 4
+    return local_x[upper_idx] - local_x[lower_idx]
+
+
+@cuda.jit()
+def _cuda_descriptive_stats_kernel(x, win_size, sV, results):
+    i = cuda.grid(1)
+    if ((x.shape[0]) < i) or (i < win_size[0]):
+        return
+    else:
+        sample = x[i - win_size[0]: i]
+        if sV[0] == 1: results[i-1, 0] = _cuda_variance(sample)
+        if sV[1] == 1: results[i-1, 1] = _cuda_mac(sample)
+        if sV[2] == 1: results[i-1, 2] = _cuda_median(sample)
+        if sV[3] == 1: results[i-1, 3] = _cuda_standard_deviation(sample)
+        if sV[4] == 1: results[i-1, 4] = _cuda_mad(sample)
+        if sV[5] == 1: results[i-1, 5] = _cuda_mean(sample)
+        if sV[6] == 1: results[i-1, 6] = _cuda_min(sample)
+        if sV[7] == 1: results[i-1, 7] = _cuda_max(sample)
+        if sV[8] == 1: results[i-1, 8] = _cuda_sum(sample)
+        if sV[9] == 1: results[i-1, 9] = _cuda_rms(sample)
+        if sV[10] == 1: results[i-1, 10] = _cuda_abs_energy(sample)
+        if sV[11] == 1: results[i - 1, 11] = _cuda_range(sample)
+        if sV[12] == 1: results[i - 1, 12] = _cuda_iqr(sample)
+        cuda.syncthreads()
+
+
+def sliding_descriptive_statistics_cuda(data: np.ndarray,
+                                        window_size: float,
+                                        sample_rate: float,
+                                        statistics: Tuple[Literal["var", "max", "min", "std"]]):
+
+    STATISTICS = ('var', 'mac', 'median', 'std', 'mad', 'mean', 'min', 'max', 'sum', 'rms', 'abs_energy', 'range', 'iqr')
+    check_valid_array(data=data, source=f'{sliding_descriptive_statistics_cuda.__name__} data', accepted_ndims=(1,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_float(name=f'{sliding_descriptive_statistics_cuda.__name__} window_size', value=window_size, min_value=10e-6)
+    check_float(name=f'{sliding_descriptive_statistics_cuda.__name__} sample_rate', value=sample_rate, min_value=10e-6)
+    check_valid_tuple(x=statistics, source=f'{sliding_descriptive_statistics_cuda.__name__} statistics', valid_dtypes=(str,), accepted_values=STATISTICS)
+    frm_win = np.array([max(1, int(window_size*sample_rate))])
+    sV = np.zeros(shape=(len(STATISTICS),), dtype=np.uint8)
+    for cnt, statistic in enumerate(STATISTICS):
+        if statistic in statistics: sV[cnt] = 1
+    results = np.full(shape=(data.shape[0], len(STATISTICS)), fill_value=-1.0, dtype=np.float32)
+    x_dev = cuda.to_device(data)
+    win_size_dev = cuda.to_device(frm_win)
+    sv_dev = cuda.to_device(sV)
+    results_dev = cuda.to_device(results)
+    bpg = (data.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_descriptive_stats_kernel[bpg, THREADS_PER_BLOCK](x_dev, win_size_dev, sv_dev, results_dev)
+    results = results_dev.copy_to_host()
+    print(results[:, 12])
+
+
+data = np.random.randint(0, 50, (90,))
+window_size = 1.5
+sliding_descriptive_statistics_cuda(data=data, window_size=window_size, sample_rate=30, statistics=('iqr',))
+# sliding_iqr(x=data, window_size=window_size, sample_rate=30)  # CPU reference; not defined in this sandbox script
+
+
+# arr = np.array([99, 2, 3, 5, 7, 9, 11])
+# bubble_sort(arr)
+
+# x = np.array([2.5, 5, 7.5, 10.0])
+# _cuda_variance(x=x)
+
+# np.mean([57, 42, 8, 136])
+# np.mean(np.diff(np.abs(data))) \ No newline at end of file diff --git a/simba/sandbox/cuda_sort.py b/simba/sandbox/cuda_sort.py new file mode 100644 index 000000000..c87d3f543 --- /dev/null +++ b/simba/sandbox/cuda_sort.py @@ -0,0 +1,65 @@
+import numpy as np
+from numba import cuda
+
+
+@cuda.jit(device=True)
+def bitonic_sort_inplace(arr, n):
+    # Iterative, in-place bitonic sort in ascending order. Assumes ``n`` is a power of two.
+    # Numba CUDA does not support recursive device functions, so the classic recursive
+    # formulation is unrolled into the standard iterative compare-exchange network.
+    k = 2
+    while k <= n:
+        j = k // 2
+        while j > 0:
+            for i in range(n):
+                l = i ^ j
+                if l > i:
+                    ascending = (i & k) == 0
+                    if (ascending and arr[i] > arr[l]) or ((not ascending) and arr[i] < arr[l]):
+                        tmp = arr[i]
+                        arr[i] = arr[l]
+                        arr[l] = tmp
+            j //= 2
+        k *= 2
+
+
+@cuda.jit
+def sort_kernel(arr):
+    # We only run the sorting process on thread 0
+    idx = cuda.grid(1)
+    if idx == 0:
+        bitonic_sort_inplace(arr, arr.shape[0])
+
+
+# Example data (length must be a power of two)
+arr = np.array([5.0, 3.0, 8.0, 1.0, 9.0, 7.0, 2.0, 4.0], dtype=np.float32)
+
+# Allocate device memory
+d_arr = cuda.to_device(arr)
+
+# Launch kernel with a single thread block
+threads_per_block = 256
+blocks_per_grid = 1
+sort_kernel[blocks_per_grid, threads_per_block](d_arr)
+
+# Copy result back to host
+d_arr.copy_to_host(arr)
+print("Sorted array:", arr) \ No newline at end of file diff --git a/simba/sandbox/cumcount_plot.py b/simba/sandbox/cumcount_plot.py new file mode 100644 index 000000000..76ad606c1 --- /dev/null +++ b/simba/sandbox/cumcount_plot.py @@ -0,0 +1,114 @@
+import os
+from typing import Union, Optional
+import numpy as np
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+from simba.utils.checks import check_if_dir_exists, check_valid_boolean, check_str, check_valid_dataframe, 
check_all_file_names_are_represented_in_video_log +from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, read_video_info +from simba.utils.data import detect_bouts, create_color_palette +from simba.mixins.config_reader import ConfigReader +from simba.utils.read_write import get_fn_ext +from simba.utils.printing import stdout_success + + + +def plot_clf_cumcount(config_path: Union[str, os.PathLike], + clf: str, + data_dir: Optional[Union[str, os.PathLike]] = None, + save_path: Optional[Union[str, os.PathLike]] = None, + bouts: Optional[bool] = False, + seconds: Optional[bool] = False) -> None: + + """ + + Generates and saves a cumulative count plot of a specified classifier's occurrences over video frames or time. + + .. image:: _static/img/plot_clf_cumcount.webp + :width: 500 + :align: center + + + :param Union[str, os.PathLike] config_path: Path to the configuration file, which includes settings and paths for data processing and storage. + :param str clf: The classifier name (e.g., 'CIRCLING') for which to calculate cumulative counts. + :param Optional[Union[str, os.PathLike]] data_dir: Directory containing the log files to analyze. If not provided, the default path in the configuration is used. + :param Optional[Union[str, os.PathLike]] save_path: Destination path to save the plot image. If None, saves to the logs path in the configuration. + :param Optional[bool] bouts: If True, calculates the cumulative count in terms of detected bouts instead of time or frames. + :param Optional[bool] seconds: If True, calculates time in seconds rather than frames. + :return: None. + + :example: + >>> plot_clf_cumcount(config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini", clf='CIRCLING', data_dir=r'D:\troubleshooting\mitra\project_folder\logs\test', seconds=True, bouts=True) + """ + + config = ConfigReader(config_path=config_path, read_video_info=True, create_logger=False) + if data_dir is not None: + check_if_dir_exists(in_dir=data_dir, source=f'{plot_clf_cumcount.__name__} data_dir') + else: + data_dir = config.machine_results_dir + if save_path is None: + save_path = os.path.join(config.logs_path, f'cumcount_{config.datetime}.png') + data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=[f'.{config.file_type}'], raise_error=True) + check_valid_boolean(value=[bouts, seconds], source=plot_clf_cumcount.__name__, raise_error=True) + check_str(name=f'{plot_clf_cumcount.__name__} clf', value=clf) + x_name = 'VIDEO TIME (FRAMES)' + y_name = f'{clf} TIME (FRAMES)' + if seconds: + check_all_file_names_are_represented_in_video_log(video_info_df=config.video_info_df, data_paths=data_paths) + x_name = f'VIDEO TIME (S)' + if bouts: + y_name = f'{clf} (BOUT COUNT)' + + clrs = create_color_palette(pallete_name='Set2', increments=len(data_paths), as_rgb_ratio=True) + for file_cnt, file_path in enumerate(data_paths): + _, video_name, _ = get_fn_ext(filepath=file_path) + print(f'Analysing video {video_name} ({file_cnt+1}/{len(data_paths)})...') + df = read_df(file_path=file_path, file_type=config.file_type) + check_valid_dataframe(df=df, source=f'{plot_clf_cumcount.__name__} {file_path}', required_fields=[clf]) + if not bouts and not seconds: + clf_sum = list(df[clf].cumsum().ffill()) + time = list(df.index) + elif not bouts and seconds: + _, _, fps = read_video_info(vid_info_df=config.video_info_df, video_name=video_name) + clf_sum = np.round(np.array(df[clf].cumsum().ffill() / fps), 2) + time = list(df.index / fps) + else: + bout_starts = 
detect_bouts(data_df=df, target_lst=[clf], fps=1)['Start_frame'].values + bouts_arr = np.full(len(df), fill_value=np.nan, dtype=np.float32) + bouts_arr[0] = 0 + for bout_cnt in range(bout_starts.shape[0]): bouts_arr[bout_starts[bout_cnt]] = bout_cnt+1 + clf_sum = pd.DataFrame(bouts_arr, columns=[clf]).ffill().values.reshape(-1) + if seconds: + _, _, fps = read_video_info(vid_info_df=config.video_info_df, video_name=video_name) + time = list(df.index / fps) + else: + time = list(df.index) + video_results = pd.DataFrame(data=clf_sum, columns=[y_name]) + video_results['VIDEO'] = video_name + video_results[x_name] = time + sns.lineplot(data=video_results, x=x_name, y=y_name, hue="VIDEO", palette=[clrs[file_cnt]]) + + plt.savefig(save_path) + + config.timer.stop_timer() + stdout_success(msg=f"Graph saved at {save_path}", elapsed_time=config.timer.elapsed_time_str) + # + # + # + # + # + + + + + + + + + +plot_clf_cumcount(config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini", + clf='CIRCLING', + data_dir=r'D:\troubleshooting\mitra\project_folder\logs\test', + seconds=True, + bouts=True) \ No newline at end of file diff --git a/simba/sandbox/cuml_kmeans.py b/simba/sandbox/cuml_kmeans.py new file mode 100644 index 000000000..e9f824a32 --- /dev/null +++ b/simba/sandbox/cuml_kmeans.py @@ -0,0 +1,44 @@ +import numpy as np +from typing import Optional, Tuple + + +from simba.utils.read_write import read_img_batch_from_video_gpu +from simba.mixins.image_mixin import ImageMixin +try: + from cuml.cluster import KMeans +except: + from sklearn.cluster import KMeans + +from simba.utils.checks import check_int, check_valid_array +from simba.utils.enums import Formats + + +def kmeans_cuml(data: np.ndarray, + k: int = 2, + max_iter: int = 300, + output_type: Optional[str] = None, + sample_n: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray]: + + """CRAP, SLOWER THAN SCIKIT""" + + check_valid_array(data=data, source=f'{kmeans_cuml.__name__} data', accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_int(name=f'{kmeans_cuml.__name__} k', value=k, min_value=1) + check_int(name=f'{kmeans_cuml.__name__} max_iter', value=max_iter, min_value=1) + kmeans = KMeans(n_clusters=k, max_iter=max_iter) + if sample_n is not None: + check_int(name=f'{kmeans_cuml.__name__} sample', value=sample_n, min_value=1) + sample = min(sample_n, data.shape[0]) + data_idx = np.random.choice(np.arange(data.shape[0]), sample) + mdl = kmeans.fit(data[data_idx]) + else: + mdl = kmeans.fit(data) + + return (mdl.cluster_centers_, mdl.predict(data)) + +import time +for i in [1000000, 2000000]: + data = np.random.randint(0, 500, (i, 400)).astype(np.int32) + start = time.perf_counter() + results = kmeans_cuml(data=data) + elapsed = time.perf_counter() - start + print(i, elapsed) \ No newline at end of file diff --git a/simba/sandbox/czenakowski.py b/simba/sandbox/czenakowski.py new file mode 100644 index 000000000..1b154ef54 --- /dev/null +++ b/simba/sandbox/czenakowski.py @@ -0,0 +1,110 @@ +import numpy as np +from simba.utils.checks import check_valid_array +from simba.utils.enums import Formats +from numba import jit, njit +import time +from typing import Optional + +@jit(nopython=True) +def czebyshev_distance(sample_1: np.ndarray, sample_2: np.ndarray) -> float: + """ + Calculate the Czebyshev distance between two N-dimensional samples. + + The Czebyshev distance is defined as the maximum absolute difference + between the corresponding elements of the two arrays. + + .. 
math:: + D_\infty(p, q) = \max_i \left| p_i - q_i \right| + + :param np.ndarray sample_1: The first sample, an N-dimensional NumPy array. + :param np.ndarray sample_2: The second sample, an N-dimensional NumPy array. + :return float: The Czebyshev distance between the two samples. + + :example: + >>> sample_1 = np.random.randint(0, 10, (10000,100)) + >>> sample_2 = np.random.randint(0, 10, (10000,100)) + >>> czebyshev_distance(sample_1=sample_1, sample_2=sample_2) + """ + + c = 0.0 + for idx in np.ndindex(sample_1.shape): + c = max((c, np.abs(sample_1[idx] - sample_2[idx]))) + return c + + +@njit(["(float32[:, :], float64[:], int64)",]) +def sliding_czebyshev_distance(x: np.ndarray, window_sizes: np.ndarray, sample_rate: float) -> np.ndarray: + """ + Calculate the sliding Chebyshev distance for a given signal with different window sizes. + + This function computes the sliding Chebyshev distance for a signal `x` using + different window sizes specified by `window_sizes`. The Chebyshev distance measures + the maximum absolute difference between the corresponding elements of two signals. + + .. note:: + Normalize array x before passing it to ensure accurate results. + + .. math:: + D_\infty(p, q) = \max_i \left| p_i - q_i \right| + + :param np.ndarray x: Input signal, a 2D array with shape (n_samples, n_features). + :param np.ndarray window_sizes: Array containing window sizes for sliding computation. + :param float sample_rate: Sampling rate of the signal. + :return np.ndarray: 2D array of Chebyshev distances for each window size and position. + """ + + result = np.full((x.shape[0], window_sizes.shape[0]), 0.0) + for i in range(window_sizes.shape[0]): + window_size = int(window_sizes[i] * sample_rate) + for l, r in zip(range(0, x.shape[0] + 1), range(window_size, x.shape[0] + 1)): + sample, c = x[l:r, :], 0.0 + for j in range(sample.shape[1]): + c = max(c, (np.abs(np.min(sample[:, j]) - np.max(sample[:, j])))) + result[r-1, i] = c + return result + + + +@njit(["(int64[:], int64[:], float64[:])", "(int64[:], int64[:], types.misc.Omitted(None))", + "(int64[:, :], int64[:, :], float64[:])", "(int64[:, :], int64[:, :], types.misc.Omitted(None))"]) +def sokal_michener(x: np.ndarray, y: np.ndarray, w: Optional[np.ndarray] = None) -> float: + """ + Jitted compute of the Sokal-Michener dissimilarity between two binary vectors or matrices. + + Higher values indicate more dissimilar vectors or matrices, while lower values indicate more similar vectors or matrices. + + The Sokal-Michener dissimilarity is a measure of dissimilarity between two sets + based on the presence or absence of attributes, commonly used in ecological and + biological studies. This implementation supports weighted dissimilarity. + + :param np.ndarray x: First binary vector or matrix. + :param np.ndarray y: Second binary vector or matrix. + :param Optional[np.ndarray] w: Optional weight vector. If None, all weights are considered as 1. + :return float: Sokal-Michener dissimilarity between `x` and `y`. 
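+
+    As implemented here, with :math:`u` the weighted count of disagreeing positions and :math:`N` the total number of elements:
+
+    .. math::
+       D(x, y) = \frac{2u}{N + u}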
+
+    :example:
+    >>> x = np.random.randint(0, 2, (200,))
+    >>> y = np.random.randint(0, 2, (200,))
+    >>> sokal_michener_dissimilarity = sokal_michener(x=x, y=y)
+    """
+    if w is None:
+        w = np.ones(x.shape[0]).astype(np.float64)
+    unequal_cnt = 0.0
+    for i in np.ndindex(x.shape):
+        x_i, y_i = x[i], y[i]
+        if x_i != y_i:
+            unequal_cnt += 1 * w[i[0]]
+    return (2.0 * unequal_cnt) / (x.size + unequal_cnt)
+
+# x = np.random.randint(0, 2, (200,))
+# y = np.random.randint(0, 2, (200,))
+# sokal_michener(x=x, y=y)
+
+# sliding_czebyshev_distance(x=sample_1, window_sizes=np.array([1.0, 2.0]), sample_rate=10.0)
+# print(time.time() - start)
+
+# check_valid_array(data=sample_1, source=f'{czebyshev_distance.__name__} sample_1', accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+# check_valid_array(data=sample_2, source=f'{czebyshev_distance.__name__} sample_2', accepted_ndims=(sample_1.ndim,), accepted_shapes=(sample_1.shape,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) \ No newline at end of file diff --git a/simba/sandbox/data.npy b/simba/sandbox/data.npy new file mode 100644 index 0000000000000000000000000000000000000000..211b274999cd2bebb0437f50059ec292b15ad8af GIT binary patch literal 3328 [3,328 bytes of base85-encoded binary data omitted] diff --git a/simba/sandbox/davis_bouldin.py b/simba/sandbox/davis_bouldin.py new file mode 100644 --- /dev/null +++ b/simba/sandbox/davis_bouldin.py @@ -0,0 +1,64 @@
+import time
+
+import numpy as np
+
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.utils.checks import check_valid_array
+
+
+def davis_bouldin(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Calculate the Davis-Bouldin index for evaluating clustering performance.
+
+    The Davis-Bouldin index measures clustering quality based on the within-cluster
+    similarity and between-cluster dissimilarity. Lower values indicate better clustering.
+
+    .. note::
+       Modified from `scikit-learn `_
+
+    :param np.ndarray x: 2D array representing the data points. Shape (n_samples, n_features/n_dimension).
+    :param np.ndarray y: 1D array representing cluster labels for each data point. Shape (n_samples,).
+    :return float: Davis-Bouldin score.
+
+    :example:
+    >>> x = np.random.randint(0, 100, (100, 2))
+    >>> y = np.random.randint(0, 3, (100,))
+    >>> davis_bouldin(x=x, y=y)
+    """
+
+    check_valid_array(data=x, source=davis_bouldin.__name__, accepted_ndims=(2,), accepted_dtypes=(int, float))
+    check_valid_array(data=y, source=davis_bouldin.__name__, accepted_ndims=(1,), accepted_shapes=[(x.shape[0],)], accepted_dtypes=(int, float))
+    n_labels = np.unique(y).shape[0]
+    intra_dists = np.full((n_labels), 0.0)
+    centroids = np.full((n_labels, x.shape[1]), 0.0)
+    for k in range(n_labels):
+        cluster_k = x[np.argwhere(y == k)].reshape(-1, x.shape[1])
+        cluster_mean = np.full((x.shape[1]), np.nan)
+        for i in range(cluster_mean.shape[0]):
+            cluster_mean[i] = np.mean(cluster_k[:, i].flatten())
+        centroids[k] = cluster_mean
+        intra_dists[k] = np.average(FeatureExtractionMixin.framewise_euclidean_distance(location_1=cluster_k, location_2=np.full(cluster_k.shape, cluster_mean), px_per_mm=1))
+    centroid_distances = FeatureExtractionMixin.cdist(array_1=centroids.astype(np.float32), array_2=centroids.astype(np.float32))
+    if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
+        return 0.0
+    centroid_distances[centroid_distances == 0] = np.inf
+    combined_intra_dists = intra_dists[:, None] + intra_dists
+    return np.mean(np.max(combined_intra_dists / centroid_distances, axis=1))
+
+
+x = np.random.random((1000000, 2))
+y = np.random.randint(0, 25, (1000000,))
+start = time.time()
+z = davis_bouldin(x=x, y=y)
+print(time.time() - start)
+
+start = time.time()
+p = davis_bouldin(x, y)
+print(time.time() - start)
+print(z, p) \ No newline at end of file diff --git a/simba/sandbox/detect_scene_changes.py b/simba/sandbox/detect_scene_changes.py new file mode 100644 index 000000000..531f71fb5 --- /dev/null +++ b/simba/sandbox/detect_scene_changes.py @@ -0,0 +1,13 @@
+from typing import Union, Optional
+import os
+import subprocess
+
+
+def detect_scene_changes(video_path: Union[str, os.PathLike], threshold: Optional[float] = 0.4):
+    cmd = f"ffmpeg -i \"{video_path}\" -vf select='gt(scene\\,{threshold})',showinfo -vsync vfr -f null -"
+    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    output, _ = process.communicate()
+    print(output.decode("utf-8"))
+
+detect_scene_changes(video_path='/Users/simon/Desktop/video_test/test/concatenated.mp4', threshold=0.01) \ No newline at end of file diff --git a/simba/sandbox/directed_hausdorff.py b/simba/sandbox/directed_hausdorff.py new file mode 100644 index 000000000..421dfd966 --- /dev/null +++ b/simba/sandbox/directed_hausdorff.py @@ -0,0 +1,66 @@
+import numpy as np
+
+def directed_hausdorff_nb(ar1, ar2):
+    N1 = ar1.shape[0]
+    N2 = ar2.shape[0]
+    data_dims = ar1.shape[1]
+
+    # Shuffling for very small arrays disabled
+    # Enable it for larger arrays
+    #resort1 = np.arange(N1)
+    #resort2 = np.arange(N2)
+    #np.random.shuffle(resort1)
+    #np.random.shuffle(resort2)
+
+    #ar1 = ar1[resort1]
+    #ar2 = ar2[resort2]
+
+    cmax = 0
+    for i in range(N1):
+        no_break_occurred = True
+        cmin = np.inf
+        for j in range(N2):
+            # faster performance with square of distance
+            # avoid sqrt until very end
+            # Simplification (loop unrolling) for (n,2) arrays
+            d = (ar1[i, 0] - ar2[j, 0])**2+(ar1[i, 1] - ar2[j, 1])**2
+            if d < cmax:  # break out of `for j` loop
+                no_break_occurred = False
+                break
+
+            if d < cmin:  # always true on first iteration of for-j loop
+                cmin = d
+
+        # always true on first iteration of for-j loop, after that only
+        # if d >= cmax
+        if cmin != np.inf and cmin > cmax and no_break_occurred:
+            cmax = cmin
+
+    return np.sqrt(cmax)
+
+
+x1 = np.random.randint(0, 100, (100, 2))
+y2 = np.random.randint(0, 100, (100, 2))
+
+x = np.array([[0, 0], [1, 1], [0, 3]])
+y = np.array([[5, 1], [100, 2], [5, 3]])
+
+
+print(directed_hausdorff_nb(ar1=x, ar2=y))
+
+from scipy.spatial.distance import directed_hausdorff
+from shapely.geometry import LineString
+
+# Define two LineString geometries
+line1 = LineString([(0, 0), (1, 1), (2, 2)])
+line2 = LineString([(0, 1), (1, 2), (2, 3)])
+
+# Convert LineStrings to arrays of coordinates
+coords1 = [(x, y) for x, y in line1.coords]
+coords2 = [(x, y) for x, y in line2.coords]
+
+# Compute the directed Hausdorff distance (scipy returns a (distance, index_1, index_2) tuple)
+distance = directed_hausdorff(coords1, coords2)[0]
+
+print("Hausdorff Distance:", distance) diff --git a/simba/sandbox/direction_two_bps.py b/simba/sandbox/direction_two_bps.py new file mode 100644 index 000000000..4dee71a60 --- /dev/null +++ b/simba/sandbox/direction_two_bps.py @@ -0,0 +1,45 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import math
+
+import numpy as np
+from numba import cuda, int32
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit()
+def _cuda_direction_from_two_bps(x, y, results):
+    i = cuda.grid(1)
+    if i >= x.shape[0]:
+        return
+    else:
+        a = math.atan2(x[i][0] - y[i][0], y[i][1] - x[i][1]) * (180 / math.pi)
+        a = int32(a + 360 if a < 0 else a)
+        results[i] = a
+
+
+def direction_from_two_bps(x: np.ndarray, y: np.ndarray) -> np.ndarray:
+    """
+    Compute the directionality in degrees from two body-parts. E.g., ``nape`` and ``nose``,
+    or ``swim_bladder`` and ``tail`` with GPU acceleration.
+
+    .. image:: _static/img/direction_from_two_bps_cuda.png
+       :width: 1200
+       :align: center
+
+    :parameter np.ndarray x: Size len(frames) x 2 representing x and y coordinates for first body-part.
+    :parameter np.ndarray y: Size len(frames) x 2 representing x and y coordinates for second body-part.
+    :return np.ndarray: Frame-wise directionality in degrees.
+
+    """
+    x = np.ascontiguousarray(x).astype(np.int32)
+    y = np.ascontiguousarray(y).astype(np.int32)
+    x_dev = cuda.to_device(x)
+    y_dev = cuda.to_device(y)
+    results = cuda.device_array((x.shape[0]), dtype=np.int32)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_direction_from_two_bps[bpg, THREADS_PER_BLOCK](x_dev, y_dev, results)
+    results = results.copy_to_host()
+    return results diff --git a/simba/sandbox/distance_velocity.py b/simba/sandbox/distance_velocity.py new file mode 100644 index 000000000..196c67a1b --- /dev/null +++ b/simba/sandbox/distance_velocity.py @@ -0,0 +1,33 @@
+import numpy as np
+from typing import Optional, Tuple
+from simba.utils.checks import check_float, check_valid_array
+
+def distance_and_velocity(x: np.array,
+                          fps: float,
+                          pixels_per_mm: float,
+                          centimeters: Optional[bool] = True) -> Tuple[float, float]:
+    """
+    Calculate total movement and mean velocity from a sequence of position data.
+
+    :param x: Array containing movement data. For example, created by ``simba.mixins.FeatureExtractionMixin.framewise_euclidean_distance``.
+    :param fps: Frames per second of the data.
+    :param pixels_per_mm: Conversion factor from pixels to millimeters.
+    :param Optional[bool] centimeters: If True, results are returned in centimeters. Defaults to True.
+    :return Tuple[float, float]: A tuple containing total movement and mean velocity.
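+
+    :example:
+    >>> x = np.random.randint(0, 10, (20,)).astype(np.float64)
+    >>> movement, mean_velocity = distance_and_velocity(x=x, fps=10, pixels_per_mm=99, centimeters=True)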
+ """ + + check_valid_array(data=x, source=distance_and_velocity.__name__, accepted_ndims=(1,), accepted_dtypes=(np.float32, np.float64, np.int32, np.int64, int, float), min_axis_0=1) + check_float(name=f'{distance_and_velocity.__name__} fps', value=fps, min_value=1) + check_float(name=f'{distance_and_velocity.__name__} pixels_per_mm', value=pixels_per_mm, min_value=10e-6) + movement = (np.sum(x) / pixels_per_mm) + v = [] + for i in range(0, x.shape[0], int(fps)): + w = x[i: (i+fps)] + v.append((np.sum(w) / pixels_per_mm) * (1 / (w.shape[0] / int(fps)))) + if centimeters: + v = [vi/ 10 for vi in v] + movement = movement / 10 + return movement, np.mean(v) + + +# x = np.random.randint(0, 10, (20,)) +# distance_and_velocity(x=x, fps=10, pixels_per_mm=99, centimeters=True) \ No newline at end of file diff --git a/simba/sandbox/distances.py b/simba/sandbox/distances.py new file mode 100644 index 000000000..da76d07fd --- /dev/null +++ b/simba/sandbox/distances.py @@ -0,0 +1,192 @@ +import numpy as np +from scipy import stats +from typing import Optional + +try: + from typing import Literal +except ImportError: + from typing_extensions import Literal + +from simba.utils.checks import check_valid_array, check_str +from simba.utils.data import bucket_data +from simba.utils.enums import Options +from simba.mixins.statistics_mixin import Statistics + + +class DistanceStatistics(object): + """ + Computes distances between probability distributions. Useful for (i) measure drift in datasets, and (ii) featurization of distribution shifts across. + + :examples: + >>> x = np.array([1, 5, 10, 20, 50]).astype(np.float32) + >>> y = np.array([1, 5, 10, 100, 110]).astype(np.float32) + >>> distance_statistics = DistanceStatistics() + >>> wave_hedges = distance_statistics.wave_hedges_distance(x=x, y=y, normalize=True) + >>> gower = distance_statistics.gower_distance(x=x, y=y, normalize=True, bucket_method='auto') + >>> wasserstein = distance_statistics.wasserstein_distance(x=x, y=y, normalize=True, bucket_method='auto') + >>> jensen_shannon_divergence = distance_statistics.jensen_shannon_divergence(x=x, y=y, normalize=True, bucket_method='auto') + >>> kullback_leibler_divergence = distance_statistics.kullback_leibler_divergence(x=x, y=y, normalize=True, bucket_method='auto', fill_value=20) + >>> total_variation_distance = distance_statistics.total_variation_distance(x=x, y=y, normalize=False) + >>> population_stability_index = distance_statistics.population_stability_index(x=x, y=y, normalize=True, bucket_method='auto', fill_value=1) + >>> normalized_google_distance = distance_statistics.normalized_google_distance(x=x, y=y, normalize=False, bucket_method='auto', fill_value=1) + >>> jeffreys_divergence = distance_statistics.jeffreys_divergence(x=x, y=y, normalize=True, bucket_method='auto', fill_value=1) + + + References + ---------- + .. [1] `statistical-distances `_. 
+ + """ + def __init__(self): + pass + + @staticmethod + def distance_discretizer(func): + def wrapper(self, x: np.ndarray, y: np.ndarray, bucket_method: Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = 1): + check_valid_array(data=x, source=func.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, np.int8, np.float32, np.float64, int, float)) + check_valid_array(data=y, source=func.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, np.int8, np.float32, np.float64, int, float)) + check_str(name=f"{func.__name__} method", value=bucket_method, options=Options.BUCKET_METHODS.value) + bin_width, bin_count = bucket_data(data=x, method=bucket_method) + x_h = Statistics._hist_1d(data=x, bin_count=bin_count, range=np.array([0, int(bin_width * bin_count)]), normalize=normalize) + y_h = Statistics._hist_1d(data=y, bin_count=bin_count, range=np.array([0, int(bin_width * bin_count)]), normalize=normalize) + return func(self, x=x, y=y, bucket_method=bucket_method, bin_width=bin_width, bin_count=bin_count, normalize=normalize, x_h=x_h, y_h=y_h, fill_value=fill_value) + + return wrapper + + @distance_discretizer.__get__('') + def wave_hedges_distance(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None): + """ + Compute Wave Hedges distance between two distributions. + """ + + return 0.5 * np.sum(np.abs(x_h - y_h)) + + @distance_discretizer.__get__('') + def gower_distance(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None): + """ + Compute Gower distance between two probability distributions. + """ + return np.sum(np.abs(x_h - y_h)) / x_h.size + + @distance_discretizer.__get__('') + def wasserstein_distance(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None): + """ + Compute Wasserstein distance between two distributions. + + .. note:: + Uses ``stats.wasserstein_distance``. I have tried to move ``stats.wasserstein_distance`` to jitted method extensively, + but this doesn't give significant runtime improvement. Rate-limiter appears to be the _hist_1d. + + :parameter ndarray sample_1: First 1d array representing feature values. + :parameter ndarray sample_2: Second 1d array representing feature values. + :parameter Literal bucket_method: Estimator determining optimal bucket count and bucket width. 
Default: The maximum of the Sturges and Freedman-Diaconis estimators
+        :returns float: Wasserstein distance between ``sample_1`` and ``sample_2``
+        """
+        return stats.wasserstein_distance(u_values=x_h, v_values=y_h)
+
+    @distance_discretizer.__get__('')
+    def jensen_shannon_divergence(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None):
+        """
+        Compute Jensen-Shannon divergence between two distributions. Useful for (i) measuring drift in datasets, and (ii) featurization of distribution shifts across
+        sequential time-bins.
+
+        .. note::
+           JSD = 0: Indicates that the two distributions are identical.
+           0 < JSD < 1: Indicates a degree of dissimilarity between the distributions, with values closer to 1 indicating greater dissimilarity.
+           JSD = 1: Indicates that the two distributions are maximally dissimilar.
+
+        :parameter ndarray sample_1: First 1d array representing feature values.
+        :parameter ndarray sample_2: Second 1d array representing feature values.
+        :parameter Literal bucket_method: Estimator determining optimal bucket count and bucket width. Default: The maximum of the Sturges and Freedman-Diaconis estimators.
+        :returns float: Jensen-Shannon divergence between ``sample_1`` and ``sample_2``
+        """
+        mean_hist = np.mean([x_h, y_h], axis=0)
+        kl_sample_1, kl_sample_2 = stats.entropy(pk=x_h, qk=mean_hist), stats.entropy(pk=y_h, qk=mean_hist)
+        return (kl_sample_1 + kl_sample_2) / 2
+
+    @distance_discretizer.__get__('')
+    def kullback_leibler_divergence(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None):
+        """
+        Compute Kullback-Leibler divergence between two distributions.
+
+        .. note::
+           Empty bins (0 observations in a bin) are replaced with the passed ``fill_value``.
+
+           Its range is from 0 to positive infinity. When the KL divergence is zero, it indicates that the two distributions are identical. As the KL divergence increases, it signifies an increasing difference between the distributions.
+
+        :parameter ndarray sample_1: First 1d array representing feature values.
+        :parameter ndarray sample_2: Second 1d array representing feature values.
+        :parameter Optional[int] fill_value: Optional pseudo-value to use to fill empty buckets in the ``sample_2`` histogram.
+        :parameter Literal bucket_method: Estimator determining optimal bucket count and bucket width. Default: The maximum of the Sturges and Freedman-Diaconis estimators
+        :returns float: Kullback-Leibler divergence between ``sample_1`` and ``sample_2``
+        """
+        x_h[x_h == 0] = fill_value
+        y_h[y_h == 0] = fill_value
+        return stats.entropy(pk=x_h, qk=y_h)
+
+    @distance_discretizer.__get__('')
+    def population_stability_index(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None):
+        """
+        Compute Population Stability Index (PSI) comparing two distributions.
+
+        .. note::
+           Empty bins (0 observations in a bin) are replaced with ``fill_value``. The PSI value ranges from 0 to positive infinity.
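+
+        Matching the computation in the method body, with :math:`p_i` and :math:`q_i` the normalized frequencies of bin :math:`i` in the first and second sample:
+
+        .. math::
+           PSI = \sum_{i} (q_i - p_i) \ln\frac{q_i}{p_i}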
+
+        :parameter np.ndarray x: First 1d array representing feature values.
+        :parameter np.ndarray y: Second 1d array representing feature values.
+        :parameter Optional[int] fill_value: Value used to replace empty bins (0 observations in bin). Default 1.
+        :parameter Literal bucket_method: Estimator determining optimal bucket count and bucket width. Default: the maximum of the Sturges and Freedman-Diaconis estimators.
+        :returns float: PSI distance between ``x`` and ``y``.
+
+        :example:
+        >>> x, y = np.random.randint(0, 100, (100,)), np.random.randint(0, 10, (100,))
+        >>> DistanceStatistics().population_stability_index(x=x, y=y, fill_value=1, bucket_method='auto')
+        >>> 3.9657026867553817
+        """
+
+        x_h[x_h == 0] = fill_value
+        y_h[y_h == 0] = fill_value
+        x_h, y_h = x_h / np.sum(x_h), y_h / np.sum(y_h)
+        samples_diff = y_h - x_h
+        log = np.log(y_h / x_h)
+        return np.sum(samples_diff * log)
+
+    @distance_discretizer.__get__('')
+    def total_variation_distance(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None):
+        """
+        Calculate the total variation distance between two probability distributions.
+
+        :param np.ndarray x: A 1-D array representing the first sample.
+        :param np.ndarray y: A 1-D array representing the second sample.
+        :param Optional[str] bucket_method: The method used to determine the number of bins for histogram computation. Supported methods are 'fd' (Freedman-Diaconis), 'doane', 'auto', 'scott', 'stone', 'rice', 'sturges', and 'sqrt'. Defaults to 'auto'.
+        :return float: The total variation distance between the two distributions.
+
+        .. math::
+
+           TV(P, Q) = 0.5 \sum_i |P_i - Q_i|
+
+        where :math:`P_i` and :math:`Q_i` are the probabilities assigned by the distributions :math:`P` and :math:`Q`
+        to the same event :math:`i`, respectively.
+
+        :example:
+        >>> DistanceStatistics().total_variation_distance(x=np.array([1, 5, 10, 20, 50]), y=np.array([1, 5, 10, 100, 110]))
+        >>> 0.3999999761581421
+        """
+        return 0.5 * np.sum(np.abs(x_h - y_h))
+
+    @distance_discretizer.__get__('')
+    def normalized_google_distance(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None):
+        """
+        Compute the Normalized Google distance between two probability distributions represented by histograms.
+        """
+        sum_x, sum_y = float(np.sum(x_h)), float(np.sum(y_h))
+        return (max(sum_x, sum_y) - float(np.sum(np.minimum(x_h, y_h)))) / ((sum_x + sum_y) - min(sum_x, sum_y))
+
+    @distance_discretizer.__get__('')
+    def jeffreys_divergence(self, x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = 'auto', normalize: Optional[bool] = True, fill_value: Optional[int] = None, bin_width=None, bin_count=None, x_h=None, y_h=None):
+        """
+        Compute the Jeffreys divergence (symmetrized Kullback-Leibler divergence) between two probability distributions represented by histograms. Empty bins are replaced with ``fill_value``.
+        """
+        x_h[x_h == 0] = fill_value
+        y_h[y_h == 0] = fill_value
+        return np.sum((x_h - y_h) * np.log(x_h / y_h))
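+
+
+# A minimal usage sketch (hedged: assumes the class above is named ``DistanceStatistics``,
+# matching the doctests; the class name is not otherwise confirmed in this file):
+#
+# >>> import numpy as np
+# >>> x, y = np.random.randint(0, 100, (100,)), np.random.randint(0, 10, (100,))
+# >>> d = DistanceStatistics()
+# >>> d.wave_hedges_distance(x=x, y=y, bucket_method='auto')
+# >>> d.kullback_leibler_divergence(x=x, y=y, fill_value=1, bucket_method='fd')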
+ """ + x_h[x_h == 0] = fill_value + y_h[y_h == 0] = fill_value + return np.sum((x_h - y_h) * np.log(x_h / y_h)) \ No newline at end of file diff --git a/simba/sandbox/downsample_multiple_videos_popup.py b/simba/sandbox/downsample_multiple_videos_popup.py new file mode 100644 index 000000000..831e48ccc --- /dev/null +++ b/simba/sandbox/downsample_multiple_videos_popup.py @@ -0,0 +1,81 @@ +from tkinter import * +import threading +import os +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, FolderSelect, Entry_Box, DropDownMenu +from simba.utils.enums import Keys, Links, Options, Formats +from simba.utils.checks import check_if_dir_exists, check_int, check_ffmpeg_available, check_nvidea_gpu_available +from simba.utils.read_write import find_all_videos_in_directory +from simba.video_processors.video_processing import downsample_video, resize_videos_by_width, resize_videos_by_height +from simba.utils.errors import InvalidInputError + +class DownsampleMultipleVideosPopUp(PopUpMixin): + def __init__(self): + PopUpMixin.__init__(self,title="DOWN-SAMPLE MULTIPLE VIDEO RESOLUTION") + choose_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SELECT VIDEO", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.DOWNSAMPLE.value) + + self.video_dir_selected = FolderSelect(choose_video_frm, "VIDEO DIRECTORY:",title="Select Folder with videos", lblwidth=20) + choose_video_frm.grid(row=0, column=0, sticky=NW) + self.video_dir_selected.grid(row=0, column=0, sticky=NW) + + gpu_frm = LabelFrame(self.main_frm, text="GPU (REDUCED RUNTIMES)", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", padx=5, pady=5) + self.use_gpu_var = BooleanVar(value=False) + use_gpu_cb = Checkbutton(gpu_frm, text="Use GPU (reduced runtime)", variable=self.use_gpu_var) + choose_video_frm.grid(row=1, column=0, sticky=NW) + use_gpu_cb.grid(row=0, column=0, sticky=NW) + + custom_size_frm = LabelFrame(self.main_frm, text="CUSTOM RESOLUTION",font=Formats.LABELFRAME_HEADER_FORMAT.value,fg="black",padx=5,pady=5) + self.entry_width = Entry_Box(custom_size_frm, "Width", "10", validation="numeric") + self.entry_height = Entry_Box(custom_size_frm, "Height", "10", validation="numeric") + self.custom_downsample_btn = Button(custom_size_frm, text="DOWN-SAMPLE USING CUSTOM RESOLUTION", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", command=lambda: self.downsample_custom()) + + custom_size_frm.grid(row=2, column=0, sticky=NW) + self.entry_width.grid(row=0, column=0, sticky=NW) + self.entry_height.grid(row=1, column=0, sticky=NW) + self.custom_downsample_btn.grid(row=2, column=0, sticky=NW) + + default_size_frm = LabelFrame(self.main_frm, text="DEFAULT RESOLUTION",font=Formats.LABELFRAME_HEADER_FORMAT.value,fg="black",padx=5,pady=5) + self.width_dropdown = DropDownMenu(default_size_frm, "WIDTH:", Options.RESOLUTION_OPTIONS_2.value, labelwidth=20) + self.height_dropdown = DropDownMenu(default_size_frm, "HEIGHT:", Options.RESOLUTION_OPTIONS_2.value, labelwidth=20) + self.width_dropdown.setChoices(640) + self.height_dropdown.setChoices("AUTO") + self.default_downsample_btn = Button(default_size_frm, text="DOWN-SAMPLE USING DEFAULT RESOLUTION", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", command=lambda: self.downsample_default()) + + default_size_frm.grid(row=3, column=0, sticky=NW) + self.width_dropdown.grid(row=0, column=0, sticky=NW) + self.height_dropdown.grid(row=1, column=0, sticky=NW) + self.default_downsample_btn.grid(row=2, column=0, 
+    def _checks(self):
+        check_ffmpeg_available(raise_error=True)
+        self.gpu = self.use_gpu_var.get()
+        if self.gpu:
+            check_nvidea_gpu_available()
+        self.video_directory = self.video_dir_selected.folder_path
+        check_if_dir_exists(in_dir=self.video_directory)
+        self.video_paths = find_all_videos_in_directory(directory=self.video_directory, raise_error=True, as_dict=True)
+
+    def downsample_custom(self):
+        self._checks()
+        width, height = self.entry_width.entry_get, self.entry_height.entry_get
+        check_int(name=f'{self.__class__.__name__} width', value=width, min_value=1)
+        check_int(name=f'{self.__class__.__name__} height', value=height, min_value=1)
+        for file_path in self.video_paths.values():
+            threading.Thread(target=downsample_video, kwargs={'file_path': file_path, 'video_height': height, 'video_width': width, 'gpu': self.gpu}).start()
+
+    def downsample_default(self):
+        self._checks()
+        width, height = self.width_dropdown.getChoices(), self.height_dropdown.getChoices()
+        if width == 'AUTO' and height == 'AUTO':
+            raise InvalidInputError(msg='Both width and height cannot be AUTO', source=self.__class__.__name__)
+        elif width == 'AUTO':
+            resize_videos_by_height(video_paths=list(self.video_paths.values()), height=int(height), overwrite=False, save_dir=None, gpu=self.gpu, suffix='downsampled', verbose=True)
+        elif height == 'AUTO':
+            resize_videos_by_width(video_paths=list(self.video_paths.values()), width=int(width), overwrite=False, save_dir=None, gpu=self.gpu, suffix='downsampled', verbose=True)
+        else:
+            for file_path in self.video_paths.values():
+                threading.Thread(target=downsample_video, kwargs={'file_path': file_path, 'video_height': height, 'video_width': width, 'gpu': self.gpu}).start()
+
+
+#DownsampleMultipleVideosPopUp()
\ No newline at end of file
diff --git a/simba/sandbox/downsample_video_popup.py b/simba/sandbox/downsample_video_popup.py
new file mode 100644
index 000000000..bfad44ee4
--- /dev/null
+++ b/simba/sandbox/downsample_video_popup.py
@@ -0,0 +1,78 @@
+from tkinter import *
+import threading
+import os
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, FileSelect, Entry_Box, DropDownMenu
+from simba.utils.enums import Keys, Links, Options, Formats
+from simba.utils.checks import check_file_exist_and_readable, check_int, check_ffmpeg_available, check_nvidea_gpu_available
+from simba.utils.read_write import get_video_meta_data, get_fn_ext
+from simba.video_processors.video_processing import downsample_video, resize_videos_by_width, resize_videos_by_height
+from simba.utils.errors import InvalidInputError
+
+class DownsampleSingleVideoPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="DOWN-SAMPLE SINGLE VIDEO RESOLUTION")
+        choose_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SELECT VIDEO", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.DOWNSAMPLE.value)
+        self.video_path_selected = FileSelect(choose_video_frm, "VIDEO PATH: ", title="Select a video file", file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        choose_video_frm.grid(row=0, column=0, sticky=NW)
+        self.video_path_selected.grid(row=0, column=0, sticky=NW)
+
+        gpu_frm = LabelFrame(self.main_frm, text="GPU (REDUCED RUNTIMES)", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", padx=5, pady=5)
+        self.use_gpu_var = BooleanVar(value=False)
+        use_gpu_cb = Checkbutton(gpu_frm, text="Use GPU (reduced runtime)", variable=self.use_gpu_var)
+        gpu_frm.grid(row=1, column=0, sticky=NW)
+        use_gpu_cb.grid(row=0, column=0, sticky=NW)
+
+        custom_size_frm = LabelFrame(self.main_frm, text="CUSTOM RESOLUTION", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", padx=5, pady=5)
+        self.entry_width = Entry_Box(custom_size_frm, "Width", "10", validation="numeric")
+        self.entry_height = Entry_Box(custom_size_frm, "Height", "10", validation="numeric")
+        self.custom_downsample_btn = Button(custom_size_frm, text="DOWN-SAMPLE USING CUSTOM RESOLUTION", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", command=lambda: self.downsample_custom())
+
+        custom_size_frm.grid(row=2, column=0, sticky=NW)
+        self.entry_width.grid(row=0, column=0, sticky=NW)
+        self.entry_height.grid(row=1, column=0, sticky=NW)
+        self.custom_downsample_btn.grid(row=2, column=0, sticky=NW)
+
+        default_size_frm = LabelFrame(self.main_frm, text="DEFAULT RESOLUTION", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", padx=5, pady=5)
+        self.width_dropdown = DropDownMenu(default_size_frm, "WIDTH:", Options.RESOLUTION_OPTIONS_2.value, labelwidth=20)
+        self.height_dropdown = DropDownMenu(default_size_frm, "HEIGHT:", Options.RESOLUTION_OPTIONS_2.value, labelwidth=20)
+        self.width_dropdown.setChoices(640)
+        self.height_dropdown.setChoices("AUTO")
+        self.default_downsample_btn = Button(default_size_frm, text="DOWN-SAMPLE USING DEFAULT RESOLUTION", font=Formats.LABELFRAME_HEADER_FORMAT.value, fg="black", command=lambda: self.downsample_default())
+
+        default_size_frm.grid(row=3, column=0, sticky=NW)
+        self.width_dropdown.grid(row=0, column=0, sticky=NW)
+        self.height_dropdown.grid(row=1, column=0, sticky=NW)
+        self.default_downsample_btn.grid(row=2, column=0, sticky=NW)
+        self.main_frm.mainloop()
+
+    def _checks(self):
+        check_ffmpeg_available(raise_error=True)
+        self.gpu = self.use_gpu_var.get()
+        if self.gpu:
+            check_nvidea_gpu_available()
+        self.file_path = self.video_path_selected.file_path
+        check_file_exist_and_readable(file_path=self.file_path)
+        _ = get_video_meta_data(video_path=self.file_path)
+
+    def downsample_custom(self):
+        self._checks()
+        width, height = self.entry_width.entry_get, self.entry_height.entry_get
+        check_int(name=f'{self.__class__.__name__} width', value=width, min_value=1)
+        check_int(name=f'{self.__class__.__name__} height', value=height, min_value=1)
+        threading.Thread(target=downsample_video, kwargs={'file_path': self.file_path, 'video_height': height, 'video_width': width, 'gpu': self.gpu}).start()
+
+    def downsample_default(self):
+        self._checks()
+        width, height = self.width_dropdown.getChoices(), self.height_dropdown.getChoices()
+        if width == 'AUTO' and height == 'AUTO':
+            raise InvalidInputError(msg='Both width and height cannot be AUTO', source=self.__class__.__name__)
+        elif width == 'AUTO':
+            resize_videos_by_height(video_paths=[self.file_path], height=int(height), overwrite=False, save_dir=None, gpu=self.gpu, suffix='downsampled', verbose=True)
+        elif height == 'AUTO':
+            resize_videos_by_width(video_paths=[self.file_path], width=int(width), overwrite=False, save_dir=None, gpu=self.gpu, suffix='downsampled', verbose=True)
+        else:
+            threading.Thread(target=downsample_video, kwargs={'file_path': self.file_path, 'video_height': height, 'video_width': width, 'gpu': self.gpu}).start()
+
+
+DownsampleSingleVideoPopUp()
\ No newline at end of file
diff --git a/simba/sandbox/dprime.py b/simba/sandbox/dprime.py
new file mode 100644
index 000000000..edba985d5
--- /dev/null
+++ b/simba/sandbox/dprime.py
@@ -0,0 +1,54 @@
+import math
+import numpy as np
+from scipy.stats import norm
+from typing import Optional
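+
+# d-prime (d') is the sensitivity index from signal detection theory:
+# d' = z(hit rate) - z(false-alarm rate), where z is the inverse of the standard
+# normal CDF (the probit). The first function below approximates z with a
+# rational polynomial; the second supersedes it (Python keeps the last
+# definition of a name) and uses scipy.stats.norm.ppf directly.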
+
+
+def d_prime(x: np.ndarray, y: np.ndarray):
+    target_idx = np.argwhere(y == 1).flatten()
+    non_target_idx = np.argwhere(y == 0).flatten()
+    hit_rate = np.sum(x[np.argwhere(y == 1)]) / target_idx.shape[0]
+    false_alarm_rate = np.sum(x[np.argwhere(y == 0)]) / non_target_idx.shape[0]
+    q = math.sqrt(-2.0 * math.log(hit_rate))
+    q2 = math.sqrt(-2.0 * math.log(false_alarm_rate))
+    zHR = q - ((q + 0.044715 * math.pow(q, 3)) / (1.0 + 0.196854 * math.pow(q, 2) + 0.056415 * math.pow(q, 3) + 0.004298 * math.pow(q, 4)))
+    zFA = q2 - ((q2 + 0.044715 * math.pow(q2, 3)) / (1.0 + 0.196854 * math.pow(q2, 2) + 0.056415 * math.pow(q2, 3) + 0.004298 * math.pow(q2, 4)))
+
+    return zHR - zFA
+
+
+
+def d_prime(x: np.ndarray,
+            y: np.ndarray,
+            lower_limit: Optional[float] = 0.0001,
+            upper_limit: Optional[float] = 0.9999) -> float:
+    """
+    Computes d-prime from two Boolean 1d arrays.
+
+    :param np.ndarray x: Boolean 1D array of response values, where 1 represents presence and 0 represents absence.
+    :param np.ndarray y: Boolean 1D array of ground truth, where 1 represents presence and 0 represents absence.
+    :param Optional[float] lower_limit: Lower limit to bound hit and false alarm rates. Defaults to 0.0001.
+    :param Optional[float] upper_limit: Upper limit to bound hit and false alarm rates. Defaults to 0.9999.
+    :return float: The calculated d' (d-prime) value.
+
+    :example:
+    >>> x = np.random.randint(0, 2, (1000,))
+    >>> y = np.random.randint(0, 2, (1000,))
+    >>> d_prime(x=x, y=y)
+    """
+
+    target_idx = np.argwhere(y == 1).flatten()
+    non_target_idx = np.argwhere(y == 0).flatten()
+    hit_rate = np.sum(x[np.argwhere(y == 1)]) / target_idx.shape[0]
+    false_alarm_rate = np.sum(x[np.argwhere(y == 0)]) / non_target_idx.shape[0]
+    if hit_rate < lower_limit: hit_rate = lower_limit
+    elif hit_rate > upper_limit: hit_rate = upper_limit
+    if false_alarm_rate < lower_limit: false_alarm_rate = lower_limit
+    elif false_alarm_rate > upper_limit: false_alarm_rate = upper_limit
+    return norm.ppf(hit_rate) - norm.ppf(false_alarm_rate)
+
+
+#q = math.sqrt(-2.0 * math.log(p))
+# return q - ((q + 0.044715 * math.pow(q, 3)) / (1.0 + 0.196854 * math.pow(q, 2) + 0.056415 * math.pow(q, 3) + 0.004298 * math.pow(q, 4)))
+
+# x = np.random.randint(0, 2, (1000,))
+# y = np.random.randint(0, 2, (1000,))
+# print(d_prime(x=x, y=y))
diff --git a/simba/sandbox/dunn_index.py b/simba/sandbox/dunn_index.py
new file mode 100644
index 000000000..8b761aedb
--- /dev/null
+++ b/simba/sandbox/dunn_index.py
@@ -0,0 +1,47 @@
+import numpy as np
+from itertools import permutations
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.utils.checks import check_valid_array
+
+def dunn_index(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Calculate the Dunn index to evaluate the quality of clustered labels.
+
+    This function calculates the Dunn index, which is a measure of clustering quality.
+    The index considers the ratio of the minimum inter-cluster distance to the maximum
+    intra-cluster distance. A higher Dunn index indicates better clustering.
+
+    .. note::
+       Modified from `jqmviegas `_
+       Wiki `https://en.wikipedia.org/wiki/Dunn_index `_
+       Uses Euclidean distances. Assumes cluster labels are contiguous integers starting at 0.
+
+    :param np.ndarray x: 2D array representing the data points. Shape (n_samples, n_features).
+    :param np.ndarray y: 1D array representing cluster labels for each data point. Shape (n_samples,).
+    :return float: The Dunn index value.
+
+    :example:
+    >>> x = np.random.randint(0, 100, (100, 2))
+    >>> y = np.random.randint(0, 3, (100,))
+    >>> dunn_index(x=x, y=y)
+    """
+
+    check_valid_array(data=x, source=dunn_index.__name__, accepted_ndims=(2,), accepted_dtypes=(int, float))
+    check_valid_array(data=y, source=dunn_index.__name__, accepted_ndims=(1,), accepted_shapes=[(x.shape[0],)], accepted_dtypes=(int, float))
+    distances = FeatureExtractionMixin.cdist(array_1=x.astype(np.float32), array_2=x.astype(np.float32))
+    ks = np.sort(np.unique(y)).astype(np.int64)
+    deltas = np.full((ks.shape[0], ks.shape[0]), np.inf)
+    big_deltas = np.zeros([ks.shape[0], 1])
+    for (i, l) in list(permutations(ks, 2)):
+        # minimum inter-cluster distance between clusters i and l
+        values = distances[np.where((y == i))][:, np.where((y == l))]
+        deltas[i, l] = np.min(values[np.nonzero(values)])
+    for k_idx in range(ks.shape[0]):
+        # maximum intra-cluster distance within cluster ks[k_idx]
+        values = distances[np.where((y == ks[k_idx]))][:, np.where((y == ks[k_idx]))]
+        big_deltas[k_idx] = np.max(values)
+
+    return np.min(deltas) / np.max(big_deltas)
+
diff --git a/simba/sandbox/egocentri_video_rotator_numba.py b/simba/sandbox/egocentri_video_rotator_numba.py
new file mode 100644
index 000000000..aa5e34485
--- /dev/null
+++ b/simba/sandbox/egocentri_video_rotator_numba.py
@@ -0,0 +1,135 @@
+import os
+from typing import Union, Tuple, Optional
+import numpy as np
+import cv2
+import time
+
+from simba.utils.checks import check_if_valid_rgb_tuple, check_valid_boolean, check_int, check_file_exist_and_readable, check_if_dir_exists, check_valid_array, check_valid_tuple
+from simba.utils.enums import Formats
+from simba.utils.read_write import get_video_meta_data, get_fn_ext, find_core_cnt, remove_a_folder, read_frm_of_video, concatenate_videos_in_folder, read_df, read_img_batch_from_video_gpu
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.data import egocentrically_align_pose, center_rotation_warpaffine_vectors, align_target_warpaffine_vectors, egocentric_frm_rotator
+from simba.mixins.image_mixin import ImageMixin
+
+
+class EgocentricVideoRotatorAccelerated():
+    """
+    Perform egocentric rotation of a video using batched frame reading (CPU or GPU) and accelerated frame warping.
+
+    .. video:: _static/img/EgocentricalAligner_2.webm
+       :width: 800
+       :autoplay:
+       :loop:
+
+    .. seealso::
+       To perform joint egocentric alignment of both pose and video, or pose only, use :func:`~simba.data_processors.egocentric_aligner.EgocentricalAligner`.
+       To produce rotation vectors, use :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
+
+    :param Union[str, os.PathLike] video_path: Path to a video file.
+    :param np.ndarray centers: A 2D array of shape `(num_frames, 2)` containing the original locations of `anchor_1_idx` in each frame before alignment. Returned by :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
+    :param np.ndarray rotation_vectors: A 3D array of shape `(num_frames, 2, 2)` containing the rotation matrices applied to each frame. Returned by :func:`~simba.utils.data.egocentrically_align_pose_numba` or :func:`~simba.utils.data.egocentrically_align_pose`.
+    :param bool verbose: If True, prints progress. Default True.
+    :param Tuple[int, int, int] fill_clr: The color of the additional pixels. Default black (0, 0, 0).
+    :param int core_cnt: Number of CPU cores to use for video rotation; `-1` uses all available cores.
+    :param Optional[Union[str, os.PathLike]] save_path: The location where to store the rotated video. If None, saves the video in the same directory as the input video with the `_rotated` suffix.
+    :param Optional[int] batch_size: Number of frames to read and rotate per batch. Default 500.
+    :param Optional[bool] gpu: If True, reads frame batches on the GPU. Default True.
+
+    :example:
+    >>> DATA_PATH = "C:\501_MA142_Gi_Saline_0513.csv"
+    >>> VIDEO_PATH = "C:\501_MA142_Gi_Saline_0513.mp4"
+    >>> SAVE_PATH = "C:\501_MA142_Gi_Saline_0513_rotated.mp4"
+    >>> ANCHOR_LOC = np.array([250, 250])
+
+    >>> df = read_df(file_path=DATA_PATH, file_type='csv')
+    >>> bp_cols = [x for x in df.columns if not x.endswith('_p')]
+    >>> data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+    >>> _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+    >>> rotater = EgocentricVideoRotatorAccelerated(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(250, 250), save_path=SAVE_PATH)
+    >>> rotater.run()
+    """
+
+    def __init__(self,
+                 video_path: Union[str, os.PathLike],
+                 centers: np.ndarray,
+                 rotation_vectors: np.ndarray,
+                 anchor_location: Tuple[int, int],
+                 verbose: bool = True,
+                 fill_clr: Tuple[int, int, int] = (0, 0, 0),
+                 core_cnt: int = -1,
+                 save_path: Optional[Union[str, os.PathLike]] = None,
+                 batch_size: Optional[int] = 500,
+                 gpu: Optional[bool] = True):
+
+        check_file_exist_and_readable(file_path=video_path)
+        self.video_meta_data = get_video_meta_data(video_path=video_path)
+        check_valid_array(data=centers, source=f'{self.__class__.__name__} centers', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_valid_array(data=rotation_vectors, source=f'{self.__class__.__name__} rotation_vectors', accepted_ndims=(3,), accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_valid_tuple(x=anchor_location, source=f'{self.__class__.__name__} anchor_location', accepted_lengths=(2,), valid_dtypes=(int,))
+        for i in anchor_location: check_int(name=f'{self.__class__.__name__} anchor_location', value=i, min_value=1)
+        check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
+        check_if_valid_rgb_tuple(data=fill_clr)
+        check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
+        if core_cnt > find_core_cnt()[0] or core_cnt == -1:
+            self.core_cnt = find_core_cnt()[0]
+        else:
+            self.core_cnt = core_cnt
+        video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
+        if save_path is not None:
+            self.save_dir = os.path.dirname(save_path)
+            check_if_dir_exists(in_dir=self.save_dir, source=f'{self.__class__.__name__} save_path')
+        else:
+            self.save_dir = video_dir
+            save_path = os.path.join(video_dir, f'{self.video_name}_rotated.mp4')
+        self.video_path, self.save_path, self.gpu = video_path, save_path, gpu
+        self.centers, self.rotation_vectors, self.batch_size = centers, rotation_vectors, batch_size
+        self.verbose, self.fill_clr, self.anchor_loc = verbose, fill_clr, anchor_location
+        fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')
+        self.writer = cv2.VideoWriter(save_path, fourcc, self.video_meta_data['fps'], (self.video_meta_data['width'], self.video_meta_data['height']))
+
+    def run(self):
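+        # Pipeline sketch (per batch of ``batch_size`` frames): read the frames
+        # (GPU reader when ``gpu=True``, otherwise the CPU reader), rotate each
+        # frame about the anchor using the center-rotation affine vectors, then
+        # translate the anchor to ``anchor_location`` using the target affine
+        # vectors, and write the result to the output video.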
+        center_rotations = center_rotation_warpaffine_vectors(rotation_vectors=self.rotation_vectors, centers=self.centers)
+        target_rotations = align_target_warpaffine_vectors(centers=self.centers, target=np.array(self.anchor_loc))
+        frm_idx = np.arange(0, self.video_meta_data['frame_count'])
+        frm_idx = np.array_split(frm_idx, range(self.batch_size, len(frm_idx), self.batch_size))
+        timer = time.time()
+        for frm_batch_cnt, frm_batch in enumerate(frm_idx):
+            if self.verbose:
+                print(f'Processing frame batch {frm_batch_cnt + 1}/{len(frm_idx)} ({self.video_name})...')
+            sample_center_rotations = center_rotations[frm_batch[0]:frm_batch[-1],]
+            sample_target_rotations = target_rotations[frm_batch[0]:frm_batch[-1],]
+            if not self.gpu:
+                sample_imgs = ImageMixin.read_img_batch_from_video(video_path=self.video_path, start_frm=frm_batch[0], end_frm=frm_batch[-1])
+            else:
+                sample_imgs = read_img_batch_from_video_gpu(video_path=self.video_path, start_frm=frm_batch[0], end_frm=frm_batch[-1])
+            sample_imgs = np.stack(list(sample_imgs.values()), axis=0)
+            sample_imgs = egocentric_frm_rotator(frames=sample_imgs, rotation_matrices=sample_center_rotations)
+            sample_imgs = egocentric_frm_rotator(frames=sample_imgs, rotation_matrices=sample_target_rotations)
+            for img in sample_imgs:
+                self.writer.write(img.astype(np.uint8))
+        self.writer.release()
+        if self.verbose:
+            print(f'Total time: {time.time() - timer}')
+
+
+DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+ANCHOR_LOC = np.array([250, 250])
+
+df = read_df(file_path=DATA_PATH, file_type='csv')
+bp_cols = [x for x in df.columns if not x.endswith('_p')]
+data = df[bp_cols].values.reshape(len(df), int(len(bp_cols) / 2), 2).astype(np.int32)
+_, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+
+rotater = EgocentricVideoRotatorAccelerated(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=(250, 250), save_path=SAVE_PATH)
+rotater.run()
+
+
+# CHECK IF THIS ONE ALSO GIVES NONE
diff --git a/simba/sandbox/egocentric_alig_pose_numba.py b/simba/sandbox/egocentric_alig_pose_numba.py
new file mode 100644
index 000000000..ffc8a8465
--- /dev/null
+++ b/simba/sandbox/egocentric_alig_pose_numba.py
@@ -0,0 +1,92 @@
+import numpy as np
+from typing import Tuple
+from numba import jit, prange, njit
+from simba.utils.read_write import read_df
+import time
+
+@njit("(int32[:, :, :], int64, int64, int64, int32[:])")
+def egocentrically_align_pose_numba(data: np.ndarray,
+                                    anchor_1_idx: int,
+                                    anchor_2_idx: int,
+                                    direction: int,
+                                    anchor_location: np.ndarray,
+                                    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+
+    """
+    Aligns a set of 2D points egocentrically based on two anchor points and a target direction.
+
+    Rotates and translates a 3D array of 2D points (e.g., time-series of frame-wise data) such that
+    one anchor point is aligned to a specified location, and the direction between the two anchors is aligned
+    to a target angle.
+
+    .. video:: _static/img/EgocentricalAligner.webm
+       :width: 600
+       :autoplay:
+       :loop:
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/egocentrically_align_pose_numba.csv
+       :widths: 12, 22, 22, 22, 22
+       :align: center
+       :class: simba-table
+       :header-rows: 1
+
+    :param np.ndarray data: A 3D array of shape `(num_frames, num_points, 2)` containing 2D points for each frame. Each frame is represented as a 2D array of shape `(num_points, 2)`, where each row corresponds to a point's (x, y) coordinates.
+    :param int anchor_1_idx: The index of the first anchor point in `data` used as the center of alignment. This body-part will be placed in the center of the image.
+    :param int anchor_2_idx: The index of the second anchor point in `data` used to calculate the direction vector. This body-part will be located `direction` degrees from the anchor_1 body-part.
+    :param int direction: The target direction in degrees to which the vector between the two anchors will be aligned.
+    :param np.ndarray anchor_location: A 1D array of shape `(2,)` specifying the target (x, y) location for `anchor_1_idx` after alignment.
+    :return: A tuple containing the rotated data, and variables required for also rotating the video using the same rules:
+             - `aligned_data`: A 3D array of shape `(num_frames, num_points, 2)` with the aligned 2D points.
+             - `centers`: A 2D array of shape `(num_frames, 2)` containing the original locations of `anchor_1_idx` in each frame before alignment.
+             - `rotation_vectors`: A 3D array of shape `(num_frames, 2, 2)` containing the rotation matrices applied to each frame.
+    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray]
+
+    :example:
+    >>> data = np.random.randint(0, 500, (100, 7, 2)).astype(np.int32)
+    >>> anchor_1_idx = 5 # E.g., the animal tail-base is the 6th body-part (index 5)
+    >>> anchor_2_idx = 6 # E.g., the animal nose is the 7th body-part (index 6)
+    >>> anchor_location = np.array([250, 250]).astype(np.int32) # the tail-base (index 5) is placed at x=250, y=250 in the image.
+    >>> direction = 90 # The nose (index 6) will be placed in direction 90 degrees (S) relative to the tail-base.
+    >>> results, centers, rotation_vectors = egocentrically_align_pose_numba(data=data, anchor_1_idx=anchor_1_idx, anchor_2_idx=anchor_2_idx, direction=direction, anchor_location=anchor_location)
+    """
+
+    target_angle = np.deg2rad(direction)
+    centers = np.full((data.shape[0], 2), fill_value=-1, dtype=np.int32)
+    rotation_vectors = np.full((data.shape[0], 2, 2), fill_value=-1, dtype=np.float32)
+    results = np.zeros_like(data, dtype=np.int32)
+    for frm_idx in prange(data.shape[0]):
+        frm_points = data[frm_idx]
+        frm_anchor_1, frm_anchor_2 = frm_points[anchor_1_idx], frm_points[anchor_2_idx]
+        centers[frm_idx] = frm_anchor_1
+        delta_x, delta_y = frm_anchor_2[0] - frm_anchor_1[0], frm_anchor_2[1] - frm_anchor_1[1]
+        frm_angle = np.arctan2(delta_y, delta_x)
+        frm_rotation_angle = target_angle - frm_angle
+        frm_cos_theta, frm_sin_theta = np.cos(frm_rotation_angle), np.sin(frm_rotation_angle)
+        R = np.array([[frm_cos_theta, -frm_sin_theta], [frm_sin_theta, frm_cos_theta]])
+        rotation_vectors[frm_idx] = R
+        keypoints_rotated = np.dot(frm_points.astype(np.float64) - frm_anchor_1.astype(np.float64), R.T)
+        anchor_1_position_after_rotation = keypoints_rotated[anchor_1_idx]
+        translation_to_target = anchor_location - anchor_1_position_after_rotation
+        results[frm_idx] = keypoints_rotated + translation_to_target
+
+    return results, centers, rotation_vectors
+
+
+
+# data_path = r"C:\projects\simba\simba\tests\data\test_projects\mouse_open_field\project_folder\csv\outlier_corrected_movement_location\Video1.csv"
+# data_df = read_df(file_path=data_path, file_type='csv')
+# bp_cols = [x for x in data_df.columns if not x.endswith('_p')]
+# data_arr = data_df[bp_cols].values.astype(np.int32).reshape(len(data_df), int(len(bp_cols)/2), 2).astype(np.int32)
+#
+# for i in [1000000, 2000000, 4000000, 8000000, 16000000, 32000000, 64000000]:
+#     times = []
+#     data_arr = np.random.randint(0, 500, (i, 6, 2))
+#     for j in range(3):
+#         start = time.time()
+#         results, centers, rotation_vectors =
egocentrically_align_pose_numba(data=data_arr, anchor_1_idx=2, anchor_2_idx=3, anchor_location=np.array([250, 250]).astype(np.int32), direction=90) +# run_time = time.time() - start +# times.append(run_time) +# print(i, '\t' * 3, np.mean(times), '\t' *3, np.std(times)) diff --git a/simba/sandbox/egocentric_align.py b/simba/sandbox/egocentric_align.py new file mode 100644 index 000000000..c9ceb7c5a --- /dev/null +++ b/simba/sandbox/egocentric_align.py @@ -0,0 +1,68 @@ +import cv2 +import pandas as pd +from simba.utils.read_write import read_df +import numpy as np +from simba.mixins.geometry_mixin import GeometryMixin + +def egocentric_alignment(df: pd.DataFrame, + anchor_1: int, + anchor_2: int): + + anchors_idx = [anchor_1, anchor_2] + data = df.values.reshape(len(df), int((len(list(df.columns))/2)), 2).astype(np.int32) + + results = np.zeros_like(data, dtype=np.int64) + + for frm in range(data.shape[0]): + x_corr, y_corr = data[frm][anchors_idx[0]][0], data[frm][anchors_idx[0]][1] + print(x_corr) + + + + # anchor_data = data[frm][anchors_idx] + # print(anchor_data) + + + # ref_bp = data[frm][anchors_idx[1]] + # centered_anchors = anchor_data - ref_bp + # avg_vector = centered_anchors[0] - centered_anchors[1] + # line_body3_body2 = centered_anchors[2] - centered_anchors[1] + # # avg_vector = (line_body1_body2 + line_body3_body2) / 2 + # angle = np.arctan2(avg_vector[1], avg_vector[0]) + # print(frm) + # + # rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) + # translated_points = data[frm] - ref_bp # Translate to the origin + # rotated_points = np.dot(translated_points, rotation_matrix.T) # Apply rotation + # results[frm] = rotated_points + ref_bp # Translate back + # + # return results + + + + + + +data_path = r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\501_MA142_Gi_CNO_0516.csv" +df = read_df(file_path=data_path, file_type='csv') +df = df[[x for x in df.columns if not '_p' in x]] +results = egocentric_alignment(df=df.head(1), anchor_1=2, anchor_2=6) +# points = GeometryMixin.multiframe_bodypart_to_point(data=results, core_cnt=10) +# for i in range(len(points)): +# img = GeometryMixin.view_shapes(shapes=points[i]) +# cv2.imshow('asdasd', img) +# cv2.waitKey(33) + + + + + + + + + + + + + + diff --git a/simba/sandbox/egocentric_align_1025.py b/simba/sandbox/egocentric_align_1025.py new file mode 100644 index 000000000..dc11abbaa --- /dev/null +++ b/simba/sandbox/egocentric_align_1025.py @@ -0,0 +1,41 @@ +import numpy as np + + +def egocentric_align_keypoints_fixed_tail(keypoints, nose, tail, target_angle_degrees, tail_target=(250, 250)): + # Convert target angle to radians + target_angle = np.deg2rad(target_angle_degrees) + + # Calculate the current angle of the nose-tail vector + delta_x = nose[0] - tail[0] + delta_y = nose[1] - tail[1] + current_angle = np.arctan2(delta_y, delta_x) + + # Calculate the required rotation angle + rotate_angle = target_angle - current_angle + + # Create the rotation matrix + cos_theta = np.cos(rotate_angle) + sin_theta = np.sin(rotate_angle) + R = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]]) + + # Translate keypoints so that the tail is at the origin + keypoints_translated = keypoints - tail + + # Apply rotation + keypoints_rotated = np.dot(keypoints_translated, R.T) + + # Translate keypoints so that the tail is at the target position (250, 250) + tail_position_after_rotation = keypoints_rotated[-1] # Assuming tail is the last point in the array + 
+    translation_to_target = np.array(tail_target) - tail_position_after_rotation
+    keypoints_aligned = keypoints_rotated + translation_to_target
+
+    return keypoints_aligned
+
+
+# Example usage (illustrative: random keypoints stand in for real pose data;
+# the original placeholder used undefined x1..xn, y1..yn symbols)
+keypoints = np.random.randint(0, 500, (7, 2))  # 7 body-parts, one (x, y) row per part
+nose = keypoints[0]  # Assuming nose is at index 0
+tail = keypoints[-1]  # Assuming tail is at the last index
+target_angle_degrees = 90  # Target angle
+
+aligned_keypoints = egocentric_align_keypoints_fixed_tail(keypoints, nose, tail, target_angle_degrees)
diff --git a/simba/sandbox/egocentric_align_cuda.py b/simba/sandbox/egocentric_align_cuda.py
new file mode 100644
index 000000000..0fb670ad0
--- /dev/null
+++ b/simba/sandbox/egocentric_align_cuda.py
@@ -0,0 +1,139 @@
+import math
+import time
+
+from simba.utils.checks import check_int, check_valid_array
+import numpy as np
+from typing import Tuple
+from simba.utils.errors import InvalidInputError
+from simba.utils.enums import Formats
+from simba.utils.read_write import read_df
+from numba import cuda
+from simba.data_processors.cuda.utils import _cuda_matrix_multiplication, _cuda_2d_transpose, _cuda_subtract_2d, _cuda_add_2d
+
+THREADS_PER_BLOCK = 1024
+
+
+@cuda.jit()
+def _egocentric_align_kernel(data, centers, rotation_vectors, results, target_angle, anchor_idx, transposed_rotation_vectors, matrix_multiplier_arr, anchor_loc):
+    frm_idx = cuda.grid(1)
+    if frm_idx >= data.shape[0]:
+        return
+    else:
+        frm_points = data[frm_idx]
+        frm_anchor_1, frm_anchor_2 = frm_points[anchor_idx[0]], frm_points[anchor_idx[1]]
+        centers[frm_idx][0], centers[frm_idx][1] = frm_anchor_1[0], frm_anchor_1[1]
+        delta_x, delta_y = frm_anchor_2[0] - frm_anchor_1[0], frm_anchor_2[1] - frm_anchor_1[1]
+        frm_angle = math.atan2(delta_y, delta_x)
+        frm_rotation_angle = target_angle[0] - frm_angle
+        frm_cos_theta, frm_sin_theta = math.cos(frm_rotation_angle), math.sin(frm_rotation_angle)
+        rotation_vectors[frm_idx][0][0], rotation_vectors[frm_idx][0][1] = frm_cos_theta, -frm_sin_theta
+        rotation_vectors[frm_idx][1][0], rotation_vectors[frm_idx][1][1] = frm_sin_theta, frm_cos_theta
+        keypoints_rotated = _cuda_subtract_2d(frm_points, frm_anchor_1)
+        r_transposed = _cuda_2d_transpose(rotation_vectors[frm_idx], transposed_rotation_vectors[frm_idx])
+        keypoints_rotated = _cuda_matrix_multiplication(keypoints_rotated, r_transposed, matrix_multiplier_arr[frm_idx])
+        anchor_1_position_after_rotation = keypoints_rotated[anchor_idx[0]]
+        anchor_1_position_after_rotation[0] = anchor_loc[0] - anchor_1_position_after_rotation[0]
+        anchor_1_position_after_rotation[1] = anchor_loc[1] - anchor_1_position_after_rotation[1]
+
+        frm_results = _cuda_add_2d(keypoints_rotated, anchor_1_position_after_rotation)
+        for i in range(frm_results.shape[0]):
+            for j in range(frm_results.shape[1]):
+                results[frm_idx][i][j] = frm_results[i][j]
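+
+
+# Kernel walk-through (descriptive, mirrors the code above): each CUDA thread
+# handles one frame. It (1) records the anchor-1 position as the frame center,
+# (2) builds the 2x2 rotation matrix R from the anchor-1 -> anchor-2 angle and
+# the target angle, (3) translates keypoints so anchor-1 sits at the origin,
+# (4) multiplies by R^T via the pre-allocated scratch arrays (device-side
+# helpers cannot allocate), and (5) adds the translation that places anchor-1
+# at ``anchor_loc`` before writing the frame's result.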
+
+
+def egocentrically_align_pose_cuda(data: np.ndarray,
+                                   anchor_1_idx: int,
+                                   anchor_2_idx: int,
+                                   anchor_location: np.ndarray,
+                                   direction: int,
+                                   batch_size: int = int(10e+5)) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:

+    """
+    CUDA-accelerated variant of egocentric pose alignment: rotates and translates each frame's 2D points so that
+    `anchor_1_idx` sits at `anchor_location` and the anchor_1 -> anchor_2 vector points in `direction` degrees.
+    Frames are transferred to the GPU and processed in batches of `batch_size`.
+
+    :param np.ndarray data: A 3D array of shape `(num_frames, num_points, 2)` containing 2D points for each frame.
+    :param int anchor_1_idx: Index of the anchor point placed at `anchor_location`.
+    :param int anchor_2_idx: Index of the anchor point defining the direction vector.
+    :param np.ndarray anchor_location: A 1D array of shape `(2,)` with the target (x, y) location for `anchor_1_idx`.
+    :param int direction: The target direction in degrees (0-360).
+    :param int batch_size: Number of frames transferred to the GPU per batch.
+    :return: Tuple of (aligned data, per-frame centers, per-frame 2x2 rotation matrices).
+
+    :example:
+    >>> DATA_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/data/501_MA142_Gi_Saline_0513.csv"
+    >>> VIDEO_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513.mp4"
+    >>> SAVE_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513_rotated.mp4"
+    >>> ANCHOR_LOC = np.array([300, 300])
+    >>>
+    >>> df = read_df(file_path=DATA_PATH, file_type='csv')
+    >>> bp_cols = [x for x in df.columns if not x.endswith('_p')]
+    >>> data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int64)
+    >>> data, centers, rotation_matrices = egocentrically_align_pose_cuda(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=180, batch_size=36000000)
+    """
+
+    check_valid_array(data=data, source=egocentrically_align_pose_cuda.__name__, accepted_ndims=(3,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_int(name=f'{egocentrically_align_pose_cuda.__name__} anchor_1_idx', min_value=0, max_value=data.shape[1], value=anchor_1_idx)
+    check_int(name=f'{egocentrically_align_pose_cuda.__name__} anchor_2_idx', min_value=0, max_value=data.shape[1], value=anchor_2_idx)
+    if anchor_1_idx == anchor_2_idx: raise InvalidInputError(msg=f'Anchor 1 index ({anchor_1_idx}) cannot be the same as Anchor 2 index ({anchor_2_idx})', source=egocentrically_align_pose_cuda.__name__)
+    check_int(name=f'{egocentrically_align_pose_cuda.__name__} direction', value=direction, min_value=0, max_value=360)
+    check_valid_array(data=anchor_location, source=egocentrically_align_pose_cuda.__name__, accepted_ndims=(1,), accepted_axis_0_shape=[2,], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_int(name=f'{egocentrically_align_pose_cuda.__name__} batch_size', value=batch_size, min_value=1)
+    results = np.full_like(a=data, fill_value=-1, dtype=np.int64)
+    results_centers = np.full((data.shape[0], 2), fill_value=-1, dtype=np.int64)
+    results_rotation_vectors = np.full((data.shape[0], 2, 2), fill_value=-1, dtype=np.float64)
+    transposed_results_rotation_vectors = np.full((data.shape[0], 2, 2), fill_value=np.nan, dtype=np.float64)
+    matrix_multiplier_arr = np.full((data.shape[0], data.shape[1], 2), fill_value=-1, dtype=np.int64)
+    target_angle = np.deg2rad(direction)
+    target_angle_dev = cuda.to_device(np.array([target_angle]))
+    anchor_idx_dev = cuda.to_device(np.array([anchor_1_idx, anchor_2_idx]))
+    anchor_loc_dev = cuda.to_device(anchor_location)
+
+    for l in range(0, data.shape[0], batch_size):
+        r = l + batch_size
+        sample_data = np.ascontiguousarray(data[l:r]).astype(np.float64)
+        sample_centers = np.ascontiguousarray(results_centers[l:r]).astype(np.int64)
+        sample_rotation_vectors = np.ascontiguousarray(results_rotation_vectors[l:r].astype(np.float64))
+        sample_transposed_rotation_vectors = np.ascontiguousarray(transposed_results_rotation_vectors[l:r])
+        sample_matrix_multiplier_arr = np.ascontiguousarray(matrix_multiplier_arr[l:r])
+        sample_results = np.ascontiguousarray(results[l:r].astype(np.float64))
+        sample_data_dev = cuda.to_device(sample_data)
+        sample_centers_dev = cuda.to_device(sample_centers)
+        sample_matrix_multiplier_arr_dev = cuda.to_device(sample_matrix_multiplier_arr)
+        sample_transposed_rotation_vectors_dev = cuda.to_device(sample_transposed_rotation_vectors)
+        sample_rotation_vectors_dev = cuda.to_device(sample_rotation_vectors)
+        sample_results_dev = cuda.to_device(sample_results)
+        bpg = (sample_data.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+        _egocentric_align_kernel[bpg, THREADS_PER_BLOCK](sample_data_dev,
+                                                         sample_centers_dev,
+                                                         sample_rotation_vectors_dev,
+                                                         sample_results_dev,
+                                                         target_angle_dev,
+                                                         anchor_idx_dev,
+                                                         sample_transposed_rotation_vectors_dev,
+                                                         sample_matrix_multiplier_arr_dev,
+                                                         anchor_loc_dev)
+        results[l:r] = sample_results_dev.copy_to_host()
+        results_centers[l:r] = sample_centers_dev.copy_to_host()
+        results_rotation_vectors[l:r] = sample_rotation_vectors_dev.copy_to_host()
+
+    return
results, results_centers, results_rotation_vectors + + +DATA_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/data/501_MA142_Gi_Saline_0513.csv" +VIDEO_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513.mp4" +SAVE_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513_rotated.mp4" +ANCHOR_LOC = np.array([300, 300]) + +df = read_df(file_path=DATA_PATH, file_type='csv') +bp_cols = [x for x in df.columns if not x.endswith('_p')] +data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int64) +data, centers, rotation_matrices = egocentrically_align_pose_cuda(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=180,batch_size=36000000) + +for i in [2000000, 4000000, 8000000, 16000000, 32000000, 64000000]: + data = np.random.randint(0, 500, (i, 6, 2)) + times = [] + for j in range(3): + start_t = time.perf_counter() + data, centers, rotation_matrices = egocentrically_align_pose_cuda(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=180, batch_size=36000000) + times.append(time.perf_counter() - start_t) + print(i, '\t' * 4, np.mean(times), '\t' * 4, np.std(times)) + + +# from simba.video_processors.egocentric_video_rotator import EgocentricVideoRotator +# +# runner = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_matrices, anchor_location=(300, 300)) +# runner.run() + +#_, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0) \ No newline at end of file diff --git a/simba/sandbox/egocentric_aligner.py b/simba/sandbox/egocentric_aligner.py new file mode 100644 index 000000000..d5fa0bc06 --- /dev/null +++ b/simba/sandbox/egocentric_aligner.py @@ -0,0 +1,222 @@ +import functools +import multiprocessing +import os +from typing import List, Optional, Tuple, Union + +import cv2 +import numpy as np +import pandas as pd + +from simba.utils.checks import (check_all_file_names_are_represented_in_video_log, + check_if_dir_exists, check_int, check_valid_dataframe, + check_str, check_valid_tuple, check_valid_boolean, check_instance, + check_valid_array, check_if_valid_rgb_tuple) +from simba.utils.enums import Formats, Options +from simba.utils.printing import SimbaTimer +from simba.utils.data import egocentrically_align_pose +from simba.utils.read_write import (concatenate_videos_in_folder, + find_core_cnt, + find_files_of_filetypes_in_directory, + find_video_of_file, get_fn_ext, + get_video_meta_data, read_df, + read_frm_of_video, remove_a_folder, + write_df, read_video_info_csv, bgr_to_rgb_tuple) +from simba.utils.warnings import FrameRangeWarning + + +def _egocentric_aligner(frm_range: np.ndarray, + video_path: Union[str, os.PathLike], + temp_dir: Union[str, os.PathLike], + video_name: str, + centers: List[Tuple[int, int]], + rotation_vectors: np.ndarray, + target: Tuple[int, int], + fill_clr: Tuple[int, int, int] = (255, 255, 255), + verbose: bool = False): + + video_meta = get_video_meta_data(video_path=video_path) + cap = cv2.VideoCapture(video_path) + batch, frm_range = frm_range[0], frm_range[1] + save_path = os.path.join(temp_dir, f'{batch}.mp4') + fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}') + writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (video_meta['width'], video_meta['height'])) + + for frm_cnt, frm_id in enumerate(frm_range): + img = read_frm_of_video(video_path=cap, 
frame_index=frm_id)
+        R, center = rotation_vectors[frm_id], centers[frm_id]
+        # Compose the rotate-about-center affine: M_rotate = [R | t] with t = center - R @ center.
+        M_rotate = np.hstack([R, np.array([[-center[0] * R[0, 0] - center[1] * R[0, 1] + center[0]], [-center[0] * R[1, 0] - center[1] * R[1, 1] + center[1]]])])
+        rotated_frame = cv2.warpAffine(img, M_rotate, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+        # Translate so the anchor lands on the target pixel location.
+        translation_x = target[0] - center[0]
+        translation_y = target[1] - center[1]
+        M_translate = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
+        final_frame = cv2.warpAffine(rotated_frame, M_translate, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+        writer.write(final_frame)
+        if verbose:
+            print(f'Creating frame {frm_id} ({video_name}, CPU core: {batch+1}).')
+
+    cap.release()
+    writer.release()
+    return batch+1
+
+class EgocentricalAligner():
+
+    """
+    Aligns and rotates movement data and associated video frames based on specified anchor points to produce an egocentric view of the subject. The class aligns frames around a selected anchor point, optionally rotating the subject to a consistent direction and saving the output video.
+
+    .. video:: _static/img/EgocentricalAligner.webm
+       :width: 800
+       :autoplay:
+       :loop:
+
+    .. video:: _static/img/EgocentricalAligner_2.webm
+       :width: 800
+       :autoplay:
+       :loop:
+
+    :param Union[str, os.PathLike] data_dir: Directory containing CSV files with movement data.
+    :param Union[str, os.PathLike] save_dir: Directory where the processed output will be saved.
+    :param str anchor_1: Primary anchor point (e.g., 'tail_base') around which the alignment centers.
+    :param str anchor_2: Secondary anchor point (e.g., 'nose') defining the alignment direction.
+    :param int direction: Target angle, in degrees, for alignment; e.g., `0` aligns along the x-axis.
+    :param Tuple[int, int] anchor_location: Pixel location in the output where `anchor_1` should appear; default is `(250, 250)`.
+    :param Tuple[int, int, int] fill_clr: If rotating the videos, the color of the additional pixels.
+    :param bool rotate_video: Whether to rotate the video to align with the specified direction.
+    :param bool verbose: If True, prints progress.
+    :param Optional[Union[str, os.PathLike]] videos_dir: Directory containing the videos to rotate (required when `rotate_video` is True).
+    :param Optional[Union[str, os.PathLike, pd.DataFrame]] video_info: Path to the SimBA video_info CSV, or the dataframe itself (required when `rotate_video` is True).
+    :param int core_cnt: Number of CPU cores to use for video rotation; `-1` uses all available cores.
+
+    :example:
+    >>> aligner = EgocentricalAligner(rotate_video=True, anchor_1='tail_base', anchor_2='nose', data_dir=r"C:/Users/sroni/OneDrive/Desktop/rotate_ex/data", videos_dir=r'C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos', save_dir=r"C:\troubleshooting\mitra\project_folder\videos\additional/examples/rotated", video_info=r"C:\troubleshooting\mitra\project_folder\logs\video_info.csv", direction=0, anchor_location=(250, 250), fill_clr=(0, 0, 0))
+    >>> aligner.run()
+    """
+
+    def __init__(self,
+                 data_dir: Union[str, os.PathLike],
+                 save_dir: Union[str, os.PathLike],
+                 anchor_1: str = 'tail_base',
+                 anchor_2: str = 'nose',
+                 direction: int = 0,
+                 anchor_location: Tuple[int, int] = (250, 250),
+                 core_cnt: int = -1,
+                 rotate_video: bool = False,
+                 fill_clr: Tuple[int, int, int] = (250, 250, 255),
+                 verbose: bool = True,
+                 videos_dir: Optional[Union[str, os.PathLike]] = None,
+                 video_info: Optional[Union[str, os.PathLike, pd.DataFrame]] = None):
+
+        self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv'])
+        check_if_dir_exists(in_dir=save_dir, source=f'{self.__class__.__name__} save_dir')
+        check_str(name=f'{self.__class__.__name__} anchor_1', value=anchor_1)
+        check_str(name=f'{self.__class__.__name__} anchor_2', value=anchor_2)
+        check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, max_value=find_core_cnt()[0], unaccepted_vals=[0])
+        if core_cnt == -1:
+            self.core_cnt = find_core_cnt()[0]
+        else:
+            self.core_cnt = core_cnt
+        check_int(name=f'{self.__class__.__name__} direction', value=direction, min_value=0, max_value=360)
+        check_valid_tuple(x=anchor_location, source=f'{self.__class__.__name__} anchor_location', accepted_lengths=(2,), valid_dtypes=(int,))
+        for i in anchor_location: check_int(name=f'{self.__class__.__name__} anchor_location', value=i, min_value=1)
+        check_valid_boolean(value=[rotate_video, verbose], source=f'{self.__class__.__name__} rotate_video')
+        if rotate_video:
+            check_if_valid_rgb_tuple(data=fill_clr)
+            fill_clr = bgr_to_rgb_tuple(value=fill_clr)
+            check_if_dir_exists(in_dir=videos_dir, source=f'{self.__class__.__name__} videos_dir')
+            check_instance(source=f'{self.__class__.__name__} video_info', accepted_types=(str, pd.DataFrame), instance=video_info)
+            if isinstance(video_info, str): video_info = read_video_info_csv(file_path=video_info)
+            else: check_valid_dataframe(df=video_info, source=f'{self.__class__.__name__} video_info', required_fields=Formats.EXPECTED_VIDEO_INFO_COLS.value)
+            self.video_paths = find_files_of_filetypes_in_directory(directory=videos_dir, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value)
+            for file_path in self.data_paths:
+                find_video_of_file(video_dir=videos_dir, filename=get_fn_ext(file_path)[1], raise_error=True)
+            check_all_file_names_are_represented_in_video_log(video_info_df=video_info, data_paths=self.data_paths)
+        self.anchor_1_cols = [f'{anchor_1}_x'.lower(), f'{anchor_1}_y'.lower()]
+        self.anchor_2_cols = [f'{anchor_2}_x'.lower(), f'{anchor_2}_y'.lower()]
+        self.anchor_1, self.anchor_2, self.videos_dir = anchor_1, anchor_2, videos_dir
+        self.rotate_video, self.save_dir, self.verbose = rotate_video, save_dir, verbose
+        self.anchor_location, self.direction, self.fill_clr = np.array(anchor_location), direction, fill_clr
+
+    def run(self):
+        for file_cnt, file_path in enumerate(self.data_paths):
+            video_timer = SimbaTimer(start=True)
+            _, self.video_name, _ = get_fn_ext(filepath=file_path)
+            if self.verbose:
+                print(f'Analyzing video {self.video_name}... ({file_cnt+1}/{len(self.data_paths)})')
+            save_path = os.path.join(self.save_dir, f'{self.video_name}.{Formats.CSV.value}')
+            df = read_df(file_path=file_path, file_type=Formats.CSV.value)
+            original_cols, self.file_path = list(df.columns), file_path
+            df.columns = [x.lower() for x in list(df.columns)]
+            check_valid_dataframe(df=df, source=self.__class__.__name__, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.anchor_1_cols + self.anchor_2_cols)
+
+            bp_cols = [x for x in df.columns if not x.endswith('_p')]
+            body_parts_lst = []
+            _ = [body_parts_lst.append(x[:-2]) for x in bp_cols if x[:-2] not in body_parts_lst]
+            anchor_1_idx, anchor_2_idx = body_parts_lst.index(self.anchor_1), body_parts_lst.index(self.anchor_2)
+            data_arr = df[bp_cols].values.reshape(len(df), len(body_parts_lst), 2).astype(np.int32)
+            results_arr, self.centers, self.rotation_vectors = egocentrically_align_pose(data=data_arr, anchor_1_idx=anchor_1_idx, anchor_2_idx=anchor_2_idx, direction=self.direction, anchor_location=self.anchor_location)
+            results_arr = results_arr.reshape(len(df), len(bp_cols))
+            self.out_df = pd.DataFrame(results_arr, columns=bp_cols)
+            df.update(self.out_df)
+            df.columns = original_cols
+            write_df(df=df, file_type=Formats.CSV.value, save_path=save_path)
+            video_timer.stop_timer()
+            print(f'{self.video_name} complete, saved at {save_path} (elapsed time: {video_timer.elapsed_time_str}s)')
+            if self.rotate_video:
+                self.run_video_rotation()
+
+    def run_video_rotation(self):
+        video_timer = SimbaTimer(start=True)
+        video_path = find_video_of_file(video_dir=self.videos_dir, filename=self.video_name, raise_error=False)
+        video_meta = get_video_meta_data(video_path=video_path)
+        save_path = os.path.join(self.save_dir, f'{self.video_name}.mp4')
+        temp_dir = os.path.join(self.save_dir, 'temp')
+        if not (os.path.isdir(temp_dir)):
+            os.makedirs(temp_dir)
+        else:
+            remove_a_folder(folder_dir=temp_dir)
+            os.makedirs(temp_dir)
+        if video_meta['frame_count'] != len(self.out_df):
+            FrameRangeWarning(msg=f'The video {video_path} contains {video_meta["frame_count"]} frames while the file {self.file_path} contains {len(self.out_df)} frames', source=self.__class__.__name__)
+        frm_list = np.arange(0, video_meta['frame_count'])
+        frm_list = np.array_split(frm_list, self.core_cnt)
+        frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
+        print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+        with multiprocessing.Pool(self.core_cnt, maxtasksperchild=100) as pool:
+            constants = functools.partial(_egocentric_aligner,
+                                          temp_dir=temp_dir,
+                                          video_name=self.video_name,
+                                          video_path=video_path,
+                                          centers=self.centers,
+                                          rotation_vectors=self.rotation_vectors,
+                                          target=self.anchor_location,
+                                          verbose=self.verbose,
+                                          fill_clr=self.fill_clr)
+            for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=1)):
+                print(f"Rotate batch {result}/{self.core_cnt} complete...")
+            pool.terminate()
+            pool.join()
+
+        concatenate_videos_in_folder(in_folder=temp_dir, save_path=save_path, remove_splits=True, gpu=False)
+        video_timer.stop_timer()
+        print(f"Egocentric rotation video {save_path} complete (elapsed time: {video_timer.elapsed_time_str}s) ...")
+
+# if __name__ == "__main__":
+#     aligner = EgocentricalAligner(rotate_video=True,
+#                                   anchor_1='tail_base',
+#                                   anchor_2='nose',
+#                                   data_dir=r'C:\Users\sroni\OneDrive\Desktop\rotate_ex\data',
+# 
videos_dir=r'C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos', +# save_dir=r"C:\troubleshooting\mitra\project_folder\videos\additional\examples\rotated", +# video_info=r"C:\troubleshooting\mitra\project_folder\logs\video_info.csv", +# direction=0, +# anchor_location=(250, 250), +# fill_clr=(0, 0, 0)) +# aligner.run() + + # aligner = EgocentricalAligner(rotate_video=True, + # anchor_1='tail_base', + # anchor_2='nose', + # data_dir=r'C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location', + # videos_dir=r'C:\troubleshooting\mitra\project_folder\videos', + # save_dir=r"C:\troubleshooting\mitra\project_folder\videos\additional\bg_removed\rotated", + # video_info=r"C:\troubleshooting\mitra\project_folder\logs\video_info.csv") + # aligner.run() + + diff --git a/simba/sandbox/egocentric_aligner_.py b/simba/sandbox/egocentric_aligner_.py new file mode 100644 index 000000000..5210ca4b2 --- /dev/null +++ b/simba/sandbox/egocentric_aligner_.py @@ -0,0 +1,186 @@ +import os +import numpy as np +import cv2 +import multiprocessing +import functools +import pandas as pd +from typing import Union, Optional, Tuple, List +from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext,read_df, find_video_of_file, write_df, get_video_meta_data, read_frm_of_video, find_core_cnt, remove_a_folder, concatenate_videos_in_folder +from simba.utils.checks import check_all_file_names_are_represented_in_video_log, check_valid_dataframe, check_int +from simba.utils.enums import Formats +from simba.mixins.config_reader import ConfigReader +from simba.utils.warnings import FrameRangeWarning +from simba.utils.printing import SimbaTimer, stdout_success + +def _egocentric_aligner(frm_range: np.ndarray, + video_path: Union[str, os.PathLike], + temp_dir: Union[str, os.PathLike], + save_dir: Union[str, os.PathLike], + centers: List[Tuple[int, int]], + rotation_vectors: np.ndarray, + target: Tuple[int, int]): + + video_meta = get_video_meta_data(video_path=video_path) + cap = cv2.VideoCapture(video_path) + batch, frm_range = frm_range[0], frm_range[1] + save_path = os.path.join(temp_dir, f'{batch}.mp4') + fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}') + writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (video_meta['width'], video_meta['height'])) + + for frm_cnt, frm_id in enumerate(frm_range): + img = read_frm_of_video(video_path=cap, frame_index=frm_id) + R, center = rotation_vectors[frm_id], centers[frm_id] + M_rotate = np.hstack([R, np.array([[-center[0] * R[0, 0] - center[1] * R[0, 1] + center[0]], [-center[0] * R[1, 0] - center[1] * R[1, 1] + center[1]]])]) + rotated_frame = cv2.warpAffine(img, M_rotate, (video_meta['width'], video_meta['height'])) + translation_x = target[0] - center[0] + translation_y = target[1] - center[1] + M_translate = np.float32([[1, 0, translation_x], [0, 1, translation_y]]) + final_frame = cv2.warpAffine(rotated_frame, M_translate, (video_meta['width'], video_meta['height'])) + writer.write(final_frame) + print(f'Creating frame {frm_id} (CPU core: {batch+1}).') + + cap.release() + writer.release() + return batch+1 + +class EgocentricalAligner(ConfigReader): + + def __init__(self, + config_path: Union[str, os.PathLike], + save_dir: Union[str, os.PathLike], + data_dir: Optional[Union[str, os.PathLike]] = None, + anchor_1: Optional[str] = 'tail_base', + anchor_2: Optional[str] = 'nose', + direction: int = 0, + anchor_location: Optional[Tuple[int, int]] = (250, 250), + rotate_video: Optional[bool] = False, + cores: 
Optional[int] = -1): + """ + Aligns and rotates movement data and associated video frames based on specified anchor points to produce an egocentric view of the subject. The class aligns frames around a selected anchor point, optionally rotating the subject to a consistent direction and saving the output video. + + .. video:: _static/img/EgocentricalAligner.webm + :width: 800 + :autoplay: + :loop: + + :param Union[str, os.PathLike] config_path: Path to the configuration file. + :param Union[str, os.PathLike] save_dir: Directory where the processed output will be saved. + :param Optional[Union[str, os.PathLike]] data_dir: Directory containing CSV files with movement data. + :param Optional[str] anchor_1: Primary anchor point (e.g., 'tail_base') around which the alignment centers. + :param Optional[str] anchor_2: Secondary anchor point (e.g., 'nose') defining the alignment direction. + :param int direction: Target angle, in degrees, for alignment; e.g., `0` aligns along the x-axis. + :param Optional[Tuple[int, int]] anchor_location: Pixel location in the output where `anchor_1` should appear; default is `(250, 250)`. + :param Optional[bool] rotate_video: Whether to rotate the video to align with the specified direction. + :param Optional[int] cores: Number of CPU cores to use for video rotation; `-1` uses all available cores. + + :example: + >>> aligner = EgocentricalAligner(config_path=r"C:\troubleshooting\mitra\project_folder\project_config.ini", rotate_video=True, anchor_1='tail_base', anchor_2='nose', data_dir=r'C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test', save_dir=r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test\bg_temp\rotated") + >>> aligner.run() + """ + + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + if data_dir is None: + self.data_paths = find_files_of_filetypes_in_directory(directory=self.outlier_corrected_dir, extensions=['.csv']) + else: + self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv']) + check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths) + self.anchor_1_cols = [f'{anchor_1}_x'.lower(), f'{anchor_1}_y'.lower()] + self.anchor_2_cols = [f'{anchor_2}_x'.lower(), f'{anchor_2}_y'.lower()] + self.rotate_video, self.save_dir = rotate_video, save_dir + self.anchor_1, self.anchor_2 = anchor_1, anchor_2 + self.target_angle = np.deg2rad(direction) + self.anchor_location = anchor_location + check_int(name='cores', value=cores, min_value=-1, max_value=find_core_cnt()[0]) + if cores == -1: + self.cores = find_core_cnt()[0] + else: + self.cores = cores + + def run(self): + for file_cnt, file_path in enumerate(self.data_paths): + _, self.video_name, _ = get_fn_ext(filepath=file_path) + save_path = os.path.join(self.save_dir, f'{self.video_name}.{self.file_type}') + df = read_df(file_path=file_path, file_type=self.file_type) + original_cols, self.file_path = list(df.columns), file_path + df.columns = [x.lower() for x in list(df.columns)] + check_valid_dataframe(df=df, source=self.__class__.__name__, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.anchor_1_cols + self.anchor_2_cols) + self.body_parts_lst = [x.lower() for x in self.body_parts_lst] + bp_cols = [x for x in df.columns if not x.endswith('_p')] + anchor_1_idx = self.body_parts_lst.index(self.anchor_1) + anchor_2_idx = self.body_parts_lst.index(self.anchor_2) + data_arr = 
df[bp_cols].values.reshape(len(df), len(self.body_parts_lst), 2).astype(np.int32)
+            results_arr = np.zeros_like(data_arr)
+            self.rotation_angles, self.rotation_vectors, self.centers, self.deltas = [], [], [], []
+            for frame_index in range(data_arr.shape[0]):
+                frame_points = data_arr[frame_index]
+                frame_anchor_1 = frame_points[anchor_1_idx]
+                self.centers.append(tuple(frame_anchor_1))
+                frame_anchor_2 = frame_points[anchor_2_idx]
+                delta_x, delta_y = frame_anchor_2[0] - frame_anchor_1[0], frame_anchor_2[1] - frame_anchor_1[1]
+                self.deltas.append((delta_x, delta_y))
+                current_angle = np.arctan2(delta_y, delta_x)
+                rotate_angle = self.target_angle - current_angle
+                self.rotation_angles.append(rotate_angle)
+                cos_theta, sin_theta = np.cos(rotate_angle), np.sin(rotate_angle)
+                R = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]])
+                self.rotation_vectors.append(R)
+                keypoints_translated = frame_points - frame_anchor_1
+                keypoints_rotated = np.dot(keypoints_translated, R.T)
+                anchor_1_position_after_rotation = keypoints_rotated[anchor_1_idx]
+                translation_to_target = np.array(self.anchor_location) - anchor_1_position_after_rotation
+                keypoints_aligned = keypoints_rotated + translation_to_target
+                results_arr[frame_index] = keypoints_aligned
+
+            results_arr = results_arr.reshape(len(df), len(bp_cols))
+            self.out_df = pd.DataFrame(results_arr, columns=bp_cols)
+            df.update(self.out_df)
+            df.columns = original_cols
+            write_df(df=df, file_type=self.file_type, save_path=save_path)
+            if self.rotate_video:
+                self.run_video_rotation()
+
+    def run_video_rotation(self):
+        video_timer = SimbaTimer(start=True)
+        video_path = find_video_of_file(video_dir=self.video_dir, filename=self.video_name, raise_error=True)
+        video_meta = get_video_meta_data(video_path=video_path)
+        save_path = os.path.join(self.save_dir, f'{self.video_name}.mp4')
+        temp_dir = os.path.join(self.save_dir, 'temp')
+        if not os.path.isdir(temp_dir):
+            os.makedirs(temp_dir)
+        else:
+            remove_a_folder(folder_dir=temp_dir)
+            os.makedirs(temp_dir)
+        if video_meta['frame_count'] != len(self.out_df):
+            FrameRangeWarning(msg=f'The video {video_path} contains {video_meta["frame_count"]} frames while the file {self.file_path} contains {len(self.out_df)} frames', source=self.__class__.__name__)
+        frm_list = np.arange(0, video_meta['frame_count'])
+        frm_list = np.array_split(frm_list, self.cores)
+        frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
+        print(f"Creating rotated videos, multiprocessing (chunksize: {self.multiprocess_chunksize}, cores: {self.cores})...")
+        with multiprocessing.Pool(self.cores, maxtasksperchild=self.maxtasksperchild) as pool:
+            constants = functools.partial(_egocentric_aligner,
+                                          save_dir=self.save_dir,
+                                          temp_dir=temp_dir,
+                                          video_path=video_path,
+                                          centers=self.centers,
+                                          rotation_vectors=self.rotation_vectors,
+                                          target=self.anchor_location)
+            for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=self.multiprocess_chunksize)):
+                print(f"Rotate batch {result}/{self.cores} complete...")
+            pool.terminate()
+            pool.join()
+
+        concatenate_videos_in_folder(in_folder=temp_dir, save_path=save_path, remove_splits=True, gpu=False)
+        video_timer.stop_timer()
+        print(f"Egocentric rotation video {save_path} complete (elapsed time: {video_timer.elapsed_time_str}s) ...")
+
+
+# if __name__ == "__main__":
+#     aligner = EgocentricalAligner(config_path=r"C:\troubleshooting\mitra\project_folder\project_config.ini",
+#                                   rotate_video=True,
+#                                   anchor_1='tail_base',
+#                                   anchor_2='nose',
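+# A small worked check of the per-frame math in run() above: with anchor_1 at
+# (10, 10) and anchor_2 at (10, 20) the anchor axis points along +y; for
+# direction=0 the computed R maps it onto the +x axis (toy numbers only):
+#
+# import numpy as np
+# a1, a2 = np.array([10, 10]), np.array([10, 20])
+# ang = 0.0 - np.arctan2(20 - 10, 10 - 10)   # target_angle - current_angle
+# R = np.array([[np.cos(ang), -np.sin(ang)], [np.sin(ang), np.cos(ang)]])
+# assert np.allclose(np.dot(a2 - a1, R.T), [10, 0])
+#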
data_dir=r'C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test', +# save_dir=r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test\bg_temp\rotated") +# aligner.run() +# + diff --git a/simba/sandbox/egocentric_aligner_1024.py b/simba/sandbox/egocentric_aligner_1024.py new file mode 100644 index 000000000..dabc174ed --- /dev/null +++ b/simba/sandbox/egocentric_aligner_1024.py @@ -0,0 +1,127 @@ +import os +import numpy as np +import cv2 +import math +from copy import deepcopy +import pandas as pd +from typing import Union, Optional, Tuple +from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext,read_df, read_video_info, find_video_of_file, write_df, get_video_meta_data, read_frm_of_video +from simba.utils.checks import check_all_file_names_are_represented_in_video_log, check_valid_dataframe +from simba.utils.enums import Formats +from simba.mixins.config_reader import ConfigReader +from simba.mixins.geometry_mixin import GeometryMixin +from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin +from simba.video_processors.video_processing import create_average_frm + + + +class EgocentricalAligner(ConfigReader): + + def __init__(self, + config_path: Union[str, os.PathLike], + save_dir: Union[str, os.PathLike], + data_dir: Optional[Union[str, os.PathLike]] = None, + anchor_1: Optional[str] = 'tail_base', + anchor_2: Optional[str] = 'nose', + direction: int = 0, + anchor_location: Optional[Tuple[int, int]] = (250, 250), + rotate_video: Optional[bool] = False): + + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + if data_dir is None: + self.data_paths = find_files_of_filetypes_in_directory(directory=self.outlier_corrected_dir, extensions=['.csv']) + else: + self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv']) + check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths) + self.anchor_1_cols = [f'{anchor_1}_x'.lower(), f'{anchor_1}_y'.lower()] + self.anchor_2_cols = [f'{anchor_2}_x'.lower(), f'{anchor_2}_y'.lower()] + self.rotate_video, self.save_dir = rotate_video, save_dir + self.anchor_1, self.anchor_2 = anchor_1, anchor_2 + self.target_angle = np.deg2rad(direction) + self.anchor_location = anchor_location + + def run(self): + for file_cnt, file_path in enumerate(self.data_paths): + _, self.video_name, _ = get_fn_ext(filepath=file_path) + save_path = os.path.join(self.save_dir, f'{self.video_name}.{self.file_type}') + df = read_df(file_path=file_path, file_type=self.file_type) + original_cols = list(df.columns) + df.columns = [x.lower() for x in list(df.columns)] + self.body_parts_lst = [x.lower() for x in self.body_parts_lst] + bp_cols = [x for x in df.columns if not x.endswith('_p')] + anchor_1_idx = self.body_parts_lst.index(self.anchor_1) + anchor_2_idx = self.body_parts_lst.index(self.anchor_2) + data_arr = df[bp_cols].values.reshape(len(df), len(self.body_parts_lst), 2).astype(np.int32) + results_arr = np.zeros_like(data_arr) + self.rotation_angles, self.rotation_vectors, self.centers, self.deltas = [], [], [], [] + for frame_index in range(data_arr.shape[0]): + frame_points = data_arr[frame_index] + frame_anchor_1 = frame_points[anchor_1_idx] + self.centers.append(tuple(frame_anchor_1)) + frame_anchor_2 = frame_points[anchor_2_idx] + delta_x, delta_y = frame_anchor_2[0] - frame_anchor_1[0], frame_anchor_2[1] - frame_anchor_1[1] + 
self.deltas.append((delta_x, delta_y))
+                current_angle = np.arctan2(delta_y, delta_x)
+                rotate_angle = self.target_angle - current_angle
+                self.rotation_angles.append(rotate_angle)
+                cos_theta, sin_theta = np.cos(rotate_angle), np.sin(rotate_angle)
+                R = np.array([[cos_theta, -sin_theta], [sin_theta, cos_theta]])
+                self.rotation_vectors.append(R)
+                keypoints_translated = frame_points - frame_anchor_1
+                keypoints_rotated = np.dot(keypoints_translated, R.T)
+                anchor_1_position_after_rotation = keypoints_rotated[anchor_1_idx]
+                translation_to_target = np.array(self.anchor_location) - anchor_1_position_after_rotation
+                keypoints_aligned = keypoints_rotated + translation_to_target
+                results_arr[frame_index] = keypoints_aligned
+
+            results_arr = results_arr.reshape(len(df), len(bp_cols))
+            self.out_df = pd.DataFrame(results_arr, columns=bp_cols)
+            df.update(self.out_df)
+            df.columns = original_cols
+            write_df(df=df, file_type=self.file_type, save_path=save_path)
+            if self.rotate_video:
+                self.run_video_rotation()
+
+    def run_video_rotation(self):
+        video_path = find_video_of_file(video_dir=self.video_dir, filename=self.video_name, raise_error=True)
+        video_meta = get_video_meta_data(video_path=video_path)
+        save_path = os.path.join(self.save_dir, f'{self.video_name}.mp4')
+        fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')
+        writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (video_meta['width'], video_meta['height']))
+        cap = cv2.VideoCapture(video_path)
+        target_x, target_y = self.anchor_location
+
+        for frm_idx in range(video_meta['frame_count']):
+            img = read_frm_of_video(video_path=cap, frame_index=frm_idx)
+            # Get the rotation matrix and translation info for this frame
+            R = self.rotation_vectors[frm_idx]  # 2x2 rotation matrix
+            center = self.centers[frm_idx]  # Center to rotate around
+
+            # Apply rotation to the image
+            M_rotate = np.hstack([R, np.array([[-center[0] * R[0, 0] - center[1] * R[0, 1] + center[0]], [-center[0] * R[1, 0] - center[1] * R[1, 1] + center[1]]])])
+            rotated_frame = cv2.warpAffine(img, M_rotate, (video_meta['width'], video_meta['height']))
+
+            # Calculate translation to move rotated anchor to target location
+            translation_x = target_x - center[0]
+            translation_y = target_y - center[1]
+            M_translate = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
+
+            # Apply translation to keep the anchor point at target location
+            final_frame = cv2.warpAffine(rotated_frame, M_translate, (video_meta['width'], video_meta['height']))
+
+            writer.write(final_frame)
+            print(f'Writing frame {frm_idx} to {save_path}...')
+
+        # Release resources
+        cap.release()
+        writer.release()
+
+
+EgocentricalAligner(config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini",
+                    rotate_video=True,
+                    anchor_1='tail_base',
+                    anchor_2='nose',
+                    data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location',
+                    save_dir=r"D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\rotated").run()
+
diff --git a/simba/sandbox/egocentric_alignment.py b/simba/sandbox/egocentric_alignment.py
new file mode 100644
index 000000000..a27d4d1e0
--- /dev/null
+++ b/simba/sandbox/egocentric_alignment.py
@@ -0,0 +1,49 @@
+import numpy as np
+
+# Function to rotate points around a given origin
+def rotate_points(points, angle, origin=(0, 0)):
+    """
+    Rotate points by a given angle around the origin.
+    points: array of shape (N, 2), where N is the number of points.
+    angle: rotation angle in radians.
+    origin: (x, y) coordinates of the origin of rotation.
+    """
+    rotation_matrix = np.array([[np.cos(angle), -np.sin(angle)],
+                                [np.sin(angle), np.cos(angle)]])
+    translated_points = points - origin  # Translate to the origin
+    rotated_points = np.dot(translated_points, rotation_matrix.T)  # Apply rotation
+    return rotated_points + origin  # Translate back
+
+# Sample body parts data (Body1 to Body7). Placeholder coordinates so the sketch
+# runs end-to-end; substitute real tracking data here.
+body_parts = np.array([
+    [10.0, 35.0],  # Body1
+    [12.0, 30.0],  # Body2
+    [14.0, 25.0],  # Body3
+    [16.0, 20.0],  # Body4
+    [18.0, 15.0],  # Body5
+    [20.0, 10.0],  # Body6
+    [22.0, 5.0]    # Body7
+])
+
+# Choose 2 body parts for alignment (e.g., Body1, Body2)
+body_part_indices = [0, 1]  # Indices for Body1 and Body2
+chosen_body_parts = body_parts[body_part_indices]
+
+# Center the data around the chosen reference body part (e.g., Body2)
+reference_body_part = chosen_body_parts[1]  # Body2
+centered_body_parts = chosen_body_parts - reference_body_part
+
+# Vector from Body2 to Body1 (for alignment)
+line_body1_body2 = centered_body_parts[0] - centered_body_parts[1]
+
+# Calculate the angle to rotate the vector so it points north (along positive y-axis)
+north_vector = np.array([0, 1])  # North is along the positive y-axis
+angle_to_north = np.arctan2(line_body1_body2[1], line_body1_body2[0]) - np.arctan2(north_vector[1], north_vector[0])
+
+# Rotate all body parts around the reference body part (Body2)
+rotated_body_parts = rotate_points(body_parts, -angle_to_north, origin=reference_body_part)
+
+# Output the rotated coordinates relative to the egocentric view
+print("Rotated Body Parts:")
+for i, part in enumerate(rotated_body_parts):
+    print(f"Body{i+1}: {part}")
diff --git a/simba/sandbox/egocentric_alignment_cuda.py b/simba/sandbox/egocentric_alignment_cuda.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/simba/sandbox/egocentric_video_dali.py b/simba/sandbox/egocentric_video_dali.py
new file mode 100644
index 000000000..d5821fe23
--- /dev/null
+++ b/simba/sandbox/egocentric_video_dali.py
@@ -0,0 +1,70 @@
+import cv2
+import numba
+import numpy as np
+
+
+@numba.jit(nopython=True, cache=True)
+def _bilinear_interpolate(image: np.ndarray, x: float, y: float):
+    """
+    Perform bilinear interpolation on an image at fractional coordinates (x, y). Assumes coordinates (x, y) are within the bounds of the image.
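+    With corner intensities I00, I01, I10, I11 around (x, y) and fractional offsets
+    dx = x - floor(x), dy = y - floor(y), the returned value is
+    I00*(1-dx)*(1-dy) + I01*dx*(1-dy) + I10*(1-dx)*dy + I11*dx*dy.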
+ """ + + x0, y0 = int(np.floor(x)), int(np.floor(y)) + dx, dy = x - x0, y - y0 + if x0 < 0 or x0 + 1 >= image.shape[1] or y0 < 0 or y0 + 1 >= image.shape[0]: + return 0 + + I00, I01 = image[y0, x0], image[y0, x0+1] + I10, I11 = image[y0+1, x0], image[y0+1, x0+1] + interpolated_value = (I00 * (1 - dx) * (1 - dy) + I01 * dx * (1 - dy) + I10 * (1 - dx) * dy + I11 * dx * dy) + return interpolated_value + +@numba.jit(nopython=True) +def warp_affine_numpy(frames: np.ndarray, rotation_matrices: np.ndarray): + N, H, W, C = frames.shape + warped_frames = np.zeros_like(frames) + for i in range(N): + frame = frames[i] + rotation_matrix = rotation_matrices[i] + affine_matrix = rotation_matrix[:2, :2] + translation = np.ascontiguousarray(rotation_matrix[:2, 2]) + inverse_affine_matrix = np.ascontiguousarray(np.linalg.inv(affine_matrix)) + inverse_translation = -np.dot(inverse_affine_matrix, translation) + for r in range(H): + for c in range(W): + src_x = inverse_affine_matrix[0, 0] * c + inverse_affine_matrix[0, 1] * r + inverse_translation[0] + src_y = inverse_affine_matrix[1, 0] * c + inverse_affine_matrix[1, 1] * r + inverse_translation[1] + for ch in range(C): + warped_frames[i, r, c, ch] = _bilinear_interpolate(frame[:, :, ch], src_x, src_y) + return warped_frames + + +from simba.utils.read_write import read_df, read_img_batch_from_video_gpu +from simba.utils.data import egocentrically_align_pose +from simba.sandbox.warp_numba import center_rotation_warpaffine_vectors, align_target_warpaffine_vectors +import cv2 + +DATA_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/data/501_MA142_Gi_Saline_0513.csv" +VIDEO_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513.mp4" +SAVE_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513_rotated.mp4" +ANCHOR_LOC = np.array([300, 300]) + +df = read_df(file_path=DATA_PATH, file_type='csv') +bp_cols = [x for x in df.columns if not x.endswith('_p')] +data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int64) +data, centers, rotation_matrices = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=180) +imgs = read_img_batch_from_video_gpu(video_path=VIDEO_PATH, start_frm=0, end_frm=100) +imgs = np.stack(list(imgs.values()), axis=0) + +rot_matrices_center = center_rotation_warpaffine_vectors(rotation_vectors=rotation_matrices, centers=centers) +rot_matrices_align = align_target_warpaffine_vectors(centers=centers, target=ANCHOR_LOC) + +imgs_centered = warp_affine_numpy(frames=imgs, rotation_matrices=rot_matrices_center) +imgs_out = warp_affine_numpy(frames=imgs_centered, rotation_matrices=rot_matrices_align) + +for i in range(imgs_out.shape[0]): + + + cv2.imshow('sadasdas', imgs_out[i]) + cv2.waitKey(60) diff --git a/simba/sandbox/egocentric_video_rotation.py b/simba/sandbox/egocentric_video_rotation.py new file mode 100644 index 000000000..0f8696aa3 --- /dev/null +++ b/simba/sandbox/egocentric_video_rotation.py @@ -0,0 +1,161 @@ +import os +from typing import Union, Tuple, Optional +import numpy as np +import functools +import multiprocessing +import cv2 + +from simba.utils.checks import check_if_valid_rgb_tuple, check_valid_boolean, check_int, check_file_exist_and_readable, check_if_dir_exists, check_valid_array +from simba.utils.enums import Formats +from simba.utils.read_write import get_video_meta_data, get_fn_ext, find_core_cnt, remove_a_folder, read_frm_of_video, concatenate_videos_in_folder, read_df +from 
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.data import egocentrically_align_pose
+
+def egocentric_video_aligner(frm_range: np.ndarray,
+                             video_path: Union[str, os.PathLike],
+                             temp_dir: Union[str, os.PathLike],
+                             video_name: str,
+                             centers: np.ndarray,
+                             rotation_vectors: np.ndarray,
+                             target: Tuple[int, int],
+                             fill_clr: Tuple[int, int, int] = (255, 255, 255),
+                             verbose: bool = False):
+
+    video_meta = get_video_meta_data(video_path=video_path)
+    cap = cv2.VideoCapture(video_path)
+    batch, frm_range = frm_range[0], frm_range[1]
+    save_path = os.path.join(temp_dir, f'{batch}.mp4')
+    fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta['fps'], (video_meta['width'], video_meta['height']))
+
+    for frm_cnt, frm_id in enumerate(frm_range):
+        img = read_frm_of_video(video_path=cap, frame_index=frm_id)
+        R, center = rotation_vectors[frm_id], centers[frm_id]
+        M_rotate = np.hstack([R, np.array([[-center[0] * R[0, 0] - center[1] * R[0, 1] + center[0]], [-center[0] * R[1, 0] - center[1] * R[1, 1] + center[1]]])])
+        rotated_frame = cv2.warpAffine(img, M_rotate, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+        translation_x = target[0] - center[0]
+        translation_y = target[1] - center[1]
+        M_translate = np.float32([[1, 0, translation_x], [0, 1, translation_y]])
+        final_frame = cv2.warpAffine(rotated_frame, M_translate, (video_meta['width'], video_meta['height']), borderValue=fill_clr)
+        writer.write(final_frame)
+        if verbose:
+            print(f'Creating frame {frm_id} ({video_name}, CPU core: {batch+1}).')
+
+    cap.release()
+    writer.release()
+    return batch+1
+
+class EgocentricVideoRotator():
+
+    """
+    Rotate and translate the frames of a video so that a tracked anchor point is held
+    at a fixed pixel location, producing an egocentrically aligned copy of the video.
+
+    :param Union[str, os.PathLike] video_path: Path to a video file.
+    :param np.ndarray centers: A 2D array of shape `(num_frames, 2)` containing the original locations of `anchor_1_idx` in each frame before alignment.
Returned by `simba.utils.data.egocentrically_align_pose`.
+    :param np.ndarray rotation_vectors: A `(num_frames, 2, 2)` array of per-frame rotation matrices, also returned by `egocentrically_align_pose`.
+    :param np.ndarray anchor_location: Pixel location `(x, y)` where the anchor point should sit in the output frames.
+    :param bool verbose: If True, prints progress for every rotated frame.
+    :param Tuple[int, int, int] fill_clr: RGB color used to fill the border exposed by the rotation.
+    :param int core_cnt: Number of CPU cores to use; `-1` uses all available cores.
+    :param Optional[Union[str, os.PathLike]] save_path: Location of the output video. If None, saves next to the input video with a `_rotated.mp4` suffix.
+
+    :example:
+    >>> DATA_PATH = "C:\501_MA142_Gi_Saline_0513.csv"
+    >>> VIDEO_PATH = "C:\501_MA142_Gi_Saline_0513.mp4"
+    >>> SAVE_PATH = "C:\501_MA142_Gi_Saline_0513_rotated.mp4"
+    >>> ANCHOR_LOC = np.array([250, 250])
+
+    >>> df = read_df(file_path=DATA_PATH, file_type='csv')
+    >>> bp_cols = [x for x in df.columns if not x.endswith('_p')]
+    >>> data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+    >>> _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+    >>> rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=ANCHOR_LOC, save_path=SAVE_PATH)
+    >>> rotater.run()
+    """
+
+    def __init__(self,
+                 video_path: Union[str, os.PathLike],
+                 centers: np.ndarray,
+                 rotation_vectors: np.ndarray,
+                 anchor_location: np.ndarray,
+                 verbose: bool = True,
+                 fill_clr: Tuple[int, int, int] = (0, 0, 0),
+                 core_cnt: int = -1,
+                 save_path: Optional[Union[str, os.PathLike]] = None):
+
+        check_file_exist_and_readable(file_path=video_path)
+        self.video_meta_data = get_video_meta_data(video_path=video_path)
+        check_valid_array(data=centers, source=f'{self.__class__.__name__} centers', accepted_ndims=(2,), accepted_axis_1_shape=[2,], accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_valid_array(data=rotation_vectors, source=f'{self.__class__.__name__} rotation_vectors', accepted_ndims=(3,), accepted_axis_0_shape=[self.video_meta_data['frame_count']], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_valid_array(data=anchor_location, source=f'{self.__class__.__name__} anchor_location', accepted_ndims=(1,), accepted_axis_0_shape=[2], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
+        check_if_valid_rgb_tuple(data=fill_clr)
+        check_int(name=f'{self.__class__.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
+        if core_cnt > find_core_cnt()[0] or core_cnt == -1: self.core_cnt = find_core_cnt()[0]
+        else: self.core_cnt = core_cnt
+        video_dir, self.video_name, _ = get_fn_ext(filepath=video_path)
+        if save_path is not None:
+            self.save_dir = os.path.dirname(save_path)
+            check_if_dir_exists(in_dir=self.save_dir, source=f'{self.__class__.__name__} save_path')
+        else:
+            self.save_dir = video_dir
+            save_path = os.path.join(video_dir, f'{self.video_name}_rotated.mp4')
+        self.video_path, self.save_path = video_path, save_path
+        self.centers, self.rotation_vectors = centers, rotation_vectors
+        self.verbose, self.fill_clr, self.anchor_loc = verbose, fill_clr, anchor_location
+
+    def run(self):
+        video_timer = SimbaTimer(start=True)
+        temp_dir = os.path.join(self.save_dir, 'temp')
+        if not os.path.isdir(temp_dir):
+            os.makedirs(temp_dir)
+        else:
+            remove_a_folder(folder_dir=temp_dir)
+            os.makedirs(temp_dir)
+        frm_list = np.arange(0, self.video_meta_data['frame_count'])
+        frm_list = np.array_split(frm_list, self.core_cnt)
+        frm_list = [(cnt, x) for cnt, x in enumerate(frm_list)]
+        print(f"Creating rotated video {self.video_name}, multiprocessing (chunksize: {1}, cores: {self.core_cnt})...")
+        with multiprocessing.Pool(self.core_cnt, maxtasksperchild=100) as pool:
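+            # Frame indices are split into core_cnt contiguous batches; each worker
+            # renders its batch to '<temp_dir>/<batch>.mp4', and the per-batch files
+            # are concatenated in index order once the pool completes.
+            constants = 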
functools.partial(egocentric_video_aligner,
+                                          temp_dir=temp_dir,
+                                          video_name=self.video_name,
+                                          video_path=self.video_path,
+                                          centers=self.centers,
+                                          rotation_vectors=self.rotation_vectors,
+                                          target=self.anchor_loc,
+                                          verbose=self.verbose,
+                                          fill_clr=self.fill_clr)
+            for cnt, result in enumerate(pool.imap(constants, frm_list, chunksize=1)):
+                print(f"Rotate batch {result}/{self.core_cnt} complete...")
+            pool.terminate()
+            pool.join()
+
+        concatenate_videos_in_folder(in_folder=temp_dir, save_path=self.save_path, remove_splits=True, gpu=False)
+        video_timer.stop_timer()
+        stdout_success(msg=f"Egocentric rotation video {self.save_path} complete", elapsed_time=video_timer.elapsed_time_str, source=self.__class__.__name__)
+
+
+# if __name__ == "__main__":
+#     DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+#     VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+#     SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+#     ANCHOR_LOC = np.array([250, 250])
+#
+#     df = read_df(file_path=DATA_PATH, file_type='csv')
+#     bp_cols = [x for x in df.columns if not x.endswith('_p')]
+#     data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+#
+#     _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+#     rotater = EgocentricVideoRotator(video_path=VIDEO_PATH, centers=centers, rotation_vectors=rotation_vectors, anchor_location=ANCHOR_LOC, save_path=SAVE_PATH)
+#     rotater.run()
+
diff --git a/simba/sandbox/egocentric_video_rotator_cuda.py b/simba/sandbox/egocentric_video_rotator_cuda.py
new file mode 100644
index 000000000..15451df26
--- /dev/null
+++ b/simba/sandbox/egocentric_video_rotator_cuda.py
@@ -0,0 +1,289 @@
+import math
+import os
+import time
+from typing import Optional, Tuple, Union
+
+import cv2
+import numpy as np
+
+from simba.utils.read_write import read_df, read_img_batch_from_video_gpu, get_video_meta_data, get_fn_ext
+from simba.utils.data import egocentrically_align_pose, align_target_warpaffine_vectors, center_rotation_warpaffine_vectors
+from simba.utils.checks import check_valid_array, check_int, check_if_dir_exists
+from simba.utils.enums import Formats
+from simba.utils.errors import FrameRangeError
+from numba import cuda, njit, prange, jit
+
+THREADS_PER_BLOCK = 256
+
+#@njit("(int32[:, :, :], int64, int64, int64, int64[:])")
+@njit("(float64[:, :, :]),")
+def _get_inverse_affine_matrices_inverse_translation(matrices: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+    """ Helper for egocentric rotation of videos.
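+    For each 2x3 warpAffine matrix M = [A | t], the inverse mapping used when sampling
+    source pixels is x_src = inv(A) @ x_dst - inv(A) @ t; the two arrays returned hold
+    inv(A) and -inv(A) @ t for every frame.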
This function is used by """ + matrices = np.ascontiguousarray(matrices) + inverse_affine_matrix = np.ascontiguousarray(np.full((matrices.shape[0], 2, 2), fill_value=np.nan, dtype=np.float64)) + inverse_translation_matrix = np.full((matrices.shape[0], 2), fill_value=np.nan, dtype=np.float64) + for i in prange(matrices.shape[0]): + inverse_affine_matrix[i] = np.linalg.inv(matrices[i][:2, :2]) + inverse_translation_matrix[i] = -np.dot(inverse_affine_matrix[i], matrices[i][:2, 2]) + return inverse_affine_matrix, inverse_translation_matrix + + +@jit() +def get_target_translations(targets): + rotation_matrix = np.eye(3) + results = np.full((targets.shape[0], 3, 3), fill_value=np.nan, dtype=np.float64) + for i in range(targets.shape[0]): + transform_matrix = np.dot(rotation_matrix, targets[i]) + results[i] = np.linalg.inv(transform_matrix) + return results + + +@cuda.jit() +def _egocentric_rotator_kernel_1(imgs, centers, target, rotation_matrices, results, video_width, video_height): + frm_idx = cuda.grid(1) # Get thread index + + # Ensure thread index is within bounds + if frm_idx >= imgs.shape[0]: + return + + img = imgs[frm_idx] # The image for this frame + center = centers[frm_idx] # The center for this frame + rotation_matrix = rotation_matrices[frm_idx] # The rotation matrix for this frame + + # Allocate transformation matrices in local memory + T_origin = cuda.local.array((3, 3), dtype=np.float32) + R = cuda.local.array((3, 3), dtype=np.float32) + T_target = cuda.local.array((3, 3), dtype=np.float32) + T_final = cuda.local.array((3, 3), dtype=np.float32) + + # Step 1: Translate to origin (center -> (0,0)) + T_origin[0, 0] = 1 + T_origin[0, 1] = 0 + T_origin[0, 2] = -center[0] + T_origin[1, 0] = 0 + T_origin[1, 1] = 1 + T_origin[1, 2] = -center[1] + T_origin[2, 0] = 0 + T_origin[2, 1] = 0 + T_origin[2, 2] = 1 + + # Step 2: Apply rotation matrix + R[0, 0] = rotation_matrix[0, 0] + R[0, 1] = rotation_matrix[0, 1] + R[1, 0] = rotation_matrix[1, 0] + R[1, 1] = rotation_matrix[1, 1] + R[2, 2] = 1 # Ensure homogeneous coordinate is included + + # Step 3: Translate back to target position + T_target[0, 0] = 1 + T_target[0, 1] = 0 + T_target[0, 2] = target[0] + T_target[1, 0] = 0 + T_target[1, 1] = 1 + T_target[1, 2] = target[1] + T_target[2, 0] = 0 + T_target[2, 1] = 0 + T_target[2, 2] = 1 + + # Combine the transformations: T_final = T_target * R * T_origin + for i in range(3): + for j in range(3): + T_final[i, j] = 0 + for k in range(3): + T_final[i, j] += T_target[i, k] * R[k, j] + + for i in range(3): + for j in range(3): + T_final[i, j] += T_final[i, j] * T_origin[i, j] + + # Apply the final transformation to every pixel in the image + video_height = video_height[0] + video_width = video_width[0] + for r in range(video_height): + for c in range(video_width): + # Transform coordinates + coords_x = c * T_final[0, 0] + r * T_final[0, 1] + T_final[0, 2] + coords_y = c * T_final[1, 0] + r * T_final[1, 1] + T_final[1, 2] + print(coords_x, coords_y) + + # Check if transformed coordinates are within bounds + if 0 <= coords_x < video_width and 0 <= coords_y < video_height: + for ch in range(3): # Assuming RGB images + results[frm_idx, r, c, ch] = img[int(coords_y), int(coords_x), ch] + + +@cuda.jit() +def _egocentric_rotator_kernel(imgs, + inverse_affines_center, + inverse_translations_center, + inverse_affines_target, + inverse_translations_target, + target_rotations, + results): + frm_idx = cuda.grid(1) + if frm_idx >= imgs.shape[0]: + return + else: + img, inverse_affine_center, 
inverse_translation_center = imgs[frm_idx], inverse_affines_center[frm_idx], inverse_translations_center[frm_idx] + inverse_affine_target, inverse_translation_target = inverse_affines_target[frm_idx], inverse_translations_target[frm_idx] + target_rotation = target_rotations[frm_idx] + H, W, C = img.shape + for r in range(H): + for c in range(W): + src_x = inverse_affine_center[0, 0] * c + inverse_affine_center[0, 1] * r + inverse_translation_center[0] + src_y = inverse_affine_center[1, 0] * c + inverse_affine_center[1, 1] * r + inverse_translation_center[1] + x0 = int(math.floor(src_x)) + y0 = int(math.floor(src_y)) + dx, dy = int(src_x - x0), int(src_y - y0) + for ch in range(C): + if x0 < 0 or x0 + 1 >= img.shape[1] or y0 < 0 or y0 + 1 >= img.shape[0]: + results[frm_idx, r, c, ch] = 0 + else: + I00, I01 = img[y0, x0][0], img[y0, x0 + 1][0] + I10, I11 = img[y0 + 1, x0][0], img[y0 + 1, x0 + 1][0] + val = I00 * (1 - dx) * (1 - dy) + I01 * dx * (1 - dy) + I10 * (1 - dx) * dy + I11 * dx * dy + results[frm_idx, r, c, ch] = val + + cuda.syncthreads() + + #img = results[frm_idx] + for r in range(H): + for c in range(W): + src_x = int(c - target_rotations[frm_idx, 0, 2]) + src_y = int(r - target_rotations[frm_idx, 1, 2]) + if 0 <= src_x < W and 0 <= src_y < H: + for ch in range(C): + results[frm_idx, r, c, ch] = results[frm_idx, src_y, src_x, ch] + else: + print(src_x, src_y) + for ch in range(C): + results[frm_idx, r, c, ch] = 255 + # + # + # src_x = inverse_affine_target[0, 0] * c + inverse_affine_target[0, 1] * r + inverse_translation_target[0] + # src_y = inverse_affine_target[1, 0] * c + inverse_affine_target[1, 1] * r + inverse_translation_target[1] + # x0 = int(math.floor(src_x)) + # y0 = int(math.floor(src_y)) + # dx, dy = int(src_x - x0), int(src_y - y0) + # for ch in range(C): + # if x0 < 0 or x0 + 1 >= img.shape[1] or y0 < 0 or y0 + 1 >= img.shape[0]: + # results[frm_idx, r, c, ch] = 0 + # else: + # I00, I01 = img[y0, x0][0], img[y0, x0 + 1][0] + # I10, I11 = img[y0 + 1, x0][0], img[y0 + 1, x0 + 1][0] + # results[frm_idx, r, c, ch] = I00 * (1 - dx) * (1 - dy) + I01 * dx * (1 - dy) + I10 * (1 - dx) * dy + I11 * dx * dy + # + +def egocentric_video_rotator_cuda(video_path: np.ndarray, + rotation_matrix: np.ndarray, + center_matrix: np.ndarray, + anchor_loc: np.ndarray, + batch_size: Optional[int] = 100, + save_path: Optional[Union[str, os.PathLike]] = None): + + video_meta_data = get_video_meta_data(video_path=video_path) + if video_meta_data['frame_count'] != rotation_matrix.shape[0]: + raise FrameRangeError(msg=f'The video {video_path} contains {video_meta_data["frame_count"]} frames while the the rotation_matrix has data for {rotation_matrix.shape[0]} frames', source=egocentric_video_rotator_cuda.__name__) + if video_meta_data['frame_count'] != center_matrix.shape[0]: + raise FrameRangeError(msg=f'The video {video_path} contains {video_meta_data["frame_count"]} frames while the the center_matrix has data for {center_matrix.shape[0]} frames', source=egocentric_video_rotator_cuda.__name__) + if rotation_matrix.shape[0] != center_matrix.shape[0]: + raise FrameRangeError(msg=f'The center_matrix has data for {center_matrix.shape[0]} frames while the rotation_matrix has data for {rotation_matrix.shape[0]} frames', source=egocentric_video_rotator_cuda.__name__) + check_int(name=f'{egocentric_video_rotator_cuda.__name__} batch_size', value=batch_size, min_value=1) + check_valid_array(data=anchor_loc, source=f'{egocentric_video_rotator_cuda.__name__} anchor_loc', accepted_axis_0_shape=[2,], 
accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    video_dir, video_name, video_ext = get_fn_ext(filepath=video_path)
+    if save_path is not None:
+        check_if_dir_exists(in_dir=os.path.dirname(save_path), source=egocentric_video_rotator_cuda.__name__)
+    else:
+        save_path = os.path.join(video_dir, f'{video_name}_rotated.mp4')
+    fourcc = cv2.VideoWriter_fourcc(*f'{Formats.MP4_CODEC.value}')
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta_data['fps'], (video_meta_data['width'], video_meta_data['height']))
+    for batch_cnt, l in enumerate(range(0, video_meta_data['frame_count'], batch_size)):
+        r = min(l + batch_size - 1, video_meta_data['frame_count'])
+        print(f'Reading frames {l}-{r} (video: {video_name}, frames: {video_meta_data["frame_count"]})...')
+        batch_imgs = read_img_batch_from_video_gpu(video_path=video_path, start_frm=l, end_frm=r)
+        batch_imgs = np.ascontiguousarray(np.stack(list(batch_imgs.values()), axis=0)).astype(np.uint8)
+        batch_rot = np.ascontiguousarray(rotation_matrix[l:r+1]).astype(np.float64)
+        batch_results = np.full_like(batch_imgs, fill_value=0, dtype=np.uint8)
+        batch_centers = np.ascontiguousarray(center_matrix[l:r+1])
+        batch_center_rotations = center_rotation_warpaffine_vectors(rotation_vectors=batch_rot, centers=batch_centers)
+        batch_target_rotations = align_target_warpaffine_vectors(centers=batch_centers, target=anchor_loc)
+        inverse_affine_center, inverse_translation_center = _get_inverse_affine_matrices_inverse_translation(matrices=batch_center_rotations)
+        inverse_affine_target, inverse_translation_target = _get_inverse_affine_matrices_inverse_translation(matrices=batch_target_rotations)
+        batch_imgs_dev = cuda.to_device(batch_imgs)
+        batch_results_dev = cuda.to_device(batch_results)
+        inverse_affine_center_dev = cuda.to_device(inverse_affine_center)
+        inverse_translation_center_dev = cuda.to_device(inverse_translation_center)
+        inverse_affine_target_dev = cuda.to_device(inverse_affine_target)
+        inverse_translation_target_dev = cuda.to_device(inverse_translation_target)
+        batch_target_rotations_dev = cuda.to_device(batch_target_rotations)
+        bpg = (batch_imgs_dev.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+
+        batch_centers_dev = cuda.to_device(batch_centers)
+        target_dev = cuda.to_device(anchor_loc)
+        batch_rot_dev = cuda.to_device(batch_rot)
+        w_dev = cuda.to_device(np.array([video_meta_data['width']]))
+        h_dev = cuda.to_device(np.array([video_meta_data['height']]))
+
+        _egocentric_rotator_kernel_1[bpg, THREADS_PER_BLOCK](batch_imgs_dev,
+                                                             batch_centers_dev,
+                                                             target_dev,
+                                                             batch_rot_dev,
+                                                             batch_results_dev,
+                                                             w_dev,
+                                                             h_dev)
+        # _egocentric_rotator_kernel[bpg, THREADS_PER_BLOCK](batch_imgs_dev,
+        #                                                    inverse_affine_center_dev,
+        #                                                    inverse_translation_center_dev,
+        #                                                    inverse_affine_target_dev,
+        #                                                    inverse_translation_target_dev,
+        #                                                    batch_target_rotations_dev,
+        #                                                    batch_results_dev)
+        out_results = batch_results_dev.copy_to_host()
+        for frm in range(out_results.shape[0]):
+            # if len(np.unique(out_results[frm])) == 1:
+            #     errors.append(batch_target_rotations[frm])
+            # else:
+            #     correct.append(batch_target_rotations[frm])
+            writer.write(cv2.cvtColor(out_results[frm], cv2.COLOR_BGR2RGB))
+    writer.release()
+
r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/data/501_MA142_Gi_Saline_0513.csv" +VIDEO_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513.mp4" +SAVE_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513_rotated.mp4" +ANCHOR_LOC = np.array([250, 250]) + +df = read_df(file_path=DATA_PATH, file_type='csv') +bp_cols = [x for x in df.columns if not x.endswith('_p')] +data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32).astype(np.int32) +rotated_pose, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=5, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0) + +# imgs = read_img_batch_from_video_gpu(video_path=VIDEO_PATH, start_frm=0, end_frm=500) +# imgs = np.stack(list(imgs.values()), axis=0) +start = time.perf_counter() +results = egocentric_video_rotator_cuda(video_path=VIDEO_PATH, + rotation_matrix=rotation_vectors, + anchor_loc=ANCHOR_LOC, + center_matrix=centers, + batch_size=1000) +print(time.perf_counter() - start) +# # +# for i in range(results.shape[0]): +# print(i) +# cv2.imshow('asdasd', results[i]) +# cv2.waitKey(60) + diff --git a/simba/sandbox/elliptic_envelope.py b/simba/sandbox/elliptic_envelope.py new file mode 100644 index 000000000..2b01949c5 --- /dev/null +++ b/simba/sandbox/elliptic_envelope.py @@ -0,0 +1,59 @@ +from sklearn.covariance import EllipticEnvelope +from typing import Optional +from simba.utils.read_write import read_pickle +import numpy as np +from simba.mixins.plotting_mixin import PlottingMixin + +from simba.utils.checks import check_valid_array, check_float + + +def elliptic_envelope(data: np.ndarray, + contamination: Optional[float] = 1e-1, + normalize: Optional[bool] = True) -> np.ndarray: + """ + Compute the Mahalanobis distances of each observation in the input array using Elliptic Envelope method. + + .. image:: _static/img/elliptic_envelope.png + :width: 800 + :align: center + + :param data: Input data array of shape (n_samples, n_features). + :param Optional[float] contamination: The proportion of outliers to be assumed in the data. Defaults to 0.1. + :param Optional[bool] normalize: Whether to normalize the Mahalanobis distances between 0 and 1. Defaults to True. + :return np.ndarray: The Mahalanobis distances of each observation in array. Larger values indicate outliers. 
+
+    :example:
+    >>> data_path = '/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/clusters/beautiful_beaver.pickle'
+    >>> x = read_pickle(data_path=data_path)['DR_MODEL']['MODEL'].embedding_
+    >>> y = elliptic_envelope(data=x, contamination=0.1)
+    >>> data = np.hstack((x, y.reshape(-1, 1)))
+    >>> img = PlottingMixin.continuous_scatter(data=data, columns=('X', 'Y', 'Mahalanobis distances'), size=20, palette='jet')
+    """
+
+    check_valid_array(data=data, accepted_ndims=(2,), accepted_dtypes=(np.float64, np.float32, np.int32, np.int64, float, int))
+    check_float(name=f'{elliptic_envelope.__name__} contamination', value=contamination, min_value=0.0, max_value=1.0)
+    mdl = EllipticEnvelope(contamination=contamination).fit(data)
+    y = -mdl.score_samples(data)
+    if normalize:
+        y = (y - np.min(y)) / (np.max(y) - np.min(y))
+    return y
+
+
+# #img = PlottingMixin.categorical_scatter(data=data, columns=('X', 'Y', 'LOF'), size=20, palette='Dark2')
+#
+# data_path = '/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/clusters/beautiful_beaver.pickle'
+# #data_path = '/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/small_clusters/adoring_hoover.pickle'
+# x = read_pickle(data_path=data_path)['DR_MODEL']['MODEL'].embedding_
+# y = elliptic_envelope(data=x, contamination=0.1)
+# data = np.hstack((x, y.reshape(-1, 1)))
+# img = PlottingMixin.continuous_scatter(data=data, columns=('X', 'Y', 'Mahalanobis distances'), size=20, palette='jet')
+
diff --git a/simba/sandbox/entropy_of_directional_changes.py b/simba/sandbox/entropy_of_directional_changes.py
new file mode 100644
index 000000000..4773b881d
--- /dev/null
+++ b/simba/sandbox/entropy_of_directional_changes.py
@@ -0,0 +1,95 @@
+import numpy as np
+import pandas as pd
+from simba.utils.checks import check_valid_array, check_float, check_int
+from simba.utils.enums import Formats
+
+
+def entropy_of_directional_changes(x: np.ndarray, bins: int = 16) -> float:
+    """
+    Computes the Entropy of Directional Changes (EDC) of a path represented by an array of points.
+
+    The output value ranges from 0 to log2(bins).
+
+    The Entropy of Directional Changes quantifies the unpredictability or randomness of the directional
+    changes in a given path. Higher entropy indicates more variation in the directions of the movement,
+    while lower entropy suggests more linear or predictable movement.
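+
+    For example, a perfectly straight path places every directional change in a single bin,
+    giving an entropy of 0, while changes spread uniformly over all 16 default bins give
+    log2(16) = 4 bits.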
+ :rtype: float + + :example: + >>> x = np.random.randint(0, 500, (100, 2)) + >>> TimeseriesFeatureMixin.entropy_of_directional_changes(x, 3) + """ + + check_int(name=f'{entropy_of_directional_changes.__name__} bins', value=bins) + check_valid_array(data=x, source=f'{entropy_of_directional_changes.__name__} x', + accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value) + direction_vectors = np.diff(x, axis=0) + angles = np.arctan2(direction_vectors[:, 1], direction_vectors[:, 0]) * (180 / np.pi) + angles = (angles + 360) % 360 + angle_bins = np.linspace(0, 360, bins + 1) + digitized_angles = np.digitize(angles, angle_bins) - 1 + hist, _ = np.histogram(digitized_angles, bins=bins, range=(0, bins)) + hist = hist / hist.sum() + return np.max((0.0, -np.sum(hist * np.log2(hist + 1e-10)))) + + +def sliding_entropy_of_directional_changes(x: np.ndarray, + bins: int, + window_size: float, + sample_rate: float) -> np.ndarray: + """ + Computes a sliding window Entropy of Directional Changes (EDC) over a path represented by an array of points. + + This function calculates the entropy of directional changes within a specified window, sliding across the entire path. + By analyzing the changes in direction over shorter segments (windows) of the path, it provides a dynamic view of + movement unpredictability or randomness along the path. Higher entropy within a window indicates more varied directional + changes, while lower entropy suggests more consistent directional movement within that segment. + + :param np.ndarray x: A 2D array of shape (N, 2) representing the path, where N is the number of points and each point has two spatial coordinates (e.g., x and y for 2D space). The path should be in the form of an array of consecutive (x, y) points. + :param int bins: The number of bins to discretize the directional changes. Default is 16 bins for angles between 0 and 360 degrees. A larger number of bins will increase the precision of direction change measurement. + :param float window_size: The duration of the sliding window, in seconds, over which to compute the entropy. + :param float sample_rate: The sampling rate (in frames per second) of the path data. This parameter converts `window_size` from seconds into frames, defining the number of consecutive points in each sliding window. + :return: A 1D numpy array of length N, where each element contains the entropy of directional changes for each frame, computed over the specified sliding window. Frames before the first full window contain NaN values. 
+    :rtype: np.ndarray
+
+    :example:
+    >>> x = np.random.randint(0, 100, (400, 2))
+    >>> results = sliding_entropy_of_directional_changes(x=x, bins=16, window_size=5.0, sample_rate=30)
+    >>> x = pd.read_csv(r"C:\troubleshooting\two_black_animals_14bp\project_folder\csv\input_csv\Together_1.csv")[['Ear_left_1_x', 'Ear_left_1_y']].values
+    >>> results = sliding_entropy_of_directional_changes(x=x, bins=16, window_size=5.0, sample_rate=30)
+    """
+
+    direction_vectors = np.diff(x, axis=0)
+    angles = np.arctan2(direction_vectors[:, 1], direction_vectors[:, 0]) * (180 / np.pi)
+    angles = (angles + 360) % 360
+    angle_bins = np.linspace(0, 360, bins + 1)
+    frame_step = int(max(1.0, window_size * sample_rate))
+    results = np.full(shape=(x.shape[0]), fill_value=np.nan, dtype=np.float64)
+    for r in range(frame_step, direction_vectors.shape[0]+1):
+        l = r - frame_step
+        sample_angles = angles[l:r]
+        digitized_angles = np.digitize(sample_angles, angle_bins) - 1
+        hist, _ = np.histogram(digitized_angles, bins=bins, range=(0, bins))
+        hist = hist / hist.sum()
+        results[r] = np.max((0.0, -np.sum(hist * np.log2(hist + 1e-10))))
+
+    return results
+
+x = pd.read_csv(r"C:\troubleshooting\two_black_animals_14bp\project_folder\csv\input_csv\Together_1.csv")[['Ear_left_1_x', 'Ear_left_1_y']].values
+# x = np.random.randint(0, 100, (400, 2))
+bins = 16
+window_size = 5.0
+sample_rate = 30
+results = sliding_entropy_of_directional_changes(x=x, bins=bins, window_size=window_size, sample_rate=sample_rate)
+# entropy_of_directional_changes(x=x)
+
diff --git a/simba/sandbox/eta_squared.py b/simba/sandbox/eta_squared.py
new file mode 100644
index 000000000..eeff7b2d7
--- /dev/null
+++ b/simba/sandbox/eta_squared.py
@@ -0,0 +1,48 @@
+import time
+
+import numpy as np
+import pandas as pd
+from numba import jit
+
+@jit(nopython=True)
+def sliding_eta_squared(x: np.ndarray, y: np.ndarray, window_sizes: np.ndarray, sample_rate: int) -> np.ndarray:
+    """
+    Calculate sliding window eta-squared, a measure of effect size for between-subjects designs,
+    over multiple window sizes.
+
+    :param np.ndarray x: The array containing the dependent variable data.
+    :param np.ndarray y: The array containing the grouping variable (categorical) data.
+    :param np.ndarray window_sizes: 1D array of window sizes in seconds.
+    :param int sample_rate: The sampling rate of the data in frames per second.
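+
+    Within each window, eta-squared is computed as SS_between / (SS_between + SS_within);
+    note that the value written to the output array is its square root.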
+    :return np.ndarray: Array of shape (x.shape[0], window_sizes.shape[0]) holding the sliding effect sizes.
+
+    :example:
+    >>> x = np.random.randint(0, 10, (10000,))
+    >>> y = np.random.randint(0, 2, (10000,))
+    >>> p = sliding_eta_squared(x=x, y=y, window_sizes=np.array([1.0, 2.0]), sample_rate=10)
+
+    """
+    results = np.full((x.shape[0], window_sizes.shape[0]), -1.0)
+    for i in range(window_sizes.shape[0]):
+        window_size = int(window_sizes[i] * sample_rate)
+        for l, r in zip(range(0, x.shape[0] + 1), range(window_size, x.shape[0] + 1)):
+            sample_x = x[l:r]
+            sample_y = y[l:r]
+            sum_square_within, sum_square_between = 0, 0
+            for lbl in np.unique(sample_y):
+                g = sample_x[np.argwhere(sample_y == lbl).flatten()]
+                sum_square_within += np.sum((g - np.mean(g)) ** 2)
+                sum_square_between += len(g) * (np.mean(g) - np.mean(sample_x)) ** 2
+            if sum_square_between + sum_square_within == 0:
+                results[r - 1, i] = 0.0
+            else:
+                results[r - 1, i] = (sum_square_between / (sum_square_between + sum_square_within)) ** .5
+    return results
+
+
+x = np.random.randint(0, 10, (10000,))
+y = np.random.randint(0, 2, (10000,))
+p = sliding_eta_squared(x=x, y=y, window_sizes=np.array([1.0, 2.0]), sample_rate=10)
\ No newline at end of file
diff --git a/simba/sandbox/euclidan_distance_cuda.py b/simba/sandbox/euclidan_distance_cuda.py
new file mode 100644
index 000000000..dae3f2880
--- /dev/null
+++ b/simba/sandbox/euclidan_distance_cuda.py
@@ -0,0 +1,52 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import math
+
+import numpy as np
+from numba import cuda
+
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.utils.read_write import read_df
+
+THREADS_PER_BLOCK = 128
+
+@cuda.jit
+def _euclidean_distance_kernel(x_dev, y_dev, results):
+    i = cuda.grid(1)
+    if i < x_dev.shape[0]:
+        results[i] = math.sqrt((x_dev[i][0] - y_dev[i][0]) ** 2 + (x_dev[i][1] - y_dev[i][1]) ** 2)
+
+def get_euclidean_distance_cuda(x: np.ndarray, y: np.ndarray) -> np.ndarray:
+    """
+    Computes the Euclidean distance between two sets of points using CUDA for GPU acceleration.
+
+    .. image:: _static/img/get_euclidean_distance_cuda.png
+       :width: 500
+       :align: center
+
+    :param np.ndarray x: A 2D array of shape (n, m) representing n points in m-dimensional space. Each row corresponds to a point.
+    :param np.ndarray y: A 2D array of shape (n, m) representing n points in m-dimensional space. Each row corresponds to a point.
+    :return np.ndarray: A 1D array of shape (n,) where each element represents the Euclidean distance between the corresponding points in `x` and `y`.
+
+    :example:
+    >>> video_path = r"/mnt/c/troubleshooting/mitra/project_folder/videos/501_MA142_Gi_CNO_0514.mp4"
+    >>> data_path = r"/mnt/c/troubleshooting/mitra/project_folder/csv/outlier_corrected_movement_location/501_MA142_Gi_CNO_0514 - test.csv"
+    >>> df = read_df(file_path=data_path, file_type='csv')[['Center_x', 'Center_y']]
+    >>> shifted_df = FeatureExtractionMixin.create_shifted_df(df=df, periods=1)
+    >>> x = shifted_df[['Center_x', 'Center_y']].values
+    >>> y = shifted_df[['Center_x_shifted', 'Center_y_shifted']].values
+    >>> get_euclidean_distance_cuda(x=x, y=y)
+    """
+
+    x = np.ascontiguousarray(x).astype(np.int32)
+    y = np.ascontiguousarray(y).astype(np.int32)
+    n, m = x.shape
+    x_dev = cuda.to_device(x)
+    y_dev = cuda.to_device(y)
+    results = cuda.device_array((n,), dtype=np.int32)
+    bpg = (n + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _euclidean_distance_kernel[bpg, THREADS_PER_BLOCK](x_dev, y_dev, results)
+    return results.copy_to_host()
diff --git a/simba/sandbox/euclidean_distances_cuda.py b/simba/sandbox/euclidean_distances_cuda.py
new file mode 100644
index 000000000..b4bc909d4
--- /dev/null
+++ b/simba/sandbox/euclidean_distances_cuda.py
@@ -0,0 +1,38 @@
+from typing import Optional
+
+import cupy as cp
+import numpy as np
+
+from simba.utils.checks import check_int, check_valid_array
+from simba.utils.enums import Formats
+
+
+def get_euclidean_distance_cupy(x: np.ndarray,
+                                y: np.ndarray,
+                                batch_size: Optional[int] = int(3.5e10+7)) -> np.ndarray:
+    """
+    Computes the Euclidean distance between corresponding pairs of points in two 2D arrays
+    using CuPy for GPU acceleration. The computation is performed in batches to handle large
+    datasets efficiently.
+
+    :param np.ndarray x: A 2D NumPy array with shape (n, 2), where each row represents a point in a 2D space.
+    :param np.ndarray y: A 2D NumPy array with shape (n, 2), where each row represents a point in a 2D space. The shape of `y` must match the shape of `x`.
+    :param Optional[int] batch_size: The number of points to process in a single batch. This parameter controls memory usage and can be adjusted based on available GPU memory. The default value is large (`3.5e10 + 7`) to maximize GPU utilization, but it can be lowered if memory issues arise.
+    :return: A 1D NumPy array of shape (n,) containing the Euclidean distances between corresponding points in `x` and `y`.
+ :rtype: np.ndarray + + :example: + >>> x = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([[7, 8], [9, 10], [11, 12]]) + >>> distances = get_euclidean_distance_cupy(x, y) + """ + check_valid_array(data=x, source=check_valid_array.__name__, accepted_ndims=[2,], accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_valid_array(data=y, source=check_valid_array.__name__, accepted_ndims=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value, accepted_shapes=(x.shape,)) + check_int(name='batch_size', value=batch_size, min_value=1) + results = cp.full((x.shape[0]), fill_value=cp.nan, dtype=cp.float32) + for l in range(0, x.shape[0], batch_size): + r = l + batch_size + batch_x, batch_y = cp.array(x[l:r]), cp.array(y[l:r]) + results[l:r] = (cp.sqrt((batch_x[:, 0] - batch_y[:, 0]) ** 2 + (batch_x[:, 1] - batch_y[:, 1]) ** 2)) + return results.get() \ No newline at end of file diff --git a/simba/sandbox/ez_path_plot.py b/simba/sandbox/ez_path_plot.py new file mode 100644 index 000000000..0a51a09f3 --- /dev/null +++ b/simba/sandbox/ez_path_plot.py @@ -0,0 +1,109 @@ +__author__ = "Simon Nilsson" + +import os +from copy import deepcopy +from typing import Union, Tuple, Optional + +import cv2 +import numpy as np +import pandas as pd + +from simba.utils.errors import (DataHeaderError, DuplicationError, InvalidFileTypeError) +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import get_fn_ext, get_video_meta_data, read_config_file, read_df, get_number_of_header_columns_in_df +from simba.utils.checks import check_file_exist_and_readable, check_if_valid_rgb_tuple, check_int + +H5 = '.h5' +CSV = '.csv' + +class EzPathPlot(object): + def __init__(self, + data_path: Union[str, os.PathLike], + video_path: Union[str, os.PathLike], + body_part: str, + bg_color: Optional[Tuple[int, int, int]] = (255, 255, 255), + line_color: Optional[Tuple[int, int, int]] = (147, 20, 255), + line_thickness: Optional[int] = 10, + circle_size: Optional[int] = 5): + """ + Create a simple path plot for a single path in a single video. + + .. note:: + For more complex path plots with/without multiprocessing, see ``simba.plotting.path_plotter.PathPlotterSingleCore`` and ``simba.plotting.path_plotter_mp.PathPlotterMulticore``. + + Notebook example link -> + + + .. image:: _static/img/EzPathPlot.gif + :width: 500 + :align: center + + :param Union[str, os.PathLike] data_path: The path to the data file in H5c or CSV format containing the coordinates. + :param Union[str, os.PathLike] video_path: The path to the video file. + :param str body_part: The specific body part to plot the path for. + :param Optional[Tuple[int, int, int]] bg_color: The background color of the plot. Defaults to (255, 255, 255). + :param Optional[Tuple[int, int, int]] line_color: The color of the path line. Defaults to (147, 20, 255). + :param Optional[int] line_thickness: The thickness of the path line. Defaults to 10. + :param Optional[int] circle_size: The size of the circle indicating each data point. Defaults to 5. 
+
+        :example:
+        >>> path_plotter = EzPathPlot(data_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/h5/Together_1DLC_resnet50_two_black_mice_DLC_052820May27shuffle1_150000_el.h5', video_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/Together_1.avi', body_part='Mouse_1_Nose', bg_color=(255, 255, 255), line_color=(147,20,255))
+        >>> path_plotter.run()
+        """
+        check_file_exist_and_readable(file_path=data_path)
+        self.video_meta_data = get_video_meta_data(video_path=video_path)
+        check_if_valid_rgb_tuple(data=bg_color)
+        check_if_valid_rgb_tuple(data=line_color)
+        check_int(name=f'{self.__class__.__name__} line_thickness', value=line_thickness, min_value=1)
+        check_int(name=f'{self.__class__.__name__} circle_size', value=circle_size, min_value=1)
+        if line_color == bg_color:
+            raise DuplicationError(msg=f"The line and background cannot be identical - ({line_color})", source=self.__class__.__name__)
+
+        dir, file_name, ext = get_fn_ext(filepath=data_path)
+        if ext.lower() == H5:
+            self.data = pd.read_hdf(data_path)
+            headers = []
+            if len(self.data.columns[0]) == 4:
+                for c in self.data.columns:
+                    headers.append("{}_{}_{}".format(c[1], c[2], c[3]))
+            elif len(self.data.columns[0]) == 3:
+                for c in self.data.columns:
+                    headers.append("{}_{}".format(c[1], c[2]))
+            self.data.columns = headers
+        elif ext.lower() == CSV:
+            self.data = pd.read_csv(data_path)
+        else:
+            raise InvalidFileTypeError(msg=f"File type {ext} is not supported (OPTIONS: h5 or csv)")
+        if len(self.data.columns[0]) == 4:
+            self.data = self.data.loc[3:]
+        elif len(self.data.columns[0]) == 3:
+            self.data = self.data.loc[2:]
+        body_parts_available = list(set([x[:-2] for x in self.data.columns]))
+        if body_part not in body_parts_available:
+            raise DataHeaderError(msg=f"Body-part {body_part} is not present in the data file. The body-parts available are: {body_parts_available}", source=self.__class__.__name__)
+        bps = [f'{body_part}_x', f'{body_part}_y']
+        if (bps[0] not in self.data.columns) or (bps[1] not in self.data.columns):
+            raise DataHeaderError(msg=f"Could not find column {bps[0]} and/or column {bps[1]} in the data file {data_path}", source=self.__class__.__name__)
+        self.data = self.data[bps].fillna(method="ffill").astype(int).reset_index(drop=True).values
+        self.save_name = os.path.join(dir, f"{file_name}_line_plot.mp4")
+        self.writer = cv2.VideoWriter(self.save_name, 0x7634706D, int(self.video_meta_data["fps"]), (self.video_meta_data["width"], self.video_meta_data["height"]))
+        self.bg_img = np.zeros([self.video_meta_data["height"], self.video_meta_data["width"], 3])
+        self.bg_img[:] = [bg_color]
+        self.line_color, self.line_thickness, self.circle_size = line_color, line_thickness, circle_size
+        self.timer = SimbaTimer(start=True)
+
+    def run(self):
+        for i in range(1, self.data.shape[0]):
+            line_data = self.data[:i+1]
+            img = deepcopy(self.bg_img)
+            for j in range(1, line_data.shape[0]):
+                x1, y1 = line_data[j-1][0], line_data[j-1][1]
+                x2, y2 = line_data[j][0], line_data[j][1]
+                cv2.line(img, (x1, y1), (x2, y2), self.line_color, self.line_thickness)
+            cv2.circle(img, (line_data[-1][0], line_data[-1][1]), self.circle_size, self.line_color, -1)
+            self.writer.write(img.astype(np.uint8))
+            print(f"Frame {i}/{len(self.data)} complete...")
+
+        self.writer.release()
+        self.timer.stop_timer()
+        stdout_success(msg=f"Path plot saved at {self.save_name}", elapsed_time=self.timer.elapsed_time_str)
diff --git a/simba/sandbox/fecal_boli.py b/simba/sandbox/fecal_boli.py
new file mode 100644
index 000000000..4800c7f66
--- /dev/null
+++ b/simba/sandbox/fecal_boli.py
@@ -0,0 +1,49 @@
+from simba.utils.read_write import read_frm_of_video
+from simba.mixins.image_mixin import ImageMixin
+from simba.mixins.geometry_mixin import GeometryMixin
+import cv2
+import numpy as np
+
+video_path = r"C:\troubleshooting\mitra\project_folder\videos\bg_removed\503_MA109_Gi_Saline_0513.mp4"
+img = read_frm_of_video(video_path=video_path, frame_index=0, greyscale=True)
+
+# Set up the parameters for blob detection
+params = cv2.SimpleBlobDetector_Params()
+params.filterByColor = True
+params.blobColor = 0  # Detect dark blobs on a bright background
+params.filterByArea = True
+params.minArea = 2  # Minimum blob area
+params.maxArea = 5000  # Maximum blob area
+
+# Initialize the blob detector with the parameters
+detector = cv2.SimpleBlobDetector_create(params)
+
+# Detect blobs in the image
+keypoints = detector.detect(img)
+keypoints_array = np.array([[kp.pt[0], kp.pt[1], kp.size] for kp in keypoints])
+
+
+def count_fecal_boli(video_path: str):
+    img = read_frm_of_video(video_path=video_path, frame_index=0)
+    contours = ImageMixin.find_contours(img=img, mode='all', method='simple')
+    contour_lst = []
+    for i in contours:
+        contour_lst.append(i.reshape(1, -1, 2))
+
+    geometries = GeometryMixin.contours_to_geometries(contours=contour_lst, force_rectangles=False)
+    print(geometries)
+    img = GeometryMixin.view_shapes(shapes=geometries, bg_img=img, thickness=5)
+    cv2.imshow('fecal_boli', img)
+    cv2.waitKey(0)
+
+
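+# Editor's note (hypothetical alternative, not part of the original workflow): the
+# SimpleBlobDetector keypoints computed above already yield a count and can be
+# visualized directly, without the contour/geometry route in count_fecal_boli():
+# annotated = cv2.drawKeypoints(img, keypoints, None, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
+# print(f'Fecal boli detected: {len(keypoints)}')
+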
+count_fecal_boli(video_path=r"C:\troubleshooting\mitra\project_folder\videos\503_MA109_Gi_Saline_0513.mp4")
\ No newline at end of file
diff --git a/simba/sandbox/ffmpeg_progress_bar.py b/simba/sandbox/ffmpeg_progress_bar.py
new file mode 100644
index 000000000..d2e9c0462
--- /dev/null
+++ b/simba/sandbox/ffmpeg_progress_bar.py
@@ -0,0 +1,68 @@
+from typing import Union, Optional
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+import os
+import subprocess
+
+from simba.utils.checks import check_file_exist_and_readable, check_int, check_if_dir_exists
+from simba.utils.read_write import get_video_meta_data, get_fn_ext, find_all_videos_in_directory
+from simba.utils.errors import InvalidInputError
+from simba.utils.printing import SimbaTimer, stdout_success
+
+
+def overlay_video_progressbar(video_path: Union[str, os.PathLike],
+                              bar_height: Optional[int] = 10,
+                              color: Optional[str] = 'red',
+                              position: Optional[str] = 'top',
+                              save_dir: Optional[Union[str, os.PathLike]] = None) -> None:
+    """
+    Overlay a progress bar on a directory of videos or a single video.
+
+    .. video:: _static/img/overlay_video_progressbar.webm
+       :loop:
+
+    :param Union[str, os.PathLike] video_path: Directory containing video files or a single video file.
+    :param Optional[int] bar_height: The height of the progressbar in percent of the video height.
+    :param Optional[str] color: The color of the progress bar. See simba.utils.lookups.get_color_dict keys for accepted names.
+    :param Optional[str] position: The position of the progress bar: 'top' or 'bottom'. Default: 'top'.
+    :param Optional[Union[str, os.PathLike]] save_dir: If not None, then saves the videos in the passed directory. Else, in the same directory with the ``_progress_bar`` suffix.
+    :return: None.
+    """
+
+    timer = SimbaTimer(start=True)
+    color = ''.join(filter(str.isalnum, color)).lower()
+    if os.path.isfile(video_path):
+        check_file_exist_and_readable(file_path=video_path)
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_path = find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True)
+        video_paths = list(video_path.values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or directory path.', source=overlay_video_progressbar.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir, _, _ = get_fn_ext(filepath=video_paths[0])
+    for cnt, video_path in enumerate(video_paths):
+        video_meta_data = get_video_meta_data(video_path=video_path)
+        video_length = video_meta_data['video_length_s']
+        width, height = video_meta_data['width'], video_meta_data['height']
+        bar_height_px = int(height * (bar_height/100))
+        _, video_name, ext = get_fn_ext(filepath=video_path)
+        print(f'Inserting progress bar on video {video_name}...')
+        save_path = os.path.join(save_dir, f'{video_name}_progress_bar{ext}')
+        check_int(name=f'{overlay_video_progressbar.__name__} height', value=bar_height_px, max_value=height, min_value=1)
+        if position == 'bottom':
+            cmd = f'ffmpeg -i "{video_path}" -filter_complex "color=c={color}:s={width}x{bar_height_px}[bar];[0][bar]overlay=-w+(w/{video_length})*t:H-h:shortest=1" -c:a copy "{save_path}" -loglevel error -stats -hide_banner -y'
+        elif position == 'top':
+            cmd = f'ffmpeg -i "{video_path}" -filter_complex "color=c={color}:s={width}x{bar_height_px}[bar];[0][bar]overlay=-w+(w/{video_length})*t:{bar_height_px}-h:shortest=1" -c:a copy "{save_path}" -loglevel error -stats -hide_banner -y'
+        else:
+            raise InvalidInputError(msg=f'{position} is not a valid position (OPTIONS: top, bottom).', source=overlay_video_progressbar.__name__)
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(video_paths)} video(s) saved with progressbar in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=overlay_video_progressbar.__name__,)
+
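+# For reference (a reading of the ffmpeg filter docs, not verified against every
+# ffmpeg version): in the chain above, `color=c=<color>:s=<width>x<bar_height>`
+# generates a solid bar, and `overlay=-w+(w/<duration>)*t:<y>` slides it rightwards
+# at width/duration pixels per second, so the bar traverses the frame exactly once
+# over the full video.
+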
+overlay_video_progressbar(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/reptile/AGGRESSIVITY_4_11_21_Trial_2_camera1_clipped.mp4',
+                          bar_height=50,
+                          color='green',
+                          position='bottom')
\ No newline at end of file
diff --git a/simba/sandbox/fix_clahe.py b/simba/sandbox/fix_clahe.py
new file mode 100644
index 000000000..0c648a7a0
--- /dev/null
+++ b/simba/sandbox/fix_clahe.py
@@ -0,0 +1,65 @@
+import os
+import cv2
+import numpy as np
+from typing import Union
+from simba.utils.read_write import get_fn_ext, get_video_meta_data
+from simba.utils.enums import Formats
+from simba.utils.checks import check_file_exist_and_readable
+
+
+def clahe_enhance_video(file_path: Union[str, os.PathLike]) -> None:
+    """
+    Convert a single video file to a CLAHE-enhanced greyscale .avi file. The result is saved with prefix
+    ``CLAHE_`` in the same directory as the input file.
+
+    :parameter Union[str, os.PathLike] file_path: Path to video file.
+
+    :example:
+    >>> _ = clahe_enhance_video(file_path='project_folder/videos/Video_1.mp4')
+    """
+
+    dir, file_name, file_ext = get_fn_ext(filepath=file_path)
+    save_path = os.path.join(dir, f"CLAHE_{file_name}.avi")
+    video_meta_data = get_video_meta_data(file_path)
+    fourcc = cv2.VideoWriter_fourcc(*Formats.AVI_CODEC.value)
+    print(f"Applying CLAHE on video {file_name}, this might take a while...")
+    cap = cv2.VideoCapture(file_path)
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta_data["fps"], (video_meta_data["width"], video_meta_data["height"]), 0)
+    clahe_filter = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16))
+    frm_cnt = 0
+    try:
+        while True:
+            ret, img = cap.read()
+            if ret:
+                frm_cnt += 1
+                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+                clahe_frm = clahe_filter.apply(img)
+                writer.write(clahe_frm)
+                print(f"CLAHE converted frame {frm_cnt}/{video_meta_data['frame_count']}")
+            else:
+                break
+        cap.release()
+        writer.release()
+        print(f'Saved video at {save_path}')
+    except Exception as se:
+        print(se.args)
+        print(f"CLAHE conversion failed for video {file_name}.")
+        cap.release()
+        writer.release()
+        raise ValueError()
+
+
+#clahe_enhance_video(file_path=r'/Users/simon/Desktop/envs/simba/simba/tests/data/test_projects/mouse_open_field/project_folder/videos/Video1.mp4')
+
diff --git a/simba/sandbox/fleiss_kappa.py b/simba/sandbox/fleiss_kappa.py
new file mode 100644
index 000000000..2873f6dae
--- /dev/null
+++ b/simba/sandbox/fleiss_kappa.py
@@ -0,0 +1,35 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+from statsmodels.stats.inter_rater import fleiss_kappa
+
+
+def fleiss_kappa_(data: np.ndarray):
+    check_valid_array(data=data, source=f'{fleiss_kappa_.__name__} data', accepted_ndims=(2,))
+    n_rat = data.sum(axis=1).max()
+    p_cat = data.sum(axis=0) / data.sum()
+    data_2 = data * data
+    p_rat = (data_2.sum(axis=1) - n_rat) / (n_rat * (n_rat - 1.))
+    p_mean = p_rat.mean()
+    p_mean_exp = (p_cat*p_cat).sum()
+    return (p_mean - p_mean_exp) / (1 - p_mean_exp)
+
+
+data = np.array([[0, 1, 2, 3], [0, 3, 1, 2]])
+
+#data = np.random.randint(0, 2, (100, 4))
+x = fleiss_kappa_(data=data)
+y = fleiss_kappa(data)
+print(x, y)
\ No newline at end of file
diff --git a/simba/sandbox/freezing_detector.py b/simba/sandbox/freezing_detector.py
new file mode 100644
index 000000000..e57578881
--- /dev/null
+++ b/simba/sandbox/freezing_detector.py
@@ -0,0 +1,130 @@
+import os
+from typing import Union, Optional
+import numpy as np
+import pandas as pd
+from numba import typed
+from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, get_fn_ext, read_video_info
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.checks import check_if_dir_exists, check_str, check_valid_dataframe, check_int, check_all_file_names_are_represented_in_video_log
+from simba.utils.enums import Formats
+from simba.utils.data import detect_bouts, plug_holes_shortest_bout
+from simba.utils.printing import stdout_success
+
+
+NAPE_X, NAPE_Y = 'nape_x', 'nape_y'
+FREEZING = 'FREEZING'
+
+class FreezingDetector(ConfigReader):
+
+    """
+    Detect freezing behavior using heuristic rules.
+
+    .. important::
+
+       Freezing is detected as :underline:`present` when the velocity (computed from the mean movement of the nape, nose, and tail-base body-parts) falls below
+       the movement threshold for the duration of the defined time-window or longer.
+
+       Freezing is detected as :underline:`absent` when not present.
+
+    :param Union[str, os.PathLike] data_dir: Path to directory containing pose-estimated body-part data in CSV format.
+    :param Union[str, os.PathLike] config_path: Path to SimBA project config file.
+    :param Optional[str] nose_name: The name of the pose-estimated nose body-part. Defaults to 'nose'.
+    :param Optional[str] left_ear_name: The name of the pose-estimated left ear body-part. Defaults to 'left_ear'.
+    :param Optional[str] right_ear_name: The name of the pose-estimated right ear body-part. Defaults to 'right_ear'.
+    :param Optional[str] tail_base_name: The name of the pose-estimated tail base body-part. Defaults to 'tail_base'.
+    :param Optional[int] time_window: The time window in preceding seconds in which to evaluate freezing. Default: 3.
+    :param Optional[int] movement_threshold: A movement threshold in millimeters per second. Default: 5.
+    :param Optional[int] shortest_bout: The shortest allowed freezing bout, in milliseconds; shorter classified bouts are removed. Default: 100.
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory where to store the results. If None, then results are stored in the ``logs`` directory of the SimBA project.
+
+    :example:
+    >>> FreezingDetector(data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location', config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini")
+
+    References
+    ----------
+    .. [1] Sabnis et al., Visual detection of seizures in mice using supervised machine learning, `biorxiv`, doi: https://doi.org/10.1101/2024.05.29.596520.
+ + """ + + def __init__(self, + data_dir: Union[str, os.PathLike], + config_path: Union[str, os.PathLike], + nose_name: Optional[str] = 'nose', + left_ear_name: Optional[str] = 'Left_ear', + right_ear_name: Optional[str] = 'right_ear', + tail_base_name: Optional[str] = 'tail_base', + time_window: Optional[int] = 3, + movement_threshold: Optional[int] = 5, + shortest_bout: Optional[int] = 100, + save_dir: Optional[Union[str, os.PathLike]] = None): + + check_if_dir_exists(in_dir=data_dir) + for bp_name in [nose_name, left_ear_name, right_ear_name, tail_base_name]: check_str(name='body part name', value=bp_name, allow_blank=False) + self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv']) + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + self.nose_heads = [f'{nose_name}_x'.lower(), f'{nose_name}_y'.lower()] + self.left_ear_heads = [f'{left_ear_name}_x'.lower(), f'{left_ear_name}_y'.lower()] + self.right_ear_heads = [f'{right_ear_name}_x'.lower(), f'{right_ear_name}_y'.lower()] + self.tail_base_heads = [f'{tail_base_name}_x'.lower(), f'{tail_base_name}_y'.lower()] + self.required_field = self.nose_heads + self.left_ear_heads + self.right_ear_heads + self.tail_base_heads + check_int(name='time_window', value=time_window, min_value=1) + check_int(name='movement_threshold', value=movement_threshold, min_value=1) + self.save_dir = save_dir + if self.save_dir is None: + self.save_dir = os.path.join(self.logs_path, f'freezing_data_time_{time_window}s_{self.datetime}') + os.makedirs(self.save_dir) + else: + check_if_dir_exists(in_dir=self.save_dir) + self.time_window, self.movement_threshold = time_window, movement_threshold + self.movement_threshold, self.shortest_bout = movement_threshold, shortest_bout + self.run() + + def run(self): + agg_results = pd.DataFrame(columns=['VIDEO', 'FREEZING FRAMES', 'FREEZING TIME (S)', 'FREEZING BOUT COUNTS', 'FREEZING PCT OF SESSION', 'VIDEO TOTAL FRAMES', 'VIDEO TOTAL TIME (S)']) + agg_results_path = os.path.join(self.save_dir, 'aggregate_freezing_results.csv') + check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths) + for file_cnt, file_path in enumerate(self.data_paths): + video_name = get_fn_ext(filepath=file_path)[1] + print(f'Analyzing {video_name}...') + save_file_path = os.path.join(self.save_dir, f'{video_name}.csv') + df = read_df(file_path=file_path, file_type='csv').reset_index(drop=True) + _, px_per_mm, fps = read_video_info(vid_info_df=self.video_info_df, video_name=video_name) + df.columns = [str(x).lower() for x in df.columns] + check_valid_dataframe(df=df, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.required_field) + nose_shifted = FeatureExtractionMixin.create_shifted_df(df[self.nose_heads]) + nose_1, nose_2 = nose_shifted.iloc[:, 0:2].values, nose_shifted.iloc[:, 2:4].values + nose_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=nose_1[:, 0].flatten(), bp_2_x=nose_2[:, 0].flatten(), bp_1_y=nose_1[:, 1].flatten(), bp_2_y=nose_2[:, 1].flatten(), px_per_mm=px_per_mm) + tail_base_shifted = FeatureExtractionMixin.create_shifted_df(df[self.tail_base_heads]) + tail_base_shifted_1, tail_base_shifted_2 = tail_base_shifted.iloc[:, 0:2].values, tail_base_shifted.iloc[:, 2:4].values + tail_base_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=tail_base_shifted_1[:, 0].flatten(), bp_2_x=tail_base_shifted_2[:, 0].flatten(), bp_1_y=tail_base_shifted_1[:, 1].flatten(), 
+            left_ear_arr = df[self.left_ear_heads].values.astype(np.int64)
+            right_ear_arr = df[self.right_ear_heads].values.astype(np.int64)
+            nape_arr = pd.DataFrame(FeatureExtractionMixin.find_midpoints(bp_1=left_ear_arr, bp_2=right_ear_arr, percentile=np.float64(0.5)), columns=[NAPE_X, NAPE_Y])
+            nape_shifted = FeatureExtractionMixin.create_shifted_df(nape_arr[[NAPE_X, NAPE_Y]])
+            nape_shifted_1, nape_shifted_2 = nape_shifted.iloc[:, 0:2].values, nape_shifted.iloc[:, 2:4].values
+            nape_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=nape_shifted_1[:, 0].flatten(), bp_2_x=nape_shifted_2[:, 0].flatten(), bp_1_y=nape_shifted_1[:, 1].flatten(), bp_2_y=nape_shifted_2[:, 1].flatten(), px_per_mm=px_per_mm)
+            movement = np.hstack([nose_movement.reshape(-1, 1), nape_movement.reshape(-1, 1), tail_base_movement.reshape(-1, 1)])
+            mean_movement = np.mean(movement, axis=1)
+            mm_s = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=mean_movement.astype(np.float32), window_sizes=np.array([1], dtype=np.float64), sample_rate=int(fps), statistics=typed.List(["sum"]))[0].flatten()
+            freezing_idx = np.argwhere(mm_s <= self.movement_threshold).astype(np.int32).flatten()
+            df[FREEZING] = 0
+            df.loc[freezing_idx, FREEZING] = 1
+            df = plug_holes_shortest_bout(data_df=df, clf_name=FREEZING, fps=fps, shortest_bout=self.shortest_bout)
+            bouts = detect_bouts(data_df=df, target_lst=[FREEZING], fps=fps)
+            bouts = bouts[bouts['Bout_time'] >= self.time_window]
+            if len(bouts) > 0:
+                freezing_idx = list(bouts.apply(lambda x: list(range(int(x["Start_frame"]), int(x["End_frame"]) + 1)), 1))
+                freezing_idx = [x for xs in freezing_idx for x in xs]
+                df.loc[freezing_idx, FREEZING] = 1
+            else:
+                freezing_idx = []
+            df.to_csv(save_file_path)
+            agg_results.loc[len(agg_results)] = [video_name, len(freezing_idx), round(len(freezing_idx) / fps, 4), len(bouts), round((len(freezing_idx) / len(df)) * 100, 4), len(df), round(len(df)/fps, 2)]
+
+        agg_results.to_csv(agg_results_path)
+        stdout_success(msg=f'Results saved in {self.save_dir} directory.')
+
+#
+# FreezingDetector(data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location',
+#                  config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini")
\ No newline at end of file
diff --git a/simba/sandbox/frm_rotator_cuda.py b/simba/sandbox/frm_rotator_cuda.py
new file mode 100644
index 000000000..0ad51c4d7
--- /dev/null
+++ b/simba/sandbox/frm_rotator_cuda.py
@@ -0,0 +1,27 @@
+import numpy as np
+from typing import Optional
+from simba.utils.read_write import read_img_batch_from_video_gpu, read_df
+from simba.utils.data import egocentrically_align_pose_numba
+
+
+def egocentric_frm_rotator_cuda(imgs: np.ndarray,
+                                rotation_matrices: np.ndarray,
+                                batch_size: int = 1000):
+    # TODO: unimplemented stub; intended to batch-rotate frames on the GPU using
+    # the per-frame matrices from egocentrically_align_pose_numba.
+    pass
+
+
+DATA_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/data/501_MA142_Gi_Saline_0513.csv"
+VIDEO_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513.mp4"
+SAVE_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513_rotated.mp4"
+ANCHOR_LOC = np.array([300, 300])
+
+df = read_df(file_path=DATA_PATH, file_type='csv')
+bp_cols = [x for x in df.columns if not x.endswith('_p')]
+data = df[bp_cols].values.reshape(len(df), int(len(bp_cols) / 2), 2).astype(np.int64)
+data, centers, rotation_matrices = egocentrically_align_pose_numba(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=180)
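+# Editor's sketch (assumption, not verified): a CPU reference for what the planned
+# CUDA rotator should produce, assuming `rotation_matrices` holds one 2x2 rotation
+# per frame and `centers` the per-frame anchor points, composed into a 2x3 affine
+# matrix for cv2.warpAffine (applied to the frames read below):
+# >>> import cv2
+# >>> M = np.hstack([rotation_matrices[0], (ANCHOR_LOC - rotation_matrices[0] @ centers[0]).reshape(2, 1)]).astype(np.float32)
+# >>> rotated_frm = cv2.warpAffine(imgs[0], M, (imgs[0].shape[1], imgs[0].shape[0]))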
+imgs = read_img_batch_from_video_gpu(video_path=VIDEO_PATH, start_frm=0, end_frm=100)
+imgs = np.stack(list(imgs.values()), axis=0)
diff --git a/simba/sandbox/gantt_plotly.py b/simba/sandbox/gantt_plotly.py
new file mode 100644
index 000000000..6b932667d
--- /dev/null
+++ b/simba/sandbox/gantt_plotly.py
@@ -0,0 +1,143 @@
+import pandas as pd
+import numpy as np
+import plotly.express as px
+import PIL
+import io
+import cv2
+from typing import Optional, Tuple, Union
+from simba.utils.data import create_color_palettes, detect_bouts
+from simba.utils.checks import check_float, check_int, check_instance, check_valid_dataframe
+from simba.utils.read_write import seconds_to_timestamp, read_df
+from simba.utils.errors import InvalidInputError
+
+def gantt_plotly(bouts_df: pd.DataFrame,
+                 img_size: Optional[Tuple[int, int]] = (640, 480),
+                 bg_clr: Optional[str] = 'white',
+                 title: Optional[str] = None,
+                 font_size: Optional[int] = 12,
+                 y_lbl: Optional[str] = 'Event',
+                 x_lbl: Optional[str] = 'Session time (HH:MM:SS)',
+                 show_grid: Optional[bool] = True,
+                 color_palette: Optional[str] = 'Set1',
+                 x_length: Optional[int] = None,
+                 bar_height: Optional[float] = 0.4,
+                 x_tick_spacing: Optional[Union[int, float]] = 30,
+                 marker_line_color: Optional[str] = 'black',
+                 marker_line_width: Optional[float] = 0.1,
+                 time_format: Optional[str] = 'HH:MM:SS',
+                 tick_angle: Optional[int] = 45,
+                 font: Optional[str] = 'Georgia') -> np.ndarray:
+
+    """
+    Generates a Gantt chart using Plotly to visualize bout events over time.
+
+    Creates a horizontal bar chart where each row represents an event (e.g., animal behavior) over time, with extensive customization options.
+
+    .. image:: _static/img/gantt_plotly.webp
+       :width: 600
+       :align: center
+
+    :param pd.DataFrame bouts_df: A pandas DataFrame containing the bout events data. Can be created by :func:`~simba.utils.data.detect_bouts`.
+    :param Optional[Tuple[int, int]] img_size: The size of the output image as (width, height). Default: (640, 480).
+    :param Optional[str] bg_clr: The color of the background as a string. Default: white.
+    :param Optional[str] title: The title of the image. Default: None.
+    :param Optional[int] font_size: The font size for the title, labels, and ticks. Default: 12.
+    :param Optional[str] y_lbl: The label on the y-axis. Default: 'Event'.
+    :param Optional[str] x_lbl: The label on the x-axis. Default: 'Session time (HH:MM:SS)'.
+    :param Optional[bool] show_grid: Whether to show the grid on the plot. Default is True.
+    :param Optional[str] color_palette: The name of the color palette to use for event bars. Default: ``Set1``.
+    :param Optional[int] x_length: The maximum value of the x-axis. If None, the x-axis is determined based on the data.
+    :param Optional[float] bar_height: The height of each bar in the Gantt chart. Default is 0.4.
+    :param Optional[Union[int, float]] x_tick_spacing: The spacing between x-axis ticks. Can be either an integer or float. If float, it is treated as a fraction of the x-axis range (between 0 and 1). If integer, then interpreted as seconds. Default is 30.
+    :param Optional[str] marker_line_color: Color of the bar borders. Default is 'black'.
+    :param Optional[float] marker_line_width: Width of the bar borders. Default is 0.1.
+    :param Optional[str] time_format: Format for x-axis tick labels. Supported formats are 'HH:MM:SS' (converts seconds to hours:minutes:seconds format) and 'seconds' (displays tick labels as raw seconds). Default is 'HH:MM:SS'.
+    :param Optional[int] tick_angle: Angle for rotating x-axis tick labels. Default is 45 degrees.
+    :param Optional[str] font: Font name. E.g., "Arial", "Verdana", "Helvetica", "Tahoma", "Trebuchet MS", "Times New Roman", "Georgia", "Courier New", "Lucida Console". Default is 'Georgia'.
+    :return: A Gantt chart image as a NumPy array (dtype=np.uint8).
+    :rtype: np.ndarray
+
+    :example:
+    >>> FILE_PATH = r"D:\troubleshooting\mitra\project_folder\logs\all\592_MA147_CNO1_0515.csv"
+    >>> data = read_df(file_path=FILE_PATH, file_type='csv')
+    >>> bouts_df = detect_bouts(data_df=data, target_lst=list(data.columns), fps=30)
+    >>> img = gantt_plotly(bouts_df=bouts_df, x_tick_spacing=120, color_palette='Set1', img_size=(1200, 700), font_size=32, show_grid=True)
+    """
+
+    check_valid_dataframe(df=bouts_df, source=f'{gantt_plotly.__name__} bouts_df', required_fields=['Start_time', 'Bout_time', 'Event'])
+    check_instance(source=f'{gantt_plotly.__name__} x_tick_spacing', instance=x_tick_spacing, accepted_types=(int, float,))
+    if isinstance(x_tick_spacing, float):
+        check_float(name=f'{gantt_plotly.__name__} x_tick_spacing', value=x_tick_spacing, min_value=10e-6, max_value=1.0, raise_error=True)
+        if x_length is not None:
+            x_tick_spacing = int(x_length * x_tick_spacing)
+        else:
+            x_tick_spacing = int(bouts_df['End Time'].max() * x_tick_spacing)
+    elif isinstance(x_tick_spacing, int):
+        check_int(name=f'{gantt_plotly.__name__} x_tick_spacing', value=x_tick_spacing, min_value=1, raise_error=True)
+    check_int(name=f'{gantt_plotly.__name__} tick_angle', value=tick_angle, min_value=0, max_value=360)
+    last_bout_time = bouts_df['Start_time'].max() + bouts_df['Bout_time'].max()
+    if x_length is not None:
+        last_bout_time = max(x_length, last_bout_time)
+    tickvals = np.arange(0, last_bout_time, x_tick_spacing)
+    if time_format == 'seconds':
+        ticktext = [str(x) for x in tickvals]
+    elif time_format == 'HH:MM:SS':
+        ticktext = []
+        for val in tickvals:
+            ticktext.append(seconds_to_timestamp(val))
+    else:
+        raise InvalidInputError(msg=f'{time_format} is not a valid time_format', source=gantt_plotly.__name__)
+    width, height = img_size
+    unique_event_cnt = len(list(bouts_df['Event'].unique()))
+    clrs = create_color_palettes(no_animals=1, map_size=unique_event_cnt, cmaps=[color_palette])[0]
+    clrs = dict(zip(bouts_df["Event"].unique(), clrs))
+    clrs = {k: f'rgb{tuple(v)}' for k, v in clrs.items()}
+    fig = px.bar(bouts_df, base="Start_time", x="Bout_time", y="Event", color="Event", color_discrete_map=clrs, orientation="h")
+    fig.update_traces(width=bar_height, marker_line_color=marker_line_color, marker_line_width=marker_line_width)
+    fig.update_layout(width=width,
+                      height=height,
+                      title=title,
+                      yaxis_type="category",
+                      showlegend=False,
+                      font=dict(family=font),
+                      xaxis=dict(title=dict(text=x_lbl, font=dict(size=font_size, family=font)),
+                                 showline=True,
+                                 linecolor="black",
+                                 tickvals=tickvals,
+                                 ticktext=ticktext,
+                                 tickangle=tick_angle,
+                                 tickfont=dict(size=font_size),
+                                 showgrid=show_grid,
+                                 range=[0, last_bout_time]),
+                      yaxis=dict(title=dict(text=y_lbl, font=dict(size=font_size)),
+                                 tickfont=dict(size=font_size),
+                                 showline=True,
+                                 linecolor="black",
+                                 showgrid=show_grid))
+
+    if bg_clr is not None:
+        fig.update_layout(plot_bgcolor=bg_clr)
+    img_bytes = fig.to_image(format="png")
+    img = PIL.Image.open(io.BytesIO(img_bytes))
+    fig = None
+    return np.array(img).astype(np.uint8)
+
+
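+# Note: fig.to_image(format="png") requires a static-image export backend for
+# plotly (the `kaleido` package, or the legacy `orca`) to be installed.
+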
r"D:\troubleshooting\mitra\project_folder\logs\all\592_MA147_CNO1_0515.csv" +# +# data = read_df(file_path=FILE_PATH, file_type='csv') +# bouts_df = detect_bouts(data_df=data, target_lst=list(data.columns), fps=30) +# img = gantt_plotly(bouts_df=bouts_df, x_tick_spacing=120, color_palette='Set1', img_size=(1200, 700), font_size=32, show_grid=True, bg_clr='white') +# cv2.imshow('asdasd', img) +# cv2.waitKey(1) \ No newline at end of file diff --git a/simba/sandbox/gantt_update.py b/simba/sandbox/gantt_update.py new file mode 100644 index 000000000..5f9eabf78 --- /dev/null +++ b/simba/sandbox/gantt_update.py @@ -0,0 +1,26 @@ +import matplotlib.pyplot as plt +import pandas as pd + + + + + + + +df = pd.DataFrame({ + 'task': ['Task A', 'Task B', 'Task C'], + 'start': [0, 2, 5], + 'end': [3, 7, 8] +}) + +# Create figure and axes +fig, ax = plt.subplots() + +# Plot Gantt chart +ax.broken_barh([(start, end - start) for start, end in zip(df['start'], df['end'])], + (0, 1), facecolors='blue') +ax.set_yticks([0.5]) +ax.set_yticklabels(df['task']) +ax.set_xlabel('Time') +ax.set_title('Gantt Chart') +plt.show() \ No newline at end of file diff --git a/simba/sandbox/geometry_9.py b/simba/sandbox/geometry_9.py new file mode 100644 index 000000000..d40ec06a8 --- /dev/null +++ b/simba/sandbox/geometry_9.py @@ -0,0 +1,57 @@ +import pandas as pd + +from simba.video_processors.video_processing import video_bg_subtraction_mp, create_average_frm, read_frm_of_video +import matplotlib +import matplotlib.pyplot as plt +dpi = matplotlib.rcParams["figure.dpi"] +import cv2 + +df = pd.read_parquet('/Users/simon/Downloads/from-ds-team_test_embedding.parquet') + +VIDEO_PATH = cv2.VideoCapture('/Users/simon/Downloads/webm_20240715111159/Ant Test.webm') + + +#VIDEO_PATH = '/Users/simon/Downloads/Ant Test.mp4' +input_frm = read_frm_of_video(video_path=VIDEO_PATH, frame_index=1) + +height, width, depth = input_frm.shape +figsize = width / float(dpi), height / float(dpi) +plt.figure(figsize=figsize) +plt.axis("off") +plt.imshow(input_frm) +plt.show() + +avg_frm = create_average_frm(video_path=VIDEO_PATH, verbose=False) +#height, width, depth = avg_frm.shape +figsize = width / float(dpi), height / float(dpi) + + + +plt.figure(figsize=figsize) +plt.axis("off") +plt.imshow(avg_frm) +plt.show() + + + + + +video_bg_subtraction_mp(video_path=VIDEO_PATH, + save_path='/Users/simon/Desktop/1_LH_clipped_cropped_bg_removed.mp4', + verbose=False) + + + + + + + + + + + + + + +#video_bg_substraction_mp(video_path=VIDEO_PATH, save_path=) + diff --git a/simba/sandbox/get_convex_hull_cuda.py b/simba/sandbox/get_convex_hull_cuda.py new file mode 100644 index 000000000..ee9b98c47 --- /dev/null +++ b/simba/sandbox/get_convex_hull_cuda.py @@ -0,0 +1,13 @@ +from simba.utils.read_write import read_df +from simba.data_processors.cuda.geometry import get_convex_hull + + + + + + +video_path = r"C:/troubleshooting/mitra/project_folder/videos/501_MA142_Gi_CNO_0514.mp4" +data_path = r"C:/troubleshooting/mitra/project_folder/csv/outlier_corrected_movement_location/501_MA142_Gi_CNO_0514 - test.csv" +df = read_df(file_path=data_path, file_type='csv') +frame_data = df.values.reshape(len(df), -1, 2) +x = get_convex_hull(frame_data) \ No newline at end of file diff --git a/simba/sandbox/gibbs_sampling.py b/simba/sandbox/gibbs_sampling.py new file mode 100644 index 000000000..c28992c25 --- /dev/null +++ b/simba/sandbox/gibbs_sampling.py @@ -0,0 +1,72 @@ +import os +from typing import Union +import numpy as np +import pandas as pd + + +class GibbSampler(): + + 
+
+    def __init__(self,
+                 data: np.ndarray,
+                 save_path: Union[str, os.PathLike],
+                 sequence_length: int = 4,
+                 iterations: int = 1500,
+                 epochs: int = 2,
+                 stop_val: float = 0.001,
+                 pseudo_number: float = 10e-6,
+                 plateau_val: int = 50):
+
+        self.unique_vals = np.unique(data.flatten())
+        self.target_p = 1 * sequence_length + (pseudo_number * (sequence_length + 1))
+        self.holdout_fields = [f"H_{i}" for i in range(sequence_length)]
+        self.non_holdout_cols = [f"C_{i}" for i in range(sequence_length)]
+        self.out_cols = [f"Behavior_{i+1}" for i in range(sequence_length)]
+        self.data, self.pseudo_num, self.sequence_len, self.plateau_val = (data, pseudo_number, sequence_length, plateau_val)
+        self.epochs, self.iterations, self.stop_val = epochs, iterations, stop_val
+        self.save_path = save_path
+
+    def run(self):
+        unique_elements, counts = np.unique(self.data, return_counts=True)
+        counts_dict = dict(zip(unique_elements, counts))
+        counts_dict = {k: (v / np.sum(counts)) for k, v in counts_dict.items()}
+
+        for epoch in range(self.epochs):
+            holdout_idx = np.random.randint(0, self.data.shape[0]-1)
+            start_pos = np.random.randint(low=0, high=self.data.shape[1]-self.sequence_len, size=(self.data.shape[0]))
+            end_pos = np.array([x+self.sequence_len for x in start_pos])
+            slices = np.array([[x, y] for x, y in zip(start_pos, end_pos)])
+            hold_out_obs = self.data[holdout_idx]
+            epoch_sample = np.delete(self.data, [holdout_idx], axis=0)
+            sequences = np.empty(shape=(epoch_sample.shape[0], self.sequence_len), dtype=self.data.dtype)
+            for idx in range(epoch_sample.shape[0]):
+                i = slices[idx]
+                sequences[idx] = epoch_sample[idx][i[0]: i[1]]
+            for iteration in range(0, self.iterations):
+                # TODO: __make_probability_table is not yet implemented; run() will
+                # raise AttributeError here until it is written.
+                probability_df = self.__make_probability_table(epoch_sample, self.unique_vals)
+
+
+data = pd.read_csv(r"C:\projects\simba\simba\tests\data\sample_data\gibbs_sample_cardinal.csv", index_col=0).values
+sampler = GibbSampler(data=data, save_path=r'C:\Users\sroni\OneDrive\Desktop\gibbs.csv', iterations=5)
+sampler.run()
\ No newline at end of file
diff --git a/simba/sandbox/github_issues.json b/simba/sandbox/github_issues.json
new file mode 100644
index 000000000..8640164f2
--- /dev/null
+++ b/simba/sandbox/github_issues.json
@@ -0,0 +1,15563 @@
+[ + { + "title": "Should preprocessed videos be those used in SLEAP?", + "body": "I have a question regarding the video preprocessing step for simba. Should videos be preprocessed as shown in the simba tutorial, then run through SLEAP, and then also be used in simba? In other words, is the expectation that the processed videos are the ones used for pose estimation and then behavioral classification?", + "user": "SnowySnowySnowy", + "reaction_cnt": 0, + "created_at": "2024-07-15T16:56:21Z", + "updated_at": "2024-07-15T17:53:45Z", + "author": "SnowySnowySnowy", + "comments": [ + { + "body": "Hi @SnowySnowySnowy !\r\n\r\nShort answer Yes. \r\n\r\nTLDR: Preprocessing is for when recordings are made in less-than-optimal conditions for pose-estimation and is used to improve pose-estimation. Better input data gives between classifications. Often parts captured in videos are areas outside arena which animal can never reach, some cable that could be mistaken for a tail is there, or early captured portions can feature the experimenters hands in images etc., mucking up the pose-estimation performance and later classifications. 
The resolution and FPS may be also unnecessarily high introducing long pose-estimation and classification runtimes and unnecessary large disk space requirements. \r\n\r\nMuch can be fixed by clipping/cropping/re-sampling the videos, and you want to do this before performing pose-estimation. \r\n\r\nIf you performed pose-estimation, then resampled the videos, and imported the resampled videos but the original pose-estimation to SimBA, there could be issues in visualization components in SimBA. For example, the nose could be pose-estimated at pixel location (1600, 800). However, your cropped videos are only 800 by 600 large so 1600x600 is outside the image. If you re-sampled the FPS then there would be a similar mixup in time domain so you see the pose-estimation moving much quicker then the actual animal etc.\r\n", + "created_at": "2024-07-15T17:53:44Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Determination of classifier thresholds", + "body": "Hello,\r\n\r\nI'd like to learn if you are using any systematic method for determining the thresholds? Before running machine model, I am creating the probability plots and checking the frames. I am setting a value just with my opinion. When I check the sklearn results I am still seeing some positive or negative errors. \r\nIs there any statistical approach are you using or suggesting for setting the thresholds?\r\n\r\nThanks!", + "user": "qarden", + "reaction_cnt": 0, + "created_at": "2024-07-10T14:18:29Z", + "updated_at": "2024-07-10T15:49:28Z", + "author": "qarden", + "comments": [ + { + "body": "Hi @qarden !\r\n\r\nThe discrimination threshold titration will only help in some use-cases: \r\n\r\n * If the classifications are generally too liberal (too many false positives), **increase** the classification threshold. However, this will potentially come at the expense of introducing false negatives. \r\n\r\n * If the classifications are generally too conservative (too many false negatives), **decrease** the classification threshold. However, this will potentially come at the expense of introducing false positives.\r\n\r\nIf it is the case that you cannot get satisfactory results by titrating the discrimination threshold, it is possible that the behavior videos you are visualizing differ somewhat from the behavior in the videos you annotated and used to train the classifier? Do you see anything particular with the instances of the behavior where the classifier misses it and wrongly classifies it as behavior present? Is it possible to include these behavioral events when the classifier gets it wrong as correctly annotated examples in your annotated dataset and retrain the classifier with this additional information? 
\r\n\r\n\r\n", + "created_at": "2024-07-10T15:49:28Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Trouble installing simba", + "body": "I have followed instructions to install Simba using Anaconda but repeatedly see this error message:\r\n\r\n(simba) C:\\Users\\gabie>simba\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\gabie\\.conda\\envs\\simba\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Users\\gabie\\.conda\\envs\\simba\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\gabie\\.conda\\envs\\simba\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"C:\\Users\\gabie\\.conda\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 17, in \r\n from tabulate import tabulate\r\nModuleNotFoundError: No module named 'tabulate'\r\n\r\nInstalling dependencies manually, using different python versions, installing Shapely first then simba without tf, and other fixes suggested before did not fix the issue. When I check which packages are installed, tabulate is always present:\r\n\r\n(simba) C:\\Users\\gabie>conda list\r\n# packages in environment at C:\\Users\\gabie\\.conda\\envs\\simba:\r\n#\r\n# Name Version Build Channel\r\nattrs 22.2.0 pypi_0 pypi\r\nblas 1.0 mkl\r\nbrotli 1.1.0 pypi_0 pypi\r\ncefpython3 66.0 pypi_0 pypi\r\ncertifi 2021.5.30 py36haa95532_0\r\nclick 8.0.4 pypi_0 pypi\r\ncloudpickle 2.2.1 pypi_0 pypi\r\ncolorama 0.4.5 pypi_0 pypi\r\ncolour 0.1.5 pypi_0 pypi\r\ncycler 0.11.0 pypi_0 pypi\r\ndash 1.14.0 pypi_0 pypi\r\ndash-color-picker 0.0.1 pypi_0 pypi\r\ndash-colorscales 0.0.4 pypi_0 pypi\r\ndash-core-components 1.10.2 pypi_0 pypi\r\ndash-html-components 1.0.3 pypi_0 pypi\r\ndash-renderer 1.6.0 pypi_0 pypi\r\ndash-table 4.9.0 pypi_0 pypi\r\ndask 2021.3.0 pypi_0 pypi\r\ndataclasses 0.8 pypi_0 pypi\r\ndecorator 4.4.2 pypi_0 pypi\r\ndtreeviz 0.8.1 pypi_0 pypi\r\neli5 0.10.1 pypi_0 pypi\r\nflask 2.0.3 pypi_0 pypi\r\nflask-compress 1.15 pypi_0 pypi\r\nfuture 1.0.0 pypi_0 pypi\r\ngeos 3.8.0 h33f27b4_0\r\nh5py 2.9.0 pypi_0 pypi\r\nimageio 2.9.0 pypi_0 pypi\r\nimbalanced-learn 0.6.2 pypi_0 pypi\r\nimblearn 0.0 pypi_0 pypi\r\nimgaug 0.4.0 pypi_0 pypi\r\nimportlib-metadata 4.8.3 pypi_0 pypi\r\nimutils 0.5.2 pypi_0 pypi\r\nintel-openmp 2023.1.0 h59b6b97_46320\r\nitsdangerous 2.0.1 pypi_0 pypi\r\njinja2 3.0.3 pypi_0 pypi\r\njoblib 1.1.1 pypi_0 pypi\r\nkiwisolver 1.3.1 pypi_0 pypi\r\nllvmlite 0.31.0 pypi_0 pypi\r\nmarkupsafe 2.0.1 pypi_0 pypi\r\nmatplotlib 3.0.3 pypi_0 pypi\r\nmkl 2020.2 256\r\nmkl-service 2.3.0 py36h196d8e1_0\r\nmkl_fft 1.3.0 py36h46781fe_0\r\nmkl_random 1.1.1 py36h47e9c7a_0\r\nnetworkx 2.5.1 pypi_0 pypi\r\nnumba 0.48.0 pypi_0 pypi\r\nnumexpr 2.6.9 pypi_0 pypi\r\nnumpy 1.18.1 pypi_0 pypi\r\nopencv-python 3.4.5.20 pypi_0 pypi\r\npandas 0.25.3 pypi_0 pypi\r\npillow 5.4.1 pypi_0 pypi\r\npip 21.2.2 py36haa95532_0\r\nplotly 4.9.0 pypi_0 pypi\r\npyarrow 0.17.1 pypi_0 pypi\r\npyparsing 3.1.2 pypi_0 pypi\r\npython 3.6.13 h3758d61_0\r\npython-dateutil 2.9.0.post0 pypi_0 pypi\r\npython-graphviz 0.11 pypi_0 pypi\r\npytz 2024.1 pypi_0 pypi\r\npywavelets 1.1.1 pypi_0 pypi\r\npyyaml 5.3.1 pypi_0 pypi\r\nretrying 1.3.4 pypi_0 pypi\r\nscikit-image 0.14.2 pypi_0 pypi\r\nscikit-learn 0.22.2 pypi_0 pypi\r\nscipy 1.1.0 pypi_0 pypi\r\nseaborn 0.9.0 pypi_0 pypi\r\nsetuptools 58.0.4 py36haa95532_0\r\nshap 0.35.0 pypi_0 pypi\r\nshapely 1.7.1 py36h06580b3_0\r\nsimba-uw-no-tf 1.3.0 pypi_0 pypi\r\nsix 1.16.0 pyhd3eb1b0_1\r\nsqlite 3.45.3 h2bbff1b_0\r\nstatsmodels 0.9.0 pypi_0 
pypi\r\ntabulate 0.8.3 pypi_0 pypi\r\ntoolz 0.12.0 pypi_0 pypi\r\ntqdm 4.30.0 pypi_0 pypi\r\ntyping-extensions 4.1.1 pypi_0 pypi\r\nvc 14.2 h2eaa2aa_1\r\nvs2015_runtime 14.29.30133 h43f2093_3\r\nwerkzeug 2.0.3 pypi_0 pypi\r\nwheel 0.37.1 pyhd3eb1b0_0\r\nwincertstore 0.2 py36h7fe50ca_0\r\nwxpython 4.0.4 pypi_0 pypi\r\nxgboost 0.90 pypi_0 pypi\r\nyellowbrick 0.9.1 pypi_0 pypi\r\nzipp 3.6.0 pypi_0 pypi\r\nzstandard 0.20.0 pypi_0 pypi\r\n\r\nI am using Windows 10 and python 3.6.13. Thanks for your help! ", + "user": "GKNM995", + "reaction_cnt": 0, + "created_at": "2024-06-28T20:56:03Z", + "updated_at": "2024-07-11T12:50:44Z", + "author": "GKNM995", + "comments": [ + { + "body": "Hi @GKNM995 - not completely sure what is going on, but appear tabulate is missing or SimBA can't find it in your conda environment. It could possibly related to installing different simba versions in the same environment?\r\n\r\nWhat command did you use to install SimBA? Did you use `pip install simba-uw-tf-dev` ? \r\n\r\n", + "created_at": "2024-06-28T21:46:41Z", + "author": "sronilsson" + }, + { + "body": "Hi! I used pip install simba-uw-tf-dev. I only installed it once in this environment. ", + "created_at": "2024-06-28T21:53:50Z", + "author": "GKNM995" + }, + { + "body": "if you do `conda activate simba` and open a python in the environment and import `tabulate` does it import OK like below? \r\n\r\nJust trying to figure out if issue is with python environment or with simba:\r\n\r\n\"image\"\r\n", + "created_at": "2024-06-28T21:58:15Z", + "author": "sronilsson" + }, + { + "body": "It does not. I get an error message. \r\n![Screenshot 2024-06-28 150053](https://github.com/sgoldenlab/simba/assets/174150767/d6eb0cc4-cb8d-480f-a545-2e08f4eb7f35)\r\n\r\n\r\n", + "created_at": "2024-06-28T22:01:13Z", + "author": "GKNM995" + }, + { + "body": "Alright: what if you run `pip install tabulate==0.8.3` and then open python with `python` and try again? ", + "created_at": "2024-06-28T22:05:38Z", + "author": "sronilsson" + }, + { + "body": "I tried to install it as instructed but conda wouldn't let me, so I uninstalled, then reinstalled it. Weirdly, it prints the same error message as before. \r\n![Screenshot 2024-06-28 151145](https://github.com/sgoldenlab/simba/assets/174150767/7bb9815f-a2fb-4391-a53b-d9e7d3eb4ac0)\r\n", + "created_at": "2024-06-28T22:12:06Z", + "author": "GKNM995" + }, + { + "body": "Yes, I see that a different version of simba is installed in the same environment: `simba-uw-no-tf`. I would try in a new conda environment. Or, alternatively, do `pip uninstall simba-uw-no-tf` and then try again?\r\n\r\n", + "created_at": "2024-06-28T22:22:09Z", + "author": "sronilsson" + }, + { + "body": "Got rid of the old environment and did the following:\r\n\r\nconda create --name simba python=3.6\r\nconda activate simba\r\npip install simba-uw-tf-dev\r\npip uninstall shapely\r\nconda install shapely\r\nsimba\r\n\r\nI get the same error. \r\n![Screenshot 2024-06-28 153258](https://github.com/sgoldenlab/simba/assets/174150767/d395de6b-123b-466d-a810-bfd3b9c7dc8a)\r\n", + "created_at": "2024-06-28T22:33:21Z", + "author": "GKNM995" + }, + { + "body": "And now, if you run ` pip install tabulate==0.8.3` and then open python with python and try again with `import tabulate`? 
", + "created_at": "2024-06-28T22:42:04Z", + "author": "sronilsson" + }, + { + "body": "This is what happens (if I don't uninstall it before trying to install):\r\n![Screenshot 2024-06-28 154448](https://github.com/sgoldenlab/simba/assets/174150767/857e72f3-7ec9-4dd3-b514-7697dc32c547)\r\n", + "created_at": "2024-06-28T22:45:46Z", + "author": "GKNM995" + }, + { + "body": "This happens if I uninstall then reinstall it:\r\n![Screenshot 2024-06-28 154650](https://github.com/sgoldenlab/simba/assets/174150767/34092c0a-37dd-42d2-a1a0-ce1d3ed6fb0e)\r\n", + "created_at": "2024-06-28T22:47:23Z", + "author": "GKNM995" + }, + { + "body": "Nuts :) Sorry, I have to get back to you - I have to step away from the computer for now.", + "created_at": "2024-06-28T22:53:12Z", + "author": "sronilsson" + }, + { + "body": "Ok. Gonna step away from it as well", + "created_at": "2024-06-28T23:00:45Z", + "author": "GKNM995" + }, + { + "body": "I tried a few ways to recreate error but I've failed and not come across this before. \r\n\r\nCould there be some clash between the main python and python in the conda environment? \r\n\r\nE.g., if you do `conda deactivate` and step out of conda env, then type `pip install tabulate==0.8.3`, and open main python with `python`, can you then `import tabulate` ? If `tabulate` is installed in the main python, is it also found in the conda `simba` environment? \r\n\r\nIf none of the above makes anything clearer, can you tell me which versions of setuptools (`pip show setuptools`), pip, (`pip show pip`) and conda (`conda --version`) you have and I can make sure I have the same setup as you when testing? \r\n\r\n\r\n`\r\n\r\n\r\n\r\n", + "created_at": "2024-06-29T14:08:33Z", + "author": "sronilsson" + }, + { + "body": "When I install tabulate in base conda, I still can't import it for some reason (see screenshot). However, it is listed as intalled in both the base conda and simba environments. \r\n![Screenshot 2024-06-29 122151](https://github.com/sgoldenlab/simba/assets/174150767/a2c7e4d7-722d-4c75-b6d6-3445212f0948)\r\n\r\n\r\nHere are the versions I am using: \r\nConda version: 24.3.0\r\nSetuptools: 58.0.4\r\npip: 21.3.1\r\n", + "created_at": "2024-06-29T19:27:48Z", + "author": "GKNM995" + }, + { + "body": "Hi @GKNM995 - from the screenshot, it looks like `tabulate` DOES install correctly in the base environment. However, in your base environment, you have python 3.10, and you are installing a specific version of tabulate that does not fit with `collections` package which comes with base python 3.10. \r\n\r\nYou can try with just `pip install tabulate` to get the latest version and I suspect it will work in base. \r\n\r\nThe only thing that is different from my test environment is the conda version, I have 23.3.1. \r\n\r\nHow about [THIS](https://stackoverflow.com/a/67929906) suggestion? If you install tabulate with `conda install -c conda-forge tabulate` in your simba conda environment - does that change anything?\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2024-06-30T14:30:23Z", + "author": "sronilsson" + }, + { + "body": "You were right! Tabulate was successfully imported into the base environment when I reinstalled it. It was also successfully installed in the simba environment. 
\r\n![image](https://github.com/sgoldenlab/simba/assets/174150767/4c99a772-ffee-4efb-ac1e-3e796177b12a)\r\n\r\nHowever, I got new errors: \r\n![image](https://github.com/sgoldenlab/simba/assets/174150767/c1f8f056-7795-44d5-a7dd-707ce5482407)\r\nI manually installed dtreeviz, and it was installed as well. \r\n\r\nThen this happened:\r\n![image](https://github.com/sgoldenlab/simba/assets/174150767/f5fa7db5-6fac-45ea-8684-6005a47878d6)\r\n\r\nNot sure how to proceed from here. Thank you so much for your help!", + "created_at": "2024-07-03T02:49:06Z", + "author": "GKNM995" + }, + { + "body": "Hi @GKNM995 - yes it seems possible it wasn't just `tabulate` that is not accessable in the simba conda environment, but other dependencies too. And the version of `yellowbrick` and `scikit` don't seem to agree with each other. I'm guessing, but maybe there is something going on with the paths - where conda points to some other location for the dependencies even when you have activated your simba environment? \r\n\r\nWhat about if instead of conda, you use the python in-built virtual environment creator `venv` as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/installation.md#installing-simba-with-venv) to create a simba enviornment, does that work?\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2024-07-03T12:38:01Z", + "author": "sronilsson" + }, + { + "body": "Hi! Using the built-in python environment worked!! Thank you. Hoping it keeps working:)\r\n", + "created_at": "2024-07-04T22:19:29Z", + "author": "GKNM995" + }, + { + "body": "Nice! I'm not sure why that happened with conda, but if you figure it out please let me know. Also let me know if anything else comes up. ", + "created_at": "2024-07-05T13:31:54Z", + "author": "sronilsson" + }, + { + "body": "Hi! Thank you for your help with my install. I am running my first project and I keep getting this error:\r\n![image](https://github.com/sgoldenlab/simba/assets/174150767/ed68aedf-0f2c-4140-a63c-6419246c79be)\r\n\r\nWhen I check the video_info.csv file, I can find the flagged video:\r\n![image](https://github.com/sgoldenlab/simba/assets/174150767/3d39d494-5323-41f6-b5f3-075e2ffee9fb)\r\n\r\nDo you have any idea what could be going wrong?\r\n\r\nSimba version: 1.95.7\r\nPython version: 3.10\r\nWindows 10\r\n\r\nThanks for your help!", + "created_at": "2024-07-11T02:31:38Z", + "author": "GKNM995" + }, + { + "body": "Thanks @GKNM995 yes that looks odd - would you mind sharing the video_info.csv logs file here and I can take look if anything is odd?\r\n\r\nI think you can drag and drop it in the thread, or you may have to zip it first. ", + "created_at": "2024-07-11T10:13:00Z", + "author": "sronilsson" + }, + { + "body": "... the most immediate things that comes to mind is if there are any trailing whitespaces in the `HS09F_948_5mm` in the video_info.csv? E.g., does it say `HS09F_948_5mm ` rather than `HS09F_948_5mm` ?", + "created_at": "2024-07-11T12:50:43Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Using Random forest behavioral classifiers from OSF repository ", + "body": "Hi, I'm trying to run resident intruder classifiers on one imported video, ten mins long, 60fps, and over 36,000 frames. I've been able to successfully import my H5 tracking data from SLEAP. \r\nMy skeleton is SLEAP based, 11 body parts per animal. \r\n![User Defined Skeleton](https://github.com/sgoldenlab/simba/assets/169295221/cddf468e-5682-47dc-b4b4-cb3b2d21e29f)\r\n\r\nI have four questions. \r\n**1. 
Is it possible to run the following behavioral classifiers (anogenital_sniff, attack, lateral_threat, pursuit, & tail_rattle) I found on the Random forest behavioral classifiers OSF repository on my data if the body-part config is user defined? If yes, am I correct in assuming that I can use PSEUDO-Labelling for this? **\r\nhttps://osf.io/3mc7g/\r\n\r\nIf the answer to this is no, I am fine with remedying this by deleting unnecessary nodes. \r\n\r\n**2. Is this the feature extractor (from SimBA Github) that I should be using, it seems to freeze once I use it?** \r\n[feature_extractor_user_defined.zip]\r\n(https://github.com/user-attachments/files/15779886/feature_extractor_user_defined.zip)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/f6d16120-fa0a-4825-9f61-5e810945a11f)\r\n\r\n\r\n**3. Downloading CSV files from SLEAP puts both tracks (for resident and intruder) in a single column, it seems that to train the model, SimBA wants each node annotated in individual columns (example \"Resident_forelegL1_1_y\"). I would like to know how to remedy this.** \r\nExample Below: How SLEAP CSV files are exported and labeled\r\n![How SLEAP CSV files are exported and labeled](https://github.com/sgoldenlab/simba/assets/169295221/a725a0ec-11ae-4dec-87e2-41f64c5a3ada)\r\n\r\n4. Should I be using this meta data file? \r\n[BtWGaNP_meta.csv](https://github.com/user-attachments/files/15779964/BtWGaNP_meta.csv)\r\nhttps://github.com/sgoldenlab/simba/blob/master/misc/BtWGaNP_meta.csv\r\n\r\nHere is my Outlier correction settings in case it helps:\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/7eb06a2c-0fb8-488f-b954-ad5783077565)\r\n\r\n\r\nAny help is appreciated, thank you! \r\n\r\nWindows 10, version 22H2\r\nPython Version [3.10.14]\r\nusing miniconda3 Version [24.4.0]\r\n", + "user": "wallawhitecoat", + "reaction_cnt": 0, + "created_at": "2024-06-10T23:03:32Z", + "updated_at": "2024-07-11T22:35:20Z", + "author": "wallawhitecoat", + "comments": [ + { + "body": "Hi @[wallawhitecoat](https://github.com/wallawhitecoat)!\r\n\r\n\r\n1. To use the models on OSF, you need to track the same body-parts that were used to create the model. This ensures that SimBA computes the same features that the models expects. If you have different body-parts configurations, SimBA will compute different features and a different number of features, and the models will be confused what to do with the additional / missing features and throw you an error. To use these models, use this setting when creating your project:\r\n\r\n\"Screenshot\r\n\r\n\r\n\r\n2. One possibility that it “freezes”, is that the example script is expecting data columns that don’t exist in your data, and it errors out. If it errors out in a way that I haven’t yet been able to anticipate, no errors will be printed in the main SimBA interface. Instead, an error message from the standard python library will be printed in the main terminal which you used to launch SimBA. If you look in the terminal that you used to launch SimBA, do you see any error msg printed? \r\n\r\n\r\n3. On the difference between the SLEAP data, and the format in SimBA where the SLEAP data is transposed. When you import your data into SimBA, the data lands in the `project_folder/csv/input_csv` within your SimBA project. 
After you perform outlier correction (or skip to perform outlier correction), the corrected data is copied to the `project_folder/csv/outlier_corrected_location` `and project_folder/csv/outlier_corrected_movement_location` directories of your SimBA project with the additional corrected headers appended. If you want the transposed data, you can look in those folder as SimBA performs the transpose during import.\r\n\r\n4. Yes you can use [THIS](https://github.com/sgoldenlab/simba/blob/master/misc/BtWGaNP_meta.csv) hyper parameter meta file to create models to start. However, you may have to play with it, in particular the under sampling ratios, to get a model that performs best in your setup.", + "created_at": "2024-06-11T13:04:41Z", + "author": "sronilsson" + }, + { + "body": "Can I still use the tail_rattle classifier if the tail_end is not labeled?", + "created_at": "2024-06-11T15:19:34Z", + "author": "wallawhitecoat" + }, + { + "body": "No sorry - the tail end classifier is legacy. We had some trouble with this - it was very difficult to get tail end tracked reliably even after extensive training and labelling - you may have more luck though. It was often confused with the bedding material and we saw a lot of ID swappes with the tail end of the two animals beeing confused with each other. ", + "created_at": "2024-06-11T15:32:28Z", + "author": "sronilsson" + }, + { + "body": "Yes, I did experience that even after training close to 2000 frames. Could we use the tail_rattle classifier if the configuration looks like this? \r\n![SLEAP settings](https://github.com/sgoldenlab/simba/assets/169295221/77a6aac4-2a7b-4909-b16e-d16502846988)\r\n\r\nAlso, the reason I ask is because in this youtube video I don't see a tail end but tail_rattle is still being quantified. \r\nhttps://www.youtube.com/watch?v=bqWteWIxzGM", + "created_at": "2024-06-11T15:42:02Z", + "author": "wallawhitecoat" + }, + { + "body": "Yes - that should work. But - just for full disclosure - I created the tail rattle classifier in Seattle around 2019. Everyone who worked on this back then, including maintaining the OSF repository, have other interests and jobs today. I don't remember exactly which features the tail end classifier excepts, and no-one is available for me to ask. If you hit errors, I can start to dig through for old data and figure it out though and we could solve it together. \r\n\r\nA reason why it is not visualized is that although the classifications may be OKish: for that video we wanted to visualize the hull polygonal bounding box of the animals in white. We probably omitted the tail end to get that white polygon box thing around the animal to look neater. ", + "created_at": "2024-06-11T16:39:32Z", + "author": "sronilsson" + }, + { + "body": "Ok, took your advice, back after re-training and labeling with this skeleton. \r\n\r\n1. So, I'm able to run the feature extraction, but is a file supposed to be inserted in the project_folder ->csv->features_extracted? Because I do not see anything. \r\n![Feature Extraction](https://github.com/sgoldenlab/simba/assets/169295221/537ecb1f-7c80-4c13-bf6a-0049165e9df9)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/e38ec6b7-e810-437d-a780-4f7c52d5b201)\r\n\r\n2. Additionally, I skipped the outlier correction, and the SLEAP data was successfully transposed into outlier_corrected_movement_location. I guess I mostly don't exactly understand what completing an outlier correction will do for my data, or what settings I should run it on. \r\n\r\n3. 
My last question revolves around how I should label. Do I just label all frames using the regular \"label behavior\" method? Or, if I'm using the .sav classifiers, should I use pseudo-labelling and/or advanced labeling? \r\n", + "created_at": "2024-06-24T23:25:37Z", + "author": "wallawhitecoat" + }, + { + "body": "Additional note on Question 3 here, I noticed that if I attempt to **Validate on a Single Video** using the .sav files I get this error. Is that because I did not perform my feature extraction correctly? \r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/69f760a2-d6b1-4e6c-9814-4b825555993d)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/b9dd954b-d9cd-4397-a927-eb5e53fbc6e1)\r\n\r\nWhen I attempt to **run the machine model** with these settings I get this error message, which also points to the features_extracted directory. \r\n![models](https://github.com/sgoldenlab/simba/assets/169295221/5f87f708-8663-4400-b2b4-2e97bdc4419c)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/5bf33899-867e-4cfa-b7b4-9371e29ba2c8)\r\n", + "created_at": "2024-06-24T23:50:43Z", + "author": "wallawhitecoat" + }, + { + "body": "Hi @wallawhitecoat - \r\n\r\n1) Yes, it appears to go astray at the point of features extraction, with no files being created. From the screenshot, it looks like you have ticked the box to run your own feature extraction script like in the image below - is this correct? \r\n\r\n\"image\"\r\n\r\nIf you have ticked this box, could you share with me the .py file you have used in the `Script path` file browse box? From your screenshot, it looks like the script is running appropriately, but the final file might be saved in the wrong location or not saved at all. \r\n\r\n2) For outlier correction, I've tried to write some explanations [HERE](https://simba-uw-tf-dev.readthedocs.io/en/latest/tutorials_rst/scenario_1.html#step-4-outlier-correction) and [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-4-outlier-correction). In short, what it is trying to do is fix big pose-estimation inaccuracies based on user-defined (heuristic) rules. It may be that a body-part can't move N x the length of the animal in a single frame, or that a body-part can't be located a distance N x the length of the animal from all the other body-parts of the animal. If it fails those rules, the body-parts are placed in their most recent reliable location. If you have very good pose-estimation tracking, you shouldn't have to apply these rules though. \r\n\r\n3) I've reached out for some help to answer this one - I think @goodwinnastacia can help", + "created_at": "2024-06-25T14:56:06Z", + "author": "sronilsson" + }, + { + "body": "Thank you! I was able to perform a feature extraction just using the \"extract features\" button and not with the user-defined settings. I was then able to use the .sav files after deleting the following features; the .sav files only recognize 490 features, not 498. I also had to add a prefix for the pose estimation locations (ex. \"**track_1**_Ear_left_1_x\")\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/ed6ea175-b88e-414e-8c99-2665b27a7e68)\r\n\r\nHowever, after running the machine model and generating the GANTT graphs and video, I found that the classifiers were too sensitive for some, and not sensitive enough for others. 
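\r\n\r\nJust to check my understanding of the discrimination threshold before tuning it further - I assume it works roughly like this sketch (the file and column names here are hypothetical):\r\n\r\n```python\r\nimport pandas as pd\r\n\r\n# Hypothetical SimBA machine results file with per-frame classifier probabilities\r\ndf = pd.read_csv('machine_results.csv')\r\n\r\n# Frames where the probability exceeds the threshold are scored as the\r\n# behavior present (1), everything else as absent (0)\r\nthreshold = 0.5\r\ndf['tail_rattle'] = (df['Probability_tail_rattle'] >= threshold).astype(int)\r\n```\r\n\r\n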
I've tried multiple discrimination thresholds, so far 0.1 is too low and quantifies behavior when it doesn't happen, and 0.7 is too high and doesn't quantify anything. My pose estimation tracking is pretty good, so I'm not exactly sure what the issue is, but thank you for the resource I will follow up with them. Thank you again for helping me troubleshoot! \r\n![labels v2001_final_image](https://github.com/sgoldenlab/simba/assets/169295221/1b62d2fa-97f9-4ca5-a4f9-6c33bb3ae9ab)\r\nc\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/0005ec49-fe8e-403b-b687-9387924d3ab1)\r\n", + "created_at": "2024-06-26T23:32:25Z", + "author": "wallawhitecoat" + }, + { + "body": "Hi @wallawhitecoat - thanks for sharing - and thanks for figuring out the additional features that had to be removed - I wasn't sure if that would come up and it was in the back of my mind. \r\n\r\nFor the predictions - could we confirm that it is not the tracking that has been disrupted somehow along the process? Can you run the the classification visualization in the menu below and share the video? \r\n\r\n\"image\"\r\n", + "created_at": "2024-06-27T12:49:34Z", + "author": "sronilsson" + }, + { + "body": "> 3\\. My last question resolves around how I should label. Do I just label all frames using the regular \"label behavior\" method? Or if I'm using the sav. classifiers should I use pseudo-labelling and/or advanced labeling?\r\n\r\nHi, I would always start by labeling a few full videos using the label behavior method just so that you have a good representation of what tail rattle ISN'T, since you have to teach the algorithm both what positive and negative frames look like. I would then create short clips with tail rattle present and label those with the label behavior method. Once you've got some videos labeled this way, you can take a look at using pseudolabeling. \r\n\r\nFor the tail rattle classifier we have posted, our tracking wasn't good enough for good behavioral classification because we only did tail base and tail end. Your tracking should be better!", + "created_at": "2024-06-27T19:29:15Z", + "author": "goodwinnastacia" + }, + { + "body": "Of course, thank you for the advice, I performed the visualize quantifications (with the body-part visualization threshold set to 0.0), here are the results (no classifiers shown). It's a youtube link because the video was too large to post (even when zipped). Below you can see the settings I used and my rational. \r\nhttps://www.youtube.com/watch?v=THQ2TnCWfsw\r\n\r\nDecided to try these settings for running the machine labelling because they are detailed ([in this paper](https://www.nature.com/articles/s41593-024-01649-9)) and I just set the tail_rattle classifier to 0.5 (even if it isn't good enough). Still not sure what to set my min. bout length to though. \r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/e69298b5-16cd-432a-a2df-c1810928a269)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/7d054561-7f40-456b-aa4a-3e7dfac3f91c)\r\n\r\nAs for labelling full videos, the reason I wanted to use existing classifiers is because they had been recently published in that paper, so I had thought users would be able to apply them to their own data (I had found this information from this quote in the paper **\"To perform supervised behavioral classification, users can download pre-made classifiers from our OSF repository, request classifiers from collaborators or create classifiers by annotating new videos in the scoring interface\"**. 
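\r\n\r\nFor context, my rough mental model of how the .sav files get applied is the sketch below (paths hypothetical; I'm assuming the models are pickled scikit-learn random forests):\r\n\r\n```python\r\nimport pickle\r\nimport pandas as pd\r\n\r\n# Hypothetical paths: a downloaded classifier and a SimBA features file\r\nwith open('tail_rattle.sav', 'rb') as f:\r\n    clf = pickle.load(f)\r\n\r\nfeatures = pd.read_csv('features_extracted.csv', index_col=0)\r\nprobabilities = clf.predict_proba(features)[:, 1]  # P(behavior present) per frame\r\n```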
\r\n\r\nHow would the existing classifiers/.sav files fit into my training If I manually annotate all the behaviors on my own? Would a wise course of action be to just get machine-results using the existing classifiers, and then do pseudo labeling? \r\n\r\nThank you again for your continued help on this :)", + "created_at": "2024-06-28T22:36:26Z", + "author": "wallawhitecoat" + }, + { + "body": "So there are two potential ways to do this. 1) download our videos, track them in SLEAP using your 11 point model, and then append our ground truth annotations. I think this is advantageous because the extra tail point is going to help with your tail rattle classifications. 2) Download our targets inserted files off of OSF, add them to your project folder -> csvs -> targets inserted folder, and download the video info for those projects and add them to your own video info log. This will use our original DLC tracking for our videos with the less than ideal tail end tracking and could potentially make your classifications a bit worse at first. The advantages of this option are that you're diversifying your tracking so your models should ultimately be more robust, and you don't have to run more videos through pose estimation. Let me know which you'd like to move forward with and I can help you out with it. \r\n\r\nIt does look like there's a glitch on OSF right now and our files aren't showing up, but I should have that fixed by the end of the weekend. ", + "created_at": "2024-06-28T23:29:43Z", + "author": "goodwinnastacia" + }, + { + "body": "I think the second method would work out well, not having a perfect tail_rattle classifier is not a deal breaker for my team. If we could have attack and anogenital sniff work well that would be satisfactory. Also, no worries thank you for fixing! ", + "created_at": "2024-06-28T23:56:05Z", + "author": "wallawhitecoat" + }, + { + "body": "Hi, to clarify - you currently have files with 16 total body parts and 498 features, correct? \r\n\r\nI put the hand-annotated training sets on OSF and you can find them through the filepaths shown in the screenshot. You should be able to copy these csv files into your targets inserted folder. After that, you need to add the video names and associated info to your \"video_info\" log in the project_folder -> logs folder. This info can be found in the video_info folder in the folder: 16BP_498_Features_need_refinement_PREPRINT_CLASSIFIERS -> Video_Info.\r\n\r\nPlease let me know if you have any questions!\r\n\r\n\"Screenshot\r\n", + "created_at": "2024-07-03T21:42:16Z", + "author": "goodwinnastacia" + }, + { + "body": "Yes, they are 16 body parts. I had to manually remove 8 features from the features_extracted file in order to use the .sav classifiers, I assume those 8 had been added to SimBA after the .sav classifiers were put on the OSF (see the above excel spreadsheet to see which ones were removed. For what I did below, I did not remove those 8 features. \r\n\r\nAfter I added the annotations and the corresponding video_info file, I found I could Train multiple models (one for each saved setting) with the error below mentioned. I saved the settings as they were, this .csv file appeared in configs. \r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/f3e67a90-c927-4fc9-b831-7b3c6fc765f4)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/ca8ac823-9762-4a1f-af43-b6f5ac31a76d)\r\n\r\nHere is what SimBA output for me, it took about a half hour. 
\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/171fe07e-3ea9-4dae-a4d4-7916b2a9a00c)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/0872be4b-8375-4b8b-b260-65ce843d20d0)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/55a074fb-85e3-40e9-aed8-deb8cf48dd25)\r\n\r\nThen I re-named the .sav file to just \"Tail_rattle\" and ran \"Validate model on single video\" . \r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/04663b5b-8b86-4057-b758-ef50565238f3)\r\n\r\nBased off of the graph, it didn't seem that the classifier was more accurate, so I created a video w/a GANTT chart **(Discrimination threshold- 0.4, Minimum bout length (MS)- 1)**. I think it is more accurate than my previous results, but still needs some work. I'll attach a video link. \r\n\r\nhttps://youtu.be/cdtEezm4i7c\r\n\r\nAnyways, all this info is just to ask what the next step is. Additionally, is this something I should replicate for the other classifiers to improve their accuracy? Thank you! \r\n\r\n", + "created_at": "2024-07-08T21:05:22Z", + "author": "wallawhitecoat" + }, + { + "body": "\"image\"\r\n", + "created_at": "2024-07-11T16:46:54Z", + "author": "goodwinnastacia" + }, + { + "body": "Have your thresholding graphs always looked like that, or did they previously look more like the top two graphs in the flowchart I just sent? Usually when we see a threshold plot like this without a low baseline, there's some error in the annotations", + "created_at": "2024-07-11T16:48:01Z", + "author": "goodwinnastacia" + }, + { + "body": "The thresholding graphs have looked like this so far, what should I do to fix this? I also wanted to know if I need the corresponding videos for the targets_inserted files. So far, I have only run this on my 10min long video, not any on the OSF. ", + "created_at": "2024-07-11T17:14:45Z", + "author": "wallawhitecoat" + }, + { + "body": "Can you please send me one of your targets inserted files that you hand annotated, and then one from the training sets we provided? I want to make sure that no columns got scrambled. You can download the OSF vids if you want to double check the annotations or something like that, but you don't need to if you don't want to visualize anything. ", + "created_at": "2024-07-11T22:35:20Z", + "author": "goodwinnastacia" + } + ] + }, + { + "title": "How to remove video from behavioral annotation dataset", + "body": "Hello. Thanks for all the hard work to make such a detailed workflow. I wanted to ask if there is any way to remove a video from the training dataset using the SimBA GUI? I couldn't seem to find anything about this in my searches, so I wasn't sure if I have to manually go into the project folder and delete all related csv and .mp4 files for the video. I don't want to annotate behaviors for some videos anymore, so this would really help. Please let me know!\r\n", + "user": "lpereiras17", + "reaction_cnt": 0, + "created_at": "2024-06-09T12:56:47Z", + "updated_at": "2024-06-09T15:05:54Z", + "author": "lpereiras17", + "comments": [ + { + "body": "Hello @lpereiras17! \r\n\r\nTo train the models, SimBA uses all the files inside the `project_folder/csv/targets_inserted`. \r\n\r\nTo omit a video from training, remove the file representing this video from the `project_folder/csv/targets_inserted` (or, place it inside a subfolder inside `project_folder/csv/targets_inserted`) and SimBA won't see it when training models. 
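\r\n\r\nE.g., something along these lines (paths and file name hypothetical):\r\n\r\n```python\r\nimport shutil\r\nfrom pathlib import Path\r\n\r\ntargets_dir = Path('project_folder/csv/targets_inserted')\r\nomitted_dir = targets_dir / 'omitted'\r\nomitted_dir.mkdir(exist_ok=True)\r\n\r\n# Move the file out of the way; SimBA will no longer use it for training\r\nshutil.move(str(targets_dir / 'My_video_1.csv'), str(omitted_dir / 'My_video_1.csv'))\r\n```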
\r\n\r\nYou don't have to remove the file from any other folders for it to be omitted from training. \r\n\r\n", + "created_at": "2024-06-09T15:05:53Z", + "author": "sronilsson" + } + ] + }, + { + "title": "SIMBA VALUE ERROR - Issue Importing H5 and slp tracking data ", + "body": "\r\n![Simba Problem](https://github.com/sgoldenlab/simba/assets/169295221/57a868cd-de39-4b50-87ee-56e69cbcea1b)\r\n![SimBA Problem 2](https://github.com/sgoldenlab/simba/assets/169295221/5e61e32f-97bb-4c7e-b29f-8b20ceae86e9)\r\n\r\nI'm trying to start a new project and import labels from SLEAP (resident intruder). I tried importing as a .slp file and an H5 file I had. Neither worked and I get this message from SimBA, or the GUI quits. I only imported 1 video, ten mins long, 60fps, and over 36,000 frames. Any help would be appreciated, thank you! \r\n\r\n- Windows 10, version 22H2\r\n- Python Version [3.10.14]\r\n- using miniconda3 Version [24.4.0]\r\n\r\n", + "user": "wallawhitecoat", + "reaction_cnt": 0, + "created_at": "2024-05-30T16:04:17Z", + "updated_at": "2024-06-06T23:31:27Z", + "author": "wallawhitecoat", + "comments": [ + { + "body": "Hi @wallawhitecoat thanks for reporting - this error happens as SimBA looks for the body-parts from SLEAP that are most proximal to your button clicks, in order to assign the animals their correct identities across videos. \r\n\r\nLooking at the image of the resident and intruder above, your SLEAP tracking does not appear to contain any data for the frame being shown. If there is no data (and you perform no interpolation), SimBA places the body-parts at coordinates 0,0 - top left of the image: \r\n\r\n\"image\"\r\n\r\nYou could try to perform interpolation as you import the data into SimBA to get body-part predictions for all frames. An alternative is to improve the pose-estimation model so it generates accurate predictions for all frames and no interpolation is needed. \r\n\r\nLet me know if that makes sense!\r\n\r\nSimon\r\n\r\n", + "created_at": "2024-05-30T16:38:14Z", + "author": "sronilsson" + }, + { + "body": "Hi Simon, thank you for the response. I was able to export an H5 file with predictions on every frame. Here is what my skeleton looks like in SimBA and in SLEAP. \r\n![Skeleton settings](https://github.com/sgoldenlab/simba/assets/169295221/e6b11fd2-6397-437b-8834-b8032723ec50)\r\n![Project Config SimBA](https://github.com/sgoldenlab/simba/assets/169295221/c87ea471-b5ba-48d3-b681-bbbc8ec898d0)\r\n![SLEAP Skeleton](https://github.com/sgoldenlab/simba/assets/169295221/8f80ff14-710d-4205-aadc-45607af768c6)\r\n\r\nUnfortunately, I keep getting this notification from SimBA, what should I do? Thank you!\r\n![SimBA Problem3](https://github.com/sgoldenlab/simba/assets/169295221/2ab73554-b6e4-4b8b-b775-65f575a1cf16)\r\n", + "created_at": "2024-06-05T17:59:01Z", + "author": "wallawhitecoat" + }, + { + "body": "Hi @wallawhitecoat ! \r\n\r\nCan you drop the `labels.v0012.h5` in this thread and I can take a look at what is going on in there? You should be able to zip the file up and drag it into the thread. I am not sure why SimBA thinks there are only 14 body-parts in there.", + "created_at": "2024-06-05T18:34:42Z", + "author": "sronilsson" + }, + { + "body": "Sure, let me know if this file works! 
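\r\n\r\nIn case it helps, this is how I peeked at the file's contents before uploading (a quick sketch; local path hypothetical):\r\n\r\n```python\r\nimport h5py\r\n\r\n# List the top-level keys and dataset shapes in the exported file\r\nwith h5py.File('labels.v0012.h5', 'r') as f:\r\n    for key in f.keys():\r\n        print(key, f[key].shape if hasattr(f[key], 'shape') else '(group)')\r\n```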
\r\n\r\n[labels.v0012.zip](https://github.com/user-attachments/files/15595702/labels.v0012.zip)\r\n", + "created_at": "2024-06-05T19:43:15Z", + "author": "wallawhitecoat" + }, + { + "body": "Thanks - just a sanity check - the file `labels.v0012.h5` is only 93kB, very small, and as it's called \"labels\", I have to ask: \r\n\r\nDoes this file contain your hand annotations from SLEAP? Or does the file contain machine-generated body-part predictions for all frames in the respective videos? I ask as previously [HERE](https://github.com/sgoldenlab/simba/issues/339) and more specifically [HERE](https://github.com/sgoldenlab/simba/issues/339#issuecomment-1972002345) someone also tried to use a file with the same filename as yours and I think it was just hand labels and not machine learning predictions. ", + "created_at": "2024-06-05T20:00:58Z", + "author": "sronilsson" + }, + { + "body": "It contains both hand annotations and machine generated body-part predictions for all frames. I went back to SLEAP and predicted all of the frames on a copy of the video I was using and exported that prediction as an H5 file. The new file is 4,094KB.\r\nNow I get a similar problem as before, but now it says that the number of body parts is 11 instead of 22. \r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/1aa26024-a589-4ed7-985b-12902719680d)\r\n[labels.v0013.zip](https://github.com/user-attachments/files/15596814/labels.v0013.zip)\r\n\r\nThis is how I'm saving in SLEAP\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/aec3e185-4070-4bad-a50d-66c456da28ba)\r\n\r\n", + "created_at": "2024-06-05T21:28:31Z", + "author": "wallawhitecoat" + }, + { + "body": "UPDATE: I was able to download a csv file, but it only shows tracking on one mouse \r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/9776dd7c-d059-4189-8f86-af5eb3389d38)\r\n\r\nThe CSV file looks like this when I open it. I notice that there is only one \"instance score\" for each frame, if that is helpful in determining why only 11 nodes are being recognized. \r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/37228eb4-a2af-46af-9373-1179c7d252dd)\r\n", + "created_at": "2024-06-05T21:58:57Z", + "author": "wallawhitecoat" + }, + { + "body": "Yeah that seems likely - is the `track` field all blank? Even if you sort by the `frame_idx` field? That would explain why there are only half the expected body-parts. \r\n\r\nIn the `frame_idx` field, are there two instances of every frame number? \r\n\r\n ", + "created_at": "2024-06-06T00:46:46Z", + "author": "sronilsson" + }, + { + "body": "That was it, thank you! I re-ran my trained model and included tracks in SLEAP, so all of my predictions have tracks now. \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/e29853a2-03c2-4685-95b7-8c4f99c94cdc)\r\n![image](https://github.com/sgoldenlab/simba/assets/169295221/56e37fde-edd8-4a1f-aa9f-f9b6c0bdb2e0)\r\n", + "created_at": "2024-06-06T23:04:49Z", + "author": "wallawhitecoat" + }, + { + "body": "Great news, onwards and upwards! :) let me know if anything else comes up", + "created_at": "2024-06-06T23:31:26Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Undersample still results in serious imbalance", + "body": "I have used random undersample with ratios around 1, but the imbalance situation fluctuates greatly between experiments. Even when the same ratio is applied, the number of frames still differs. I am wondering whether I misinterpreted the usage of undersample. 
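\r\n\r\nMy current understanding of the ratio, as a sketch (assuming it behaves like scikit-learn-style random undersampling of the behavior-absent majority class; file and column names hypothetical):\r\n\r\n```python\r\nimport pandas as pd\r\n\r\ndf = pd.read_csv('targets_inserted.csv')  # hypothetical annotated file\r\npresent = df[df['sniffing'] == 1]\r\nabsent = df[df['sniffing'] == 0]\r\n\r\n# With ratio r, keep r x the number of behavior-present frames from the\r\n# behavior-absent frames; e.g. ratio 1.0 -> perfectly balanced training data\r\nratio = 1.0\r\nabsent_sampled = absent.sample(n=int(len(present) * ratio), random_state=0)\r\ntrain_df = pd.concat([present, absent_sampled]).sort_index()\r\n```\r\n\r\n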
I greatly appreciate your information and suggestions on fine-tuning the hyperparameters!\r\nThe first 3 images are reports for random undersample ratios of 1, 1.1, and 0.9 respectively (other hyperparameters are all the same: 500 estimators, sqrt features, BOUTS split, and balanced weight) and the last image is the report for a random undersample ratio of 1 with behavior present weight of 2 and absent weight of 1 (other hyperparameters are the same). The order of images is the same as the description.\r\n![IMG_1082](https://github.com/sgoldenlab/simba/assets/110661816/c879fe1b-fce4-4b6a-9f8d-74acbbc51b3d)\r\n![IMG_1083](https://github.com/sgoldenlab/simba/assets/110661816/853cba42-52d5-40bd-87d3-fcd6f1184e43)\r\n![IMG_1084](https://github.com/sgoldenlab/simba/assets/110661816/04fbf42b-5945-4b14-afae-b98136d11409)\r\n![IMG_1085](https://github.com/sgoldenlab/simba/assets/110661816/59fc8890-b2de-4d06-9402-8cd5abc40df8)\r\n", + "user": "AndyWeasley2004", + "reaction_cnt": 0, + "created_at": "2024-05-28T21:17:11Z", + "updated_at": "2024-05-29T17:10:33Z", + "author": "AndyWeasley2004", + "comments": [ + { + "body": "Thanks for sharing! First, about the “support” numbers and their consistency despite different under sampling ratios:\r\n\r\nWhen you train a model, SimBA splits the data into training and testing sets, using your specified Train-Test split ratio (e.g. 80 vs 20%). It then under samples the data in the training set, retaining the same number of sniffing events and non-sniffing events in the training set **(if the under sample ratio is set to 1.0)**. However, SimBA does not touch the test set: it will be 20% of your dataset pre-under sampling. We don’t want to bias the test sample, so it remains untouched. \r\n\r\nAbout the fluctuations: When you sample frames using BOUTS, it ensures that frames from the same “sniffing” event/bout don’t end up both in the training and testing set. One thing that comes to mind is that even if there are a lot of non-sniffing and sniffing frames, they could be relatively long events and relatively few bouts. When training the model, a few non-sniffing bouts may be selected for training, and as there are relatively few, and they vary, you get these fluctuations between runs depending on the random selections.\r\n\r\nDo you know how many bouts of sniffing and non-sniffing you have? How does it look when you select by frames rather than bouts?\r\n\r\nAlthough the third truth table looks promising, the others seem to have issues primarily with precision rather than recall. That means the model is over-predicting sniff events, and we may want to show it more non-sniffing events to make it more balanced and let it learn better what non-sniffing events/frames look like - how does it look if you, say, set the under sample ratio to 2, 5 and 10? ", + "created_at": "2024-05-29T01:07:47Z", + "author": "sronilsson" + }, + { + "body": "According to the log in the main SimBA window, both the train and test set have 402 BOUTS. I can at most set the under-sample ratio to around 2.5. I have tried ratios of 2, 2.25, and 2.5, and all of them cause more serious imbalances. 
The following 3 reports are for ratios of 2, 2.25 and 2.5 respectively.\r\n![image](https://github.com/sgoldenlab/simba/assets/110661816/d1c6e377-1142-4ad1-8d59-cd01683b0667)\r\n![image](https://github.com/sgoldenlab/simba/assets/110661816/56c77e44-0004-450d-a744-05480f67301e)\r\n![image](https://github.com/sgoldenlab/simba/assets/110661816/6c75de72-c530-494b-bcd5-52f5b3034c5e)\r\nExperiments on my dataset show that a ratio around 1 can sometimes give the most balanced model, though it sometimes still gives imbalanced results.\r\nI'm continuing experiments and I'll let you know if anything solves this issue.\r\n", + "created_at": "2024-05-29T17:10:32Z", + "author": "AndyWeasley2004" + } + ] + }, + { + "title": "Analyze ROI data not working for multiple videos", + "body": "Here are the steps I took:\r\n1.) added video and DLC csv to existing project\r\n2.) added a measurement for the new video in video parameters \r\n3.) defined ROI for the new video \r\n4.) ran analyze roi data aggregates and Analyze Distances \r\nProblem: \r\nfor step 4 I am only getting results for the first video, when I want results for the new video. \r\nVideo of what I am seeing on my end found here:\r\nhttps://youtu.be/yxb-5UfGMjw\r\n\r\nThank you!\r\n\r\n", + "user": "breannashi", + "reaction_cnt": 0, + "created_at": "2024-05-28T17:35:34Z", + "updated_at": "2024-05-29T23:57:48Z", + "author": "breannashi", + "comments": [ + { + "body": "Hi @breannashi - I think (maybe wrong) someone reported this issue a few weeks ago and we fixed it. \r\n\r\nWhich version of SimBA are you running? `pip show simba-uw-tf-dev`\r\n\r\nIf you run `pip install simba-uw-tf-dev --upgrade` do you still see this error?\r\n\r\nPS. I think github has fixed it so you can drag and drop video mp4 files straight into these chats. ", + "created_at": "2024-05-28T17:39:01Z", + "author": "sronilsson" + }, + { + "body": "Ah I can see the version number in your video :) Anyway, try to upgrade to latest as above, and let me know if that fixes it or not please.", + "created_at": "2024-05-28T17:58:14Z", + "author": "sronilsson" + }, + { + "body": "This did not fix the error. It is functioning exactly the same. ", + "created_at": "2024-05-29T01:11:55Z", + "author": "breannashi" + }, + { + "body": "for context:\r\nSimba: SimBA v.1.94.2\r\nOS: Ubuntu 22.04.4 LTS\r\n\r\n", + "created_at": "2024-05-29T01:29:57Z", + "author": "breannashi" + }, + { + "body": "Thanks @breannashi - very helpful. So that I better understand the issue, please correct me if I am wrong:\r\n\r\nYou first define ROIs on more than one video, you then analyze ROI aggregate data including distances moved for these videos. However, the results you see only contain data for a single video? \r\n\r\nSorry I only have a Mac at present, but are you essentially doing this process in the video below, but the final CSV only has data for one video?\r\n\r\nhttps://github.com/sgoldenlab/simba/assets/34761092/f1a76cfc-8739-4df8-8923-1bca9c1c4342\r\n\r\n", + "created_at": "2024-05-29T01:38:06Z", + "author": "sronilsson" + }, + { + "body": "Yes, that's right. And where your simba prints that it runs on both videos, mine only prints one. ", + "created_at": "2024-05-29T02:27:38Z", + "author": "breannashi" + }, + { + "body": "Thanks @breannashi, two more questions:\r\n\r\n1) Can you send me your `/project_folder/logs/measures/ROI_definitions.h5` file and I will take a look at it? This file stores your drawn ROI definitions. 
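\r\n\r\nIf you want to peek inside that file yourself, something like this should work (a sketch; I'm writing the HDF5 keys from memory, so they may differ in your version):\r\n\r\n```python\r\nimport pandas as pd\r\n\r\npath = 'project_folder/logs/measures/ROI_definitions.h5'  # hypothetical project path\r\nfor key in ['rectangles', 'circleDf', 'polygons']:  # keys from memory, may differ\r\n    shapes = pd.read_hdf(path, key=key)\r\n    print(key, shapes.shape)\r\n    print(shapes.head())\r\n```\r\n\r\n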
You should be able to zip it and drop it into this chat thread.\r\n\r\n2) How many files do you have represented in the `/project_folder/csv/outlier_corrected_movement_location` directory of your SimBA project - can you send me a screenshot of the content of this directory? \r\n", + "created_at": "2024-05-29T11:08:23Z", + "author": "sronilsson" + }, + { + "body": "Thank you! here are the files requested!\r\n![image](https://github.com/sgoldenlab/simba/assets/74272793/7e05c50b-3ac6-4990-8502-56dfad8fe9f1)\r\n![image](https://github.com/sgoldenlab/simba/assets/74272793/54f098d3-9f12-47cd-b30a-b75bc3d9f0d2)\r\n[ROI_definitions.zip](https://github.com/sgoldenlab/simba/files/15487018/ROI_definitions.zip)\r\n", + "created_at": "2024-05-29T15:52:58Z", + "author": "breannashi" + }, + { + "body": "Thanks @breannashi!\r\n\r\nFor the screenshot, I can see that you have data for a single video (`CC1_tank_exploration_29-11-22`). \r\n\r\nOpening the ROI definitions, I can also see that you have polygons drawn for two videos (with or without the `_29-11-22` suffix):\r\n\r\n\"image\"\r\n\r\nIf you want to compute the ROI data for both `CC1_tank_exploration_29-11-22` and `CC2_Tank_exploration`, there needs to be data-files inside the `/project_folder/csv/outlier_corrected_movement_location` directory representing the two videos. \r\n\r\nThe reason you are getting results for only a single video file is likely because there is only a single file in this directory - SimBA can't find any more data. Let me know if that makes sense.\r\n\r\n", + "created_at": "2024-05-29T16:01:03Z", + "author": "sronilsson" + }, + { + "body": "That makes sense. I am wondering why it defaults to adding my regions to the video 1 csv rather than making a new one? And will the .h5 also need to be altered? ", + "created_at": "2024-05-29T17:44:20Z", + "author": "breannashi" + }, + { + "body": "Hi @breannashi - I am not completely sure what you mean. When you say \"defaults\", what do you mean? \r\n\r\nSome notes that may be relevant: \r\n\r\nThe videos you see listed in the image below are videos that SimBA finds in the `project_folder/videos/` directory of your SimBA project. If the video is not there, there is nothing to draw your regions on. Add the video to that folder to be able to draw on it.\r\n\r\n\"image\"\r\n\r\nTo duplicate the ROIs you've drawn on one video to all the other videos listed, click the `Apply to all` button. For example, if I have drawn polygons on the first video in the list, but not the others, and I want the same polygons on the remaining 7 videos, I click the `apply to all` button associated with row 1: \r\n\r\n\"image\"\r\n\r\n\r\n", + "created_at": "2024-05-29T17:56:40Z", + "author": "sronilsson" + }, + { + "body": "Never mind! I did not read properly. The problem is I forgot to click skip outlier correction. Thank you for your help!!", + "created_at": "2024-05-29T23:09:28Z", + "author": "breannashi" + }, + { + "body": "No problem, just let me know if anything else comes up", + "created_at": "2024-05-29T23:57:46Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Unable to add ROIs Ubuntu", + "body": "When using the simba gui to select a region of interest, it fails to show the screenshot from the video. 
\r\n\r\nI created a project following this \r\n[simba walk through](https://simba-docs.readthedocs.io/en/latest/docs/tutorials/simba_walkthrough.html)\r\nThe ROI section came up as a white screen.\r\n[Video showing error](https://www.youtube.com/watch?v=yxb-5UfGMjw)\r\n\r\nI tried to change the opencv version as documented in the FAQ here, but the instructions did not resolve the issue:\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md\r\n\"2. When I click on a video to set its parameters (i.e., \"pixel per millimeter\"), or try to open a video to draw ROI regions, I get an OpenCV error report with something like \"cv2.Error. The function is not implemented. Rebuild the library with Windows...\"\r\nShow solutions!\r\n\r\n\r\nTo fix this, make sure your python environment has the correct version of OpenCV installed. Within your environment, try to type:\r\n(1) pip install opencv-python==3.4.5.20 or conda install opencv-python==3.4.5.20.\r\n\r\nThen launch SimBA by typing SimBA, and see if that fixes the issue.\r\n\r\n(2) Alternatively, if this does not fix it, try typing:\r\n\r\npip uninstall opencv-python followed by either pip install opencv-contrib-python or conda install opencv-contrib-python\r\n\r\nThen launch SimBA by typing SimBA, and see if that fixes the issue.\"\r\n\r\n\r\nI am unable to add ROIs.\r\n\r\n - OS: Ubuntu 22.04.4 LTS\r\n - Python 3.6\r\n - in conda environment\r\n", + "user": "breannashi", + "reaction_cnt": 0, + "created_at": "2024-05-16T19:00:09Z", + "updated_at": "2024-05-17T16:53:13Z", + "author": "breannashi", + "comments": [ + { + "body": "Hi @breannashi - thanks for reporting, super helpful. It's not the OpenCV version: I was able to replicate the issue on my local Ubuntu 22.04.4 LTS with OpenCV 3.4.5.20.\r\n\r\nA fix on my end seems to be to introduce a 100ms wait `cv2.waitKey(100)` each time after displaying/updating the video image. I'm not sure why this would be needed in ubuntu but not MacOS or Windows though. I will test a little more and push an update to pip and let you know when!", + "created_at": "2024-05-16T20:30:24Z", + "author": "sronilsson" + }, + { + "body": "@breannashi if you update simba to the latest version using `pip install simba-uw-tf-dev --upgrade`, how does it look? ", + "created_at": "2024-05-17T13:46:59Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the quick response! ROI is working now THX!", + "created_at": "2024-05-17T16:53:12Z", + "author": "breannashi" + } + ] + }, + { + "title": "Certain filename patterns prevent importing SLEAP H5 files", + "body": "**Describe the bug**\r\nsimba.utils.errors.NoFilesFoundError: SIMBA NO FILES FOUND ERROR: SimBA could not locate a video file in your SimBA project for data file TPT_TOP.v001.016_G1F3_G1F4_cfa_2wk.analysis\r\n\r\nFilenames with the underscore character (\"_\") in the name of the SLEAP project generate the above error after importing the videos and trying to import the SLEAP H5 file\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Import videos with \"_\" in the filename for the project\r\n2. 
Attempt to import the analysis files from SLEAP\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11\r\n - Python Version 3.6.13\r\n - Miniconda\r\n \r\n\"TPT_TOP.v001.016_G1F3_G1F4_cfa_2wk.analysis\" does not work\r\nTPTTOP.v001.016_G1F3_G1F4_cfa_2wk.analysis works perfectly\r\n\r\nPresumably this is related to the fact that SLEAP separates the project name from the individual video names with a \"_\"\r\n", + "user": "NSGregory", + "reaction_cnt": 0, + "created_at": "2024-05-15T21:42:08Z", + "updated_at": "2024-05-20T14:55:18Z", + "author": "NSGregory", + "comments": [ + { + "body": "Hi @NSGregory! Thanks for reporting - I have to check this... but if you have multiple animals, then we need to pair each track with the correct animal across videos, so SimBA needs to bring up the video associated with your imported SLEAP data file. \r\n\r\nThe code looks in the `project_folder/videos` directory for the video file associated with the data file being imported, using the file-names to pair them. Do you have a video file in `project_folder/videos` named `TPT_TOP` or `TPTTOP`?\r\n\r\n ", + "created_at": "2024-05-15T23:30:47Z", + "author": "sronilsson" + }, + { + "body": "The videos were in the correct location when I ran into the issue.\r\n\r\n\r\nThe video name is G1F3_G1F4_cfa_2wk.mp4\r\nThe project name is TPT_TOP.v001.slp \r\n\r\nThe naming convention for the H5 file appears to be {project name}.{video number in project}_{video name}\r\n\r\nSo actually changing the H5 file to the _wrong_ name made it work.", + "created_at": "2024-05-16T01:00:57Z", + "author": "NSGregory" + }, + { + "body": "Ah got it - thanks, let me try this out.", + "created_at": "2024-05-16T11:44:05Z", + "author": "sronilsson" + }, + { + "body": "And yes - I can see the issue in the logic in the SLEAP filename cleaning function that tries to tease out the video name [HERE](https://github.com/sgoldenlab/simba/blob/e12aabbd47613527e69c4c9080637b3fcb0c3692/simba/utils/read_write.py#L1575), when there are underscores in the project name.\r\n```\r\n>>> clean_sleap_file_name(\"TPT_TOP.v001.016_G1F3_G1F4_cfa_2wk.analysis\")\r\n'TOP.v001.016_G1F3_G1F4_cfa_2wk'\r\n>>> clean_sleap_file_name(\"TPTTOP.v001.016_G1F3_G1F4_cfa_2wk.analysis\")\r\n'G1F3_G1F4_cfa_2wk'\r\n```\r\n\r\n\r\n", + "created_at": "2024-05-16T12:57:36Z", + "author": "sronilsson" + }, + { + "body": "@NSGregory what do you think of something like this instead - would it work on your end, or can you see any flaws? 
\r\n\r\n```\r\ndef clean_sleap_file_name(filename: str) -> str:\r\n    if (\".analysis\" in filename) and (\"_\" in filename) and (filename.count('.') >= 3):\r\n        filename_parts = filename.split('.')\r\n        video_num_name = filename_parts[2]\r\n        if '_' in video_num_name:\r\n            return video_num_name.split('_', 1)[1]\r\n        else:\r\n            return filename\r\n    else:\r\n        return filename\r\n```\r\n\r\nIt works on my test cases and your video file names, but admittedly I have not seen a lot of SLEAP h5 file names and there may be some other cases I don't know about.", + "created_at": "2024-05-16T13:09:52Z", + "author": "sronilsson" + }, + { + "body": "Hi @NSGregory - when you get a chance, if you update simba with `pip install simba-uw-tf-dev --upgrade`, how does the import look on your end with the file names it previously struggled with?", + "created_at": "2024-05-17T14:23:09Z", + "author": "sronilsson" + }, + { + "body": "Hi Simon, I am traveling, but I will give it a try as soon as I can.\r\nThanks for the quick response.\r\n", + "created_at": "2024-05-19T05:12:13Z", + "author": "NSGregory" + }, + { + "body": "The change works for loading the tracking data. I got a new warning when starting up simba though, not sure if it's related. Didn't seem to impact function at all but I didn't try any other functions yet.\r\n\r\n`(simba) C:\\Users\\Nick>simba\r\nC:\\Users\\Nick\\anaconda3\\envs\\simba\\lib\\site-packages\\numpy\\_distributor_init.py:32: UserWarning: loaded more than 1 DLL from .libs:\r\nC:\\Users\\Nick\\anaconda3\\envs\\simba\\lib\\site-packages\\numpy\\.libs\\libopenblas.NOIJJG62EMASZI6NYURL6JBKM4EVBGM7.gfortran-win_amd64.dll\r\nC:\\Users\\Nick\\anaconda3\\envs\\simba\\lib\\site-packages\\numpy\\.libs\\libopenblas.WCDJNK7YVMPZQ2ME2ZZHJJRJ3JIKNDB7.gfortran-win_amd64.dll\r\n stacklevel=1)`", + "created_at": "2024-05-20T14:01:40Z", + "author": "NSGregory" + }, + { + "body": "Thanks @NSGregory - if you hit some error let me know, or try in a fresh conda environment. I pinned a lot of dependency versions yesterday (including numpy, which the warning is about) so there may be duplicate versions installed in your python environment for some reason.", + "created_at": "2024-05-20T14:30:49Z", + "author": "sronilsson" + }, + { + "body": "A clean install fixed the warning. ", + "created_at": "2024-05-20T14:55:16Z", + "author": "NSGregory" + } + ] + }, + { + "title": "SHAP Multiprocessing Failed", + "body": "My error looks like this and I have set multicore to be True.\r\n![IMG_0945](https://github.com/sgoldenlab/simba/assets/110661816/02ae8780-a0bd-44c8-84d8-be9fa9244956)\r\n\r\nMy CPU info is below:\r\n![IMG_0946](https://github.com/sgoldenlab/simba/assets/110661816/48072bcf-8968-421d-ac36-6582e8378e4d)\r\nIs multiprocessing not supported on this CPU? 
Thank you for your information.\r\n", + "user": "AndyWeasley2004", + "reaction_cnt": 0, + "created_at": "2024-05-09T21:09:50Z", + "updated_at": "2024-05-28T20:51:28Z", + "author": "AndyWeasley2004", + "comments": [ + { + "body": "Yes it should be supported - it should run them in parallel on each of your selected cores, and your computer looks good.\r\n\r\nI haven't seen this error before. It looks like it is trying to process some value which is larger than or smaller than 2147483648, which is more than a 32-bit number can handle... possibly? I am away for the next couple of days but will see if I can recreate this and get back to you.\r\n\r\n", + "created_at": "2024-05-09T23:24:22Z", + "author": "sronilsson" + }, + { + "body": "... @AndyWeasley2004 One thing you could help me with:\r\n\r\nIf you try it on fewer cores - e.g., 5 - does it run? \r\n\r\nOr does it run if you build the model with fewer trees/estimators? Say 200? \r\n\r\nIt could be a RAM related issue if those things fix it.", + "created_at": "2024-05-09T23:29:11Z", + "author": "sronilsson" + }, + { + "body": "I will try that when I go back to work. Thank you for your information!", + "created_at": "2024-05-09T23:38:59Z", + "author": "AndyWeasley2004" + }, + { + "body": "👍🏻 I googled around and yes, the issue is discussed [here](https://stackoverflow.com/a/47776649) as a possible memory error. \r\n\r\nI don't know how much data you have, but if the above doesn't fix it, check if it would run with less data (fewer, or smaller, files inside the `project_folder/csv/targets_inserted` directory). If it runs, that would also suggest it is memory related.", + "created_at": "2024-05-09T23:57:35Z", + "author": "sronilsson" + } + ] + }, + { + "title": "SHAP visualization failed", + "body": "I have run Simba with videos of a single animal and a single behavior to classify.\r\n\r\nI have created three model settings saved in the configs folder and clicked to run all specific models. After the SHAP scores are calculated, the error says the SHAP visualizations/aggregate stats failed. \r\nMy python is 3.6 and the computer is using Ubuntu 22.04.3.\r\n\r\nThe error message is here:\r\n![IMG_0930](https://github.com/sgoldenlab/simba/assets/110661816/9c8c993c-502d-4225-9048-3b44e98de14a)\r\n", + "user": "AndyWeasley2004", + "reaction_cnt": 0, + "created_at": "2024-05-08T17:36:26Z", + "updated_at": "2024-05-28T20:51:42Z", + "author": "AndyWeasley2004", + "comments": [ + { + "body": "In case it's also important: we have created a custom 6 body-part configuration for this project.", + "created_at": "2024-05-08T17:42:02Z", + "author": "AndyWeasley2004" + }, + { + "body": "Hi @AndyWeasley2004 - many thanks for reporting 👍🏻 one question before I dig: which version of simba are you running - what do you see when running `pip show simba-uw-tf-dev`?\r\n", + "created_at": "2024-05-08T17:45:09Z", + "author": "sronilsson" + }, + { + "body": "Hi, thank you for your help in advance. I'm using 1.88.6 of Simba.", + "created_at": "2024-05-08T17:49:05Z", + "author": "AndyWeasley2004" + }, + { + "body": "Yes cheers, I will see if I can recreate. It should just be a warning msg telling you that you cannot create graphics showing aggregate binned SHAP values (which is reserved for non-custom body-parts), but there seems to be a bug in the warning msg.", + "created_at": "2024-05-08T17:57:33Z", + "author": "sronilsson" + }, + { + "body": "@AndyWeasley2004 - if you upgrade to the latest version of SimBA with `pip install simba-uw-tf-dev --upgrade` and try it again, how does it look on your end? 
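\r\n\r\nIn the meantime, if you want to sanity-check that SHAP itself runs on your model outside of the SimBA GUI, a minimal sketch (file paths hypothetical) would be:\r\n\r\n```python\r\nimport pickle\r\nimport pandas as pd\r\nimport shap\r\n\r\n# Hypothetical paths to a trained SimBA model and a features file\r\nwith open('models/generated_models/my_classifier.sav', 'rb') as f:\r\n    clf = pickle.load(f)\r\nfeatures = pd.read_csv('features_extracted.csv', index_col=0).head(100)\r\n\r\nexplainer = shap.TreeExplainer(clf)\r\nshap_values = explainer.shap_values(features)  # one array per class\r\n```\r\n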
", + "created_at": "2024-05-08T18:16:31Z", + "author": "sronilsson" + }, + { + "body": "Sorry for the late response. I have use a toy example to quickly get the result, and the model can successfully save and just the visualization is still skipped, but I think it solve my issue already! Thank you for your help\r\n![IMG_0944](https://github.com/sgoldenlab/simba/assets/110661816/ff665933-0a13-400e-acc8-88bd7b096c9e)\r\n", + "created_at": "2024-05-09T20:28:17Z", + "author": "AndyWeasley2004" + }, + { + "body": "That's it, thank you for letting me know @AndyWeasley2004 ! The visualization is meant to be a plot like [THIS ONE](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.plotting.html#simba.plotting.shap_agg_stats_visualizer.ShapAggregateStatisticsVisualizer). However, if you have a user-defined pose-estimation schema, we don't make any assumptions even about which species you have, so it's not possible to bucket the features easily into categories, so that part is skipped.", + "created_at": "2024-05-09T20:35:40Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Problem analyzing ROI data Aggregates", + "body": "**Describe the bug**\r\nAfter upgrading to the new version of SIMBA (1.90.7), when I tried to redo some ROI analyses, I got the error that I copy at the end. I tried to redo the project from 0 and I still get the same error. \r\nError: \r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Usuario\\anaconda3\\envs\\simba-latest\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\Usuario\\anaconda3\\envs\\simba-latest\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 308, in \r\n self.run_frm, text=title, fg=\"blue\", command=lambda: run_function()\r\n File \"C:\\Users\\Usuario\\anaconda3\\envs\\simba-latest\\lib\\site-packages\\simba\\ui\\pop_ups\\roi_analysis_pop_up.py\", line 104, in run\r\n roi_analyzer.run()\r\n File \"C:\\Users\\Usuario\\anaconda3\\envs\\simba-latest\\lib\\site-packages\\simba\\roi_tools\\ROI_analyzer.py\", line 186, in run\r\n location_2=np.array([center_x, center_y]),\r\nTypeError: not enough arguments: expected at least 3, got 2\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. simba\r\n2. roi window\r\n3.click on analyse roi data: aggregates\r\n4. See error\r\n\r\n**Expected behavior**\r\nUsually it runs the analysis and then creates the csv file with the ROI data\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11\r\n - Python Version: 3.6.0 \r\n - Are you using anaconda: Yes\r\n \r\n", + "user": "Monica9577", + "reaction_cnt": 0, + "created_at": "2024-04-26T10:06:28Z", + "updated_at": "2024-05-10T22:05:14Z", + "author": "Monica9577", + "comments": [ + { + "body": "Thanks @Monica9577 - Can you do me a massive favor and share your `/project_folder/logs/measures/ROI_definitions.h5` file with me? \r\n\r\nI was just looking at this the other day but appears to have some issues with circles not beeing stored correctly. \r\n", + "created_at": "2024-04-26T10:25:08Z", + "author": "sronilsson" + }, + { + "body": "Of course, here it goes!\r\n\r\n[ROI_definitions.zip](https://github.com/sgoldenlab/simba/files/15129475/ROI_definitions.zip)\r\n\r\n\r\n\r\n", + "created_at": "2024-04-26T10:39:55Z", + "author": "Monica9577" + }, + { + "body": "@Monica9577 - thanks! 
Let me know how it looks with circles in version `1.90.8`, after updating. ", + "created_at": "2024-04-26T11:36:49Z", + "author": "sronilsson" + }, + { + "body": "Good morning \r\nAfter upgrading to the latest version it now allows me to analyze the ROI. However, when checking the final video, you can see how at a specific moment of the video there is a cut (which is not in the original video) and from there it starts to count a “ghost” animal.\r\n\r\nI send you the original video and the video generated by simba.\r\nThis cut can be seen at second 1:25\r\n\r\nAfter checking both videos, I have seen that the results would be correct, since the position estimates that are tracked correspond to the original animal.", + "created_at": "2024-04-28T18:30:56Z", + "author": "Monica9577" + }, + { + "body": "I'm attaching the videos I commented on github, as they are too big to send them via github.\r\n NOR ENCODING FExMP8 ROI.zip\r\n", + "created_at": "2024-04-28T18:32:54Z", + "author": "Monica9577" + }, + { + "body": "Hi again, \r\n\r\nAfter checking the results csv, it seems it is only analyzing the first video and not all of them", + "created_at": "2024-04-28T18:46:05Z", + "author": "Monica9577" + }, + { + "body": "Thanks @Monica9577 - I made a lot of changes over the beginning of the week, and it sounds like a few bugs sneaked in. I appreciate your help and I will get them fixed and get back to you.", + "created_at": "2024-04-28T19:12:59Z", + "author": "sronilsson" + }, + { + "body": "I've requested access to the Gdrive file @Monica9577 ", + "created_at": "2024-04-28T19:28:04Z", + "author": "sronilsson" + }, + { + "body": "Yes, I have approved the request\r\n", + "created_at": "2024-04-28T19:29:16Z", + "author": "Monica9577" + }, + { + "body": "Nice! I found the issue about the missing ROI CSV data. If you update to the latest version, how does it look on your end?\r\n\r\nFor the visualization, I can see the problem in your videos (thank you for sharing) but I cannot recreate it on my end. Can you share with me a screenshot of how your selections look in the ROI visualization pop-up menu before you click to run the visualization? ", + "created_at": "2024-04-28T19:43:11Z", + "author": "sronilsson" + }, + { + "body": "The csv is perfect now :)\r\n\r\nAbout the screenshot\r\nHere it goes!\r\n(I'm still having the same problem with the generated videos in the new version)\r\n[image: image.png]
", + "created_at": "2024-04-28T20:08:12Z", + "author": "Monica9577" + }, + { + "body": "Good about the CSV! But I can't see the screengrab for some reason.", + "created_at": "2024-04-28T20:26:58Z", + "author": "sronilsson" + }, + { + "body": "... possibly because the screenshot image has been attached to an email rather than in the GitHub issue thread?", + "created_at": "2024-04-28T20:39:00Z", + "author": "sronilsson" + }, + { + "body": "![Captura de pantalla 2024-04-28 215221](https://github.com/sgoldenlab/simba/assets/160180401/0a74d063-d647-4fcc-be55-bcdadcd745fc)\r\n", + "created_at": "2024-04-28T20:47:55Z", + "author": "Monica9577" + }, + { + "body": "Ah it's \"ROI features\"... thank you, let me try that.", + "created_at": "2024-04-28T20:54:19Z", + "author": "sronilsson" + }, + { + "body": "Hi @Monica9577 - can you let me know if you still see the issue in the ROI feature visualization in version `1.91.5`? \r\n\r\nI can't reproduce the error, but I can see how it would be possible for it to happen earlier if the index of the file `/project_folder/csv/outlier_corrected_movement_location/NOR ENCODING FExMP8.csv` was not sequential or it contained duplicate values in the index. \r\n\r\nIf it is still causing an issue, would you mind sharing the `/project_folder/csv/outlier_corrected_movement_location/NOR ENCODING FExMP8.csv` file with me?\r\n\r\nThanks!\r\nSimon", + "created_at": "2024-04-29T19:21:50Z", + "author": "sronilsson" + }, + { + "body": "Yes, unfortunately it's not working... \r\nI'm sending you the corrected location csv\r\n\r\nThanks in advance\r\n[NOR ENCODING FExMP8.csv](https://github.com/sgoldenlab/simba/files/15224510/NOR.ENCODING.FExMP8.csv)\r\n\r\n", + "created_at": "2024-05-06T17:47:19Z", + "author": "Monica9577" + }, + { + "body": "I did a super quick dirty test - but I just can't see the jump between the video and tracking data :( https://drive.google.com/file/d/1xSvjJpxgBrgNmdxFlODjYJOMfrG_olLi/view?usp=sharing\r\n\r\n\r\nCan you do me one favor and just check which version of SimBA you have with `pip show simba-uw-tf-dev` and paste me what you see printed? 
\r\n\r\n\r\n", + "created_at": "2024-05-06T18:41:47Z", + "author": "sronilsson" + }, + { + "body": "Name: Simba-UW-tf-dev\r\nVersion: 1.91.5\r\nSummary: Toolkit for computer classification of behaviors in experimental\r\nanimals\r\nHome-page: https://github.com/sgoldenlab/simba\r\nAuthor: Simon Nilsson, Jia Jie Choong, Sophia Hwang\r\nAuthor-email: ***@***.***\r\nLicense: GNU Lesser General Public License v3 (LGPLv3)\r\nLocation: c:\\users\\monip\\anaconda3\\envs\\new_simba\\lib\\site-packages\r\nRequires: joblib, geos, dash-colorscales, pandas, scipy, dash-color-picker,\r\nh5py, imblearn, numba, shap, shapely, graphviz, tqdm, imutils, eli5,\r\npyarrow, tables, matplotlib, seaborn, ffmpeg-python, statsmodels,\r\ncefpython3, plotly, dash, imgaug, yellowbrick, xlrd, kaleido, scikit-image,\r\ntrafaret, pyyaml, dtreeviz, xgboost, scikit-learn, dash-html-components,\r\nnumpy, psutil, Pillow, numexpr, tabulate, dash-core-components,\r\nopencv-python\r\nRequired-by:\r\n\r\nI did everything again and stil happening... Either with square or circle\r\nROIs.\r\nI also tried with another set of videos, and I have the same problem.\r\n\r\nAlso this error warning appears when creating the ROI features video\r\n\r\nSIMBA WARNING: FrameRangeWarning: The video\r\nE:/SIMBA/PROJECTS\\NOR_Circle_PUPS_Trial\\project_folder\\videos\\NOR ENCODING\r\nFExMP8.mp4 has 18000 frames, but the associated data file for this video\r\nhas 18001 rows\r\n\r\n\r\nLibre\r\nde virus.www.avast.com\r\n\r\n<#DAB4FAD8-2DD7-40BB-A1B8-4E2AA1F9FDF2>\r\n\r\nEl lun, 6 may 2024 a las 20:42, Simon Nilsson ***@***.***>)\r\nescribió:\r\n\r\n> I did a super quick dirty test - but I just can't see the jump between the\r\n> video and tracking data :(\r\n> https://drive.google.com/file/d/1xSvjJpxgBrgNmdxFlODjYJOMfrG_olLi/view?usp=sharing\r\n>\r\n> Can you do me one favor and just check which version of SimBA you have\r\n> with pip show simba-uw-tf-dev and paste me what you see printed?\r\n>\r\n> —\r\n> Reply to this email directly, view it on GitHub\r\n> ,\r\n> or unsubscribe\r\n> \r\n> .\r\n> You are receiving this because you were mentioned.Message ID:\r\n> ***@***.***>\r\n>\r\n", + "created_at": "2024-05-06T22:38:38Z", + "author": "Monica9577" + }, + { + "body": "Thanks @Monica9577 for testing - I will take another look tomorrow to see how this could possibly happen", + "created_at": "2024-05-06T22:45:22Z", + "author": "sronilsson" + }, + { + "body": "@Monica9577 - good news is that I was able to recreate this issue when I ran it on a PC instead of my Mac.. not I just have to figure out why that is the case... :)", + "created_at": "2024-05-07T12:38:41Z", + "author": "sronilsson" + }, + { + "body": "For whatever reason, it is some frames e.g., frame `15429` in the video `NOR ENCODING FExMP8.mp4` that does not read in properly on PC, so it brakes, while it reads in fine on Mac. Pretty odd, anyway, will continoue digging.", + "created_at": "2024-05-07T14:23:42Z", + "author": "sronilsson" + }, + { + "body": "@Monica9577 The problem is that in your video files, we could not reliably read some specific frames **when on a PC**. E.g., when SimBA tried to grab frame 15429 and draw your ROIs and body-parts etc on it, nothing came back, and it silently errored out (it was kind of hard to see as it is soo much printed out when multiprocessing). 
\r\n\r\nTo fix it, I used the menu below to \"re-encode\" your videos, basically converting your `NOR ENCODING FExMP8.mp4` into a new mp4 file, and then I used that new MP4 file in the SimBA project, and then it worked on a PC.\r\n\r\nCan you let me know if that fixes it on your end?\r\n\r\n\"image\"\r\n", + "created_at": "2024-05-07T15:40:14Z", + "author": "sronilsson" + }, + { + "body": "I converted the video to avi and then to mp4 (it didn't let me convert it from mp4 to mp4) and used that video in the same project. It didn't work.\r\nThen I created the project from the beginning. Then, this error showed up:\r\n\r\nException in Tkinter callback\r\nmultiprocessing.pool.RemoteTraceback:\r\n\"\"\"\r\nTraceback (most recent call last):\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\indexes\range.py\", line 376, in get_loc\r\n return self._range.index(new_key)\r\nValueError: 18000 is not in range\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\multiprocessing\pool.py\", line 119, in worker\r\n result = (True, func(*args, **kwds))\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\simba\plotting\ROI_feature_visualizer_mp.py\", line 89, in _roi_feature_visualizer_mp\r\n bp_cords = data_df.loc[current_frm, list(bp)].values.astype(np.int64)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\indexing.py\", line 1418, in __getitem__\r\n return self._getitem_tuple(key)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\indexing.py\", line 805, in _getitem_tuple\r\n return self._getitem_lowerdim(tup)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\indexing.py\", line 929, in _getitem_lowerdim\r\n section = self._getitem_axis(key, axis=i)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\indexing.py\", line 1850, in _getitem_axis\r\n return self._get_label(key, axis=axis)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\indexing.py\", line 160, in _get_label\r\n return self.obj._xs(label, axis=axis)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\generic.py\", line 3737, in xs\r\n loc = self.index.get_loc(key)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\pandas\core\indexes\range.py\", line 378, in get_loc\r\n raise KeyError(key)\r\nKeyError: 18000\r\n\"\"\"\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\tkinter\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\simba\ui\pop_ups\roi_features_plot_pop_up.py\", line 152, in <lambda>\r\n command=lambda: self.run(multiple=False),\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\simba\ui\pop_ups\roi_features_plot_pop_up.py\", line 247, in run\r\n threading.Thread(target=roi_feature_visualizer.run()).start()\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\site-packages\simba\plotting\ROI_feature_visualizer_mp.py\", line 380, in run\r\n for cnt, result in enumerate(pool.imap(constants, frame_range, chunksize=self.multiprocess_chunksize)):\r\n File \"C:\Users\monip\anaconda3\envs\NEW_SIMBA\lib\multiprocessing\pool.py\", line 735, in next\r\n raise value\r\nKeyError: 18000\r\n\r\nAlso this error shows up in the main simba window: \r\nSIMBA WARNING: FrameRangeWarning: The video E:/SIMBA/PROJECTS\nor_trial\project_folder\videos\NOR ENCODING FExMP8.mp4 has 18002 frames, but the associated data file for this video has 18000 rows ❗️\r\nCreating ROI feature images, multiprocessing (chunksize: 1, cores: 7)...\r\nBatch core 1/7 complete...\r\nBatch core 2/7 complete...\r\nBatch core 3/7 complete...\r\nBatch core 4/7 complete...\r\nBatch core 5/7 complete...\r\nBatch core 6/7 complete...\r\n\r\n", + "created_at": "2024-05-10T15:45:24Z", + "author": "Monica9577" + }, + { + "body": "Hi again,\r\n\r\nI just re-analyzed the video in DLC and ran the project again, and it worked for me", + "created_at": "2024-05-10T17:08:17Z", + "author": "Monica9577" + }, + { + "body": "Hi @Monica9577 thanks for letting me know. \r\n\r\nFYI I know the video format conversion is clunky (e.g., as you say, you can't convert/change mp4->MP4). However, I'm working on making the menus more flexible; they were written 4 years ago, should be done next week. ", + "created_at": "2024-05-10T22:05:13Z", + "author": "sronilsson" + } + ] + }, + { + "title": "labelling behavior", + "body": "Hi!\r\nI'm following the steps to train and run the machine model, but I got stuck in the label behavior step. When I click on _select video (create new video annotation)_, it only opens a window with the video and two options below (frame number and jump to select frame), with no lateral bar for checking behaviors, and in the main window this appears: \r\n\r\nANNOTATING VIDEO raw_clip1\r\n VIDEO INFO: {'video_name': 'raw_clip1', 'fps': 60, 'width': 1280, 'height': 720, 'frame_count': 7200, 'resolution_str': '1280 x 720', 'video_length_s': 120}\r\nUSER FRAME SELECTION(S):\r\n- ABSENT IN FRAME 0\r\nUSER FRAME SELECTION(S):\r\n- ABSENT IN FRAME 0\r\nUSER FRAME SELECTION(S):\r\n- ABSENT IN FRAME 1\r\nUSER FRAME SELECTION(S):\r\n- ABSENT IN FRAME 2\r\n...\r\nI have already extracted the frames, but the label behavior window doesn't open for me. I know there is something missing to be done, but I don't know what. \r\nHope you can help me.\r\n", + "user": "fidelDLC", + "reaction_cnt": 0, + "created_at": "2024-04-17T21:53:05Z", + "updated_at": "2024-04-30T22:14:25Z", + "author": "fidelDLC", + "comments": [ + { + "body": "Thanks for reporting @fidelDLC - yes I can help. To troubleshoot, first can you tell me:\r\n\r\ni) Which version of simba you are running - you can find out by typing `pip show simba-uw-tf-dev` in the environment where SimBA is installed.\r\n\r\nii) When this happens, do you see any error msg in the operating system terminal where you launched SimBA from?
\r\n\r\nThanks!\r\n\r\n", + "created_at": "2024-04-17T22:36:36Z", + "author": "sronilsson" + }, + { + "body": "This is the SimBA version: \r\nName: Simba-UW-tf-dev\r\nVersion: 1.89.6\r\n\r\nI launch SimBA from Anaconda Powershell and nothing appears when the annotation interface opens \r\n![imagen_2024-04-17_181749810](https://github.com/sgoldenlab/simba/assets/167367948/8e15c81f-76b9-4c7f-a4b3-73b0b32a9115)\r\n\r\nI looked for more detailed information and extracted video frames to continue with this step: _under Label Behavior click on Select folder with frames_, but I found no such button, and the closest one was _IMPORT FRAMES DIRECTORY TO SIMBA PROJECT_ in the _Further imports_ section. So I selected the frame folder, and this appears in the main SimBA window: \r\n\r\nSIMBA DIRECTORY ALREADY EXIST ERROR: SIMBA ERROR: C:/Users/fisio/Programs\prueba4\project_folder\frames\input\raw_clip1 already exist in SimBA project. 🚨\r\n\r\nAnd the same in the Anaconda Powershell:\r\n\r\n File \"C:\Users\fisio\anaconda3\envs\SIMBA\lib\site-packages\simba\video_processors\video_processing.py\", line 1657, in copy_img_folder\r\n source=copy_img_folder.__name__,\r\nsimba.utils.errors.DirectoryExistError: SIMBA DIRECTORY ALREADY EXIST ERROR: SIMBA ERROR: C:/Users/fisio/Programs\prueba4\project_folder\frames\input\raw_clip1 already exist in SimBA project.\r\n\r\nI know this folder already exists in my SimBA files and it cannot be imported, so at this step I am completely lost.\r\n\r\nThanks.", + "created_at": "2024-04-18T01:29:42Z", + "author": "fidelDLC" + }, + { + "body": "Hi @fidelDLC!\r\n\r\nFirst - can you give me a link to the documentation/tutorial you are following? I ask as there is **no need to extract frames** to perform labelling (although this was a requirement 5ish years ago), so you may be looking at documentation that is old and should be removed/updated. Sorry about that. To label behavior, try following [THIS](https://github.com/sgoldenlab/simba/blob/master/docs/label_behavior.md) documentation and let me know how it goes.\r\n\r\nWhen you click the button to create a new video annotation, select a video file inside the `project_folder/videos` directory of your SimBA project: \r\n\r\n\"image\"\r\n\r\nSimon\r\n", + "created_at": "2024-04-18T12:37:54Z", + "author": "sronilsson" + }, + { + "body": "Hi!\r\n[This](https://github.com/sgoldenlab/simba/blob/master/docs/tutorial.md#step-4-extract-frames-into-project-folder) is the documentation I followed. I started a new project, and in the label behavior part I followed the documentation you sent me, and this happened in the SimBA terminal: \r\n![image](https://github.com/sgoldenlab/simba/assets/167367948/33c0f471-ae80-403e-973b-9427105b9b4d)\r\nand the video annotation interface is the one that I sent you before. The thing is that I want to figure out what I have to do to get this window to open:\r\n![image](https://github.com/sgoldenlab/simba/assets/167367948/ac78547f-41f8-46f6-98e2-4a744a444f69)\r\n\r\nSorry if I'm not clear enough. :S\r\n", + "created_at": "2024-04-19T20:28:02Z", + "author": "fidelDLC" + }, + { + "body": "Hi @fidelDLC! I kind of understand - when you click to open the annotation interface, you only see the left portion of the annotation interface, and the right portion is not showing appropriately. \r\n\r\nIf the right portion is not showing, it is likely due to an error that is not caught appropriately.
This error may be shown in the operating system terminal where you typed `simba` to launch SimBA. \r\n\r\nI only have a Mac available at the moment, so it will look slightly different aesthetically if you are on Windows. But in the video below I launch the annotation interface. Right at the end, I go back to the terminal which I used to launch SimBA - do you see anything printed in this terminal after the right portion of the annotation interface fails to launch? \r\n\r\nhttps://github.com/sgoldenlab/simba/assets/34761092/7aec4cb4-3d30-4a4e-9b5c-dbafb84ec043\r\n", + "created_at": "2024-04-20T15:05:32Z", + "author": "sronilsson" + }, + { + "body": "Hi! There is no error in the terminal:\r\n![image](https://github.com/sgoldenlab/simba/assets/167367948/5ce523ca-298c-41d2-ac4f-1094fbe13638)\r\nMaybe the problem is that I'm not using pose estimation data from one of the default configurations in SimBA, and I created a user-defined pose configuration, but I have the .csv corresponding to the video. I'm not sure. \r\n\r\nThanks :S \r\n", + "created_at": "2024-04-22T18:05:22Z", + "author": "fidelDLC" + }, + { + "body": "Hi @fidelDLC - thanks for trying - no, that should not cause any issues. It's tricky to troubleshoot without error msgs. Is there any chance you could share your project with only a single video, through a gdrive or similar, and I can give it a try and hopefully recreate the error on my end? ", + "created_at": "2024-04-22T18:14:05Z", + "author": "sronilsson" + }, + { + "body": "These are the [files](https://drive.google.com/drive/folders/1f3VkFmhi84wX3b6eAYFqli-h9n4VmLqD?usp=drive_link) I'm using. They are from another github repo, because I want to learn how to use SimBA before running my project (I'm starting at the lab). I'm gonna try with another video that I just took today.\r\n\r\nThank you!", + "created_at": "2024-04-22T20:33:41Z", + "author": "fidelDLC" + }, + { + "body": "Thanks, I will take a look, I've requested access", + "created_at": "2024-04-22T23:47:31Z", + "author": "sronilsson" + }, + { + "body": "I'm sorry, I've already given you access. :D", + "created_at": "2024-04-23T17:30:05Z", + "author": "fidelDLC" + }, + { + "body": "I am not sure why, but when I click on the link I see this?\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/93c5a03c-40f3-45d7-8c92-9d09045dffac)\r\n", + "created_at": "2024-04-23T18:01:26Z", + "author": "sronilsson" + }, + { + "body": "My fault, that's it. ", + "created_at": "2024-04-23T18:06:24Z", + "author": "fidelDLC" + }, + { + "body": "Got it!", + "created_at": "2024-04-23T18:19:19Z", + "author": "sronilsson" + }, + { + "body": "I still don't know - I'm sorry, we have to do some more troubleshooting; it loads on my end... \r\n\r\n\"image\"\r\n\r\nCan you send me the project_config.ini associated with the project? \r\n\r\nCan you also send me the CSV associated with raw_clip1 from the `project_folder/csv/features_extracted` directory of your SimBA project? ", + "created_at": "2024-04-24T17:16:09Z", + "author": "sronilsson" + }, + { + "body": "Hi, thank you! \r\nThe files are already in the drive.
:D", + "created_at": "2024-04-24T19:22:06Z", + "author": "fidelDLC" + }, + { + "body": "Got it, still can't recreate it but will try a few more things and get bacj tou you - btw you have some great tracking going on this guy, do you mind if I use this data you shared for some example videos?", + "created_at": "2024-04-25T15:30:26Z", + "author": "sronilsson" + }, + { + "body": "Those are not my files, I took them from another program that I tried to install called B-SOID, but there wasn't enough information and I couldn't move forward, then I found SimBa. It is also an open source tracking program so I think there is no problem if you want to use it. Those files were made by DeepLabCut.\r\n", + "created_at": "2024-04-25T22:21:54Z", + "author": "fidelDLC" + }, + { + "body": "Alright, thanks!\r\n\r\nI cannot replicate the error.. so just one question to be sure (as there are no error msgs). The video is large in pixels, are you sure the panels are not hidden somehow, outside the viewing area of your monitor if it is smaller, and you have to drag the window to see it as below?\r\n\r\n\r\nhttps://github.com/sgoldenlab/simba/assets/34761092/ed8ca30b-4a16-4f5f-a22e-4fdca8618321\r\n\r\n\r\n", + "created_at": "2024-04-26T15:44:13Z", + "author": "sronilsson" + }, + { + "body": "That was the frist thing I thought because the window is so large, but I move it looking for hidden panels and found nothing... But now, I realized that in your video the image is bigger than mine in the right side where the options are suppose to be. Probably you are right, but I cannot expand it more. Gonna look for solutions. Any suggestion? hehe\r\n\r\nThank you :3\r\n![image](https://github.com/sgoldenlab/simba/assets/167367948/84e45817-a6f6-4297-8968-0a9a46ad90bb)\r\n", + "created_at": "2024-04-29T20:17:41Z", + "author": "fidelDLC" + }, + { + "body": "Absolutely my bad, I changed my monitor and now I have the full view of the annotaton interface.\r\nThank you!! \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/167367948/b92813ef-a42a-4187-b306-071a681b17c7)\r\n", + "created_at": "2024-04-30T00:35:19Z", + "author": "fidelDLC" + }, + { + "body": "Ah! Cool. I was going to suggest maybe change the resolution in [Microsoft Windows display settings](https://www.intel.com/content/www/us/en/support/articles/000023767/graphics.html), or maybe use videos with smaller resolution thats better suited for the monitor, `raw_clip1.mp4` is 1280 x 720, maybe [downsample](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#customize-resolution) it before annotating.", + "created_at": "2024-04-30T12:09:29Z", + "author": "sronilsson" + }, + { + "body": "I'm gonna take it into consideration. Thank you so much for your help. Hope to be able to handle the next steps from now on. \r\n:D \r\n ", + "created_at": "2024-04-30T22:14:24Z", + "author": "fidelDLC" + } + ] + }, + { + "title": "Segementation fault when creating a new project", + "body": "When I create a new project and try to browse for the project directory or add predictive classifiers simba crashes with a \"zsh: segmentation fault simba\". 
I can write in the text boxes and change the drop-down menus under animal settings without issue.\r\n\r\nOS: Sonoma 14.4.1 macOS.\r\nPython version: 3.10\r\nNot using anaconda", + "user": "idawettergren", + "reaction_cnt": 0, + "created_at": "2024-04-17T11:54:12Z", + "updated_at": "2024-04-26T11:39:31Z", + "author": "idawettergren", + "comments": [ + { + "body": "Never mind, updated to simba 1.90.3 and now it works", + "created_at": "2024-04-17T11:59:17Z", + "author": "idawettergren" + }, + { + "body": "Thanks for reporting @idawettergren !", + "created_at": "2024-04-17T12:54:22Z", + "author": "sronilsson" + }, + { + "body": "I unfortunately still have the same issue. After updating simba I could create a new project, upload videos and an .slp file, but when I pressed c to assign the tracks the program crashed with another zsh: segmentation fault, and then kept crashing with the same error when I tried to browse for the project configuration file", + "created_at": "2024-04-18T16:34:11Z", + "author": "idawettergren" + }, + { + "body": "Thanks @idawettergren - I don't have access to a machine running Sonoma 14.4.1 Mac OS - closest is Ventura 13.4. I tested importing multi-animal sleap H5 on there using python 3.10 and 3.6, and I did hit a related error in python 3.10 - not a segmentation fault, but \"[Python app crashed with \"PyEval_RestoreThread: the function must be called with the GIL held, but the GIL is released](https://stackoverflow.com/questions/66795159/python-app-crashed-with-pyeval-restorethread-the-function-must-be-called-with)\" - that I should look into, but it ran in python 3.6. \r\n\r\nIf you use python 3.6 - how does it run on your end?", + "created_at": "2024-04-18T18:10:59Z", + "author": "sronilsson" + }, + { + "body": "If I install and run simba in a venv environment with python 3.6.15 I still get the same error. I also for some reason still get the warning on the homepage that I'm using python 3.10", + "created_at": "2024-04-26T09:00:44Z", + "author": "idawettergren" + }, + { + "body": "Hi @idawettergren ! Yes, the warning message makes me think it is still possible it isn't running 3.6. \r\n\r\nIf you activate the venv and run `python --version`, as below, do you see python 3.6? \r\n\r\n\"image\"\r\n", + "created_at": "2024-04-26T11:22:59Z", + "author": "sronilsson" + }, + { + "body": "Yes, I see \"Python 3.6.15\". Without changing anything it is now working again, even if I still see the warning message. Will update here when I manage to start the training (or if it crashes again before then)", + "created_at": "2024-04-26T11:28:01Z", + "author": "idawettergren" + }, + { + "body": "If it crashes, would you mind trying it out in conda and I can troubleshoot with you? I don't work much in venv, and it might be that you have to troubleshoot venv with me at the same time as I troubleshoot the segmentation fault with you :)", + "created_at": "2024-04-26T11:32:25Z", + "author": "sronilsson" + }, + { + "body": "Yes of course, I can switch to conda if it crashes again. Will come back with an update asap (probably not today as I'm uploading multiple slp-files and it looks like it will take some time)", + "created_at": "2024-04-26T11:38:58Z", + "author": "idawettergren" + } + ] + }, + { + "title": "SIMBA Crashing on V 1.89.4 when using GPU to Downsample Videos", + "body": "Simba (1.89.4) will sometimes crash or refuse to downsample videos when the \"use gpu\" option is selected. No issues arise when the \"use gpu\" button is not selected.
The first time I tried to downsample videos with GPU selected, it took over twice the amount of time compared to using CPU. Another attempt led to a crash. Videos are all AVI format. No other manipulation was done other than attempting to downsample. Any idea why this happens or how to fix it? Thanks!", + "user": "Jsn-zhang", + "reaction_cnt": 0, + "created_at": "2024-04-16T00:15:00Z", + "updated_at": "2024-04-16T00:26:43Z", + "author": "Jsn-zhang", + "comments": [ + { + "body": "Thanks for reporting @Jsn-zhang! Yes, it would be helpful to see the error msg if you can recreate it. To troubleshoot - if you are trying to recreate the error, it's helpful to have one or a couple of short videos at hand.\r\n\r\nAlso to troubleshoot, in case it has something to do with the FFMPEG commands when using AVI + GPU: if you convert your videos to mp4's using this menu below and then downsample, is it just as slow / does it error out?\r\n\r\n\"image\"\r\n", + "created_at": "2024-04-16T00:26:42Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Error when creating Validation video [out#0/mp4 @ 00000294EB792500] ", + "body": "Hi, can anyone please help me find out how to resolve this issue:\r\nWhen I press create validation video, it creates this error message in the terminal:\r\n\r\n[out#0/mp4 @ 00000294EB792500] Output file does not contain any stream\r\nError opening output file C:/Users/-/Desktop/Test/SimBA\test1\project_folder\frames\output\validation\Trial5downsampled.mp4.\r\nError opening output files: Invalid argument\r\n\r\nWeirdly, the CPU fans are still running hard after this error comes, until I close simba.\r\n\r\nffmpeg is installed and added to PATH. \r\n\r\nIt creates a folder in output > validation > temp with a few mp4 files that can't be opened, and a log saying: \r\nfile 'C:\Users\-\Desktop\Test\SimBA\test1\project_folder\frames\output\validation\temp\0.mp4'\r\nfile 'C:\Users\-\Desktop\Test\SimBA\test1\project_folder\frames\output\validation\temp\1.mp4'\r\n... and so on.\r\n\r\nThanks a lot for any help!", + "user": "beepboop174", + "reaction_cnt": 0, + "created_at": "2024-04-04T09:03:19Z", + "updated_at": "2024-04-12T15:41:30Z", + "author": "beepboop174", + "comments": [ + { + "body": "Hi @beepboop174! - which version of Simba are you running - `pip show simba-uw-tf-dev`? ", + "created_at": "2024-04-04T09:48:32Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson, thanks for the quick response! I use 1.87.5.\r\n", + "created_at": "2024-04-04T09:58:14Z", + "author": "beepboop174" + }, + { + "body": "Thanks @beepboop174 - can you check in the latest version of simba with `pip install simba-uw-tf-dev --upgrade` if you're still seeing the same issue? If you do, I can dig to try and replicate the issue.", + "created_at": "2024-04-04T10:04:02Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson First of all, this makes sense, right?\r\n![image](https://github.com/sgoldenlab/simba/assets/165764972/82b0c86d-3d2e-4aef-9734-a8d1d02921e4)\r\n\r\nSo, I updated it and tried it with the same settings as always (multicore processing, 10 cores) and still the same issue.\r\nBtw I'm running this on an Intel i9, 10 cores.
And for some reason after the error, the SimBA GUI is unresponsive, CPU usage is at like 10-20% and the fans go crazy, so it does seem like it's doing something, not sure what though.\r\n\r\n[Parallel(n_jobs=20)]: Using backend ThreadingBackend with 20 concurrent workers.\r\n[Parallel(n_jobs=20)]: Done 10 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 160 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 410 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 760 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=20)]: Done 1210 tasks | elapsed: 0.3s\r\n[Parallel(n_jobs=20)]: Done 1760 tasks | elapsed: 0.5s\r\n[Parallel(n_jobs=20)]: Done 2000 out of 2000 | elapsed: 0.5s finished\r\nTrailing option(s) found in the command: may be ignored.\r\nInput #0, concat, from 'C:\Users\taylorma\Desktop\Test\SimBA\test1\project_folder\frames\output\validation\temp\files.txt':\r\n Duration: N/A, bitrate: N/A\r\nOutput #0, mp4, to 'C:/Users/taylorma/Desktop/Test/SimBA\test1\project_folder\frames\output\validation\Trial1downsampled.mp4':\r\n[out#0/mp4 @ 000002B0A70EE0C0] Output file does not contain any stream\r\nError opening output file C:/Users/taylorma/Desktop/Test/SimBA\test1\project_folder\frames\output\validation\Trial1downsampled.mp4.\r\nError opening output files: Invalid argument\r\n\r\nNow, the weird thing is that it's very different when I disable multicore processing. It also doesn't work, but for a different reason. It loads super fast and I get this message: \r\n\r\n[Parallel(n_jobs=20)]: Using backend ThreadingBackend with 20 concurrent workers.\r\n[Parallel(n_jobs=20)]: Done 10 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 160 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 410 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 760 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=20)]: Done 1210 tasks | elapsed: 0.3s\r\n[Parallel(n_jobs=20)]: Done 1760 tasks | elapsed: 0.5s\r\n[Parallel(n_jobs=20)]: Done 2000 out of 2000 | elapsed: 0.5s finished\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\tkinter\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\mixins\pop_up_mixin.py\", line 308, in <lambda>\r\n self.run_frm, text=title, fg=\"blue\", command=lambda: run_function()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\ui\pop_ups\validation_plot_pop_up.py\", line 169, in run\r\n validation_video_creator.run()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\plotting\single_run_model_validation_video.py\", line 320, in run\r\n self.__create_video()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\plotting\single_run_model_validation_video.py\", line 204, in __create_video\r\n self.in_df.index[frm_cnt], [x_header, y_header]\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1418, in __getitem__\r\n return self._getitem_tuple(key)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 805, in _getitem_tuple\r\n return self._getitem_lowerdim(tup)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 961, in _getitem_lowerdim\r\n return getattr(section, self.name)[new_key]\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1424, in __getitem__\r\n return self._getitem_axis(maybe_callable, axis=axis)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1839, in _getitem_axis\r\n return self._getitem_iterable(key, axis=axis)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1133, in _getitem_iterable\r\n keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1092, in _get_listlike_indexer\r\n keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1177, in _validate_read_indexer\r\n key=key, axis=self.obj._get_axis_name(axis)\r\nKeyError: \"None of [Index(['Animal 1_Ear_left_1_x', 'Animal 1_Ear_left_1_y'], dtype='object')] are in the [index]\"\r\n\r\nThe CPU fans are silent and the SimBA GUI is responsive. I get an mp4 file in the frames>output>validation folder, but it's 5.54kb and doesn't work.\r\n\r\nBy the way, when I do a validation plot everything works fine and seems good:\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/165764972/6569657c-1a8a-4fbb-9241-c49b6ef04bf8)\r\n\r\nSo yeah, seems like two different issues, one with multicore processing and one without.\r\n", + "created_at": "2024-04-04T10:54:58Z", + "author": "beepboop174" + }, + { + "body": "Thanks @beepboop174 - yes, two different errors, and I will look into and catch the errors, but just in brief:\r\n\r\nThe first error typically happens when we say we want to create, say, a 100x100 pixel video, but then we start to stack 101x105 frames into that video. OpenCV and FFmpeg don't complain, the video appears to be created, but if you look closely, the videos are all 0-1kb. Next, we want to join all those small bits of video that have been created on each core in parallel into a single video. That's when the error pops up - can't concatenate videos that don't have any frames! \r\n", + "created_at": "2024-04-04T11:05:32Z", + "author": "sronilsson" + }, + { + "body": "Hi @beepboop174, thanks again for reporting this. \r\n\r\nUnfortunately I could not recreate your issues, but I went over the code.. \r\n\r\nI inserted some checks to ensure that any misalignment in the video and frame sizes is corrected for, to control for events where there are different numbers of frames in the pose-estimation data and the videos, etc., plus checks for all the arguments going into the functions to ensure they are valid, for my one and two animal test projects.
\r\n\r\n**Could you update simba again using `pip install simba-uw-tf-dev --upgrade`, and let me know how it looks when you run it on your end?** If not, could you send me the traceback errors you see again?\r\n\r\nSimon", + "created_at": "2024-04-04T17:33:02Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson, thanks again. Just tried it, and the error message I got isn't there anymore, but it's still not working :( \r\n\r\nWithout multicore processing:\r\n(SimBA) C:\Users\taylorma>simba\r\n[Parallel(n_jobs=20)]: Using backend ThreadingBackend with 20 concurrent workers.\r\n[Parallel(n_jobs=20)]: Done 10 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 160 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=20)]: Done 410 tasks | elapsed: 0.1s\r\n[Parallel(n_jobs=20)]: Done 760 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=20)]: Done 1210 tasks | elapsed: 0.3s\r\n[Parallel(n_jobs=20)]: Done 1760 tasks | elapsed: 0.5s\r\n[Parallel(n_jobs=20)]: Done 2000 out of 2000 | elapsed: 0.6s finished\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\tkinter\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\mixins\pop_up_mixin.py\", line 308, in <lambda>\r\n self.run_frm, text=title, fg=\"blue\", command=lambda: run_function()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\ui\pop_ups\validation_plot_pop_up.py\", line 178, in run\r\n threading.Thread(target=validation_video_creator.run()).start()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\plotting\single_run_model_validation_video.py\", line 227, in run\r\n self.data_df.index[frm_cnt], [x_header, y_header]\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1418, in __getitem__\r\n return self._getitem_tuple(key)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 805, in _getitem_tuple\r\n return self._getitem_lowerdim(tup)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 961, in _getitem_lowerdim\r\n return getattr(section, self.name)[new_key]\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1424, in __getitem__\r\n return self._getitem_axis(maybe_callable, axis=axis)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1839, in _getitem_axis\r\n return self._getitem_iterable(key, axis=axis)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1133, in _getitem_iterable\r\n keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1092, in _get_listlike_indexer\r\n keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1177, in _validate_read_indexer\r\n key=key, axis=self.obj._get_axis_name(axis)\r\nKeyError: \"None of [Index(['Animal 1_left_ear_x', 'Animal 1_left_ear_y'], dtype='object')] are in the [index]\"\r\n\r\nI think the problem may be that in the features_extracted files etc., the body part names are represented with different names, like \"center\", while in my DLC H5 pose file it's called \"back\", for example. But changing the project_bp_names file didn't help.\r\nThen I tried making an entirely new project, but that also didn't help - same error (it also shows up when training the model). Then I deleted all files like extracted frames or movement_outliers_, changed the bp_name log and re-ran them, but still the same error. And still the names like \"center\" are in these files, even though my names are in the bp log.\r\n\r\nSo do you think this is the issue, and how might I solve it? Create a new project again, and the first thing I do is change the bp log file?\r\n\r\nThanks for any help!", + "created_at": "2024-04-08T15:15:32Z", + "author": "beepboop174" + }, + { + "body": "Hi @beepboop174! Yes, nearly there now.. :) The fastest way to solve it, if possible, is to share a little bit of the project, enough for me to replicate the error, so I can see how this has happened, fix it, and make sure no-one else bumps into it? Perhaps on a gdrive or similar?\r\n\r\nExactly as you say, SimBA loops through the body-parts it expects in the project to place a little circle on each frame and each body-part; the first body-part it expects is the left ear for Animal 1. But in your case, it's not there, and it fails. If you are using one of the standard built-in default body-part configurations, SimBA should make sure that you have a body-part called left ear, so I am not sure how this would come about.\r\n\r\nOne possible way that comes to me as I am typing: when you select a features file to run the validation on, are you selecting a file in the `project_folder/csv/features_extracted` directory, or are you selecting a different file?\r\n\r\nSimon\r\n", + "created_at": "2024-04-08T15:38:26Z", + "author": "sronilsson" + }, + { + "body": "Hey @sronilsson, thanks a lot! Yes, here I uploaded my project files except for the videos via wetransfer: https://we.tl/t-aBodyFADfL\r\n\r\nYes, I am using the features_extracted csv files for the validation. And yes, I use the normal 2 animal 8 bodypart configuration; the body parts I labelled in maDLC are in the same order and location as the SimBA config suggests. I only have different names, like \"left_ear\" instead of \"Ear_left\". Also, btw, why is there Ear_left_x, Ear_left_y and Ear_left_p in my files?\r\n\r\nThanks! ", + "created_at": "2024-04-09T07:49:22Z", + "author": "beepboop174" + }, + { + "body": "Thanks @beepboop174 - I will take a look. At first glance, I can't see any video files - would you mind sharing one for one of the data files so I can try to replicate the error in full? \r\n\r\n**I only have different names, like \"left_ear\" instead of \"Ear_left\". Also btw why is there Ear_left_x, Ear_left_y and Ear_left_p in my files**\r\n\r\nWhen you choose one of the default body-part configurations in SimBA, SimBA will use hard-coded names for each one of your body-parts, like \"Ear_left\" instead of your name \"left_ear\".
The reason for this is to standardize the naming and let all the functions downstream know that if you want to compute anything using the left ear, then look in the \"Ear_left\" column - regardless of whether users named it \"my_LEFT_eAr\" or \"head_left_Side\", it will be forced to \"Ear_left\".", + "created_at": "2024-04-09T12:28:52Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Sure, here's the video folder: https://we.tl/t-YX4IxUyPC5 - for some reason I can't upload an entire project folder, but you should have all the pieces now.\r\n\r\nThank you so much!", + "created_at": "2024-04-09T12:50:21Z", + "author": "beepboop174" + }, + { + "body": "No problem @beepboop174 ! \r\n\r\nI can see what is happening, but not sure how this could happen - I will insert some more informative error messages at least.\r\n\r\nWe might have chatted about it above, but regardless..\r\n\r\ni) When you create a SimBA project with 16 body-parts, the project contains a CSV at `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` that lists a set of body-part names in the project. \r\n\r\nii) Your project data files indeed contained all these body-part names as listed in the expected CSV (that is produced when using a 16-body-part project). However, the expected CSV had changed, into other names. So there was a mismatch in the expected and actual column names, causing the issue. \r\n\r\nTo fix it, replace the `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` in your project with the below file (just unzip it first), and rerun your validation. Let me know how it goes please!\r\n\r\n[project_bp_names.csv.zip](https://github.com/sgoldenlab/simba/files/14919471/project_bp_names.csv.zip)\r\n\r\nNOTE: \r\n\r\nI see in a single validation video I made that your body-parts appear \"stuck\" and sometimes do not move with the rest of the animal. \r\n\r\nhttps://github.com/sgoldenlab/simba/assets/34761092/a3a8d06a-eeec-45c6-9be0-1bcb5d935ea0\r\n\r\nThis can be produced by using too stringent outlier criteria, see [POINT 3 HERE](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#25-my-pose-estimation-tracking-looks-good-when-i-visualize-it-in-the-pose-estimation-tool-however-after-i-import-it-into-simba-it-doesnt-look-good-anymore-why). \r\n\r\nTo fix it, consider skipping heuristic outlier criteria fixing if your pose is good, or alternatively setting higher outlier criteria. \r\n", + "created_at": "2024-04-09T14:03:55Z", + "author": "sronilsson" + },
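To make the name mismatch described above easy to spot, one could compare the expected body-part names against a data file's columns. A minimal sketch - generic pandas with placeholder paths, not SimBA's internal code:

```python
import pandas as pd

# Placeholder paths inside a SimBA project.
BP_NAMES_PATH = "project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv"
DATA_PATH = "project_folder/csv/features_extracted/Trial1.csv"

bp_names = pd.read_csv(BP_NAMES_PATH, header=None)[0].tolist()
data_cols = set(pd.read_csv(DATA_PATH, nrows=0).columns)

# Each body-part should appear as _x, _y and _p (probability) columns.
missing = [f"{bp}_{s}" for bp in bp_names for s in ("x", "y", "p")
           if f"{bp}_{s}" not in data_cols]
print("Missing columns:", missing if missing else "none - names are consistent")
```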
 + { + "body": "@sronilsson Thank you so much, it's finally mostly working! So far, the validation video works, and path plot, gantt plot, and heatmap work! Yet for some reason running data plot gives me this:\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\tkinter\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\ui\pop_ups\data_plot_pop_up.py\", line 115, in <lambda>\r\n command=lambda: self.__create_data_plots(multiple_videos=False),\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\ui\pop_ups\data_plot_pop_up.py\", line 221, in __create_data_plots\r\n video_setting=self.data_videos_var.get(),\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\plotting\data_plotter.py\", line 60, in __init__\r\n self.process_movement()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\plotting\data_plotter.py\", line 90, in process_movement\r\n movement_processor.run()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\data_processors\movement_calculator.py\", line 101, in run\r\n self.data_df = self.data_df[self.bp_list]\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\frame.py\", line 3001, in __getitem__\r\n indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1285, in _convert_to_indexer\r\n return self._get_listlike_indexer(obj, axis, **kwargs)[1]\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1092, in _get_listlike_indexer\r\n keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexing.py\", line 1177, in _validate_read_indexer\r\n key=key, axis=self.obj._get_axis_name(axis)\r\nKeyError: \"None of [Index(['Nose_1_x', 'Nose_1_y', 'Nose_1_p', 'Nose_2_x', 'Nose_2_y', 'Nose_2_p'], dtype='object')] are in the [columns]\"\r\n\r\nSimilarly, distance plot gives me this: \r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexes\base.py\", line 2897, in get_loc\r\n return self._engine.get_loc(key)\r\n File \"pandas/_libs/index.pyx\", line 107, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/index.pyx\", line 131, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 1607, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 1614, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\nKeyError: 'Nose_1_x'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\tkinter\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\ui\pop_ups\distance_plot_pop_up.py\", line 143, in <lambda>\r\n command=lambda: self.__create_distance_plots(multiple_videos=False),\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\ui\pop_ups\distance_plot_pop_up.py\", line 289, in __create_distance_plots\r\n distance_plotter.run()\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\simba\plotting\distance_plotter_mp.py\", line 124, in run\r\n (self.data_df[data[0] + \"_x\"] - self.data_df[data[1] + \"_x\"])\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\frame.py\", line 2995, in __getitem__\r\n indexer = self.columns.get_loc(key)\r\n File \"C:\Users\taylorma\AppData\Local\anaconda3\envs\SimBA\lib\site-packages\pandas\core\indexes\base.py\", line 2899, in get_loc\r\n return self._engine.get_loc(self._maybe_cast_indexer(key))\r\n File \"pandas/_libs/index.pyx\", line 107, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/index.pyx\", line 131, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 1607, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 1614, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\nKeyError: 'Nose_1_x'\r\n\r\nAnd: \"To fix it, consider skipping heuristic outlier criteria fixing if your pose is good, or alternatively setting higher outlier criteria.\" Thanks, yeah, that seems to be happening. My pose-data isn't amazing; what outlier criteria settings would you suggest?\r\n\r\nEdit:\r\n\r\nAlso, when trying to merge videos, there's no \"run\" button for me - any idea what that might be?\r\n![image](https://github.com/sgoldenlab/simba/assets/165764972/0835ff89-4151-4956-a253-f3cf0f712d4c)", + "created_at": "2024-04-09T14:58:24Z", + "author": "beepboop174" + }, + { + "body": "Let me check on the data plot and the run button!", + "created_at": "2024-04-09T15:24:39Z", + "author": "sronilsson" + }, + { + "body": "The missing run button is a bug that has sneaked in, thanks @beepboop174!", + "created_at": "2024-04-09T15:40:04Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Thanks for the effort, you're doing an amazing job fixing this stuff! FYI, today I re-ran everything with new outlier criteria, extracted new frames, labeled more data, etc., all in the same project that you fixed last time. Turns out that, I guess, this replaced the pose log file, and I again ran into the same issue, and again had to replace the pose log with the file you uploaded. Hope that helps with finding the issue. \r\n \r\nSo, since I can't really create a full video through the Visualization Tab due to the data plot and video merge issues, is there maybe a way to run the validation video with two classifiers at the same time? Since that's kinda what I need for a presentation soon.\r\n\r\nThanks and have a good day!", + "created_at": "2024-04-10T13:33:41Z", + "author": "beepboop174" + }, + { + "body": "Hi @beepboop174 - let me take a look at what is going on with the pose log as well. I kind of dug myself into a hole: I inserted the run button, but then I looked at the frame merging code and it is not well written - it performs a lot of unnecessary slow operations and code duplications - so I want to fix that now. \r\n\r\nTo create visualizations with multiple classifiers, run your models on your files so you have data inside the `project_folder/csv/machine_results` directory.
Next, go to the visualization tab and use this menu: \r\n\r\n\"image\"\r\n\r\nLet me know if that doesn't make sense or if you hit issues!\r\n", + "created_at": "2024-04-10T13:45:27Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson thanks a lot! Visualize classifications was the only option I didn't try haha, didn't know that was the one. Works great, thanks for the hard work!\r\n\r\nPlease give me a heads up once the merge run button works!\r\n\r\nHave a good day!", + "created_at": "2024-04-11T10:55:20Z", + "author": "beepboop174" + }, + { + "body": "Thanks for letting me know @beepboop174! The frame merging is fixed, but I have to update the pip package so you can reach it. I started looking at the distance plotting first, and when I look at code written by myself some time ago, it looks a bit \"smelly\" - I see that the code leaves doors open for errors that will not necessarily happen.. but could happen. So I will fix that too before I update. I will let you know. ", + "created_at": "2024-04-11T11:10:41Z", + "author": "sronilsson" + }, + { + "body": "Can you give it a go in the latest version with `pip install simba-uw-tf-dev --upgrade` and let me know how it works?\r\n\r\nLikely this is too much info, but it's below; ignore it, but at least it is here for myself if I have to revisit this in the future :) \r\n\r\nFor the frame merging, the issue is that we need to check and handle cases where users say they want to concatenate videos that are of different fps and different resolutions, and when they provide an uneven number of videos. Because of these checks, currently… it's still not very quick, especially when using mosaic concatenations that involve both vertical and horizontal joins:\r\n\r\n\"image\"\r\n\r\nJust a heads up: I can see how it is useful to create showcase videos, but if it is taking too long, it might be worth it to just play the videos separately side by side… it took me 5-10 min to create a vertical join of your videos.\r\n\r\nTo create distance plots, like line plots representing animal distances, it's typical to use matplotlib. To plot distances between animals for each frame of the video, though, we need to create several 1000s of plots, and we should split the images and process them in parallel on each available core so we don't have to wait forever. However, matplotlib doesn't always play nice with multiprocessing, especially on Mac computers, so I needed to dig a little for an alternative.", + "created_at": "2024-04-12T01:34:06Z", + "author": "sronilsson" + }, + { + "body": "Thanks! I get this:\r\n\r\nCreating mixed mosaic video... \r\nCreating upper right mosaic ... (Step 1/4)\r\nCreating lower right mosaic ... (Step 2/4)\r\nJoining upper and lower right mosaic... (Step 3/4)\r\nConcatenating 2 videos vertically with a 720 pixel width...\r\nVertical concatenation complete. Saved at C:/Users/taylorma/Desktop/Test/SimBA/Dos/dos/project_folder/frames/output/gantt_plots\temp_20240412133906\mosaic.mp4 (Elapsed time: 0.0629s.)\r\nJoining left and right mosaic... (Step 4/4)\r\nSIMBA VIDEO FILE ERROR: Video mosaic either does not exist or has fps of 0 (full error video path: C:/Users/taylorma/Desktop/Test/SimBA/Dos/dos/project_folder/frames/output/gantt_plots\temp_20240412133906\mosaic.mp4). \r\n", + "created_at": "2024-04-12T11:40:09Z", + "author": "beepboop174" + },
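For context on what a vertical concatenation step like the one in the log above does under the hood, a generic FFmpeg approach (not necessarily SimBA's exact command; the file names are placeholders) scales both inputs to a common width and stacks them:

```python
import subprocess

# Scale both clips to a 720px width (height auto, kept even with -2),
# then stack them vertically; vstack requires equal widths.
cmd = (
    'ffmpeg -y -i top.mp4 -i bottom.mp4 '
    '-filter_complex "[0:v]scale=720:-2[a];[1:v]scale=720:-2[b];[a][b]vstack=inputs=2" '
    'mosaic.mp4'
)
subprocess.run(cmd, shell=True, check=True)
```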
 + { + "body": "Hi @beepboop174! I tried it a few ways on your data and could not hit this error you are seeing - would you mind taking a screenshot of the GUI window filled in, so I see your choices just before clicking `RUN`?", + "created_at": "2024-04-12T15:41:29Z", + "author": "sronilsson" + } + ] + }, + { + "title": "SIMBA INDEX WARNING: Some frames appears to be missing in the dataframe", + "body": "I am using a 2-animal project - user-defined labeling - 8 body parts per animal, side-view camera. \r\nWhen I run the model and then run the interactive probability plot (or run models after model setup) I can see the error: bodypartcolumnnotfound\r\n![315261352-7682699a-ec56-4175-a9b8-dc8385befd9b](https://github.com/sgoldenlab/simba/assets/164436225/bb971eaf-9130-49f6-8840-6e45263163af)\r\n\r\nHowever, the interactive probability plot is working, and after running models and seeing the error I can still analyze machine predictions and get csv files.\r\nThe problem is that when I try to create a video of sklearn_results or a Gantt plot, I see the following error: (\"None of [Index(['Ear_1_x', 'Ear_1_y', 'Ear_1_p'], dtype='object')] are in the [index]\",) \"None of [Index(['Ear_1_x', 'Ear_1_y', 'Ear_1_p'], dtype='object')] are in the [index]\"\r\nSIMBA INDEX WARNING: Some frames appears to be missing in the dataframe and could not be created\r\nVideo TT ETH2 CTR4_basal_males saved...\r\n\r\nAnd the saved video is corrupted (1kb size)\r\n\r\n![315261401-3a21de68-a395-423a-840e-d3d3e601342d](https://github.com/sgoldenlab/simba/assets/164436225/5dda0d4f-ccf3-4c93-b86c-d0908c446216)\r\n![315261432-da555f31-2743-4799-8785-e28a268e8df7](https://github.com/sgoldenlab/simba/assets/164436225/afec1c18-8270-4bcf-926e-ef5419429cd9)\r\n\r\nOS: WIN11\r\nPython Version [e.g. 3.6.0]\r\nAre you using anaconda? yes\r\n", + "user": "MohamedAlyEbraheemZahran", + "reaction_cnt": 0, + "created_at": "2024-03-22T00:31:13Z", + "updated_at": "2024-03-23T16:34:11Z", + "author": "MohamedAlyEbraheemZahran", + "comments": [ + { + "body": "Hey @MohamedAlyEbraheemZahran! Thanks for the screengrabs, very helpful. Looks like we are having trouble finding the data for the Ear body-part, for the first animal at least, in the `project_folder/csv/machine_results/TT ETH2 CTR4_basal_males.csv` file. I saw this happening recently where the animal names had changed across sequential runs.\r\n\r\nIf you open that file, what headers do you see? E.g., are the Ear body-part columns actually named, say, `Animal_1_name_Ear_1_x` or something else? \r\n\r\nSimon\r\n", + "created_at": "2024-03-22T01:30:55Z", + "author": "sronilsson" + }, + { + "body": "PS.
Which version of simba do you have - `pip show simba-uw-tf-dev` - and I'll see if I can recreate it.", + "created_at": "2024-03-22T01:32:56Z", + "author": "sronilsson" + }, + { + "body": "Hi Simon, \r\n\r\nI am using Simba 1.87.2 (I tried 1.55.9 and 1.87.5 and it is the same problem).\r\nI have 2 animals in a tube test, one on the left and one on the right, and I named the animals in the project: Left and Right.\r\nIn the machine results csv files the ear body part is named: Left_Ear_1_x\r\n\r\nNB: other projects with topview one animal 8 bodyparts, or 2 animals top view 8 bodyparts, are fine, so it seems this problem occurs only in user-defined labeling.\r\n", + "created_at": "2024-03-23T01:06:36Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "@MohamedAlyEbraheemZahran Yes - what should happen, when you import the data from a two-animal user-defined body-part config and name the animals `Left` and `Right`, is that SimBA should open up the `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` file and add prefixes representing your chosen animal names, `Left_` and `Right_`, to the appropriate rows.
", + "created_at": "2024-03-23T01:49:39Z", + "author": "sronilsson" + }, + { + "body": "Actually, the orange text in the first screen grab (BodypartColumnNotFoundWarning) is when the prefixs are deleted because if I readded the prefixs it will give me Mismaching features numbers (probably add more columns for the preffixs) ", + "created_at": "2024-03-23T01:55:08Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "![image](https://github.com/sgoldenlab/simba/assets/164436225/338e67ad-58e7-4e26-b20c-eb0453524ffd)\r\n![image](https://github.com/sgoldenlab/simba/assets/164436225/a4779d15-27cd-4ee9-b0d1-5bd24d051353)\r\nThis an example of having the preffix in the project_bp_names.csv", + "created_at": "2024-03-23T01:59:38Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "Got it - did you see the orange warning msg when you trained the model?\r\n\r\nWhat I am thinking is that the model mistakenly was trained using 801 features (with the body-parts coordinates included mistakenly). Next, when you add the Left_ and Right prefixes, SimBA drops the body-part columns which give syou the correct 753 columns, but the model was mistakenly trained with 801 features, and thats why the error shows?\r\n", + "created_at": "2024-03-23T02:04:43Z", + "author": "sronilsson" + }, + { + "body": "There was a slighly related issue [HERE](https://github.com/sgoldenlab/simba/issues/351#issuecomment-2010743213) the other day. It's not the same solution, but I described what it means when we drop the body-part coordinates prior to training the model.", + "created_at": "2024-03-23T02:06:17Z", + "author": "sronilsson" + }, + { + "body": "and what could be a solution for this?\r\n", + "created_at": "2024-03-23T02:15:15Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "one thing is that in the target inserted files the contained tow classifiers that I deleted from the project later because are not present enoughly in the videos. Do you this could be the reason? and how to fix this? ", + "created_at": "2024-03-23T02:20:27Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "Now I added the two classifiers again to the project and rerun training for one classifier and i can see the same orange warning during training ", + "created_at": "2024-03-23T02:29:29Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "I think this: \r\n\r\ni) Add the left and right prefixes again to your `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` file. \r\n\r\nii) Retrain the models (create new `.sav` files) for your behaviors. \r\n\r\niii) Run your models again on the new data and see if it persists, or send me any screengrab of any error you see.\r\n\r\nI don't think it is related to the deleted columns from the single classifier. It complains about 48 columns (801-753) which is the exact number of body-parts that you have - with a x, y , p value columnd for each: 48 / 3 = 16. Meaning the body-part columns where mistakenly used to train the classifier (I think). ", + "created_at": "2024-03-23T02:35:10Z", + "author": "sronilsson" + }, + { + "body": "Thanks alot this is what I have just tried; I retrained one classifier with the added prefix and the orange message didn't appear and I run this classifer and again the warning didn't apppear. I will do the same for all classiferes renalyze the data and try if visualization will work or not? 
And I will let you know.\r\n Do you think the results of analyze machine pedictions would be different?", + "created_at": "2024-03-23T02:54:29Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "I am not sure - but probably not. \r\n\r\nA bit of background: \r\n\r\nWe typically don't want to input the body-part coordinates directly when training a model: we don't want to train models based on the exact location of a body-part in time within the video. \r\n\r\nFor example, `Resistance` behavior could potentially happen anywhere in the tube. If you have annotated `Resistance` behavior only in the left side of the tube, and you train the model including your body-part locations, then the model might pick up on that and use that correlation between your body-part location and annotations, and conclude that _Resistance is a behavior that only can happen on the left side._ Next, when you have new videos, and restistance happens in the middle of tube, then the model won't score it correctly because it thinks that resistance only happens on the left. \r\n\r\nIf your behavior **do** have spatial associations. E.g., it is a fact that `Resistance` only can happen on the left side (if it happens elsewhere it is not `Resistance`) then I recommend drawing ROIs and using those ROIs to build features as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md#part-3-generating-features-from-roi-data)\r\n\r\nlet me know if this makes sense!\r\n\r\nSimon\r\n\r\n\r\n\r\n", + "created_at": "2024-03-23T15:35:56Z", + "author": "sronilsson" + }, + { + "body": "Thank for your reply\r\nActually I am happy with the classifiers results in this project and they aren't location limited so I don't need ROI\r\nBut in other projects in which I have an object like a glove or a mirror, It will be benificial for sure. But did you mean that the ROI could be considered as features in a classifier training?\r\n", + "created_at": "2024-03-23T15:58:19Z", + "author": "MohamedAlyEbraheemZahran" + }, + { + "body": "Yes. \r\n\r\nFor example, you could add columns that represent:\r\n\r\n1 or 0 value noting if body-parts are located within, or outside, each of the ROIs.\r\nThe millimeter distance between body-parts and the center of each ROIs.\r\n1 or 0 value noting if the animal is directing towards the center each of the ROIs or not.\r\n\r\nThese you can get from the GUI menu documented in the link above. \r\n\r\n... just a note, users have had other questions requiring computing all kinds of different values representing the animal and its relationship with ROIs. I have written these methods [HERE](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#module-simba.mixins.geometry_mixin) so if something specific you want to calculate it can probably be done ", + "created_at": "2024-03-23T16:08:03Z", + "author": "sronilsson" + }, + { + "body": "Ps. like... might be useful to know how animal geometries are [overlapping etc](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.plotting.html#module-simba.plotting.geometry_plotter) in tube test", + "created_at": "2024-03-23T16:14:55Z", + "author": "sronilsson" + }, + { + "body": "Thanks so much for the information. 
\r\nBy the way, I like the way you represent the Spontaneous alternation\r\n", + "created_at": "2024-03-23T16:33:59Z", + "author": "MohamedAlyEbraheemZahran" + } + ] + }, + { + "title": "\"Feature number mismatch error\" when analyzing new videos with a machine model ", + "body": "**Describe the bug**\r\nWhen trying to use a machine model on new videos, I am met with the following error message: \r\n\r\nSIMBA FEATURE NUMBER MISMATCH ERROR: Mismatch in the number of features in input file C:/Users/xktz/Desktop/DLC_SimBA_videos/simba/project_folder/csv/features_extracted/animal54_09_02_24_res60fps.csv, and what is expected by the model groom. The model expects 35 features. The data contains 31 features. 🚨\r\n\r\nThe same message appears when I attempt to validate the model on a single video. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Train machine model for specific behavior \r\n2. Import new videos and CSV files from DeepLabCut\r\n3. Configure video parameters \r\n4. Skip outlier correction\r\n5. Extract features\r\n6. Run machine model \r\n\r\n**Expected behavior**\r\nAfter importing new videos and their CSVs from DeepLabCut, I fixed the video parameters, skipped outlier correction, and extracted features. For all steps, I used default settings and followed the guidelines on Github. I expect that I should be able to \"Run Machine Model\" using my newly trained behavior classifier named \"groom.\" \r\n\r\nThe machine model for \"groom\" was trained using default settings for training a single model. During model training, the output was as follows: \r\nSIMBA COMPLETE: Hyper-parameter config saved (9 saved in project_folder/configs folder). 🚀\r\nReading in 14 annotated files...\r\nDataset size: 74.592MB / 0.074592GB\r\nNumber of features in dataset: 35\r\nNumber of groom frames in dataset: 556.0 (0.22%)\r\nTraining and evaluating model...\r\nFitting shake model...\r\nSIMBA COMPLETE: Classifier groom saved in models/generated_models directory (elapsed time: 1125.2436s) 🚀\r\nSIMBA COMPLETE: Evaluation files are in models/generated_models/model_evaluations folders 🚀\r\n\r\nWhich indicates the number of features in the dataset is 35. What is the number of features referring to? Why do my new videos have only 31 features after extraction? I suspect I have misclicked something but was unable to find a solution in the documentation or on this forum. I had an earlier model working fine on new videos and was able to get summaries of grooming behavior in each video, but worry that I have changed something by mistake. \r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11 Pro\r\n - Conda version: 23.7.4\r\n\r\n**Conda simbaenv**\r\n - Name: simbaenv\r\n - conda version: 23.7.4\r\n - conda-build version: 3.26.1\r\n - python version: 3.11.5.final.0\r\n - virtual packages: __archspec=1=x86_64, --cuda=12.2=0, __win=0=0", + "user": "xktz89", + "reaction_cnt": 0, + "created_at": "2024-03-20T21:21:39Z", + "updated_at": "2024-03-22T17:13:38Z", + "author": "xktz89", + "comments": [ + { + "body": "Hi @[xktz89](https://github.com/xktz89)! If you search the SimBA GitHub issues or the SimBA gitter channel for a keyword like “mismatch”, you will find discussions on this issue here and there.
\r\n\r\nI’ll describe what is happening: \r\n\r\nWhen you trained the grooming model, SimBA grabbed all the CSV files inside the `project_folder/csv/targets_inserted` directory. Each of these files contains three “types” of columns: the first columns will be your body-part locations with x, y, and p-values. The very last columns will be your hand annotations, e.g., `groom` columns with zeros and ones. Every column in between is your “features”: they are values representing the movement of your animals. SimBA will use these in-between columns to build your machine learning model. \r\n\r\nBefore training your groom model, SimBA looks at the body-part names of your project, and the names of the classifiers in your project, and removes these columns so we only have features left. If you look inside the `project_folder/csv/targets_inserted` directory and open one of the CSV files, you should see 35 of these in-between columns. \r\n\r\nNext, you have new video files that you want to run the classifier on. These files are located in the `project_folder/csv/features_extracted` directory. SimBA opens each one of these CSV files, beginning with `animal54_09_02_24_res60fps.csv`, and removes the body-part columns and keeps the feature columns (we don't have any annotations to remove this time). It also opens the classifier that you previously trained. Before doing anything, SimBA checks that you have the same number of columns left in `animal54_09_02_24_res60fps.csv` that you had for the videos that you built the classifier on, and at this point it complains - you do not, you only have 31 columns, and the classifier expects 35! You have 4 fewer columns, and SimBA doesn’t know what to do with that. \r\n\r\nSo where did these 4 extra columns come from, and what are they? Did the `project_folder/csv/targets_inserted` files contain any extra annotation columns (additional behaviors) that you failed to declare as annotations? Did any body-parts get added to/removed from your project between training and the new videos? \r\n\r\nYou can always share one file each from `project_folder/csv/targets_inserted` and `project_folder/csv/features_extracted` and I can take a look and help with the digging.", + "created_at": "2024-03-20T22:16:17Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson! Thank you so much for your thorough reply! This was really helpful and will be good to keep in mind going forward. \r\n\r\nYou are correct, I previously labeled additional behaviors that were later removed from the project. I managed to fix the problem by creating a new project for a single behavior classifier (groom), then manually copying the column of hand annotations for groom from my old /targets_inserted and pasting them into the new /targets_inserted. \r\n\r\nThanks again!", + "created_at": "2024-03-22T16:01:08Z", + "author": "xktz89" + }, + { + "body": "Excellent, thanks for letting us know! There should be some scripts lying around for removing or moving columns between files in different directories - let me know if it happens again; if you have a lot of files to process it can become a chore to move them manually.", + "created_at": "2024-03-22T17:13:37Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Spontaneous alternation test", + "body": "**Is your feature request related to a problem? Please describe.**\r\nI'd like to use SimBA to analyze my spontaneous alternation test in rats.
\r\nThis test consists of letting the animal freely explore a T maze for 10 min and then scoring how many times the animal makes a correct, incorrect or neutral triplet: \r\n\r\nIf the last and second-to-last visited arms were different and the current choice was different from those two arms, then it was considered a correct choice. \r\nIf the animal chose one of the already-visited arms, the choice was considered incorrect. \r\nIf the last and second-to-last visited arms were the same, i.e., the animal consecutively visited the same arm twice, then the current arm choice was considered neutral. \r\nThe spontaneous alternation rate was calculated as the number of correct choices divided by the total number of correct and incorrect choices.\r\n\r\n**Describe the solution you'd like**\r\nI'd like it to be possible to score this type of behaviour using ROIs in SimBA. \r\nSo you draw the ROIs for each arm, and from there you are able to get a csv with the data of the triplets the animal did. \r\nAnd, if possible, the number of correct, incorrect and neutral ones. \r\n\r\n\r\nThank you very much in advance!!\r\n", + "user": "Monica9577", + "reaction_cnt": 0, + "created_at": "2024-03-19T11:55:11Z", + "updated_at": "2024-04-26T13:47:14Z", + "author": "Monica9577", + "comments": [ + { + "body": "Good question @Monica9577. Good news is, I actually wrote the code to perform these calculations the other day, documented [HERE](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.feature_extraction_supplement_mixin.FeatureExtractionSupplemental.spontaneous_alternations).\r\n\r\n\r\n\"image\"\r\n\r\nThe function gives you, for each video, the percent alternation, total alternation count, same-arm return error counts, alternate error counts, the times when each error occurs, and the times when each alternation occurs (together with the specific unique arm sequence that was completed). \r\n\r\nThe not-so-good news is that the user who requested the function didn’t really get back to me to confirm it was working as expected, and I don’t have hand-annotated data (or commercial software data) to validate it against to confirm I get the same results. As of now, I also don’t have a graphical interface for it, but that would be quick to implement. \r\n\r\nTo take this forward, would it be possible for you to share a video with me, with some pose-estimation tracking data, for which you know the spontaneous alternation rate? I could confirm it is working, and then insert a menu in the GUI, where you select the arm ROI names from dropdown menus, and click to perform the spontaneous alternation calculations and get the results for each video in a CSV? \r\n", + "created_at": "2024-03-19T12:36:37Z", + "author": "sronilsson" + }, + { + "body": "spontaneus alternation test.zip\r\n\r\nHi!\r\nThat's wonderful!\r\nI'm sending you a folder with an annotated video and an Excel file with the results.\r\n\r\nThank you!\r\n", + "created_at": "2024-03-19T13:42:08Z", + "author": "Monica9577" + }, + { + "body": "Cheers, I've requested access", + "created_at": "2024-03-19T14:03:33Z", + "author": "sronilsson" + }, + { + "body": "Yes!\r\nI have already accepted the access request", + "created_at": "2024-03-19T14:10:46Z", + "author": "Monica9577" + }, + { + "body": "Thanks - just two questions: which arm is named A, B, and C, i.e., do you use this naming code? \r\n\"image\"\r\n\r\nAnd which body-part would you use to score an entry? E.g., nose or center? \r\n", + "created_at": "2024-03-19T14:13:41Z", + "author": "sronilsson" + }, + { + "body": "How you have annotated it in the image is correct, \r\n\r\nAnd for the entries I use the center \r\n", + "created_at": "2024-03-19T14:17:12Z", + "author": "Monica9577" + }, + { + "body": "thanks!", + "created_at": "2024-03-19T14:17:22Z", + "author": "sronilsson" + }, + { + "body": "@Monica9577 It would be good to get your input on one thing..\r\n\r\nYou see towards the end in the attached 30s video clip, your animal is hovering around the entrance to `Arm A`. SimBA scores the center-point as entering and leaving `Arm A` a few times in quick succession, while if you scored this same sequence manually, you probably would have scored it as a single entry to `Arm A` rather than several entries and exits.\r\n\r\nhttps://github.com/sgoldenlab/simba/assets/34761092/3d3575a9-2962-48f6-86f4-6c6cfde38967\r\n\r\n… I don't know how commercial tools like [AnyMaze](https://www.any-maze.com/applications/y-maze/) perform the calculation, but I suspect they count that **the entire animal (excluding the tail)** has to enter the ROI (or perhaps some threshold percent of the animal body has to enter the ROI) for an arm entry to count.
\r\n\r\nOne potential solution is that we also do this, e.g., if you say 50% in a drop-down, it would count as an entry while half or more of the animal is in an arm (like the left example below), or if you say 100%, it would count as an entry only if the entire animal is in an arm (like the right example below). \r\n\r\n\"image\"\r\n\r\n**However, I have never used AnyMaze and I have never scored spontaneous alternation manually - what is the typical procedure and criterion for scoring an arm entry?**\r\n\r\n", + "created_at": "2024-03-20T00:19:29Z", + "author": "sronilsson" + }, + { + "body": "Hi! \r\n\r\nWhen I use AnyMaze to score this type of test, I usually set it so that at least 80% of the animal has to enter the ROI in order to count as an entry (or an exit). So if it's possible to score it that way, it would be perfect!\r\n\r\nThank you so much for the effort and the time!", + "created_at": "2024-03-20T11:01:48Z", + "author": "Monica9577" + }, + { + "body": "Ah that's cool, thanks, that's good to know - then I will reach out if I have any AnyMaze questions :) \r\n\r\n... I noticed AnyMaze has this visualization below, and we will probably need something similar, which also shows the inset names of the three most recently visited arms, so users can visually confirm that the numbers are accurate to make it useful. I need some time to write it and I will get back to you. \r\n\r\n\r\n\"image\"\r\n", + "created_at": "2024-03-20T11:24:41Z", + "author": "sronilsson" + }, + { + "body": "That's awesome! \r\n\r\nYes, feel free to ask any AnyMaze question, no problem!", + "created_at": "2024-03-20T12:00:53Z", + "author": "Monica9577" + }, + { + "body": "Alright, one AnyMaze question!\r\n\r\nCheck the attached video. Around the 30s-35s mark, the animal is nearly disappearing out of the image, and therefore out of the ROI. I see the AnyMaze example videos have a little safety distance between the edge of the video frame and the end of the arms, but we don't have that in this case. Do you know how AnyMaze solves this issue? \r\n\r\nhttps://github.com/sgoldenlab/simba/assets/34761092/108e4001-7b04-4192-aed0-ed6961fb74b4\r\n\r\n", + "created_at": "2024-03-20T18:59:51Z", + "author": "sronilsson" + }, + { + "body": "... could it be that in AnyMaze, you have to cover the entire maze with ROIs? If that is the case, and the animal \"disappears\", the animal is placed in the ROI where it was last reliably detected for the frames where it is missing? ", + "created_at": "2024-03-20T19:21:29Z", + "author": "sronilsson" + }, + { + "body": "In AnyMaze I had the same problem.\r\nThe best way I found to solve it is telling the program to only score it as an exit if the animal enters another ROI, and we establish an ROI at the intersection of the three arms, called the center.\r\nIn this way it would only count as an exit from arm A if the animal goes through the center. ", + "created_at": "2024-03-20T21:22:03Z", + "author": "Monica9577" + }, + { + "body": "Makes sense! Let's do that", + "created_at": "2024-03-20T21:52:08Z", + "author": "sronilsson" + }, + { + "body": "@Monica9577 A couple more questions about how spontaneous alternation is calculated so we get it right - thanks for the help so far.\r\n\r\n1) Can you check the following video and let me know if the calculations printed on the side are accurate or not? It is calculated such that 100% of the animal has to enter/exit an arm for it to count.
\r\n\r\nhttps://drive.google.com/file/d/1g9AYspK56d-de_mf5Og9W8WNwzjpGCwt/view?usp=sharing\r\n\r\n2) In this second example below, it is calculated such that 60% of the animal has to enter/exit an arm for it to count - and we get some \"revisit\" errors where, e.g., the animal visits say A->A->A or B->A->A. Next, the animal visits C, making the sequence A->A->C. The visit to arm C, should that be counted as an error? I guess I am having a hard time getting my head around it, as the animal has to visit either C or B in order to get the chance to fulfill a successful sequence in the next arm visit, but I'm guessing it should be an error. \r\n\r\nhttps://drive.google.com/file/d/1wtRq-HGl0Mralik24-n2GXk4iY3Rqece/view?usp=sharing\r\n\r\nThanks!\r\n", + "created_at": "2024-03-22T01:11:29Z", + "author": "sronilsson" + }, + { + "body": "Personally, I believe that when 100% of the animal is counted, there are several times when it would manually qualify as an entry, but in this example it is not counted as such. \r\n\r\nOn the other hand, when 60% of the animal is counted it is possible to have re-entry errors. \r\n\r\nI think this model looks very good. Maybe a solution is to set a value between 100 and 60, such as 80%, to qualify as an entry to the arm.", + "created_at": "2024-03-26T19:25:36Z", + "author": "Monica9577" + }, + { + "body": "Thanks @Monica9577, I was thinking more about the actual spontaneous alternation protocol: \r\n\r\nIf an animal has visited arm A followed by the center, followed by arm A, followed by the center, and followed by arm A, the current three-arm sequence would read A A A. If the animal next visits C, is that visit to arm C coded as an error? \r\n\r\n", + "created_at": "2024-03-26T19:51:05Z", + "author": "sronilsson" + }, + { + "body": "Yes, if it visits A-A-C, it would be an error", + "created_at": "2024-03-26T20:10:24Z", + "author": "Monica9577" + }, + { + "body": "Thanks @Monica9577 !\r\n\r\n\r\nSo you would have a menu like this (I haven't documented anything as yet, so I'll type it here):\r\n\r\nIn ARM DEFINITIONS you would just select which ROIs that you have drawn represent which arms. \r\n\r\nIn the ANIMAL SETTINGS menu you would set: \r\n\r\nPOSE ESTIMATION THRESHOLD: Any body-part detected with lower probability than this will be omitted when computing the animal shape.\r\n\r\nANIMAL AREA (%): The percent of the animal that has to enter/exit a region for it to count.\r\n\r\nANIMAL BUFFER (MM): Sometimes you may want to increase the animal size with a little safety buffer in millimeters (see [THIS](https://simba-uw-tf-dev.readthedocs.io/en/latest/_images/bodyparts_to_polygon.png) for an example of what I mean)\r\n\r\nSAVE DETAILED DATA: If True, gives you a separate CSV for each video, with the different arm sequences and successful alternations and errors, with the frame numbers when they happened. Otherwise you will just get a summary file with the stats for each video containing alternation rate, error counts, alternation count, return errors and alternation errors. \r\n\r\nVERBOSE: Useful for troubleshooting; if True, prints out what processing is being done, otherwise you will just get a print-out when each file is complete.\r\n\r\n**Do you think we are missing anything?** \r\n\r\n\"image\"\r\n\r\nI have only really tested it so it runs as expected on the F1_HAB video you shared, so there may be some errors that could arise that I haven't been able to anticipate.
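For reference, the triplet rules defined at the top of this issue can be sketched in a few lines of Python. This is a hypothetical helper working on an already-computed arm-entry sequence, not the SimBA function linked earlier in the thread:

```python
from typing import List

def spontaneous_alternation_rate(entries: List[str]) -> float:
    """Percent alternation from an ordered arm-entry sequence, e.g. ['A', 'B', 'C']."""
    correct, incorrect = 0, 0
    for prev2, prev1, current in zip(entries, entries[1:], entries[2:]):
        if prev2 == prev1:
            continue                      # neutral: same arm entered twice in a row
        elif current in (prev2, prev1):
            incorrect += 1                # returned to one of the two previous arms
        else:
            correct += 1                  # three different arms in a row
    scored = correct + incorrect
    return 100 * correct / scored if scored else 0.0

print(spontaneous_alternation_rate(["A", "B", "C", "A", "A", "C", "B", "A"]))  # 80.0
```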
If you have more videos I could test it on those before sharing the code.", + "created_at": "2024-03-28T11:52:57Z", + "author": "sronilsson" + }, + { + "body": "I am still testing this function and comparing it with manual analysis. \r\nSo far it seems very good; it would be great if a list with the order in which the animal has entered the different arms could be included in the csv sheet of results. \r\n(Ex. A,B,B,B,C,A,B,C,A....)", + "created_at": "2024-04-26T09:54:04Z", + "author": "Monica9577" + }, + { + "body": "@Monica9577 - definitely - thanks. How about if you tick the `detailed data` checkbox, you get an additional CSV for each video file looking something like this? Would that work? (with one of these files per video)\r\n\r\n\"image\"\r\n", + "created_at": "2024-04-26T10:51:17Z", + "author": "sronilsson" + }, + { + "body": "I inserted this output in version `1.90.8`, if you set the `SAVE DETAILED DATA` dropdown to True, but please let me know how it goes.\r\n\r\n\"image\"\r\n", + "created_at": "2024-04-26T11:40:11Z", + "author": "sronilsson" + }, + { + "body": "FYI I realized you probably want the entry/exit times as well listed alongside, in frames and time, so I added it; you should expect something like this.\r\n\r\n[F1 HAB_arm_entry_sequence.csv](https://github.com/sgoldenlab/simba/files/15131346/F1.HAB_arm_entry_sequence.csv)\r\n", + "created_at": "2024-04-26T13:47:13Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Unable to Start Simba GUI", + "body": "Hello simba team,\r\n\r\nI successfully installed SimBA via the Anaconda Prompt and managed to launch it. However, the application freezes immediately upon launch, and I do not receive any error messages in the prompt either.\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/76165836/332c32a5-3545-445d-a729-54268d8ff417)\r\n\r\nI have reviewed the FAQ section on the SimBA website but was unable to find a solution to my issue. At this point, I suspect the problem might be related to my GPU. I've included my GPU details below for your reference.\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/76165836/c7db91b2-95aa-4ba2-a0c1-f4610cce8962)\r\n\r\n\r\nLet me know what I can try to resolve this issue. \r\n\r\nThank you so much in advance!\r\n\r\n", + "user": "heiley8181", + "reaction_cnt": 0, + "created_at": "2024-03-19T07:43:33Z", + "updated_at": "2024-05-30T09:28:09Z", + "author": "heiley8181", + "comments": [ + { + "body": "Hi @heiley8181! An odd one, and tricky without a traceback error message, so I will have to ask some questions to dig - but I don't think it comes down to the GPU. \r\n\r\nDid you install SimBA by cloning the git repository, or via pip with `pip install simba-uw-if-dev`? \r\n\r\nWhich version of SimBA do you see when you type `pip show simba-uw-if-dev`? \r\n\r\nWhich version of python do you see when typing `python --version` in the terminal?\r\n\r\nIf you open the terminal in admin mode, and then launch SimBA, do you still see the same error? ", + "created_at": "2024-03-19T11:23:40Z", + "author": "sronilsson" + }, + { + "body": "Hello @sronilsson ,\r\n\r\nUsing anaconda navigator -> I typed `python --version` and got 3.6.15\r\n\r\nI installed SimBA using Anaconda using the pip install method.\r\n\r\nWhen I type pip show simba-uw-if-dev in the working directory, I get a warning message that says: Warning Package(s) not found: simba-uw-if-dev. So I tried the pip install method in the virtual environment I created (simba-test) and got this error message.
\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/76165836/f68d9877-73b9-4235-9ca6-e25ab965d894)\r\n\r\nLastly, I still see the same error when I open the terminal in admin mode.\r\n\r\nLet me know what I can try to fix this issue! Thank you in advance.\r\n\r\n", + "created_at": "2024-03-20T02:27:12Z", + "author": "heiley8181" + }, + { + "body": "Hey @heiley8181 ! \r\n\r\ni) The last screenshot contains some typos - it should be `pip show simba-uw-tf-dev`, **not** `pip install simba-uw-if-dev`. \r\n\r\nii) In the first screengrab, when you try to launch SimBA, I see no error messages, but I do see what looks like cursor line selections. Line selections like this can lock the terminal and prevent it from printing useful information, including what went wrong. \r\n\r\n\"image\"\r\n\r\n\r\nDo you see any error messages in the Windows terminal when trying to launch SimBA without these line selections?\r\n", + "created_at": "2024-03-20T11:19:08Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the response, @sronilsson! \r\n\r\nI tried what you suggested and this is the warning message I received in the terminal:\r\n![image](https://github.com/sgoldenlab/simba/assets/76165836/cdeb32af-6710-4a9b-8188-50ae9b55b8db)\r\n\r\nAlso, launching simba without the cursor line selection still shows no errors.\r\n\r\n-Heiley", + "created_at": "2024-03-20T11:27:58Z", + "author": "heiley8181" + }, + { + "body": "Oh, you still got the typo - you see you have written `simba-uw-if-dev` rather than `simba-uw-tf-dev` - you should substitute the `if` for `tf`.\r\n\r\nSimon", + "created_at": "2024-03-20T15:40:43Z", + "author": "sronilsson" + }, + { + "body": "Oops, thanks for pointing that out. The version I have is 1.87.5", + "created_at": "2024-03-20T23:46:18Z", + "author": "heiley8181" + }, + { + "body": "Everything looks in order... \r\n\r\ni) Do you have any other versions of simba installed in the same environment? I.e., if you type `pip show simba-uw-tf` in the same conda environment, what do you see? \r\n\r\nii) You can create a list of all the packages in your simba environment and I can take a look at it in case I see anything odd. If you type `conda list -e > requirement.txt`, this will create a text file of all the packages installed in the environment. Can you create it and drop it here in the chat?\r\n\r\n", + "created_at": "2024-03-21T12:48:20Z", + "author": "sronilsson" + }, + { + "body": "1. If I type `pip show simba-uw-tf` in the simba-test environment, I get `WARNING: Package(s) not found: simba-uw-tf`\r\n\r\n2. [requirement.txt](https://github.com/sgoldenlab/simba/files/14715453/requirement.txt) This is the text file. Hope this helps to tackle the issue :)\r\n\r\n-Heiley\r\n", + "created_at": "2024-03-22T00:58:09Z", + "author": "heiley8181" + }, + { + "body": "Thanks @heiley8181! I will create a conda environment from the txt to see if I can recreate the error. One final question for now - you may have told me already, but I can't find it:\r\n\r\nWhat is the operating system you run - is it Windows 10 or 11?", + "created_at": "2024-03-22T02:09:24Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson , I am currently running on Windows 10.\r\n\r\nThank you!\r\n\r\n-Heiley", + "created_at": "2024-03-22T03:23:28Z", + "author": "heiley8181" + }, + { + "body": "Hi @heiley8181 !
Sorry for the delay on this, but I have tried and can't recreate the error 🤷🏻‍♂️ That said, I don't have access to a Windows 10 machine, I only have Win11, so I can't fully follow your workflow.", + "created_at": "2024-04-05T13:22:32Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson ,\r\n\r\nThank you for the update. One thing I noticed is that in the task manager, python is not responding, and when I close the blank simba GUI, I get a pop up that says python is not responding. Do you think I should try other versions of python in this case?", + "created_at": "2024-04-25T01:19:16Z", + "author": "heiley8181" + }, + { + "body": "Hi @heiley8181 - I don't think it is the python version; my thinking is it is something to do with Windows 10.. I gave the environment a go on Windows 11, in Ubuntu 20/22, and on Mac 13.4, in python 3.6 and python 3.10 - and it didn't freeze. So that makes me think it is Windows 10, as you are running this and that's the one operating system I can't get hold of. Do you have any other machines to run it on, that are not running Win 10, to test if this is the issue? \r\n\r\n ", + "created_at": "2024-04-25T13:54:47Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson ,\r\n\r\nI tried the simba installation on a different computer with Windows Pro 11. Using the anaconda installation method, now I get an error message that says:\r\n\r\n```\r\n(simbaenv) C:\\Users\\User>simba\r\nwarning||SimBA could not find a FFMPEG installation on computer (as evaluated by \"ffmpeg\" returning None). SimBA works best with FFMPEG and it is recommended to install it on your computer\r\n```\r\n\r\nI downloaded the FFmpeg files, extracted and moved them to C:\\Program Files, and added the Path (for the user variable).\r\n![image](https://github.com/sgoldenlab/simba/assets/76165836/b12eff68-eae9-42a5-9951-b49422bc1f7d)\r\n\r\nAnd I still get the warning for ffmpeg. If possible, please let me know what I could try next to solve this error message.\r\n\r\nThank you.\r\n", + "created_at": "2024-04-26T02:23:37Z", + "author": "heiley8181" + }, + { + "body": "Hi @heiley8181! To confirm that ffmpeg is installed and accessible, open a terminal and type ``ffmpeg``, what do you see? \r\n", + "created_at": "2024-04-26T11:29:23Z", + "author": "sronilsson" + }, + { + "body": "Just FYI, this is how the path for ffmpeg looks on my PC, pointing to the FFmpeg bin folder:\r\n\r\n\r\n![Screenshot 2024-04-26 085656](https://github.com/sgoldenlab/simba/assets/34761092/bf7f468c-178f-401a-8fe0-27018ca6c839)\r\n\r\n", + "created_at": "2024-04-26T13:03:09Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson ! So it seems like I did not type the correct path for ffmpeg.
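A quick way to reproduce this kind of check from Python: the warning above says ffmpeg "returned None", which is consistent with a PATH lookup, though the exact SimBA internals may differ.

```python
import shutil

# Returns the full path to the ffmpeg executable if its bin folder is on PATH,
# or None if it isn't - which is when you would see SimBA's startup warning.
print(shutil.which("ffmpeg"))
```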
I changed the path and now I can detect ffmpeg in the terminal.\r\n\r\n```\r\n(simbaenv) C:\\Users\\User>ffmpeg\r\nffmpeg version 2024-04-25-git-cae0f2bc55-full_build-www.gyan.dev Copyright (c) 2000-2024 the FFmpeg developers\r\n built with gcc 13.2.0 (Rev5, Built by MSYS2 project)\r\n configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libxevd --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxeve --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-ffnvcodec --enable-libvpl --enable-nvdec --enable-nvenc --enable-vaapi --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint\r\n libavutil 59. 16.100 / 59. 16.100\r\n libavcodec 61. 5.103 / 61. 5.103\r\n libavformat 61. 3.100 / 61. 3.100\r\n libavdevice 61. 2.100 / 61. 2.100\r\n libavfilter 10. 2.101 / 10. 2.101\r\n libswscale 8. 2.100 / 8. 2.100\r\n libswresample 5. 2.100 / 5. 2.100\r\n libpostproc 58. 2.100 / 58. 2.100\r\nUniversal media converter\r\nusage: ffmpeg [options] [[infile options] -i infile]... {[outfile options] outfile}...\r\n\r\nUse -h to get full help or, even better, run 'man ffmpeg'\r\n```\r\n\r\nHowever, as I type simba and launch the GUI, I still see the frozen GUI with no error messages. Do you have any suggestion for this? \r\n\r\nThank you in advance!", + "created_at": "2024-04-29T05:15:14Z", + "author": "heiley8181" + }, + { + "body": "Thanks @heiley8181 - this is a tricky one.. \r\n\r\nWhen you boot up simba by typing `simba` do you see the splash screen below pop open before the main GUI? \r\n\r\n\"image\"\r\n\r\nif you type `pip show simba-uw-tf-dev` in your `simba-test` conda environment, it should print out where simba is installed:\r\n\r\n\"image\"\r\n\r\n\r\nIf you go to that path on your computer and open `simba/SimBA.py` in a text editor, and comment out lines 1956-1957, does it run?\r\n\r\n\"image\"\r\n\r\n\r\nThat seems to be the point where it freezes, when SimBA tries to print the welcome messages that I can't see in your screenshoots. 
Maybe those messages are causing the errors..", + "created_at": "2024-04-29T19:39:16Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson ,\r\n\r\n> When you boot up simba by typing simba do you see the splash screen below pop open before the main GUI?\r\n\r\nYes, I see the splash screen first and then the blank GUI.\r\n\r\nI commented out the part you mentioned, and now I finally see an unfrozen GUI! \r\n![image](https://github.com/sgoldenlab/simba/assets/76165836/c93746ee-4175-4ecb-9505-fb77544439c6)\r\n\r\nThank you so much for all the help :)", + "created_at": "2024-04-29T23:40:07Z", + "author": "heiley8181" + }, + { + "body": "Cool! I'm slightly worried that there will be more issues downstream though.. as other things are printed in the terminal and it would freeze up when those things print - let me know if that happens and we could try commenting out some other lines.", + "created_at": "2024-04-30T12:11:11Z", + "author": "sronilsson" + }, + { + "body": "Hello @sronilsson ,\r\n\r\nI am trying to create a project in simba, and it seems like the GUI is frozen when I click the create project config button. I do not see any error messages in the anaconda prompt either. Do you think this could be solved by commenting out some other lines that you mentioned?\r\n\r\nThanks in advance!\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/76165836/97b2a750-83fe-46b5-a50b-4da4c202a6aa)\r\n", + "created_at": "2024-05-29T08:34:12Z", + "author": "heiley8181" + }, + { + "body": "Hi @heiley8181 - thanks for reporting.\r\n\r\nPossibly, yes - just one question: has it previously worked to create a project, or has it never worked on your end? \r\n\r\nI'm thinking it has to do with the printing and interpreting of emojis on your system. What I could probably do, if it has never worked, is to insert a single variable that prevents the printing of emojis, so you only have to change a single line in the SimBA code.", + "created_at": "2024-05-29T11:11:17Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the response. I have never been able to create a new project so far.", + "created_at": "2024-05-29T11:15:24Z", + "author": "heiley8181" + }, + { + "body": "Cheers, let me make emojis configurable and get back to you, and let's see if that fixes it.", + "created_at": "2024-05-29T12:37:18Z", + "author": "sronilsson" + }, + { + "body": "Let's try this to start: \r\n\r\ni) Update simba using `pip install simba-uw-tf-dev --upgrade`\r\n\r\nii) Go to where simba is installed as [above](https://github.com/sgoldenlab/simba/issues/348#issuecomment-2083522050) and open `simba/SimBA.py` in a text editor. \r\n\r\niii) Go to approximately line 163 where you see `PRINT_EMOJIS = True`, and change this to `PRINT_EMOJIS = False`. Save and close the SimBA.py file. Note: This is the only thing you should have to do; you should not have to comment out the welcome message as you did previously. \r\n\"Pasted\r\n\r\niv) Try to restart SimBA using `simba` and let me know how it goes. \r\n\r\n", + "created_at": "2024-05-29T12:59:31Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson ,\r\n\r\nI actually could not find the PRINT_EMOJIS = True line at all. It seems like the emojis are called with get_emojis in my SimBA.py file.
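For context, a flag like PRINT_EMOJIS is useful because some Windows consoles cannot encode emoji and may garble or hang on such output. Below is a hypothetical sketch of the idea, not SimBA's actual code:

```python
PRINT_EMOJIS = False  # set to False on consoles that choke on emoji output

def status_print(msg: str, emoji: str = "\U0001F680") -> None:
    # Append the emoji only when the flag allows it.
    text = f"{msg} {emoji}" if PRINT_EMOJIS else msg
    try:
        print(text)
    except UnicodeEncodeError:
        print(msg)  # fall back to plain text on narrow code pages

status_print("SIMBA COMPLETE: Project created")
```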
I am uploading the SimBA.py file I have on my computer.\r\n[SimBA.zip](https://github.com/sgoldenlab/simba/files/15492439/SimBA.zip)\r\n", + "created_at": "2024-05-30T01:35:00Z", + "author": "heiley8181" + }, + { + "body": "Thanks @heiley8181 - I will check first thing tomorrow, but meanwhile, just to make sure - what version number do you see if you type `pip show simba-uw-tf-dev` ?", + "created_at": "2024-05-30T01:38:26Z", + "author": "sronilsson" + }, + { + "body": "Thanks for all the help. I have version 1.90.4 right now", + "created_at": "2024-05-30T01:40:06Z", + "author": "heiley8181" + }, + { + "body": "Ah, I think the latest version is in the 1.94 numbers - try the upgrade command again and see if that fixes it.\r\n\r\n If not, I will double check tomorrow and get back to you, it's past my bedtime :) ", + "created_at": "2024-05-30T01:43:17Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson , I updated to version 1.94.4 and changed print_emojis to False and now it works! I see messages in the simba GUI as well. Thanks for all the help :) ", + "created_at": "2024-05-30T02:29:59Z", + "author": "heiley8181" + } + ] + }, + { + "title": "ROI analysis feature extraction ", + "body": "Hello hello!\r\n\r\nMore of a comprehension question: once the ROI analysis is completed, are the features (for example the percentage of session time the mouse is spending in each ROI) extracted based on the csv files that the ROI analysis produced?\r\n\r\nI would like to use the mean of my earL and earR detections for a more reliable estimation (the nose does not look so good). For this I have run the ROI analysis for both earL and earR for both animals (black and white) and have created a file where my body-part column is the concatenation of both body parts (EarLEarR) and the detections are the mean of EarLEarR. This I have saved in the logs folder. I have of course removed the original movement and time_data csv files from the same folder.\r\n\r\nHowever, once I want to extract the ROI features and append them by animal to the already extracted features, of course I need to select the body parts that I would like to work with. And here comes my question above: will it take into account my mean_csv file that is in the logs folder, or will it run some part of the ROI analysis again? \r\n\r\nAnd if so, how could I manage to use the mean of the two body parts as I described?\r\n\r\nThanks so much!", + "user": "DanaeNikol", + "reaction_cnt": 0, + "created_at": "2024-03-12T10:59:03Z", + "updated_at": "2024-03-19T11:10:20Z", + "author": "DanaeNikol", + "comments": [ + { + "body": "Hi @DanaeNikol!\r\n\r\nThe body-parts that are expected in your project, and that you see in the drop-down, are listed in the `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` of your SimBA project - you could open that file, add `EarLEarR` to the list, save the file, open your project again, and you should see `EarLEarR` in the dropdown - but please let me know. \r\n\r\nNote: \r\n\r\nAn alternative approach, to avoid running the ROI analysis several times, is to create a new body-part (e.g., \"head\") that lies halfway between the left and right ear in all your files inside the simba project, using [THIS](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.feature_extraction_mixin.FeatureExtractionMixin.find_midpoints) function.
\r\n\r\n\"image\"\r\n\r\nI can give you some example python code that would do it if you wanted to go this route.\r\n\r\n\r\n", + "created_at": "2024-03-12T11:40:22Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson !\r\n\r\nI does work!\r\nBut then this means that as input for the feature extraction the csv file in the logs folder will be used? Meaning the mean file I created?\r\n\r\nCould you indeed also provide some example python code, if that would be easy for you?\r\n", + "created_at": "2024-03-12T13:49:51Z", + "author": "DanaeNikol" + }, + { + "body": "However, when I try to run the \"Append ROI features\" I bump into this error:\r\n![image](https://github.com/sgoldenlab/simba/assets/97169827/013c3d71-905a-4659-994c-5729ae86939f)\r\n\r\n\r\nI would guess then that the other alternative would actually be the only way, else I should already manipulate the deeplabcut output I import in simba, right?\r\n", + "created_at": "2024-03-12T13:52:14Z", + "author": "DanaeNikol" + }, + { + "body": "Yes, about the \"Append ROI features\" error first: \r\n\r\nWhen clicking to \"Append ROI features\", SimBA reads in all your files inside the `project_folder/csv/outlier_corrected_movement_location` directory, and checks each file that it reads in has the correct number of columns. What the \"correct\" number of columns is, is dictated by how many body-parts you track, which is stored in the `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` file. If the `project_bp_names.csv` file has 18 body-parts listed, the \"Append ROI features\" function anticipates 18x3 = 54 (with an x, y and p column for each body-part) columns in each file in the in the `project_folder/csv/outlier_corrected_movement_location` directory. \r\n\r\nThe error message above suggests that `3213_Scan11-converted-converted_2.csv` fails this check - this file only has 48 columns. Perhaps it is related to adding the `EarLEarR` or other body-parts to the project_bp_names.csv without the relevant data existing in the `3213_Scan11-converted-converted_2.csv` file? \r\n\r\n\r\n\r\n", + "created_at": "2024-03-12T14:08:24Z", + "author": "sronilsson" + }, + { + "body": "Yes, this is absolutely true. There is not such body part in the deeplabcut csv input and everything has been run up to the ROI analysis without the EarLEarR bodypart. So the error is actuallly correct, I am just not sure if there could be any workaround (?) ", + "created_at": "2024-03-12T14:12:33Z", + "author": "DanaeNikol" + }, + { + "body": "There are always a few workarounds :) Just so I understand and we are on the same page - do you want to use `EarLEarR` when Appending ROI features, or have you finished the analysis with this body-part and now want to drop it?", + "created_at": "2024-03-12T14:22:07Z", + "author": "sronilsson" + }, + { + "body": "So my initial body parts(also used in deeplbacut processing) are: \r\nEar_left_1\r\nEar_right_1\r\nNose_1\r\nCenter_1\r\nLat_left_1\r\nLat_right_1\r\nTail_base_1\r\nTail_end_1\r\nEar_left_2\r\nEar_right_2\r\nNose_2\r\nCenter_2\r\nLat_left_2\r\nLat_right_2\r\nTail_base_2\r\nTail_end_2\r\n\r\nI have run the ROI analysis using Ear_left_1 & Ear_left_2, and Ear_right_1 & Ear_right_2. 
\r\nBecause I want a more reliable measurement, I would like to extract the percentage of the session time that each mouse spends in each ROI with the mean of Ear_left and Ear_right for both animals.\r\nFor this, I manipulated the csv outputs I got from the SimBA ROI analysis and calculated the mean, composing another csv file saved in the logs folder.\r\n\r\nNow, I want to Append the ROI features (to get the percentage of session time in each ROI) using the mean of Ear_left and Ear_right I calculated. \r\n\r\nSo I guess, to answer your question, I want to use this body part only to Append ROI features\r\n(All this so that my analysis is comparable to a previous one done by a colleague)\r\n", + "created_at": "2024-03-12T14:27:44Z", + "author": "DanaeNikol" + }, + { + "body": "Thanks @DanaeNikol, got it - just one last question so I understand - when you say \"the mean of Ear_left and Ear_right\", you mean the mean of the left ear and right ear pose-estimated coordinates (so the top of the head)? ", + "created_at": "2024-03-12T14:48:05Z", + "author": "sronilsson" + }, + { + "body": "One more thing: are you only after the percent of time spent in each ROI, and you are not after creating features for later classifiers, correct?", + "created_at": "2024-03-12T14:51:21Z", + "author": "sronilsson" + }, + { + "body": "I have calculated the mean on the level of the movement/velocity/time that is the output of the ROI analysis. I have not manipulated anything on the level of the pose-estimated coordinates. This could also be an option, but it seemed more tricky.\r\nThe function you suggested would actually go in this direction, right? Because then maybe this could be a very good option, if you could provide the example script.", + "created_at": "2024-03-12T14:52:02Z", + "author": "DanaeNikol" + }, + { + "body": "For now, we are only after the percentage, but later maybe we want to move further than that.", + "created_at": "2024-03-12T14:52:49Z", + "author": "DanaeNikol" + }, + { + "body": "Alright, how I would solve it would probably be to create a new body-part which is located halfway between the left ear and right ear, and then work with that. I can send some code and instructions in a bit?", + "created_at": "2024-03-12T15:11:16Z", + "author": "sronilsson" + }, + { + "body": "Sounds very good, thanks for your precious help!", + "created_at": "2024-03-12T15:12:28Z", + "author": "DanaeNikol" + }, + { + "body": "First things first - let's see if we can create a bunch of files with an additional body-part, with that body-part being halfway between the ears, using the attached script.\r\n\r\ni) Open the attached compressed file and change the config path, the save directory, and the names of your body-parts, and save the file. You should only have to edit the lines above the “########################” mark. I have added comments to each line; please let me know if something doesn’t make sense.\r\n\r\nii) Activate your SimBA conda environment, navigate to the directory where add_body_part.py is stored, and type python add_body_part.py.\r\n\r\niii) New files, one for each file stored in your outlier_corrected_movement_location directory, will be stored in your specified output directory.
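A rough sketch of what a script like add_body_part.py presumably does, per the steps above: for every file, add a new body-part halfway between the two ears. The output body-part name (EarMid) is a made-up placeholder; the ear column names follow the body-part list posted earlier in this thread.

```python
import os
import pandas as pd

IN_DIR = "project_folder/csv/outlier_corrected_movement_location"
OUT_DIR = "new_files"
os.makedirs(OUT_DIR, exist_ok=True)

for file_name in os.listdir(IN_DIR):
    df = pd.read_csv(os.path.join(IN_DIR, file_name), index_col=0)
    for animal in ("1", "2"):
        left, right, new = f"Ear_left_{animal}", f"Ear_right_{animal}", f"EarMid_{animal}"
        df[f"{new}_x"] = (df[f"{left}_x"] + df[f"{right}_x"]) / 2  # midpoint x
        df[f"{new}_y"] = (df[f"{left}_y"] + df[f"{right}_y"]) / 2  # midpoint y
        # conservative confidence: the weaker of the two ear detections
        df[f"{new}_p"] = df[[f"{left}_p", f"{right}_p"]].min(axis=1)
    df.to_csv(os.path.join(OUT_DIR, file_name))
```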
The files will have additional columns representing your new body-part.\r\n\r\n[add_body_part.py.zip](https://github.com/sgoldenlab/simba/files/14576675/add_body_part.py.zip)\r\n", + "created_at": "2024-03-12T17:58:51Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson!\r\n\r\nThanks so much for your help :)\r\n\r\nUnfortunately the zip file is not found, there is an error. Could you reupload it?", + "created_at": "2024-03-12T22:44:03Z", + "author": "DanaeNikol" + }, + { + "body": "Hmm, it doesn't want to play along for some reason.. can you try this gdrive link? https://drive.google.com/file/d/1RkqYIWMm4WXyIOKkLF5p8T8Up5-nc1Fo/view?usp=sharing", + "created_at": "2024-03-12T23:12:00Z", + "author": "sronilsson" + }, + { + "body": "Everything works perfectly!\r\nThank you so much @sronilsson !", + "created_at": "2024-03-13T10:32:32Z", + "author": "DanaeNikol" + }, + { + "body": "Great @DanaeNikol ! Are you OK for the next steps? I was thinking: \r\n\r\ni) Move your new files, containing the new extra body-part data, to your SimBA project and the `project_folder/csv/outlier_corrected_movement_location` directory. \r\n\r\nii) Add the new body-part name to your SimBA project body-part list in the `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` file. \r\n\r\niii) Proceed to compute ROI features by selecting the new body-part name from the dropdown in the GUI?\r\n\r\n", + "created_at": "2024-03-13T11:16:21Z", + "author": "sronilsson" + }, + { + "body": "All clear! I proceeded with everything and it all seems to be going well for now!\r\nThanks a lot for the precious help!\r\n\r\nOne more short question: are the ROI definitions saved somewhere? Is it the h5 file in the '/logs/measures/' folder?\r\nFor example, if I want to run another ROI analysis for different ROIs, can I back up my currently defined ROIs somewhere to potentially use them again at some point for further analyses? ", + "created_at": "2024-03-13T12:38:46Z", + "author": "DanaeNikol" + }, + { + "body": "Yes - the ROI definitions are stored in a file at location `project_folder/logs/measures/ROI_definitions.h5`. \r\n\r\nAlso, if you wanted to convert this `h5` file to a CSV file and see what is in it, you can do that with this pop-up documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#extract-roi-definitions-to-human-readable-format).\r\n\r\nLet me know if anything else comes up!", + "created_at": "2024-03-13T12:53:57Z", + "author": "sronilsson" + }, + { + "body": "Thank you @sronilsson !!\r\n\r\nOne more question then (hehe): would it be possible to make some changes in the csv file of the ROI definitions and then convert it back to the h5?", + "created_at": "2024-03-14T07:31:29Z", + "author": "DanaeNikol" + }, + { + "body": "Hi @DanaeNikol ! Yes you could, but again I don't have a graphical interface for it, and it would have to be code. We need to chunk up the CSV files into a new `h5` file that has three dataframes inside of it, for circles, polygons, and rectangles. The dataframes can be empty if a specific shape-type doesn't exist, but at least an empty entry has to be there. I can give you the code for it if that helps?
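The CSV-to-h5 direction can be sketched with pandas' HDFStore (which requires the `tables` package). The three key names below are assumptions; check the keys of your own file first with pd.HDFStore(path).keys().

```python
import pandas as pd

H5_PATH = "project_folder/logs/measures/ROI_definitions.h5"

rectangles = pd.read_csv("rectangles.csv")
circles = pd.read_csv("circles.csv")
polygons = pd.DataFrame()  # an empty entry still has to be written if no polygons exist

with pd.HDFStore(H5_PATH, mode="w") as store:
    store["rectangles"] = rectangles   # key names assumed; verify against your file
    store["circleDf"] = circles
    store["polygons"] = polygons
```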
\r\n\r\n ", + "created_at": "2024-03-14T11:28:50Z", + "author": "sronilsson" + }, + { + "body": "If that would be easy to share, it would be of great help I think!", + "created_at": "2024-03-14T12:22:30Z", + "author": "DanaeNikol" + }, + { + "body": "Np - it would be something like this, again you just have to edit the lines above the `##...` to your paths. \r\n\r\nIt takes a bit of wrangling to get the data represented as strings in the CSV into the appropriate formats. \r\n\r\nLet me know how it goes please!\r\n\r\n[roi_definition_csvs_to_h5.py.zip](https://github.com/sgoldenlab/simba/files/14604044/roi_definition_csvs_to_h5.py.zip)\r\n", + "created_at": "2024-03-14T14:59:29Z", + "author": "sronilsson" + }, + { + "body": "Also let me know if it downloads OK or if you need a gdrive link", + "created_at": "2024-03-14T14:59:50Z", + "author": "sronilsson" + }, + { + "body": "I works! Thanks a lot @sronilsson!", + "created_at": "2024-03-19T09:53:37Z", + "author": "DanaeNikol" + }, + { + "body": "And one other question, in the ROI analysis, is it possible to have ROIs that overlap with each other in the same ROI analysis round? For example if I have many square ROIs covering my cage and then one big for the whole cage, would this be a problem? Or should it be okay? ", + "created_at": "2024-03-19T10:23:29Z", + "author": "DanaeNikol" + }, + { + "body": "@DanaeNikol that's no problem, ROIs can overlap however you wish, and could contain wholly or partly shared regions", + "created_at": "2024-03-19T11:10:19Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Error during user defined Project Configuration for custom multi-animal tracking", + "body": "**Describe the bug**\r\nIn the define pose window: during assigning body parts to individual animals on the image, the screen has the selected image and there seem to be buttons above the image but they are not visible. \r\nAfter assigning the body parts to the animals the define pose window closes and the project configuration page still has different tabs but they are all blank. The main window shows that user defined pose configuration has been created. But since the rest of the tabs on the project configuration page are blank I cannot finish creating the project. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to File -> create new project\r\n2. select multi animal tracking and custom tracking (2 animals and 6 body parts per animal)\r\n3. assign user defined nodes to the selected image\r\n4. See error\r\n\r\n**Screenshots**\r\nTerminal:\r\n![Pasted image 3](https://github.com/sgoldenlab/simba/assets/162177197/96c559a6-bcb7-4798-89a2-107a126958e8)\r\n\r\nDefine pose page during node assignment:\r\n![Pasted image](https://github.com/sgoldenlab/simba/assets/162177197/1a38f81e-fe40-4d74-9bdd-3ce653708c0f)\r\n\r\nBlank project configuration page after node assignment\r\n![Pasted image 1](https://github.com/sgoldenlab/simba/assets/162177197/3a4ce731-ef10-4470-96c5-f453bb485c2f)\r\n\r\nMain simba window after node assignment:\r\n![Pasted image 2](https://github.com/sgoldenlab/simba/assets/162177197/6866e23d-f31d-47f4-833e-4c06a31374f1)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - Fedora Linux 38\r\n - Python Version 3.6.0\r\n - Are you using anaconda? 
Yes, I created an environment using conda and then installed simba using the pip install simba-uw-tf-dev command\r\n ", + "user": "drish-soneja", + "reaction_cnt": 0, + "created_at": "2024-03-04T17:33:49Z", + "updated_at": "2024-03-08T23:20:08Z", + "author": "drish-soneja", + "comments": [ + { + "body": "Thanks for reporting @drish-soneja - can you upgrade to 1.87.1 with `pip install simba-uw-tf-dev --upgrade` and let me know how it looks?\r\n\r\nIf that does not solve it, how does it look if you install 1.87.1 in a new conda environment, or run `pip uninstall simba-uw-tf-dev` and then `pip install simba-uw-tf-dev` ?", + "created_at": "2024-03-04T20:30:28Z", + "author": "sronilsson" + }, + { + "body": "hi @sronilsson, thank you for your help, the update fixed the problem I was having! \r\n\r\nI did run into another issue when I was trying to annotate my video: none of the key shortcuts were working to navigate the video. \r\n![Pasted image 1](https://github.com/sgoldenlab/simba/assets/162177197/ffbd0749-cdca-48f8-b2d7-6281d533a89c)\r\nLet me know if I should open a new issue for this. \r\n", + "created_at": "2024-03-08T01:07:19Z", + "author": "drish-soneja" + }, + { + "body": "Thanks for letting me know @drish-soneja, no need to open another issue. I will see if I can replicate it and get back to you", + "created_at": "2024-03-08T01:25:51Z", + "author": "sronilsson" + }, + { + "body": "@drish-soneja I tried on MacOS and in Ubuntu and unfortunately I can't replicate the error, and I don't have a machine running fedora around. The keyboard bindings are controlled through opencv - perhaps it is an opencv version issue. Which version of opencv do you have in your simba conda environment? This is what I have. \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/8ee14355-caf4-4e62-b882-f37829b66983)\r\n\r\n", + "created_at": "2024-03-08T01:49:52Z", + "author": "sronilsson" + }, + { + "body": "I have the same version as you do. Do you think I should update opencv?\r\n![Pasted image 2](https://github.com/sgoldenlab/simba/assets/162177197/686ae02b-3591-4df1-9861-d45a45158e5c)\r\n", + "created_at": "2024-03-08T15:04:01Z", + "author": "drish-soneja" + }, + { + "body": "No, that looks OK.. one more question: which version of tkinter does your environment have? \r\n\r\n\"image\"\r\n", + "created_at": "2024-03-08T15:53:27Z", + "author": "sronilsson" + }, + { + "body": "I have the same version as you for tkinter too\r\n![Pasted image 3](https://github.com/sgoldenlab/simba/assets/162177197/80cd7495-8cc1-4b7e-ba33-8ebcf2231ab8)\r\n", + "created_at": "2024-03-08T17:17:44Z", + "author": "drish-soneja" + }, + { + "body": "the keybinds worked for labelling the body parts when I was defining the pose. I was able to use the x and c keys to change the frame and continue. So I think it might be something specific to this window?", + "created_at": "2024-03-08T17:25:20Z", + "author": "drish-soneja" + }, + { + "body": "Yeah - sorry for mentioning the opencv thing earlier, it's not actually opencv that binds the keys while labelling, it's through tkinter right [here](https://github.com/sgoldenlab/simba/blob/163bd746258fb82b773a60d7a759c6cbe48d33f1/simba/labelling/labelling_interface.py#L326) in the code. The x and z keys used earlier are through opencv. \r\n\r\nMy suspicion, as I don't see this error on mac or ubuntu, is that it has to do with the tkinter version running on fedora.
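To test whether key events reach tkinter at all on a given OS and window manager (the mechanism used by labelling_interface.py, per the link above), a minimal standalone probe can help; the bound actions here are just prints, not the labelling GUI's real callbacks:

```python
import tkinter as tk

root = tk.Tk()
root.title("key binding probe")
root.bind("<Right>", lambda e: print("right arrow pressed"))
root.bind("<Left>", lambda e: print("left arrow pressed"))
root.bind("<Control-a>", lambda e: print("ctrl+a pressed"))
root.bind("p", lambda e: print("p pressed"))
root.mainloop()  # press keys with the window focused; output goes to the terminal
```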
One more thing you could help me try, to see if this is the cause: you could try to run simba in a python 3.9 or python 3.10 conda environment (I know there will be some warnings, but it should run) - that gives you a different tkinter version automatically through the python 3.9 or python 3.10 standard library - and you could see if the error persists? Also make sure you don't have caps lock pressed.", + "created_at": "2024-03-08T17:49:01Z", + "author": "sronilsson" + }, + { + "body": "For python = 3.6 \r\n\r\nI realised that the frame navigation keybinds (ctrl+a, right, left, etc.) are working, it's only the video navigation (p, o, etc.) that wasn't working. So I can use the frame navigation keybinds to go through my video.\r\n\r\nAlso, when I try to draw an ROI this is what the screen looks like: \r\n![Pasted image 4](https://github.com/sgoldenlab/simba/assets/162177197/8ec12e3c-1cff-4f86-b785-43305873ebaf)\r\nI'm not able to see frames from my videos, but the video information is displayed in the ROI settings window. \r\n\r\n\r\nFor python = 3.9\r\nI got a tkinter error when trying to load the project. \r\n![Pasted image](https://github.com/sgoldenlab/simba/assets/162177197/44254c50-fec5-416d-bc42-3136f0003a82)\r\n\r\nAlso, thank you so much for all your help! ", + "created_at": "2024-03-08T22:11:29Z", + "author": "drish-soneja" + }, + { + "body": "Thank you, super helpful - one question about the ROI issue before I dig into those issues: do you see any error message in the Linux terminal when you see the \"top level not responding\" pop up?", + "created_at": "2024-03-08T23:20:07Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Bump dash-html-components from 1.0.3 to 2.0.0 in /docs", + "body": "Bumps [dash-html-components](https://github.com/plotly/dash-html-components) from 1.0.3 to 2.0.0.\n
Changelog

Sourced from dash-html-components's changelog.

[2.0.0] - 2021-08-23
As of Dash 2, the development of dash-html-components has been moved to the main Dash repo. This package exists for backward compatibility.

[1.1.4] - 2021-07-09
Changed
- #194 Updated dependencies and build process
- #190 Updated R package vignettes and dash-info.yaml to regenerate examples without attaching now-deprecated core component packages (dashHtmlComponents, dashCoreComponents, or dashTable).

[1.1.3] - 2021-04-08
Fixed
- #179 - Fixes #77 Added allow and referrerPolicy properties to html.Iframe
- #178 - Fix #161 data property, and fix #129 obsolete, deprecated, and discouraged elements. No elements were removed, but comments were added to the documentation about these elements detailing their limitations.

[1.1.2] - 2021-01-19
Fixed

[1.1.1] - 2020-09-03
- Dash.jl Julia component generation

[1.1.0] - 2020-08-25
Added
- #165 Add support for Dash.jl Julia component generation.

Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dash-html-components&package-manager=pip&previous-version=1.0.3&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-03-01T15:57:48Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump dash-core-components from 1.10.2 to 2.0.0", + "body": "Bumps [dash-core-components](https://github.com/plotly/dash-component-boilerplate) from 1.10.2 to 2.0.0.\n
Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dash-core-components&package-manager=pip&previous-version=1.10.2&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-03-01T15:57:47Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump dash-html-components from 1.0.3 to 2.0.0", + "body": "Bumps [dash-html-components](https://github.com/plotly/dash-html-components) from 1.0.3 to 2.0.0.\n
Changelog

Sourced from dash-html-components's changelog.

[2.0.0] - 2021-08-23
As of Dash 2, the development of dash-html-components has been moved to the main Dash repo. This package exists for backward compatibility.

[1.1.4] - 2021-07-09
Changed
- #194 Updated dependencies and build process
- #190 Updated R package vignettes and dash-info.yaml to regenerate examples without attaching now-deprecated core component packages (dashHtmlComponents, dashCoreComponents, or dashTable).

[1.1.3] - 2021-04-08
Fixed
- #179 - Fixes #77 Added allow and referrerPolicy properties to html.Iframe
- #178 - Fix #161 data property, and fix #129 obsolete, deprecated, and discouraged elements. No elements were removed, but comments were added to the documentation about these elements detailing their limitations.

[1.1.2] - 2021-01-19
Fixed

[1.1.1] - 2020-09-03
- Dash.jl Julia component generation

[1.1.0] - 2020-08-25
Added
- #165 Add support for Dash.jl Julia component generation.

Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dash-html-components&package-manager=pip&previous-version=1.0.3&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-03-01T15:57:38Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump dash-html-components from 1.0.3 to 2.0.0 in /simba", + "body": "Bumps [dash-html-components](https://github.com/plotly/dash-html-components) from 1.0.3 to 2.0.0.\n
Changelog

Sourced from dash-html-components's changelog.

[2.0.0] - 2021-08-23
As of Dash 2, the development of dash-html-components has been moved to the main Dash repo. This package exists for backward compatibility.

[1.1.4] - 2021-07-09
Changed
- #194 Updated dependencies and build process
- #190 Updated R package vignettes and dash-info.yaml to regenerate examples without attaching now-deprecated core component packages (dashHtmlComponents, dashCoreComponents, or dashTable).

[1.1.3] - 2021-04-08
Fixed
- #179 - Fixes #77 Added allow and referrerPolicy properties to html.Iframe
- #178 - Fix #161 data property, and fix #129 obsolete, deprecated, and discouraged elements. No elements were removed, but comments were added to the documentation about these elements detailing their limitations.

[1.1.2] - 2021-01-19
Fixed

[1.1.1] - 2020-09-03
- Dash.jl Julia component generation

[1.1.0] - 2020-08-25
Added
- #165 Add support for Dash.jl Julia component generation.

Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dash-html-components&package-manager=pip&previous-version=1.0.3&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-03-01T15:57:32Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump dash-core-components from 1.10.2 to 2.0.0 in /docs", + "body": "Bumps [dash-core-components](https://github.com/plotly/dash-component-boilerplate) from 1.10.2 to 2.0.0.\n
Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dash-core-components&package-manager=pip&previous-version=1.10.2&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-03-01T15:55:33Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump dash-core-components from 1.10.2 to 2.0.0 in /simba", + "body": "Bumps [dash-core-components](https://github.com/plotly/dash-component-boilerplate) from 1.10.2 to 2.0.0.\n
Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dash-core-components&package-manager=pip&previous-version=1.10.2&new-version=2.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-03-01T15:55:01Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "outliers correction ", + "body": "Hello, I'm using SLEAP H5 data for Simba, but when but when I run the outlier corrections it states is done but the document is empty \r\n\r\n\r\n\r\n![Screenshot](https://github.com/sgoldenlab/simba/assets/158096575/6d40642a-5473-4af0-8643-3f6f0ec014dc)\r\n\r\n\r\n\r\n", + "user": "Maroca5", + "reaction_cnt": 0, + "created_at": "2024-02-29T16:31:50Z", + "updated_at": "2024-02-29T22:43:00Z", + "author": "Maroca5", + "comments": [ + { + "body": "Hey @Maroca5! Thank you for reporting. Do you also have a screengrab of what is being printed out in the main SimBA terminal, and the Microsoft terminal, when this happens? It would help me to figure out what is going on. \r\n\r\n", + "created_at": "2024-02-29T16:41:38Z", + "author": "sronilsson" + }, + { + "body": "hello, I don't have it sorry but now I'm running into another issue, Simba was using Python 3.10, I uninstall Python 3.10 to see if it will run with 3.6 but is not, is giving me an error [Fatal error in launcher: Unable to create process using '\"C:\\Users\\Venere\\AppData\\Local\\Programs\\Python\\Python310\\python.exe\" \"C:\\Users\\Venere\\AppData\\Local\\Programs\\Python\\Python310\\Scripts\\simba.exe\" ': The system cannot find the file specified.] how can I fix this?. thank you in advance.", + "created_at": "2024-02-29T18:15:45Z", + "author": "Maroca5" + }, + { + "body": "Hi @Maroca5 - that looks a little odd, I don't know what `simba.exe` is. Did you install simba with `pip install simba-uw-tf-dev` ?\r\n\r\nIt seems to be still using python3.10. Conda is an alternative, there is some installation instructions [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/anaconda_installation.md)", + "created_at": "2024-02-29T18:31:35Z", + "author": "sronilsson" + }, + { + "body": "Yes I used pip install simba-uw-tf-dev", + "created_at": "2024-02-29T18:34:25Z", + "author": "Maroca5" + }, + { + "body": "I'd recommend creating a new python environment in Conda, and installing simba in that environment. It may be that you have two python environments installed as base outside of conda, and it could be a little tricky to know which one is running. ", + "created_at": "2024-02-29T18:39:33Z", + "author": "sronilsson" + }, + { + "body": "Hello again, so is installed with Anaconda, but is not finding ffmpeg [SimBA could not find a FFMPEG installation on computer (as evaluated by \"ffmpeg\" returning None). 
SimBA works best with FFMPEG and it is recommended to install it on your computer]\r\nbut it is installed \r\n![Screenshot (5)](https://github.com/sgoldenlab/simba/assets/158096575/59a55111-1ba1-429f-bde3-1e2d7bf379d9)\r\n", + "created_at": "2024-02-29T19:23:43Z", + "author": "Maroca5" + }, + { + "body": "Interesting. Just to check - to see if ffmpeg is installed, SimBA runs the command `ffmpeg` - do you see a similar print out when you run that command (i.e., just ffmpeg, not ffmpeg --version)?\r\n", + "created_at": "2024-02-29T19:46:11Z", + "author": "sronilsson" + }, + { + "body": "yes, \r\n![Screenshot (6)](https://github.com/sgoldenlab/simba/assets/158096575/0e38ab76-69f4-44e0-a587-4b437a0c7c89)\r\n", + "created_at": "2024-02-29T19:49:50Z", + "author": "Maroca5" + }, + { + "body": "It runs in the administrative and normal command prompts, but it doesn't run in the anaconda terminal.", + "created_at": "2024-02-29T19:52:08Z", + "author": "Maroca5" + }, + { + "body": "@Maroca5 - so I understand, you get the ffmpeg warning when you launch simba in the conda navigator terminal?", + "created_at": "2024-02-29T19:55:26Z", + "author": "sronilsson" + }, + { + "body": "Figured it out, thank you. I installed it with anaconda and it worked. I will try to run a project; hopefully I don't have any issues.", + "created_at": "2024-02-29T19:59:31Z", + "author": "Maroca5" + }, + { + "body": "Now when I'm trying to upload SLEAP files it is giving me this error.\r\nSIMBA NO DATA ERROR: No files with ['.slp'] extensions found in C:/Users/Venere/Desktop/MP4 videos new folder/Behavior.\r\nI tried with .slp, H5 and CSV ", + "created_at": "2024-02-29T20:56:27Z", + "author": "Maroca5" + }, + { + "body": "What is the content of the `C:/Users/Venere/Desktop/MP4 videos new folder/Behavior` directory?\r\n\r\nDoes it have your SLEAP data in it?", + "created_at": "2024-02-29T21:03:17Z", + "author": "sronilsson" + }, + { + "body": "yes, I moved all the SLEAP data there. When I first downloaded SIMBA with pip it would identify the files, only the H5, but now it is not finding any of them.", + "created_at": "2024-02-29T21:06:27Z", + "author": "Maroca5" + }, + { + "body": "What are the file extensions of the files in the `C:/Users/Venere/Desktop/MP4 videos new folder/Behavior` directory? Can you show me a screengrab from that folder?\r\n", + "created_at": "2024-02-29T21:09:49Z", + "author": "sronilsson" + }, + { + "body": "![Screenshot (7)](https://github.com/sgoldenlab/simba/assets/158096575/540143a0-9c4f-4c48-a800-1b4e24fb138f)\r\n", + "created_at": "2024-02-29T21:21:02Z", + "author": "Maroca5" + }, + { + "body": "Thanks @Maroca5 - that folder does not contain any tracking data, there are no files with h5, csv or slp extensions - I see 5 video MP4 files. That could be what SimBA is complaining about. If they are located in a sub-folder to `C:/Users/Venere/Desktop/MP4 videos new folder/Behavior`, e.g., `C:/Users/Venere/Desktop/MP4 videos new folder/Behavior/Behavior`, then try pointing SimBA to that directory. ", + "created_at": "2024-02-29T21:24:05Z", + "author": "sronilsson" + }, + { + "body": "I did and it still didn't work", + "created_at": "2024-02-29T21:25:58Z", + "author": "Maroca5" + }, + { + "body": "And what is located in `C:/Users/Venere/Desktop/MP4 videos new folder/Behavior/Behavior` ? 
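An aside on the ffmpeg check discussed earlier in this thread: below is a hedged sketch of one way to test from Python whether the `ffmpeg` command resolves on the PATH. It is an illustration, not SimBA's actual implementation; only standard-library calls are used, and the subprocess flags are chosen to stay compatible with Python 3.6.

```python
import shutil
import subprocess

ffmpeg_path = shutil.which("ffmpeg")  # None when ffmpeg is not on the PATH
if ffmpeg_path is None:
    # Matches the situation above: ffmpeg visible in one terminal but not on the
    # conda environment's PATH; installing it into the environment resolved it.
    print("FFmpeg not found on PATH.")
else:
    # Print the first line of the banner shown in the thread's screenshots.
    out = subprocess.run(["ffmpeg", "-version"], stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, universal_newlines=True)
    print(out.stdout.splitlines()[0])
```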
", + "created_at": "2024-02-29T21:26:44Z", + "author": "sronilsson" + }, + { + "body": "so I have\r\n![Screenshot (8)](https://github.com/sgoldenlab/simba/assets/158096575/47d25fe0-7be0-4d00-abfb-669d99366963)\r\n all the H5 files here ", + "created_at": "2024-02-29T21:28:48Z", + "author": "Maroca5" + }, + { + "body": "and I use this directory \r\n![Screenshot (9)](https://github.com/sgoldenlab/simba/assets/158096575/4e7fec3a-ae7b-4b61-b7f5-903cedb1c4af)\r\n", + "created_at": "2024-02-29T21:29:32Z", + "author": "Maroca5" + }, + { + "body": "And just to confirm, what error do you see then?", + "created_at": "2024-02-29T21:31:18Z", + "author": "sronilsson" + }, + { + "body": "Note: I would avoid having the best_model.h5 in that folder, because that is not a data file, and you don't want to try to import that as a data file. ", + "created_at": "2024-02-29T21:32:06Z", + "author": "sronilsson" + }, + { + "body": "Another note: .. the data files begin with `labels` prefix and are only 12kb large - just to check, its not your hand annotations in sleap is it? Or do the files contain machine generated body-part predictions for all frames in the respective videos?", + "created_at": "2024-02-29T21:35:44Z", + "author": "sronilsson" + }, + { + "body": "so it started to run when I took out the best-model but then it gave me this error\r\n![Screenshot (10)](https://github.com/sgoldenlab/simba/assets/158096575/0a69f352-4f01-4041-a5f4-bdd15aa5acdc)\r\n", + "created_at": "2024-02-29T21:54:15Z", + "author": "Maroca5" + }, + { + "body": "SimBA needs a way to pair your SLEAP video files, to your SLEAP data files. SimBA does this by looking at the filenames, `Video1.mp4` is paired with `Video1.h5` etc. In your case, your sleap h5 files is e.g., named `somethingveryverylong.h5`. Then SimBA searches for `somethingveryverylong.mp4` but can't find it, thats why you see this error:\r\n\r\n\"image\"\r\n\r\nTry renaming your sleap data to align with your video data file names. \r\n\r\n\r\n", + "created_at": "2024-02-29T21:59:36Z", + "author": "sronilsson" + }, + { + "body": "It worked, but now the body parts don't match I'll fix that and continue, thank you so much for your help.", + "created_at": "2024-02-29T22:11:57Z", + "author": "Maroca5" + }, + { + "body": "one last question, is there any way to do the analysis throughout simba, without having to use SLEAP or other software data? just doin everything with simba?", + "created_at": "2024-02-29T22:23:51Z", + "author": "Maroca5" + }, + { + "body": "@Maroca5 you'd have to import it from another tool like SLEAP, or software that does pose estimation. I agree it would be good to have a basic pose estimation pipeline within simba but no plans for that at the moment ", + "created_at": "2024-02-29T22:42:59Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Error in model evaluation", + "body": "**Describe the bug**\r\nWhen trying to create evaluation video, the main console gives back an error saying there is a mismatch in the number of features in input file.\r\n\r\n**To Reproduce**\r\nAfter succesfully label the classifier. \r\nin \"run machine model window\". 
\r\n- select the video file and classifier model file\r\n- run model\r\n- create interactive probability plot\r\n- select the best probability for the given classifier and the minimum bout ms \r\n- click create validation video\r\n\r\n\"Captura\r\n\r\n\r\n**Expected behavior**\r\n Once complete, you should see a video file representing the analyzed file inside the project_folder/frames/output/validation directory\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows 11\r\n - Python Version 3.6.8\r\n - Are you using anaconda? Yes\r\n - Simba version: 1.86.4\r\n \r\n\r\n**Additional context**\r\nIt's my first time creating a new model, so maybe I'm doing something wrong, but I followed the tutorial on this github page. \r\nAnyway, I apologize in advance if it was my mistake and not the software's. \r\nThanks in advance for your help\r\n", + "user": "Monica9577", + "reaction_cnt": 0, + "created_at": "2024-02-24T18:25:15Z", + "updated_at": "2024-02-28T01:17:12Z", + "author": "Monica9577", + "comments": [ + { + "body": "Hi @Monica9577 ! SimBA takes all your files inside the `project_folder/csv/targets_inserted` directory and builds a model from these files. In each one of these files, if you remove the body-part data columns (in the beginning) and your annotations (in the end of the file), you are left with **221** columns of *features*. \r\n\r\nNext, you want to use this model .sav file, that is built using **221** columns of features, on new data inside your `project_folder/csv/features_extracted` directory. \r\nSimBA goes ahead and opens the first file inside the `project_folder/csv/features_extracted` directory, and tries to analyze it. However, it finds 245 columns. It doesn't know what to do, as the model was trained with 221 columns, and it now sees 245 columns - what should it do with all these extra columns? So it gives you the error. \r\n\r\nOne possible way that could cause a mismatch in the number of columns is, for example, that you added ROI features to your new data inside the `project_folder/csv/features_extracted` directory, but you did **not** add them to the files you used to train the model with - could this be possible?", + "created_at": "2024-02-24T20:55:53Z", + "author": "sronilsson" + }, + { + "body": "No, I haven't added any ROIs to any of the files... \r\nBut I'm going to check if there's any difference between the features csv of the video I used to train the model and the one I'm using to evaluate it \r\nThanks!", + "created_at": "2024-02-26T10:27:37Z", + "author": "Monica9577" + }, + { + "body": "Thanks!\r\n\r\n\r\nOne more potential reason for this error I have seen before: Within a single SimBA project, be sure you are working with the same Animal names and the same body-part names in all files.\r\n\r\nThe file `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` stores the names of your body-parts in your SimBA project. Before training models, SimBA drops the data for your body-parts - we don't want to use the locations of the body-parts in any model. However, if the body-part names or animal names for any reason change across data files, then the appropriate columns will not be recognized as body-parts and SimBA will fail to drop them in some files, causing the mismatch in column numbers error you see. 
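A small sketch of the comparison suggested in the reply above: diff the columns of a training file (from `targets_inserted`) against an inference file (from `features_extracted`). The file paths below are hypothetical placeholders, not files from this thread, and CSV is assumed as the project file type.

```python
import pandas as pd

# Hypothetical paths - substitute files from your own project_folder.
train_df = pd.read_csv(r"project_folder/csv/targets_inserted/Video1.csv", index_col=0)
infer_df = pd.read_csv(r"project_folder/csv/features_extracted/Video1.csv", index_col=0)

extra = sorted(set(infer_df.columns) - set(train_df.columns))
missing = sorted(set(train_df.columns) - set(infer_df.columns))
print(f"training: {train_df.shape[1]} columns, inference: {infer_df.shape[1]} columns")
print(f"only in inference file (e.g. stray ROI features): {extra}")
print(f"only in training file (annotation columns are expected here): {missing}")
```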
", + "created_at": "2024-02-26T12:47:12Z", + "author": "sronilsson" + }, + { + "body": "Hi \r\nYes I'm using the same body parts and animal names in every video, \r\nbut is still giving problems\r\n", + "created_at": "2024-02-28T00:14:16Z", + "author": "Monica9577" + }, + { + "body": "@Monica9577 - if you look at the a file inside the `project_folder/csv/targets_inserted` directory, and compare it against a file inside the `project_folder/csv/features_extracted` directory, what differences to you see in column names and the number of columns? You could also zip up and share a file from each directory here with me or through gdrive link and I can look?", + "created_at": "2024-02-28T01:17:11Z", + "author": "sronilsson" + } + ] + }, + { + "title": "ROI analysis - not possible ROI definition", + "body": "Hi there!\r\n**ROI definition not possible*\r\nAfter the outlier correction, when trying to proceed to the ROI analysis, it is impossible to define the shape and number of ROIs. \r\nWhen I try to draw the ROIs for an individual video, after selecting the shape, I get stuck in the shape definition where I get the following:\r\n\r\n![Screenshot at 2024-02-24 14-50-27](https://github.com/sgoldenlab/simba/assets/97169827/a29667fe-8830-4450-ae09-31dc4fea1167)\r\n\r\nThere is no traceback error on the prompt or any sign that anything is going on. \r\nIf I try to proceed to the drawing(after the frozen \"Define shape\" window), then I get the following Error: \r\n\r\n![Screenshot at 2024-02-24 15-00-04](https://github.com/sgoldenlab/simba/assets/97169827/23ca5da0-10bf-406b-9fcb-0a44edd14e0d)\r\n\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'ROI\" tab on your project\r\n2. Click on 'Define ROIs'\r\n3. Select Draw for one video\r\n4. Select shape type of ROI\r\n\r\n****If I try to close the window of \"define shape\", I get a message that it is not responding.\r\n\r\nI am using the last Simba update.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux\r\n - Python Version: 3.6\r\n\r\nThanks a lot!\r\n\r\n", + "user": "DanaeNikol", + "reaction_cnt": 0, + "created_at": "2024-02-24T14:04:54Z", + "updated_at": "2024-03-01T14:43:35Z", + "author": "DanaeNikol", + "comments": [ + { + "body": "Hi @DanaeNikol! Thanks for reporting. Just before I start to dig: \r\n\r\nWhich version of SimBA do you see if you run `pip show simba-uw-tf-dev` ? \r\n\r\nI ask because I had some issues related to threading introduced a week-ish ago and I thought I'd fixed recently, I want to make sure its not the same issue. ", + "created_at": "2024-02-24T20:38:34Z", + "author": "sronilsson" + }, + { + "body": "Couple more question - as I can see the `Define shape` window is bank. \r\n\r\nWhen you first open the window by clicking on DRAW, do you see any warnings or errors in the terminal complaining about not being able to open the video file? \r\n\r\nDo you get the same error for all the video files, or is it specific to this video? \r\n\r\n\"image\"\r\n\r\nAre the project located on a server with any read or write permissions that could block you opening the video and reading frames, or is it locally in the e.g., /home/ dir somewhere? \r\n", + "created_at": "2024-02-24T21:01:42Z", + "author": "sronilsson" + }, + { + "body": "It seems like I am using version 1.82.7. \r\n\r\nThe same situation happens on another couple of videos I tried to also draw ROIs for. 
The project is located on a server but I would guess this should not be a problem, since the permissions are all set to read and write access for everyone. \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/97169827/a5877619-54df-450c-ae23-735928cee119)\r\n\r\n", + "created_at": "2024-02-26T10:18:45Z", + "author": "DanaeNikol" + }, + { + "body": "I just found out that I needed to enter a shape name for the frame to appear. Then it actually allows me to draw. Thanks a lot!", + "created_at": "2024-02-26T10:30:10Z", + "author": "DanaeNikol" + }, + { + "body": "Thanks for letting me know @DanaeNikol - I just found myself a linux computer and was about to see if I could recreate this problem, but will leave it for now as it seems to be running? But I have it now, so if another issue pops up I can attempt to recreate.", + "created_at": "2024-02-26T12:25:14Z", + "author": "sronilsson" + }, + { + "body": "Thank you @sronilsson! \r\nA very quick question: in ROI definition, is there a way to move all ROIs together at once? I tried different key combinations and went through the documentation, but found nothing. And it would be a very helpful tool :)", + "created_at": "2024-02-29T18:33:46Z", + "author": "DanaeNikol" + }, + { + "body": "It's a good point, I have not written a function like that.. just to confirm how it would work: when you move all ROIs, you would click once on the image - it would then lock to the current centroid of all ROIs. Then you would move your mouse and click again. That would cause all ROI centroids to move by the delta(x) and delta(y), which is the difference between the first mouse click and the second mouse click? (and the outer boundaries would of course be corrected but remain the same relative to the new centroid location).\r\n\r\n\r\n... I can see the possibility of moving some ROIs by mistake outside the image lol so have to have some control for that", + "created_at": "2024-02-29T18:45:03Z", + "author": "sronilsson" + }, + { + "body": "I would guess this could be a good idea, yes.\r\nCould it also be possible to select the ROIs one wants and not all of them? But then I guess it would be a bit more complicated with the centroid and all? ", + "created_at": "2024-03-01T07:55:05Z", + "author": "DanaeNikol" + }, + { + "body": "Thanks @DanaeNikol - that's even better I think.\r\n\r\nJust trying to get my head around anticipated behavior. Say you have selected several ROIs by holding down the shift key. What happens next? You click on a final location **without** holding down the shift key, and then does the first selected ROI end up in the clicked location and all the other clicked ROIs move but keep their relative position to the first ROI location?\r\n\r\n", + "created_at": "2024-03-01T14:43:34Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Outlier Correction - Location", + "body": "**Hi there!**\r\nWhen I run the outlier correction, the correction for the movement works fine and is completed, but once the first video starts being processed for the location outlier correction, it does not go through and it seems as if SimBA (GUI) has crashed. 
From the final output, there is only one csv file for the movement correction.\r\n\r\nWhat could be wrong?\r\n\r\nThank you!\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux.\r\n ", + "user": "DanaeNikol", + "reaction_cnt": 0, + "created_at": "2024-02-22T13:24:49Z", + "updated_at": "2024-02-26T09:58:57Z", + "author": "DanaeNikol", + "comments": [ + { + "body": "Hey @DanaeNikol ! Did you see any traceback error messages in the linux terminal after SimBA crashed? Would be helpful for me to try to replicate the issue and see where it went wrong. ", + "created_at": "2024-02-22T13:47:31Z", + "author": "sronilsson" + }, + { + "body": "Thanks for your help! I noticed that in the end it was processing everything, but at a super slow pace (it was only visible after an hour that something was actually happening). Thus there were no traceback error messages.\r\n\r\nHowever, for some videos it would say 'processing', but right after, 'skipping outlier correction' for another video. \r\nWould this be normal?", + "created_at": "2024-02-23T15:51:25Z", + "author": "DanaeNikol" + }, + { + "body": "Hey @DanaeNikol ! Not entirely sure what's going on but I can make some guesses... :)\r\n\r\nIf outlier correction is super slow: is it possible that the outlier criteria is very stringent, and the data is large? Say that using your criteria means that most or all body-part movements are classified as outliers. That would mean all body-parts have to be corrected, which would take significant time if the data is big. You can try increasing the criteria to see if that fixes it. \r\n\r\nFor the ``... right after 'skipping outlier correction' for another video``, could it be that you clicked to run outlier correction, and as it was so slow, you clicked `Skip outlier correction` while the outlier correction was still running? This would mean that the two processes (running outlier correction, and skipping outlier correction) would run simultaneously on two threads on your computer and you would get messages from both these processes? \r\n\r\n", + "created_at": "2024-02-23T16:15:16Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson! \r\n\r\nI think both your guesses are correct. \r\nRegarding the outlier correction, as criteria I used what was recommended, meaning 1.5 for the movement and 1.7 for the location.\r\nIt is indeed true that (I think) all of my body part movements seem to have been corrected for a very high number of frames (each video should have in total approx 8900 frames). Here is a brief screenshot:\r\n\r\n![Screenshot at 2024-02-24 15-23-55](https://github.com/sgoldenlab/simba/assets/97169827/603e8d4f-8404-4ae3-a6ef-e0fbace1eccb)\r\n \r\nIf I get such results, then I guess it would make a lot of sense to either change my criterion or the body parts I am using to calculate the reference value?\r\n\r\nThe quality of the detections, at least as judged by the dlc-created videos, seemed very good to get such intense outlier correction. As bodyparts for the outlier correction I used the defaults provided by Simba as suggested, but these were the two ears, thus the gross correction could be partially explained by that?\r\n\r\nI am sorry for the naivety of my questions, I am very new in this field. \r\n\r\nYour second guess is correct, I reran everything and it all seemed normal! 
\r\n\r\nThanks a lot!!", + "created_at": "2024-02-24T14:30:36Z", + "author": "DanaeNikol" + }, + { + "body": "Hi @DanaeNikol !\r\n\r\nYes, the outlier correction code looks at the aggregate mean or median distances and movement between body-parts in the video, and takes that aggregate value and multiplies it by your criterion value (e.g., 1.5) to produce a **threshold** value. It might be that that threshold value is too low in your case. That threshold value, from the documentation, worked well for me in my tutorial example, but it will **not** be universally true and is dictated by your animal body-parts and quality of tracking. The model I used for the tutorial was trained with I think 3-4k annotated images, so it was very exact and maybe, looking back, not the best example to use in a tutorial... 😬 ", + "created_at": "2024-02-24T20:48:20Z", + "author": "sronilsson" + }, + { + "body": "Thank you @sronilsson!", + "created_at": "2024-02-26T09:58:54Z", + "author": "DanaeNikol" + } + ] + }, + { + "title": "Selective ROI request", + "body": "\r\n\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nI am interested in analysing the head direction toward two specific ROIs, but for the rest of them I just need the number of entries, time spent, velocity... these kinds of parameters. \r\n\r\nAs far as I know, in the current simba version, when creating the visualize roi features video (and selecting to show directionality), it creates a video where it shows the direction to ALL ROIs specified in the video. (Please find attached an example). \r\n\r\n\r\nhttps://github.com/sgoldenlab/simba/assets/160180401/d459c7ae-7b15-4086-ac7f-5b1726e04bed\r\n\r\n\r\nThis other video represents the ideal directionality, but we had to create the rest of the ROIs separately \r\n\r\n\r\nhttps://github.com/sgoldenlab/simba/assets/160180401/164b035e-8970-473b-b208-3525109df712\r\n\r\n\r\n\r\n**Describe the solution you'd like**\r\nIt would be wonderful if in this visualize roi features menu you could choose the body part you are interested in AND a selection of ROIs you want the directionality to be plotted for. \r\n\"Captura\r\n\r\nThank you very much for your excellent work and effort\r\n\r\n\r\n\r\n\r\n\r\n", + "user": "Monica9577", + "reaction_cnt": 0, + "created_at": "2024-02-22T00:38:03Z", + "updated_at": "2024-02-24T20:36:13Z", + "author": "Monica9577", + "comments": [ + { + "body": "Hey @Monica9577 ! Yes I think I understand - you're looking for an option to suppress directionality visualization for some ROIs?\r\n\r\nThe most obvious solution would be, as you say, providing checkboxes for each ROI, and then you check the boxes for the ROIs you want to show directionality for in the GUI menu. BUT, some users have **many** ROIs (e.g., say 10-20), and the menu would become very big, or it would be a pop-up inside the pop-up, and I can see the menu becoming confusing for some users.. \r\n\r\nAnother alternative would be that we provide a jupyter notebook example, where you can specify the select ROI names similar to these [HERE](https://simba-uw-tf-dev.readthedocs.io/en/latest/notebooks.html#visualizations) so you can create the videos with the select ROIs without using the GUI, do you think that could work? 
\r\n\r\nEDIT:\r\n\r\nE.g., [THIS](https://simba-uw-tf-dev.readthedocs.io/en/latest/nb/roi_feature_visualizer.html) notebook gives you ROI features without using the GUI, we could write a similar one that also includes the ROIs that should be considered for directionality?", + "created_at": "2024-02-22T01:11:08Z", + "author": "sronilsson" + }, + { + "body": "Yes! I'm looking for an option to suppress directionality visualization for some ROIs \r\n\r\nI think maybe this edited version you're offering could work out \r\n\r\nThanks for the quick response", + "created_at": "2024-02-22T01:46:42Z", + "author": "Monica9577" + }, + { + "body": "@Monica9577 do you think [THIS](https://simba-uw-tf-dev.readthedocs.io/en/latest/nb/roi_feature_visualizer.html#EXAMPLE-2) could work? You'd need to copy that code over to a python script or notebook file in your SimBA environment and run it though.\r\n\r\nAlso, make sure you have simba version 1.86.3 or above installed for it to recognize the ``Directionality_roi_subset`` argument.", + "created_at": "2024-02-22T14:57:07Z", + "author": "sronilsson" + }, + { + "body": "hi \r\nSorry, I'm not familiar with notebook usage \r\nWhat do you mean by copy it into my simBa environment?", + "created_at": "2024-02-24T00:12:42Z", + "author": "Monica9577" + }, + { + "body": "Hi @Monica9577 - alright, no problem, we are all new to this!\r\n\r\nLet me try to suggest something different. \r\n\r\n1. Download the attached ZIP file and un-zip it. \r\n\r\n2. Open the `.py` file in a text editor, e.g., Notepad or TextEdit, and make some changes:\r\n\r\nChange this line to be the actual ROI names you want to visualize, rather than ``My_first_polygon`` etc. \r\n```\r\nroi_subset = ['My_first_polygon', 'My_second_polygon']\r\n```\r\n\r\nChange this line to your actual project_config path, e.g., `CONFIG_PATH = 'c:\\my_simba_project\\project_config.ini'`\r\n```\r\nCONFIG_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini'\r\n```\r\n\r\nChange this to the actual name of the video you want to visualize, e.g., `VIDEO_NAME = 'Monica9577.mp4'`\r\n```\r\nVIDEO_NAME = 'Together_1.avi'\r\n```\r\n\r\n3. Save the file and navigate to the directory of the file inside the Windows terminal. Once in the terminal, type:\r\n```\r\npython roi_feature_visualizer_example.py\r\n``` \r\n\r\n4. Hit enter, voila, your video should be created, fingers crossed :)\r\n\r\n\r\n\r\nDOWNLOAD THIS FILE:\r\n\r\n[roi_feature_visualizer_example.py.zip](https://github.com/sgoldenlab/simba/files/14394387/roi_feature_visualizer_example.py.zip)\r\n\r\n\r\n\r\n", + "created_at": "2024-02-24T20:36:12Z", + "author": "sronilsson" + } + ] + }, + { + "title": "error when creating animal direction to body part videos", + "body": "**Describe the bug**\r\nWhen trying to create the video, the following error appears \r\n\r\nSIMBA INVALID FILE TYPE ERROR: C:\\Users\\monip\\Desktop\\TRIAL DEFINITIVO\\project_folder\\logs\\body_part_directionality_dataframes\\RI male M47-V_LEFT.csv is not a valid CSV file\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Load project \r\n2. Set video parameters\r\n3. outlier correction\r\n4. extract features\r\n5. In ROI windows - analyze distances; analyze directionality between animals, analyze directionality between body parts; Aggregate boolean conditional statistics\r\n6. 
In ROI windows - Visualize directionality between body parts; choose body parts and run\r\n\r\n**Expected behavior**\r\nA video with lines connecting the 2 selected body parts of the 2 animals\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows 11\r\n - Python Version 3.6.8\r\n - Are you using anaconda: yes\r\n \r\n\r\n**Additional context**\r\nI already have updated the simba software to the last version (1.86.1)\r\n", + "user": "Monica9577", + "reaction_cnt": 0, + "created_at": "2024-02-21T16:48:33Z", + "updated_at": "2024-02-24T00:45:48Z", + "author": "Monica9577", + "comments": [ + { + "body": "Hey @Monica9577! Do you have a full traceback screengrab of the error message? ", + "created_at": "2024-02-21T17:20:37Z", + "author": "sronilsson" + }, + { + "body": "Yes, I'm pasting you the code\r\n\r\n(NEW_SIMBA) C:\\Users\\monip>simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\utils\\read_write.py\", line 131, in read_df\r\n df = df.astype(np.float32)\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\pandas\\core\\generic.py\", line 5882, in astype\r\n dtype=dtype, copy=copy, errors=errors, **kwargs\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 581, in astype\r\n return self.apply(\"astype\", dtype=dtype, **kwargs)\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 438, in apply\r\n applied = getattr(b, f)(**kwargs)\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 559, in astype\r\n return self._astype(dtype, copy=copy, errors=errors, values=values, **kwargs)\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 643, in _astype\r\n values = astype_nansafe(vals1d, dtype, copy=True, **kwargs)\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\pandas\\core\\dtypes\\cast.py\", line 729, in astype_nansafe\r\n return arr.astype(dtype, copy=True)\r\nValueError: could not convert string to float: 'RI female M22-V_RIGHT'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\ui\\pop_ups\\directing_animal_to_bodypart_plot_pop_up.py\", line 99, in \r\n command=lambda: self.__create_directionality_plots(multiple_videos=False),\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\ui\\pop_ups\\directing_animal_to_bodypart_plot_pop_up.py\", line 162, in __create_directionality_plots\r\n style_attr=style_attr,\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\plotting\\directing_animals_to_bodypart_visualizer.py\", line 62, in __init__\r\n self.data_dict = self.direction_analyzer.read_directionality_dfs()\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\data_processors\\directing_animal_to_bodypart.py\", line 169, in read_directionality_dfs\r\n results[file_name] = read_df(file_path, self.file_type)\r\n File 
\"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\utils\\read_write.py\", line 136, in read_df\r\n msg=f\"{file_path} is not a valid CSV file\", source=read_df.__name__\r\nsimba.utils.errors.InvalidFileTypeError: SIMBA INVALID FILE TYPE ERROR: C:\\Users\\monip\\Desktop\\TRIAL DEFINITIVO\\project_folder\\logs\\body_part_directionality_dataframes\\RI female M22-V_RIGHT.csv is not a valid CSV file", + "created_at": "2024-02-21T17:42:56Z", + "author": "Monica9577" + }, + { + "body": "hello again \r\n\r\nI have updated to the last version and it seems that it doesn't show this error anymore, eventhough in the result video I can't see the expected funnels. But I truly believe it's a problem in the annotations, I'll try to corrected and i'll tell you if the error it's solved ", + "created_at": "2024-02-24T00:45:47Z", + "author": "Monica9577" + } + ] + }, + { + "title": "Impossible to create 2 animal trackplots", + "body": "**Describe the bug**\r\nWhen trying to create 2 animal trackplots on the visualization window it doesn't create it \r\n\r\n**To Reproduce**\r\nin the visualitzation window, click on visualize paths. Select 2 animals, and select the body parts to track\r\n\r\n**Expected behavior**\r\nI expected the program to create a video with the 2 animal trackplots\r\n\r\n**Screenshots**\r\n\"Captura\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows 10\r\n - Python Version 3.6.8\r\n - Are you using anaconda yes\r\n \r\n\r\n**Additional context**\r\nI already have updated simba to the 1.85.9 version\r\n", + "user": "Monica9577", + "reaction_cnt": 0, + "created_at": "2024-02-21T13:10:33Z", + "updated_at": "2024-02-24T00:59:45Z", + "author": "Monica9577", + "comments": [ + { + "body": "Thanks again @Monica9577! Can you let me know hoe it looks like in version 1.86.1? ", + "created_at": "2024-02-21T13:58:25Z", + "author": "sronilsson" + }, + { + "body": "hello \r\nNow when I try to create the video, it seems like its working but then the program suddenly crushes and close itself... T-T", + "created_at": "2024-02-24T00:59:44Z", + "author": "Monica9577" + } + ] + }, + { + "title": "Impossible to create ROI analysis", + "body": "**Describe the bug**\r\nWhen trying to create ROI analysis, it doesn't create it \r\n- Error:\r\n![2024-02-21](https://github.com/sgoldenlab/simba/assets/160180401/33f059e2-ac1f-44ca-a187-55de1dd6c6d4)\r\n\r\n**To Reproduce**\r\nAfter loading the project\r\nimport tracking data and videos\r\nadd the video parameters\r\ndraw ROIs (and save)\r\nIn analyse ROI data: click on analyse ROI data: aggregates\r\n\r\n**Expected behavior**\r\nA statement will be printed in the main SimBA terminal window noting that the process is complete. The ROI descriptive statistics can be found in the Project_folder/logs directory in two time-stamped files.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows 10 (IntelCore I7; nividia gforce gpu)\r\n - Python Version 3.6.8\r\n - Are you using anaconda: Yes\r\n \r\n\r\n**Additional context**\r\nI already have the last version of Simba (1.85.5)\r\n", + "user": "Monica9577", + "reaction_cnt": 0, + "created_at": "2024-02-21T01:16:26Z", + "updated_at": "2024-02-21T13:58:46Z", + "author": "Monica9577", + "comments": [ + { + "body": "Thanks @Monica9577 - you caught a bug, that was likely introduced last week, I appreciate it! 
How does it look if you update SimBA with `pip install simba-uw-tf-dev --upgrade` to version 1.85.6?", + "created_at": "2024-02-21T02:31:57Z", + "author": "sronilsson" + }, + { + "body": "Yes, now it is working \r\n\r\nBut when I create a ROI video Features, it runs the whole process but the video is not in the folder it says it's supposed to be in", + "created_at": "2024-02-21T03:04:07Z", + "author": "Monica9577" + }, + { + "body": "Thanks Monica - let me check on that too. FYI there was a bunch of updates last week that leaked in without being properly tested.. sorry about this. ", + "created_at": "2024-02-21T03:13:51Z", + "author": "sronilsson" + }, + { + "body": "@Monica9577 - when you get a chance, could you test in version 1.85.7? I've reverted to the old files and confirmed it works on my end but please let me know how it runs for you.", + "created_at": "2024-02-21T03:50:07Z", + "author": "sronilsson" + }, + { + "body": "Now it works perfectly !!!\r\n\r\nThank you very much for your invaluable help, you and your team are doing a wonderful job.", + "created_at": "2024-02-21T04:30:57Z", + "author": "Monica9577" + }, + { + "body": "Cheers @Monica9577 ! I think we have smoothed over most bugs from that kerfuffle, but please let me know if you bump into more issues so we can fix those too, I don't have enough tests implemented yet, so your reports are very valuable", + "created_at": "2024-02-21T04:39:50Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Creation configuration project problem", + "body": "**Describe the bug**\r\nWhen creating a new project, when I click on create a new configuration project, nothing happens in the main console of simba. And the folder where the new project is supposed to be lacks the configuration file.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Open Anaconda\r\n2. Open simba\r\n3. go to file\r\n4. create a new project\r\n5. select the project directory and project name\r\n6. add number of classifiers \r\n7. insert names of classifiers\r\n8. Select tracking method and body points\r\n9. create config file\r\n\r\nCode error:\r\n(NEW_SIMBA) C:\\Users\\monip>simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\ui\\create_project_ui.py\", line 216, in \r\n command=lambda: self.run(),\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\ui\\create_project_ui.py\", line 348, in run\r\n file_type=self.file_type_dropdown.getChoices(),\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\utils\\config_creator.py\", line 59, in __init__\r\n self.__create_configparser_config()\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\site-packages\\simba\\utils\\config_creator.py\", line 195, in __create_configparser_config\r\n ] = Dtypes.NONE.value\r\n File \"C:\\Users\\monip\\anaconda3\\envs\\NEW_SIMBA\\lib\\enum.py\", line 326, in __getattr__\r\n raise AttributeError(name) from None\r\nAttributeError: CLASSIFIER\r\n\r\n**Expected behavior**\r\nThe main console should show a successful project creation. And the configuration file should appear inside the \"project_folder\"\r\n\r\n\"Captura\r\n\r\n\"image\"\r\n\r\n\"image\"\r\n\r\n\r\n**Desktop **\r\n - OS: Windows 11\r\n - Python Version 3.6.13\r\n - Are you using anaconda? 
Yes \r\n \r\n\r\n", + "user": "Monica9577", + "reaction_cnt": 1, + "created_at": "2024-02-15T21:51:44Z", + "updated_at": "2024-02-21T13:59:02Z", + "author": "Monica9577", + "comments": [ + { + "body": "Got it, thanks for letting me know @Monica9577 ! \r\n\r\nIf you update simba to the latest version (using `pip install simba-uw-tf-dev --upgrade`) - does that fix the issue?\r\n\r\n", + "created_at": "2024-02-15T22:00:36Z", + "author": "sronilsson" + }, + { + "body": "No, unfortunatelly it doesn't fix it", + "created_at": "2024-02-15T22:07:10Z", + "author": "Monica9577" + }, + { + "body": "just want to add I'm having the exact same issue.\r\nI'm on `simba-uw-tf-dev 1.85.2` and ubuntu 22.04.", + "created_at": "2024-02-15T22:11:39Z", + "author": "phildong" + }, + { + "body": "Yes, I'm also on simba-uw-tf-dev 1.85.2 , with anaconda (also updated)", + "created_at": "2024-02-15T22:45:17Z", + "author": "Monica9577" + }, + { + "body": "Thanks @Monica9577 and @phildong - I appreciate it. It's a bug you caught. I was able to recreate it and I inserted the fix in version `1.85.3` - if `pip install simba-uw-tf-dev --upgrade` didn't get you that version - can tou try `pip install simba-uw-tf-dev==1.85.3` and let me know how it looks on your end?", + "created_at": "2024-02-15T23:08:20Z", + "author": "sronilsson" + }, + { + "body": "It's working now \r\nThank you !\r\n\r\nI used pip install simba-uw-tf-dev --upgrade and it installed the 1.85.3 version ", + "created_at": "2024-02-16T08:25:47Z", + "author": "Monica9577" + }, + { + "body": "Thanks @Monica9577 👍🏻 - please let me know if anything else comes up. @phildong - please let me.know if you see anything different ", + "created_at": "2024-02-16T08:57:53Z", + "author": "sronilsson" + }, + { + "body": "same here. I re-created my environment and I can confirm this bug is gone in `simba-uw-tf-dev 1.85.5`. Thank you @sronilsson !", + "created_at": "2024-02-19T16:40:23Z", + "author": "phildong" + }, + { + "body": "thanks for letting me know @phildong !", + "created_at": "2024-02-19T16:57:39Z", + "author": "sronilsson" + } + ] + }, + { + "title": "MetaKeys ImportError", + "body": "I am trying to follow this notebook to use SimBA as python file: https://simba-uw-tf-dev.readthedocs.io/en/latest/nb/CLI%20Example%201.html\r\n\r\nI haven't changed too much, apart from paths and the video settings. The problem comes from this line `InferenceBatch(config_path=CONFIG_PATH).run()`. Which causes this error: `from simba.utils.enums import (ConfigKey, Defaults, Dtypes, MetaKeys, Methods, ImportError: cannot import name 'MetaKeys' from 'simba.utils.enums' (/simba-master/simba/utils/enums.py)\r\n`\r\nI see that the MetaKeys are not defined in the enums.py. What are the MetaKeys and how do I define them?\r\n\r\nMany thanks\r\n\r\n\r\n", + "user": "rjd55", + "reaction_cnt": 0, + "created_at": "2024-02-06T21:01:43Z", + "updated_at": "2024-02-08T13:44:44Z", + "author": "rjd55", + "comments": [ + { + "body": "Thanks @rjd55 - yes I think I know this one. There where a bunch of enums that where duplicated, so we recently removed some and placed them under a single enum class. \r\n\r\nDid you install simba by cloning the repo? I see that these updates are present in the pip package, but not the repo for some reason but I will fix that. \r\n\r\n", + "created_at": "2024-02-06T21:17:00Z", + "author": "sronilsson" + }, + { + "body": "@rjd55 - I've update the repo now. 
If you run the latest version of simba (installed either through repo or pip) how does it look?", + "created_at": "2024-02-06T21:41:57Z", + "author": "sronilsson" + }, + { + "body": "Hi, I haven't checked through pip. But works with the cloned repo. Thank you!\r\n", + "created_at": "2024-02-08T13:44:44Z", + "author": "rjd55" + } + ] + }, + { + "title": "Analyze Machine Predictions: Aggregate for Three Animals", + "body": "Hello! I started using SimBA recently, and I'm using a model with 3 animals that I imported from SLEAP. During the analysis portion of my model, I was hoping to get the number of behavior bouts for different animal pairs, but I noticed it just aggregates the behavioral counts altogether. I can see how this would work fine with 2 animals only, but I'm specifically interested in looking at how many bouts of behavior each animal has so I can compare behavioral interactions between Animal 1 and Animal 2 versus how Animal 2 and Animal 3 interact, for example. \r\n\r\nIs there any way I can get this information in the analysis portion of the program, or is this feature unavailable at this time? Thank you for your help!", + "user": "olivialaw", + "reaction_cnt": 0, + "created_at": "2024-02-06T01:11:37Z", + "updated_at": "2024-02-06T15:34:30Z", + "author": "olivialaw", + "comments": [ + { + "body": "Hi @olivialaw - Good question. A single classifier will only provide one boolean true vs false output value (BEHAVIOR PRESENT vs BEHAVIOR ABSENT) for each frame. There is currently no way if teasing out which animal or animals performed the action if the action is e.g., \"interact\". So, if you have three animals and you want to score specific interactions, you would need three classifiers. (Animal_1 <-> Animal_2, Animal_2 <-> Animal_3, Animal_1 <-> Animal_3). Note that if you have any type of “directionality” in your behaviors e.g., “following other animal” or “sniffing other animal” the number of required classifiers can balloon and the number of classifiers can become tricky to work with (Animal_1 -> Animal_2, Animal_2 -> Animal_1, Animal_1 -> Animal_3, Animal_3 -> Animal_1, Animal_2 -> Animal_3, Animal_3 -> Animal_2).. \r\n\r\nThree classifiers would be doable. The workflow I’ve come across is to annotate e.g., “interact” in BORIS, with a “ANIMAL” flag, saying which animals are performing the interact action. Then you can concatenate the animal column and the behavior column in the BORIS files into three separate behaviors as discussed in Gitter [HERE](https://matrix.to/#/!afKEsAvrtNfxHHEeIJ:gitter.im/$vMyO_w-ObWfFS2nzh7C55ODNhUkRtPxO2rnmZ8H-4ow?via=gitter.im&via=matrix.org&via=matrix.freyachat.eu). You then append that BORIS data to your data in SimBA to create three different classifiers. ", + "created_at": "2024-02-06T15:34:29Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Bump dash from 1.14.0 to 2.15.0 in /docs", + "body": "Bumps [dash](https://github.com/plotly/dash) from 1.14.0 to 2.15.0.\n
\nRelease notes (sourced from dash's releases) and changelog (sourced from dash's changelog; both carry the same entries):\n\nDash v2.15.0 (2024-01-31)\nAdded:\n- #2695 Adds triggered_id to dash_clientside.callback_context. Fixes #2692\n- #2723 Improve dcc Slider/RangeSlider tooltips (fixes #1846): adds tooltip.template, a string format template where {value} is formatted with the actual value; tooltip.style, a style object given to the div of the tooltip; and tooltip.transform, a reference to a function in the window.dccFunctions namespace.\n- #2732 Add special key _dash_error to setProps, allowing component developers to send an error without throwing in render. Usage: props.setProps({_dash_error: new Error(\"custom error\")})\nFixed:\n- #2732 Sanitize html props that are vulnerable to XSS if user data is inserted (fixes #2729, Validate url to prevent XSS attacks)\nChanged:\n- #2652 dcc.Clipboard supports htm_content and triggers a copy to clipboard when n_clicks changes\n- #2721 Remove ansi2html, fixes #2613\n\nDash v2.14.2 (2023-11-27)\nFixed:\n- #2700 Fix _allow_dynamic_callbacks for newly-added components.\n\nDash v2.14.1 (2023-10-26)\nFixed:\n- #2672 Fix get_caller_name in case the source is not available.\nChanged:\n- #2674 Raise flask & werkzeug limits to <3.1\n\nDash v2.14.0 (2023-10-11)\nFixed:\n- #2634 Fix deprecation warning on pkg_resources, fixes #2631\nChanged:\n- #2635 Get proper app module name, removing the need to give __name__ to the Dash constructor.\nAdded:\n- #2647 routing_callback_inputs, allowing more Input and/or State arguments to be passed to the pages routing callback\n- #2649 Add _allow_dynamic_callbacks to register new callbacks inside other callbacks. WARNING: dynamic callback creation can be dangerous, use at your own risk; it is not intended for use in a production app, multi-user or multiprocess use, as it only works for a single user.\n\nDash v2.13.0\nChanged:\n- #2610 Load plotly.js bundle/version from plotly.py\n(truncated)\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=dash&package-manager=pip&previous-version=1.14.0&new-version=2.15.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\nDependabot commands and options: `@dependabot rebase` rebases this PR; `@dependabot recreate` recreates it, overwriting any edits that have been made to it; `@dependabot merge` and `@dependabot squash and merge` merge after CI passes; `@dependabot cancel merge` cancels a previously requested merge and blocks automerging; `@dependabot reopen` and `@dependabot close` reopen or close it; `@dependabot show ignore conditions` lists the ignore conditions of the specified dependency; `@dependabot ignore this major version`, `@dependabot ignore this minor version`, and `@dependabot ignore this dependency` stop Dependabot creating further PRs for the given scope. You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-02-02T18:14:42Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump dash from 1.14.0 to 2.15.0", + "body": "Bumps [dash](https://github.com/plotly/dash) from 1.14.0 to 2.15.0.\n
\nRelease notes, changelog, Dependabot compatibility score badge, and Dependabot commands and options duplicated verbatim from the companion PR above bumping dash from 1.14.0 to 2.15.0 in /docs (Dash v2.13.0 through v2.15.0 entries).\n
\n\n> **Note**\n> Automatic rebases have been disabled on this pull request as it has been open for over 30 days.\n", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-02-02T18:14:34Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump dash from 1.14.0 to 2.15.0 in /simba", + "body": "Bumps [dash](https://github.com/plotly/dash) from 1.14.0 to 2.15.0.\n
\nRelease notes, changelog, Dependabot compatibility score badge, and Dependabot commands and options duplicated verbatim from the companion PR above bumping dash from 1.14.0 to 2.15.0 in /docs (Dash v2.13.0 through v2.15.0 entries).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-02-02T18:14:33Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Can´t pickle_thread.RLocj objects error while Creating Location Heatmaps", + "body": "Hi everyone,\r\nI'm having this error after trying to produce a heatmap from a single animal tracking:\r\n\r\n` Traceback (most recent call last):\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/site-packages/simba/ui/pop_ups/heatmap_location_pop_up.py\", line 58, in \r\n self.run_single_video_btn = Button(self.run_single_video_frm, text='Create single video', fg='blue', command=lambda: self.__create_heatmap_plots(multiple_videos=False))\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/site-packages/simba/ui/pop_ups/heatmap_location_pop_up.py\", line 130, in __create_heatmap_plots\r\n heatmapper_clf.run()\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/site-packages/simba/plotting/heat_mapper_location_mp.py\", line 275, in run\r\n for cnt, result in enumerate(pool.imap(constants, frame_arrays, chunksize=self.multiprocess_chunksize)):\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/multiprocessing/pool.py\", line 735, in next\r\n raise value\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/multiprocessing/pool.py\", line 424, in _handle_tasks\r\n put(task)\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/multiprocessing/connection.py\", line 206, in send\r\n self._send_bytes(_ForkingPickler.dumps(obj))\r\n File \"/Applications/anaconda3/envs/SIMBA/lib/python3.6/multiprocessing/reduction.py\", line 51, in dumps\r\n cls(buf, protocol).dump(obj)\r\nTypeError: can't pickle _thread.RLock objects`\r\n\r\nMy setup: \r\n1) iMac OSX 10.13.6 | python 3.6.15\r\n2) 264 x 352 video (30fps), 0.44 px/mm\r\n3) One animal tracking (white on black background using deeplabcut)\r\n4) I was able to analyze and generate a video with ROI tracking but got this error during heatmap generation. \r\n\r\nAny ideas as for the reason i'm getting this?\r\n\r\nthanx a lot,\r\nRodrigo\r\n", + "user": "rnrpereira", + "reaction_cnt": 0, + "created_at": "2024-01-24T22:41:39Z", + "updated_at": "2024-01-26T20:46:42Z", + "author": "rnrpereira", + "comments": [ + { + "body": "Hi @rnrpereira! It is likely a bug, for reporting this one I appreciate it.\r\n\r\n**Which version of SimBA so you see with `pip show simba-uw-tf-dev`?** \r\n\r\nPS. For anyone bumping into the `TypeError: can't pickle _thread.RLock objects` error: This can happen when passing objects with a python logger attribute as a constant to a multiprocessing method. Drop the logger from the object and it should run. \r\n\r\n", + "created_at": "2024-01-24T23:51:20Z", + "author": "sronilsson" + }, + { + "body": "Thank you for your reply.\r\nI am using SIMBA v.1.82.4\r\nR.", + "created_at": "2024-01-25T16:23:17Z", + "author": "rnrpereira" + }, + { + "body": "Thanks @rnrpereira - I've updated the code, if you update simba with `pip install simba-uw-tf-dev --upgrade` can you let me know how it runs and looks and if the error still appears on your end? 
\r\n\r\n", + "created_at": "2024-01-26T02:23:23Z", + "author": "sronilsson" + }, + { + "body": "It's working fine!\r\nthank you...\r\nR.", + "created_at": "2024-01-26T20:13:12Z", + "author": "rnrpereira" + }, + { + "body": "Thanks for letting me know!", + "created_at": "2024-01-26T20:46:42Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 10.2.0 in /docs", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 10.2.0.\n
\nRelease notes (sourced from pillow's releases): 10.2.0, https://pillow.readthedocs.io/en/stable/releasenotes/10.2.0.html (changes list truncated).\n\nChangelog (sourced from pillow's changelog), 10.2.0 (2024-01-02):\n- Add keep_rgb option when saving JPEG to prevent conversion of RGB colorspace #7553 [bgilbert, radarhere]\n- Trim glyph size in ImageFont.getmask() #7669, #7672 [radarhere, nulano]\n- Deprecate IptcImagePlugin helpers #7664 [nulano, hugovk, radarhere]\n- Allow uncompressed TIFF images to be saved in chunks #7650 [radarhere]\n- Concatenate multiple JPEG EXIF markers #7496 [radarhere]\n- Changed IPTC tile tuple to match other plugins #7661 [radarhere]\n- Do not assign new fp attribute when exiting context manager #7566 [radarhere]\n- Support arbitrary masks for uncompressed RGB DDS images #7589 [radarhere, akx]\n- Support setting ROWSPERSTRIP tag #7654 [radarhere]\n- Apply ImageFont.MAX_STRING_LENGTH to ImageFont.getmask() #7662 [radarhere]\n- Optimise ImageColor using functools.lru_cache #7657 [hugovk]\n- Restricted environment keys for ImageMath.eval() #7655 [wiredfool, radarhere]\n- Optimise ImageMode.getmode using functools.lru_cache #7641 [hugovk, radarhere]\n- Fix incorrect color blending for overlapping glyphs #7497 [ZachNagengast, nulano, radarhere]\n- Attempt memory mapping when tile args is a string #7565 [radarhere]\n- Fill identical pixels with transparency in subsequent frames when saving GIF #7568 [radarhere]\n(truncated)\n\nCommits: 6956d0b 10.2.0 version bump; 31c8dac Merge pull request #7675 from python-pillow/pre-commit-ci-update-config; 40a3f91 Merge pull request #7674 from nulano/url-example; cb41b0c [pre-commit.ci] pre-commit autoupdate; de62b25 fix image url in \"Reading from URL\" example; 7c526a6 Update CHANGES.rst [ci skip]; d93a5ad Merge pull request #7553 from bgilbert/jpeg-rgb; aed764f Update CHANGES.rst [ci skip]; f8df530 Merge pull request #7672 from nulano/imagefont-negative-crop; 24e9485 Merge pull request #7671 from radarhere/imagetransform; additional commits viewable in compare view.\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=10.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\nStandard Dependabot commands and options apply (rebase, recreate, merge, squash and merge, cancel merge, reopen, close, show ignore conditions, ignore this major/minor version, ignore this dependency); automated security fix PRs can be disabled from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-01-22T22:58:45Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump pillow from 5.4.1 to 10.2.0", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 10.2.0.\n
\nRelease notes, changelog, commits, Dependabot compatibility score badge, and Dependabot commands and options duplicated verbatim from the companion PR above bumping pillow from 5.4.1 to 10.2.0 in /docs.
\n\n> **Note**\n> Automatic rebases have been disabled on this pull request as it has been open for over 30 days.\n", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2024-01-22T22:57:54Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Identify Behavior Screen Opens as Blank", + "body": "**Describe the bug**\r\nI created a SimBa project and added one video alongside the slp file with the same name which included the pose estimation. When I went to the Label behavior tab and tried to annotate a new video, a new tab opened which said SimBa at the top but was completely blank. I tried repeatedly with new videos to get the screen to open, but it is always blank.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create new SimBa Project with one video and slp data\r\n2. Open project in SimBa\r\n3. Click on Label Behavior\r\n4. Click on 'Select video (create new video annotation)'\r\n5. See error\r\n\r\n**Expected behavior**\r\nI believe it should be a screen similar to the one that opened when I identified the two animals initially.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11\r\n - I would have to go and check Python version\r\n - Using Anaconda\r\n ", + "user": "yampants", + "reaction_cnt": 0, + "created_at": "2024-01-22T17:53:25Z", + "updated_at": "2024-01-26T02:30:16Z", + "author": "yampants", + "comments": [ + { + "body": "Hi @yampants and thanks for reporting. One question to help me find the issue: When the blank screen pops open, do you see any error msgs in the main Windows terminal window you used to open SimBA with? If so, would you mind sending me a screengrab of the error?", + "created_at": "2024-01-22T17:56:54Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson, of course! I went to the file location in the error and the video does exist there, but I'm not quite sure I understand the error.\r\n![Screenshot 2024-01-25 143740](https://github.com/sgoldenlab/simba/assets/97981702/866610d9-8ad1-44d9-b450-bf74213ce23d)\r\n", + "created_at": "2024-01-25T19:42:50Z", + "author": "yampants" + }, + { + "body": "Ah got thanks @yampants! When labelling videos, SimBA will need a file to append the annotations to. This file lives in the `project_folder/csv/features_extracted`, and will have the same name your video file. To create it, try clicking on the `Extract features` button and let me know how it goes: \r\n\r\n\"image\"\r\n\r\nThat said.. just to check: the video name `labels.v001` look a little suspicious. Did you feed in a video called labels.v001 to SLEAP or deeplabcut, and sleap/deeplabcut generated a file called `labels.v001` containing body-part predictions for the video `labels.v001`?\r\n ", + "created_at": "2024-01-26T02:30:15Z", + "author": "sronilsson" + } + ] + }, + { + "title": "reciprocal directionality between animals #2", + "body": "Hello there! I've read in the previous issue that to analyze reciprocal directionality between animals is possible to utilize the \"aggregate boolean conditional statistic\". However, it is not possible for me to select the two conditions \"animal_1 directs animal_2\" and \"animal_2 directs animal_1\". I am allowed to select just the directionality between one animal and a specific ROI (ex. ROI animal_1 facing). Do I need to do something else before in order to be able to visualize the two conditions? 
Thanks for your time!!\r\n\"image\"\r\n", + "user": "charliev99", + "reaction_cnt": 0, + "created_at": "2024-01-19T19:09:07Z", + "updated_at": "2024-01-22T19:53:42Z", + "author": "charliev99", + "comments": [ + { + "body": "Hey @charliev99! \r\n\r\nHi! Gotcha, yes, the columns you can see on the dropdowns, are boolean columns (columns that contain only 0 and 1s) that SimBA has identified in the files within the `project_folder/csv/features_extracted` directory. My guess is that inside your project, you do not have `animal_1 facing animal 2` or `animal_2 facing animal 1` columns, that’s why you can’t compute the thing that you want. So before we can compute conditional aggregate boolean statistics, we need to get the directionality data in that folder. There is a few ways to do it on top of my mind. \r\n\r\nHow to calculate the directionality statistics between animals, is documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/directionality_between_animals.md). Take a look at the documentation first before reading below so what I say makes sense: \r\n\r\ni) You can check the `CREATE BOOLEAN TABLES` checkbox, which gives you a folder with CSV files in the logs directory, with one CSV file for each file file inside your `project_folder/csv/outlier_corrected_movement_location` directory. These files will contain the data you’re looking for, with each column representing the relationship between an animal and a body-part belonging to another animal, with a `1` of the body-part is within “line of sight” and `0` if it is not. \r\n\r\nii) Now, we need to get that data into the `project_folder/csv/features_extracted` directory, so that the conditional calculator can see it. You can do this in two ways. \r\n\r\nYou can check the `APPEND BOOLEAN TABLES TO FEATURES` checkbox and it will do it automatically. However, this assumes that you already have files in your `project_folder/csv/features_extracted` directory. It will also affect the number of features you have in any supervised classifier you may have, so you may not want to append it willy nilly. \r\n\r\nAnother alternative is to just check the `CREATE BOOLEAN TABLES` checkbox, then move those output files to the `project_folder/csv/features_extracted` folder (and move any other files away from that folder temporarily). \r\n\r\n(iii) Now you can run the conditional calculator and you should see the directionality data in the dropdown. \r\n\r\n\r\nPlease let me know if any of this doesn’t make sense. Also, I am thinking it may be beneficial to select the input directory in the conditional boolean menu, so you don’t have to move files around and just select the folder in the logs directory immediately. Let me know if you think this could help. ", + "created_at": "2024-01-20T16:10:56Z", + "author": "sronilsson" + }, + { + "body": "It does totally makes sense and also it works! Thank you so much for your time! ", + "created_at": "2024-01-22T19:47:03Z", + "author": "charliev99" + } + ] + }, + { + "title": "reciprocal directionality between animals", + "body": "Hey there, thanks again for the amazing software, I just wanted to ask whether there is the possibility of having a measure of reciprocal directionality between animals (ie, both animals are directed towards each other) in SimBA? It would be super helpful, in terms of social behavior analysis, to know when the animals are looking at each other at the same time. 
For now, I understand that you only can check when animal 1 is directed towards animal 2 or vice versa, but not the combined information. Please, let me know if I am missing something and what I'm proposing might be useful and doable. Many thanks again :)\r\n", + "user": "filos93", + "reaction_cnt": 0, + "created_at": "2024-01-17T14:02:35Z", + "updated_at": "2024-01-17T16:59:48Z", + "author": "filos93", + "comments": [ + { + "body": "Hi @filos93! I am not sure if this below is relevant, but if not, let me know why this wouldn't work and we can think of something else and make sure you get the output you are looking for:\r\n\r\nThere is a method in the GUI do compute \"aggregate conditional boolean statistics\" described [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md#compute-aggregate-conditional-statistics-from-boolean-fields). \r\n\r\nThe method is used to string together a bunch of user-defined boolean (true vs false) conditions into a more complex condition, and compute the time in each video that strung together conditions is TRUE. For your scenario, it would be be a simpler input (animal_1 directs animal_2 = TRUE + animal_2 directs animal_1 = TRUE) that gives you the number of frames and time where both animal_1 directs animal_2 is true and animal_1 directs animal_2 is true. ", + "created_at": "2024-01-17T14:27:15Z", + "author": "sronilsson" + }, + { + "body": "Ok, I got it, thank you so much, again thanks for the amazing software :)", + "created_at": "2024-01-17T16:59:48Z", + "author": "filos93" + } + ] + }, + { + "title": "some fixes", + "body": "so the list of fixes is:\n1.Reordered code in the enum class and the machine learning interface.\n2.Fixed an issue where non-existent files were being called for app visibility.\n3.Resolved a problem with loading the ini file due to the absence of the 'file_types' parameter in the FileSelect class.\n\n There are likely more fixes; feel free to inquire about each one. Manual testing was conducted prior to creating this pull request", + "user": "tzukpolinsky", + "reaction_cnt": 0, + "created_at": "2024-01-01T13:03:43Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "tzukpolinsky", + "comments": [] + }, + { + "title": "moved body points", + "body": "**Describe the bug**\r\nbody points are being moved to the top left corner of the video (same location in multiple videos). Videos were tracked in SLEAP, and the body points are never in this location. This is still present, even with outlier correction.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. I updated SimBA with: pip install simba-uw-tf-dev --upgrade\r\n2. I set up a project, imported all videos and .h5 files from sleap (interpolation: linear; smoothing: none), set distances etc\r\n3. Tried both with and without outlier correction (location and movement set to 1.5, nose to tail base on each animal)\r\n4. analyzed directing to animals & visualized\r\n\r\n**Expected behavior**\r\nI expected that the video would show body points on each mice (like when tracked in sleap) & when they are directed toward the other mouse. This was mostly the case except for long periods of time where a body point is placed in the top left corner of the video (same spot on multiple different videos - maybe the numbers are just being set to 0?) 
and counted in the direction data.\r\n\r\n![simba](https://github.com/sgoldenlab/simba/assets/95374245/f95b7ace-b3a5-4501-af70-691eaf7a6945)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows 11\r\n - Python Version [e.g. 3.6.0]\r\n - Are you using anaconda? yes\r\n\r\n", + "user": "ZelikowskyLab", + "reaction_cnt": 0, + "created_at": "2023-12-19T22:57:32Z", + "updated_at": "2023-12-21T17:54:59Z", + "author": "ZelikowskyLab", + "comments": [ + { + "body": "Hi @ZelikowskyLab !\r\n\r\nTLDR: If you run `Body-part: Nearest` interpolation on the data, does it look better?\r\n\r\nThe 0,0 coordinate (top left corner) data is where SimBA places body-parts when the pose-estimation package doesn't provide any location for a given frame. \r\n\r\nThere are two ways to get rid of these missing data points: (i) train a better pose estimation model which provides good body parts predictions for all or nearly all frames or (ii) interpolate the missing data. \r\n\r\nThere are several interpolation methods in SimBA. They can be specified to be performed only when entire animals are missing, or when individual body-parts are missing, and can be quadratic, linear or nearest. I see that you use linear interpolation but I don't know if it is animal or body-part based. If using animal based interpolation, missing data will still be retained unless all of the bodyparts of an animal is missing. Linear interpolation is likely to be a little bit off if data is missing for extended periods of time relative to the other methods. That said, all interpolation methods will be inaccurate if data is missing for many consecutive seconds. Best method, if possible, is always to train a pose-estimation model that provides better data without missing frames so interpolation is rarely used. ", + "created_at": "2023-12-20T11:55:05Z", + "author": "sronilsson" + }, + { + "body": "That worked. Thanks!", + "created_at": "2023-12-21T17:40:47Z", + "author": "ZelikowskyLab" + }, + { + "body": "Thanks for letting me know @ZelikowskyLab !", + "created_at": "2023-12-21T17:54:58Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 10.0.1", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 10.0.1.\n
\nRelease notes (sourced from pillow's releases): 10.0.1, https://pillow.readthedocs.io/en/stable/releasenotes/10.0.1.html; 10.0.0, https://pillow.readthedocs.io/en/stable/releasenotes/10.0.0.html (changes lists truncated).\n\nChangelog (sourced from pillow's changelog):\n\n10.0.1 (2023-09-15)\n- Updated libwebp to 1.3.2 #7395 [radarhere]\n- Updated zlib to 1.3 #7344 [radarhere]\n\n10.0.0 (2023-07-01)\n- Fixed deallocating mask images #7246 [radarhere]\n- Added ImageFont.MAX_STRING_LENGTH #7244 [radarhere, hugovk]\n- Fix Windows build with pyproject.toml #7230 [hugovk, nulano, radarhere]\n- Do not close provided file handles with libtiff #7199 [radarhere]\n- Convert to HSV if mode is HSV in getcolor() #7226 [radarhere]\n- Added alpha_only argument to getbbox() #7123 [radarhere, hugovk]\n- Prioritise speed in repr_png #7242 [radarhere]\n- Do not use CFFI access by default on PyPy #7236 [radarhere]\n- Limit size even if one dimension is zero in decompression bomb check #7235 [radarhere]\n- Use --config-settings instead of deprecated --global-option #7171 [radarhere]\n- Better C integer definitions #6645 [Yay295, hugovk]\n- Fixed finding dependencies on Cygwin #7175 [radarhere]\n- Changed grabclipboard() to use PNG instead of JPG compression on macOS #7219 [abey79, radarhere]\n(truncated)\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=10.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\nStandard Dependabot commands and options apply (rebase, recreate, merge, squash and merge, cancel merge, reopen, close, show ignore conditions, ignore this major/minor version, ignore this dependency); automated security fix PRs can be disabled from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-12-11T20:11:34Z", + "updated_at": "2024-01-22T22:57:57Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #321.", + "created_at": "2024-01-22T22:57:55Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 10.0.1 in /docs", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 10.0.1.\n
\nRelease notes\n

Sourced from pillow's releases.

\n
\n

10.0.1

\n

https://pillow.readthedocs.io/en/stable/releasenotes/10.0.1.html

\n

Changes

\n\n

10.0.0

\n

https://pillow.readthedocs.io/en/stable/releasenotes/10.0.0.html

\n

Changes

\n\n\n
\n

... (truncated)

\n
\n
Changelog

Sourced from pillow's changelog.

10.0.1 (2023-09-15)

  • Updated libwebp to 1.3.2 #7395 [radarhere]
  • Updated zlib to 1.3 #7344 [radarhere]

10.0.0 (2023-07-01)

  • Fixed deallocating mask images #7246 [radarhere]
  • Added ImageFont.MAX_STRING_LENGTH #7244 [radarhere, hugovk]
  • Fix Windows build with pyproject.toml #7230 [hugovk, nulano, radarhere]
  • Do not close provided file handles with libtiff #7199 [radarhere]
  • Convert to HSV if mode is HSV in getcolor() #7226 [radarhere]
  • Added alpha_only argument to getbbox() #7123 [radarhere, hugovk]
  • Prioritise speed in repr_png #7242 [radarhere]
  • Do not use CFFI access by default on PyPy #7236 [radarhere]
  • Limit size even if one dimension is zero in decompression bomb check #7235 [radarhere]
  • Use --config-settings instead of deprecated --global-option #7171 [radarhere]
  • Better C integer definitions #6645 [Yay295, hugovk]
  • Fixed finding dependencies on Cygwin #7175 [radarhere]
  • Changed grabclipboard() to use PNG instead of JPG compression on macOS #7219 [abey79, radarhere]

... (truncated)
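For the `alpha_only` entry above (#7123), a small illustration of the new argument, as documented for Pillow 10.0; the image contents are made up:

```python
from PIL import Image

# An RGBA image that is fully transparent except for one opaque red square.
img = Image.new("RGBA", (100, 100), (0, 0, 0, 0))
img.paste((255, 0, 0, 255), (20, 30, 40, 50))

# alpha_only=True (the 10.0 default) computes the bounding box from the
# alpha channel only; alpha_only=False considers all channels.
print(img.getbbox(alpha_only=True))   # (20, 30, 40, 50)
print(img.getbbox(alpha_only=False))
```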

Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=10.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-12-11T20:11:07Z", + "updated_at": "2024-01-22T22:58:49Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #322.", + "created_at": "2024-01-22T22:58:47Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pyarrow from 6.0.1 to 14.0.1 in /docs", + "body": "Bumps [pyarrow](https://github.com/apache/arrow) from 6.0.1 to 14.0.1.\n
Commits

  • ba53748 MINOR: [Release] Update versions for 14.0.1
  • 529f376 MINOR: [Release] Update .deb/.rpm changelogs for 14.0.1
  • b84bbca MINOR: [Release] Update CHANGELOG.md for 14.0.1
  • f141709 GH-38607: [Python] Disable PyExtensionType autoload (#38608)
  • 5a37e74 GH-38431: [Python][CI] Update fs.type_name checks for s3fs tests (#38455)
  • 2dcee3f MINOR: [Release] Update versions for 14.0.0
  • 297428c MINOR: [Release] Update .deb/.rpm changelogs for 14.0.0
  • 3e9734f MINOR: [Release] Update CHANGELOG.md for 14.0.0
  • 9f90995 GH-38332: [CI][Release] Resolve symlinks in RAT lint (#38337)
  • bd61239 GH-35531: [Python] C Data Interface PyCapsule Protocol (#37797)
  • Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pyarrow&package-manager=pip&previous-version=6.0.1&new-version=14.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-12-11T20:11:05Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump pyarrow from 6.0.1 to 14.0.1", + "body": "Bumps [pyarrow](https://github.com/apache/arrow) from 6.0.1 to 14.0.1.\n
Commits

  • ba53748 MINOR: [Release] Update versions for 14.0.1
  • 529f376 MINOR: [Release] Update .deb/.rpm changelogs for 14.0.1
  • b84bbca MINOR: [Release] Update CHANGELOG.md for 14.0.1
  • f141709 GH-38607: [Python] Disable PyExtensionType autoload (#38608)
  • 5a37e74 GH-38431: [Python][CI] Update fs.type_name checks for s3fs tests (#38455)
  • 2dcee3f MINOR: [Release] Update versions for 14.0.0
  • 297428c MINOR: [Release] Update .deb/.rpm changelogs for 14.0.0
  • 3e9734f MINOR: [Release] Update CHANGELOG.md for 14.0.0
  • 9f90995 GH-38332: [CI][Release] Resolve symlinks in RAT lint (#38337)
  • bd61239 GH-35531: [Python] C Data Interface PyCapsule Protocol (#37797)
  • Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pyarrow&package-manager=pip&previous-version=6.0.1&new-version=14.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nYou can trigger a rebase of this PR by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
\n\n> **Note**\n> Automatic rebases have been disabled on this pull request as it has been open for over 30 days.\n", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-12-11T20:09:32Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump urllib3 from 1.26.6 to 1.26.18 in /docs", + "body": "Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.6 to 1.26.18.\n
Release notes

Sourced from urllib3's releases.

1.26.18

  • Made body stripped from HTTP requests changing the request method to GET after HTTP 303 "See Other" redirect responses. (GHSA-g4mx-q9vg-27p4)

1.26.17

  • Added the Cookie header to the list of headers to strip from requests when redirecting to a different host. As before, different headers can be set via Retry.remove_headers_on_redirect. (GHSA-v845-jxx5-vc9f)

1.26.16

  • Fixed thread-safety issue where accessing a PoolManager with many distinct origins would cause connection pools to be closed while requests are in progress (#2954)

1.26.15

1.26.14

  • Fixed parsing of port 0 (zero) returning None, instead of 0 (#2850)
  • Removed deprecated HTTPResponse.getheaders() calls in urllib3.contrib module.

1.26.13

  • Deprecated the HTTPResponse.getheaders() and HTTPResponse.getheader() methods.
  • Fixed an issue where parsing a URL with leading zeroes in the port would be rejected even when the port number after removing the zeroes was valid.
  • Fixed a deprecation warning when using cryptography v39.0.0.
  • Removed the <4 in the Requires-Python packaging metadata field.

1.26.12

  • Deprecated the urllib3[secure] extra and the urllib3.contrib.pyopenssl module. Both will be removed in v2.x. See this GitHub issue for justification and info on how to migrate.

1.26.11

If you or your organization rely on urllib3 consider supporting us via GitHub Sponsors.

:warning: urllib3 v2.0 will drop support for Python 2: Read more in the v2.0 Roadmap

  • Fixed an issue where reading more than 2 GiB in a call to HTTPResponse.read would raise an OverflowError on Python 3.9 and earlier.

1.26.10

If you or your organization rely on urllib3 consider supporting us via GitHub Sponsors.

:warning: urllib3 v2.0 will drop support for Python 2: Read more in the v2.0 Roadmap

:closed_lock_with_key: This is the first release to be signed with Sigstore! You can verify the distributables using the .sig and .crt files included on this release.

  • Removed support for Python 3.5
  • Fixed an issue where a ProxyError recommending configuring the proxy as HTTP instead of HTTPS could appear even when an HTTPS proxy wasn't configured.

1.26.9

If you or your organization rely on urllib3 consider supporting us via GitHub Sponsors.

... (truncated)
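The 1.26.17 note above says the stripped-header set stays configurable via Retry.remove_headers_on_redirect. A minimal sketch of that knob — the header list and URL are illustrative, not from these release notes:

```python
import urllib3
from urllib3.util.retry import Retry

# Strip both Authorization and Cookie whenever a redirect crosses to a
# different host; 1.26.17 adds Cookie to the defaults, but the set can
# also be declared explicitly like this.
retry = Retry(remove_headers_on_redirect=["Authorization", "Cookie"])
http = urllib3.PoolManager(retries=retry)
resp = http.request("GET", "https://example.com/redirect")  # illustrative URL
print(resp.status)
```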
Changelog

Sourced from urllib3's changelog.

1.26.18 (2023-10-17)

  • Made body stripped from HTTP requests changing the request method to GET after HTTP 303 "See Other" redirect responses.

1.26.17 (2023-10-02)

  • Added the Cookie header to the list of headers to strip from requests when redirecting to a different host. As before, different headers can be set via Retry.remove_headers_on_redirect. ([#3139](https://github.com/urllib3/urllib3/issues/3139))

1.26.16 (2023-05-23)

  • Fixed thread-safety issue where accessing a PoolManager with many distinct origins would cause connection pools to be closed while requests are in progress ([#2954](https://github.com/urllib3/urllib3/issues/2954))

1.26.15 (2023-03-10)

  • Fix socket timeout value when HTTPConnection is reused ([#2645](https://github.com/urllib3/urllib3/issues/2645))
  • Remove "!" character from the unreserved characters in IPv6 Zone ID parsing ([#2899](https://github.com/urllib3/urllib3/issues/2899))
  • Fix IDNA handling of '\x80' byte ([#2901](https://github.com/urllib3/urllib3/issues/2901))

1.26.14 (2023-01-11)

  • Fixed parsing of port 0 (zero) returning None, instead of 0. ([#2850](https://github.com/urllib3/urllib3/issues/2850))
  • Removed deprecated getheaders() calls in contrib module. Fixed the type hint of PoolKey.key_retries by adding bool to the union. ([#2865](https://github.com/urllib3/urllib3/issues/2865))

1.26.13 (2022-11-23)

  • Deprecated the HTTPResponse.getheaders() and HTTPResponse.getheader() methods.
  • Fixed an issue where parsing a URL with leading zeroes in the port would be rejected even when the port number after removing the zeroes was valid.
  • Fixed a deprecation warning when using cryptography v39.0.0.
  • Removed the <4 in the Requires-Python packaging metadata field.

1.26.12 (2022-08-22)

  • Deprecated the urllib3[secure] extra and the urllib3.contrib.pyopenssl module. Both will be removed in v2.x. See this GitHub issue (https://github.com/urllib3/urllib3/issues/2680) for justification and info on how to migrate.

1.26.11 (2022-07-25)

  • Fixed an issue where reading more than 2 GiB in a call to HTTPResponse.read would

... (truncated)
Commits

  • 9c2c230 Release 1.26.18 (#3159)
  • b594c5c Merge pull request from GHSA-g4mx-q9vg-27p4
  • 944f0eb [1.26] Use vendored six in urllib3.contrib.securetransport
  • c9016bf Release 1.26.17
  • 0122035 Backport GHSA-v845-jxx5-vc9f (#3139)
  • e63989f Fix installing brotli extra on Python 2.7
  • 2e7a24d [1.26] Configure OS for RTD to fix building docs
  • 57181d6 [1.26] Improve error message when calling urllib3.request() (#3058)
  • 3c01480 [1.26] Run coverage even with failed jobs
  • d94029b Release 1.26.16
  • Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=urllib3&package-manager=pip&previous-version=1.26.6&new-version=1.26.18)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-12-11T20:09:30Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Bump pyarrow from 6.0.1 to 14.0.1 in /simba", + "body": "Bumps [pyarrow](https://github.com/apache/arrow) from 6.0.1 to 14.0.1.\n
Commits

  • ba53748 MINOR: [Release] Update versions for 14.0.1
  • 529f376 MINOR: [Release] Update .deb/.rpm changelogs for 14.0.1
  • b84bbca MINOR: [Release] Update CHANGELOG.md for 14.0.1
  • f141709 GH-38607: [Python] Disable PyExtensionType autoload (#38608)
  • 5a37e74 GH-38431: [Python][CI] Update fs.type_name checks for s3fs tests (#38455)
  • 2dcee3f MINOR: [Release] Update versions for 14.0.0
  • 297428c MINOR: [Release] Update .deb/.rpm changelogs for 14.0.0
  • 3e9734f MINOR: [Release] Update CHANGELOG.md for 14.0.0
  • 9f90995 GH-38332: [CI][Release] Resolve symlinks in RAT lint (#38337)
  • bd61239 GH-35531: [Python] C Data Interface PyCapsule Protocol (#37797)
  • Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pyarrow&package-manager=pip&previous-version=6.0.1&new-version=14.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nYou can trigger a rebase of this PR by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
\n\n> **Note**\n> Automatic rebases have been disabled on this pull request as it has been open for over 30 days.\n", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-12-11T20:09:29Z", + "updated_at": "2024-04-25T16:25:17Z", + "author": "dependabot[bot]", + "comments": [] + }, + { + "title": "Path plots", + "body": "Hello. I'm currently making path plots and I had another question about how missing points are dealt with. \r\n\r\nThe nose point in my video is sometimes not visible (certain poses where the head is down or out of frame). I skipped outlier correction as that was causing issues with tracking before. I previously used quadratic interpolation for body parts, but I've found that with some frames that puts points in the wrong location. However, when I skip these, SimBA puts missing points in the corner of the screen (0,0), and it looks like they are tracked in the path plots? I'm not sure that's the case, but it would make sense for what I'm seeing from the plot (see attached). How can I make a cleaner plot, and what could this be from?\r\n![FR3_training_final_frame](https://github.com/sgoldenlab/simba/assets/70863857/db8e6686-e7bb-4e65-8e19-8aeb4e4c31c0)\r\n\r\nAlso - I'm not sure if this is possible, but it would be a cool feature if you could make path plots of a final frame without going through the machine training (I think there's an option in the main menu but it makes a whole video). Thanks!", + "user": "IsabelleSajonia", + "reaction_cnt": 0, + "created_at": "2023-11-28T21:55:28Z", + "updated_at": "2023-11-29T20:31:54Z", + "author": "IsabelleSajonia", + "comments": [ + { + "body": "Hi @IsabelleSajonia!\r\n\r\nYes, about the different interpolation methods. I mentioned it in an earlier issue, and there is a graph representation of the different interpolation methods [HERE](https://github.com/sgoldenlab/simba/blob/master/images/Interpolation_comparison.png). \r\n\r\nIn brief, say your data is [1, 2, m, m, m, m, 7, 4] where m is missing data: you might want to try a linear interpolation, which would give you [1, 2, 3, 4, 5, 6, 7, 4], or a nearest interpolation, which gives you [1, 2, 2, 2, 7, 7, 7, 4]. \r\n\r\nSome other thoughts:\r\n\r\n1) Instead of using the nose, could you use a different body-part that is visible and is accurately tracked? \r\n\r\n2) I've seen a lot of users not label or track the nose or other body-parts in DLC or SLEAP lately, even though the experimenter knows where in the image the nose is. The nose might be obscured by a headstage, or some bedding material, or the mouse sniffs the floor at a tilt. But if you ask the experimenter where the nose is, they would know with certainty, yet they still skip labeling it. If you know where in the image the nose is, even though it is obscured, I think you should label and track that position. You can read about it in the DLC or SLEAP documentation; I think that's what they also suggest. \r\n\r\n3) Also, in future videos, make sure the animal can't be outside of the recording environment. \r\n\r\nFinally: Yes, I will insert a fix - for now, if you don't have to train a model, you can manually move the CSV files from the `project_folder/csv/outlier_corrected_movement_location` folder to the `project_folder/csv/machine_results` folder and the path plot function will find them. \r\n", + "created_at": "2023-11-28T23:49:25Z", + "author": "sronilsson" + },
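The linear-vs-nearest example in the comment above maps directly onto pandas; a minimal sketch (not SimBA's internal code) that reproduces both results:

```python
import numpy as np
import pandas as pd

# The [1, 2, m, m, m, m, 7, 4] series from the comment, with m encoded as NaN.
track = pd.Series([1, 2, np.nan, np.nan, np.nan, np.nan, 7, 4], dtype=float)

print(track.interpolate(method="linear").tolist())   # [1, 2, 3, 4, 5, 6, 7, 4]
print(track.interpolate(method="nearest").tolist())  # [1, 2, 2, 2, 7, 7, 7, 4] (requires scipy)
```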
+ { + "body": "For most frames I know where the nose is and should be able to add that label in. The tail points are not always visible to me since they blend in with the color of our operant chamber walls. I'll consider not labeling as many tail points.\r\n\r\nThank you!", + "created_at": "2023-11-29T20:05:39Z", + "author": "IsabelleSajonia" + }, + { + "body": "Yes, tails I've always had trouble with...\r\n\r\nYes, I'm thinking of scenarios like this: \r\n\r\n\"image\"\r\n\r\nI would put a label where the nose is, even though the head is covering it and I can't see it directly. \r\n", + "created_at": "2023-11-29T20:14:47Z", + "author": "sronilsson" + }, + { + "body": "I'll give that a try, thanks again! ", + "created_at": "2023-11-29T20:31:40Z", + "author": "IsabelleSajonia" + } + ] + }, + { + "title": "Total distance traveled and velocity", + "body": "I tried to make a path plot and look at movement data like total distance and velocity to compare them to Ethovision (for validation). I'm seeing that my total distance looks very different from Ethovision, but they are both in cm and standardized to the same known distance. The path plot also looks a little off compared to the original raw video, and when I look at the frames that aren't being tracked, the labels look OK in SLEAP. I'm not sure what the issue might be, but if you have any ideas of what I could try in order to compare Ethovision to SimBA, let me know. Thank you!", + "user": "IsabelleSajonia", + "reaction_cnt": 0, + "created_at": "2023-11-03T19:43:47Z", + "updated_at": "2023-11-07T18:30:58Z", + "author": "IsabelleSajonia", + "comments": [ + { + "body": "Hi @IsabelleSajonia! - it sounds like the tracking is off in SimBA, and we should figure out why. \r\n\r\nCan you tell me what you mean here? \r\n\r\n**when I look at the frames that aren't being tracked, the labels look ok in sleap**\r\n\r\nThere are a few ways the tracking could become mangled when imported into SimBA; I have written about some [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#25-my-pose-estimation-tracking-looks-good-when-i-visualize-it-in-the-pose-estimation-tool-however-after-i-import-it-into-simba-it-doesnt-look-good-anymore-why). \r\n\r\nThere is a method accessible in the tools menu described [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#visualize-pose-estimation-in-folder). If you use that tool, and point it to the videos in the `project_folder/csv/outlier_corrected_movement_location`, how does it look? \r\n", + "created_at": "2023-11-03T20:11:31Z", + "author": "sronilsson" + }, + { + "body": "[activity_valid - Trim.zip](https://github.com/sgoldenlab/simba/files/13282103/activity_valid.-.Trim.zip)\r\nI'm attaching a trimmed version of what I see when I do this. I know that my resolution is correct in SimBA, but I'll check fps. I used a criterion within the recommended range but I could try decreasing it. I do have frames where a body part is not visible, so in SLEAP I train pose estimation to toggle visibility in frames where those points aren't present. I didn't apply any smoothing, so that could be part of the problem, although the body parts are present in many of the frames that look bad here. Let me know what you think, thank you!", + "created_at": "2023-11-07T15:19:19Z", + "author": "IsabelleSajonia" + }, + { + "body": "Thanks @IsabelleSajonia - that video is very helpful.
I think it related to point 3 [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#25-my-pose-estimation-tracking-looks-good-when-i-visualize-it-in-the-pose-estimation-tool-however-after-i-import-it-into-simba-it-doesnt-look-good-anymore-why):\r\n\r\n***Say you perform outlier correction, but you apply a criterion that is too stringent. SimBA will then remove body-part movements that are true body-part movement, and the animal body-part location predictions can appear to be “stuck” in the video while the animal is actually moving.***\r\n\r\nIf your tracking looks alright in SLEAP, how does the tracking look if you don't apply outlier correction (click the skip button) ? ", + "created_at": "2023-11-07T15:36:20Z", + "author": "sronilsson" + }, + { + "body": "[activity_valid - Trim.zip](https://github.com/sgoldenlab/simba/files/13282983/activity_valid.-.Trim.zip)\r\nThis looks much better! Should I make criterion much lower for correction or is there a downside to skipping it in general if I'm confident in the tracking data?", + "created_at": "2023-11-07T15:53:18Z", + "author": "IsabelleSajonia" + }, + { + "body": "Yes that looks better, I can see you have some jumps to (0,0) (top left corner for missing body-parts). I don't know if you used interpolation, but you can try using Interpolation body-part nearest to get rid of them and then skip outlier correction. Interpolation is described [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#to-import-multiple-dlc-csv-files) and there is an image [HERE](https://github.com/sgoldenlab/simba/blob/master/images/Interpolation_comparison.png) that describes what happens, you can think of the red parts as your missing data. \r\n\r\nOften, in pose-estimation, there is some \"jitter\" - animals are standing still but the pose-estimate body-part location is jumping a few pixels here and there, which can cause some overestimation of the animal movement compared to whatever method Ethovision is using (I don't know how they do it at Noldus, I can't see their code, if you know please let me know). To reduce jitter, you can use smoothing. \r\n\r\n\r\n\r\n\r\n ", + "created_at": "2023-11-07T16:06:27Z", + "author": "sronilsson" + }, + { + "body": "I'll look into that thank you!", + "created_at": "2023-11-07T18:30:50Z", + "author": "IsabelleSajonia" + } + ] + }, + { + "title": "Error when correcting outliers", + "body": "![error'](https://github.com/sgoldenlab/simba/assets/70863857/19f4d977-2662-4b56-ac79-a5744c29266e)\r\n![csv](https://github.com/sgoldenlab/simba/assets/70863857/9f951fe5-555f-46b0-b13f-1b353ea35852)\r\n\r\nI get the following error when I try to correct outliers. I'm using a single video with a .slp file and the frame extraction process seems to have completed. The csv was created but is empty.", + "user": "IsabelleSajonia", + "reaction_cnt": 0, + "created_at": "2023-11-02T21:31:07Z", + "updated_at": "2023-11-03T14:04:28Z", + "author": "IsabelleSajonia", + "comments": [ + { + "body": "Hi @IsabelleSajonia, thanks for reporting - this error happens, when SimBA can't determine the number of header rows in a CSV. \r\n\r\nIs the lower screengrab taken from the `project_folder/csv/input_csv/activity_valid.csv` file? If not, how does that file look like? \r\n\r\nAlso, the .slp data import is slow and can be a little wobbly compared to the sleap .h5 import code. If you do also have an `.h5` data file for activity_valid, I recommend trying to import that instead. 
", + "created_at": "2023-11-02T23:21:37Z", + "author": "sronilsson" + }, + { + "body": "Using the H5 file seems to fix this issue. My activity_valid.csv is full now. Thank you!", + "created_at": "2023-11-03T14:04:28Z", + "author": "IsabelleSajonia" + } + ] + }, + { + "title": "Visualising ROI not working", + "body": "**Describe the bug**\r\nI have saved my ROI data and analysis went fine. When I try to visualise the ROI tracking, however, it gives me an error.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Visualise ROI Tracking'\r\n2. See error\r\n![image](https://github.com/sgoldenlab/simba/assets/144468935/354dc592-e916-497f-a4ce-346f98012e85)\r\nThe homescreen also pause like this:\r\n![image](https://github.com/sgoldenlab/simba/assets/144468935/02d77543-ff88-43a5-8b0f-1347f2eab596)\r\n\r\nI have updated SIMBA and it did not fix.\r\n\r\n**Expected behavior**\r\nLabelled videos in the output frames folder.\r\n\r\n**Desktop (please complete the following information):**\r\nOS: Windows 11 Enterprise\r\nPython Version Python 3.10.10\r\nAre you using anaconda? Yes\r\n\r\nThank you in advance for helping!\r\n", + "user": "neurorie", + "reaction_cnt": 0, + "created_at": "2023-11-02T09:14:58Z", + "updated_at": "2023-11-02T17:58:04Z", + "author": "neurorie", + "comments": [ + { + "body": "This is super helpful, thank you @neurorie! I inserted a another `detailed bout` dataframe output yesterday, in response to a comment on Gitter, I removed the old one as I didn't think it was needed.. but it looks like the plotting was using it.. \r\n\r\nI have inserted it again version in `1.76.9` , if you update how does it look? Can you also confirm how it looks if you tick the multiprocess checkbox?", + "created_at": "2023-11-02T11:32:34Z", + "author": "sronilsson" + }, + { + "body": "It seems to be working now! Thank you, I tried both multiprocess and not and both seems to output the correct video :) The only thing is, it is minor, but when the video is being made a temporal folder is made in the ROI_analysis folder which does not get deleted. It's just empty so I can just delete myself, but wanted to make a note of it.", + "created_at": "2023-11-02T17:50:42Z", + "author": "neurorie" + }, + { + "body": "Thanks for letting me know and yes, I can see the temp folder being cleared of content, but the folder is not properly deleted, I should fix that.", + "created_at": "2023-11-02T17:58:04Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Outlier correction - movement", + "body": "Hello! I have a problem with outlier correction. The bodyparts I used in outlier correction were \"tip of the nose\" and \"tail base\". Whichever value I put as \"Movement criterion\", I always end up with all 253 videos corrected. The percentage of correction is 1 in all cases. I've tried values of 1, 2, and even 3. My frame rate is 25 frames per second - could that be the problem?\r\n\r\n\r\n1. Choose bodyparts \"nose tip\" and \"tail base\" for outlier correction\r\n2. Set \"Movement criterion\" to 1-3\r\n3. Everything is corrected\r\n\r\n\r\n\r\n\r\n", + "user": "linab8", + "reaction_cnt": 0, + "created_at": "2023-10-27T17:23:12Z", + "updated_at": "2023-10-29T18:59:32Z", + "author": "linab8", + "comments": [ + { + "body": "Hi @linab8! Just so I understand what is happening: is it that all of your body-parts, in all videos, are deemed an outlier? \r\n\r\nWhat happens if you set the outlier criterion to some super high value, like 10000? 
", + "created_at": "2023-10-27T17:30:56Z", + "author": "sronilsson" + }, + { + "body": "Yes, all body-parts in all videos are labelled as outliers. I'll try that now, but I wasn't sure how high I should go since it's recommended 0.7 - 1.5 and I already used a larger value. @sronilsson ", + "created_at": "2023-10-27T17:32:35Z", + "author": "linab8" + }, + { + "body": "Yeah I was just suggesting the large value for troubleshooting, to figure out what the error may, if it's unrelated to what value you put in", + "created_at": "2023-10-27T17:37:38Z", + "author": "sronilsson" + }, + { + "body": "So, I tried with 10000 as a Movement criterion value and it still deemed all body parts in all videos as outliers. @sronilsson Do you have any other idea?", + "created_at": "2023-10-27T18:23:51Z", + "author": "linab8" + }, + { + "body": "I am not sure, I need some more information. Do you have the output file with the outlier scores you can show me where you see that all the values are deemed outliers?\r\n\r\nAlso, if possible, could you share me a very small project to see if I can reproduce your error? Say sharing your project through a gdrive but without any videos, and only with a single file inside the `project_folder/csv/input_csv` directory? \r\n\r\n", + "created_at": "2023-10-27T20:10:29Z", + "author": "sronilsson" + }, + { + "body": "Hi again @linab8 - I've played around a little trying to recreate the issue you're seeing. \r\n\r\nIn short, I could recreate something very similar to what you are seeing if the [Outlier settings] part of your SimBA project_config.ini contains multiple different settings for each animal (the section in the screengrab below). I will insert a code to prevent this from happening, and let you know when done. \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/ee8dc029-31c5-4807-bd7e-7728612eda00)\r\n", + "created_at": "2023-10-27T21:56:33Z", + "author": "sronilsson" + }, + { + "body": "@linab8 \r\n\r\nIf you update simba using `pip install simba-uw-tf-dev --upgrade`, and try again by setting the body-parts to use to correct outliers - do you still see all the body-parts being corrected? \r\n\r\nWhat I did was to insert a potential fix which removes any previous information on which body-parts to use inside the project_config, before inserting the body-parts you set in the menu.", + "created_at": "2023-10-27T22:25:05Z", + "author": "sronilsson" + }, + { + "body": "![snip](https://github.com/sgoldenlab/simba/assets/148640900/dca4b893-7237-4e6b-944b-83d3049be777)\r\n![snip2](https://github.com/sgoldenlab/simba/assets/148640900/9e52a643-3d07-49eb-b8fb-0f1e2ba53f9b)\r\n\r\nThank you for your help. I've updated Simba and am now waiting for the results of outlier correction. Here's what my results looked like yesterday. The first picture is Movement, and the second is Location. @sronilsson ", + "created_at": "2023-10-28T16:38:01Z", + "author": "linab8" + }, + { + "body": "Unofortunately, I'm getting the same results even with the updated Simba. @sronilsson ", + "created_at": "2023-10-28T18:03:37Z", + "author": "linab8" + }, + { + "body": "Got it, thanks for testing @linab8. Could you paste me your `project_config.ini` file here and I can check what is going on? I think you can drag it into the box. You may have to zip it first. ", + "created_at": "2023-10-28T18:15:22Z", + "author": "sronilsson" + }, + { + "body": "[linab_simba.zip](https://github.com/sgoldenlab/simba/files/13196489/linab_simba.zip)\r\nHere's the zipped folder with the config.ini inside. 
Thank you for still working on this with me!", + "created_at": "2023-10-28T18:25:15Z", + "author": "linab8" + }, + { + "body": "That looks like it should.. \r\n\r\nOne thing that comes to mind is if \"glave\" and \"repa\" are **not** the body-parts you think they are. Say that the \"glave\" columns in your data is actually the data of the left ear, and \"repa\" is the data for your nose. Then SimBA would look for movement outliers using a criterion of 1.5x the mean left ear <-> nose distance which is small.. When you created your body-part configuration in SimBA, and listed all the body-parts, did you do it in the same order as they were annotated in the pose-estimation package? ", + "created_at": "2023-10-28T18:42:54Z", + "author": "sronilsson" + }, + { + "body": "Yes, but I don't have \"left ear\" or \"right ear\", but rather \"beginning of the neck\" and \"end of the neck\" since I don't work on mice, but I do have tip of the nose and tail base as the 1st and 8th point. I used the custom user defined body parts option. @sronilsson ", + "created_at": "2023-10-28T18:57:56Z", + "author": "linab8" + }, + { + "body": "I'll try once more with my 3rd point and 8th point (rather than 1st and 8th), in the case Simba recorded it in the order like it does with mice so it might read the 3d point as the tip of the nose. However, it was clear on my tracking model image that the \"tip of the nose\" is the 1st point. @sronilsson ", + "created_at": "2023-10-28T19:02:08Z", + "author": "linab8" + }, + { + "body": "Perhaps we can go back a few steps, I'd like to figure this one out.... \r\n\r\nCould you share me a very small project to see if I can reproduce your error? Say sharing your project through a gdrive? I know your project is very large, so I am just looking for a single video inside `project_folder/videos`, and a single imported file inside the `project_folder/csv/input_csv` directory, and **no** files needed inside the `project_folder/csv/outlier_corrected_location` or `project_folder/csv/outlier_corrected_movement_location` directories. \r\n\r\nPS. you could try and click `skip outlier correction` and move on from this (but there might be some related errors downstream). ", + "created_at": "2023-10-28T19:02:40Z", + "author": "sronilsson" + }, + { + "body": "Okay, here's the link to the drive. I can create the new small project after another attempt of outlier correction finishes so I can close that project for a moment. @sronilsson \r\nhttps://drive.google.com/drive/folders/1N3EFNkCKtmzxnQ3KklQO6RGImw-mY7q5?usp=sharing\r\nI'd rather not skip outlier correction if possible since it was clear from the \"Location\" correction that some videos needed it.", + "created_at": "2023-10-28T19:11:25Z", + "author": "linab8" + }, + { + "body": "Thank you @linab8 - have to step away from computer but will check later and let you know. ", + "created_at": "2023-10-28T19:18:59Z", + "author": "sronilsson" + }, + { + "body": "Oh, can I also get the `project_folder/logs` folder ? ", + "created_at": "2023-10-28T19:19:38Z", + "author": "sronilsson" + }, + { + "body": "No problem, I have to step away too. I've imported the Logs folder. @sronilsson ", + "created_at": "2023-10-28T19:35:41Z", + "author": "linab8" + }, + { + "body": "One @linab8 ! One more question - when you visualize your data in DLC, how does the tracking look like (without using any filtering)? \r\n\r\nI am asking since I opened the `AGGRESSIVITY_4_11_21_Trial_2_camera1.csv` file. 
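The criterion logic described above - flagging movements larger than the criterion times the mean "glave" <-> "repa" distance - can be sketched as follows. This is an illustration of the idea with assumed column names, not SimBA's actual outlier-correction code:

```python
import numpy as np
import pandas as pd

def movement_outliers(bp: pd.DataFrame, ref_a: pd.DataFrame, ref_b: pd.DataFrame,
                      criterion: float = 1.5) -> pd.Series:
    """Flag frames where a body part moves further between consecutive frames
    than criterion times the mean distance between two reference body parts."""
    ref_dist = np.sqrt((ref_a["x"] - ref_b["x"]) ** 2 +
                       (ref_a["y"] - ref_b["y"]) ** 2).mean()
    movement = np.sqrt(bp["x"].diff() ** 2 + bp["y"].diff() ** 2)
    return movement > criterion * ref_dist
```

Note that if the reference columns hold mostly interpolated junk, the mean reference distance collapses towards zero and nearly every frame is flagged - consistent with the all-videos-corrected symptom reported above.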
Looking at the first two columns (representing the X and Y coordinate of the first body-part) is largely static for the first several hundred frames, other body-parts show the same static integers for many many frames. \r\n\r\nMy guess is this: A lot of the body-part predictions are missing in your DLC data. I.e., in more than half of the frames, there is at least one and typically more body-part that your DeepLabCut model could not find (and gives it a None or NaN value). You then performed nearest interpolation in SimBA, to get body-part prediction in all frames. Although interpolation can be a solution, the interpolation will suffer when so many body-parts are missing predictions for extended time. Next, SimBA tries to calculate the mean distance between \"glave\" and \"repa\" in the video using these (predominantly) interpolated values. However, most of these are not accurate, and the outlier correction fails. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-10-29T14:48:01Z", + "author": "sronilsson" + }, + { + "body": "There are some problems when visualizing data - some ID swaps exist even in that first video, however, we thought that we'd smooth that with outlier correction in Simba. Because after evaluating network in DLC, I retrained it with additional frames and it showed better results. Do you suggest more training in DLC? I understand what you're saying and it does make sense. I just didn't got the impression that more than half of the frames in my videos have some body part prediction missing. Would better tracking data (less ID swaps or missing body parts) fix what I'm experience in Simba outlier correction? @sronilsson Thank you so much for looking into my project.", + "created_at": "2023-10-29T17:58:49Z", + "author": "linab8" + }, + { + "body": "@linab8 yes, there are a few things in DLC that could potentially mask the performance and make it look better than it actually is, and watch out for these, there is more discussion [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#25-my-pose-estimation-tracking-looks-good-when-i-visualize-it-in-the-pose-estimation-tool-however-after-i-import-it-into-simba-it-doesnt-look-good-anymore-why), I recommend getting good predictions on as much data as possible and more training (from what I have seen)\r\n\r\n(i) DLC has a filtering setting (I don't know if it is used by default or if you used it). This sets all body-part predictions, where the confidence are below a certain user-specified threshold, no \"None\" and hides them. If you have used it, that could explain why you have so many 'None' values that SimBA has to interpolate. I would not recommend using filtering. \r\n\r\n(ii) I have sometimes seen DLC plot either all animals in the same colors, or e.g., the left ear of one animal in the same color as the left ear of animal two. If there is an ID switch, and the left ear of animal one becomes the left ear of animal two, you won't notice because they are the same color and it masks incorrect performance of the tracking model. \r\n\r\n\r\n\r\n", + "created_at": "2023-10-29T18:55:28Z", + "author": "sronilsson" + }, + { + "body": "I'll try some additional training and check that the filtering option is turned off - it might not be. 
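A quick way to check the diagnosis above (long static runs hinting at heavily interpolated or missing predictions) is to measure how often a coordinate simply repeats from frame to frame; a hypothetical helper, not part of SimBA:

```python
import pandas as pd

def static_fraction(xs: pd.Series, ys: pd.Series) -> float:
    """Fraction of frames where neither coordinate changes from the previous
    frame - a rough proxy for missing or interpolated pose data."""
    unchanged = (xs.diff() == 0) & (ys.diff() == 0)
    return float(unchanged.mean())
```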
Concerning colours, I used the option of colouring by individual so it's easy to see when ID is switched in some cases. Thank you for everything and I'll comment on this thread when I succeed or if I need additional help!", + "created_at": "2023-10-29T18:59:32Z", + "author": "linab8" + } + ] + }, + { + "title": "I can't extract frames", + "body": "Hello! I finished assigning IDs to multiple animals in Simba, but when I click on \"Extract Frames\" nothing happens. There's nothing going on in the Simba console nor in the command prompt, and the folder in which the frames should be is empty.\r\n\r\n1. I uploaded the videos\r\n2. Uploaded H5 files\r\n3. Assigned IDs\r\n4. Clicked on Extract Frames\r\n5. Nothing happens\r\n\r\n![extract](https://github.com/sgoldenlab/simba/assets/148640900/e0fe4aeb-f429-4d9c-8b1b-aa5dfc102571)\r\n \r\nHowever, I see that it isn't necessary to extract frames any more, but I wasn't sure if I should skip it since the tutorials still mention extracting. \r\n\r\n", + "user": "linab8", + "reaction_cnt": 0, + "created_at": "2023-10-24T16:44:37Z", + "updated_at": "2023-10-24T16:49:54Z", + "author": "linab8", + "comments": [ + { + "body": "Hi @linab8 - there is no need to extract frames! You can proceed without that step", + "created_at": "2023-10-24T16:48:16Z", + "author": "sronilsson" + }, + { + "body": "Thank you! @sronilsson ", + "created_at": "2023-10-24T16:49:54Z", + "author": "linab8" + } + ] + }, + { + "title": "this is a long commit", + "body": "1. i added some destroy to the windows to clean up after an operation was finished\r\n2. i replaced some hard coded string with the enums of the tkinter lib in order to preserve them if and when they will update the package.\r\n3. i changed the default size of the pop ups, because most of them are larger than the original value.\r\n4. some save paths and printing string were incorrect\r\n5. enabled running on multiple video in paralleled\r\n6. added directionality analyst and visualizer for a deepLabCut features and not just between mice's", + "user": "tzukpolinsky", + "reaction_cnt": 0, + "created_at": "2023-10-24T15:50:09Z", + "updated_at": "2023-11-26T23:04:00Z", + "author": "tzukpolinsky", + "comments": [] + }, + { + "title": "Stopped while assigning IDs to two animals", + "body": "Hi! I was assigning identities to two animals in SimBa. After I clicked X for a couple of times (because the frames weren't good enough for me to figure out the IDs), the small screen for assigning identity closed, and I was left with this (picture).\r\nIs there a way to continue assigning IDs because I fear that if I leave the app and load the project, it'll skip this step, as has once happened already so I had to start a new project.\r\n\r\n\r\nSteps to reproduce the behavior:\r\n1. FInished creating project config and importing videos\r\n2. Clicked to Import DLC .H5\r\n3. Assigned ID to 70/253 videos\r\n4. Clicked X a few times in one video\r\n5. Error\r\n\r\n![Capture](https://github.com/sgoldenlab/simba/assets/148640900/4fcacb6c-bbb7-4901-a64d-1b7e76398ce3)\r\n\r\n\r\n", + "user": "linab8", + "reaction_cnt": 0, + "created_at": "2023-10-21T15:40:56Z", + "updated_at": "2023-10-24T16:39:54Z", + "author": "linab8", + "comments": [ + { + "body": "Hi @linab8! SimBA seems to have trouble with one of your files. 
Would you mind sharing the error you are seeing in the operating system terminal where you launched Simba when this happens either as a screengrab or copy paste it here?", + "created_at": "2023-10-21T15:47:09Z", + "author": "sronilsson" + }, + { + "body": "Hi! Thank you for your answer, here's a screenshot of the terminal. @sronilsson \r\n![error](https://github.com/sgoldenlab/simba/assets/148640900/e63a0a57-833d-40cb-819f-2dd866e1c701)\r\n", + "created_at": "2023-10-22T14:26:59Z", + "author": "linab8" + }, + { + "body": "Thanks @linab8! I can tell you what I think is happening, and I will insert something in the code that presents a interpretable error message and prevent this from happening. \r\n\r\nEach time you click to go `X` to go to another frame, SimBA goes 50 frames forward in the video. You click and eventually you get to frame `15050`. Next, SimBA tries to pull up your DeepLabCut tracking data for frame `15050` to display it on the screen. However, I suspect that the DeepLabCut tracking data for this video has data for fewer than `15050` frames. So SimBA can't find the data for frame number `15050`, and it breaks down.\r\n\r\n", + "created_at": "2023-10-22T15:32:57Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the info, it makes sense now! Do you know if there's a way to continue assigning IDs from this video onward or the only option is to start everything from scratch? @sronilsson ", + "created_at": "2023-10-22T16:03:58Z", + "author": "linab8" + }, + { + "body": "One possible way:\r\n\r\nWhen you import data, you select a folder that contains DeepLabCut tracking data in h5 format. SimBA will search that folder for DeepLabCut .h5 files and loop over each one it finds and import it into your SimBA project. \r\n\r\nIf you remove the h5 files from that selected folder which you have already imported, then SimBA shouldn't find them and should not try to import them again. ", + "created_at": "2023-10-22T16:10:26Z", + "author": "sronilsson" + }, + { + "body": "Thank you! It worked! @sronilsson ", + "created_at": "2023-10-24T16:39:39Z", + "author": "linab8" + } + ] + }, + { + "title": "Distance visualisation", + "body": "Hello, \r\n\r\nI'm using SimBA 1.75.3 on Windows 10, from ma-DLC data.\r\n\r\nI have some doubts about the accuracy of the quantification of distances.\r\n\r\n1) Is there a way to visualise live the quantification of distance? When using thee \"visualize ROI\" function, I do not see the distance, but only thee different ROIs. \r\n\r\n2) For now I'm using the smoothing function to reeducxe noise/signal ratio. Do you suggest other tools / strategy? \r\n\r\nThank you \r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-10-20T15:39:47Z", + "updated_at": "2023-10-23T07:56:47Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli!\r\n\r\n(i) there is a tool for visualizing ROI features described [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md#part-5-visualizing-roi-features) which should include distances to ROI centers. It should show, for each frame, how far the animals are from the center of each ROI. Let me know how it runs on your end. \r\n\r\n(ii) It kind of depends on the noise. The very best way (but also the most time-consuming) is to improve the tracking model, and annotate frames where the model gets the body-parts wrong. \r\n", + "created_at": "2023-10-20T16:04:47Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson thank you for the quick answer. 
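The frame-15050 failure described above is a plain out-of-bounds lookup against the tracking data; a defensive sketch of the kind of check the comment promises to insert (function and variable names are illustrative):

```python
from typing import Optional
import pandas as pd

def get_frame_pose(pose_df: pd.DataFrame, frm_idx: int) -> Optional[pd.Series]:
    """Return pose data for one frame, or None when the tracking file holds
    fewer frames than the video - the condition that crashed the ID assigner."""
    if frm_idx < 0 or frm_idx >= len(pose_df):
        print(f"No pose data for frame {frm_idx}; file contains {len(pose_df)} frames.")
        return None
    return pose_df.iloc[frm_idx]
```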
I'm now trying the ROI feature option. I keep you update about the outcome. \r\n\r\nOther point: I'm facing issue when trying to run \"visualize distance\" as reported here: \r\n\r\n![Screenshot 2023-10-20 alle 19 17 13](https://github.com/sgoldenlab/simba/assets/66886884/ee7a4ee4-89e0-450b-8647-fead97c2a527)\r\n\r\nCan you identify the error? ", + "created_at": "2023-10-20T16:17:47Z", + "author": "DorianBattivelli" + }, + { + "body": "Yep thanks for reporting this error @DorianBattivelli I appreciate it - I will see if I can replicate. ", + "created_at": "2023-10-20T16:21:33Z", + "author": "sronilsson" + }, + { + "body": "Ok, I fixed the last bug: I had to select a resolution. \r\nStill I'm facing a problem when selecting only \"create last frame\" SimBA prompt returns that corresponding plot is saved in \"line plot\" folder, but no file is there. Saving works only when I select \"create frames\" . Is that expected? \r\n", + "created_at": "2023-10-20T16:21:35Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - which version of SimBA are you running? There shouldn't be a \"As input\" option and I can't see it in the latest version. ", + "created_at": "2023-10-20T16:29:52Z", + "author": "sronilsson" + }, + { + "body": "No - `last frame` should create a single image called something like `MyVideoName_final_img.png` located in `/project_folder/frames/output/line_plot`. ", + "created_at": "2023-10-20T16:31:46Z", + "author": "sronilsson" + }, + { + "body": "Ah hang on, I could replicate the error when the directory `/project_folder/frames/output/line_plot` doesn't exist and trying to write the file there, let me see if I can insert a fix. ", + "created_at": "2023-10-20T16:33:10Z", + "author": "sronilsson" + }, + { + "body": "I'm using SimBA 1.75.3 on Windows 10", + "created_at": "2023-10-20T16:33:11Z", + "author": "DorianBattivelli" + }, + { + "body": "... or it turns out the file isn't saved when \"multi-processed\" is ticked. ", + "created_at": "2023-10-20T16:35:04Z", + "author": "sronilsson" + }, + { + "body": "Yes in my case I ticked \"multi-processed\"", + "created_at": "2023-10-20T16:35:45Z", + "author": "DorianBattivelli" + }, + { + "body": "Yes, it turns out I forgot to pass `save_img=True`, when multiprocessing. It only stored the final image in memory..\r\n\r\n\r\nHow does it look in version 1.75.4? Thanks again.\r\n", + "created_at": "2023-10-20T16:41:41Z", + "author": "sronilsson" + }, + { + "body": "But there should not be any \"As input\" option which is a little confusing that you see that... \r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/8d83ca05-f9d6-488e-81f8-93c200fc1938)\r\n", + "created_at": "2023-10-20T16:43:44Z", + "author": "sronilsson" + }, + { + "body": "It works! No \"as input\" in the list, and file saved with \"create last frame\" even with multi-processed ticked, thank you :)", + "created_at": "2023-10-20T17:09:04Z", + "author": "DorianBattivelli" + }, + { + "body": "Tha is for letting me know! Also let me know how the ROI features runs or if there are any issues. ", + "created_at": "2023-10-20T17:12:35Z", + "author": "sronilsson" + }, + { + "body": "O, I realized that the issue for saving persists when I try to generate the last frame for \"all the videos\" instead of once at a time", + "created_at": "2023-10-20T18:10:02Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah, thank you! 
Let's fix that.\r\n", + "created_at": "2023-10-20T18:17:42Z", + "author": "sronilsson" + }, + { + "body": "It should be fixed in 1.75.5 but please let me know, and please let me know if you see any other bugs. ", + "created_at": "2023-10-20T18:35:51Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the fix, I'll try it soon. Meantime, another error/issue I face:\r\nI tried to visualize features for one video, but the prompt returns:\r\n\r\n\"Screenshot\r\n\r\nAnd indeed, on this specific video, Up-HT is not present, but it is for other videos. Is that mandatory to have exactly the same ROIs for all videos of this project to be able to run this command? \r\n", + "created_at": "2023-10-20T18:57:12Z", + "author": "DorianBattivelli" + }, + { + "body": "It appears so! I could remove the requirement. But - the reason it is required is that the function mainly targets people who want to create features from ROIs for building classifiers. In this scenario, it is not possible to create some ROI-based features for some videos, and skip them for other videos. \r\n\r\n\r\nI can remove it and print a warning instead.\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-10-20T19:02:13Z", + "author": "sronilsson" + }, + { + "body": "Yes it would be nice ;) thank you ", + "created_at": "2023-10-20T19:06:22Z", + "author": "DorianBattivelli" + }, + { + "body": "Also I was wondering, when ploting paths, would that be possible to set an option allowing to choose a frame from the video as background instead of a plain color? ", + "created_at": "2023-10-20T19:41:10Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah thats a good point. Yes I can insert that. I have to do the ROI feature plotting first. It will take a little longer because I realized you can't choose which body-parts to use in the pop-up menu, it says you have to analyze body-parts first which is not particularly good...", + "created_at": "2023-10-20T19:43:39Z", + "author": "sronilsson" + }, + { + "body": "Not sure to understand, in the path-plot menu, I can choose the BP to plote\r\n\r\n\"Screenshot\r\n\r\n", + "created_at": "2023-10-20T19:49:13Z", + "author": "DorianBattivelli" + }, + { + "body": "Oh no, I mean the `ROI feature plotting`, where you visualize the distances between ROIs and the animals. ", + "created_at": "2023-10-20T20:00:16Z", + "author": "sronilsson" + }, + { + "body": "Ah I see, instead I suggested frame background for the paths visualisation plots, would this be possible?", + "created_at": "2023-10-20T20:06:08Z", + "author": "DorianBattivelli" + }, + { + "body": "yes, that's possible we should include it, I let you know when done, but I can't get to it today. \r\n", + "created_at": "2023-10-20T20:07:00Z", + "author": "sronilsson" + }, + { + "body": "Amazing thank you !", + "created_at": "2023-10-20T20:08:41Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - before I go on, is this the sort of thing you are looking for:\r\n\r\nYou can select to use `video` as background. If you select `video`, you also get the option to set the opacity of the background (set it to a lower value if you want to make it a little less salient). \r\n\r\n\"image\"\r\n\r\nRight now it takes the first frame of the video as default, do you need an option to take a different frame? 
\r\n\r\nThe output looks something like this (I have a troubleshooting project with your name, but I have most likely I mixed up which tracking files belong to which videos).\r\n\r\n![HybCD1-B2-D6-Urine_final_frame](https://github.com/sgoldenlab/simba/assets/34761092/7cdd9e6f-17d5-461b-980b-016a24b6dcf2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-10-21T14:26:55Z", + "author": "sronilsson" + }, + { + "body": "... or is that you want the entire video (updating frames) as background ? ", + "created_at": "2023-10-21T14:39:36Z", + "author": "sronilsson" + }, + { + "body": "> @DorianBattivelli - before I go on, is this the sort of thing you are looking for:\n> \n> \n> \n> You can select to use `video` as background. If you select `video`, you also get the option to set the opacity of the background (set it to a lower value if you want to make it a little less salient). \n> \n> \n> \n> \"image\"\n> \n> \n> \n> Right now it takes the first frame of the video as default, do you need an option to take a different frame? \n> \n> \n> \n> The output looks something like this (I have a troubleshooting project with your name, but I have most likely I mixed up which tracking files belong to which videos).\n> \n> \n> \n> ![HybCD1-B2-D6-Urine_final_frame](https://github.com/sgoldenlab/simba/assets/34761092/7cdd9e6f-17d5-461b-980b-016a24b6dcf2)\n> \n> \n> \n> \n> \n> \n> \n> \n> \n> \n> \n> \n> \n> \nThis is the solution I need, if it's also possible to select a precise frame it's perfect ", + "created_at": "2023-10-21T16:09:18Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Validate Model on Single Video selection is bugged in current version", + "body": "**Describe the bug**\r\nWhen I click Browse File next to Select Model File on the [Run Machine Model] page I get a weird error instead of loading the file. \r\n\r\nI rolled back to simba-uw-tf-dev==1.73.9 and selection works fine and the issue is resolved.\r\n\r\n\r\n**Error**\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\RFK\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\RFK\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\tkinter_functions.py\", line 130, in setFilePath\r\n file_selected = askopenfilename(title=self.title, parent=self.parent, filetypes=self.file_type)\r\n File \"C:\\Users\\RFK\\anaconda3\\envs\\simba\\lib\\tkinter\\filedialog.py\", line 375, in askopenfilename\r\n return Open(**options).show()\r\n File \"C:\\Users\\RFK\\anaconda3\\envs\\simba\\lib\\tkinter\\commondialog.py\", line 35, in show\r\n self._fixoptions()\r\n File \"C:\\Users\\RFK\\anaconda3\\envs\\simba\\lib\\tkinter\\filedialog.py\", line 302, in _fixoptions\r\n self.options[\"filetypes\"] = tuple(self.options[\"filetypes\"])\r\nTypeError: 'NoneType' object is not iterable\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Win 10, 64-bit\r\n - Python Version 3.6.13\r\n - Are you using anaconda? Yes\r\n\r\nThank you!\r\nRyan\r\n", + "user": "rfkova", + "reaction_cnt": 0, + "created_at": "2023-10-20T06:00:47Z", + "updated_at": "2024-04-04T17:02:06Z", + "author": "rfkova", + "comments": [ + { + "body": "A separate, but possibly related issue, in 1.73.9, I was able to run a model on a features_extracted file, then open the Interactive Probability Plot and scan around, but when I tried to CREATE VALIDATION VIDEO I get the following error... 
\r\n\r\n> SIMBA FEATURE NUMBER MISMATCH ERROR: Mismatch in the number of features in input file R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/camera_stuff/SIMBA/dam_nest-c-only/project_folder/csv/features_extracted/LBNF2_Ctrl_P04_4_2021-03-18_19-49-46c.csv, and what is expected by the model dam_in_nest_attentive. The model expects 82 features. The data contains 97 features.\r\n\r\nThis strikes me as odd, because the model could be run on the same file to generate the probabilities... I noticed the model generated a validation folder with a new file in it... this file produced a similar error.\r\n\r\nHelp!\r\nRyan\r\n", + "created_at": "2023-10-20T06:48:24Z", + "author": "rfkova" + }, + { + "body": "Thank you @rfkova ! The first issue comes from me being a bit gung-ho yesterday and updating without testing. Many thanks for reporting. ", + "created_at": "2023-10-20T12:40:11Z", + "author": "sronilsson" + }, + { + "body": "Could you update SimBA again (to version 1.75.3) and let me know the error you see in the terminal when trying to create the validation video? \r\n\r\nDo you see the same error for all of the files inside the `features_extracted` directory? Or, do all of your files inside the `features_extracted` directory contain the same number of columns?\r\n\r\n", + "created_at": "2023-10-20T12:49:29Z", + "author": "sronilsson" + }, + { + "body": "I have tried two videos previously. They are all using the standard feature extractor and so yes, have the same number of columns. I haven't verified for EVERY file, but those I have tried are the same and produce the same error, and I spot-checked a few others, so there is mounting evidence, at least, to suggest the files are all the same, haha.\r\n\r\n**An aside**\r\nAlso, I do not know if it is a bug, but I have to change the .sav file of the model to perfectly match the behavior markers - the model saves with an underscore_number that I had to remove... otherwise I get an error like this:\r\nSIMBA VALUE ERROR: The classifier dam_in_nest_attentive_1 is not a classifier in the SimBA project: ['dam_in_nest', 'dam_in_nest_attentive', 'dam_in_nest_active'] 🚨\r\n\r\n**Back to it!**\r\nOk! Different problem now. I still get the above error if I don't change the .sav file --> I CAN run the model on my extracted feature file, but now when I try to open the INTERACTIVE PROBABILITY PLOT a blank GUI window opens and I get this error:\r\n`SIMBA VIDEO FILE ERROR: Video LBNF2_Ctrl_P04_4_2021-03-18_19-49-46c either does not exist or has fps of 0 (full error video path: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/camera_stuff/SIMBA\dam_nest-c-only\project_folder\videos\LBNF2_Ctrl_P04_4_2021-03-18_19-49-46c.mp4). 🚨`\r\n ... so I opened the same video file afterwards with the Label Behavior portion of the GUI and it is totally fine... and again, this part worked in 1.73.9. I do notice the slashes are flipping out, but idk if that's normal. \r\n\r\nLet me know if there is anything I can upload for testing :D\r\nRyan\r\n\r\nPS\r\n@sronilsson, this is a symlink... idk if the labeling code and the INTERACTIVE PROBABILITY PLOT handle those differently or not...\r\n", + "created_at": "2023-10-20T13:52:25Z", + "author": "rfkova" + }, + { + "body": "Thanks @rfkova - I will test it, using symlinks, but I'm bogged down today with a deadline; I will get back to you asap. If there is a project I can use on a gdrive you could share, that would be good (removing any unnecessary big files), in case I can't reproduce it using my own projects. 
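(In the meantime, one way to check for this kind of mismatch on your end - a sketch assuming a pickled scikit-learn classifier and a features CSV with the frame index in the first column; the file paths are placeholders:)

```
import pickle
import pandas as pd

with open("dam_in_nest_attentive.sav", "rb") as f:   # placeholder model path
    clf = pickle.load(f)
df = pd.read_csv("LBNF2_features.csv", index_col=0)  # placeholder data path
expected = clf.n_features_  # on newer scikit-learn versions: clf.n_features_in_
print(f"Model expects {expected} features; file has {len(df.columns)} columns")
```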
", + "created_at": "2023-10-20T14:00:34Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the heads up about timing. Good luck with your deadline and best wishes for health and success!\r\n\r\nI will upload something for you today and be back with the a link... However, the model files are kind of large themselves and the videos aren't tiny... I'll trim what I can to provide a workable example including the offending video. ", + "created_at": "2023-10-20T14:05:43Z", + "author": "rfkova" + }, + { + "body": "Cheers - I did try it with symlinks on a small testing project I have and didn't get any errors on interactive plot. If you have the error traceback from the operating system terminal used to launch simba that would help a lot too to see what functions are fed something they don't like. ", + "created_at": "2023-10-20T14:46:56Z", + "author": "sronilsson" + }, + { + "body": "Ohhhhhkay. Not sure what was happening but in the course of troubleshooting, the extracted features for the video I was trying got misplaced - INTERACTIVE PROBABILITY PLOT is back working, but same error with the validation video. Here I:\r\n\r\n1. re-ran the model to make sure all the files were in place\r\n2. Checked the probability plot\r\n3. Tried to create validation video - which produced an error like above.\r\n\r\n**Command Line**\r\n```simba\r\n[Parallel(n_jobs=16)]: Using backend ThreadingBackend with 16 concurrent workers.\r\n[Parallel(n_jobs=16)]: Done 18 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=16)]: Done 168 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=16)]: Done 418 tasks | elapsed: 0.6s\r\n[Parallel(n_jobs=16)]: Done 768 tasks | elapsed: 1.3s\r\n[Parallel(n_jobs=16)]: Done 1218 tasks | elapsed: 2.0s\r\n[Parallel(n_jobs=16)]: Done 1768 tasks | elapsed: 2.9s\r\n[Parallel(n_jobs=16)]: Done 2000 out of 2000 | elapsed: 3.2s finished\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 388, in \r\n button_generateplot = Button(label_model_validation, text=\"INTERACTIVE PROBABILITY PLOT\", fg='blue', command= lambda: self.launch_interactive_plot())\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 642, in launch_interactive_plot\r\n interactive_grapher.run()\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\plotting\\interactive_probability_grapher.py\", line 97, in run\r\n fig.canvas.draw()\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\matplotlib\\backends\\backend_tkagg.py\", line 10, in draw\r\n _backend_tk.blit(self._tkphoto, self.renderer._renderer, (0, 1, 2, 3))\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\matplotlib\\backends\\_backend_tk.py\", line 88, in blit\r\n photoimage.blank()\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 3548, in blank\r\n self.tk.call(self.name, 'blank')\r\n_tkinter.TclError: invalid command name \"pyimage398\"\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 164, in \r\n self.run_btn = 
Button(self.run_frm, text=title, fg='blue', command=lambda: run_function())\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\pop_ups\\validation_plot_pop_up.py\", line 111, in run\r\n validation_video_creator.run()\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\plotting\\single_run_model_validation_video_mp.py\", line 146, in run\r\n self.__run_clf()\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\plotting\\single_run_model_validation_video_mp.py\", line 86, in __run_clf\r\n self.in_df[self.prob_col_name] = self.clf_predict_proba(clf=self.clf, x_df=self.in_df, model_name=self.clf_name, data_path=self.feature_file_path)\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\train_model_mixin.py\", line 996, in clf_predict_proba\r\n raise FeatureNumberMismatchError(f'Mismatch in the number of features in input file {data_path}, and what is expected by the model {model_name}. The model expects {clf_n_features} features. The data contains {len(x_df.columns)} features.', source=self.__class__.__name__)\r\nsimba.utils.errors.FeatureNumberMismatchError: SIMBA FEATURE NUMBER MISMATCH ERROR: Mismatch in the number of features in input file R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/camera_stuff/SIMBA/dam_nest-c-only/project_folder/csv/features_extracted/LBNF2_Ctrl_P04_4_2021-03-18_19-49-46c.csv, and what is expected by the model dam_in_nest_active. The model expects 82 features. The data contains 97 features.\r\n```\r\n\r\n**GUI log:**\r\n`SIMBA COMPLETE: Loaded project R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/camera_stuff/SIMBA/dam_nest-c-only/project_folder/project_config.ini 🚀\r\nSIMBA COMPLETE: Loaded project R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/camera_stuff/SIMBA/dam_nest-c-only - Copy (2)/project_folder/project_config.ini 🚀\r\nSIMBA COMPLETE: Loaded project R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/camera_stuff/SIMBA/dam_nest-c-only/project_folder/project_config.ini 🚀\r\nPose-estimation body part setting for feature extraction: 2 animals user_defined body-parts\r\nExtracting features from 1 file(s)...\r\nExtracting features for video 1/1...\r\nCalculating euclidean distances...\r\nCalculating movements of all body-parts...\r\nCalculating rolling windows data: distances between body-parts...\r\nCalculating rolling windows data: animal movements...\r\nFeature extraction complete for video LBNF2_Ctrl_P04_4_2021-03-18_19-49-46c (elapsed time: 3.6791s)\r\nSIMBA COMPLETE: Feature extraction complete for 1 video(s). Results are saved inside the project_folder/csv/features_extracted directory (elapsed time: 3.7578s) 🚀\r\nSIMBA COMPLETE: Validation predictions generated for \"LBNF2_Ctrl_P04_4_2021-03-18_19-49-46c\" within the project_folder/csv/validation directory (elapsed time: 81.1842s) 🚀\r\nClick on \"Interactive probability plot\" to inspect classifier probability thresholds. If satisfactory proceed to specify threshold and minimum bout length and click on \"Validate\" to create video.\r\nSIMBA FEATURE NUMBER MISMATCH ERROR: Mismatch in the number of features in input file R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/camera_stuff/SIMBA/dam_nest-c-only/project_folder/csv/features_extracted/LBNF2_Ctrl_P04_4_2021-03-18_19-49-46c.csv, and what is expected by the model dam_in_nest_active. The model expects 82 features. The data contains 97 features. 
🚨`\r\n\r\n**Link to Drive file is incoming, upload is taking forever**", + "created_at": "2023-10-20T16:57:36Z", + "author": "rfkova" + }, + { + "body": "@sronilsson Please hold. I created the validation video without multiprocessing and with no minimum bout length without an error, but with a warning that \"Some frames appear to be missing in the video vs the data file\"... but I can't open the file... let me explore and get back to you", + "created_at": "2023-10-20T17:29:16Z", + "author": "rfkova" + }, + { + "body": "I was able to get a corrupted video (read: unplayable and like 6kb in size) so long as I don't use multiprocessing. Multiprocessing is causing the specific bug I showed earlier with the 97 features.\r\n\r\nIf I disable Show Pose in Tracking options, it looks like I can get a video out. \r\n\r\nI would really like to be able to get the pose estimation on there... kind of an aside, but is there an easy way in the GUI to visualize the poses on a video without the probability (i.e. prior to running a model), for the purpose of evaluating the outlier correction/interpolation?\r\n\r\n[Here's the drive link](https://drive.google.com/drive/folders/1Fk8ekO0E4vgjbtAMcbCYiq5NsFtFfSSG?usp=share_link). I guess the symlinks aren't transferable, so the file structure is only so useful. Let me know if I left anything you might need out!", + "created_at": "2023-10-20T17:54:57Z", + "author": "rfkova" + }, + { + "body": "@rfkova Yes, I found the error. It comes from when SimBA is trying to calculate how many colors will be needed, in how many different palettes, to draw the images. To do this it gets all the unique body-parts and divides that by the number of animals:\r\n\r\nYou would get (rounded up for \"false\" safety) 5 / 2 = 3 colors per color palette and animal. But in your use-case this won't be enough: you have an animal with 4 body-parts and there is no 4th color in the animal's palette, so it breaks. I will insert some better logic. The error is also kind of badly hidden in a try and except statement, so it's tricky to troubleshoot. The try and except statement is there because sometimes, users have a mismatch between the frames they have in the video, and the number of frames in the pose-estimation data, but they don't want it to error out; they want to see the video up until the point it errored out. And in your case it errors out on frame `0`. \r\n\r\n\r\nEDIT: Thanks for reporting this btw. It is very helpful. The code runs, I'm just going to make sure your video is created using multiprocessing and I can watch it before pushing an update. ", + "created_at": "2023-10-22T18:32:51Z", + "author": "sronilsson" + }, + { + "body": "Let me know how it goes in version `1.75.6` - you have tons of frames so I recommend using multiprocessing, and maybe hide the animal names as it looks a little weird when the nest is called `Animal_2`. ", + "created_at": "2023-10-22T19:40:08Z", + "author": "sronilsson" + }, + { + "body": "Hey! It's working! Thank you so much :D\r\n\r\nOne outstanding question not related to the errors: \r\nIs there an easy way in the GUI to visualize the poses on a video without the probability (i.e. 
prior to running a model), for the purpose of evaluating the outlier correction/interpolation?\r\n", + "created_at": "2023-10-23T15:11:01Z", + "author": "rfkova" + }, + { + "body": "Yes, there is a tool through the `Tools` drop-down menu called `Visualize pose-estimation in folder`, documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#visualize-pose-estimation-in-folder) - let me know if there are any problems though. \r\n\r\n\r\nPS. [THIS](https://github.com/sgoldenlab/simba/blob/f9a528627ab07457a8eec4f144ecbdb12ead89fc/simba/plotting/pose_plotter_mp.py#L63C7-L63C18) is the code that the GUI calls, when using more than 1 core. ", + "created_at": "2023-10-23T15:27:01Z", + "author": "sronilsson" + }, + { + "body": "It worked for the first video in the pile, but I got this error in the terminal before it got to the next video:\r\n\r\n`Exception in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\pop_ups\\visualize_pose_in_dir_pop_up.py\", line 31, in \r\n run_btn = Button(self.main_frm, text='VISUALIZE POSE', font=Formats.LABELFRAME_HEADER_FORMAT.value, fg='blue', command= lambda: self.run())\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\pop_ups\\visualize_pose_in_dir_pop_up.py\", line 68, in run\r\n pose_plotter.run()\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\plotting\\pose_plotter_mp.py\", line 115, in run\r\n self.config.animal_bp_dict[animal]['colors'] = tuple(self.config.animal_bp_dict[animal]['colors'][0])\r\nTypeError: 'numpy.float64' object is not iterable`", + "created_at": "2023-10-23T16:15:49Z", + "author": "rfkova" + }, + { + "body": "Cheers, let's take a look!", + "created_at": "2023-10-23T16:20:47Z", + "author": "sronilsson" + }, + { + "body": "I recreated the error and inserted a fix in `1.75.7`, but let me know if it fails on your end. ", + "created_at": "2023-10-23T17:19:58Z", + "author": "sronilsson" + }, + { + "body": "Hm, it got through two this time. I see the colors are changing on the body parts from video to video. Not sure if that is intended. 
Here's my command line error after 2 successful videos:\r\n\r\n`Exception in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\pop_ups\\visualize_pose_in_dir_pop_up.py\", line 31, in \r\n run_btn = Button(self.main_frm, text='VISUALIZE POSE', font=Formats.LABELFRAME_HEADER_FORMAT.value, fg='blue', command= lambda: self.run())\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\pop_ups\\visualize_pose_in_dir_pop_up.py\", line 68, in run\r\n pose_plotter.run()\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\plotting\\pose_plotter_mp.py\", line 115, in run\r\n self.config.animal_bp_dict[animal]['colors'] = self.config.animal_bp_dict[animal]['colors'][0]\r\nIndexError: invalid index to scalar variable.`", + "created_at": "2023-10-23T18:40:34Z", + "author": "rfkova" + }, + { + "body": "Bugs bugs, sorry about this I should take better care, thanks for sticking with me!", + "created_at": "2023-10-23T18:49:44Z", + "author": "sronilsson" + }, + { + "body": "I'm honestly glowing with the experience. I know the bugs aren't ideal, but like, this is all coming together. Happy to help after all we've been through tailoring things to my application, haha!", + "created_at": "2023-10-23T19:06:42Z", + "author": "rfkova" + }, + { + "body": "Thank you :) I have your project so I will test on that instead as I cant replicate this one on my test projects.", + "created_at": "2023-10-23T19:44:26Z", + "author": "sronilsson" + }, + { + "body": "When you get a chance, how does it look in version 1.75.8? \r\n\r\n", + "created_at": "2023-10-23T19:50:27Z", + "author": "sronilsson" + }, + { + "body": "Works like a charm!!!!!!! Thank you so much :D", + "created_at": "2023-10-23T22:04:50Z", + "author": "rfkova" + }, + { + "body": "Hello @sronilsson!\r\n\r\nIt has been some time :) - since my issue relates directly to this workflow I'm adding on here once again (lmk if this isn't your favored policy).\r\n\r\nI'm adapting this workflow to another set of data that is less complicated but it seemed easiest to adapt this than figure out a separate workflow for what I need from command line and I ran into some issues that I don't understand. \r\n\r\n1. This should be easy - when I run OutlierCorrecterLocationAdvanced I don't get a csv out I get a file that has csv appended to the filename without a '.' ... I suspect f\"{self.file_type} in 252 of OutlierCorrecterLocationAdvanced should be like [\".\" + self.file_type] from earlier?\r\n2. I can get through my script import_h5_shuttlebox_avoidance.py (see below for code in Drive folder) which imports videos, h5 pose estimation, runs movement outlier correction (if the above is fixed, also location), and interpolation but then I get an error about symlink/video accessibility when I run smoothing... which is confusing since the prior steps all do the same to the same file.\r\n\r\nI can actually use the GUI for this project which I will switch to, but I really would like to understand why I got that error so it won't haunt me later. 
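(To illustrate the suspected bug in point 1 above - a toy sketch of the missing extension separator; the file name is hypothetical:)

```
file_type = "csv"
save_name = "Box1_14Jun2022"
bad = f"{save_name}{file_type}"    # -> "Box1_14Jun2022csv" (no '.' separator)
good = f"{save_name}.{file_type}"  # -> "Box1_14Jun2022.csv"
print(bad, good)
```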
Here is the text of the error from the command line:\r\n\r\n```\r\nsmoother.run()\r\nSmoothing data in video Box1_14Jun2022_16-10-21...\r\nwarning: Error opening file (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:901)\r\nwarning: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA\\shuttlebox_avoidance\\project_folder\\videos\\Box1_14Jun2022_16-10-21.mp4 (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:902)\r\nackages\\simba\\data_processors\\interpolation_smoothing.py\", line 680, in run\r\n video_meta_data = get_video_meta_data(video_path=video_path)\r\n File \"C:\\Users\\rfk416\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\utils\\read_write.py\", line 447, in get_video_meta_data\r\n source=get_video_meta_data.__name__,\r\nsimba.utils.errors.InvalidVideoFileError: SIMBA VIDEO FILE ERROR: Video Box1_14Jun2022_16-10-21 either does not exist or has fps of 0 (full error video path: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA\\shuttlebox_avoidance\\project_folder\\videos\\Box1_14Jun2022_16-10-21.mp4).>>> SMOOTHING_SETTINGS\r\n{'Animal_1': {'method': 'Savitzky Golay', 'time_window': 100}}\r\n```\r\n\r\n[Here is a link to a Drive folder](https://drive.google.com/drive/folders/19_u7IxoFC2baqXTorYChp7EIUWOIOmuL?usp=drive_link) with the video, h5 file, full cmd line text from the run of the code that made the error, and the offending project file. I tried this with the previous version of simba that was working and also an --upgrade version and both have the same issues. \r\n\r\nAnything in my code after the smooth.run() is really my problem... I think it might be partially finished/working at the moment but that's where I'm stuck.\r\n\r\nThanks so much! If other things are pressing this is only moderate priority for me since I do have a path forward.\r\nBest,\r\nRyan", + "created_at": "2024-03-27T16:24:40Z", + "author": "rfkova" + }, + { + "body": "@sronilsson oh, interesting, actually using the GUI also produces the symlink error... This wasn't an issue for me previously and I'm not sure how to proceed. For now I'll skip smoothing, but I would like to have that feature if possible... that said it may be difficult to troubleshoot since my files are on a server... however, the way that the interpolation runs and the smoothing doesn't seems odd.\r\n\r\nHere's the log text from the GUI run:\r\n`2024-03-27T12:02:59Z|SimbaProjectPopUp.stdout_success||complete||Loaded project R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA/shuttlebox_avoidance/project_folder/project_config.ini\r\n2024-03-27T12:04:41Z|.PermissionError||error||SIMBA PERMISSION ERROR: Symbolic link privilege not held. 
Try running SimBA in terminal opened in admin mode\r\n2024-03-27T12:05:37Z|SimbaProjectPopUp.stdout_success||complete||Loaded project R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA/shuttlebox_avoidance/project_folder/project_config.ini\r\n2024-03-27T12:06:03Z|copy_multiple_videos_to_project.stdout_success||complete||148 videos copied to project.\r\n2024-03-27T12:07:08Z|SLEAPImporterH5||CLASS_INIT||smoothing_settings: {'Method': 'Savitzky Golay', 'Parameters': {'Time_window': '100'}}, interpolation_settings: Animal(s): Quadratic, id_lst: ['Animal_1'], data_folder: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA/h5_file_repository/Awaiting Import, config_path: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA/shuttlebox_avoidance/project_folder/project_config.ini\r\n2024-03-27T12:07:14Z|Interpolate||CLASS_INIT||initial_import_multi_index: True, method: Animal(s): Quadratic, config_path: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA/shuttlebox_avoidance/project_folder/project_config.ini, input_path: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA\shuttlebox_avoidance\project_folder\csv\input_csv\Box1_06Jun2022_12-32-46.csv\r\n2024-03-27T12:07:17Z|Interpolate.stdout_success||complete||1 data file(s) interpolated)\r\n2024-03-27T12:07:17Z|Smooth||CLASS_INIT||input_path: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA\shuttlebox_avoidance\project_folder\csv\input_csv\Box1_06Jun2022_12-32-46.csv, time_window: 100, smoothing_method: Savitzky Golay, initial_import_multi_index: True\r\n2024-03-27T12:07:20Z|get_video_meta_data.InvalidVideoFileError||error||SIMBA VIDEO FILE ERROR: Video Box1_06Jun2022_12-32-46 either does not exist or has fps of 0 (full error video path: R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA\shuttlebox_avoidance\project_folder\videos\Box1_06Jun2022_12-32-46.mp4).\r\n`", + "created_at": "2024-03-27T17:11:15Z", + "author": "rfkova" + }, + { + "body": "Hey @rfkova! \r\n\r\nA little background to see if we can fix it. \r\n\r\nWhen performing smoothing and providing a time window, we need to know the FPS of the video so we know how many frames represent 100 milliseconds, so we can smooth the data according to your settings. One possibility is to read the FPS from the appropriate row in the `project_folder/logs/video_info.csv` file, which SimBA often does, but in some cases, like this one, we can't do that - because we are importing the video and we can't assume that Box1_06Jun2022_12-32-46 exists in the `project_folder/logs/video_info.csv` as yet. \r\n\r\nThe alternative is to get the FPS by reading the metadata from the actual video file, which we assume to be in the videos directory of the SimBA project, at location `project_folder\videos\Box1_06Jun2022_12-32-46.mp4`. [THIS](https://github.com/sgoldenlab/simba/blob/500a0310e17231566c1b94434a56300415a51e63/simba/utils/read_write.py#L418C5-L418C24) is the function which that path passes through. However, sometimes this file doesn't exist, or is corrupted, and then we have nowhere to turn to get the FPS, and this error comes up. 
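(A sketch of that metadata fallback - approximate logic using OpenCV to read the FPS and convert a millisecond smoothing window into a frame count; this mirrors the idea, not SimBA's exact `get_video_meta_data` function:)

```
import math
import cv2

def smoothing_window_frames(video_path: str, time_window_ms: int) -> int:
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)  # returns 0.0 if the file is missing or unreadable
    cap.release()
    if fps <= 0:
        # mirrors the error in the log above: missing file, broken link, or fps of 0
        raise ValueError(f"Video {video_path} does not exist or has fps of 0")
    return max(1, int(math.ceil(fps * (time_window_ms / 1000.0))))

# frames = smoothing_window_frames("Box1_14Jun2022_16-10-21.mp4", 100)
```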
\r\n\r\nJust to check, does the `R:/Basic_Sciences/Phys/Lerner_Lab_tnl2633/Ryan/Gaby_SIMBA\shuttlebox_avoidance\project_folder\videos\Box1_06Jun2022_12-32-46.mp4` file exist?\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2024-03-27T17:39:49Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson,\r\n\r\nWell, yes, a symlink exists for project_folder\videos\Box1_06Jun2022_12-32-46.mp4 but not the video (I wasn't able to upload the symlink in the Drive folder provided). The way this is set up, the video will be imported first, so in this case we _can_ assume the info will be in project_folder/logs/video_info.csv, if that helps. \r\n\r\nThe way my code (import_h5_shuttlebox_avoidance.py) is set up, the video is imported first, which I kind of thought was a prerequisite for smoothing, but you imply it may not be. The video is indeed at the location indicated by the symlink; if it wasn't, the symlink wouldn't be created.\r\n\r\nLet me know if there is anything else I should check or try!\r\nBest,\r\nRyan", + "created_at": "2024-03-27T18:13:10Z", + "author": "rfkova" + }, + { + "body": "@sronilsson,\r\n\r\nJust tested, and it works if I import the videos directly. It would be nice to use symlinks so I don't waste a ton of time moving/copying videos... this is more relevant to another project with like 1000 videos. This one only has 148, so it wouldn't be the worst, but I would like symlinks to work too!", + "created_at": "2024-03-27T18:32:46Z", + "author": "rfkova" + }, + { + "body": "@rfkova Yeah of course, definitely this should be fixed - I'll see if I can recreate it with symlinks, but I can only do it locally 😬 I don't have a server available to me so can't completely try to recreate - could there be any permission errors that fail silently? If you run the conda environment in admin mode, do you get the same error?", + "created_at": "2024-03-27T18:45:14Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Yeah, I had errors earlier in the process if I didn't run in admin mode, so I am now... I also had to run this in my terminal to get the admin-terminal to see my server folders too for some reason at earlier parts of the process (importing videos and generating the symlinks I think): net use R: \\\\[fsmresfiles.fsm.northwestern.edu](http://fsmresfiles.fsm.northwestern.edu/)\fsmresfiles /savecred /p:yes\r\n\r\nAfter I found that fix everything worked before... but not in this new project... it's possible I'm doing something wrong, but the way everything else seems to work has me thinking it might not be me.\r\n\r\nI'm skeptical it's a permission issue, but it isn't impossible... if you can think of any other way to check that, lmk...", + "created_at": "2024-03-27T18:49:06Z", + "author": "rfkova" + } + ] + }, + { + "title": "SIMBA VIDEO PARAMETERS FILE ERROR", + "body": "Hi! \r\n\r\nI am trying to do the ROI analysis with SimBA (1.74.3). \r\nI refined all the ROIs and moved to the step \"analyze ROI data: aggregates\". Here I selected the # of animals (1) and the body part (nose). I left the probability threshold at 0.00 and ticked the box to calculate distance moved within the ROI. When I clicked run, I got the following error message in the SimBA terminal:\r\n\r\nAnalysing ROI data for video 16-02-231_1_20230216_3CT_hungry_leftshort...\r\nSIMBA VIDEO PARAMETERS FILE ERROR: SimBA could not find 16-02-231_1_20230216_3CT_hungry_leftshort in the video_info.csv file. Make sure all videos analyzed are represented in the project_folder/logs/video_info.csv file. 
🚨\r\n\r\nWhen I check the project_folder/logs/video_info.csv file, the video 16-02-231_1_20230216_3CT_hungry_leftshort is in there. \r\n\r\nWhat can I do to avoid this error message?\r\nThanks for your help!\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/147690960/426dd28a-4504-4755-b09c-c682f762bc82)\r\n![image](https://github.com/sgoldenlab/simba/assets/147690960/3e8b2458-4d4f-444d-8869-52014fe13841)\r\n\r\n", + "user": "Chantal-Wi", + "reaction_cnt": 0, + "created_at": "2023-10-17T12:21:38Z", + "updated_at": "2023-10-17T14:41:10Z", + "author": "Chantal-Wi", + "comments": [ + { + "body": "Hi @Chantal-Wi - thanks for the screengrabs, very helpful. \r\n\r\nLooking at your `video_info.csv` file, you have 8 files with filenames all ending with the suffix `xxxxx_filtered_labeled`. \r\n\r\nI think these videos that you have imported into SimBA are not the original videos recorded by you, but are video files created by the deeplabcut visualization tools. Rather than the video created by deeplabcut, you should import the **original videos** you recorded and analyzed in deeplabcut. I.e., you should import video `16-02-231_1_20230216_3CT_hungry_leftshort.mp4`, not `16-02-231_1_20230216_3CT_hungry_leftshort_xxxxx_filtered_labeled.mp4`, to your SimBA project. Let me know if that makes sense!\r\n\r\n\r\n\r\n", + "created_at": "2023-10-17T14:40:43Z", + "author": "sronilsson" + } + ] + }, + { + "title": "ROI analysis video quality", + "body": "Describe the bug\r\nWhen visualising ROI data, the numbers on the video are not readable and the top output is cut.\r\nIs there a way to fix this?\r\nAdditionally, if I have generated several ROI analyses (e.g. nose tracking and left ear tracking), how can I check which data is being used to generate the video?\r\nThank you in advance for always being such a great help!\r\n\r\nTo Reproduce\r\nGo to 'ROI'\r\n\r\nClick on 'Visualize ROI Tracking'\r\n\r\nInitial error:\r\n![image](https://github.com/sgoldenlab/simba/assets/144468935/b3fff0b9-6d84-4a6f-9ad1-a17c59dc652c)\r\n\r\nExpected behavior\r\nVideo output with animal tracking based on ROI analysis of the desired bodypart\r\n\r\nDesktop (please complete the following information):\r\n\r\nOS: Windows 11 Enterprise\r\nPython Version Python 3.10.10\r\nAre you using anaconda? Yes", + "user": "neurorie", + "reaction_cnt": 0, + "created_at": "2023-10-17T11:37:20Z", + "updated_at": "2023-10-20T14:56:32Z", + "author": "neurorie", + "comments": [ + { + "body": "Hi @neurorie - thanks for reporting this. \r\n\r\n\r\n \r\n**For the first question:**\r\n\r\nIn the back, SimBA tries to guesstimate the best font sizes and text spacings using the resolution of your videos. Sometimes this breaks down, particularly when the video resolution is small. \r\n\r\nUnfortunately I don't have a graphical interface to control the ROI visualization text attributes yet, as exists for the [classification visualizations](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario2.md#visualizing-classifications). However, you can go into the SimBA code and play around with the numbers to get it to look better in your use-case. 
\r\n\r\n(i) First, update SimBA using `pip install simba-uw-tf-dev --upgrade` to make sure you are running version 1.74.9 or later.\r\n\r\n(ii) Next, you can find out where the SimBA code lives by typing `pip show simba-uw-tf-dev`:\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/92eb1025-29ed-4990-a853-243992114e94)\r\n\r\n(iii) Go to that directory, find the folder `simba` -> `utils`, and open `enums.py` in a text editor. At around line 207, you should see some numbers under `TextOptions` that SimBA uses to scale your text, with some descriptions: \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/93056864-b011-4d52-a8f9-003903923fff)\r\n\r\nTo start, I recommend decreasing the RESOLUTION_SCALER value and increasing the FONT_SCALER, and see if that solves it. Then save the file, open SimBA, run it again, and let me know how it works, and please let me know if you have any issues.\r\n\r\n**For the second question:**\r\n\r\nTwo days ago, I inserted an additional column in the ROI analysis in response to [this issue](https://github.com/sgoldenlab/simba/issues/295#issuecomment-1763451878). The ROI data now contains a column which states which body-part you used. Let me know if that solves the problem. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-10-17T14:25:55Z", + "author": "sronilsson" + }, + { + "body": "Thank you, I will give it a try for the first suggestion! I will keep you posted.\r\n\r\nThe update for showing which body part was analysed is a great addition! Say we analysed the location for the centre, and then the nose; I believe creating videos will then by default create videos of the nose (the most recent) ROI analysis. How can I get it to create video tracking of another (not the most recent) analysis?", + "created_at": "2023-10-18T08:26:37Z", + "author": "neurorie" + }, + { + "body": "That makes sense, let me insert some dropdowns so that you can choose, and get back to you. ", + "created_at": "2023-10-18T11:42:38Z", + "author": "sronilsson" + }, + { + "body": "Hi @neurorie - let me know if this is what you are looking for and how it works on your end, after which I will update the documentation:\r\n\r\n\"image\"\r\n\r\nSelect the number of animals you want to visualize ROI data for (in your case the only option is `1`), select the animal body-part you want to use from the drop-down, and click `RUN`. \r\n\r\n\r\nYou should see it if you update to the latest version 1.75.1 with `pip install simba-uw-tf-dev --upgrade`. \r\n", + "created_at": "2023-10-18T15:42:49Z", + "author": "sronilsson" + }, + { + "body": "This is exactly what I meant, amazing, thank you! Such a rapid fix too!\r\n\r\nAlso, if it is possible, can the body part that is being tracked also be mentioned in the video (e.g. 
Part tracked: nose)? My picture is above; I got the example video from the website screenshot, which shows which body part is being tracked.\r\n![image](https://github.com/sgoldenlab/simba/assets/144468935/bc739fb9-87a1-4235-aae1-915aa5ae0e1b)\r\n![image](https://github.com/sgoldenlab/simba/assets/144468935/4fcbfecc-4d70-4a73-bd6e-a03c60351363)\r\n\r\nLastly, if changing the size of the tag on the animal (the yellow dot) is an easy fix, I would like to know how to do that also!\r\n\r\nFor the video resolution, RESOLUTION_SCALER 700 and FONT_SCALER 1 work pretty well for me :) ", + "created_at": "2023-10-18T16:54:51Z", + "author": "neurorie" + }, + { + "body": "Sure, I will insert the body-part - you may have to play with the RESOLUTION_SCALER and FONT_SCALER again, as the text will become longer and may eat into the counters with the body-part name included. \r\n\r\nTo change the yellow dot size, change the `RADIUS_SCALER`; decrease the number to make it smaller:\r\n\r\n\"image\"\r\n", + "created_at": "2023-10-18T17:08:37Z", + "author": "sronilsson" + }, + { + "body": "Hello again, I am realising that there may be a mismatch between the chosen body part and what is being analysed. The picture on the left is SIMBA, on the right is DLC from the same timepoint. I chose to track the nose on SIMBA, labelled in purple on DLC, but it seems to be tracking something else (maybe ear_left, as it is the default choice on the \"visualise ROI tracking\" pop-up). \r\n![image](https://github.com/sgoldenlab/simba/assets/144468935/44b45146-39e8-4e2d-af58-78ded94bc739)\r\n", + "created_at": "2023-10-20T07:56:54Z", + "author": "neurorie" + }, + { + "body": "Hi @neurorie ! Before digging, can we confirm that it is not the same issue as discussed in [THIS](https://github.com/sgoldenlab/simba/issues/295) thread? (only difference is that they have a different pose-configuration)\r\n\r\nWhen you are using one of the in-built default pose-configurations in SimBA, it is expected that you have annotated your body-parts in a specific order, as shown by the numbers in this image:\r\n\r\n\"image\"\r\n\r\nI.e., the first annotated body-part is the left ear, followed by the right ear, and then the nose etc...\r\n\r\nIf you instead have annotated the nose first, then SimBA would put the label \"left ear\" on the nose data.\r\n\r\nLet me know if this could be the case.\r\n\r\n", + "created_at": "2023-10-20T12:37:33Z", + "author": "sronilsson" + }, + { + "body": "This could very much be the case! Thank you for catching this, I will check and get back to you.", + "created_at": "2023-10-20T12:51:05Z", + "author": "neurorie" + }, + { + "body": "👍🏻 There is a manual fix for it in [THIS](https://github.com/sgoldenlab/simba/issues/295#issuecomment-1763484296) response. ", + "created_at": "2023-10-20T14:56:32Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Analysing wrong bodypart", + "body": "**Describe the bug**\r\n\r\nI have a 10-minute video with one animal, and I labeled 4 bodyparts using DeepLabCut (in order: nose, left ear, right ear, tail base). When I want to analyse the data, no matter which body part I choose, the result is always for the left ear, which is the first bodypart in CHOOSE ANIMAL BODY-PARTS.\r\nI reset the defined pose config and defined the bodyparts again, but still, when I run the ROI data analysis, instead of the nose it tracks the left ear. \r\nI updated my SimBA, but I still have the same problem. I'm not sure what I should do to solve this problem and I appreciate any advice you may have. 
\r\n\r\n**Expected behavior**\r\nI want to use SimBA for a novel object recognition test and record the time for the nose when it is within the ROI. \r\n\r\n**Screenshots**\r\n![2023-10-15_133010](https://github.com/sgoldenlab/simba/assets/144938166/e7811583-8d6f-46d9-a38d-b272486214b2)\r\n\r\n![2023-10-15_133423](https://github.com/sgoldenlab/simba/assets/144938166/a34fbf7a-0d24-4774-8878-0f51aa860722)\r\n![2023-10-15_133256](https://github.com/sgoldenlab/simba/assets/144938166/60849f9a-9fca-4a98-b0dd-ad015571eac6)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 10\r\n - Python 3.6.13\r\n - I am using anaconda \r\n \r\n\r\n**Additional context**\r\nEverything runs without any problems or errors.\r\n", + "user": "FarahnazYF", + "reaction_cnt": 0, + "created_at": "2023-10-15T13:00:53Z", + "updated_at": "2023-10-15T20:18:39Z", + "author": "FarahnazYF", + "comments": [ + { + "body": "Hi @FarahnazYF - thanks for reporting! Let me see if I can replicate this and see what is going on... ", + "created_at": "2023-10-15T16:41:27Z", + "author": "sronilsson" + }, + { + "body": "Hi @FarahnazYF - if I understand correctly: regardless of which body-part you select in the body-part drop-down, you get the ROI statistics for the `Left_ear` body-part?\r\n\r\nUnfortunately I couldn't replicate this issue - I get different results depending on which body-parts I choose using the pop-up in your screengrab. However, one thing that made it trickier to evaluate this issue was that many of the output files (in your lower screengrab) do not contain a column saying which body-part was used to compute the statistics. I have now included it, so you should see this:\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/f48874e9-d79d-49b7-8ac1-7ff8b629e66e)\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/2bb1bf5a-db78-4222-a0f0-abac1c6bec9d)\r\n\r\nIf you update simba with `pip install simba-uw-tf-dev --upgrade` (version 1.74.8) and run it, do you see the same body-part in the `BODY-PART` column and statistic in the output files, regardless of what you choose in the body-part dropdown? \r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-10-15T17:17:23Z", + "author": "sronilsson" + }, + { + "body": "Dear @sronilsson,\r\n\r\nThank you so much for checking it. I think updating my SimBA helped a lot, because now I can choose different bodyparts and get their respective results. \r\n\r\n![2023-10-15_194644](https://github.com/sgoldenlab/simba/assets/144938166/167b0e1d-93a2-470e-9bd2-1704c131c362)\r\n\r\nHowever, if I want the data for the nose, I still have to choose left ear, and I guess this is because when I was labeling the bodyparts in DeepLabCut, I labeled the nose first, but in SimBA the labeling order starts from the left ear. I hope my explanation makes sense. Maybe if I label my frames again in the same order as SimBA uses, it will help.\r\n\r\n![2023-10-15_193155](https://github.com/sgoldenlab/simba/assets/144938166/d71fb068-c9a9-402d-9b8e-08db881e8908)\r\n\r\n\r\n![2023-10-15_193649](https://github.com/sgoldenlab/simba/assets/144938166/bdd91959-e1cc-4921-8451-125f541ed3cd)\r\n\r\nThis picture is from a video where I chose left ear, but you can see that it tracked the nose.\r\n![2023-10-15_195843](https://github.com/sgoldenlab/simba/assets/144938166/07d5d995-dbb9-4249-8d17-dc9365ba8cd0)\r\n\r\nThanks again for your help. \r\n\r\n", + "created_at": "2023-10-15T19:04:41Z", + "author": "FarahnazYF" + }, + { + "body": "Ah thanks @FarahnazYF - that info helped a lot. 
\r\n\r\nYou are correct: when you are using the in-built pose-configuration from your screengrab, SimBA expects the body-part data from DeepLabCut to be in a specific order. \r\n\r\nHowever, you don't have to label frames in DeepLabCut again. \r\n\r\nWhen you create a SimBA project, a file gets stored in your project at location `/project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` and if you open it, it will look like this and stores the names of your body-parts and their order: \r\n\r\n\"image\"\r\n\r\nYou can try and change this order to the actual order you have your body-parts in, e.g.,:\r\n\r\n\"image\"\r\n\r\nThen save that file, and finally import your data to your SimBA project. \r\n\r\nNow your SimBA project should have the right body-part names associated with the right columns and data.\r\n", + "created_at": "2023-10-15T19:36:02Z", + "author": "sronilsson" + }, + { + "body": "Dear @sronilsson,\r\n\r\nThank you so much for your precious help. I just wanted to let you know that I could run the analysis without any problem.\r\n\r\nBest,", + "created_at": "2023-10-15T20:08:49Z", + "author": "FarahnazYF" + }, + { + "body": "Thanks for letting me know!", + "created_at": "2023-10-15T20:18:38Z", + "author": "sronilsson" + } + ] + }, + { + "title": "File in Outlier_Movement__Location Folder Not Found for Analysis", + "body": "When we go to analyze the ROI data, there is no file found in the outlier_movement_location folder. We do not know what file is supposed to be there, but it seems to be required for the process to continue.\r\nTo reproduce:\r\nWe imported data and video onto the GUI. We put in video parameters and performed outlier correction and ran it. We defined ROIs, then hit analyze ROI data aggregates (1 animal, body part 1 is set to center, probability threshold set to 0.02). Then we get this error: SIMBA NO FILES FOUND ERROR: No data files found in C:/Users/(username)/sleap/Simba\practice1\project_folder\csv\outlier_corrected_movement_location\r\nExpected behavior: there should be a file at the given location specified in the error\r\nOS: Windows 10\r\nPython: 3.6.13\r\nWe are using Anaconda\r\nAdditional context: the actual folder in question is empty in the directory. We do not know when this exact file is created or how it is supposed to get there. Looking for information on that specific issue.\r\n![Screenshot 2023-10-09 150010](https://github.com/sgoldenlab/simba/assets/69989544/13255e7d-55ab-4d7f-ad01-e438e8502394)", + "user": "nschapp", + "reaction_cnt": 0, + "created_at": "2023-10-12T13:14:22Z", + "updated_at": "2024-02-20T13:04:13Z", + "author": "nschapp", + "comments": [ + { + "body": "Hi @nschapp! After you have imported the data into your SimBA project, it lands in the `project_folder/csv/input_csv` directory. The next step in SimBA is to perform (or indicate to skip) outlier correction, as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-4-outlier-correction). E.g., if you click this button:\r\n\r\n\"image\"\r\n\r\nThis copies the files in the `project_folder/csv/input_csv` folder to the `project_folder\csv\outlier_corrected_movement_location` directory and formats the files into a standard format, regardless of the pose-estimation tool you used, suitable for whatever operation you want to do in SimBA. ", + "created_at": "2023-10-12T13:47:04Z", + "author": "sronilsson" + }, + { + "body": "Hi again. This reply is coming a bit late, but we were unable to resolve the issue. 
The initial imported data does not appear to be showing up in the project_folder/csv/input_csv directory in the first place. We don't know where the data is going and how/what exactly to move to that location. If we can get the imported data to land there, it should be fine. It should be noted that there is no completion message for the \"import tracking data\" step, nor an \"error this already exists\" message when trying to import again.", + "created_at": "2024-01-16T21:43:56Z", + "author": "nschapp" + }, + { + "body": "Hi @nschapp! Sorry, I might have asked before, but just checking:\r\n\r\nYou say there is no completion message for \"import tracking data\", which makes me think it errors out unexpectedly. Just to confirm, I understand you don't see an error msg in the SimBA window, but do you see any error msg in the Windows terminal that you used to boot up SimBA? ", + "created_at": "2024-01-16T21:54:11Z", + "author": "sronilsson" + }, + { + "body": "This appears to be what is seen in the Windows Terminal after booting up SimBA.\r\n![image (4)](https://github.com/sgoldenlab/simba/assets/69989544/690a06f9-f13f-4d90-83b4-bea3e1b1213e)\r\n", + "created_at": "2024-01-22T23:47:05Z", + "author": "nschapp" + }, + { + "body": "Thanks @nschapp! It seems to have some trouble with your sleap file. It would be easiest and fastest to solve by taking a look at the tracking data file - would you mind sharing the H5 file from sleap? You can zip it up and drag it into this chat (if not too large; if very big, you could share it through a gdrive link)", + "created_at": "2024-01-22T23:51:39Z", + "author": "sronilsson" + }, + { + "body": "https://drive.google.com/file/d/1s9pc1BEbm9I8-WIHHqtRp3lGXCDBAfF3/view?usp=sharing\r\n\r\nHere is a google drive link for our H5 file. It is the one we've been using for all of this testing so far.", + "created_at": "2024-01-29T19:26:32Z", + "author": "nschapp" + }, + { + "body": "Thanks @nschapp - I can see an h5 file called `best_model.h5`. I've opened the `best_model.h5`, and it doesn't look like it contains body-part location predictions for a video file. Rather, **it looks like a model file, that you can use within the SLEAP interface, to *create* the CSV or H5 files which in turn contain predictions for the locations of the body-parts within videos.** \r\n\r\nOnce you create those files (the CSV or H5 files with body-part predictions) using the H5 you just shared, you can import those into SimBA. Let me know if that makes sense. ", + "created_at": "2024-01-29T20:43:59Z", + "author": "sronilsson" + }, + { + "body": "Hi, I also have issues with importing the tracking data. Not sure though whether the error stems from SLEAP not saving the data as it should (a bug on their side) or whether there might be an issue with SimBA importing the tracking data...\r\n\r\nI tried importing an SLP file, which I thought was working, but I just noticed this traceback:\r\n![image](https://github.com/sgoldenlab/simba/assets/160256830/89d45573-0f29-4a59-b018-278d1e829d6c)\r\n\r\nThere is also no input_csv file saved afterwards (which is needed for the outlier correction, I assume). When I try to continue setting up and loading the project in SimBA, it seems to work up until the outlier correction. After trying to do that I receive the same error message as nschapp reported (SIMBA NO FILES FOUND ERROR: No datafiles found in C:/Users/User/Desktop/simba_test_v11\test\project_folder\csv\input_csv). 
But that is probably because of this initial traceback when importing the data?\r\n\r\nI found this thread and then also tried to import the H5 file instead, but that gives me an error about a video file that cannot be found:\r\n![image](https://github.com/sgoldenlab/simba/assets/160256830/a9c24631-95e5-41d3-860c-8a9e107f0b16)\r\nI believe that this person had a similar issue: https://github.com/sgoldenlab/simba/issues/53\r\n\r\nLastly, I also tried importing the tracking data from a CSV file, and doing that, my imported video shows up and I can select the two animals in my video (they don't show any tracking points however, and I am unsure whether they're supposed to...)\r\n![image](https://github.com/sgoldenlab/simba/assets/160256830/8b03c148-3eef-473c-bc7b-876e68a1e150)\r\n\r\nHowever, after selecting and confirming the two animals shown in the video, I receive the following error message:\r\n![image](https://github.com/sgoldenlab/simba/assets/160256830/ac3eddb8-fe9a-4e1f-9b48-45fb4ada4a81)\r\n\r\nAs mentioned before, I think the error could perhaps also be with SLEAP (https://github.com/talmolab/sleap/issues/1681), but this thread and the one linked below from the SLEAP github seem to be the most similar to my issue.\r\n\r\nAny help/tips would be very much appreciated :) Thank you!\r\n\r\n", + "created_at": "2024-02-16T16:08:58Z", + "author": "glinnes12" + }, + { + "body": "Hi @glinnes12 ! Let me take a look - can you share an example SLP, CSV and H5 file from an example video (together with the video) and I can see whats going wrong? ", + "created_at": "2024-02-16T17:22:27Z", + "author": "sronilsson" + }, + { + "body": "Hi Simon,\r\n\r\nThank you for your help! I 've uploaded the files on WeTransfer as they are a bit too large to send via email.\r\nHere's the link to that: https://we.tl/t-GWjiEXlmYw\r\n\r\n- Hannah\r\n\r\n\r\n________________________________\r\nFrom: Simon Nilsson ***@***.***>\r\nSent: 16 February 2024 18:22\r\nTo: sgoldenlab/simba ***@***.***>\r\nCc: glinnes12 ***@***.***>; Mention ***@***.***>\r\nSubject: Re: [sgoldenlab/simba] File in Outlier_Movement__Location Folder Not Found for Analysis (Issue #294)\r\n\r\n\r\nHi @glinnes12 ! Let me take a look - can you share an example SLP, CSV and H5 file from an example video (together with the video) and I can see whats going wrong?\r\n\r\n—\r\nReply to this email directly, view it on GitHub, or unsubscribe.\r\nYou are receiving this because you were mentioned.Message ID: ***@***.***>\r\n", + "created_at": "2024-02-19T10:06:54Z", + "author": "glinnes12" + }, + { + "body": "Thanks @glinnes12 ! My computer unfortunately blocks WeTransfer, I am not allowed to go there for whatever reason. Could you share it through gdrive? ", + "created_at": "2024-02-19T12:14:08Z", + "author": "sronilsson" + }, + { + "body": "Of course, here's the Google Drive link: https://drive.google.com/drive/folders/1S7yV9T8XclFAmB7-yPO30G6dgqUzofWK?usp=drive_link\r\n", + "created_at": "2024-02-19T12:37:16Z", + "author": "glinnes12" + }, + { + "body": "Cheers! I've requested access", + "created_at": "2024-02-19T12:40:38Z", + "author": "sronilsson" + }, + { + "body": "Done! :)", + "created_at": "2024-02-19T12:42:30Z", + "author": "glinnes12" + }, + { + "body": "> Done! :)\r\n\r\nGot it thanks!", + "created_at": "2024-02-19T12:46:04Z", + "author": "sronilsson" + }, + { + "body": "@glinnes12 I opened the `simba_test_v11.csv`. This file suggests that the video you have generated predictions for contains **at least** 53438 frames. 
There are two animals, so with an optimal sleap tracking model, I would expect to see that `simba_test_v11.csv` contains 53438 x 2 = 106876 rows with body-part prediction data. \r\n\r\nHowever, simba_test_v11.csv contains 231 rows, meaning that 106876 - 231 = 106645 animal detections are missing in the sleap prediction data. \r\n\r\nWhen the data is missing, you can try to interpolate the data in SimBA. However, as we have 106645 frames x 8 body-parts = 853160 missing body-parts that would need to be interpolated, that interpolation is going to be very tough and likely inaccurate. \r\n\r\nWhen SimBA finds missing data and no interpolation is done, it places the body-parts at the top left of the image at coordinate (0,0). If you look closely at the image at the bottom that you sent, where SimBA displays image 1 from the video, you'll see the body-parts up there stuck on top of each other, as the SLEAP predictions don't contain any data for this frame and no interpolation is done: \r\n\r\n\"image\"\r\n\r\nI will dig into the slp and h5 files, as you shouldn't be thrown these errors. However, downstream, you will encounter other errors or receive inaccurate results, as the data is so sparse and mainly missing. Is there any way to go back to sleap to get accurate body-part predictions for at least the majority of the frames? \r\n\r\n", + "created_at": "2024-02-19T14:47:18Z", + "author": "sronilsson" + }, + { + "body": "Hmmm, this seems really strange! We got pretty accurate predictions in sleap (around 5-6 points in the inference score). The simba_test_v11.slp file should contain predictions for 54'044/54'050 frames. The H5 file I cannot open for some reason... I wonder whether only my labelled frames were saved in the csv and H5?", + "created_at": "2024-02-19T14:58:22Z", + "author": "glinnes12" + }, + { + "body": "Potentially; there are no probability scores associated with the body-part predictions. And the frames that the data contains in general seem to have both animals detected (there are only a few frames with only one animal detected), which would be in agreement with these being human labelled, I think? \r\n\r\n\"image\"\r\n", + "created_at": "2024-02-19T15:28:06Z", + "author": "sronilsson" + }, + { + "body": "yes, this seems to be labelled data and corrected predictions, which is why they don't contain prediction and instance scores either.\r\nI must have missed some of the predictions where there's only one animal in the frame.\r\n\r\nAnd the SLP file must then contain all of the computer's predictions, which are the ones we would like to use in SimBA.", + "created_at": "2024-02-19T15:49:56Z", + "author": "glinnes12" + }, + { + "body": "Let me come back to you a little later to see what happened with the SLP file import. \r\n\r\nJust a note on the SLP import, as I am not sure if I mention it anywhere in the documentation:\r\n\r\nIt takes a fair amount of data wrangling to import the SLP data into SimBA compared to the H5 and CSV. I talked to the SLEAP developers some months back, and they helped improve the CSV and H5 import, but not the SLP import into SimBA. I therefore recommend the H5 and CSV over SLP, as it is so much quicker. \r\n\r\n", + "created_at": "2024-02-19T16:07:08Z", + "author": "sronilsson" + }, + { + "body": "@glinnes12 - I tried with your H5 file and that imported fine, and it seems to contain all data! 
but one thing to get it to work:\r\n\r\nThe h5 file is called `labels.v002.bottomup.newskeleton.test.slp.240209_114949.predictions.000_simba_test_v11.analysis.h5` while the video file is called `simba_test_v11.avi`. SimBA needs a way of pairing each data file to the corresponding video file, and it does that by looking at the file names. I renamed `labels.v002.bottomup.newskeleton.test.slp.240209_114949.predictions.000_simba_test_v11.analysis.h5` -> `simba_test_v11.h5` and then imported it. Can you check if that works on your end? ", + "created_at": "2024-02-19T19:24:18Z", + "author": "sronilsson" + }, + { + "body": "PS. If you want to open and look at the h5, there is a desktop tool called [HDFView](https://www.hdfgroup.org/downloads/hdfview/) that can be helpful. To read it in python to look at it, you can do:\r\n\r\n```\r\nimport h5py\r\n\r\nslp_path = r'/Users/simon/Desktop/envs/simba/troubleshooting/sleap_two_animals/h5_import/simba_test_v11.h5'\r\nsleap_file = h5py.File(slp_path, \"r\")\r\n\r\n## PRINT DATA IN FILE\r\nprint(sleap_file.keys())\r\n```", + "created_at": "2024-02-19T19:29:59Z", + "author": "sronilsson" + }, + { + "body": "Renaming the H5 file seemed to work! Thanks a lot :)\r\n", + "created_at": "2024-02-20T12:40:03Z", + "author": "glinnes12" + }, + { + "body": "Thanks @glinnes12! .. PS. Just remembered there should be a function call that automatically detects and renames the SLEAP tracking data file name to match the video name. Not sure why that didn't happen, and I will look into it. ", + "created_at": "2024-02-20T13:04:12Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Include Timestamp for Each Behavioral Bout", + "body": "Would it be possible for SimBA to include the timestamp for each behavioral bout in a video? I know that the time of first occurrence for each behavior is reported in the .csv files under \"Analyze Machine Predictions\" and in each time bin in \"Time Bins: Machine Predictions.\" However, the timestamp for every behavioral bout in the video is not listed. By including the timestamp for each behavioral bout, patterns of behavior can be identified in a video. \r\n", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-10-10T21:39:41Z", + "updated_at": "2023-10-11T13:37:37Z", + "author": "mrnels19", + "comments": [ + { + "body": "Hi @mrnels19, that's a good point. To be honest, I'm losing track of all the functionalities and it feels like this should already be available through some menu somewhere but I don't know lol. It is calculated by [THIS function](https://github.com/sgoldenlab/simba/blob/0e4572b854aa76d596560258d121f56156dfb32a/simba/utils/data.py#L42C5-L42C17) and SimBA uses it as the base for many other calculations. \r\n\r\nHow about I insert another checkbox in this pop-up menu in the screengrab below, that when checked outputs a CSV that lists the start times, end times, start frames and end-frames of all selected behaviors in all videos, does that work? \r\n\r\n\"image\"\r\n \r\n\r\n ", + "created_at": "2023-10-10T22:31:53Z", + "author": "sronilsson" + }, + { + "body": "Thank you! 
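(As background for this feature: a minimal, generic sketch of collapsing a framewise 0/1 classifier column into one row per behavioral bout. This is an illustration of the idea only, not the SimBA function linked above.)

```python
import pandas as pd

def bout_table(labels: pd.Series, fps: float) -> pd.DataFrame:
    """One row per behavioral bout: start/end frames and start/end times."""
    runs = (labels != labels.shift()).cumsum()       # id consecutive runs of 0s and 1s
    bouts = []
    for _, run in labels.groupby(runs):
        if run.iloc[0] == 1:                         # keep runs where the behavior is present
            start, end = run.index[0], run.index[-1]
            bouts.append({"start_frame": start, "end_frame": end,
                          "start_time_s": start / fps, "end_time_s": end / fps})
    return pd.DataFrame(bouts)

print(bout_table(pd.Series([0, 1, 1, 0, 0, 1, 1, 1, 0]), fps=30.0))  # toy classifier output
```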
This update sounds great.", + "created_at": "2023-10-11T05:43:19Z", + "author": "mrnels19" + }, + { + "body": "By the way, do you know the name of the most updated version of SimBA that will have this feature?", + "created_at": "2023-10-11T05:50:47Z", + "author": "mrnels19" + }, + { + "body": "@mrnels19 It will be in `1.74.4` - if you update with `pip install simba-uw-tf-dev --upgrade` you should see it:\r\n\r\n\"image\"\r\n\r\nIf you check this button, you can expect a file in the logs folder named and looking like this:\r\n\r\n[detailed_bout_data_summary_20231011091832.csv](https://github.com/sgoldenlab/simba/files/12870302/detailed_bout_data_summary_20231011091832.csv)\r\n\r\nLet me know if that is what your looking for or if anything is missing!\r\n\r\nSimon\r\n", + "created_at": "2023-10-11T13:25:49Z", + "author": "sronilsson" + } + ] + }, + { + "title": "superimpose frame count on video", + "body": "**Describe the bug**\r\nI would like to superimpose frame count on a video, however, I keep getting the same error message \"Fontconfig error: Cannot load default config file: No such file: (null)\", the SimBA GUI tells me that the video is generated and I can see it in the folder but it has 0 bytes, any idea how to solve this? thanks in advance \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'SimBA GUI\r\n2. Click on 'Tools'\r\n3. Scroll down to 'Superimpose frame numbers on video'\r\n4. Browse 'VIDEO PATH' and RUN (no GPU used)\r\n\r\n**Expected behavior**\r\nI would expect to get frame number overlay on the video.\r\n\r\n**Screenshots**\r\nin attachment\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 11\r\n - Python Version 3.6.13\r\n - Are you using anaconda? Yes, created a virtual env with SimBA\r\n \r\n\r\n**Additional context**\r\nn/a.\r\n\r\n![Screenshot (829)](https://github.com/sgoldenlab/simba/assets/64312024/fdde8513-b1af-4167-9cf2-f89f7df6f2b2)\r\n![Screenshot (830)](https://github.com/sgoldenlab/simba/assets/64312024/f2fb23f4-6cb7-4422-892e-f80cfb5b57a2)\r\n\r\n", + "user": "filos93", + "reaction_cnt": 0, + "created_at": "2023-10-09T16:18:11Z", + "updated_at": "2023-10-11T16:31:10Z", + "author": "filos93", + "comments": [ + { + "body": "Thanks for reporting this @filos93 ! Let me see if I can recreate it and insert a fix.", + "created_at": "2023-10-09T16:42:50Z", + "author": "sronilsson" + }, + { + "body": "Hi @filos93 - I tried to recreate your error, but I couldn't, neither on Windows10 or MacOS (I don't have access to Win11 at the moment). \r\n\r\nI tried to insert a fix nevertheless, if you update SimBA again (`pip install simba-uw-tf-dev --upgrade`) what do you see?\r\n\r\n The fix is (i) use the font that comes with SimBA, (ii) if that font can't be found, then use Arial.ttf located in `C:/Windows/fonts` (as far I know it is located in `C:/Windows/fonts` in Win11), (iii) if Arial can't be found, use the first font found in `C:/Windows/fonts`. \r\n\r\n", + "created_at": "2023-10-09T22:54:23Z", + "author": "sronilsson" + }, + { + "body": "hey @sronilsson thanks for the support, unfortunately I could not make it work even upgrading, SimBA tells me the file is generated but when I check it is a 0-byte file, the terminal continues to generate the same error message of \"Fontconfig error: Cannot load default config file\". 
I attached some screenshots about what I get in the terminal, how it looks in SimBA, and how the code looks to me - hope these can help us, please let me know\r\n\r\n![Screenshot (840)](https://github.com/sgoldenlab/simba/assets/64312024/7b810de8-5462-4395-8a4c-ba6fbaa10265)\r\n![Screenshot (837)](https://github.com/sgoldenlab/simba/assets/64312024/8af94281-1455-405e-bffd-bee4ab84edd4)\r\n![Screenshot (841)](https://github.com/sgoldenlab/simba/assets/64312024/c2162c1c-ff71-4875-b0bf-49e1555dd724)\r\n", + "created_at": "2023-10-10T17:02:21Z", + "author": "filos93" + }, + { + "body": "Thanks for testing @filos93 - let's see how this could come about. \r\n\r\nIf you look at the path that SimBA is printing out in the file explorer:\r\n\r\n\"image\"\r\n\r\nDoes this file exist on your system?\r\n\r\nEDIT: If the file exists, is there anything odd with it? E.g. is the font file 0kb, or corrupted somehow?\r\n\r\n\r\n", + "created_at": "2023-10-10T20:25:25Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson the file \"UbuntuMono-Regular\" exists and it is 185kb, I also checked in C:/Windows/Fonts and the file is there, I don't think it is corrupted, I attached some screenshots to help, thanks again \r\n\r\n![Screenshot (843)](https://github.com/sgoldenlab/simba/assets/64312024/eb2cef56-bbf4-4eee-9c1c-2386d30d5336)\r\n![Screenshot (844)](https://github.com/sgoldenlab/simba/assets/64312024/0a842e43-d8f8-4bb0-8d11-f184781c4380)\r\n![Screenshot (847)](https://github.com/sgoldenlab/simba/assets/64312024/be016b64-a9ba-412c-bffb-bdcf99a0b2705)\r\n", + "created_at": "2023-10-11T08:39:27Z", + "author": "filos93" + }, + { + "body": "That all looks in order.. I don't get - yet - how this can happen. I am going to have to borrow and troubleshoot on a Win11 computer today and see if I can recreate this error myself. ", + "created_at": "2023-10-11T13:29:26Z", + "author": "sronilsson" + }, + { + "body": "Alright @filos93, I think I might have figured it out.. On Windows, the paths should be expressed with `/` and not `\\` and the drive initial should be omitted. At least that helped me remove the font config warning on my end.\r\nIf that fails, FFmpeg appears to pick the system default font, so I don't know why that didn't happen in your case and I am still a little unsure. \r\n\r\nIf you do `pip install simba-uw-tf-dev --upgrade` to version 1.74.5, how does it look? \r\n\r\n\r\n", + "created_at": "2023-10-11T14:47:16Z", + "author": "sronilsson" + }, + { + "body": "hey @sronilsson it worked thank you so much :) the only thing is the SimBA GUI did not tell me that the video was created and I could not see the video at first; when I tried to superimpose again I was able to find the video in the right folder within SimBA, so I had to move the video to another location and at that point I could see the video - this was just to let you and the other users know, again thanks a lot for all your support!!\r\n\r\n![Screenshot (851)](https://github.com/sgoldenlab/simba/assets/64312024/f1d2af63-0ab6-4910-b443-7366b3b6aa92)\r\n![Screenshot (849)](https://github.com/sgoldenlab/simba/assets/64312024/43134089-099b-4dfc-9a33-ca070d2b7ecb)\r\n![Screenshot (850)](https://github.com/sgoldenlab/simba/assets/64312024/d4681f08-ce7a-4eba-b71a-30099aeca6c7)\r\n", + "created_at": "2023-10-11T16:06:16Z", + "author": "filos93" + }, + { + "body": "Ah good point @filos93, I seem to have dropped the print statement in the speed of things. I added it in version `1.74.6` but please let me know if any more issues or oddities pop up. 
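(For reference, a hedged sketch of handing a Windows font file straight to FFmpeg's drawtext filter. One commonly used convention is forward slashes with the drive colon escaped inside the filter string; the fix above instead omits the drive initial - both avoid the filter parser treating ':' as an option separator. All file names below are examples only.)

```python
import subprocess

font = "C\\:/Windows/Fonts/arial.ttf"   # '/' separators; ':' escaped for the filter parser
vf = (f"drawtext=fontfile='{font}':text='%{{frame_num}}':"
      "x=20:y=20:fontsize=24:fontcolor=white")
subprocess.run(["ffmpeg", "-y", "-i", "input.mp4", "-vf", vf, "frame_no.mp4"], check=True)
```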
\r\n\r\nSimon ", + "created_at": "2023-10-11T16:17:08Z", + "author": "sronilsson" + }, + { + "body": "hey @sronilsson thank you so much, everything worked perfectly, really thanks for all the troubleshooting and the support :)\r\n\r\n![Screenshot (853)](https://github.com/sgoldenlab/simba/assets/64312024/63c5eeb5-fd27-41ce-9ff9-1d888763e120)\r\n![Screenshot (854)](https://github.com/sgoldenlab/simba/assets/64312024/80df7cb6-a4cc-4ca8-8a2e-d2338a2713d3)\r\n", + "created_at": "2023-10-11T16:31:09Z", + "author": "filos93" + } + ] + }, + { + "title": "info on linked events regarding multiple animals occurring in ROI", + "body": "**Is your feature request related to a problem? Please describe.**\r\nFirst of all, thanks for the tool, it is great and really really useful, it would be fantastic to have the possibility to get combined info and summary data on linked events/actions (time in, time facing, movement in) performed by multiple animals and occurring simultaneously in the ROI domain\r\n\r\n**Describe the solution you'd like**\r\nI would like the possibility to combine info regarding multiple animals' actions performed in the ROI with associated ROI CSV files (ROI entry, time, and movement data), for instance, the total sums (s) of animal 1 and animal 2 in ROI or when animal 1 and animal 2 are both facing the ROI or when animal 2 is facing the ROI and animal 1 is in the ROI (disjointed)\r\n\r\n**Describe alternatives you've considered**\r\nYou can already extract similar info after you append the ROI data to the feature file and the ROI_data_### folder and CSV file are generated. There, you can combine the info using the booleans (0-1) regarding the two animals and their actions (in zone, facing zone) associated with the ROI, you can filter and get the total frames where the actions of the two animals are associated and dividing by the frame rate you can get the total time, but it remains very cumbersome and time-consuming (attached a screenshot)\r\n![Screenshot (806)](https://github.com/sgoldenlab/simba/assets/64312024/0d709583-9090-486c-a1af-b13684cac2da)\r\n\r\n\r\n**Additional context**\r\nTHANKS IN ADVANCE :)\r\n", + "user": "filos93", + "reaction_cnt": 0, + "created_at": "2023-10-04T07:50:23Z", + "updated_at": "2023-10-10T07:17:54Z", + "author": "filos93", + "comments": [ + { + "body": "Hi @filos93 ! Yes I think I understand. To confirm: you want to compute aggregate summary statistics for multiple Boolean conditionals (e.g., to answer a question: for how many seconds are animal 1 and animal 2 concurrently facing rectangle X)? \r\n\r\nWe can do this! but won't get to it for a few days. It needs to be flexible to handle cases with more complicated rules to be of any use, and takes a bit of tinkering to get rule selection in graphical interface, but I think we already have the bits and pieces for it. ", + "created_at": "2023-10-04T14:46:29Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson yeah exactly that, it would be fantastic to get summary statistics for multiple animals' simultaneous actions in ROI, also when these are disjointed (eg, animal 1 facing ROI and animal 2 in ROI), thanks again for your reply, can't wait to use the new feature", + "created_at": "2023-10-04T16:05:50Z", + "author": "filos93" + }, + { + "body": "@filos93 I did put in the methods, if you update SimBA with `pip install simba-uw-tf-dev --upgrade` you should see it. 
I haven't written documentation yet, would be good to get your feedback first: \r\n\"Untitled\"\r\n\r\nChoose the number of conditions you have in the `#RULES# dropdown. \r\n\r\nIn the above example, it will output how many seconds, and how many frames, in each video, where the animal called Simon is inside the shape called `Rectangle_1` while also facing the shape `Polygon_1`, while at the same time animal JJ is outside the shape `Rectangle_1` and directing towards `Polygon_1`. \r\n\r\n", + "created_at": "2023-10-04T18:39:16Z", + "author": "sronilsson" + }, + { + "body": "hey @sronilsson yes it looks great, thank you so much, unfortunately, I could not make it work even when upgrading to the latest version, I didn't understand if that was supposed to work or was just for visualizing it, thanks", + "created_at": "2023-10-05T09:17:34Z", + "author": "filos93" + }, + { + "body": "@filos93 if you upgrade SimBA to the latest version, and launch SimBA, you should see this button with purple font named `AGGREGATE BOOLEAN CONDITIONAL STATISTICS` under the ROI tab. If you click on it, it brings up the pop-up to the left in the screengrab below:\r\n\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/d2a74076-ebd0-4a3e-b0ad-1ae38bc69ef9)\r\n\r\nIn the `# RULES` dropdown, select how many conditional rules you have. For example, if you want when Animal_1 and Animal_2 is facing your shape at the same time, set this to `2`. Next, set the two behavior drop-downs to `Your_Shape Animal 1 facing` and `Your_Shape Animal 2 facing`, and the two STATUS dropdown to `TRUE`.\r\n\r\nFinally click `RUN` and a summary statistics file should be created in the project_folder/logs directory named something like `Conditional_aggregate_statistics_{self.datetime}.csv`, let me know how it goes!\r\n", + "created_at": "2023-10-05T14:49:42Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson thanks for reply, unfortunately even upgrading I could not make it work, when I hit the \"RUN\" button nothing happens, no process neither CSV file, in the log file my action is not reported, please let me know if I have to run forced steps before entering the aggregate boolean conditional statistics, thank you so much", + "created_at": "2023-10-06T16:18:33Z", + "author": "filos93" + }, + { + "body": "Hi @filos93 ! MY BAD! Sorry, I had commented out the run command lol, so no wonder... I will fix it and let you know. ", + "created_at": "2023-10-06T16:22:33Z", + "author": "sronilsson" + }, + { + "body": "I think I fixed it, if you run `pip install simba-uw-tf-dev --upgrade`, so you have version `1.73.9`, how does it look? ", + "created_at": "2023-10-06T16:50:01Z", + "author": "sronilsson" + }, + { + "body": "hey @sronilsson it works perfectly, everything went smoothly (attached some files) thank you so much, I was also wondering if it's possible to have entry and exit frames for the combined/aggregate information, something similar to what you get in the Detailed_ROI_data_###.csv file, this information would give the user the possibility to directly check results accuracy on the video frames, please let me know what you think. 
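Under the hood, this kind of conditional aggregate is a handful of boolean operations on the framewise ROI columns. A minimal sketch with hypothetical column names (SimBA's real column names depend on your shape and animal names):

```python
import pandas as pd

df = pd.read_csv("Video1.csv")   # framewise features file with 0/1 ROI columns
fps = 25.0

mask = ((df["Rectangle_1 Simon in zone"] == 1)    # rule 1: TRUE
        & (df["Polygon_1 Simon facing"] == 1)     # rule 2: TRUE
        & (df["Rectangle_1 JJ in zone"] == 0)     # rule 3: FALSE
        & (df["Polygon_1 JJ facing"] == 1))       # rule 4: TRUE
print(f"{int(mask.sum())} frames -> {mask.sum() / fps:.2f} s meeting all four conditions")
```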
Again, thanks in advance :) \r\n\r\n![Screenshot (824)](https://github.com/sgoldenlab/simba/assets/64312024/2f241f96-fd3c-437c-8b17-bf137a1eb716)\r\n\r\n![Screenshot (825)](https://github.com/sgoldenlab/simba/assets/64312024/e824c910-3f2e-4eb7-b861-fd82e18ea246)\r\n", + "created_at": "2023-10-09T08:03:16Z", + "author": "filos93" + }, + { + "body": "Hi @filos93 - that's a good point!\r\n\r\nIf you update simba with `pip install simba-uw-tf-dev --upgrade`, you should get a second output file names something like `Conditional_aggregate_detailed_20231009090249.csv` with detailed data like the file below. \r\n\r\nCan you let me know if it runs and looks as expected and if the values agrees with your calculations? \r\n\r\n\r\n[Conditional_aggregate_detailed_20231009090249.csv](https://github.com/sgoldenlab/simba/files/12846417/Conditional_aggregate_detailed_20231009090249.csv)\r\n", + "created_at": "2023-10-09T13:08:25Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson it looks fantastic (files in attachment), thank you so much :)\r\n\r\n![Screenshot (825)](https://github.com/sgoldenlab/simba/assets/64312024/d7017adb-f196-483b-808c-30a987516ffc)\r\n![Screenshot (826)](https://github.com/sgoldenlab/simba/assets/64312024/74f30c84-b6c9-4b11-a64f-cefcf7b5c745)\r\n![Screenshot (827)](https://github.com/sgoldenlab/simba/assets/64312024/27c621d0-3828-4c08-8fd0-0d1fa071b562)\r\n", + "created_at": "2023-10-09T16:00:44Z", + "author": "filos93" + } + ] + }, + { + "title": "no ROI_features_summary csv file created", + "body": "**Describe the bug**\r\nHey, thanks so much for the fantastic software and all the hard work, I have a problem with the creation of the ROI_features_summary CSV file. After I analyze roi aggregates, extract features, and then append roi data to features, I cannot get any ROI_features_summary CSV file. I do not get any error message back, I just do not get the file. As I am using the latest version of SimBA (version 1.73.3), I would like to know if you do not get such file anymore in the latest version or if I have to run some additional steps. Please, let me know. Thanks in advance.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Draw a rectangle roi\r\n2. Analyze roi data aggregates\r\n3. append roi data to features (once extracted)\r\n4. no error, but no ROI summary csv file generated \r\n\r\n**Expected behavior**\r\nI would expect to get a ROI_features_summary_####.csv file as mentioned in https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md#part-3-generating-features-from-roi-data\r\n\r\n**Screenshots**\r\nn/a.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11\r\n - Python Version 3.6\r\n - Are you using anaconda? yes, I created a virtual env\r\n \r\n\r\n**Additional context**\r\nI would like to use the info from the ROI summary csv file especially for ROI directionality analysis (I used similar bp labelling that include nose, left ear, and right ear tracking as specified by you), in particular the sum of time (in seconds) that each animal spent directed towards the ROI in my video\r\n", + "user": "filos93", + "reaction_cnt": 0, + "created_at": "2023-10-03T16:32:18Z", + "updated_at": "2023-10-04T07:51:38Z", + "author": "filos93", + "comments": [ + { + "body": "Hi @filos93 - thank you for reporting this and good catch! 
Looking at the code, it looks like this file output was lost in translation, probably when we introduced the user option to include ROI features based on body-parts **and/or** ROI features based on animals, I will insert it again and let you know. \r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-10-03T17:44:19Z", + "author": "sronilsson" + }, + { + "body": "@filos93 if you upgrade to simba 1.73.5 with `pip install simba-uw-tf-dev --upgrade`, how does it look on your end? \r\n\r\nThanks\r\nSimon ", + "created_at": "2023-10-03T18:45:04Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson I upgraded to the latest version (v. 1.73.5) and it worked perfectly, the ROI_features_summary CSV file was created also including directionality info (I attached screenshots for others)\r\n![Screenshot (803)](https://github.com/sgoldenlab/simba/assets/64312024/420692d5-175a-442f-9e80-de9e6c681a1e)\r\n![Screenshot (804)](https://github.com/sgoldenlab/simba/assets/64312024/e4e3ee30-6a64-4857-a750-1ae690ecd6dc)\r\nThank you so much for all the support", + "created_at": "2023-10-04T07:03:34Z", + "author": "filos93" + } + ] + }, + { + "title": "Documents in model_evaluations Folder", + "body": "Hi, I have been using SimBA to train a few models, and I was wondering what the documents in the model_evaluations folder represent. In this folder, I have a feature_importance_bar_graph saved as a .png file, a feature_importance_log csv file, and a meta csv file. What information is found in each of these files, and how does this relate to the accuracy of the model? Thank you!", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-10-02T00:22:00Z", + "updated_at": "2023-10-02T11:38:06Z", + "author": "mrnels19", + "comments": [ + { + "body": "Hoi @mrnels19 !\r\n\r\nThe `.meta.csv` file is a SimBA model config file. It also functions as a log for you to see which settings you used to create your classifier. You can use it as a config file in the SimBA interface to load the settings, in cases where you want to create a new model with the same or similar settings here:\r\n\r\n\"image\"\r\n\r\n\r\nThe `feature_importance_bar_graph` and `feature_importance_log csv file` stores a list the most important features, and the importance of all features, within your classifier. For example, if you where classifying running, you'd expect features that measures movement and velocity to be shown at the top. These two files get created when you have these checkboxes ticked when you create your model:\r\n\r\n\r\n\"image\"\r\n\r\n\r\nThey don't give you the accuracy of the model, you can think of it has information on how the model works and reaches it conclusions and sanity check.\r\n\r\n\r\nYou can read more about these settings in the documentation [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#train-predictive-classifiers-settings). 
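For intuition about where those two feature-importance files come from: any fitted scikit-learn random forest exposes per-feature importances, and dumping and plotting them looks roughly like the sketch below (synthetic data; an illustration of the concept, not SimBA's exact pipeline; plotting requires matplotlib):

```python
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=500, n_features=10, random_state=0)
clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)

importances = (pd.Series(clf.feature_importances_,
                         index=[f"feature_{i}" for i in range(X.shape[1])])
               .sort_values(ascending=False))
importances.to_csv("feature_importance_log.csv")      # full per-feature log, like the CSV above
ax = importances.head(20).plot.bar()                  # top features, like the bar-graph PNG
ax.get_figure().savefig("feature_importance_bar_graph.png", bbox_inches="tight")
```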
\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-10-02T11:36:15Z", + "author": "sronilsson" + } + ] + }, + { + "title": "changes for python 3.9 and python 3.10", + "body": "Hi\r\nI started working with your frame work as part of my role as data scientist at Klavir lab in Haifa university, i have to say, you frame work is really amazing for the amount of analysis and functionality it enables :)\r\n\r\nSo some package version needed to be upgraded, and in therefor some code as well.\r\n\r\nI created ROI's and model on my data, and it seems all the outpout is as it should be and no unnecessary exception happends.\r\n\r\nHope this contribute to your project:)\r\n\r\nCheers", + "user": "tzukpolinsky", + "reaction_cnt": 0, + "created_at": "2023-09-28T09:45:10Z", + "updated_at": "2023-10-23T21:14:20Z", + "author": "tzukpolinsky", + "comments": [ + { + "body": "Very cool thank you @tzukpolinsky!! Do you know if SimBA also runs with these fixes in python3.6 to python3.8 (or at least python3.6)? ", + "created_at": "2023-09-28T17:39:46Z", + "author": "sronilsson" + }, + { + "body": "Hi\r\nI will check this out and let you know:)\r\n\r\nWith regards\r\nTzuk Polinsky\r\n\r\nOn Thu, Sep 28, 2023, 20:39 Simon Nilsson ***@***.***> wrote:\r\n\r\n> Very cool thank you @tzukpolinsky !! Do\r\n> you know if SimBA also runs with these fixes in python3.6 to python3.8 (or\r\n> at least python3.6)?\r\n>\r\n> —\r\n> Reply to this email directly, view it on GitHub\r\n> ,\r\n> or unsubscribe\r\n> \r\n> .\r\n> You are receiving this because you were mentioned.Message ID:\r\n> ***@***.***>\r\n>\r\n", + "created_at": "2023-09-28T17:55:06Z", + "author": "tzukpolinsky" + }, + { + "body": "Hi\r\ntested for creating project and running some functionality, didnt trained a full model just some simple analytical from the vast variety in your app.\r\n\r\nNote:\r\nproject that is originally create with python 3.8+ are not compatible for python 3.7 and 3.6 because of the different pickle protocol in the advanced versions (protocol 5 vs 4).\r\n\r\nfixed some crashes from tinker, just some annoying stuff with the utf it likes and dont likes\r\n\r\nHoped i helped in any way:)", + "created_at": "2023-10-04T13:15:38Z", + "author": "tzukpolinsky" + }, + { + "body": "Thanks @tzukpolinsky! 
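(Regarding the pickle-protocol note above: Python 3.8+ writes pickle protocol 5 by default, which Python 3.6/3.7 cannot read; pinning the protocol at write time keeps files loadable on both sides. A minimal sketch - the file name and payload are hypothetical:)

```python
import pickle

data = {"classifier_name": "attack", "threshold": 0.5}   # any picklable object
with open("model_meta.pickle", "wb") as f:
    pickle.dump(data, f, protocol=4)   # protocol 4 is readable on Python 3.4 and later
```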
It does, give me a little time and I will merge this\r\n\r\nSimon ", + "created_at": "2023-10-05T14:37:35Z", + "author": "sronilsson" + }, + { + "body": "It's around the qt version - maybe updating the qt5?\r\nSudo apt-get upgrade qt5-dev\r\n\r\nOn Thu, Oct 5, 2023, 22:09 Simon Nilsson ***@***.***> wrote:\r\n\r\n> On simba/roi_tools/ROI_define.py:\r\n>\r\n> I can see how this is more reliable but I am hitting an error when I try\r\n> to launch on MacOS, have you seen this before?\r\n>\r\n> QObject::setParent: Cannot set parent, new parent is in a different thread\r\n> QPixmap: It is not safe to use pixmaps outside the GUI thread\r\n> zsh: segmentation fault python App.py\r\n", + "created_at": "2023-10-06T06:37:43Z", + "author": "tzukpolinsky" + }, + { + "body": "Hi\r\ncan we resolve the pull request? :)\r\n\r\nI would like to merge some changes to my main and I don't want to burden this one with them", + "created_at": "2023-10-22T11:16:26Z", + "author": "tzukpolinsky" + }, + { + "body": "Yes, sorry for the delay - I've been meaning to each day and it slipped. I'm back at the computer tonight and will fix that single conflict and merge. ", + "created_at": "2023-10-22T11:22:06Z", + "author": "sronilsson" + }, + { + "body": "@tzukpolinsky - I don't have many tests.. would love more. But I realized two tests are failing:\r\n\r\nBoth appear pandas related:\r\n\r\n```\r\nAttributeError: 'DataFrame' object has no attribute '_is_numeric_mixed_type'. \r\n```\r\n\r\nFor that one I could just comment out the assertion if `_is_numeric_mixed_type` isn't a thing in newer pandas, or maybe the test returns a df and it was previously a series in latest pandas?\r\n\r\nAnd:\r\n\r\n`df = pd.read_excel(file_path, sheet_name=None, usecols=EXPECTED_FIELDS).popitem(last=False)[1]\r\nTypeError: popitem() takes no keyword arguments` - which I've never seen before.. seems latest pandas dropped the `last` argument?\r\n\r\n\r\nIf I pin pandas==0.25.3 in requirements.txt it worked though. Is it essential that pandas is the latest version?", + "created_at": "2023-10-22T17:58:10Z", + "author": "sronilsson" + }, + { + "body": "Hi\r\n\r\nI can try and solve this tomorrow evening\r\n\r\nThe old version becomes deprecated for higher versions of python, so I would recommend trying to work with the newest\r\n\r\nTomorrow I will give it a go", + "created_at": "2023-10-22T18:02:51Z", + "author": "tzukpolinsky" + }, + { + "body": "Thank you, the first assertion is not important - we can live without that. \r\n\r\nThe second error though, on `df = pd.read_excel(file_path, sheet_name=None, usecols=EXPECTED_FIELDS).popitem(last=False)[1]`, needs to be replaced to work in python3.9. \r\n\r\nIt is used when people have Noldus Observer hand-annotations in excel workbooks. An Excel workbook from Noldus Observer contains many spreadsheets for a single video, and most spreadsheets can be discarded, and we only want one of them [HERE](https://github.com/sgoldenlab/simba/blob/d45e63c94b2b5ef95cfdd61ee7a1ac15ae35293c/simba/third_party_label_appenders/observer_importer.py#L92C21-L92C21). \r\n\r\n\r\n", + "created_at": "2023-10-22T19:25:37Z", + "author": "sronilsson" + }, + { + "body": "Ah it should be an easy fix, I think this has something to do with it (from chatgpt):\r\n\r\n\"\"\"\r\n**The order of key-value pairs in dictionaries was not guaranteed prior to Python 3.7. Starting from Python 3.7, dictionaries maintain the insertion order, and this order is preserved when using popitem(). However, dictionaries in earlier Python versions do not maintain order, and the behavior of popitem() may be less predictable in those versions.**\r\n\"\"\"", + "created_at": "2023-10-22T19:42:49Z", + "author": "sronilsson" + }, + { + "body": "Will the following replacement work?\r\n```\r\ndf = next(iter(pd.read_excel(file_path, sheet_name=None, usecols=EXPECTED_FIELDS).items()))[1]\r\n```\r\n\r\nSorry, I don't have Noldus data to check it.\r\n\r\nThe idea is to convert the pandas dict to an iterable and then take the first value.", + "created_at": "2023-10-23T11:34:16Z", + "author": "tzukpolinsky" + }, + { + "body": "Yes I will figure it out! Thanks again @tzukpolinsky, this is very helpful and useful. \r\n\r\nFYI: I have two installation tests that run every time the main branch updates. [One](https://github.com/sgoldenlab/simba/blob/master/.github/workflows/tests_py36.yml) installs SimBA on python3.6, runs some tests and blacks the code. I just inserted a [second](https://github.com/sgoldenlab/simba/blob/master/.github/workflows/tests_py310.yml) that installs SimBA on python3.10 (but doesn't perform any tests as yet). \r\n\r\nThe trouble I had is that those two install tests use the same requirements.txt, but pandas==0.25.3 will not install on python3.10, and the latest pandas version will not install on python3.6 (you need python >= 3.9). \r\n\r\nFor now, I inserted [markers](https://peps.python.org/pep-0508/#environment-markers) in the requirements.txt, so the pandas dependency is different depending on python version: \r\n\r\n\"image\"\r\n\r\nA bit hacky but at least the code will install in both environments.\r\n\r\n", + "created_at": "2023-10-23T13:37:10Z", + "author": "sronilsson" + }, + { + "body": "Looks good :)\r\n\r\nI have a few more changes and functionality I did at my fork (haven't pushed them yet) - we could discuss them over emails, if you would like :)\r\n\r\nGlad I could help", + "created_at": "2023-10-23T15:43:09Z", + "author": "tzukpolinsky" + }, + { + "body": "I think that's a good idea! 
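(To spell out why the replacement proposed earlier in this thread is version-robust: `pd.read_excel(sheet_name=None)` returns an `OrderedDict` in old pandas, where `.popitem(last=False)` popped the first sheet, but a plain `dict` in recent pandas, whose `popitem()` takes no arguments and pops the *last* item. Since dicts preserve insertion order from Python 3.7 onward, `next(iter(...))` grabs the first sheet in both worlds. The workbook name below is hypothetical:)

```python
import pandas as pd

sheets = pd.read_excel("observer_export.xlsx", sheet_name=None)  # {sheet_name: DataFrame}
first_name, df = next(iter(sheets.items()))                      # first sheet, any pandas version
print(f"Using sheet '{first_name}' with {len(df)} rows")
```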
Or, if you join [gitter channel](https://app.gitter.im/#/room/#SimBA-Resource_community:gitter.im) we can do private msgs there and everything will be in one place? Otherwise im at sronilsson@gmail.com", + "created_at": "2023-10-23T21:12:11Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Analyse cleaning behavior in coral reef", + "body": "Hello,\r\n\r\nWe work with cleaner fish, studying the cleaning behavior in coral reef.\r\nIn this interaction, the cleaner fish cleans another fish called \"client\", standing close to him and biting the surface of his body, eating parasites and dead skins.\r\n\r\nMy study is mainly focus on the \"cheating behavior\", when the cleaner fish eats the mucus of the client instead of the parasites. This, provoke a clear reaction in the client identify as the \"Jolt\", a rapid movement of the client away from the cleaner fish.\r\n\r\nDo you think Simba could be a good tool to analyse this kind of interaction, considering the interaction time (time spent together, where the cleaner fish cleans the client), dance (are wide longitudinal movement of the cleaner fish to attract the attention of the client), tactile stimulation (where the cleaner fish stays on the dorsal part of the client providing a massage with pelvic fins), and the cheating behavior (considering so far we have only video from frontal view)?\r\n\r\nI would like to hear your idea about before proceed and try it!\r\n\r\nThank you so much for your time and for your precious help!\r\n\r\nDaniele", + "user": "DanieleRomeo", + "reaction_cnt": 0, + "created_at": "2023-09-26T08:36:14Z", + "updated_at": "2024-04-18T06:02:10Z", + "author": "DanieleRomeo", + "comments": [ + { + "body": "Hi @DanieleRomeo! I don't know enough about the behaviors and tracking data, but there is nothing, theoretically, stopping you if you're tracking is solid and behavior is salient (e.g. human can tell the behaviours apart).\r\n\r\nWe've done a few fish projects lately, albeit single fish, and some general points:\r\n\r\nOut-of-the-box default SimBA calculates mainly variables related to movements, shapes and distances between animals and their body-parts to build the classifiers. So, out-of-the-box, you will easily be able to capture behaviors like “jolts”, while some other behaviors may be trickier with the default variables. For more complex fish behaviors we’ve had to concentrate further on circular statistics, documented [HERE](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#module-simba.mixins.circular_statistics), to get good models going and use these methods as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/extractFeatures.md), through the SimBA GUI. \r\n\r\nSo, it’s good to include two body parts for each animal in tracking which allows you to calculate circular statistics, e.g. head and swim bladder or whatever the equivalent is in your species as in image below (not just the perimeter of the fishes). If such body-parts are included, we can do more appropriate stats to capture what the human annotator is looking at. The image is from the side but I worked with it from above as well.\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/8f53c334-ddbd-4193-91f4-bd33875e058d)\r\n\r\nA caveat is that building your own feature extraction class in SimBA, and pick-and-mixing these statistics calculators, requires some moderate python skills which probably keep many people away (I haven’t yet thought of a user-friendly way to do it graphically). 
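To make the circular-statistics point above concrete: with two body-parts per animal (e.g., head and swim bladder), a framewise heading falls out of a single `arctan2`. A minimal sketch of the idea with toy coordinates - an illustration only, not SimBA's implementation:

```python
import numpy as np

def two_point_heading(anterior: np.ndarray, posterior: np.ndarray) -> np.ndarray:
    """Framewise heading in degrees (0-360) from posterior->anterior vectors."""
    d = anterior - posterior                        # (n_frames, 2) x/y differences
    deg = np.degrees(np.arctan2(d[:, 1], d[:, 0]))  # range -180..180
    return (deg + 360.0) % 360.0                    # wrap into 0..360

head = np.array([[10.0, 10.0], [12.0, 11.0]])       # toy head coordinates per frame
bladder = np.array([[9.0, 10.0], [10.0, 10.0]])     # toy swim-bladder coordinates
print(two_point_heading(head, bladder))             # [ 0.  ~26.57]
```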
If python is the barrier, I am happy to help put together and explain the code.\r\n\r\nLastly, we do not have domain knowledge about fish behavior, and the [calculators](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#module-simba.mixins.circular_statistics) have been written under guidance from people with such knowledge telling us what they need. If you find something missing, that you want calculated, let us know. \r\n\r\nThanks! \r\n\r\nSimon ", + "created_at": "2023-09-26T13:50:44Z", + "author": "sronilsson" + }, + { + "body": "Dear Simon,\r\n\r\nThank you so much for your reply! \r\n\r\nI am trying to use SimBA and I am really glad to see it is super user friendly! \r\nI have managed to import my BORIS tracking and my H5 file from DLC.\r\nAs you mention in your comment, I see the feature extraction is based on mice, therefore I was trying to adapt this script to zebrafish (https://github.com/sgoldenlab/simba/blob/072963b338eb4d59cf9cb8738361dc9180ebf72f/misc/fish_feature_extraction_092221.py#L15).\r\nUnfortunately, my python skills are super basic, and I would like to ask if you think it is a good idea to adapt this script and if you could help me. In my tracking file I have 2 animals with 8 bodyparts. \r\n\r\nIn this script, I changed the body parts to the ones I used for my tracking, but when I try to run it in the \"Extract Features\" section, I get errors that there are no modules named \"simba.rw_dfs\" or \"simba.drop_bp_cords\".\r\n\r\nThank you so much for your help!\r\n\r\nDaniele \r\n", + "created_at": "2024-04-02T07:11:29Z", + "author": "DanieleRomeo" + }, + { + "body": "
\r\n", + "created_at": "2024-04-03T12:29:27Z", + "author": "sronilsson" + }, + { + "body": "Not for now, but in case I forget.. for the next batch :) it can be good to crop the videos around the tank prior to running it through pose estimation. If the videos are cropped around the tank, then (i) you will save a lot of space as videos are smaller, and its quicker to create visualizations, and (ii) the pose-estimation is likely containing fewer errors, as objects and odd things not contained in the tank can't be mistaken as body-parts, and (iii) we can easily compute where the animals are in the tank relative to the top/bottom/left/right walls of the tank as those are also represented by the top/bottom/left/right edges of the image\r\n\r\n\r\n\"image\"\r\n", + "created_at": "2024-04-03T15:14:05Z", + "author": "sronilsson" + }, + { + "body": "Yes, I am investigating two behaviours:\r\nThe interaction, when the two fish (the cleaner (that is, the elongated shape fish) and the client) are close to each other (the cleaner inspects closely or bites the skin of the client) and the cheating, when the cleaner bites the client and provokes a \"jolt\" (so a rapid movement away from the cleaner).\r\n\r\nYes, I will definitely pre-process the next batch, thank you so much for the suggestion and for the clarification! \r\n\r\n", + "created_at": "2024-04-04T08:54:29Z", + "author": "DanieleRomeo" + }, + { + "body": "Great thanks for that very helpful - those are rather salient and shoud be easy to catch. Stay with me though, won't be able to get to this immediately!", + "created_at": "2024-04-04T10:09:38Z", + "author": "sronilsson" + }, + { + "body": "Sure, let me know if you need any other information! and thank you again for your time! ", + "created_at": "2024-04-04T10:21:48Z", + "author": "DanieleRomeo" + }, + { + "body": "Sorry to bother again, I have just a quick question.\r\n\r\nIf I understood well, once I have my model I can process new videos. \r\nHowever, to do that, I'll always need the pose-estimation file for each videos before processing them through SimBA (I am using DLC, so H5 files), right? \r\n ", + "created_at": "2024-04-08T04:02:23Z", + "author": "DanieleRomeo" + }, + { + "body": "No problem @DanieleRomeo ! Yes, you obviously won’t need to train new behavioral models. But the pose estimation input data need to be created in some dedicated tool like DLC, it can’t currently be done within SimBA. \r\n\r\nI can see how this may be issues with your data - you have a lot of it - and you may run out of space and time as the number of videos grows and you want to do this at scale. Some notes below how to get around this! I have zero experience with these kinds of fish. But for what it is worth anyway, \r\n\r\nThe resolution and FPS are high. I see 59 images a second at more than 2x1.5k pixels. You can probably decrease both fps and resolution substantially, at least for for these behaviors - as long a you can see behavior by eye. E.g., if you can see the behaviors you are interested in at 25 fps by eye, then the DLC processing time will be more than halved, decrease resolution will also help.\r\n\r\n\r\nAlso I noticed glare:\r\n\r\n\"Pasted\r\n\r\nYou can probably get rid of some through cropping out the sides or positioning camera so sides of tank are more obscured. Another, last case option is to train the pose-estimation model what glare is and for it to ignore it.\r\n\r\nCombined with the glare and sometime not, there seems to be some identity switches? 
It would be good to minimize these by improving pose model in DLC\r\n\r\n\"Pasted\r\n\r\n\r\nI see a lot of missing body-parts. For LB_el, each body-part don’t have any tracking data in about half the frames (around 20K ish frames), probably caused by the animals swim in Z direction. I interpolated all these missing data using “Body-part: Nearest” option in SimBA during import. Meaning, if the animal is swimming in exclusively in Z direction, SimBA will think it is standing still instead of missing. Not ideal but will work, could probably be limited by a less depth in tank but not sure if that is viable. \r\n\r\n\r\nThere are 8 body-parts tracked per animal. I don’t know all the behaviors you may want to score, but probably don’t need that many, at least for the behaviors in the example project, at least locations could potentially be inferred post-hoc (e.g., body-part A is always [half-way in-between](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.feature_extraction_mixin.FeatureExtractionMixin.find_midpoints) body-part B and C etc). Generally it’s most important to track outer boundaries of animals. Also for example, the movements of body-parts can be collinear, e.g., the animal can’t move one without moving the other, if you have the movement information of one body-part you have it of the other, worth thinking of when you choose with body-parts to track. \r\n\r\nThere are sections in the beginning of video, human is in the frame noting the trial start etc. I’d clip that off before doing pose-estimation, we don’t want the models to try and use that when training a model. Ultimately, your behavior classifier will only be as good as the data going in, so it’s worth making sure the tracking is as good as it can be and process the data as quickly as possible. \r\n\r\n", + "created_at": "2024-04-08T11:26:09Z", + "author": "sronilsson" + }, + { + "body": "Thank you so much for your comments and explanation, you are very clear, so appreciated! \r\n\r\nI will cut, downsize and crop the videos to standardise everything and reduce the time to analyse them!\r\nI'll also create a new DLC model meeting your points, trying to improve the pose-estimation model, labelling more frames to avoid identity switching.\r\n\r\nI really cannot thank you enough, hope this is not taking too much of your time! \r\n", + "created_at": "2024-04-08T13:20:34Z", + "author": "DanieleRomeo" + }, + { + "body": "Before creating a new DLC model, I am wondering if I change the body-parts like the one in picture to optimize the shape of the fish, will the script still be able to work? Or it would be specific for the body parts I previously sent? \r\n\r\n![Immagine WhatsApp 2024-03-18 ore 15 38 05_ddf6cc30](https://github.com/sgoldenlab/simba/assets/144647042/1fb9a254-0dd2-4d08-b692-809c89b22dec)\r\n\r\n\r\n", + "created_at": "2024-04-09T05:42:47Z", + "author": "DanieleRomeo" + }, + { + "body": "Yeah that will work! I will send you some descriptions later today of whats computed - if you have the image where the body-part names are annotate that would help so I know what is what", + "created_at": "2024-04-09T11:43:49Z", + "author": "sronilsson" + }, + { + "body": "I wrote this snippet to compute the below. You can use it in the GUI as you did with the earlier one that failed (after unzipping). We need to update the body-part names at top when you got a new pose-mode going. 
\r\n\r\n[two_fish_feature_extractor_040924.py.zip](https://github.com/sgoldenlab/simba/files/14919809/two_fish_feature_extractor_040924.py.zip)\r\n\r\nIt computes: \r\n\r\nHow the animals move on the X relative to the Y axis - [description](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.feature_extraction_supplement_mixin.FeatureExtractionSupplemental.rolling_horizontal_vs_vertical_movement)\r\n\r\nVelocity and acceleration of each animal - [description](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.timeseries_features_mixin.TimeseriesFeatureMixin.acceleration)\r\n\r\nThe correlation between the two animals velocity and acceleration - icluding how each animal velocity is correlated with lagged versions of themselves and the other animal [description](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.statistics_mixin.Statistics.sliding_spearman_rank_correlation).\r\n\r\nThe direction of the animals [description](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.circular_statistics.CircularStatisticsMixin.direction_two_bps)\r\n\r\nInstantaneous angular velocities [description](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.circular_statistics.CircularStatisticsMixin.instantaneous_angular_velocity) and [instantaneous rotations](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.circular_statistics.CircularStatisticsMixin.rotational_direction)\r\n\r\nAnimal areas - [description](https://simba-uw-tf-dev.readthedocs.io/en/latestsimba.mixins.html#simba.mixins.feature_extraction_mixin.FeatureExtractionMixin.convex_hull_calculator_mp)\r\n\r\nAnimal body-part distances- [description](https://simba-uw-tf-dev.readthedocs.io/en/latest/simba.mixins.html#simba.mixins.feature_extraction_mixin.FeatureExtractionMixin.framewise_euclidean_distance)\r\n\r\nMost are computed in rolling time windows (min, max, median), so all in all about 150 measurements per frame. The behaviors from the way you describe mainly seem to be judged by the proximity of the animals and how much they move. \r\n\r\n**As I mentioned I don’t know these animals and behaviors, and often it is important to have domain knowledge of what exactly defines a jolt from a near-jolt etc, I am kind of guessing here lol. If you see anything in the docs or think of something that you know is a good proxy for your behaviors and annotations, we can add it to the code**\r\n", + "created_at": "2024-04-09T14:23:35Z", + "author": "sronilsson" + }, + { + "body": "Great, thank you!\r\n\r\nYes you are right, the interaction is when the two fish are close to each other while jolt is when the client move rapidly away from the cleaner (so should be a metter of position and acceleration). Therefore, with those parameters it should be able to detect both behaviours. 
\r\n\r\nThe list of the new body parts is this: \r\n- HeadTerminalMouth\r\n- HeadBasisUp\r\n- HeadBasisDown\r\n- TailBottomCorner\r\n- TailUpperCorner\r\n- TailJunctionBody\r\n- BodyMidUp\r\n- BodyMidDown\r\n\r\nSo I can just change the parts:\r\nMID_BODYPARTS = ['BodyMidUp_1', 'BodyMidUp_2']\r\nMOUTH_BODYPARTS = ['HeadTerminalMouth_1', 'HeadTerminalMouth_2']\r\nHEAD_MID = ['HeadBasisDown_1', 'HeadBasisDown_2']\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/144647042/304780fa-b9b2-497b-a1b0-8df5eac45d2d)\r\n", + "created_at": "2024-04-10T04:31:50Z", + "author": "DanieleRomeo" + }, + { + "body": "Hello,\r\n\r\nSorry to bother again! \r\n\r\nI am having trouble to let the script work! When I try to run it I have this error, do you know how I can fix it? \r\n\r\nI am using the v. 1.87.4\r\n\r\nThank you again! \r\n\r\nException in thread Thread-1:\r\nTraceback (most recent call last):\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/site-packages/simba/SimBA.py\", line 1456, in run_feature_extraction\r\n custom_feature_extractor.run()\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/site-packages/simba/utils/custom_feature_extractor.py\", line 220, in run\r\n spec.loader.exec_module(user_module)\r\n File \"\", line 678, in exec_module\r\n File \"\", line 219, in _call_with_frames_removed\r\n File \"/home/fishlab4/Desktop/ScriptSimba/two_fish_feature_extractor_040924.py/two_fish_feature_extractor_040924.py\", line 154, in \r\n feature_extractor.run()\r\n File \"/home/fishlab4/Desktop/ScriptSimba/two_fish_feature_extractor_040924.py/two_fish_feature_extractor_040924.py\", line 77, in run\r\n movement_autocorrelation = TimeseriesFeatureMixin.sliding_two_signal_crosscorrelation(x=self.results[f'framewise_metric_movement_{ANIMAL_NAMES[0]}'].values.astype(np.float64), y=self.results[f'framewise_metric_movement_{ANIMAL_NAMES[1]}'].values.astype(np.float64), windows=ROLL_WINDOWS_VALUES_S, sample_rate=float(self.fps), normalize=True, lag=self.fps)\r\nAttributeError: type object 'TimeseriesFeatureMixin' has no attribute 'sliding_two_signal_crosscorrelation'", + "created_at": "2024-04-15T09:45:03Z", + "author": "DanieleRomeo" + }, + { + "body": "No problem - there are two ways to fix it, either:\r\n\r\ni) Update SimBa with `pip install simba-uw-tf-dev --upgrade`, or\r\n\r\nii) Comment out lines 77-79 in the python code I sent:\r\n\r\n\"image\"\r\n\r\n\r\nI'd suggest doing the first option though first.\r\n\r\n\r\nlet me know how it goes.", + "created_at": "2024-04-15T12:49:34Z", + "author": "sronilsson" + }, + { + "body": "Yes it is working! I am using the 1.90.2 and the script worked! \r\n\r\nThank you so much! 
", + "created_at": "2024-04-16T09:45:39Z", + "author": "DanieleRomeo" + }, + { + "body": "Great, just let me know if any others issue comes up!", + "created_at": "2024-04-16T11:41:59Z", + "author": "sronilsson" + }, + { + "body": "Hey,\r\n\r\nYes I am having another problem, sorry!\r\n\r\nOnce I have the model and I am trying to create a path plot in \"Visualization\",but I am having this error:\r\n\r\nsimba.utils.errors.InvalidInputError: SIMBA VALUE ERROR: input_style_attr requires (,), got \r\n\r\nAnd if I try to modify the style setting putting \"milliseconds\" and \"Max Prior Lines\" as 2000 to have only the path of the last 2 seconds, I got this other error:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/tkinter/_init.py\", line 1705, in __call_\r\n return self.func(*args)\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/site-packages/simba/ui/pop_ups/path_plot_pop_up.py\", line 247, in \r\n command=lambda: self.__create_path_plots(multiple_videos=False),\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/site-packages/simba/ui/pop_ups/path_plot_pop_up.py\", line 593, in __create_path_plots\r\n path_plotter.run()\r\n File \"/home/fishlab4/anaconda3/envs/Simba/lib/python3.6/site-packages/simba/plotting/path_plotter.py\", line 402, in run\r\n position_2 = self.deque_dict[animal_name][\"deque\"][i + 1]\r\nIndexError: \r\n(https://github.com/sgoldenlab/simba/assets/144647042/7b90f7e7-8dd0-4ff5-b391-65ceaa1e25b5)\r\ndeque index out of range\r\n\r\nHowever, I can create a video of the path plot only if I select \"Entire Video\", but in this way the video created track the path of the fish in all the video without disappearing, and so always overlapping with previous track.\r\n\r\nDo you have any suggestion? \r\n\r\nThank you so much \r\n\r\n\r\n![Immagine WhatsApp 2024-04-17 ore 16 36 58_5b9258fb](https://github.com/sgoldenlab/simba/assets/144647042/eae48f17-86cc-4414-b13f-30a77db7b7e7)\r\n", + "created_at": "2024-04-17T08:49:24Z", + "author": "DanieleRomeo" + }, + { + "body": "Thanks @DanieleRomeo ! You caught me.. I was just working on optimizing these functions and fix some bugs that cause the first images when creating videos using multiprocessing to look a little off. I will look into this one too and let you know when fixed - and if you could test it on your end after that would be super helpful.", + "created_at": "2024-04-17T12:53:42Z", + "author": "sronilsson" + }, + { + "body": "@DanieleRomeo - if you do a `pip install simba-uw-tf-dev --upgrade` and get version 1.90.4 - how does the path plots run on your end?", + "created_at": "2024-04-17T20:02:51Z", + "author": "sronilsson" + }, + { + "body": "Ahah great! Yes it is working perfectly now, thank you so much again! ", + "created_at": "2024-04-18T06:02:09Z", + "author": "DanieleRomeo" + } + ] + }, + { + "title": "Missing one animal tracking", + "body": "Hello,\r\n\r\nI'm using SimBA 1.73.2, on windows, from H5 DLC files.\r\n\r\nI'm tracking pairs of mice interacting in 2 adjacent and connected compartments during 600 sec (mice travel from one to the other compartment). To measure the amount of time each mouse has been tracked, I calculate the sum of the time this mouse spent in each compartment. For all my animals, but one, I indeed get around 600 sec. But for one animal I have 0 sec tracked in each compartment. I precise that the other mouse of this pair is tracked for 600 sec as expected. \r\n\r\nOn DLC, each animal is detected. 
\r\n\r\nOn SimBA I run:\r\nInterpolation: Bodyafter quadratic\r\nSmoothing: Savitzky Golay = 400\r\nOutlier correction: 1 and 2 for movement and location criterion, respectively, using tail baise and nose as referent BPs.\r\n\r\nWhen looking at the output csv files, it looks that both animals are well detecred. \r\nHere I attach the csv. files for this pair. The problematic animal is UM. LM is fine. \r\n\r\nCan you ideentify what cause the issue? \r\n\r\nThank you :)\r\n\r\n[Corrected_location-Hyb-B8(urine)-Ph1-T1.2-LS_MS.csv](https://github.com/sgoldenlab/simba/files/12701364/Corrected_location-Hyb-B8.urine.-Ph1-T1.2-LS_MS.csv)\r\n[Corrected_movement-Hyb-B8(urine)-Ph1-T1.2-LS_MS.csv](https://github.com/sgoldenlab/simba/files/12701365/Corrected_movement-Hyb-B8.urine.-Ph1-T1.2-LS_MS.csv)\r\n[INPUT_CSV-Hyb-B8(urine)-Ph1-T1.2-LS_MS.csv](https://github.com/sgoldenlab/simba/files/12701367/INPUT_CSV-Hyb-B8.urine.-Ph1-T1.2-LS_MS.csv)\r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-09-22T13:46:40Z", + "updated_at": "2023-09-25T11:32:33Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - yes I can see the issue, thanks for sharing the CSV file. \r\n\r\nLooking at the body-part locations for `UM`, appears to be stuck in a single location throughout the video (which is the first frame of your DLC tracking data):\r\n\r\n```\r\nUM_Ear_left\r\nUM_Center\r\nUM_Lat_left\r\nUM_Lat_right_1\r\n```\r\n\r\nSome of these body-parts are stuck on large pixel value potentially at the edge of the video (e.g., UM_Center) and also outside of your ROI drawings (pixel 2413, 1967) - and could cause 0 values for entire video. \r\n\r\nSomething appears to have happened during initial few seconds of tracking which messes up the outlier correction in this video - I can see for example that the DLC tracked UM center (and other body-parts) jumps thousand pixels just during the first few frames of the video: \r\n\r\n\"image\"\r\n\r\nCould there be an experimenter hand in the video or camera moving after the recording has started? \r\n\r\nIf you clip out the first 1s of the video, then run it through DLC and import it to your SimBA project again, does that fix it? \r\n \r\n\r\n", + "created_at": "2023-09-22T14:28:11Z", + "author": "sronilsson" + }, + { + "body": "Thank you for answering. \r\n\r\nNo hand on the video, but indeed on the first frame the tracking of UM (purple dots) is bad: \r\n![Frame1](https://github.com/sgoldenlab/simba/assets/66886884/0d0d3134-0519-4085-9e01-04b024e91fda)\r\n\r\n\r\nBut then later on the video, it looks that both animals are well tracked... or I'm wrong?\r\n\r\n![FrameLater](https://github.com/sgoldenlab/simba/assets/66886884/ec89a158-eea7-485e-88ef-77cfbd90df6c)\r\n\r\n", + "created_at": "2023-09-22T14:53:18Z", + "author": "DorianBattivelli" + }, + { + "body": "No it does look well tracked - I'd say from frame number 5 and onwards. Not entirely sure why it causes the error: but those initial huge movements in the first 5 frames will be used to calculate the mean movement of the animal in the frames and influence the outlier criterion. ", + "created_at": "2023-09-22T15:02:54Z", + "author": "sronilsson" + }, + { + "body": "Makes sense, I trimed the first sec, and run analysis. I keep you posted, \r\n\r\nThanks! 
", + "created_at": "2023-09-22T15:04:15Z", + "author": "DorianBattivelli" + }, + { + "body": "Cheers let me know!", + "created_at": "2023-09-22T15:07:45Z", + "author": "sronilsson" + }, + { + "body": "So, the results of this video is better but still problematic, cause UM is tracked 174 sec out of 600.\r\nHere the files for this new outcome\r\n[Hyb-B8(urine)-Ph1-T1.2-LS_MS.csv](https://github.com/sgoldenlab/simba/files/12706507/Hyb-B8.urine.-Ph1-T1.2-LS_MS.csv)\r\n[Hyb-\r\n[Hyb-B8(urine)-Ph1-T1.2-LS_MS.csv](https://github.com/sgoldenlab/simba/files/12706509/Hyb-B8.urine.-Ph1-T1.2-LS_MS.csv)\r\nB8(urine)-Ph1-T1.2-LS_MS.csv](https://github.com/sgoldenlab/simba/files/12706508/Hyb-B8.urine.-Ph1-T1.2-LS_MS.csv)\r\n\r\nDo you see what is wrong / what fix I could try?\r\nThank you!", + "created_at": "2023-09-23T13:10:37Z", + "author": "DorianBattivelli" + }, + { + "body": "I will take a look but might take till the beginning of next week - if you visualize the ROI tracking, does that give any idea of what's going on?", + "created_at": "2023-09-23T14:24:26Z", + "author": "sronilsson" + }, + { + "body": "So running analysis without outlier correction solved the issue, and when I visualize ROI tracking, I find it good. I'll do this to fix tracking with this pair, \r\n\r\nThank you for the tips!\r\nBest,", + "created_at": "2023-09-25T11:32:33Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Successful but then empty CSV upon analysing ROI data", + "body": "**Describe the bug**\r\nWhen analysing ROI data, product is empty csv files\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'ROI'\r\n2. Click on 'Analyze ROI Data: Aggregates'\r\n3. Initial error:\r\n![Screenshot 2023-09-08 161956](https://github.com/sgoldenlab/simba/assets/144468935/54b741b4-6af7-4ee6-8fcf-94c75fed8acd)\r\n\r\n4. Manually modified video names in the video_info files to match the shortened video names. \r\n5. No error given but empty csv files. Also, to check the ROI selection, I need to open csv before manually shortening it.\r\n\r\n![Screenshot 2023-09-08 162154](https://github.com/sgoldenlab/simba/assets/144468935/97382d40-2636-443e-9690-9e1e1ab11610)\r\n\r\n**Expected behavior**\r\nFilled ROI analysis data in csv files, which will allow me to analyse data and also to visualise video. When I try to visualise the video, potentially due to the empty csv, this error is given.\r\n![Screenshot 2023-09-08 162301](https://github.com/sgoldenlab/simba/assets/144468935/aed0b496-c1e7-4e59-8b63-a1eedeb7c2e9)\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11 Enterprise\r\n - Python Version Python 3.10.10\r\n - Are you using anaconda? Yes\r\n\r\n", + "user": "neurorie", + "reaction_cnt": 0, + "created_at": "2023-09-08T15:30:50Z", + "updated_at": "2023-09-11T16:57:11Z", + "author": "neurorie", + "comments": [ + { + "body": "Hi @neurorie, thanks for reporting and the screengrabs, very helpful for troubleshooting.\r\n\r\nNot sure entirely what is happening but appears to be a mixup between the CSV file names in your project, and the video files names in the project. SimBA uses the names of the videos to get them accurately paired with the respective data files. 
\r\n\r\nLet's say I decide to draw ROIs for video named 10LfamDLC_resent_50_2023_JCRMSPRINT_2000labelled.mp4 and click draw here first:\r\n\r\n\"image\"\r\n\r\nAfter drawing, SimBA then saves all your drawing information for this video under the name \"10LfamDLC_resent_50_2023_JCRMSPRINT_2000labelled\" inside the `project_folder/logs/measures/ROI_definitions.h5` of your project. \r\n\r\nNext, you go on to click on `Analyze ROI Data: Aggregates`. SimBA loops over each CSV file you have in the `project_folder/csv/outlier_corrected_movement_location`. Let's say the first file is `10Lfam.csv`. SimBA then looks inside the `ROI_definitions.h5` for what kind of ROIs you have associated with `10Lfam`. In this case there will be no ROIs, because the ROIs you drew are saved under the name `10LfamDLC_resent_50_2023_JCRMSPRINT_2000labelled`. If this is the case for all videos, you end up with an empty CSV. \r\n\r\nLet me know if this could be the reason!\r\n\r\n", + "created_at": "2023-09-08T16:03:45Z", + "author": "sronilsson" + }, + { + "body": "Hello, thank you for the response!\r\n\r\nI have tried opening the H5 file with an online viewer and, as far as I can tell, there are no csv files. Or should I open it properly with a viewer and check the content?\r\n![Screenshot 2023-09-11 093057](https://github.com/sgoldenlab/simba/assets/144468935/0282eb5e-e656-4d9b-9f0c-dc70132d337e)\r\n\r\nIn the project_folder/csv/outlier_corrected_movement_location, I have the shortened names listed there with all csv files. Before, when it was working, I remember the shortened name was shown when drawing ROIs, so I am not sure why there is this discrepancy.", + "created_at": "2023-09-11T08:44:39Z", + "author": "neurorie" + }, + { + "body": "Hi @neurorie,\r\n\r\nTo open the `ROI_definitions.h5` file, you can use [THIS](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#extract-roi-definitions-to-human-readable-format) tool under the heading *Extract ROI definitions to human-readable format*.\r\n\r\nHowever, this will only allow you to view the information for the ROIs you have drawn (including the video names), not to edit it. \r\n\r\nI suggest this:\r\n\r\n(i) use the tool above to extract the ROI information, to confirm that you have drawn ROIs on video names, e.g., `10LfamDLC_resent_50_2023_JCRMSPRINT_2000labelled`.\r\n\r\n(ii) If true, then delete all your ROI definitions with [THIS](https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial_new.md#delete-all-roi-definitions-in-your-simba-project) button.\r\n\r\n(iii) Rename your video names to be in agreement with the file names: e.g., `10LfamDLC_resent_50_2023_JCRMSPRINT_2000labelled.mp4` is renamed to `10Lfam.mp4`. \r\n\r\n(iv) Redraw your ROIs and analyze your data. \r\n\r\nIf this is not possible (you have drawn so many ROIs that it would take ages to draw them again), let me know and I will give you code that renames the video names in the `ROI_definitions.h5` file. \r\n", + "created_at": "2023-09-11T12:56:57Z", + "author": "sronilsson" + }, + { + "body": "![Screenshot 2023-09-11 164046](https://github.com/sgoldenlab/simba/assets/144468935/e5723484-ec3d-4453-bccd-521d023a4d08)\r\nHello again, I cannot seem to find the button under the tools. 
My SimBA version is 1.62.3", + "created_at": "2023-09-11T15:45:21Z", + "author": "neurorie" + }, + { + "body": "If you update simba by typing `pip install simba-uw-tf-dev --upgrade` in the windows terminal, do you see it?", + "created_at": "2023-09-11T15:47:35Z", + "author": "sronilsson" + }, + { + "body": "Yes, perfect! I will try it now.", + "created_at": "2023-09-11T15:50:24Z", + "author": "neurorie" + }, + { + "body": "![image](https://github.com/sgoldenlab/simba/assets/144468935/ef0913f6-192e-46d1-8b00-d914ab6cbdc1)\r\nI got an error mid-way, but I can see some ROI info for rectangles, and it seems that I do need to delete the ROIs and rename the videos. For this, I can just change the names in the videos folder, right?\r\n", + "created_at": "2023-09-11T15:56:00Z", + "author": "neurorie" + }, + { + "body": "Thanks for posting the error, very helpful - the error is just a printing typo by me that I will fix now, and it does not affect the output. \r\n\r\nYes, just change the video file names directly in the `project_folder/videos` directory.", + "created_at": "2023-09-11T15:59:22Z", + "author": "sronilsson" + }, + { + "body": "It worked perfectly, thank you so much :) ", + "created_at": "2023-09-11T16:57:11Z", + "author": "neurorie" + } + ] + }, + { + "title": "Issues importing sleap data", + "body": "![error](https://github.com/sgoldenlab/simba/assets/70863857/68181865-b5cc-4ea5-9503-7bc070421d32)\r\nHi, I'm trying to start a new project and import labels from sleap. I tried importing as a .slp file and an H5 file I had. Neither worked and I get the following message. The video and predictions from sleap match, and I put both of these into a new folder that just has what I want to import into simba. Any ideas what is wrong? Also, why use a .slp file vs H5? I see in the sleap tutorial they say to export predictions as an H5.", + "user": "IsabelleSajonia", + "reaction_cnt": 0, + "created_at": "2023-08-30T19:05:32Z", + "updated_at": "2023-09-01T20:12:06Z", + "author": "IsabelleSajonia", + "comments": [ + { + "body": "Unrelated, but I had another question about labeling - when I'm creating a new project there does not seem to be a function to zoom in when I'm labeling body parts on my mouse. We record in somewhat large operant chambers, so the mouse is small in the frame, and this makes the body part nodes much too large to accurately label in Simba. Is there a way I can zoom in?", + "created_at": "2023-08-30T19:14:37Z", + "author": "IsabelleSajonia" + }, + { + "body": "Hi @IsabelleSajonia ! Thanks for posting the screengrab. The video you imported seems to be called `eating_3g_pellet_5min` (.mp4 or .avi..), but the data file you import seems to be called `Sleap_test_08172023.000_eating 3g pellet 5min.analysis`. SimBA needs a way of telling which data file goes with which video file; if you rename the files to have the same name, how does it look?\r\n", + "created_at": "2023-08-30T19:18:03Z", + "author": "sronilsson" + }, + { + "body": "When you create a new body-part configuration in SimBA, the image is used for visualization purposes only - it is an image to pair with your pose-estimation body-part schema to show in the menus. It is not used for any calculations, so you do not have to be exact - you can click anywhere!", + "created_at": "2023-08-30T19:19:45Z", + "author": "sronilsson" + }, + { + "body": "Ah yes, changing the names to match fixed this! And the visualization makes sense - I just wanted to make sure this wouldn't cause issues. 
Thank you.", + "created_at": "2023-08-30T19:43:26Z", + "author": "IsabelleSajonia" + }, + { + "body": "One last question (sorry I'm just starting to use this and troubleshooting), when I try to extract features I see the following error message:\r\n![error2 (1)](https://github.com/sgoldenlab/simba/assets/70863857/014987f1-d4ec-4639-8204-152d62884dd3)\r\n\r\nThe outlier csv is in the project folder and I haven't moved anything around but the csv does look empty. I'm not sure what the problem is.", + "created_at": "2023-08-30T20:13:20Z", + "author": "IsabelleSajonia" + }, + { + "body": "Got it - Before extracting features, SimBA wants you to indicate how or if you want to correct outliers:\r\n\r\n\"image\"\r\n\r\nAnd documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-4-outlier-correction) in Step 4. Once you have performed (or indicated to skip outlier correction), the directory `project_folder/csv/outlier_corrected_movement_location` will be populated with CSV files where the outliers are corrected (or skipped if clicked `SKIP`). Before you do this step, the `project_folder/csv/outlier_corrected_movement_location` is empty. The feature extraction looks inside of the `project_folder/csv/outlier_corrected_movement_location` directory for files to extract features for, in your case it seems to find none. \r\n\r\nEDIT: Sorry just saw that you performed outlier correction... do you see any files inside the `project_folder/csv/outlier_corrected_movement_location` directory or the `project_folder/csv/outlier_corrected_movement` folders? \r\n", + "created_at": "2023-08-30T20:25:34Z", + "author": "sronilsson" + }, + { + "body": "No, this directory is empty even though I did run outlier correction and got the \"complete\" message.", + "created_at": "2023-08-31T14:12:29Z", + "author": "IsabelleSajonia" + }, + { + "body": "I'm trying outlier correction again now and I get the messages \"outlier correction settings updated\" and \"log for corrected \"movement outliers\" saved in project_folder/logs. Looking in my logs folder, I do see these csv files but they are empty (no data) other than the column headers (video, animal, body-part, etc.). I do see that my video_info csv is updated so that part did work!", + "created_at": "2023-08-31T14:17:44Z", + "author": "IsabelleSajonia" + }, + { + "body": "When you fix outliers, it is a two step process: first simba looks for any movement outliers and corrects them. Second, SimBA looks for a any location outliers and corrects them. The second part of is noticeably slower, especially if you have a lot of outliers, you can see the elapsed times:\r\n\r\n\"image\"\r\n\r\n\r\nIn your screengrab, it looks like the only outlier process that ran was the first step (movement outliers). If you check the main Windows terminal, do you see any error msg being printed after the movement outliers are complete? ", + "created_at": "2023-08-31T14:22:37Z", + "author": "sronilsson" + }, + { + "body": "![image](https://github.com/sgoldenlab/simba/assets/70863857/b26451f1-6d19-42c7-9b23-332fec0d9c0b)\r\nI see this error in my terminal. ", + "created_at": "2023-08-31T14:28:28Z", + "author": "IsabelleSajonia" + }, + { + "body": "Do you see any CSV files inside the `project_folder/input_csv` directory?", + "created_at": "2023-08-31T14:36:58Z", + "author": "sronilsson" + }, + { + "body": "I do not see anything in that directory ", + "created_at": "2023-08-31T14:38:32Z", + "author": "IsabelleSajonia" + }, + { + "body": "👍🏻 Alright. 
Let's take a step back. The `project_folder/input_csv` directory is the location where the data ends up once it has been imported into SimBA. There should be a file in there named something like `eating_3g_pellet_5min.csv`. When you are importing your SLEAP data into SimBA, did you see any errors in the Windows terminal? ", + "created_at": "2023-08-31T14:40:56Z", + "author": "sronilsson" + }, + { + "body": "Ok yes, I think that is the issue. I'm trying to import my H5 again and I get the following: \r\n![image](https://github.com/sgoldenlab/simba/assets/70863857/6faffa68-5cba-48fa-bc90-3e895ea221de)\r\n\r\nI am selecting a folder on my desktop that just has the H5 file and the video clip that goes with it (both named the same thing).", + "created_at": "2023-08-31T14:46:10Z", + "author": "IsabelleSajonia" + }, + { + "body": "Alright, do you happen to have a `.slp` file as well with the predictions?\r\n\r\nI noticed earlier that the `H5` file had a suffix with `.analysis`, which I am not too familiar with, so I just want to make sure the H5 data file actually contains the pose-estimation predictions for the video frames and is not something else related to your SLEAP project. ", + "created_at": "2023-08-31T15:01:20Z", + "author": "sronilsson" + }, + { + "body": "![image](https://github.com/sgoldenlab/simba/assets/70863857/55f2ed23-6b80-434c-9772-7c0c295b336a)\r\nOk, I tried the .slp file (and confirmed there are predictions in sleap) and now it is going through the frames, but I get this error when it reaches the last one. ", + "created_at": "2023-08-31T16:29:10Z", + "author": "IsabelleSajonia" + }, + { + "body": "Almost there! :) \r\n\r\nWhen you selected the type of sleap file you are importing, did you select the `.SLP` kind from the dropdown? \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/c287144b-1046-495a-9b69-e9973c51bbcb)\r\n\r\n\r\n\r\n", + "created_at": "2023-08-31T17:07:02Z", + "author": "sronilsson" + }, + { + "body": "yes I did!", + "created_at": "2023-08-31T17:28:46Z", + "author": "IsabelleSajonia" + }, + { + "body": "Interesting, can you help with two things - if you do `pip show simba-uw-tf-dev` in the Windows terminal, which version number do you see? \r\n\r\nIs there any way you can share the `h5` file (not the slp file) it failed with first, and I can take a look what's going on? Maybe through a gdrive or drop it here in the chat if compressed to a zip file? ", + "created_at": "2023-08-31T17:44:36Z", + "author": "sronilsson" + }, + { + "body": "[eating_3g_pellet_5_min.zip](https://github.com/sgoldenlab/simba/files/12488937/eating_3g_pellet_5_min.zip)\r\n[eating_3g_pellet_5_min (2).zip](https://github.com/sgoldenlab/simba/files/12488938/eating_3g_pellet_5_min.2.zip)\r\nThe first is the compressed h5 file and the second is the .slp file. And I'm seeing that it's version 1.71.6", + "created_at": "2023-08-31T17:53:33Z", + "author": "IsabelleSajonia" + }, + { + "body": "Thanks! I will take a look - in the meantime, you could try and update simba with `pip install simba uw-tf-dev --upgrade`, I think the latest version is 1.72.2, to confirm that you also see the same error. ", + "created_at": "2023-08-31T17:55:17Z", + "author": "sronilsson" + }, + { + "body": "I get the error \"could not find a version that satisfies the requirement\" with that line. 
For reference, I installed simba a week or two ago following the anaconda installation doc", + "created_at": "2023-08-31T18:01:26Z", + "author": "IsabelleSajonia" + }, + { + "body": "Do you have a screengrab of that error msg?", + "created_at": "2023-08-31T18:03:14Z", + "author": "sronilsson" + }, + { + "body": "![error2](https://github.com/sgoldenlab/simba/assets/70863857/840628c3-0599-4bb7-aace-65a259478ea9)\r\n", + "created_at": "2023-08-31T18:05:27Z", + "author": "IsabelleSajonia" + }, + { + "body": "gosh sorry forgot a hyphen `pip install simba-uw-tf-dev --upgrade`\r\n\r\nBut, I was able to replicate your error! So I can figure out what is going on.", + "created_at": "2023-08-31T18:10:37Z", + "author": "sronilsson" + }, + { + "body": "The error comes from SLEAP normally having an entry called `track_names`; for whatever reason, your slp file has no track names. I haven't seen this before.. so we have to insert some code to handle that case. \r\n\r\nCan I also ask for the SLP file? ", + "created_at": "2023-08-31T18:16:26Z", + "author": "sronilsson" + }, + { + "body": "... and just fyi, watch out for your last body part (tail_3?), it doesn't seem to be tracked very well - a lot of missing values.", + "created_at": "2023-08-31T18:21:31Z", + "author": "sronilsson" + }, + { + "body": "I think the second zipped file I sent you is a .slp file with predictions. And yes, I'm having issues tracking the tail! Our operant chamber has a white floor and it blends in with the tip of this mouse's tail (I struggle to label it myself) so I may need to rethink my skeleton ", + "created_at": "2023-08-31T18:31:14Z", + "author": "IsabelleSajonia" + }, + { + "body": "Ah yes of course, let me just make sure it works with that file too and then I will give you updated code. \r\n", + "created_at": "2023-08-31T18:33:18Z", + "author": "sronilsson" + }, + { + "body": "If you run `pip install simba-uw-tf-dev --upgrade`, giving you version `1.72.3`, how does it look on your end?\r\n\r\n\r\nAnd I suggest importing the H5 instead of the slp file - it is quicker. ", + "created_at": "2023-08-31T18:38:45Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Question about body parts", + "body": "Hi,\r\n\r\nI'm very excited to use simba for our behavior analysis, but I'm also very new to using it. I've read in your descriptions that 8 body parts per animal are recommended; however, my group wants to track many more body parts. Our videos have 3 mice, each with 19 body parts tracked in maDLC. Would you still recommend using Simba like this, or trying to track fewer points per animal? \r\n\r\nThank you so much!\r\n", + "user": "lxspedeza", + "reaction_cnt": 0, + "created_at": "2023-08-22T08:53:23Z", + "updated_at": "2023-08-22T13:59:01Z", + "author": "lxspedeza", + "comments": [ + { + "body": "Hi! You should be good! \r\n\r\nNOTE: The body-parts you track will depend on the types of behaviors that you are interested in: if you can see yourself wanting to score tail rattles, the tail-tip would be a good body-part to know the location of, but if you're scoring freezing, the tail-tip location may be redundant. If you don't know exactly what behaviors you are interested in, you can do a \"brute force\" approach and track many body-parts that cover all the behaviors you could possibly foresee being classified. 
Tracking many body-parts comes with costs though: some body-parts may be more difficult to track than others, resulting in a lot of missing values for frames where the body-part can't be found in pose-estimation, or ID switches between animals. This can cause issues when SimBA, for example, tries to calculate the sizes of the animals, animal mean or aggregate speeds and movements, etc. SimBA can interpolate these missing body-parts, but it will suffer if there are a lot of them for long periods. Another drawback is run-time and disk space: if you have a lot of videos and a lot of body-parts, it will take longer to analyze your data and the files will start to take up a lot of space, when you could get away with less time and less disk space. General rule: track the body-parts whose locations - and ultimately movements, velocities, etc. - you need to know when you are scoring the behaviors manually.", + "created_at": "2023-08-22T13:58:12Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Inaccurate FPS extracted from video in Batch Process videos", + "body": "**Describe the bug**\r\nAfter batch processing many videos for clipping, I realized the processed videos were shorter than expected by a few seconds. I then realized this is due to an inaccurate extraction of the video file's FPS **for videos with a float FPS**. For instance, these videos have an FPS of 29.97, but Simba detects them as 29 FPS.\r\n\r\nTo the best of my knowledge, this is due to...\r\nhttps://github.com/sgoldenlab/simba/blob/97542ecc3e7f21f6698fe0ad8cc4f9167c53dc1a/simba/utils/read_write.py#L405C43-L405C43\r\n```python\r\n    video_data[\"fps\"] = int(cap.get(cv2.CAP_PROP_FPS))\r\n```\r\n...where the FPS value returned by `cv2` is transformed to `int`.\r\n\r\nI suppose there may be a good reason why it is transformed to an integer, so would it actually be reasonable to keep the float as returned by `cv2`? I know the video FPS is used throughout Simba so I would understand if that causes a lot of issues everywhere else.\r\n\r\nJust wondering if not using the `int` transformation would be reasonable.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```python\r\n>>> import cv2\r\n>>> cap = cv2.VideoCapture(video_path)\r\n>>> cap\r\n\r\n>>> cap.get(cv2.CAP_PROP_FPS)\r\n29.97002997002997\r\n>>> int(cap.get(cv2.CAP_PROP_FPS))\r\n29\r\n```\r\n\r\n**Expected behavior**\r\nThe exact FPS could be used instead of its `int`\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Win10\r\n - Python Version [e.g. 3.6.0]: Python 3.6.13 |Anaconda, Inc.| (default, Mar 16 2021, 11:37:27) [MSC v.1916 64 bit (AMD64)] on win32\r\n - Are you using anaconda? yes\r\n - Simba version: 1.71.6\r\n \r\n", + "user": "florianduclot", + "reaction_cnt": 0, + "created_at": "2023-08-18T22:48:51Z", + "updated_at": "2023-08-23T17:33:40Z", + "author": "florianduclot", + "comments": [ + { + "body": "Hi @florianduclot !\r\n\r\nYes, this is something that has been bugging me a little, and I am not entirely sure how to approach it.. the `int` in get_video_meta_data is there as a fix (a long time ago) as I had issues passing float fps values to OpenCV VideoWriter - it seemed to want an int; I don't know if that is the case anymore.\r\n\r\nFor the batch pre-processing, removing the `int` type in `get_video_meta_data` should work; OpenCV is never called in the batch pre-processing and FFmpeg is called directly. 
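\r\n\r\nFor illustration, the exact float fps can be read straight from ffprobe (a quick sketch only - `video.mp4` is a placeholder path):\r\n\r\n```python\r\nimport subprocess\r\n\r\n# Ask ffprobe for the average frame rate; it returns a fraction like '30000/1001'.\r\nout = subprocess.check_output([\r\n    'ffprobe', '-v', 'error', '-select_streams', 'v:0',\r\n    '-show_entries', 'stream=avg_frame_rate',\r\n    '-of', 'default=noprint_wrappers=1:nokey=1', 'video.mp4',\r\n]).decode().strip()\r\nnum, den = out.split('/')\r\nprint(float(num) / float(den))  # e.g. 29.97002997002997\r\n```\r\n\r\n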
If this is changed, also [THIS](https://github.com/sgoldenlab/simba/blob/97542ecc3e7f21f6698fe0ad8cc4f9167c53dc1a/simba/video_processors/batch_process_menus.py#L366C15-L366C52) should be changed to DoubleVar() (or StringVar in tkinter), and there is an integer unit check for fps [HERE](https://github.com/sgoldenlab/simba/blob/97542ecc3e7f21f6698fe0ad8cc4f9167c53dc1a/simba/video_processors/batch_process_menus.py#L585) that has to be changed to a float check rather than an integer check. \r\n\r\nThe trouble is if changing `get_video_meta_data` will cause other issues elsewhere **if** ints are indeed needed where opencv is called, I don't have enough test cases to comfortably make this change. One potential solution: We could do this below, and pass `fps_as_int = False` in batch preprocess? It won't affect anything where opencv is called, and we can change it later in the rest of the code if we find that float fps's work? Let me know what you think!\r\n\r\n```\r\ndef get_video_meta_data(video_path: Union[str, os.PathLike],\r\n                        fps_as_int: bool = True) -> dict:\r\n    \"\"\"\r\n    Read video metadata (fps, resolution, frame cnt etc.) from video file (e.g., mp4).\r\n\r\n    :parameter str video_path: Path to a video file.\r\n    :parameter bool fps_as_int: If True, force video fps to int through floor rounding, else float. Default = True.\r\n    :return dict: Video file meta data.\r\n\r\n    :example:\r\n    >>> get_video_meta_data('test_data/video_tests/Video_1.avi')\r\n    {'video_name': 'Video_1', 'fps': 30, 'width': 400, 'height': 600, 'frame_count': 300, 'resolution_str': '400 x 600', 'video_length_s': 10}\r\n    \"\"\"\r\n\r\n    video_data = {}\r\n    cap = cv2.VideoCapture(video_path)\r\n    _, video_data['video_name'], _ = get_fn_ext(video_path)\r\n    video_data['fps'] = cap.get(cv2.CAP_PROP_FPS)\r\n    if fps_as_int:\r\n        video_data['fps'] = int(video_data['fps'])\r\n    .....\r\n    ....\r\n\r\n", + "created_at": "2023-08-19T15:02:08Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the additional information, @sronilsson , that is very helpful!\r\n\r\n> One potential solution: We could do this below, and pass `fps_as_int = False` in batch preprocess? It won't affect anything where opencv is called, and we can change it later in the rest of the code if we find that float fps's work? Let me know what you think!\r\n\r\nSounds like a good idea, at least as a temporary solution. If you haven't done so already, I'll try to implement that using the pointers you already listed and return back with a PR if that helps.\r\n\r\nPersonally, I'll use that to convert my FPS from 29.97 -> 29 so that the rest of the Simba pipeline won't be potentially using erroneous timestamps as well. Am I correct in thinking that this could lead to inaccurate mapping of START/STOP behavioral bout events onto frames (at least when using behavioral annotations imported from 3rd party applications)?", + "created_at": "2023-08-22T20:59:52Z", + "author": "florianduclot" + }, + { + "body": "@sronilsson: scratch that... it has nothing to do with the detection of FPS. My worry about the detection of FPS and the Simba pipeline as a whole still applies, though, so I would still be interested in your feedback on that point.\r\n\r\n**With regards to the batch processing of videos**\r\nI couldn't reproduce my bug on my laptop, where I do **not** have a GPU... The detection of fps by OpenCV is moot there (besides the display of FPS in the dialog window, but that's minor, I think) as everything is handled by `ffmpeg`, which correctly detects FPS as float. 
The problem I was having relates to the different commands generated depending on the `GPU` flag:\r\n- GPU is True: `ffmpeg ... -to time_difference ...`\r\n- GPU is False: `ffmpeg ... -t time_difference ...`\r\n\r\nBased on the ffmpeg docs, I would say this should ALWAYS be `-t`, as Simba computes and uses a time difference there and not a position.\r\nOr we can simply not calculate the time difference if it's not needed and just go with `-to` and use `end_time` straight from the user input. You probably know which one's the best of the two options here, I suppose.", + "created_at": "2023-08-22T22:34:34Z", + "author": "florianduclot" + }, + { + "body": "@florianduclot You are right, for the rest of the pipeline the values in the FPS column of the `project_folder/csv/video_info.csv` are used for non-plotting functions, which is specified in the video parameters pop-up in the GUI (simba.ui.video_info_ui). This pop-up is also asking for an integer IntVar FPS, so throughout, it is best to use integer FPS. It should be made clear in the docs.\r\n\r\nAnd thanks for looking at the FFMPEG commands! I will change it. One question, I don't have a GPU available so can't test. I can see that `-crf 17` is often in the CPU commands, but absent in the GPU commands. I think this comes from way back where a user raised an issue over the file size coming out of the batch-preprocess, so I decreased quality from the default `-crf 23`. But then I inserted the GPU flag and failed to do the same. If you run with the GPU codecs, have you ever bumped into any very large output video file sizes relative to the input video file size?\r\n\r\n\r\n \r\n\r\n ", + "created_at": "2023-08-23T12:45:18Z", + "author": "sronilsson" + }, + { + "body": "Thanks for your input on the use of the FPS data throughout the pipeline; I'll definitely convert my videos to the nearest int fps.\r\n\r\n> I can see that `-crf 17` is often in the CPU commands, but absent in the GPU commands. I think this comes from way back where a user raised an issue over the file size coming out of the batch-preprocess, so I decreased quality from the default `-crf 23`. But then I inserted the GPU flag and failed to do the same. If you run with the GPU codecs, have you ever bumped into any very large output video file sizes relative to the input video file size?\r\n\r\nI was actually just about to look into that so I'll get back to you once I have a better idea. Would you be opposed to having some sort of choice left to the user here? Leaving a default in place, of course, but it might be helpful for those preferring to specify the quality they desire.\r\n", + "created_at": "2023-08-23T14:31:53Z", + "author": "florianduclot" + }, + { + "body": "No, I wouldn't be opposed to it, as long as it doesn't confuse users too much. 
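\r\n\r\nFor instance, the percentage-to-CRF mapping could be as simple as this sketch (placeholder values only, linearly spanning the 13-23 CRF range mentioned just below):\r\n\r\n```python\r\n# Map user-facing quality percentages (100 = best) to FFmpeg CRF values (lower = better).\r\nCRF_LOOKUP = {pct: round(13 + (100 - pct) * (23 - 13) / 90) for pct in range(100, 0, -10)}\r\n# -> {100: 13, 90: 14, 80: 15, 70: 16, 60: 17, 50: 19, 40: 20, 30: 21, 20: 22, 10: 23}\r\n```\r\n\r\n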
Maybe something like a single \"VIDEO QUALITY\" column with dropdowns running from 10% to 100%, representing CRFs between 23 and 13, defaulting to 100%?\r\n\r\nI haven't done any testing, but presumably the crf (if below 23) should only be applied to the very last operation, as otherwise the video quality decrease would accumulate across multiple operations (fps, downsample etc..)?", + "created_at": "2023-08-23T14:43:05Z", + "author": "sronilsson" + }, + { + "body": "Here's a comparison for the same video processed with or without the GPU, using the current ffmpeg command (but including `-t` for the GPU command instead of `-to`):\r\n\r\nINPUT\r\nLength: 00:10:11\r\nWidth x Height: 1804 x 510\r\nTotal Bitrate: 2,283 kbps\r\nFPS: 29.97\r\nSIZE: 175,207,037 bytes\r\n\r\n\r\nOUTPUT with GPU\r\nLength: 00:05:00\r\nWidth x Height: 1804 x 510\r\nTotal Bitrate: 2,164 kbps\r\nFPS: 29.97\r\nSIZE: 81,387,078 bytes\r\ncommand: `ffmpeg -hwaccel auto -i \"in_path\" -ss 00:02:00 -t 0:05:00 -c:v h264_nvenc -async 1 \"out_path\"`\r\n\r\n\r\nOUTPUT with CPU\r\nLength: 00:05:00\r\nWidth x Height: 1804 x 510\r\nTotal Bitrate: 3,935 kbps\r\nFPS: 29.97\r\nSIZE: 147,912,482 bytes\r\ncommand: `ffmpeg -i \"in_path\" -ss 00:02:00 -t 0:05:00 -async 1 -qscale 0 -crf 17 -c:a copy \"out_path\"`\r\n\r\nThis makes sense given that `-crf 17` should be a higher quality than the default CRF of 23.\r\n\r\n~~I'll test with a CRF of 17 for both.~~ <- I didn't realize that option was not available for the `h264_nvenc` encoder.", + "created_at": "2023-08-23T15:30:15Z", + "author": "florianduclot" + }, + { + "body": "Ah I just saw that myself :) the GPU codecs seem to take a `-preset` argument which can be `fast` (low quality I presume), `medium` or `slow`. I think for most control, the `VIDEO QUALITY` dropdown options have to change from the 10-100% options to low-medium-high options depending on if the GPU checkbox is checked or not lol. \r\n\r\nthe alternative is to always have three options (low, medium, high) in the VIDEO QUALITY dropdowns and map those to three different CRFs if the GPU checkbox is not checked. But that doesn't give users as much control to take advantage of more quality levels if they run on CPU. ", + "created_at": "2023-08-23T15:52:12Z", + "author": "sronilsson" + }, + { + "body": "Would be good to get your feedback on this when you have time! If you upgrade through pip, the batch-preprocess has a quality dropdown all the way to the right (and a quickset at the top), that shows 100-10% if running on CPU, or low-medium-high if running on GPU.\r\n\r\nThe 100% to 10% if running on CPU maps to [THESE](https://github.com/sgoldenlab/simba/blob/f9b1472e754aa93706a0d31b193b08b45743a577/simba/utils/lookups.py#L342) CRF's, and the low, medium, and high map to [THESE](https://github.com/sgoldenlab/simba/blob/f9b1472e754aa93706a0d31b193b08b45743a577/simba/utils/lookups.py#L357) GPU presets. Let me know what you think of the CRF map, not sure if there's a better way or bigger/smaller strides. \r\n\r\nWhen clicking `EXECUTE` there are two more things stored in the json: the mapped quality, and the last operation. The quality defaults to medium or 23. Then, when the last operation runs, the quality changes to whatever the quality is in the json. \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-08-23T17:32:40Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Customize outlier correction", + "body": "Hello, \r\n\r\n(I'm using SimBA 1.71.6, on windows 10 with ma-DLC data). 
\r\n\r\nI'm facing a case where the central bp is reliably tracked but not the tail base, which is hidden half of the time. \r\nConsequently, running outlier correction modifies the tracking outcome dramatically:\r\n\r\nWithout outlier correction:\r\n![C57-B7-Urine-MS_US](https://github.com/sgoldenlab/simba/assets/66886884/562d5603-7494-4062-ba55-d2ded69dd13c)\r\n\r\nHere the problem is that I have a lot of straight lines, which certainly influence the final total locomotion (can you please confirm this?)\r\n\r\nSo I tried to apply outlier correction, but the criteria are too stringent:\r\n\r\nWith outlier correction (location = 2 ; movement = 2):\r\n![C57-B7-Urine-US_MS](https://github.com/sgoldenlab/simba/assets/66886884/d76d91f3-d24d-4f7a-8ec4-0a1aa5f1f92e)\r\n\r\nI suspect this could be due to the missing tail-base label on a lot of frames. Could this be true? How to deal with this? \r\nAlternatively, I could try with another pair of body parts....\r\n\r\nWhat seems to be the best strategy here according to you? \r\n\r\nFYI: In both scenarios I used interpolation \"nearest bp\" and smoothing Savitzky-Golay = 200 \r\nNote that my videos are either 5 or 10 fps. \r\n\r\nThank you for the support, \r\nBest,\r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-08-15T12:37:33Z", + "updated_at": "2023-09-05T13:50:56Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli! Yes, as you say: the straight lines are indicative of the central bp disappearing from the tracking data. Using interpolation nearest, it reappears some distance away from where it was last reliably seen, and a straight line is drawn between those two locations. The outlier correction, as you say, is too stringent (possibly) primarily due to the missing tail base tracking causing SimBA to remove a lot of movements that are actually true.\r\n\r\nThere are a few ways to fix this: \r\n\r\n(i) Rather than using \"nearest\" interpolation you can use quadratic or linear interpolation. As in the below image, you see you won't get those big jumps, but smoother lines filling in the missing points.\r\n\r\n\"image\"\r\n\r\n(ii) If you want to try custom outlier correction, you can, but I have not been able to put these functions in the GUI so all I can offer are jupyter notebook examples. \r\n\r\n[This notebook](https://simba-uw-tf-dev.readthedocs.io/en/latest/nb/outlier_correction.html) allows you to perform outlier correction individually: first it runs movement outlier correction, then location outlier correction. You just have to comment out the outlier correction that you don't want to perform in this cell:\r\n\r\n\"image\"\r\n\r\nIf you want to perform more advanced outlier correction, i.e., apply different rules to different body-parts or animals, and again have the option to skip different outlier correction steps, there is [THIS](https://simba-uw-tf-dev.readthedocs.io/en/latest/nb/advanced_outlier_correction.html) notebook example. \r\n\r\nI wrote these notebooks recently in response to [THIS](https://github.com/sgoldenlab/simba/issues/274) issue, you can check it to get some background. 
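\r\n\r\nCondensed, the first notebook boils down to something like this (the import path and `run()` signature here are from memory - treat them as assumptions and defer to the notebook):\r\n\r\n```python\r\n# Run ONLY the movement outlier correction step on a SimBA project.\r\nfrom simba.outlier_tools.outlier_corrector_movement import OutlierCorrecterMovement\r\n\r\nCONFIG_PATH = r'project_folder/project_config.ini'  # path to your own project config\r\nOutlierCorrecterMovement(config_path=CONFIG_PATH).run()\r\n```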
\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-08-15T13:48:49Z", + "author": "sronilsson" + }, + { + "body": "@DorianBattivelli did any of this help the straight lines?", + "created_at": "2023-08-23T13:47:47Z", + "author": "sronilsson" + }, + { + "body": "Sorry for the delay, quadratic interpolation did not solve the issue, and I have not yet tried Jupyter, because I'm not familiar with it. Rather, I tried different values for outlier correction from the GUI, and I think I have reached quite satisfying results. If I have a chance to try Jupyter customization, I'll let you know how it goes,\r\n\r\nThank you, \r\nBest,", + "created_at": "2023-08-24T06:15:32Z", + "author": "DorianBattivelli" + }, + { + "body": "👍🏻 Sounds good! An alternative is to fix the issue at the source by getting the tracking model to produce fewer missing or incorrect values, but as you say it may be overkill here", + "created_at": "2023-08-24T11:15:22Z", + "author": "sronilsson" + }, + { + "body": "Another question: where can I find the different parameters I used to process data (smoothing, interpolation, outlier correction criteria etc.)? I often do many trials to tune these settings as best I can, and sometimes I get confused about the values I used to generate my data. Can I find this information in some files of the simba project? ", + "created_at": "2023-08-24T11:45:33Z", + "author": "DorianBattivelli" + }, + { + "body": "That's a good point.. it doesn't currently store a log of the different methods and parameters that were executed at which times, so you'd have to keep tabs some other way. I will insert a session log that keeps track of it.", + "created_at": "2023-08-24T12:45:37Z", + "author": "sronilsson" + }, + { + "body": "Thanks, it would be very helpful! \r\n\r\n\r\nAnother point I still did not manage to solve: how to deal with extra videos? \r\nWhen I add extra videos / h5 files to an already existing project, unfortunately all processing (smoothing, interpolation and outlier correction) applies to all the videos (including the ones already analysed), which makes the process very long (especially when adding only a couple of new videos to a project containing 30 videos). \r\n\r\nI tried temporarily removing the videos and corresponding h5 files of the already-analyzed items from their folders, but then SimBA returns an error, probably because it cannot find the items listed in the video_info csv file. \r\n\r\n\r\nThank you for the support, \r\nBest,", + "created_at": "2023-08-25T07:12:53Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - sorry I missed this last msg. When clicking RUN OUTLIER CORRECTION, the code looks first inside the `project_folder/csv/input_csv` directory, performs movement outlier correction on all files in that directory, and stores the results in the `project_folder/csv/outlier_corrected_movement` directory. Then it looks inside the `project_folder/csv/outlier_corrected_movement` folder, performs location outlier correction on those files, and then stores the results inside the `project_folder/csv/outlier_corrected_movement_location` directory. If, say, you move files out of the `project_folder/csv/input_csv`, but not from the `project_folder/csv/outlier_corrected_movement` directory, you could get the errors you are seeing. 
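\r\n\r\nA quick way to spot such a mismatch is to compare the file names across the three directories (a rough sketch, assuming CSV file type):\r\n\r\n```python\r\nimport os\r\n\r\n# Compare file names across the outlier-correction pipeline directories.\r\ndirs = ['project_folder/csv/input_csv',\r\n        'project_folder/csv/outlier_corrected_movement',\r\n        'project_folder/csv/outlier_corrected_movement_location']\r\nnames = [{f for f in os.listdir(d) if f.endswith('.csv')} for d in dirs]\r\nprint('In input_csv but missing downstream:', names[0] - names[2])\r\n```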
\r\n\r\nThere is an archive function described [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario4_new.md#part-1-clean-up-your-previous-project--or-alternatively-create-a-new-project) - have you tried it? \r\n", + "created_at": "2023-09-01T19:56:43Z", + "author": "sronilsson" + }, + { + "body": "@DorianBattivelli - also, I added a logger to the projects that stores information on the methods you run and at which times. If you update SimBA and perform any function (say outlier correction), there is a text file at `project_folder/logs/project_log.log` that can look a bit like the attached file. \r\n\r\nIt can tell when you performed outlier corrections and which criterion and body-parts you used. E.g., these lines tell me the latest outlier correction criteria and body-parts I used: \r\n\r\n```\r\n2023-09-01T15:46:56Z|OutlierCorrecterMovement||CLASS_INIT||Criterion: 1.0, Body-parts {'Animal_1': {'bp_1': 'Ear_left_1', 'bp_2': 'Ear_right_1'}, 'Animal_2': {'bp_1': 'Ear_left_2', 'bp_2': 'Right_ear_2'}}\r\n2023-09-01T15:46:57Z|OutlierCorrecterMovement.stdout_success||complete||Log for corrected \"movement outliers\" saved in project_folder/logs\r\n2023-09-01T15:46:57Z|OutlierCorrecterLocation||CLASS_INIT||Criterion: 2.0, Body-parts {'Animal_1': {'bp_1': 'Ear_left_1', 'bp_2': 'Ear_right_1'}, 'Animal_2': {'bp_1': 'Ear_left_2', 'bp_2': 'Right_ear_2'}}\r\n2023-09-01T15:47:23Z|OutlierCorrecterLocation.stdout_success||complete||Log for corrected \"location outliers\" saved in project_folder/logs\r\n2023-09-01T15:47:23Z|SimbaProjectPopUp.stdout_success||complete||Outlier corrected files located in \"project_folder/csv/outlier_corrected_movement_location\" directory\r\n```\r\n\r\nPlease let me know if you find it useful or if something is missing!\r\n\r\n[project_log.log](https://github.com/sgoldenlab/simba/files/12501223/project_log.log)\r\n", + "created_at": "2023-09-01T20:04:29Z", + "author": "sronilsson" + }, + { + "body": "> @DorianBattivelli - sorry I missed this last msg. When clicking RUN OUTLIER CORRECTION, the code looks first inside the `project_folder/csv/input_csv` directory, performs movement outlier correction on all files in that directory, and stores the results in the `project_folder/csv/outlier_corrected_movement` directory. Then it looks inside the `project_folder/csv/outlier_corrected_movement` folder, performs location outlier correction on those files, and then stores the results inside the `project_folder/csv/outlier_corrected_movement_location` directory. If, say, you move files out of the `project_folder/csv/input_csv`, but not from the `project_folder/csv/outlier_corrected_movement` directory, you could get the errors you are seeing.\r\n> \r\n> There is an archive function described [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario4_new.md#part-1-clean-up-your-previous-project--or-alternatively-create-a-new-project) - have you tried it?\r\n\r\nProceeding as explained here solved the issue, thank you!\r\nThanks also for the upgrade with the logger :)", + "created_at": "2023-09-05T13:50:56Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "\"Create path plot\" tool not working", + "body": "Hi, \r\n\r\nthe \"Create path plot\" tool does not read .csv files well for me. It throws this error in the SimBA window:\r\n\r\n`SIMBA DATA HEADER ERROR: Body-part nose is not present in the data file. 
The body-parts available are: ['scor', 'DLC_resnet50_bottom_camera_FFEJun19shuffle1_200000.', 'DLC_resnet50_bottom_camera_FFEJun19shuffle1_2000', 'DLC_resnet50_bottom_camera_FFEJun19shuffle1_200000'] 🚨`\r\n\r\nAnd this error in the terminal:\r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Alisa\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\Alisa\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\pop_ups\\make_path_plot_pop_up.py\", line 32, in \r\n circle_size=circle_size.getChoices()))\r\n File \"C:\\Users\\Alisa\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\plotting\\ez_lineplot.py\", line 53, in __init__\r\n raise DataHeaderError(msg=f'Body-part {body_part} is not present in the data file. The body-parts available are: {body_parts_available}')\r\nsimba.utils.errors.DataHeaderError: Body-part nose is not present in the data file. The body-parts available are: ['scor', 'DLC_resnet50_bottom_camera_FFEJun19shuffle1_200000.', 'DLC_resnet50_bottom_camera_FFEJun19shuffle1_2000', 'DLC_resnet50_bottom_camera_FFEJun19shuffle1_200000']\r\n```\r\nHere is the csv file:\r\n[Test 1_FDbottom.csv](https://github.com/sgoldenlab/simba/files/12303699/Test.1_FDbottom.csv)\r\n\r\n\r\nThank you in advance!\r\n\r\n**Desktop:**\r\n - OS: Windows 11 Pro\r\n - Python Version 3.6.13\r\n - Are you using anaconda? yes\r\n \r\n", + "user": "alisabak", + "reaction_cnt": 0, + "created_at": "2023-08-09T14:26:26Z", + "updated_at": "2023-08-11T09:55:00Z", + "author": "alisabak", + "comments": [ + { + "body": "Hi @alisabak many thanks for reporting this! Can you check if it is working on your end if you update simba with `pip install simba-uw-tf-dev --upgrade` and try it again? \r\n\r\nJust a note: This function is **slow**. I created the path video of your 36k frames, looks good, but took a while. The code is meant for creating path plots without having to import data into SimBA projects and the function is not parallelized. If you want more control over how path plots are created, and speed up the creation of the path plots **significantly**, import the data into SimBA and use this function as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario2.md#visualizing-path-plots). I will also insert the option to multiprocess the path plots in this current function later in the week or next. ", + "created_at": "2023-08-09T16:09:59Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson thank you, it works now. The visualisation functions are faster indeed, will use them instead, thank you for the tip.", + "created_at": "2023-08-11T09:55:00Z", + "author": "alisabak" + } + ] + }, + { + "title": "ROIs H5 file conversion ", + "body": "Hello, \r\n\r\nCould you please tell me how to convert the H5 ROI definitions file to CSV or a text file? And provide a way to easily interpret the numbers there? \r\n\r\nThank you, \r\nBest,", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-08-09T13:15:33Z", + "updated_at": "2023-08-11T09:59:36Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - good point, we are missing a method for this. Let me type one up and get back to you. 
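\r\n\r\nIn the meantime, the idea would roughly be the sketch below (the HDF key names are assumptions; the real tool may differ):\r\n\r\n```python\r\nimport pandas as pd\r\n\r\n# Dump each ROI shape table in the definitions file to its own CSV.\r\nH5_PATH = r'project_folder/logs/measures/ROI_definitions.h5'\r\nfor key in ['rectangles', 'circleDf', 'polygons']:\r\n    try:\r\n        pd.read_hdf(H5_PATH, key=key).to_csv(key + '.csv', index=False)\r\n    except KeyError:\r\n        pass\r\n```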
", + "created_at": "2023-08-09T13:25:59Z", + "author": "sronilsson" + }, + { + "body": "Thank you!", + "created_at": "2023-08-09T13:31:04Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli The function would output separate files for different shape types like this. Is that that what you are looking for? \r\n\r\n[polygons_20230809094913.csv](https://github.com/sgoldenlab/simba/files/12303441/polygons_20230809094913.csv)\r\n[rectangles_20230809094913.csv](https://github.com/sgoldenlab/simba/files/12303442/rectangles_20230809094913.csv)\r\n", + "created_at": "2023-08-09T13:52:54Z", + "author": "sronilsson" + }, + { + "body": "It exist in the tools menu if you upgrade SimBA. Let me know if anything is missing or if want other formatting options and we can insert it.\r\n\r\n\r\n\"image\"\r\n\r\n\r\n", + "created_at": "2023-08-09T16:13:37Z", + "author": "sronilsson" + }, + { + "body": "Super, it's exactly what I was looking for, \r\n\r\nThank you, \r\nBest,", + "created_at": "2023-08-10T13:16:31Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Unsupervised Learning Missing In Anaconda Installation", + "body": "**Describe the bug**\r\nThere is an issue I am experiencing with trying to access the unsupervised learning SIMBA Expansion. It appears that when downloading SIMBA using the anaconda installation, the files/code for the unsupervised learning was not installed. How can I go about installing the unsupervised learning code in order to run unsupervised learning on my current project. Thank you for your help. I look forward to your response. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install SIMBA using anaconda method. \r\n2. Access existing project.\r\n3. Locate SIMBA Expansions.\r\n4. See error: There is no existing Unsupervised learning button as pictured in tutorial. \r\n\r\n**Expected behavior**\r\nI expected a GUI button for the unsupervised learning add-on. \r\n\r\n**Screenshots**\r\n![Screenshot 2023-08-02 135301](https://github.com/sgoldenlab/simba/assets/87246414/6eba2c03-a054-4bb8-83db-2f59c28ee1b5)\r\n\r\n![Screenshot 2023-08-02 135204](https://github.com/sgoldenlab/simba/assets/87246414/a4588bce-3568-49fc-b8e6-01cf57fd0495)\r\n\r\n![Screenshot 2023-08-02 135106](https://github.com/sgoldenlab/simba/assets/87246414/6eecabcf-d45b-441e-8f02-95350f19f98a)\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 7\r\n - Python Version 3.6.13\r\n - Are you using anaconda? Yes\r\n\r\n", + "user": "nsusic", + "reaction_cnt": 0, + "created_at": "2023-08-02T17:59:48Z", + "updated_at": "2023-08-16T12:38:58Z", + "author": "nsusic", + "comments": [ + { + "body": "Hi @nsusic, \r\n\r\nYou're right. Although I typed up the tutorial, and written the code, I have been a little hesitant starting to support it widely because (ii) the likely extra time commitment and (ii) the additional dependencies required that would fall on all simba users, even if they are not interested in unsupervised learning. So I have commented it out for now. I can give you some hints to get it running: \r\n\r\nTo **just** find the unsupervised code, try `pip show simba-uw-tf-dev` in your conda enviroment, it should show you where the simba package is located: \r\n\r\n\"image\"\r\n\r\nYou should find a sub-directory called `unsupervised` in that folder that has most of the code. Some unsupervised functions are within the `mixins.unsupervised.py` file. 
\r\n\r\n\r\nTo activate the buttons and get it running in the GUI, uncomment this line from SimBA.py line 132:\r\n\r\n\"image\"\r\n\r\n\r\nThen uncomment the unsupervised button definition on line 443 in SimBA.py:\r\n\r\n\"image\"\r\n\r\n\r\nLastly, uncomment the line that inserts the unsupervised button in the GUI, line 558 in SimBA.py:\r\n\r\n\"image\"\r\n\r\nFinally, you must have HDBSCAN and UMAP installed in your conda environment. Ideally from the rapids library, but `pip install umap-learn hdbscan` will also work. \r\n\r\nYou should be good to go! Let me know how it goes!\r\n\r\nSimon\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-08-03T19:12:51Z", + "author": "sronilsson" + }, + { + "body": "Hi Simon, \r\n\r\nThank you for looking into the matter for me and for providing the directions. I had a fellow lab member try the fix, but they were unable to access SIMBA after applying the fix. Attached below is the error message they received when running the program:\r\n\r\n```\r\n from cuml.cluster.hdbscan import HDBSCAN\r\nModuleNotFoundError: No module named 'cuml'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 132, in \r\n from simba.unsupervised.unsupervised_ui import UnsupervisedGUI\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\unsupervised\\unsupervised_ui.py\", line 19, in \r\n from simba.unsupervised.pop_up_classes import (GridSearchClusterVisualizerPopUp,\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\unsupervised\\pop_up_classes.py\", line 28, in \r\n from simba.unsupervised.hdbscan_clusterer import HDBSCANClusterer\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\unsupervised\\hdbscan_clusterer.py\", line 8, in \r\n from hdbscan import HDBSCAN\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\site-packages\\hdbscan\\__init__.py\", line 1, in \r\n from .hdbscan_ import HDBSCAN, hdbscan\r\n File \"C:\\Users\\aranedalab\\anaconda3\\envs\\simba\\lib\\site-packages\\hdbscan\\hdbscan_.py\", line 21, in \r\n from ._hdbscan_linkage import (single_linkage,\r\n File \"hdbscan\\\\_hdbscan_linkage.pyx\", line 1, in init hdbscan._hdbscan_linkage\r\nModuleNotFoundError: No module named 'dist_metrics'\r\n```\r\n\r\nThey also tried installing the missing modules using pip, but then received an error that states to use the rapidsai library, which they found is only compatible with python 3.9.\r\n\r\nWhat do you suggest we do? Was it an error on our end in following your instructions?\r\n\r\nThank you for your help, I genuinely appreciate it and look forward to hearing from you. 
Best solution would probably be me creating some install instructions (for with and without rapids library) that comes with the conda enviroment yaml files for getting it to run on the GPU or CPU?\r\n\r\nA little crazy developing this code, but I don't have readily access to a GPU, weekends I can normally get access so hold on a few days. Remind me late next week if not done. There are some runtime comparisons [HERE](https://github.com/sgoldenlab/simba/blob/master/misc/runtimes_gpu_cpu.csv) or [HERE](https://github.com/sgoldenlab/simba/blob/master/misc/runtimes_gpu_cpu.xlsx), and CPU is not possible for large datasets or when grid-searching many models. \r\n\r\nPS. Regardless of what docs say later versions of SimBA should run on python3.9. ", + "created_at": "2023-08-10T11:52:00Z", + "author": "sronilsson" + }, + { + "body": "Hi Simon, I hope everything has been going well. Have you had a chance to make installation instructions (for with and without rapids library)? If not, no problem but I just thought I would check in as requested. Thank you again for your help!", + "created_at": "2023-08-15T22:22:41Z", + "author": "nsusic" + }, + { + "body": "Hi @nsuic - tried getting GPU but no luck sorry! I will keep trying next week. \r\n\r\nFor the CPU, I couldn't recreate your error, but I created a conda env yaml file below that you could use and launches on my end.\r\n\r\nUnzip it, and in the terminal navigate to dir where the yaml is located. Then run:\r\n\r\n`conda env create --name MyUnsupervisedEnvironmentName --file environment_simba_unsupervised.yml`\r\n\r\n[environment_simba_unsupervised.yml.zip](https://github.com/sgoldenlab/simba/files/12359445/environment_simba_unsupervised.yml.zip)\r\n\r\nWhen in the `MyUnsupervisedEnvironmentName`, remember to remove the commented lines as discussed previously in `simba.py` before launching simba with `simba`. \r\n\r\nLet me know if there are problems!\r\n\r\nPS. 
RAPIDS has a conda or pip install-command creator [HERE](https://docs.rapids.ai/install) where you click in the CUDA and python versions etc. that I typically use for fresh installs, but it won't work for Microsoft Windows.", + "created_at": "2023-08-16T12:38:14Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Mice switching IDs every few frames", + "body": "Hello,\r\nI am having an issue where the ROI analysis is swapping the IDs of the mice every few frames.\r\nHere is a GIF of the output video showing IDs switching:\r\n![switchIDs](https://github.com/sgoldenlab/simba/assets/95374245/89a43c93-4b0f-4ee7-95fb-94c519645db5)\r\n\r\n\r\nThe IDs were not switching in sleap, so I assume this is a simba issue?\r\nHere is a GIF of the sleap video not switching IDs:\r\n![noSwitchIDs](https://github.com/sgoldenlab/simba/assets/95374245/7bd76fff-3a69-45e5-a80e-c5a7003fcb70)\r\n\r\n\r\nSystem Info:\r\n - OS: Windows 10 Pro 22H2\r\n - Python Version: 3.8.5\r\n - Yes, using Anaconda\r\n\r\nI have tried these fixes but none of them were successful so far:\r\n- Outlier correction.\r\n- Created a new project with optimized 8 body parts for each mouse.\r\n- Created apparatus zone so mice are always in at least one zone.\r\n- Interpolated sleap data by reconverting it in simba and using the \u201canimal:nearest\u201d interpolation option.\r\n- Interpolated each body point.\r\n\r\nThanks for the help!\r\n", + "user": "ZelikowskyLab", + "reaction_cnt": 0, + "created_at": "2023-07-27T20:03:13Z", + "updated_at": "2023-10-23T03:40:08Z", + "author": "ZelikowskyLab", + "comments": [ + { + "body": "Hi @ZelikowskyLab !\r\n\r\nFor SimBA to cause the issue, the column values for some reason would have to be shuffled row-wise, while maintaining the x and y order (the x and y for different body-parts are never mixed up). It might be true, but it seems unlikely and not the first thing that comes to mind.\r\n\r\nI had a look at the sleap-created gif: the color never switches between animals, but there are a fair few missing data points and tracking oddities (e.g., below), so I'm thinking that could be related somehow. \r\n\r\n\"image\"\r\n\r\nDoes sleap allow coloring all body-parts belonging to an animal in a separate palette, rather than a uniform color per animal? \r\n\r\nIf you use [THIS](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#visualize-pose-estimation-in-folder) tool to visualize the data in the `project_folder/csv/input_csv` directory after importing the data without smoothing and interpolation, do you still see the ID switches? Would help to pin down if it's happening before or after data import.\r\n \r\n\r\n", + "created_at": "2023-07-28T00:09:08Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the response @sronilsson!\r\n\r\nYes, here is a GIF from SLEAP of all body parts being a different color:\r\n![16_instances_color_nodes](https://github.com/sgoldenlab/simba/assets/95374245/13b0d4f2-7b31-4faf-89b4-757ec80eea3c)\r\n\r\nI am currently working on manually assigning nodes to body parts for each frame of 5 seconds of the video in SLEAP, then am going to send this \"perfect\" data to Simba to try and test whether or not it is a Simba issue. I will post the result here when I am done with that.", + "created_at": "2023-08-07T18:49:57Z", + "author": "ZelikowskyLab" + }, + { + "body": "Thanks @ZelikowskyLab! 
You might do this already but just a note: when you click to assign the animals to the different tracks as documented [here](https://github.com/sgoldenlab/simba/blob/master/docs/Multi_animal_pose.md#step-5-assigning-tracks-the-correct-identities), choose a frame where the animals are clearly separated. ", + "created_at": "2023-08-07T19:25:35Z", + "author": "sronilsson" + }, + { + "body": "Thanks, yes we are choosing a frame where the animals are separated.\r\nAfter I ran my manually assigned 5-second sleap data in simba, here were the results:\r\n\r\nManually assigned sleap video:\r\n![sleap](https://github.com/sgoldenlab/simba/assets/95374245/f34403c5-f1c1-430b-a3f4-c82f80ae5d5a)\r\n\r\nResult from simba:\r\n![simba](https://github.com/sgoldenlab/simba/assets/95374245/f885c098-b58d-41f9-ac92-a82e3977a076)\r\n\r\nAll body points were present in the sleap video but the issue still persisted.\r\nThanks for the help", + "created_at": "2023-08-24T03:46:52Z", + "author": "ZelikowskyLab" + }, + { + "body": "Hi @ZelikowskyLab, thanks for digging into this. The error looks slightly different now and kind of worse, with the body-part predictions just fading away plus ID switches nearly every frame. The fastest way to solve it would probably be if there is a small reproducible example you can share and I can take a look? Do you have a small SimBA project with a short video together with the sleap data file you are importing that you could share on a GDrive? \r\n\r\n", + "created_at": "2023-08-24T11:05:23Z", + "author": "sronilsson" + }, + { + "body": "Yes, we can upload the project files to a google drive. What email can we share it with?\r\nThanks", + "created_at": "2023-09-07T21:50:05Z", + "author": "ZelikowskyLab" + }, + { + "body": "Can you share it with sronilsson@gmail.com ? ", + "created_at": "2023-09-07T22:29:51Z", + "author": "sronilsson" + }, + { + "body": "Yes, just shared it.\r\nPlease let me know if you have any questions about the files.", + "created_at": "2023-09-07T22:32:48Z", + "author": "ZelikowskyLab" + }, + { + "body": "Thanks! Got it, and I can see the issue: it is happening as soon as the data has been imported into SimBA, so it must happen when SimBA is importing the slp file. Just out of curiosity, have you tried to import h5 files from sleap into SimBA, and do you see the same issues then?\r\n\r\nEDIT: The SLP file seems to contain data for four animal tracks in each frame, I'm not sure why... but it could be what is mucking up the import in SimBA.", + "created_at": "2023-09-07T22:48:20Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the observation!\r\nI think there were four because it saved the two computer generated tracks as well as two manually edited tracks.\r\nWe deleted the manual tracks and still had the same issue with the mice switching IDs in Simba.\r\nWe also did a separate trial where we deleted the computer generated tracks in sleap, but we get the error: \r\n_SIMBA VALUE ERROR: The animal most proximal to click number 0 is animal named c57. The animal most proximal to click number 1 is also animal c57. Please indicate which animal is which using a video frame where the animals are clearly separated 🚨_\r\nTherefore we can't test whether the dropped body points are contributing to the ID swapping.\r\nI have attached all the files in a zip folder to this message. In the zip you will find a folder with the manual and computer track files. 
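A note on diagnosing swaps like the ones discussed in this thread: identity switches typically show up as large, simultaneous frame-to-frame jumps in each animal's centroid. The sketch below is purely illustrative - the file path and `Nose_1`-style column names are assumptions following SimBA's `BodypartName_AnimalID` convention, raw imports may carry multi-row DLC-style headers that need extra `read_csv` arguments, and this is not a SimBA utility:

```python
import pandas as pd
import numpy as np

# Hypothetical flat CSV with columns like Nose_1_x, Nose_1_y, Nose_2_x, ...
df = pd.read_csv("project_folder/csv/input_csv/video1.csv")

ANIMALS = {
    "Animal_1": ["Nose_1", "Tail_base_1"],
    "Animal_2": ["Nose_2", "Tail_base_2"],
}
JUMP_PX = 100  # arbitrary pixel threshold for a "suspicious" jump

for animal, bps in ANIMALS.items():
    cx = df[[f"{bp}_x" for bp in bps]].mean(axis=1)  # centroid x per frame
    cy = df[[f"{bp}_y" for bp in bps]].mean(axis=1)  # centroid y per frame
    jump = np.sqrt(cx.diff() ** 2 + cy.diff() ** 2)  # frame-to-frame displacement
    suspects = jump.index[jump > JUMP_PX].tolist()
    print(f"{animal}: {len(suspects)} large jumps, first few frames: {suspects[:5]}")
```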
There is also a folder called 2mice8bpSept28b1 with the simba project that we used and a png of the image used for the user defined bp config.\r\n[filesForGoldenLab.zip](https://github.com/sgoldenlab/simba/files/12754762/filesForGoldenLab.zip)\r\n", + "created_at": "2023-09-29T00:18:54Z", + "author": "ZelikowskyLab" + }, + { + "body": "Thank you! let me test this out. In the meantime can you test one thing for me: \r\n\r\nI was given some code last week-ish from the SLEAP developers, to more rapidly import SLEAP .h5 tracking files, and I included this code in SimBA. If you haven't done so already, could you try how it looks in the latest version of SimBA by upgrading it with `pip install simba-uw-tf-dev --upgrade` and try to import your tracking data in .h5 format (and not .slp format) and let me know how it goes? \r\n\r\n\r\n\r\n ", + "created_at": "2023-09-29T18:31:36Z", + "author": "sronilsson" + }, + { + "body": "The update with the .h5 import worked!! Thank you.", + "created_at": "2023-10-18T00:13:01Z", + "author": "ZelikowskyLab" + }, + { + "body": "Cool! Let me know if anything else comes up!", + "created_at": "2023-10-23T03:40:07Z", + "author": "sronilsson" + } + ] + }, + { + "title": "SIMBA FEATURE NUMBER MISMATCH ERROR", + "body": "Hello, I'm sorry to bother you. When I clicked on 'Run Model' under the 'Run machine model' option in simba, I encountered an error: SIMBA FEATURE NUMBER MISMATCH ERROR: Mismatch in the number of features in input file D:/simbaAndDeepLabCut/123/loxy/models/generated_ Models/run. sav, and what is expected by the model run The model expects 153 features The data contains 155 features. Do you know what caused this?", + "user": "13281306705", + "reaction_cnt": 0, + "created_at": "2023-07-23T17:39:44Z", + "updated_at": "2023-08-14T19:41:57Z", + "author": "13281306705", + "comments": [ + { + "body": "Hi @13281306705! Do you have an error msg from the operating system terminal? \r\n\r\nThis error happens when you build a model using the data files inside the `project_folder/csv/targets_inserted` directory, and each file in this directory contains 153 features (all columns minus the annotation columns and the body-part columns). \r\n\r\nYou then try to run the model on your data inside the `project_folder/csv/machine_results` directory, and SimBA finds 155 columns in a file. Do you see two columns that may have sneaked in by mistake in a `project_folder/csv/machine_results` file?\r\n\r\nNote: The error msg reads, **SIMBA FEATURE NUMBER MISMATCH ERROR: Mismatch in the number of features in input file D:/simbaAndDeepLabCut/123/loxy/models/generated_ Models/run. sav** It is a error msg bug, which I will fix now, and shouldn't affect the program or cause the error. It should read **SIMBA FEATURE NUMBER MISMATCH ERROR: Mismatch in the number of features in input file TheVideoFileNameYourTrying toAnalyze* \r\n\r\n\r\n", + "created_at": "2023-07-23T17:56:08Z", + "author": "sronilsson" + }, + { + "body": "... is it possible you selected a file to use in validation that lives in `project_folder/csv/targets_inserted` rather than in `project_folder/csv/features_extracted` ? ", + "created_at": "2023-07-23T17:58:23Z", + "author": "sronilsson" + }, + { + "body": "> ... 
is it possible you selected a file to use in validation that lives in `project_folder/csv/targets_inserted` rather than in `project_folder/csv/features_extracted` ?\r\n\r\n> \r\nHello, this is the detailed error message:\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"d:\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1699, in __call__\r\n return self.func(*args)\r\n File \"d:\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 372, in \r\n button_runvalidmodel = Button(label_model_validation, text='RUN MODEL', fg='blue', command=lambda: self.validate_model_first_step())\r\n File \"d:\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 566, in validate_model_first_step\r\n clf_path=self.modelfile.file_path)\r\n File \"d:\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\model\\inference_validation.py\", line 53, in __init__\r\n output_df[probability_col_name] = self.clf_predict_proba(clf=clf, x_df=data_df, model_name=classifier_name, data_path=clf_path)\r\n File \"d:\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\train_model_mixin.py\", line 971, in clf_predict_proba\r\n raise FeatureNumberMismatchError(f'Mismatch in the number of features in input file {data_path}, and what is expected by the model {model_name}. The model expects {str(clf.n_features_)} features. The data contains {len(x_df.columns)} features.')\r\nsimba.utils.errors.FeatureNumberMismatchError: Mismatch in the number of features in input file D:/simbaAndDeepLabCut/123/lxy/models/generated_models/run.sav, and what is expected by the model run. The model expects 153 features. The data contains 155 features.\r\n\r\nAfter listening to your description, I roughly understand the reason for the error. Indeed, I chose `project_folder/csv/targets_inserted` for validation. Thank you very much for your answer.\r\n", + "created_at": "2023-07-23T18:11:08Z", + "author": "13281306705" + }, + { + "body": "Got it! Yes, it's likely that those files contain two additional columns (your behavior annotations) which we don't want when running validation. ", + "created_at": "2023-07-23T18:17:45Z", + "author": "sronilsson" + } + ] + }, + { + "title": "SIMBA COMPLETE: ROI definitions saved for video: {self.file_name} 🚀", + "body": "**Describe the bug**\r\nEvery time that I try to save my ROI, the following message appears:\r\nSIMBA COMPLETE: ROI definitions saved for video: {self.file_name} 🚀\r\n\r\nAs a consequence, I cannot run any analysis on my data.\r\n\r\n", + "user": "Gfernandezv", + "reaction_cnt": 0, + "created_at": "2023-07-22T17:41:47Z", + "updated_at": "2023-08-14T19:42:53Z", + "author": "Gfernandezv", + "comments": [ + { + "body": "Hi @Gfernandezv! \r\n\r\nIf I understand correctly, you press this button to save your ROI data and you see the `SIMBA COMPLETE: ROI definitions saved for video: {self.file_name}` msg.\r\n\r\n\"image\"\r\n\r\nNext, when you go on to analyse your ROI data, through e.g., `ANALYZE ROI DATA: AGGREGATES` like the menu below, do you hit an error msg? 
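An aside on the feature-count mismatch resolved in the exchange above: files in `targets_inserted` carry the annotation column(s) on top of the feature columns, which is why validating on them trips the model's expected feature count. A rough, hypothetical way to spot the extra columns (paths are placeholders; this assumes matching file names in both directories):

```python
import os
import glob
import pandas as pd

targets_dir = "project_folder/csv/targets_inserted"     # placeholder path
features_dir = "project_folder/csv/features_extracted"  # placeholder path

for t_path in glob.glob(os.path.join(targets_dir, "*.csv")):
    f_path = os.path.join(features_dir, os.path.basename(t_path))
    if not os.path.isfile(f_path):
        continue
    # Read headers only (nrows=0) to compare column sets cheaply.
    t_cols = set(pd.read_csv(t_path, nrows=0).columns)
    f_cols = set(pd.read_csv(f_path, nrows=0).columns)
    extra = t_cols - f_cols  # typically the annotation column(s)
    print(f"{os.path.basename(t_path)}: {len(extra)} extra column(s): {sorted(extra)}")
```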
\r\n\r\n\"image\"\r\n\r\nPS: The text printed `SIMBA COMPLETE: ROI definitions saved for video: {self.file_name}` contains a typo, you should see your actual video name rather than `{self.file_name}`, but that should be fixed if you update simba with `pip install simba-uw-tf-dev --upgrade`, and it should not affect the analysis functions.\r\n\r\n", + "created_at": "2023-07-22T18:41:15Z", + "author": "sronilsson" + }, + { + "body": "Hi, thanks for the quick answer. \r\nI upgrade simba and it solved the problem, but when I try to analyse de data using ANALYZE ROI DATA: AGGREGATES.\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/87041514/ca4e45e6-8354-4596-a0c1-db6e8ec79c61)\r\n \r\nI have the follow message in the console:\r\n![image](https://github.com/sgoldenlab/simba/assets/87041514/52987824-2d7e-44dc-af6d-56e17332daec)\r\n\r\nand no message is print in the main screen:\r\n![image](https://github.com/sgoldenlab/simba/assets/87041514/689c5207-63ef-4a81-bb55-e1b4d515eeec)\r\n\r\n(I´m using: Anaconda, with python 3,6 on windows 10)\r\n", + "created_at": "2023-07-22T22:05:35Z", + "author": "Gfernandezv" + }, + { + "body": "Ah thanks @Gfernandezv! \r\n\r\nI think this error comes from the center of ROIs being saved with names `centerX` and `centerY` in **older** version of SimBA, and `Center_x` and `Center_y` in **newer** versions of SimBA (although I thought I had inserted a fix for this - I will have to check tomorrow why this fix isn't working). \r\n\r\nIn meantime, if not too much hassle, you can try to redraw the ROIs in newest version and see if that fixes it.", + "created_at": "2023-07-22T22:45:50Z", + "author": "sronilsson" + }, + { + "body": "@Gfernandezv - I had a look, and can't identify how the error is happening: not sure which older version you where using. \r\n\r\nTo solve it, would you mind zipping up the `/project_folder/logs/measures/ROI_definitions.h5` file and dropping it into this issue in I will take a look what the key names are and insert a fix? \r\n\r\n", + "created_at": "2023-07-23T17:23:26Z", + "author": "sronilsson" + }, + { + "body": "Hi, I already solve the problem. To do it, I made a fresh install using the Anaconda tutorial (https://github.com/sgoldenlab/simba/blob/master/docs/anaconda_installation.md) and then update (using `pip install simba-uw-tf-dev)`\r\n\r\nJust one thing, when a tried to add the DLC CSV files it gives an error. To solve it, i had to delete the input csv folder and add the csv files after of load the experiment file.\r\n\r\nThanks for your helpfull tips\r\n=)", + "created_at": "2023-07-23T18:10:55Z", + "author": "Gfernandezv" + }, + { + "body": "Thanks @Gfernandezv! if you come across the import error again, let me know what the error msg in the OS system terminal is. Seems to be some SimBA version clashes when projects/roi definitions from older versions are used in newer versions and want to get rid of them if possible. \r\n\r\n", + "created_at": "2023-07-23T18:16:04Z", + "author": "sronilsson" + } + ] + }, + { + "title": "I have two models and I need to combine them.... also... leads on CLI implementation?", + "body": "Hello!\r\n\r\nThis is mainly a question of feasibility in two parts if you have any thoughts or suggestions that might be helpful I would be extremely grateful. \r\n\r\n### I have two models and I need to combine them. \r\nI'm modeling dam and nest behavior (perhaps one day upgrading to pups as well). 
To get the two differently shaped models into SimBA, my plan has been to put both the dam and nest skeletons in as one, joined, skeleton. However, SLEAP, which I'm using for pose estimation, makes better predictions with separate models for the dam and the nest. So now I have two models and I need to combine them prior to feature extraction in SimBA. \r\n\r\nI can combine the pose estimations/predictions/skeletons fresh from SLEAP with a python script I have, but: occasionally, there are frames from the dam or nest predictions file without labels, or missing points... I think the outlier and interpolation steps of the SimBA import process might be best applied to the models individually, before being merged.\r\n\r\nDo you have any suggestions that might be helpful? How involved would combining skeletons (with a single connecting edge) after SimBA import be, and could you point me to any helpful functions that may already exist to that end? Alternatively, if applying the pre-processing in SimBA, exporting to SLEAP and applying the script I already have seems easier (doubtful), are there any insights you might offer a priori?\r\n\r\n### I'd like to do pretty much everything SimBA-related, other than labeling behavior, with the CLI\r\nI'm no expert in the inner workings of SimBA (yet!). Are there any examples of CLI application of the SimBA workflow? I've been poking around and have found plenty of encouraging bits and bobs, but have not located a definitive workflow. Apologies if this is obtuse. A general 'take' on how to best approach this would be a delight. Some of these steps are pretty resource intensive and I'd rather use my University's compute cluster to free up the machines I have local access to for other things. \r\n\r\nThank you so much for sharing any insights or assistance!\r\nRyan\r\n\r\n\r\n", + "user": "rfkova", + "reaction_cnt": 0, + "created_at": "2023-07-21T00:35:50Z", + "updated_at": "2023-10-26T20:38:16Z", + "author": "rfkova", + "comments": [ + { + "body": "Hello @[rfkova](https://github.com/rfkova)! \r\n\r\nAbout the CLI, there is an API [HERE]( https://simba-uw-tf-dev.readthedocs.io/en/latest/api.html), and some example notebooks [HERE]( https://simba-uw-tf-dev.readthedocs.io/en/latest/notebooks.html) calling methods. I wrote them in response to user scenarios and requests, and I'm happy to help type up other notebooks or give suggestions to fit the use-case. \r\n\r\n```\r\nDo you have any suggestions that might be helpful? How involved would combining skeletons (with a single connecting edge) after SimBA import be and could you point me to any helpful functions that may already exist to that end?\r\n```\r\nI don't completely understand this one – is the dam network connected to the nest network?\r\n\r\nAbout the pose models: If I understand right, you have two SLEAP .SLP or .H5 output files for each video (?), one from each model, and you have a method to combine them into a single H5?\r\n\r\nOutlier correction and interpolation in SimBA run on a “per individual” level. So even if combined into a single file, SimBA can recognize them as independent if you define them as separate “animals” in your project, and you do not have to do it on two separate files. How well interpolation and outlier correction work depends on the number of missing values, but I've seen pretty good results lately with a fair bit missing… \r\n\r\nI don't know what the H5 file looks like if you combine two outputs, and if SimBA will accept it. 
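Before importing, it can help to inspect what actually ended up inside a (possibly merged) H5. A minimal sketch, assuming the file follows SLEAP's analysis-HDF5 layout (dataset names like `tracks` and `node_names` are what SLEAP analysis exports typically contain, but verify against your own file; the filename is made up):

```python
import h5py
import numpy as np

# "merged_predictions.h5" is a hypothetical filename.
with h5py.File("merged_predictions.h5", "r") as f:
    print("datasets:", list(f.keys()))
    tracks = f["tracks"][:]  # typically shaped (n_tracks, 2, n_nodes, n_frames)
    nodes = [n.decode() for n in f["node_names"][:]]
    print("tracks shape:", tracks.shape, "| nodes:", nodes)
    # Fraction of missing predictions per track - useful to gauge how much
    # work interpolation will be doing after import.
    for i in range(tracks.shape[0]):
        print(f"track {i}: {np.isnan(tracks[i]).mean():.1%} NaN")
```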
If that proves tricky, there is the option to convert the files to CSVs, join the two CSVs, and import that into SimBA. There is a notebook to convert h5 to CSV [HERE]( https://github.com/talmolab/sleap/discussions/1089).\r\n\r\nI don't know how static the nest is (if it moves a lot within recordings this won't work), but if not, there is also the option of manually defining a \"nest\" polygon as a [ROI]( https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md#part-3-generating-features-from-roi-data) and letting SimBA calculate the location of the animals relative to the nest like [HERE](https://www.nature.com/articles/s41598-022-05641-w). \r\n\r\nAbout the cluster: the most demanding SimBA methods run using multiprocessing, so speed scales with core count. There are really no GPU dependencies, beyond the simpler [video pre-processing tools](https://github.com/sgoldenlab/simba/blob/master/docs/gpu_vs_cpu_video_processing_runtimes.md) should you need them.", + "created_at": "2023-07-21T01:55:50Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson,\r\n\r\nAmazing, thank you! \r\n\r\nThe links you sent will take some time to digest but there is so much that is helpful in there. I may be back with more specific questions, but let me reach a higher plane of understanding so my questions are more meaningful and direct to respect your time. \r\n\r\n> I don't completely understand this one – is the dam network connected to the nest network?\r\n> About the pose models: If I understand right, you have two SLEAP .SLP or .H5 output files for each video (?), one from each model, and you have a method to combine them into a single H5?\r\n\r\nYour understanding is correct! I was under the impression that multiple animals/objects (with different skeletons) had to be combined into one skeleton for SimBA import... **if I merge the dam and nest skeletons to get a single H5 file containing both skeletons and import to SimBA, you are saying I could specify two different skeletons, one for each \"animal\" (in this case dam and nest)?** Adding a connection between the skeletons is optional; my script for combining them adds a connection because I thought that would be necessary for SimBA, but if it is better to leave them separated so they can be treated as separate objects, that is very easy at this point. \r\n\r\nThank you again! I'm always humbled by developers' willingness to communicate so freely about their projects. It is a staggering amount of work and keeping up with requests for help can't be easy. You are so very much appreciated, and what an amazing tool you've built and maintain!\r\n\r\nVery best,\r\nRyan\r\n\r\nPS: If you would like to close this topic with your response, feel free. I think further discussion will pertain to more specific issues I may encounter and I have a lot to chew on in the meantime.\r\n\r\n\r\n", + "created_at": "2023-07-21T14:07:54Z", + "author": "rfkova" + }, + { + "body": "@rfkova yes, there is no need for all individuals to share the same body-part configuration. \r\n\r\nThe one issue I can see popping up is that SimBA, by default, using this workflow, will treat your nest as an \"animal\" when creating classifiers. This would mean that the code would compute the velocity and movement and size etc. of the nest, and use those values as features for classification, which could give wonky results. \r\n\r\nI can think of two ways to get around this. 
Either, as I mentioned before, define the nest ROI manually; then SimBA treats it as a static region, and will just compute whether animals are inside the nest, the distance from the animal to the nest, and how the animal is angled relative to the nest. I've seen this being used for classifying behaviors in relation to running wheels, and pup retrieval like in the paper linked above. \r\nThe other option is to use your own or a [custom feature extractor script](https://github.com/sgoldenlab/simba/blob/master/docs/extractFeatures.md). I have seen this being used with dams and pups, to pick and mix the [feature methods available](https://github.com/sgoldenlab/simba/blob/master/simba/mixins/feature_extraction_mixin.py) to get more accurate classifications of nursing etc., and also with fish to compute features focused on rotation and angles specifically relevant for [non-shape shifting animals](https://github.com/sgoldenlab/simba/blob/master/simba/feature_extractors/misc/fish_feature_extractor_2023_version_5.py). ", + "created_at": "2023-07-21T18:32:32Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson,\r\n\r\nInteresting. So, here's the thing. The nest does change shape over days and is manipulated within videos. I was hoping to capture dam interaction with the nest. You think my best option will be to write a custom feature extractor? For the lowest level of analysis I'm merely interested in the intersection of the dam and the nest, which I wouldn't think would be too complicated even for the defaults... is it worth a shot or should I study up on the feature extractor script asap?\r\n\r\nThanks again for all the links and analysis :)\r\n\r\nBest,\r\nRyan", + "created_at": "2023-07-21T18:42:34Z", + "author": "rfkova" + }, + { + "body": " @rfkova - yes I would probably do that. I've been using this [method](https://github.com/sgoldenlab/simba/blob/9ee2ca11d58ea4c223ba6ec702e8528819d66dee/simba/mixins/feature_extraction_mixin.py#L273C9-L273C37) to calculate if animals are in locations such as nests (it's quick), and [shapely.geometry.Polygon.intersection](https://shapely.readthedocs.io/en/stable/reference/shapely.Polygon.html) if I need more information (e.g., how much of the animal hull intersects with the nest hull). ", + "created_at": "2023-07-21T18:59:07Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson,\r\n\r\nI have a 4-point dam model and a 7-point nest model. Previously I was splicing them together to create one track and a super-animal. Per your advice, I'm working on splitting them. However, I'm running into problems.\r\n\r\nIf I try to make a multi-animal SimBA project, the GUI demands some multiple of a single animal's body parts... and won't accept blanks in the event I make two 11-point models and 2 tracks. \r\n\r\nAlso, the way SLEAP wants to work, I don't think I can have two skeletons... so were I to make two tracks, my output from SLEAP would be one 11-point skeleton with two tracks, one for each \"animal\" (i.e. the dam and the nest). \r\n\r\nI might be able to modify the H5 file export to match what is needed for SimBA, so perhaps the SLEAP constraints aren't a big deal and I can just write a follow-up script to tack on to the one I have that uses SLEAP code for the merge. 
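To make the `shapely` suggestion above concrete, here is a minimal sketch (not SimBA's implementation) of measuring how much of an animal hull overlaps a nest hull using convex hulls and `Polygon.intersection`; the coordinates are made up:

```python
from shapely.geometry import MultiPoint

# Made-up per-frame (x, y) body-part coordinates for a dam and a nest.
dam_pts = [(105, 210), (120, 230), (98, 250), (112, 238)]
nest_pts = [(90, 200), (160, 205), (165, 270), (95, 275), (80, 240), (85, 215), (120, 198)]

# Convex hull of each point cloud gives a polygon per individual/object.
dam_hull = MultiPoint(dam_pts).convex_hull
nest_hull = MultiPoint(nest_pts).convex_hull

overlap = dam_hull.intersection(nest_hull)
print(f"dam hull area:  {dam_hull.area:.1f} px^2")
print(f"overlap area:   {overlap.area:.1f} px^2")
print(f"fraction of dam inside nest: {overlap.area / dam_hull.area:.2%}")
```

Run per frame, the overlap fraction makes a natural feature for dam-nest interaction.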
\r\n\r\nThis question follows from the original topic, so I added it to this thread, but if you think it better to open a new issue, lmk and I'm on it!\r\n\r\nThank you!\r\nRyan\r\n\r\nedit: It is possible that leaving completely blank body parts in a track IS the solution, but I want to make sure that would be ok in advance. By which I mean, two 11-point tracks with the corresponding points never labeled. I just worry this will cause problems with behavior analysis. ", + "created_at": "2023-07-24T19:52:14Z", + "author": "rfkova" + }, + { + "body": "Hi @rfkova!\r\n\r\nYes - the GUI is not good for this (as it demands a multiple). But I can give you a work-around:\r\n\r\nWhen you fill in the table with the animal names and animal id numbers, SimBA takes your input to create the `/project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` file in your SimBA project. This file just contains a list that is organized in the order the data appears, in the format `BodypartName_AnimalIDNumber`:\r\n\r\n\"image\"\r\n\r\nIt is possible for you to modify this file post-hoc after it has been created, e.g. change whatever it says to:\r\n\r\n\"image\"\r\n\r\nPS. I think it should work with empty rows... the code drops any rows that are empty, but I'm not completely sure off the top of my head. \r\n\r\n\r\n", + "created_at": "2023-07-24T20:02:58Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson!\r\n\r\nYou are a machine - I (platonically) love you. I'll give it a shot :D (and report back what works in case it is helpful).", + "created_at": "2023-07-24T20:11:34Z", + "author": "rfkova" + }, + { + "body": "Please let me know! If there are errors, we can try to work through them 👍🏻 ", + "created_at": "2023-07-24T20:13:11Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson!\r\n\r\nThere is something messy happening with the import of the merged files that maybe you could help me clear up. \r\n\r\nSLEAP demands a single skeleton even if there are multiple instances, so right now I'm merging the files by filling in empty body parts.\r\n\r\nFor a single frame: \r\n- Instance_1 = 4-node_dam + nan_7_node_nest + track_1; \r\n- Instance_2 = nan_4-node_dam + 7_node_nest + track_2\r\n\r\nTo get SimBA to accept these files I have to say I have 2 animals with 11 body parts each. You are right, we can edit the body part csv down to 11 body parts of mixed identity/track after the fact; however, this leads to an error because the pose estimation import to simba has placed 3 columns for each body part. We have to remove 33 columns from the imported data to dispose of the inserted nan dummy data that sleap needed to assist the merge. \r\n**Then everything works!**\r\n\r\nSince I want to run multiple, parallel instances of this script to process lots of videos at once, I'll need both versions of the body part script and to call each when they are needed for import (22 bp) and outlier correction (11 bp). That, and we have this thing going on where I'll be editing the csv to have the right number of columns to match the second bp csv. \r\n\r\nIt is quite messy though. Do you have any better suggestions?\r\n\r\nI could change the data stored in the H5 file somehow so it is compatible with the 11 bp csv, but I would need to know how to tweak it for SimBA to make sense of it.\r\nI could also change my approach back to exporting an 11-node creature composed of the dam+nest if you think that will be easier to work from... 
thoughts?\r\n\r\nBest,\r\nRyan", + "created_at": "2023-07-25T21:37:42Z", + "author": "rfkova" + }, + { + "body": "Hi @rfkova ! \r\n\r\nI don't have a solution off the top of my head - this seems like an edge case, and I don't fully grasp the file structure. \r\n\r\nFastest way to solve this, perhaps: how about you share one of these h5 files, with an associated video (the shorter and smaller the better!), and I will have a go at typing up some code you can run outside of the GUI, in a notebook perhaps, to import it? ", + "created_at": "2023-07-25T22:02:40Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson!\r\n\r\nThat would be great! I put an h5 and associated video in [this Drive folder](https://drive.google.com/drive/folders/1bF6FJy8X8e0EWUhOVCObyZ3hd-BVN0X7?usp=sharing). \r\n\r\nLet me know if there's anything else I can provide. It is certainly an edge case! I think I can see the light, but honestly having anything more to work off of on the command line would be lovely, so thank you so much!\r\n\r\nBest,\r\nRyan", + "created_at": "2023-07-25T22:19:02Z", + "author": "rfkova" + }, + { + "body": "Perfect thank you! ", + "created_at": "2023-07-25T22:21:13Z", + "author": "sronilsson" + }, + { + "body": "Here is a method that worked for me. Be sure to update SimBA, as I had to insert an argument `joined_tracks` to fix the data based on your h5 files. \r\n\r\nhttps://simba-uw-tf-dev.readthedocs.io/en/latest/nb/import_sleap_h5.html\r\n\r\nSome notes:\r\n\r\n(i) I noticed a fair few frames without predictions for the nose and tail-base of the Dam. SimBA doesn't like missing data, so I interpolated the points with the nearest-in-time found body-parts.\r\n\r\n(ii) The tricky part of doing pose-estimation on objects like a nest is that, while the nest is clearly in a static location, the predicted locations jump around a little due to error in pose-estimation, so it looks a little odd. I tried to fix it with smoothing but it doesn't look much better after I visualize it… it might help if the smoothing `Time_window` parameter is increased to several seconds, but I didn't try; it is set to 200ms in the notebook. ", + "created_at": "2023-07-26T15:46:33Z", + "author": "sronilsson" + }, + { + "body": "There are a few pose-points for the nest missing too, but those interpolated points will be very good (I think), as the nest is static-ish, and unlikely to move around like an animal. ", + "created_at": "2023-07-26T15:50:30Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson!\r\n\r\nIncredible! I just tested it on my machine and it works great.\r\n\r\nIndeed, we are still adding to these models to improve them! I wanted to get this part of the workflow tested and to see what the output would look like with interpolation and outlier correction as well, to see how close we might be on the model side and get a feel for how much effort we can expect to train SimBA classifiers. \r\n\r\nThis is developmental stuff, so we record 17d (24h) of the rearing cycle, so the nests will move and change over hours/days/cage changes (and across cages). 
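On the `Time_window` remark above: for a near-static object like a nest, a window of several seconds can make sense. A hypothetical sketch of converting a time window to frames and applying a rolling-median smooth with pandas (not the notebook's code; the fps, window, and file names are placeholders):

```python
import pandas as pd

FPS = 30                # assumed frame rate
TIME_WINDOW_MS = 3000   # several seconds, as suggested for a near-static nest

window = max(1, int(FPS * TIME_WINDOW_MS / 1000))  # 90 frames here

df = pd.read_csv("nest_pose.csv")  # hypothetical flat CSV of x/y columns
xy_cols = [c for c in df.columns if c.endswith(("_x", "_y"))]

# A rolling median is robust to the jittery single-frame errors described
# above; center=True avoids lagging the nest position, and min_periods=1
# keeps the first/last frames populated.
df[xy_cols] = df[xy_cols].rolling(window, min_periods=1, center=True).median()
df.to_csv("nest_pose_smoothed.csv", index=False)
```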
We are still trying to figure out where it might make sense to make multiple models, as the background and pups change shape and color over days, but currently we have a single integrated model for all rearing days and some of the predicted labels are a little rough still, as you note!\r\n\r\nThis is huge, it will be a great document to work off of for us, thank you so much!!!\r\n\r\nBest,\r\nRyan", + "created_at": "2023-07-26T18:12:36Z", + "author": "rfkova" + }, + { + "body": "Thanks for letting me know @rfkova, and thanks for letting me know about the context! With such long recordings, if disk space becomes an issue, `parquets` can help over CSVs, usually about 10x smaller. But perhaps save that fight till/if it becomes an issue :)", + "created_at": "2023-07-26T18:33:58Z", + "author": "sronilsson" + }, + { + "body": "Yes, @sronilsson, of course! \r\n\r\nWe liked the csv during this initial phase, while still getting a feel for things, because they are so much more transparent/editable and help us understand what is happening under the hood. Especially since we are playing with labeling behaviors in the GUI, it will be trivial to script the transfer of those labels to improve incoming pose-estimations and overwrite prior label files in the project directory:\r\nI'm already eyeing simba.utils.read_write.convert_csv_to_parquet(), haha!\r\n\r\nThe way your software is so modular is such an asset! Thank you for all your work and for being so responsive - I'm humbled by your efforts.\r\n\r\n<3,\r\nRyan", + "created_at": "2023-07-26T19:08:41Z", + "author": "rfkova" + }, + { + "body": "Hi @sronilsson!\r\n\r\nI got so fixated on getting the split-track data into SimBA that I lost the plot a little. You had mentioned that the interpolation, smoothing, and outlier correction can work on the animals/tracks separately, but now looking at the functions themselves I'm pretty confident in venturing that, although they will operate on the tracks independently in a sense, they will all work from the same parameters. As you noticed, it may be advantageous for us to use different smoothing/interpolation and outlier correction parameters for the nest and the dam. \r\n\r\nDo you think it is possible to add an argument to allow the passing of a list of parameters corresponding to each animal/track rather than a unitary value (for smoothing and interpolation, especially)? Looking at the functions I can see it isn't dead simple, but I figured it was worth asking.\r\n\r\nBest,\r\nRyan\r\n", + "created_at": "2023-07-27T16:04:45Z", + "author": "rfkova" + }, + { + "body": "Ah yeah, you're right... I think the smoothing and interpolation accept a dataframe... and will apply the same rules to either each of the tracks or each body-part. It will be quite messy to add an argument to the methods that exist already; it's probably easier to write an additional method that does different rules for different tracks or body-parts. The argument should probably be a dictionary with the track name or body-part as key, and the interpolation or smoothing method as values (roughly as in the sketch below)... I could type up a method, and think about how to get it into the GUI later. Smoothing or interpolating with different methods will be a rather big pop-up menu, as there can be as many entry-boxes and dropdowns as there are body-parts. ", + "created_at": "2023-07-27T18:51:02Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson,\r\n\r\nAgreed, the GUI adaptation sounds complex and fortunately, I have no need for it. 
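The per-track dictionary floated above might look something like this - a purely hypothetical sketch, not an existing SimBA API. It assumes columns are prefixed with the track name (e.g. `dam_Nose_x`), and the settings values are invented for illustration:

```python
import pandas as pd

FPS = 30
# Hypothetical per-track settings: track name -> interpolation + smoothing.
SETTINGS = {
    "dam":  {"interp": "linear",  "smooth_ms": 200},
    "nest": {"interp": "nearest", "smooth_ms": 3000},
}

def process(df: pd.DataFrame) -> pd.DataFrame:
    for track, cfg in SETTINGS.items():
        cols = [c for c in df.columns if c.startswith(f"{track}_")]
        # Different interpolation rule per track ('nearest' needs scipy installed).
        df[cols] = df[cols].interpolate(method=cfg["interp"])
        # Different smoothing window per track, converted from ms to frames.
        win = max(1, int(FPS * cfg["smooth_ms"] / 1000))
        df[cols] = df[cols].rolling(win, min_periods=1, center=True).mean()
    return df
```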
I would like to apply the different parameters to tracks, but I could see how a body-parts option could be useful if a particular appendage or set of appendages were differentially impacted by smoothing/interpolation parameters!\r\n\r\nI will be unable to mess with this again until Aug 6th (vacation!), in case you want to take your time or prioritize other things, but a method to do this would be of great use to us!\r\n\r\nPS: This is so cool: out of sheer bloody-mindedness we added some behavior labels to our best pose-estimation videos, and even with like 5 or 6 videos labeled it's starting to accurately identify nest occupancy and feeding behaviors, even with the default feature extractors. Not perfect by any means, and I fully intend to write or adapt more appropriate feature extractors (as you suggested), but I just wanted to share our excitement that even with non-ideal conditions it already looks hopeful!", + "created_at": "2023-07-27T19:55:39Z", + "author": "rfkova" + }, + { + "body": "Great! I will come back to it, there are a few asks looming, but I will let you know next week.", + "created_at": "2023-07-27T21:30:55Z", + "author": "sronilsson" + }, + { + "body": "@rfkova - happy holidays :) There is an example notebook [here](https://simba-uw-tf-dev.readthedocs.io/en/latest/nb/advanced_smoothing_interpolation.html). Let me know how it runs!", + "created_at": "2023-07-29T17:24:37Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson, \r\n\r\nHappy indeed! Got this on vacation and it made my day (even more so than the vacation!). Just got back. I wanted to use this as part of an import, so I just made a temporary folder to run these functions on a single file in that directory, then move that file once interpolation and smoothing are complete, and it seems to work - THANK YOU SO MUCH! \r\n\r\nNow I'm looking at outlier correction and I think I have the same problem, where I would like to make different settings for the nest and the dam. There are some general values that may help even if I wasn't able to set them independently. Is there a way to do that or am I making yet another feature request of you?\r\n\r\nBest,\r\nRyan", + "created_at": "2023-08-09T15:37:26Z", + "author": "rfkova" + }, + { + "body": "Hi @rfkova and welcome back :) You're right again, the function will apply the same heuristic outlier rules to all animals in all files inside the `project_folder/csv/input_csv` directory. I appreciate the feedback and want the code to work, so I don't mind typing up another notebook.\r\n\r\n", + "created_at": "2023-08-09T16:34:18Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson. Good to be back :) \r\n\r\nIf you are up to it, a notebook would be great! \r\n\r\nI'm already using the prior script/notebook that applies to a whole directory of files, in such a way that if you make the outlier correction function just like that I can slot it in, no trouble - that way it will be consistent with the advanced interpolation/smoothing you made previously. ", + "created_at": "2023-08-10T15:54:02Z", + "author": "rfkova" + }, + { + "body": "There is a notebook [HERE](https://simba-uw-tf-dev.readthedocs.io/en/latest/nb/advanced_outlier_correction.html) - please let me know if there are any issues!\r\n\r\n/Simon", + "created_at": "2023-08-11T18:41:33Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson, \r\n\r\nLooks great!!! ... but I'm having problems accessing the new functions. 
I have Simba-UW-tf-dev 1.71.4 freshly installed but:\r\n\r\n>>> from simba.outlier_tools.outlier_corrector_movement_advanced import OutlierCorrecterMovementAdvanced\r\nTraceback (most recent call last):\r\n File \"\", line 1, in \r\nModuleNotFoundError: No module named 'simba.outlier_tools.outlier_corrector_movement_advanced'\r\n\r\nIt is very possible there is something dumb happening on my end, so please keep that in mind. Here are my steps with this and previous notebooks:\r\n1. I've looked for the most recent version of Simba-UW-tf-dev on pypi.org\r\n2. `conda activate simba`\r\n3. pip install simba-uw-tf-dev==1.71.4\r\n4. Installation completes successfully\r\n5. Verify with pip list, see Simba-UW-tf-dev 1.71.4\r\n6. Get that error (above) after activating simba in VS Code with the simba interpreter/environment\r\n\r\n<3 the notebooks and the scripting!\r\nRyan", + "created_at": "2023-08-11T20:23:59Z", + "author": "rfkova" + }, + { + "body": "ah you need version **above** 1.71.4. If you do `pip install simba-uw-tf-dev --upgrade` to get latest version, and then try, how does it look? ", + "created_at": "2023-08-11T21:17:40Z", + "author": "sronilsson" + }, + { + "body": "Oho! Now we're cooking with gas, thinking with portals, and also other appropriate idioms! \r\n\r\nPypi.org must have smiled upon me the other day looking for the latest version, but the --upgrade flag argument just can't be beat. \r\n\r\nIt works! :D\r\n\r\nIt looks lovely. Thank you for the assist!", + "created_at": "2023-08-11T21:31:00Z", + "author": "rfkova" + } + ] + }, + { + "title": "Error message while trying to train a classifier refer to the memory", + "body": "When trying to train the classifier i get error:\r\nTraceback (most recent call last):\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 365, in \r\n button_trainmachinemodel = Button(label_trainmachinemodel,text='TRAIN SINGLE MODEL (GLOBAL ENVIRONMENT)',fg='blue',command = lambda: threading.Thread(target=self.train_single_model(config_path=self.config_path)).start())\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 580, in train_single_model\r\n model_trainer = TrainRandomForestClassifier(config_path=config_path)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\model\\train_rf.py\", line 65, in __init__\r\n self.data_df = self.check_raw_dataset_integrity(df=self.data_df, logs_path=self.logs_path)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\mixins\\train_model_mixin.py\", line 1144, in check_raw_dataset_integrity\r\n nan_cols = df.reset_index(drop=True).replace([np.inf, -np.inf, None], np.nan).columns[df.isna().any()].tolist()\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\frame.py\", line 4278, in replace\r\n method=method,\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\generic.py\", line 6741, in replace\r\n to_replace=to_replace, value=value, inplace=inplace, regex=regex\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 588, in replace\r\n return self.apply(\"replace\", value=value, **kwargs)\r\n File 
\"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 438, in apply\r\n applied = getattr(b, f)(**kwargs)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 804, in replace\r\n convert=convert,\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 2953, in replace\r\n regex=regex,\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 3024, in _replace_single\r\n to_replace, value, inplace=inplace, filter=filter, regex=regex\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 832, in replace\r\n b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 832, in \r\n b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 2840, in convert\r\n blocks = self.split_and_operate(None, f, False)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 493, in split_and_operate\r\n nv = f(m, v, i)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 2831, in f\r\n values = fn(v.ravel(), **fn_kwargs)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\dtypes\\cast.py\", line 846, in soft_convert_objects\r\n values = lib.maybe_convert_objects(values, convert_datetime=datetime)\r\n File \"pandas/_libs/lib.pyx\", line 1990, in pandas._libs.lib.maybe_convert_objects\r\n**MemoryError: Unable to allocate 10.7 MiB for an array with shape (1397128,) and data type datetime64[ns]\r\nException in Tkinter callback**\r\n\r\nDesktop\r\n - OS: Windows\r\n - Python Version 3.6.13\r\n - using anaconda\r\n\r\n\r\n**Additional context**\r\nI am using 64-bit version of python, and have enough memory space in my computer.\r\n", + "user": "briki1234", + "reaction_cnt": 0, + "created_at": "2023-07-11T08:46:42Z", + "updated_at": "2023-07-15T16:40:34Z", + "author": "briki1234", + "comments": [ + { + "body": "Hi @briki1234! I haven't seen this one before. This error happens when reading in all the files in the `project_folder/csv/targets_inserted` directory. Before starting to train the classifier, a check is run to make sure that all the columns and rows contain values. However, SimBA seems unable to run this check, and for some reason, you end up with a single field of datetime values. \r\n\r\nThe data in the `project_folder/csv/targets_inserted` directory not look very large, is there any chance you could share it and I can take a look?\r\n\r\n", + "created_at": "2023-07-11T12:55:22Z", + "author": "sronilsson" + }, + { + "body": "Actually there are 4.41GB of files in this folder(80 files) - even uploading 1 file is failed (50-70 MB each)\r\nThe issue is that 2 classifier in this project finished training successfuly, but in the third(the error that I'm talking about) the error occured.\r\n", + "created_at": "2023-07-11T13:07:15Z", + "author": "briki1234" + }, + { + "body": "Got it, is there anything odd with the third classifier annotation column? 
Is there any way this column could have been mistaken for a datetime column in any of the files in the `project_folder/csv/targets_inserted` directory? E.g., could this column in any of the files have been mistakenly transformed to date format, or has any value sneaked in that is not a `0` or a `1`? ", + "created_at": "2023-07-11T13:17:48Z", + "author": "sronilsson" + }, + { + "body": "PS. If you need it, I have a python script somewhere that could help", + "created_at": "2023-07-11T13:54:32Z", + "author": "sronilsson" + }, + { + "body": "> PS. If you need it, I have a python script somewhere that could help\r\n\r\nI would love to get it and use it to check the issue.", + "created_at": "2023-07-11T14:24:09Z", + "author": "briki1234" + }, + { + "body": "Open this file and edit two rows near the top. Change the `DATA_DIRECTORY` to be the full path to your `project_folder/csv/targets_inserted` directory, and the `CLASSIFIER_NAME` to be the name of your classifier. \r\n\r\nIn your SimBA environment, navigate to the folder where you store the file, and run `python catch_error_annotation_field.py`. Let me know what you see printed out; it should print out an error for any odd values it finds and which files the errors are found in:\r\n\r\n[catch_error_annotation_field.py.zip](https://github.com/sgoldenlab/simba/files/12016203/catch_error_annotation_field.py.zip)\r\n\r\n", + "created_at": "2023-07-11T15:10:41Z", + "author": "sronilsson" + }, + { + "body": "\"COMPLETE: 0 error(s) found\"\r\n\r\nIt seems like all files are valid. ", + "created_at": "2023-07-11T16:12:22Z", + "author": "briki1234" + }, + { + "body": "I see, what is the classifier name? Could there be any oddities in the classifier name, hyphens etc., that the code struggles with?", + "created_at": "2023-07-11T16:23:27Z", + "author": "sronilsson" + }, + { + "body": "The classifier name is \"On_Restrainer_Half\"; I already trained the classifier \"On_Restrainer_Full\" successfully, so there shouldn't be any oddities", + "created_at": "2023-07-11T16:26:34Z", + "author": "briki1234" + }, + { + "body": "Thanks @briki1234 - my guess then, considering the data size, is that it is a memory error as it says. How much RAM do you have on the machine?", + "created_at": "2023-07-11T16:34:41Z", + "author": "sronilsson" + }, + { + "body": "16GB of RAM\r\nbut it is using the same RAM and memory as the other classifiers I trained, which is a bit strange.", + "created_at": "2023-07-11T16:37:58Z", + "author": "briki1234" + }, + { + "body": "Yes. Perhaps, if you train one or two classifiers, data is read into memory for those classifiers and not completely cleared, or some other processes are not terminated completely. You get to the third classifier, and you then hit the 16gb threshold. 
If you kill all python processes, or restart if possible, and only train the third classifier, does it still fail?", + "created_at": "2023-07-11T16:41:37Z", + "author": "sronilsson" + }, + { + "body": "It's a good question, I will try and update!", + "created_at": "2023-07-11T16:48:19Z", + "author": "briki1234" + }, + { + "body": "It worked, and the training ran until the end, when \"Saving model meta data file...\" is printed, but in the cmd an error appeared:\r\nTraceback (most recent call last):\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 365, in \r\n button_trainmachinemodel = Button(label_trainmachinemodel,text='TRAIN SINGLE MODEL (GLOBAL ENVIRONMENT)',fg='blue',command = lambda: threading.Thread(target=self.train_single_model(config_path=self.config_path)).start())\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 583, in train_single_model\r\n model_trainer.save_model()\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\model\\train_rf.py\", line 236, in save_model\r\n self.save_rf_model(self.rf_clf, self.clf_name, self.model_dir_out)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\mixins\\train_model_mixin.py\", line 729, in save_rf_model\r\n pickle.dump(rf_clf, open(save_path, 'wb'))\r\nMemoryError\r\n\r\n\r\nNotice that I have more than 100GB of memory available.\r\n\r\nDo you recognize this error?", + "created_at": "2023-07-12T07:17:22Z", + "author": "briki1234" + }, + { + "body": "@briki1234 This is still a RAM issue. \r\n\r\nWhen converting the model to something that can be stored on the hard drive, available RAM runs out. There are a few settings that affect the size of a model, the biggest probably being the number of estimators or trees. Often, using say 500 trees won't affect performance too much relative to 2k trees, but will save you a lot of space and memory. See if you can get it working with fewer estimators. \r\n\r\nNot sure why this model is more difficult to fit in memory than the others. If it's the same number of estimators, it could be related to the amount of data: if the other models have greater undersampling. \r\n\r\n", + "created_at": "2023-07-12T12:41:42Z", + "author": "sronilsson" + }, + { + "body": "I used 500 estimators and the error message didn't appear! I appreciate your help, thank you!", + "created_at": "2023-07-13T18:49:54Z", + "author": "briki1234" + }, + { + "body": "Thanks for letting me know!", + "created_at": "2023-07-13T19:56:10Z", + "author": "sronilsson" + } + ] + }, + { + "title": "issue with 1 fps video processing", + "body": "Hello, \r\n\r\nWorking on Windows 10, SimBA v.1.63.6. \r\nI'm working on CSV files from a single-animal DLC project. \r\n\r\nI'm once again facing an issue with a 1 fps video. \r\nEverything runs well when starting the creation of a new project: importing videos and CSVs (with interpolation and smoothing correction) and setting the video parameters. But as I click to set the features for outlier correction, the terminal returns:\r\n![Simba](https://github.com/sgoldenlab/simba/assets/66886884/46110c36-1a06-4b54-bc3a-e346e1cf1122)\r\n\r\nLast time, the issue was happening when drawing ROIs, basically because I did not properly run outlier corrections. Now, this happens even before I had a chance to run it. 
I tried to go ahead, and I can proceed (outlier correction concludes), but then at each step (ROI drawing) I get this message for every video. Given the time it takes me to draw ROIs and set the features, I'd like to be sure that my project is not wrong somehow before going ahead... \r\n\r\nFYI, here is the videos info file:\r\n![Simba](https://github.com/sgoldenlab/simba/assets/66886884/5274fc71-6e8f-4749-824a-7fd6e549ce1e)\r\n\r\nEverything looks good to me here... \r\nThank you for the help!\r\nBest\r\n\r\n\r\n\r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-07-03T12:34:07Z", + "updated_at": "2023-08-14T19:42:10Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli! \r\n\r\nYes, it's a warning; I don't think you will see any issues. Are you just working with ROIs, no classifiers?\r\n\r\nTo be sure, can you test on a couple of videos before doing the entire analysis? \r\n\r\nThe warning is shown because of classifier issues downstream. If you want to calculate e.g. the movement of the animal in 500 millisecond rolling bins, then we can't, because that would be 0.5 frames, and classifier creation will break. ", + "created_at": "2023-07-03T12:38:51Z", + "author": "sronilsson" + }, + { + "body": "Ok, good news. Indeed I intend to use only ROIs, so it should be good. \r\nI'll keep you posted, \r\n\r\nThank you, \r\nBest,", + "created_at": "2023-07-03T13:00:08Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "I failed to install SimBA on MacOS Ventura 13.4.1", + "body": "Hello, \r\n\r\nI tried to follow these steps:\r\n\r\n\r\n Create an environment for simba using anaconda terminal.\r\n\r\n In the terminal type, pip install simba-uw-tf-dev\r\n\r\n Then, conda install -c anaconda python.app\r\n\r\n Then, conda install matplotlib\r\n\r\n Then, conda uninstall shapely\r\n\r\n Then, conda install -c conda-forge shapely\r\n\r\n Then, pip install shap\r\n\r\n Lastly, pip install h5py\r\n\r\n In the terminal, type in simba to test if it works.\r\n\r\nBut here is what I get:\r\n\"Screenshot\r\n\r\nI checked and both pip and setuptools are up to date. Do you have an idea what is going wrong here?\r\n\r\nThank you, \r\nBest,", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-07-03T09:34:38Z", + "updated_at": "2023-07-05T12:37:20Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli! It looks like you're running Python 3.9. How does it look when you run it in 3.6?\r\n\r\nPS. The latest SimBA should run in Python >3.6, including 3.9, but requires a slightly different installation, including downgrading setuptools and pip I think. But I haven't had time to write the documentation. If you really need 3.9 I can write it. 
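The 1 fps warning discussed in the thread above boils down to arithmetic: rolling time windows are converted to whole frame counts via the fps, and sub-second windows truncate to zero frames at 1 fps. A small illustrative calculation (not SimBA's actual code):

```python
# How a time window (ms) maps to a whole number of frames at a given fps.
for fps in (30, 1):
    for window_ms in (500, 1000, 2000):
        frames = int(fps * window_ms / 1000)
        note = "OK" if frames >= 1 else "BREAKS: truncates to 0 frames"
        print(f"fps={fps:>2}  window={window_ms:>4}ms -> {frames} frame(s)  {note}")
# At 1 fps, a 500ms bin is 0.5 frames -> 0 after truncation, which is why
# sub-second classifier features cannot be computed for such videos.
```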
", + "created_at": "2023-07-03T12:30:30Z", + "author": "sronilsson" + }, + { + "body": "Indeed, I tried to specify the python version, but somehow I cannot do it:\r\n\r\n\"Screenshot\r\n", + "created_at": "2023-07-03T12:56:40Z", + "author": "DorianBattivelli" + }, + { + "body": "Using \"python3.6 pip install simba-uw-tf-dev\" command, it looks that it moves forward, but I still face the issue:\r\n\"Screenshot\r\n", + "created_at": "2023-07-03T13:06:27Z", + "author": "DorianBattivelli" + }, + { + "body": "I can't see it in the second screengrab, but in the first, you have created a conda environment for simba using python 3.9:\r\n\r\n\"image\"\r\n\r\nInstead, when creating the conda environment, do e.g.,:\r\n\r\n1) conda create -n simba_env python=3.6 anaconda\r\n2) conda activate simba_env\r\n3) pip install simba-uw-tf-dev\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-07-03T13:17:06Z", + "author": "sronilsson" + }, + { + "body": "Everything run well at first, but an error popped out at the end, and it seems to prevent SimBA to work properly: \r\n \r\n\"Screenshot\r\n", + "created_at": "2023-07-03T13:54:32Z", + "author": "DorianBattivelli" + }, + { + "body": "Ohh wait maybe I should go through the following commands (matplotlib etc.), I'm giving a go.. ", + "created_at": "2023-07-03T13:58:34Z", + "author": "DorianBattivelli" + }, + { + "body": "maybe use `pip install simba-uw-tf-dev --ignore-installed`", + "created_at": "2023-07-03T14:07:17Z", + "author": "sronilsson" + }, + { + "body": "Still not working :/\r\n\"Screenshot\r\n", + "created_at": "2023-07-03T14:24:02Z", + "author": "DorianBattivelli" + }, + { + "body": "I think this is a pandas and numpy version mismatch, which version of numpy and pandas do you have `pip show numpy` and `pip show pandas` ?", + "created_at": "2023-07-03T14:52:12Z", + "author": "sronilsson" + }, + { + "body": "Numpy: 1.18.1\r\nPandas: 1.0.5", + "created_at": "2023-07-03T14:54:29Z", + "author": "DorianBattivelli" + }, + { + "body": "If you try: `pip install pandas==0.25.3`, how does it look?", + "created_at": "2023-07-03T15:09:13Z", + "author": "sronilsson" + }, + { + "body": "Here again I have error:\r\n\"Screenshot\r\n\r\n", + "created_at": "2023-07-04T07:29:47Z", + "author": "DorianBattivelli" + }, + { + "body": "Tro do do `pip uninstall matplotlib` then `conda install matplotlib `", + "created_at": "2023-07-04T10:52:42Z", + "author": "sronilsson" + }, + { + "body": "To resume, I did from scratch:\r\n\r\n conda create -n simba_env python=3.6 anaconda\r\n conda activate simba_env\r\n pip install simba-uw-tf-dev --ignore-installed\r\n pip install pandas==0.25.3\r\n pip uninstall matplotlib\r\n conda install matplotlib\r\n\r\nAnd here what I get:\r\n\r\n\"Screenshot\r\n", + "created_at": "2023-07-04T11:25:57Z", + "author": "DorianBattivelli" + }, + { + "body": "Did conda install matplotlib run properly? This suggests you don't have matplotlib in the environment. 
Did you type `y` afterwards to confirm the install?", + "created_at": "2023-07-04T12:05:07Z", + "author": "sronilsson" + }, + { + "body": "I just relaunched the command, and here is another error:\r\n\"Screenshot\r\n", + "created_at": "2023-07-04T12:39:28Z", + "author": "DorianBattivelli" + }, + { + "body": "This is a shapely error again, try:\r\n\r\npip uninstall shapely\r\nconda install shapely ", + "created_at": "2023-07-04T13:05:29Z", + "author": "sronilsson" + }, + { + "body": "Then the terminal returns:\r\n\"Screenshot\r\n", + "created_at": "2023-07-04T13:13:25Z", + "author": "DorianBattivelli" + }, + { + "body": ":) This is an sklearn error; which version of scikit do you have? `pip show scikit-learn`", + "created_at": "2023-07-04T14:19:13Z", + "author": "sronilsson" + }, + { + "body": "0.23.1", + "created_at": "2023-07-04T14:21:38Z", + "author": "DorianBattivelli" + }, + { + "body": "If you do `pip uninstall imblearn` then `pip install imblearn`?", + "created_at": "2023-07-04T14:34:03Z", + "author": "sronilsson" + }, + { + "body": "Damn :)\r\n\"Screenshot\r\n", + "created_at": "2023-07-04T14:36:14Z", + "author": "DorianBattivelli" + }, + { + "body": "can you try `pip install scikit-learn==0.22.2` ?", + "created_at": "2023-07-04T15:18:02Z", + "author": "sronilsson" + }, + { + "body": "It's giving the same outcome\r\n\"Screenshot\r\n", + "created_at": "2023-07-04T15:33:55Z", + "author": "DorianBattivelli" + }, + { + "body": "Does any of these potential fixes do it? https://stackoverflow.com/questions/56549270/importerror-cannot-import-name-multioutputmixin-from-sklearn-base", + "created_at": "2023-07-04T15:47:06Z", + "author": "sronilsson" + }, + { + "body": "Ok, I managed to install it; what I did:\r\n- Open anaconda navigator\r\n- Choose create environment from there\r\n- Specify Python 3.6.13\r\n\r\nAnd then ran all these commands from this env:\r\n\r\npip install simba-uw-tf-dev\r\n\r\nThen, conda install -c anaconda python.app\r\n\r\nThen, conda install matplotlib\r\n\r\nThen, conda uninstall shapely\r\n\r\nThen, conda install -c conda-forge shapely\r\n\r\n\r\n\r\nNot sure what solved the problem here... :D\r\n\r\nThank you!", + "created_at": "2023-07-04T16:51:27Z", + "author": "DorianBattivelli" + }, + { + "body": "Great! ", + "created_at": "2023-07-04T19:18:42Z", + "author": "sronilsson" + }, + { + "body": "Just to let you know, it's working, but when I'm launching the program I see some errors in the terminal:\r\n\r\n\"Screenshot\r\n\r\nDo you think it can be problematic for a stable use of SimBA?", + "created_at": "2023-07-04T23:37:28Z", + "author": "DorianBattivelli" + }, + { + "body": "You should be OK, it's just a warning that it might break if you update matplotlib to a newer version", + "created_at": "2023-07-05T12:36:57Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Do non-classified frames affect the training model?", + "body": "Hi, when inserting data which is not fully classified (for example, 10 minutes out of 20 of the video are classified) - do the empty parts of the video negatively affect the classifier results when I train the model based on this data? \r\n\r\nAnother question - when I insert annotations into SimBA, the output file does not contain enough frames with \"1\"s in each behavior column, despite there being a lot of them in the annotations file. What could be the reason? 
\r\nThank you!", + "user": "BarakMalul", + "reaction_cnt": 0, + "created_at": "2023-06-26T07:43:11Z", + "updated_at": "2023-07-01T13:37:01Z", + "author": "BarakMalul", + "comments": [ + { + "body": "Hi @BarakMalul!\r\n\r\nFor the first question: If you annotate say the first half of the video, and do not annotate (not look at) the second half of the video, then watch out SimBA will assume that the second half of the video contains no behavior of interest (second half will be all `0`). Check out [advanced labelling](https://github.com/sgoldenlab/simba/blob/master/docs/advanced_labelling.md) for a different behavior of the annotator that might be more suitable. \r\n\r\nFor the second question, is there an error msg associated with this, can you past it here?", + "created_at": "2023-06-26T13:10:23Z", + "author": "sronilsson" + }, + { + "body": "Does the non classified annotation will cause significantly worse reault of the classifier?\r\nShould I consider trim the videos to the annotated parts?\r\n\r\nAnd I got no error, but I get '0' in times that I classified. ", + "created_at": "2023-06-26T14:30:29Z", + "author": "BarakMalul" + }, + { + "body": "yes trimming is a good idea, if your video contains long sequences you do not want to annotate. \r\n\r\nI wrote a little bit about this here, let me know if makes sense: https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#8-my-videos-are-very-long-and-can-be-a-pain-to-annotate-in-the-simba-annotation-gui-can-i-skip-annotating-some-frames-and-still-build-an-accurate-classification-model-based-on-annotatednot-annotated-frames", + "created_at": "2023-06-26T14:33:42Z", + "author": "sronilsson" + }, + { + "body": "I'll check it out!\r\nAbout thw second issue- Is there any factor that can lead to the '0' that appear despite behaviors which annotated? ", + "created_at": "2023-06-26T14:53:10Z", + "author": "BarakMalul" + }, + { + "body": "Let's make sure I understand: do you check the behavior, and click save the file, then you open the file in a spreadsheet viewer (e.g., MS Excel), and you see `0` for that frame in the relevant column?", + "created_at": "2023-06-26T14:57:31Z", + "author": "sronilsson" + }, + { + "body": "Because the manual labeling in the annotaions is not per frame but per time so the problem is that for example despite I encoded behavior like Fighting for 2 seconds under start and point I can't see any '1' when I'm looking at the file that created when I appended the boris file", + "created_at": "2023-06-26T15:51:07Z", + "author": "BarakMalul" + }, + { + "body": "Got it, did you see any warning msgs when you appended your BORIS annotations? \r\n\r\nThe first thing that comes to mind is say you recorded `Fighting` to start at `00:05:03` and end at `00:05:05`, SimBA will look at the FPS of your video to match the time stamps to frame numbers. However, if the FPS of your video is not the same as the video analysed in pose-estimation there will be a mismatch. \r\n\r\nBut you should see warning if that happens.", + "created_at": "2023-06-26T15:58:15Z", + "author": "sronilsson" + }, + { + "body": "Sorry for the late response. In the meantime, I have trimmed the videos, and the results are now great!\r\nI would love to know how to analyze **new videos** (I couldn't find the information in the tutorials). 
What steps should I take to extract frames *only from a new video* in order to use it in the 'Validate Model On Single Video\" part?", + "created_at": "2023-07-01T13:04:48Z", + "author": "BarakMalul" + }, + { + "body": "Hi @BarakMalul - I think the relevant documentation is in the link below, but let me know if there is something specific missing or unclear. \r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/Scenario2.md", + "created_at": "2023-07-01T13:37:01Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Impossible_to_draw_ROI", + "body": "Hello, I'm using SimBA 1.63.6, on windows 10, with multi-animal project from DLC.\r\n\r\nI'm facing an issue that was not happening before: I'm not able to draw ROIs anymore (I just updated from 1.59 to 1.63 - I precise that I did interpolation, smoothing, outlier correction etc. with 1.59 version, and then upgraded to 1.63 before proceeding to ROIs drawing). \r\nAnd I have no feedback from prompt nor SimBA main window terminal. It's just that when the cross appears to draw the dotes of a polygon, nothing happens when I click on the image.\r\n\r\n![Impossible_to_draw_ROI](https://github.com/sgoldenlab/simba/assets/66886884/fae84b7d-6e1a-432d-9113-bdff7180745a)\r\n\r\nWhat could be the issue here?\r\nThank you,\r\nBest,", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-06-22T10:33:32Z", + "updated_at": "2023-06-22T11:32:30Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "After 3 trials, it's now working, no idea what happened ... thank you!", + "created_at": "2023-06-22T10:43:34Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli, thanks for reporting! This could happen if the window frame with the video image is not selected prior to clicking the location of polygon vertices. It could also be that the vertices locations are too small and you could not see them, but maybe less likely. \r\n\r\nIf you click the window frame top, the banner with the title, or move window, prior to clicking the location, maybe that fixed it? ", + "created_at": "2023-06-22T11:32:30Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Existing Annotations Do Not Appear in GUI After Pseudo-Labeling or When Continuing Existing Annotations ", + "body": "Hi, I am having an issue with the behavioral annotations GUI. It appears that when labeling and pseudo labeling that annotations are being saved into targets inserted and when parsing through video frames from the labeled videos, the log states that the labeled behaviors are present. However, in the labeling GUI (both in continuing an existing annotation and the correct labeling after pseudo labeling), the boxes denoting the behaviors are not checked despite behaviors existing in the given frames (both as per the log and targets inserted). I have already verified that the behaviors are labeled and exist in the targets inserted directory, and this is confirmed by the statements within the log. I have also recently updated to the latest version of SIMBA and this has not resolved the issue. \r\n\r\nI would appreciate any assistance in solving the issue. Thank you for your help!\r\n\r\n**Desktop:**\r\n - OS: Windows 11\r\n - Python Version: 3.6\r\n - Are you using anaconda? Yes\r\n", + "user": "nsusic", + "reaction_cnt": 0, + "created_at": "2023-06-20T13:54:41Z", + "updated_at": "2023-06-23T17:38:54Z", + "author": "nsusic", + "comments": [ + { + "body": "Hi @nsusic! 
Many thanks for reporting this: I had another user report this a week back, I inserted a potential fix, but I never heard back from the user.. I assumed it was fixed but it appears not. I will see if I can replicate and remove this bug and let you know - I'm travelling for the next couple of days so may be a little slow. ", + "created_at": "2023-06-20T14:06:04Z", + "author": "sronilsson" + }, + { + "body": "No problem! I appreciate you taking the time to look into it! If there is anything I can do on my end to help troubleshoot or provide more information, please let me know! Thank you again!", + "created_at": "2023-06-20T14:33:43Z", + "author": "nsusic" + }, + { + "body": "@nsusic I think I figured it out... \r\n\r\nRecently, I updated the read and write CSV file function in simba to use `pyarrow` rather than pandas... it speeds things up significantly, but introduced this bug. When reading in the saved annotation CSV, pyarrow interpreted the annotation columns as `np.int64` type rather than an `int`. The checkboxes is looking for a `1` or `0` (if `1`, then tick it). As it is neither, it was never checked.. \r\n\r\nIf you upgrade simba with `pip install simba-uw-tf-dev --upgrade`, how doe sit look on your end? \r\n\r\n\r\n\r\n", + "created_at": "2023-06-20T19:08:19Z", + "author": "sronilsson" + }, + { + "body": "Unfortunately, it now appears that the GUI is failing to load entirely. A new window is launched when continuing annotations but the window does not contain anything. Please see the screenshot below. Let me know if there is anything else I can do to help troubleshoot!\r\n\r\n![Screenshot (1)](https://github.com/sgoldenlab/simba/assets/87246414/e7030f94-0796-4631-bcd8-9de45424ce0c)\r\n![image](https://github.com/sgoldenlab/simba/assets/87246414/cd9f5da2-a778-4be0-9bff-7f0dec1bb291)\r\n\r\n\r\n", + "created_at": "2023-06-21T20:18:52Z", + "author": "nsusic" + }, + { + "body": "Thanks for testing @nsusic, do you see any errors in the main OS terminal when this happens?", + "created_at": "2023-06-21T20:30:13Z", + "author": "sronilsson" + }, + { + "body": "Yes; this is the error in the main terminal.\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 320, in \r\n continuing=True))\r\n File \"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\labelling\\labelling_interface.py\", line 335, in select_labelling_video\r\n continuing=continuing)\r\n File \"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\labelling\\labelling_interface.py\", line 94, in __init__\r\n self.data_df_targets = self.data_df[self.target_lst]\r\n File \"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\core\\frame.py\", line 3001, in __getitem__\r\n indexer = self.loc._convert_to_indexer(key, axis=1, raise_missing=True)\r\n File \"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\core\\indexing.py\", line 1285, in _convert_to_indexer\r\n return self._get_listlike_indexer(obj, axis, **kwargs)[1]\r\n File \"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\core\\indexing.py\", line 1092, in _get_listlike_indexer\r\n keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing\r\n File 
\"C:\\Users\\nsusi\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\core\\indexing.py\", line 1185, in _validate_read_indexer\r\n raise KeyError(\"{} not in index\".format(not_found))\r\nKeyError: \"['standing'] not in index\"", + "created_at": "2023-06-21T20:33:25Z", + "author": "nsusic" + }, + { + "body": "Got it, is there any chance you where trouble shooting changing the project_config.ini, putting the classifier names in lists or similar? \r\n\r\nHow does this part of the project_config.ini look like on your end? \r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/2b079076-262e-4b0f-9b3b-1397dc327566)\r\n", + "created_at": "2023-06-21T20:59:31Z", + "author": "sronilsson" + }, + { + "body": "Alternatively! Could it be that you are continuing an annotation for a video, and one of your classifiers is called `standing`, but, in your project_folder/csv/targets_inserted file representing that video, there isn't a `standing` column? ", + "created_at": "2023-06-21T21:15:53Z", + "author": "sronilsson" + }, + { + "body": "Yes, it is definitely possible. I was attempting to work out of an old directory as the directory in which I was having the issue is not physically accessible at the moment. Unfortunately, I will need a day or two before I can troubleshoot on the PC that was originally having the issue. I hope that is not an issue. ", + "created_at": "2023-06-21T21:19:36Z", + "author": "nsusic" + }, + { + "body": "Sounds good! I could recreate it that way anyway, and I will insert a better error msg in meantime. ", + "created_at": "2023-06-21T23:48:10Z", + "author": "sronilsson" + }, + { + "body": "I just checked and the latest update seems to have fixed the issue! Thank you for your help!\r\n", + "created_at": "2023-06-23T16:35:11Z", + "author": "nsusic" + }, + { + "body": "Nice one, thanks for letting me know!", + "created_at": "2023-06-23T17:38:53Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Problem in training the model", + "body": "**Describe the bug**\r\nafter been through all the step to traing the model without getting any errors when I'm getting this error message:\r\n\r\n(simba-test) C:\\Users\\Barak Malul>simba\r\nC:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\sklearn\\utils\\deprecation.py:144: FutureWarning: The sklearn.metrics.classification module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.metrics. 
Anything that cannot be imported from sklearn.metrics is now part of the private API.\r\n warnings.warn(message, FutureWarning)\r\nSIMBA ERROR: Field name None could not be found in file C:\\Users\\Barak Malul\\Documents\\firstry\\project_folder\\csv\\targets_inserted\\Converted20220309_1340-1356_Cam05.csv error\r\nException in thread Thread-1:\r\nTraceback (most recent call last):\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\concurrent\\futures\\process.py\", line 272, in _queue_management_worker\r\n result_item = reader.recv()\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\multiprocessing\\connection.py\", line 251, in recv\r\n return _ForkingPickler.loads(buf.getbuffer())\r\nTypeError: __init__() missing 2 required positional arguments: 'column_name' and 'file_name'\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to Train machine model\r\n2. Click on Train single error\r\n\r\n**Expected behavior**\r\nsuccess in training the model \r\n\r\n**Desktop (please complete the following information):**\r\n - windows\r\n - Python Version 3.7\r\n - using anaconda\r\n \r\n\r\n**Additional context**\r\nAll of the values of column of desired Classifier in the csv file in the targets_inserted csv file are 0 from some reason despite there are mentioning of the specific behaviour in the BORIS imported file.\r\nthe warnings in the impoting of the annotations are: \r\nSIMBA WARNING: SIMBA THIRD-PARTY ANNOTATION WARNING: Annotations file for video Converted20220309_1340-1356_Cam05 has annotations for the following behaviors ['Allogrooming', 'Wrestling/ Rough play', 'Start scoring', 'Pinning', 'Interaction', 'Grooming', 'End scoring', 'Biting'] that are NOT classifiers named in the SimBA project. SimBA will OMIT appending the data for these 8 classifiers. ❗️\r\nSIMBA WARNING: SIMBA THIRD-PARTY ANNOTATION WARNING: SimBA found THIRD-PARTY annotations for behavior Anogenital sniffing in video Converted20220309_1340-1356_Cam05 that are annotated to occur at times which is not present in the video data you imported into SIMBA. The video you imported to SimBA has 24687 frames. However, in BORIS, you have annotated Anogenital sniffing to happen at frame number P. These ambiguous annotations occur in 6 different frames for video Converted20220309_1340-1356_Cam05 that SimBA will **remove** by default. Please make sure you imported the same video as you annotated in BORIS into SimBA and the video is registered with the correct frame rate. SimBA will only append annotations made to the frames present in the pose estimation data. ❗", + "user": "BarakMalul", + "reaction_cnt": 0, + "created_at": "2023-06-17T21:19:21Z", + "updated_at": "2023-06-18T20:39:13Z", + "author": "BarakMalul", + "comments": [ + { + "body": "Hi @BarakMalul! \r\n\r\nThe error at the top: `SIMBA ERROR: Field name None could not be found in file C:\\Users\\Barak Malul\\Documents\\firstry\\project_folder\\csv\\targets_inserted\\Converted20220309_1340-1356_Cam05.csv`\r\n\r\nThis happens when SimBA reads in all your CSV files in the `project_folder/csv/targets_inserted` directory. When it does this, a check is being run to make sure the annotation for the behavior are present in each file. 
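A minimal pandas sketch of that check, for illustration (the classifier name and project path are placeholders, and this is not SimBA's own code):

```python
# Sketch: confirm every targets_inserted file contains a column for the classifier,
# mirroring the check described above. CLF_NAME and PROJECT_DIR are placeholders.
import glob
import os
import pandas as pd

CLF_NAME = "Anogenital sniffing"
PROJECT_DIR = r"C:\my_project\project_folder"

for path in glob.glob(os.path.join(PROJECT_DIR, "csv", "targets_inserted", "*.csv")):
    df = pd.read_csv(path)
    if CLF_NAME not in df.columns:
        print(os.path.basename(path), "is missing the", CLF_NAME, "column")
    else:
        print(os.path.basename(path), "has", int(df[CLF_NAME].sum()), "annotated frames")
```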
In this case the classifier appears to be named `None`. SimBA checks for a column with the header `None`, it can't find it in file `Converted20220309_1340-1356_Cam05`, and that's then it throws you this error. \r\n\r\nI suspect the issue is that the classifier isn't called `None`, and that SimBA therefore is looking for the wrong column. If you are training a single classifier, `None` is the default value you haven't set the hyperparameters. \r\n\r\nTwo options to fix, (i) \r\nIn your `project_config.ini` file, navigate to the [create ensemble setting][classifier] entry and change it manually if it says `None` to the name of your classifier:\r\n\"image\"\r\n\r\nOr (ii)\r\nThe entry should be set in the GUI when you click `SAVE SETTINGS (GLOBAL ENVIRONMENT)` in the SETTING menu \r\n\r\n\"image\"\r\n\r\nIf you missed this, [create ensemble setting][classifier] will read None as it doesn't know which model it should be training. \r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-06-17T23:44:56Z", + "author": "sronilsson" + }, + { + "body": "Thank you! And do you have a way to fix the problem I discussed above in the \"Additional context\"? ", + "created_at": "2023-06-18T05:00:50Z", + "author": "BarakMalul" + }, + { + "body": "Is the name of the classifier `Anogenital sniffing`? Can you share your BORIS file for video Converted20220309_1340-1356_Cam05 here and I can take a look? SimBA finds a frame number called P, which is not a number. Not sure how that could happen and would be good to see the file. ", + "created_at": "2023-06-18T11:07:05Z", + "author": "sronilsson" + }, + { + "body": "The name of the classifier is Anogenital sniffing. the boris file which is imported is attached\r\n[Converted20220309_1340-1356_Cam05.csv](https://github.com/sgoldenlab/simba/files/11782242/Converted20220309_1340-1356_Cam05.csv)\r\nI would like to mention that I followed the steps from the tutorial and the csv created from the boris file is in another fomat so i made him fit to the example in the tutorial. this is the original export csv from the BORIS file without changes:\r\n[Converted20220207_0000-2359_Cam02 Pre CM, P1 (102.1 & 102.2).csv](https://github.com/sgoldenlab/simba/files/11782245/Converted20220207_0000-2359_Cam02.Pre.CM.P1.102.1.102.2.csv)\r\n", + "created_at": "2023-06-18T17:46:38Z", + "author": "BarakMalul" + }, + { + "body": "Thanks @BarakMalul, I will try with the [Converted20220309_1340-1356_Cam05.csv](https://github.com/sgoldenlab/simba/files/11782242/Converted20220309_1340-1356_Cam05.csv) and see how it goes:\r\n\r\nNOTE, since a couple of weeks, you should work if your BORIS files are **either** in [THIS](https://github.com/sgoldenlab/simba/blob/master/misc/boris_new_example.csv) or [THIS](https://github.com/sgoldenlab/simba/blob/master/misc/boris_example.csv) format.", + "created_at": "2023-06-18T18:00:32Z", + "author": "sronilsson" + }, + { + "body": "Hi @BarakMalul,\r\n\r\nIt seems like all your annotated anogenital sniffing events are `POINT` events. E.g., they do not have a start and an end-time, from the documentation: \r\n\r\n\"image\"\r\n\r\nIn your BORIS file though, all the anogenital sniffing events appears to be POINT events, without starts and stops:\r\n\r\n\"image\"\r\n\r\nI will insert better error msg so this is clearer.\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n", + "created_at": "2023-06-18T18:46:32Z", + "author": "sronilsson" + }, + { + "body": "It's worked! 
Thank you so much!\r\nis there a simple way to take into the training of the model the point which is not make a difference in time between every two point and lable \"start\" and \"point\"?", + "created_at": "2023-06-18T19:57:29Z", + "author": "BarakMalul" + }, + { + "body": "\r\nand another issue if it's possible - this error poped when start training the model:\r\n\r\n \r\nFile \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 361, in \r\n button_trainmachinemodel = Button(label_trainmachinemodel,text='TRAIN SINGLE MODEL (GLOBAL ENVIRONMENT)',fg='blue',command = lambda: threading.Thread(target=self.train_single_model(config_path=self.config_path)).start())\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 577, in train_single_model\r\n model_trainer.save_model()\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\model\\train_rf.py\", line 224, in save_model\r\n self.save_rf_model(self.rf_clf, self.clf_name, self.model_dir_out)\r\n File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\mixins\\train_model_mixin.py\", line 707, in save_rf_model\r\n pickle.dump(rf_clf, open(save_path, 'wb'))\r\nFileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\Barak Malul\\\\Documents\\\\firstry\\\\project_folder\\\\models\\\\generated_models\\\\Wrestling/ Rough play.sav'", + "created_at": "2023-06-18T19:59:31Z", + "author": "BarakMalul" + }, + { + "body": "Ah is the classifier called `Wrestling/ Rough play` ? If so, I think SimBA is having trouble with the final '/' in the path, it is trying to save the classifier named ` Rough play` in the directory `Wrestling`. That directory doesn't exist as it is part of the classifier name.", + "created_at": "2023-06-18T20:05:46Z", + "author": "sronilsson" + }, + { + "body": "```\r\nIt's worked! Thank you so much!\r\nis there a simple way to take into the training of the model the point which is not make a difference in time between every two point and lable \"start\" and \"point\"\r\n```\r\n\r\nI'm not sure I understand complete: do you want to convert every other `anogenital sniffing` POINT to START and STOP? I don't have any code for that in SimBA...", + "created_at": "2023-06-18T20:08:34Z", + "author": "sronilsson" + }, + { + "body": "> Ah is the classifier called `Wrestling/ Rough play` ?\r\n\r\nYes, I changed the classifier to Wrestling/ Rough play because it is describe behaviour that have start and end point in contrast to anogenital sniffing. ", + "created_at": "2023-06-18T20:13:15Z", + "author": "BarakMalul" + }, + { + "body": "> ```\r\n> It's worked! Thank you so much!\r\n> is there a simple way to take into the training of the model the point which is not make a difference in time between every two point and lable \"start\" and \"point\"\r\n> ```\r\n> \r\n> I'm not sure I understand complete: do you want to convert every other `anogenital sniffing` POINT to START and STOP? I don't have any code for that in SimBA...\r\n\r\nYes, ok I'll do it manually :) ", + "created_at": "2023-06-18T20:13:51Z", + "author": "BarakMalul" + }, + { + "body": "Sorry, updated the answer above while you where typing:\r\n\r\n**I think SimBA is having trouble with the final '/' in the path, it is trying to save the classifier named ` Rough play` in the directory Wrestling. 
That directory doesn't exist as it is part of the classifier name**\r\n\r\n\r\nI will insert a warning about that too", + "created_at": "2023-06-18T20:16:05Z", + "author": "sronilsson" + }, + { + "body": "It seems like it indeed was the problem. thank you so much for the detailed and supportive assistance. appriciate it!", + "created_at": "2023-06-18T20:39:13Z", + "author": "BarakMalul" + } + ] + }, + { + "title": "\"Visualise pose-estimation\" tool not working", + "body": "Dear SimBA team,\r\n\r\nI am trying to use a tool \"Visualise pose-estimation\", but it opens an empty window (screenshot below) and gives me the following error:\r\n\r\n`Exception in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Alisa\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\Alisa\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\ui\\pop_ups\\visualize_pose_in_dir_pop_up.py\", line 18, in __init__\r\n self.circle_size = Entry_Box(settings_frame, 'Circle size', 0, validation='numeric', labelwidth=20)\r\nTypeError: __init__() got multiple values for argument 'labelwidth'`\r\n\r\n\"image\"\r\n\r\nI would greatly appreciate your help!\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11 Pro\r\n - Python Version 3.6.13\r\n - Are you using anaconda? yes\r\n \r\n", + "user": "alisabak", + "reaction_cnt": 0, + "created_at": "2023-06-15T14:54:27Z", + "updated_at": "2023-06-20T07:57:53Z", + "author": "alisabak", + "comments": [ + { + "body": "Thank you @alisabak for reporting, we appreciate it! If you upgrade simba to version `1.63.0` with `pip install simba-uw-tf-dev --upgrade` or `pip install simba-uw-tf-dev==1.63.0`, how does it look on your end? ", + "created_at": "2023-06-15T15:47:43Z", + "author": "sronilsson" + }, + { + "body": "Thank you for such a quick fix! It works now.\r\n\r\nWould it be possible to visualize tracking for individual videos? I takes a lot of time to generate videos for the whole project.", + "created_at": "2023-06-15T17:22:00Z", + "author": "alisabak" + }, + { + "body": "Thanks @alisabak - if you temporarily move some files out of the input directory and only keep the ones you want to visualize in the input folder, you will only get some videos. \r\n\r\nBut yes, looking at this method, it looks a little dated and slow, no options to multi-process... I will see if I can update it and let you. ", + "created_at": "2023-06-15T17:37:42Z", + "author": "sronilsson" + }, + { + "body": "If you update SimBA again you should see this: \r\n\r\n\"image\"\r\n\r\nYou still can't visualize a single video in this method (it has to be a directory). But: if you select a higher number of cores in the `Core count` dropdown it will go much faster (follow progress in main OS terminal). \r\n\r\nSometimes users just want to check if the pose-estimation looks good following smoothing/interpolation using this method, and they may not need the entire video. If that's the case, use the `Video output sample sizes (s)` dropdown. Here, if you select e.g., 100, then only the first 100s of each video will be created and things will be a little faster.. 
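As an aside on the `Wrestling/ Rough play` save error earlier in this thread: stripping path separators from a classifier name before it is embedded in a save path avoids the problem. A hypothetical helper, not part of SimBA's API:

```python
# Sketch: make a classifier name safe to embed in a file path by replacing
# characters that Windows and POSIX treat as separators or reserved.
import re

def safe_clf_name(name: str) -> str:
    return re.sub(r'[\\/:*?"<>|]', "_", name).strip()

print(safe_clf_name("Wrestling/ Rough play"))  # prints: Wrestling_ Rough play
```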
Let us know if something's missing. Thanks again!\r\n\r\n\r\n\r\n", + "created_at": "2023-06-15T20:18:07Z", + "author": "sronilsson" + }, + { + "body": "Thank you @sronilsson, that is exactly what I needed!", + "created_at": "2023-06-20T07:57:53Z", + "author": "alisabak" + } + ] + }, + { + "title": "Behavioral Classifier for Flies: How to Label Data", + "body": "Hello everyone,\r\n\r\nI am working on creating behavioral classifiers for flies. My videos contain around 9 to 10 flies in each frame. When labeling data, we label the whole frame. However, I am wondering if the model will learn individually based on each fly's behavior, or if it will simply learn the whole frame. For example, if some flies are grooming, some are touching, and some are chasing, will the model be able to differentiate between which fly is doing which behavior?\r\n\r\nThe only option during labeling annotations is to select the frame and select the classifier. Will this work in my case, where there are 9 to 10 animals in each frame and each animal is behaving differently?\r\n\r\nThank you for your help!", + "user": "sahilsingh2402", + "reaction_cnt": 0, + "created_at": "2023-06-15T08:15:00Z", + "updated_at": "2023-09-10T19:54:42Z", + "author": "sahilsingh2402", + "comments": [ + { + "body": "Hi @sahilsingh2402 !\r\n\r\nI should make this clearer in the docs; the SimBA labelling GUI does not handle this use case well with more than 2 animals and classifier directionalities (animal 1 chasing animal 3 vs. animal 2 chasing animal 5 etc…) and also when there are many animals and classifications should be assigned to an individual (animal 1 grooming vs animal 2 grooming etc…). The number of classification permutations quickly blows up. I don’t have a solution inside SimBA sorry, but can tell you about how I’ve solved this in the past...\r\n\r\nFor classifiers that only involve a single animal (e.g. grooming), I've (i) filtered the pose-estimation data to only contain a single animal, (ii) annotated that data for when the animal is grooming, and (iii) featurized the data and created a classifier. Then when scoring, loop over the data for each individual animal (so only data for a single animal is looked at in any one iteration) and run inference, and you get as many classification vectors as there are animals… `Animal_1_grooming`, `Animal_2_grooming` etc….\r\n\r\nFor classifiers that involve 2 animals (e.g. chasing) it’s the same logic: (i) filter the pose-estimation to only contain two animals, (ii) annotate only when e.g. Animal 1 chases Animal 2 (**not** when animal 2 chases animal 1), and (iii) featurize the data and create a classifier. Then when scoring, loop over all possible permutations of two-animal data (Animal 1 and Animal 2, Animal 2 and Animal 1 etc…) when running inference to get as many classification vectors as there are 2-way permutations (`Animal_1_chases_Animal_2`, `Animal_2_chases_Animal_1`…). This won’t work when animals are very different to one another though (it won’t work if one fly is very different in behavior and size etc from another fly). \r\n\r\nAnd watch out, because with a single classifier, and 10 animals, there would be 90 (I think?) 
different 2-animal chasing scores.", + "created_at": "2023-06-15T14:23:28Z", + "author": "sronilsson" + }, + { + "body": "So, it won't work for me :(\r\nI am thinking of making individual classifiers for each behaviour. Can you suggest some machine learning approach which would keep track of past frames as well during the prediction?\r\nThanks ", + "created_at": "2023-06-16T06:18:45Z", + "author": "sahilsingh2402" + }, + { + "body": "I'm not sure I understand completely, but there are many ways to do it.. most accessible is probably pandas.rolling, or you could use a python deque(), or loop over time windows in a numba-decorated method, which is usually pretty quick. ", + "created_at": "2023-06-16T20:57:27Z", + "author": "sronilsson" + }, + { + "body": "Hi @sahilsingh2402 - one possible way to get around this issue: you could annotate the behaviors in BORIS. BORIS has a setting for specifying which subjects are performing the annotated behaviors (which SimBA doesn't have). Those subject names can then be concatenated with the behavior names and imported into SimBA as separate classifiers. E.g., in BORIS you can annotate an instance of \"animal1\" and \"animal2\" performing \"chasing\", and in SimBA that is interpreted as an instance of an \"animal1_animal2_chasing\" classifier annotation. I recently discussed this with somebody on gitter [HERE](https://matrix.to/#/!afKEsAvrtNfxHHEeIJ:gitter.im/$vMyO_w-ObWfFS2nzh7C55ODNhUkRtPxO2rnmZ8H-4ow?via=gitter.im&via=matrix.org&via=matrix.freyachat.eu) and realised this could be relevant for you.", + "created_at": "2023-09-10T19:54:42Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Files are not created.", + "body": "**Describe the bug**\r\nFiles are not created despite the message seeming valid\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nWhen appending BORIS csv files that seem to perfectly match the format of the project in the tests folder of your repository, the following text appears but no files are created at all:\r\n\r\n\"Processing BORIS for 0 file(s)...\r\nFound 7 annotated behaviors in 16 files with C:/Users/Barak Malul/OneDrive/Documents/boris annotiations/MyDesk/Ready directory\r\nThe following behavior annotations where detected in the boris directory:\r\nstart recording\r\nfull circle\r\non restrainer - half\r\nhead sync\r\nhalf circle\r\nstop recording\r\non restrainer - full\r\nSIMBA COMPLETE: BORIS annotations appended to dataset and saved in project_folder/csv/targets_inserted directory (elapsed time: 0.2269s) 🚀\"\r\n\r\nbut no files are created following it at all.\r\nThis also happens when pressing \"skip outlier correction\" - everything seems to be valid but no files are created\r\n\r\n**Expected behavior**\r\nfiles will be created as requested \r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows\r\n - Python 3.6.13\r\n - using anaconda\r\n \r\n\r\n**Additional context**\r\n\"in addition, when trying to press \"label behaviour\" and \"select video\", then adding a new video, the following error pops up:\r\nsimba error: c:/users/barak malul/onedrive/documents/new folder\project1\project_folder\logs\video_info.csv is not a valid file path 🚨\"\r\n", + "user": "BarakMalul", + "reaction_cnt": 0, + "created_at": "2023-06-10T15:39:01Z", + "updated_at": "2023-06-12T14:01:19Z", + "author": "BarakMalul", + "comments": [ + { + "body": "Hi @BarakMalul - thanks for reporting! 
\r\n\r\nThis suggests that although SimBA finds your BORIS files and your annotated behaviors inside of them, the first part of the message `Processing BORIS for 0 file(s)...` suggests that SimBA doesn't find any files to append these annotations to. If you check inside your `project_folder/csv/features_extracted` directory, do you have any files representing your videos there? To create these files, click to extract features before appending your annotations using this menu as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/tutorial.md#step-5-extract-features)\r\n\r\n\"image\"\r\n\r\n\r\nThe second issue, with the missing `video_info.csv` file: This file is created when you use the menu below as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/tutorial.md#step-3-set-video-parameters). It stores, among other things, the pixels per millimeter of each video so we can standardize movements and velocities across videos with slightly shifted camera locations. \r\n\r\n\"image\"\r\n\r\n", + "created_at": "2023-06-10T16:42:15Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the quick support!\r\n\r\nRegarding the first issue, when I press \"extract features\" I get in response:\r\n \"SIMBA COMPLETE: Loaded project C:/Users/Barak Malul/OneDrive/Documents/empathy/project_folder/project_config.ini 🚀\r\nPose-estimation body part setting for feature extraction: 2 animals 20 body-parts\"\r\n\r\nand in the cmd I get: \r\n\"(simba-test) C:\\Users\\Barak Malul>simba\r\nException in thread Thread-1:\r\nTraceback (most recent call last):\r\n  File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\threading.py\", line 916, in _bootstrap_inner\r\n    self.run()\r\n  File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\threading.py\", line 864, in run\r\n    self._target(*self._args, **self._kwargs)\r\n  File \"C:\\ProgramData\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 681, in run_feature_extraction\r\n    feature_extractor = feature_extractor_classes[self.pose_setting](config_path=self.config_path)\r\nKeyError: '20'\"\r\n\r\nAnd regarding your question: in the project_folder/csv/features_extracted directory there are no files created at all.\r\n\r\nRegarding the second issue, indeed I created a csv file named video_info via that screen, but I keep getting the same error.\r\n", + "created_at": "2023-06-10T17:42:51Z", + "author": "BarakMalul" + }, + { + "body": "Thanks @BarakMalul - I will insert some better error msgs here..\r\n\r\nThis error comes from SimBA looking in your project_config.ini under section `[create ensemble settings]` and option `[pose_estimation_body_parts]` and it looks like it finds the value `20`. Not sure how this happened, but `20` is not a valid entry. It's matching this entry to a class using [THIS](https://github.com/sgoldenlab/simba/blob/6f0067558bc85db79d1d841924189ff4aa693e06/simba/utils/lookups.py#L66) dictionary and you see `20` is not a valid key. In your case [create ensemble settings][pose_estimation_body_parts] should read `user_defined` as below (if you have 2 animals and 20 body-parts).\r\n\r\n![image](https://github.com/sgoldenlab/simba/assets/34761092/ab936dd1-e538-479c-8ce3-d03f10092284)\r\n\r\n\r\nFor the missing `c:/users/barak malul/onedrive/documents/new folder\project1\project_folder\logs\video_info.csv` file, can you confirm if it exists in your project or not? 
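To inspect the config entry discussed above without opening the file by hand, a small configparser sketch (the project path is a placeholder):

```python
# Sketch: read the entry that SimBA keys its feature extractor on.
from configparser import ConfigParser

CONFIG_PATH = r"C:\my_project\project_folder\project_config.ini"  # placeholder

config = ConfigParser()
config.read(CONFIG_PATH)
print(config.get("create ensemble settings", "pose_estimation_body_parts"))
# For a custom body-part configuration this should read e.g. 'user_defined', not '20'.
```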
It should be created when you click the **SAVE DATA** button in the last screengrab in my prior reply at the top left of the window. \r\n\r\n\r\n\r\n", + "created_at": "2023-06-10T18:48:34Z", + "author": "sronilsson" + }, + { + "body": "1. It is looks like when I redefined pose_estimation_body_parts = user_defined, the problem solved, and the extracting frame move forward to another problem:\r\n\"SIMBA VIDEO PARAMETERS FILE ERROR: SimBA could not find CollectedData_chen in the video_info.csv file. Make sure all videos analyzed are represented in the project_folder/logs/video_info.csv file. 🚨\"\r\n\r\n\"CollectedData_chen\" include only the parameter and the data from DLC project in the desired format as I know, and not info about the video at all, so It's hard for me to understand the essence of the error.\r\n\r\n2. For the missing c:/users/barak malul/onedrive/documents/new folder\\project1\\project_folder\\logs\\video_info.csv file - it is exist in this url adress, but the error below keep showing up:\r\n\"simba error: c:/users/barak malul/onedrive/documents/new folder\\project1\\project_folder\\logs\\video_info.csv is not a valid file path 🚨\"\"\r\n\r\n", + "created_at": "2023-06-11T15:46:06Z", + "author": "BarakMalul" + }, + { + "body": "1. It is looks like when I redefined pose_estimation_body_parts = user_defined, the problem solved, and the extracting frame move forward to another problem:\r\n\"SIMBA VIDEO PARAMETERS FILE ERROR: SimBA could not find CollectedData_chen in the video_info.csv file. Make sure all videos analyzed are represented in the project_folder/logs/video_info.csv file. 🚨\"\r\n\"CollectedData_chen\" include only the parameter and the data from DLC project in the desired format as I know, and not info about the video at all, so It's hard for me to understand the essence of the error.\r\n\r\n**When you click to extract features, SimBA trawls all the CSVs inside your `project_folder/csv/outlier_corrected_movement_location` folder. It finds a CSV file called `CollectedData_chen`. It then looks inside the `project_folder/logs/video_info.csv`, in the left-most column named video, for a row entry called `CollectedData_chen` to grab your registered pixels-per-millimeter for this video. It then fails, because in your `project_folder/logs/video_info.csv` file, there is no video registered as CollectedData_chen. Just a note: I think the `CollectedData` is a prefix for DLC files holding human-annotations, not pose-estimation predictions as expected by SimBA? I may be wrong though..**\r\n\r\n\r\n2. For the missing c:/users/barak malul/onedrive/documents/new folder\\project1\\project_folder\\logs\\video_info.csv file - it is exist in this url adress, but the error below keep showing up:
\"simba error: c:/users/barak malul/onedrive/documents/new folder\\project1\\project_folder\\logs\\video_info.csv is not a valid file path 🚨\"\"\r\n\r\n**Which version of SimBA are you running? Remind me - this error happens when you click to skip outlier correction? The error in (1) suggests that the file is there.**", + "created_at": "2023-06-11T18:19:04Z", + "author": "sronilsson" + }, + { + "body": "1. from your answer I understand that I might be using wrond the DLC files. I took this file (CollectedData_chen.csv) from training-datasets folder in DLC project, then made it in the same fromat as in the repository:(tests/data/test_projects/mouse_open_field/project_folder/csv/input_csv), but I indeed didn't have any predictive values, because DLC didn't create them, so I kept the values empty. It might be the problem?\r\n\r\n2.I'm using SimBA v.1.60.1 , when I click to skip outlier correction there is no error message at all", + "created_at": "2023-06-11T19:25:13Z", + "author": "BarakMalul" + }, + { + "body": "Hi @BarakMalul - if the probability p columns are all left empty, I think SimBA will all force them to zero... but there might be other issues. Typically you'd want to import a file with predictions, from DLC they have one of these substrings in the file-names often: “DLC_” , “DeepCut”, \"dlc_resnet50\", \"dlc_resnet_50\", \"dlc_dlcrnetms5\", \"dlc_effnet_b0\", \"dlc_resnet101\". ", + "created_at": "2023-06-11T20:01:57Z", + "author": "sronilsson" + }, + { + "body": "I'll try to make it more detailed so w'll succeed in solving it.I trained Model at DLC for pose estimation. in addition I have annotiations on totally different videos from BORIS. now I want to use both of them to create calssifier on the specific behaviours annotiated in BORIS files. what should I watch carfully to avoid the mistake that bloking my from succeed in training the classifiers?\r\n\r\nFew question that I have: Which of the videos should I load to the project(those which relate to boris or DLC project)?\r\nwhich files of DLC should I load? the config? what else?\r\n\r\nThank you!\r\n", + "created_at": "2023-06-12T07:09:50Z", + "author": "BarakMalul" + }, + { + "body": "Hi @BarakMalul - your BORIS annotations and pose-estimation data has to come from the same videos, there has to be a relationship with the tracking data and your annotated behaviors otherwise SimBA and the ML classifiers can't find correlations. ", + "created_at": "2023-06-12T13:33:09Z", + "author": "sronilsson" + }, + { + "body": "So the labled data & videos which were inserted to the DLC model have to be identical as the boris annotiations?\r\nor maybe if I analyze the videos which the annotiations in BORIS are based on through the DLC trained model and take the output it will work?", + "created_at": "2023-06-12T13:41:35Z", + "author": "BarakMalul" + }, + { + "body": "Yes the BORIS annotated videos have to go through the DLC trained model, you don't have to label body-parts key-points in the BORIS annotated videos. ", + "created_at": "2023-06-12T13:44:50Z", + "author": "sronilsson" + }, + { + "body": "Just to make sure I understand you - I should insert the videos that the BORIS annotiations was made on to the DLC model i trained in order to to analyze them > then take the analyzed data per each video (the csv or h5 file?) and add it to the SimBA project. 
is it correct?", + "created_at": "2023-06-12T13:53:07Z", + "author": "BarakMalul" + }, + { + "body": "Yes, that is correct!", + "created_at": "2023-06-12T13:54:37Z", + "author": "sronilsson" + }, + { + "body": "Thank you so much for the detailed explanations and the willing to help. I appriciate it!", + "created_at": "2023-06-12T14:01:00Z", + "author": "BarakMalul" + } + ] + }, + { + "title": "Cannot install on Apple Silicon M1", + "body": "**Describe the bug**\r\nCan't install python 3.6.0 or simba into anaconda environment\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create new anaconda environment for simba\r\n2. Attempt to conda install python=3.6.0\r\n\r\n**Expected behavior**\r\nPython should install to allow me to install simba.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS Ventura 13.3.1 (a)\r\n - Python Version: 3.6.0\r\n - Are you using anaconda? Yes\r\n \r\n\r\n**Additional context**\r\nError log\r\n`(base) baran@dhcp-10-102-45-150 ~ % conda activate simba\r\n(simba) baran@dhcp-10-102-45-150 ~ % pip install simba-uw-tf-dev\r\nzsh: command not found: pip\r\n(simba) baran@dhcp-10-102-45-150 ~ % conda install python=3.6.0\r\nCollecting package metadata (current_repodata.json): done\r\nSolving environment: failed with initial frozen solve. Retrying with flexible solve.\r\nCollecting package metadata (repodata.json): done\r\nSolving environment: failed with initial frozen solve. Retrying with flexible solve.\r\n\r\nPackagesNotFoundError: The following packages are not available from current channels:\r\n\r\n - python=3.6.0\r\n\r\nCurrent channels:\r\n\r\n - https://repo.anaconda.com/pkgs/main/osx-64\r\n - https://repo.anaconda.com/pkgs/main/noarch\r\n - https://repo.anaconda.com/pkgs/main/osx-arm64\r\n - https://repo.anaconda.com/pkgs/free/osx-arm64\r\n - https://repo.anaconda.com/pkgs/free/noarch\r\n - https://repo.anaconda.com/pkgs/r/osx-arm64\r\n - https://repo.anaconda.com/pkgs/r/noarch\r\n\r\nTo search for alternate channels that may provide the conda package you're\r\nlooking for, navigate to\r\n\r\n https://anaconda.org\r\n\r\nand use the search bar at the top of the page.`\r\n\r\n## Solutions I have tried (with errors nested)\r\n
conda install python=3.6 –––– while this allowed me to install python 3.6.13, this gave another error\r\n\r\n(WARNING: Retrying (Retry(total=0, connect=None, read=None, redirect=None, status=None)) after connection broken by 'SSLError(\"Can't connect to HTTPS URL because the SSL module is not available.\",)': /simple/simba-uw-tf-dev/\r\nCould not fetch URL https://pypi.org/simple/simba-uw-tf-dev/: There was a problem confirming the ssl certificate: HTTPSConnectionPool(host='pypi.org', port=443): Max retries exceeded with url: /simple/simba-uw-tf-dev/ (Caused by SSLError(\"Can't connect to HTTPS URL because the SSL module is not available.\",)) - skipping\r\nERROR: Could not find a version that satisfies the requirement simba-uw-tf-dev (from versions: none)\r\nERROR: No matching distribution found for simba-uw-tf-dev)\r\n\r\n
\r\n\r\n
conda install python=3.6.2 –––– I found this can bypass the SSL error from above and allow me to run pip install simba-uw-tf-dev along with the rest of the MacOS installation tutorial. But, attempting to run simba gives a massive error code \r\n\r\nTraceback (most recent call last):\r\n File \"/Users/baran/anaconda3/envs/simba/bin/simba\", line 5, in \r\n from simba.SimBA import main\r\n File \"/Users/baran/anaconda3/envs/simba/lib/python3.6/site-packages/simba/SimBA.py\", line 8, in \r\n from PIL import ImageTk\r\n File \"/Users/baran/anaconda3/envs/simba/lib/python3.6/site-packages/PIL/ImageTk.py\", line 31, in \r\n from . import Image\r\n File \"/Users/baran/anaconda3/envs/simba/lib/python3.6/site-packages/PIL/Image.py\", line 114, in \r\n from . import _imaging as core\r\nImportError: dlopen(/Users/baran/anaconda3/envs/simba/lib/python3.6/site-packages/PIL/_imaging.cpython-36m-darwin.so, 0x0002): Library not loaded: @rpath/libtiff.5.dylib\r\n Referenced from: /Users/baran/anaconda3/envs/simba/lib/python3.6/site-packages/PIL/_imaging.cpython-36m-darwin.so\r\n Reason: tried: '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such file), '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such file), '/Users/baran/anaconda3/envs/simba/lib/python3.6/site-packages/PIL/../../../libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such file), '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such file), '/Users/baran/anaconda3/envs/simba/lib/python3.6/site-packages/PIL/../../../libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such file), '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such file), '/Users/baran/anaconda3/envs/simba/bin/../lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such file), '/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/System/Volumes/Preboot/Cryptexes/OS/Users/baran/anaconda3/envs/simba/lib/libtiff.5.dylib' (no such 
file), '/Users/baran/anaconda3/envs/simba/bin/../lib/libtiff.5.dylib' (mach-o file, but is an incompatible architecture (have 'arm64', need 'x86_64')), '/usr/local/lib/libtiff.5.dylib' (no such file), '/usr/lib/libtiff.5.dylib' (no such file, not in dyld cache)\r\n\r\n
\r\n\r\n
Will add as I attempt more fixes\r\n", + "user": "bdy2530", + "reaction_cnt": 0, + "created_at": "2023-05-25T20:32:34Z", + "updated_at": "2024-03-01T02:08:29Z", + "author": "bdy2530", + "comments": [ + { + "body": "Hello @bdy2530 ! Sorry for delay, I missed this one. \r\n\r\nIt seems something may have gone astray when the conda environment was created: pip and python should come with it.\r\n\r\nTry:\r\n(i) `conda create -n simba_env python=3.6`\r\n(ii) `conda activate simba_env`\r\n(iii) `pip install simba-uw-tf-dev`\r\n(iv) On my mac, I sometimes have to reinstall matplotlib through conda: `pip uninstall matplotlib` followed by `conda install matplotlib`\r\n(iv) and to launch, type `simba`\r\n\r\nLet me know how it goes!\r\n\r\n\r\n", + "created_at": "2023-05-29T21:28:25Z", + "author": "sronilsson" + }, + { + "body": "Another way would be to try the [docker image](https://login.docker.com/u/login/identifier?state=hKFo2SBabEtMSjBDeWZhODJ2MlNDSGw3ZUp3THJaQnRzUEhjcaFur3VuaXZlcnNhbC1sb2dpbqN0aWTZIFdPWHMzZ1V1akNpcWRCY3FnRFRvVnZ6WjdoLWFzREZso2NpZNkgbHZlOUdHbDhKdFNVcm5lUTFFVnVDMGxiakhkaTluYjk), but the drawback there is that I have not setup any ci cd to auto update to the docker image with each pip/github update, so docker updates drags behind for now.", + "created_at": "2023-08-08T12:39:33Z", + "author": "sronilsson" + }, + { + "body": "Hi!\r\nSorry to reopen this issue but I am dealing with the same situation:\r\nThis is my error message after I tried \"conda create -n simba_env python=3.6\"\r\n\r\n", + "created_at": "2023-12-13T13:31:56Z", + "author": "Siriz23" + }, + { + "body": "Hi @Siriz23 no problem - how does it go when you try in a python3.9 environment? \r\n", + "created_at": "2023-12-13T13:43:48Z", + "author": "sronilsson" + }, + { + "body": "Unfortunately it is worse because I had so many issues with different dependencies such as numpy, cython, matplotlib. \r\nAll of them with different timing.\r\nI am going to try again, starting from the scratch:\r\nError1 -->\r\n\"Screenshot\r\n\r\n \r\n I've uninstalled and installed Shapely.\r\n\r\nError2--> \r\n\"Screenshot\r\n\r\nAnd here I am completely stuck\r\n", + "created_at": "2023-12-13T15:10:48Z", + "author": "Siriz23" + }, + { + "body": "Got it someone yesterday-ish raised this on their M1 on Gitter, never really was solved: https://matrix.to/#/!afKEsAvrtNfxHHEeIJ:gitter.im/$77Tddm33m4MkWcWHGa4zoU2LIQFC0_RO603oivCjMj4?via=gitter.im&via=matrix.org&via=matrix.freyachat.eu\r\n\r\nIt seems to be using a cache, if you install simba with the `--no-cache-dir` flag how does it look? Sorry I can't troubleshoot this as I don't have access to the M1..", + "created_at": "2023-12-13T15:18:25Z", + "author": "sronilsson" + }, + { + "body": "Thank you for your feedback! I'll give it another shot tomorrow. By the way, I'm curious if it's possible to install it on a MacBook with an M1 chip🥲", + "created_at": "2023-12-13T16:41:25Z", + "author": "Siriz23" + }, + { + "body": "> Thank you for your feedback! I'll give it another shot tomorrow. By the way, I'm curious if it's possible to install it on a MacBook with an M1 chip🥲\r\n\r\nWere you able to figure this problem out? ", + "created_at": "2024-03-01T01:15:33Z", + "author": "ligeralde" + }, + { + "body": "Hi @ligeralde! What error are you seeing, is it the python 3.6 error? ", + "created_at": "2024-03-01T01:32:55Z", + "author": "sronilsson" + }, + { + "body": "> Hi @ligeralde! 
What error are you seeing, is it the python 3.6 error?\r\n\r\nWhen i run simba, i get `AttributeError: 'version_info' object has no attribute '__version__'`", + "created_at": "2024-03-01T01:37:26Z", + "author": "ligeralde" + }, + { + "body": "> > Hi @ligeralde! What error are you seeing, is it the python 3.6 error?\r\n> \r\n> When i run simba, i get `AttributeError: 'version_info' object has no attribute '__version__'`\r\n\r\nthis may be a problem with my environment ", + "created_at": "2024-03-01T01:37:44Z", + "author": "ligeralde" + }, + { + "body": "Interesting, do you have a full traceback to see which lines are causing it? \r\n\r\nAlso which version of Simba do you see with `pip show simba-uw-tf-dev` ?", + "created_at": "2024-03-01T01:41:25Z", + "author": "sronilsson" + }, + { + "body": "> Interesting, do you have a full traceback to see which lines are causing it?\r\n> \r\n> Also which version of Simba do you see with `pip show simba-uw-tf-dev` ?\r\nv1.86.6\r\n![image](https://github.com/sgoldenlab/simba/assets/43016748/7aa3fba5-d9f7-4492-8a46-3feba013cd2e)\r\n", + "created_at": "2024-03-01T01:46:58Z", + "author": "ligeralde" + }, + { + "body": "There is a suggested fix here: https://stackoverflow.com/a/70061207\r\n\r\nTry `pip install pyparsing==2.4.7` and then try to launch Simba, how does it look?\r\n\r\n", + "created_at": "2024-03-01T01:53:23Z", + "author": "sronilsson" + }, + { + "body": "> There is a suggested fix here: https://stackoverflow.com/a/70061207\r\n> \r\n> Try `pip install pyparsing==2.4.7` and then try to launch Simba, how does it look?\r\n\r\nThanks, this is a step in the right direction! I still get this warning, not sure if it's a big deal \r\n![image](https://github.com/sgoldenlab/simba/assets/43016748/e4cfeb28-c6fb-4ff9-86b5-568438e108f1)\r\n", + "created_at": "2024-03-01T02:01:59Z", + "author": "ligeralde" + }, + { + "body": "I'm not sure, have not seen it before? If it's just a warning I wouldn't worry for now. If you get any errors related to plotting and matplotlib let me know here and we can dig into it as we'd hopefully receive a more informative error msg ", + "created_at": "2024-03-01T02:08:28Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Add compatibility with newer BORIS annotation format", + "body": "In recent BORIS versions (v8+?), the export of behavioral events in a tabular format used as annotations files in SimBA has changed. This commit thus enables SimBA to import these annotations files in a new format, while maintaining compatibility with the now-older format. 
I'm uncertain as to when exactly the format changed, but exporting the list of events as a tabular file in BORIS now yields a nice data frame without having to skip lines:\r\n\r\n|Observation id|Observation date |Description|Observation duration|Observation type|Source |Media duration (s)|FPS |Subject|Behavior |Behavioral category|Behavior type|Time |Media file name |Image index|Image file path|Comment|\r\n|--------------|-------------------|-----------|--------------------|----------------|------------------------------------|------------------|-----|-------|---------|-------------------|-------------|-------|--------------------------|-----------|---------------|-------|\r\n|testobs_new |2023-05-09 17:15:54| |238.739 |Media file(s) |player #⁠1:videos/testExp/Video11.mp4|315.7 |59.94| |behavior1| |START |5.355 |videos/testExp/Video11.mp4|NA |NA | |\r\n|testobs_new |2023-05-09 17:15:54| |238.739 |Media file(s) |player #⁠1:videos/testExp/Video11.mp4|315.7 |59.94| |behavior1| |STOP |9.626 |videos/testExp/Video11.mp4|NA |NA | |\r\n|testobs_new |2023-05-09 17:15:54| |238.739 |Media file(s) |player #⁠1:videos/testExp/Video11.mp4|315.7 |59.94| |behavior2| |START |11.178 |videos/testExp/Video11.mp4|NA |NA | |\r\n|testobs_new |2023-05-09 17:15:54| |238.739 |Media file(s) |player #⁠1:videos/testExp/Video11.mp4|315.7 |59.94| |behavior2| |STOP |11.728 |videos/testExp/Video11.mp4|NA |NA | |\r\n\r\nUnfortunately, this means SimBA cannot directly import these newly-formatted files from BORIS (not without manually editing them, of course). This pull request simply enables SimBA to do just that.\r\n\r\nThis is done by adding a quick version checker seeking for the presence of a column name found only in the new format, followed by the adjustment of expected columns headers to match what SimBA is already expecting. This way, importing a BORIS annotation file that uses the now-older format should still work.\r\n\r\n@sronilsson, I have tested using both importing ways:\r\n- using the \"Import BORIS Annotation (select folder with .csv files)\" button. This does give a csv file with the behaviors appended as the last columns in `project_folder/csv/targets_inserted`. The log in the main SimBA window is:\r\n```\r\nProcessing BORIS for 1 file(s)...\r\nFound 3 annotated behaviors in 1 files with /home/florian/PycharmProjects/Simba/external_data/exports/mixed_observations directory\r\nThe following behavior annotations where detected in the boris directory:\r\nbehavior1\r\nbehavior2\r\nbehavior3\r\nAppending BORIS annotations to Video11 ...\r\nSaved BORIS annotations for video Video11...\r\nSIMBA COMPLETE: BORIS annotations appended to dataset and saved in project_folder/csv/targets_inserted directory (elapsed time: 1.1996s)\r\n```\r\n\r\n- using the (newer?) \"Append third-party annotations\" window. This also gives a csv file with the behaviors appended as the last columns in `project_folder/csv/targets_inserted`. 
The log in the main SimBA window is:\r\n```\r\nProcessing 1 BORIS file(s)...\r\nReading in 1 BORIS annotation files...\r\nProcessing annotations for Video11 video...\r\nSaved BORIS annotations for video Video11...\r\nSIMBA COMPLETE: BORIS annotations appended to dataset and saved in project_folder/csv/targets_inserted directory (elapsed time: 1.3718s)\r\n```\r\n\r\nI have not tested using an older BORIS format file as I do not have one with matching video and pose tracking files.\r\n\r\nHope this helps,", + "user": "florianduclot", + "reaction_cnt": 0, + "created_at": "2023-05-21T14:21:53Z", + "updated_at": "2023-06-08T22:08:01Z", + "author": "florianduclot", + "comments": [ + { + "body": "Sorry for the terrible display of the table here... The horizontal scrolling seems to work just fine with editing the message and looking at the \"Preview\" tab. Here's a screenshot to circumvent this oddity:\r\n![image](https://github.com/sgoldenlab/simba/assets/17770886/64b22c66-5016-4986-bf00-ac2dcf2c85d4)\r\n", + "created_at": "2023-05-21T14:31:07Z", + "author": "florianduclot" + }, + { + "body": "> A nit: could we make `_is_new_boris_version` a standalone function in `third_party_label_appenders.tools`, so a single function called by both the old and the new BORIS appender methods?\r\n\r\nGlad you bring this up... I wondered about that and forgot to mention it in the PR. I was a bit confused as to whether or not one of the ways to import annotations would be deprecated so I was unsure about using one to refer a function from the other. I'll include your request then!", + "created_at": "2023-05-21T15:06:06Z", + "author": "florianduclot" + }, + { + "body": "Done: 7e3d3ac\r\n\r\nI tested again the same way as described above and it still gives the same results and log outputs.\r\n\r\n", + "created_at": "2023-05-21T15:19:12Z", + "author": "florianduclot" + }, + { + "body": "Hi @florianduclot! You don't happen to have a very small CSV example of the new BORIS format? Something that we can stick in the repo I can link to in the docs as expected input? ", + "created_at": "2023-06-08T17:08:36Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson,\r\nHere's the file I used to test that branch (it's less than 20 Kb but it can of course be cut down further if preferred):\r\n[boris_new_example.csv](https://github.com/sgoldenlab/simba/files/11692719/boris_new_example.csv)\r\n", + "created_at": "2023-06-08T20:23:38Z", + "author": "florianduclot" + }, + { + "body": "Thanks!", + "created_at": "2023-06-08T22:08:01Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Path plots artefeacts", + "body": "Hello,\r\n\r\nAs shown bellow, I'm having an issue when I generate a path-plot (from H5 multi animal project):\r\n\r\n![Simba](https://github.com/sgoldenlab/simba/assets/66886884/b1dbdd60-42f5-4eea-bb8e-049d5e07eed7)\r\n\r\nIs there a way to remove the 0, 0 frames with SimBA? In case I'd like to try to troubleshot this on my own, which CSV data file should I use in the Log folder? 
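[Editor's note] For readers following this pull request, a minimal sketch of the column-presence check it describes. The marker column and the rename mapping below are illustrative assumptions, not the exact ones in the merged code; only the file name `boris_new_example.csv` comes from this thread.

```python
import pandas as pd

# Assumption: "Media duration (s)" appears only in the newer (v8+) BORIS
# tabular export shown above; the actual marker column the PR keys on may differ.
NEW_FORMAT_MARKER = "Media duration (s)"

def is_new_boris_version(boris_df: pd.DataFrame) -> bool:
    """Return True if the dataframe looks like a newer (v8+) BORIS export."""
    return NEW_FORMAT_MARKER in boris_df.columns

boris_df = pd.read_csv("boris_new_example.csv")  # new exports need no skipped lines
if is_new_boris_version(boris_df):
    # Hypothetical mapping: rename the new headers onto the names SimBA
    # already expects from the older format.
    boris_df = boris_df.rename(columns={"Media file name": "Media file path",
                                        "Behavior type": "Status"})
```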
Outlier location one?\r\n\r\nFYI: \r\nI did not apply interpolation, but\r\nI applied outlier correction (movement = 1, and movement = 2) \r\nI applied smoother correction (Savitzky Golay = 400)\r\n\r\nThank you !", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-05-18T15:02:16Z", + "updated_at": "2023-08-14T14:20:10Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - \r\n\r\nYes the path plotter grabs the data from the `project_folder/csv/outlier_corrected_movement_location` directory.\r\n\r\nYes - you can remove missing data (0,0) frames using interpolation - choose \"Body-part: Some method\" for this case. You can either (i) interpolate at import, or (ii) interpolate after import using menu below and selecting the `outlier_corrected_movement_location\" directory.\r\n\r\n\"image\"\r\n\r\n\r\nFYI: There are some notebooks etc to create path plots and other stuff outside of GUI now, should you prefer: \r\nhttps://simba-uw-tf-dev.readthedocs.io/en/latest/nb/create_path_plot.html ", + "created_at": "2023-05-18T15:12:51Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson thank you for the help, unfortunately it did not solve the problem, I obtained exactly the same plot ", + "created_at": "2023-05-18T19:22:52Z", + "author": "DorianBattivelli" + }, + { + "body": "Odd! Do you still see (0,0) in the data after running the interpolation and in the data you are plotting?", + "created_at": "2023-05-18T19:38:05Z", + "author": "sronilsson" + }, + { + "body": "Yes I do\r\n![Schermata 2023-05-18 alle 21 39 58](https://github.com/sgoldenlab/simba/assets/66886884/596a983c-da16-4e96-b9cf-cc2d21442d92)\r\n\r\n", + "created_at": "2023-05-18T19:40:31Z", + "author": "DorianBattivelli" + }, + { + "body": "Let me check if I can recreate, did you use the interpolation method in the screengrab avove?", + "created_at": "2023-05-18T19:43:36Z", + "author": "sronilsson" + }, + { + "body": "Yes", + "created_at": "2023-05-18T19:44:21Z", + "author": "DorianBattivelli" + }, + { + "body": "Ive tested a few projects now, and I can't recreate the missing interpolation... :/ Any chance you can share your project? Just the (i) `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` file, (ii) the project_config.ini, and (iii) and the file you are trying to interpolate?", + "created_at": "2023-05-18T20:20:37Z", + "author": "sronilsson" + }, + { + "body": "[Shared.zip](https://github.com/sgoldenlab/simba/files/11511387/Shared.zip)\r\n", + "created_at": "2023-05-18T20:29:44Z", + "author": "DorianBattivelli" + }, + { + "body": "Intresting... the interpolation seems to run fine on my end on your data. 
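[Editor's note] For context, a minimal sketch of the body-part interpolation being discussed in this thread: (0,0) detections are treated as missing pose and filled linearly. This is an illustration, not SimBA's implementation; the file name and column naming scheme are assumptions.

```python
import numpy as np
import pandas as pd

df = pd.read_csv("Video11.csv")  # hypothetical pose file with <bodypart>_x/_y columns

for bp in ["center"]:  # body-parts to clean; "center" is the part plotted in this thread
    x_col, y_col = f"{bp}_x", f"{bp}_y"
    # (0, 0) coordinates mean the body-part was not detected in that frame.
    missing = (df[x_col] == 0) & (df[y_col] == 0)
    df.loc[missing, [x_col, y_col]] = np.nan
    # Linear interpolation across the gaps, filling leading/trailing NaNs too.
    df[[x_col, y_col]] = df[[x_col, y_col]].interpolate(method="linear",
                                                        limit_direction="both")
```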
Which version of SimBA do you have `pip show simba-uw-tf-dev` ?\r\n\r\n\"image\"\r\n", + "created_at": "2023-05-18T20:37:38Z", + "author": "sronilsson" + }, + { + "body": "1.59.3", + "created_at": "2023-05-18T20:44:39Z", + "author": "DorianBattivelli" + }, + { + "body": "If you upgrade `pip install simba-uw-tf-dev --upgrade` to version `1.59.7` is how does it run on your end?", + "created_at": "2023-05-18T20:53:27Z", + "author": "sronilsson" + }, + { + "body": "Do I need to re run the outlier correction after the interpolation or can I directly jump to path plots?", + "created_at": "2023-05-18T20:59:16Z", + "author": "DorianBattivelli" + }, + { + "body": "You should be able to go straight to path plots.", + "created_at": "2023-05-18T21:01:50Z", + "author": "sronilsson" + }, + { + "body": "If you run outlier correction again your interpolated data might be overwritten again with the original data", + "created_at": "2023-05-18T21:11:37Z", + "author": "sronilsson" + }, + { + "body": ":/\r\nStill the same error\r\n\r\n![Schermata 2023-05-18 alle 23 30 27](https://github.com/sgoldenlab/simba/assets/66886884/8756f6dd-2718-4d3e-b5d6-842ff6e21664)\r\n\r\n\r\n![Schermata 2023-05-18 alle 23 31 39](https://github.com/sgoldenlab/simba/assets/66886884/2058723c-1587-49ce-b5e2-4da65bd827a8)\r\n\r\n", + "created_at": "2023-05-18T21:32:22Z", + "author": "DorianBattivelli" + }, + { + "body": "I mean it seems that the interpolation applied, but the plot still displays artefacts", + "created_at": "2023-05-18T21:34:14Z", + "author": "DorianBattivelli" + }, + { + "body": "Which path plot function are you using? Do you have a screen grab of the button / menu you are using?", + "created_at": "2023-05-18T21:36:58Z", + "author": "sronilsson" + }, + { + "body": "![Schermata 2023-05-18 alle 23 37 38](https://github.com/sgoldenlab/simba/assets/66886884/53eaa00e-3697-4c37-8141-45d237f5b071)\r\n", + "created_at": "2023-05-18T21:38:03Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah I think I can see what is going on, there are some negative values in there for some reason, because smoothing has been run without interpolation I suspect. Let me insert a fix for that, but will be an hour or so before I am back at my computer. ", + "created_at": "2023-05-18T21:41:17Z", + "author": "sronilsson" + }, + { + "body": "Wo let's hope it will fix the problem! Thank you\n\nIl giorno gio 18 mag 2023 alle ore 23:41 Simon Nilsson <\n***@***.***> ha scritto:\n\n> Ah I think I can see what is going on, there are some negative values in\n> there for some reason, because smoothing has been run without interpolation\n> I suspect. 
Let me insert a fix for that, but it will be an hour or so before I\n> am back at my computer.\n", + "created_at": "2023-05-18T21:43:46Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - when you get a chance, try upgrading to version `1.58.9` and let me know how the path plot looks after interpolation!", + "created_at": "2023-05-18T22:21:46Z", + "author": "sronilsson" + }, + { + "body": "I only have access to 1.58.8, is that ok?\r\n", + "created_at": "2023-05-18T22:59:36Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah sorry, my typo - https://pypi.org/project/Simba-UW-tf-dev/1.59.8/ - 1.59.8", + "created_at": "2023-05-18T23:17:11Z", + "author": "sronilsson" + }, + { + "body": "Unfortunately, it is still happening.\r\n\r\n![Schermata 2023-05-19 alle 01 17 58](https://github.com/sgoldenlab/simba/assets/66886884/5309429b-c75b-4f85-9b67-4062bc53b2be)\r\n", + "created_at": "2023-05-18T23:18:53Z", + "author": "DorianBattivelli" + }, + { + "body": "Yet, it seems that the artefact lines are fewer than before; maybe there is a way to clean them up better?", + "created_at": "2023-05-18T23:21:31Z", + "author": "DorianBattivelli" + }, + { + "body": "If you import the video again, and do interpolation + smoothing, how does it look then?", + "created_at": "2023-05-18T23:23:48Z", + "author": "sronilsson" + }, + { + "body": "Still the same. \r\nTo be precise: I created a project from scratch, applied interpolation and smoothing, ran outlier correction, and generated the path plot: \r\n![Schermata 2023-05-19 alle 02 00 20](https://github.com/sgoldenlab/simba/assets/66886884/0ccb5928-4b5a-4544-b6d8-76cb3bfb4a70)\r\n", + "created_at": "2023-05-19T00:01:45Z", + "author": "DorianBattivelli" + }, + { + "body": "So do any of the body-part coordinate values in the file point to (0,0) or coordinates below 0?", + "created_at": "2023-05-19T00:07:13Z", + "author": "sronilsson" + }, + { + "body": "Yes, to 0, but I did not see negative values. \r\nThe L column in the \"center\" body part, which I chose for plotting:\r\n![Schermata 2023-05-19 alle 02 11 49](https://github.com/sgoldenlab/simba/assets/66886884/b1c3debf-7bcf-4433-8e8f-cbb9652116f0)\r\n", + "created_at": "2023-05-19T00:12:43Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah, so it doesn't seem to have interpolated correctly. Just to confirm: the screen grab above comes from a file you have run interpolation on?", + "created_at": "2023-05-19T00:17:58Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Existing model not making correct predictions on new videos", + "body": "Hello! I am using SimBA to do automated behavioral analysis of nape scratching. I previously trained the left and right nape scratching models on 20 videos, but as part of my research we would like to see how the model performs on new videos with nape scratching, running the same model on new videos. I noticed that the videos that I labeled behavior on and produced machine results for in the past worked, but the 7 new videos output machine-results files with no scratches (1s) recorded, even though the DLC tracking performed well on those videos and it is clearly visible to the human eye that scratching is occurring. 
I tried validating the model on a previous video with a 0.70 threshold and 100ms minimum bout length and that looked like it gave good results but when I validate on a single video on one of the new added videos it still does not classify any scratches. From my understanding, you should not have to label behavior if you add new videos to the project but instead go to the Run Machine Model step after you have imported tracking data and videos, set video parameters, ran outlier correction, and extracted features on these new videos. I would like to use existing models for left and right nape scratching but it does not seen to be performing correctly. I was wondering if you had any advice on how to fix this problem and if I am missing something or if there is an error somewhere. I took some screenshots of what my folders look like here: \r\n![Screenshot 2023-05-17 140915](https://github.com/sgoldenlab/simba/assets/99504167/33057423-274c-41fc-a359-fcf062d94674)\r\n![Screenshot 2023-05-17 140954](https://github.com/sgoldenlab/simba/assets/99504167/409eaa88-69d0-48a1-812c-372ec2d59dff)\r\n![Screenshot 2023-05-17 141028](https://github.com/sgoldenlab/simba/assets/99504167/d3bfe5bb-5cc2-4923-b2f7-9d4e8ac13a63)\r\n![Screenshot 2023-05-17 141917](https://github.com/sgoldenlab/simba/assets/99504167/c70c3ed4-4ee6-4582-b95e-cff4a34ecc7b)\r\n\r\n\r\n\r\nThanks so much!\r\n\r\n\r\n\r\n", + "user": "fon215", + "reaction_cnt": 0, + "created_at": "2023-05-17T18:18:28Z", + "updated_at": "2023-05-17T22:31:42Z", + "author": "fon215", + "comments": [ + { + "body": "Hi @fon215! You are correct - the goal is definitly to not label behaviors in new videos and to create classifiers that can score behaviors on videos that you didn’t use for training. \r\n\r\nThe issue seems to be false negatives in the new videos: no scratches are detected. If you lower the discrimination threshold for the new videos, how does it look? Is there anything new you can think of about the new videos relative to training/previous validation videos - e.g., camera angles, recording environment? ", + "created_at": "2023-05-17T19:31:51Z", + "author": "sronilsson" + }, + { + "body": "Changing the discrimination threshold seemed to work! Thanks so much!", + "created_at": "2023-05-17T22:31:42Z", + "author": "fon215" + } + ] + }, + { + "title": "Issue with polygon ROI", + "body": "Hello,\r\n\r\nDid you release an update to solve the issue I faced last week? Unfortunately, I went ahead with another project meantime, and so my H5 file has the same error regarding polygons, can I send it to you?\r\n\r\nThank you, \r\nBest,", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-05-14T11:11:00Z", + "updated_at": "2023-05-15T02:19:27Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli! Yes I should have done - send it to me and I will fix it and check the bug against your H5 file again. ", + "created_at": "2023-05-14T13:41:59Z", + "author": "sronilsson" + }, + { + "body": "Also, could you show me a screengrab or copy/paste of the error and I can make sure it is the same error as last week?", + "created_at": "2023-05-14T15:08:04Z", + "author": "sronilsson" + }, + { + "body": "Well after your answer about the update, I updated the program, and run ROI analysis. 
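[Editor's note] As an aside to the nape-scratching issue above, a small sketch of what a discrimination threshold plus a minimum bout length do to classifier probabilities. File and column names are assumptions for illustration, not SimBA internals.

```python
import pandas as pd

fps = 30                      # assumed video frame rate
threshold = 0.70              # discrimination threshold from the thread above
min_bout_ms = 100             # minimum bout length from the thread above
min_bout_frames = max(1, int(round(min_bout_ms / 1000 * fps)))

df = pd.read_csv("machine_results.csv")  # hypothetical machine-results file
df["nape_scratch"] = (df["Probability_nape_scratch"] >= threshold).astype(int)

# Remove detected bouts shorter than the minimum bout length: label each run
# of identical predictions with a bout id, then zero out short positive runs.
bout_id = (df["nape_scratch"].diff() != 0).cumsum()
bout_len = df.groupby(bout_id)["nape_scratch"].transform("size")
df.loc[(df["nape_scratch"] == 1) & (bout_len < min_bout_frames), "nape_scratch"] = 0
```

Lowering `threshold` trades false negatives (missed scratches, as in the issue above) for false positives.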
It looks that no ROI is missing, so I think the fix worked during ROI analysis !\r\n\r\nThank you !", + "created_at": "2023-05-14T15:51:25Z", + "author": "DorianBattivelli" + }, + { + "body": "Thank you @DorianBattivelli - if you see any issues, please let me know though!", + "created_at": "2023-05-14T15:52:46Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Dispersion of body parts when they are hidden when training classifiers", + "body": "I'm using Simba for training classifiers on certain behaviors. When I generate the validation video after running the machine model, I realize that when body parts are occluded by an element of the arena or another mouse, they go all over the place. I was expecting an occluded body part to disappear, as we can decide of a Bp threshold in other parts of the pipeline (ROI analysis for example). You can see a picture below:\r\n\r\n![bp pb](https://github.com/sgoldenlab/simba/assets/124702348/c1427e57-185c-47bf-9488-0da7b5ac67e0)\r\n\r\nIn this picture, a mouse is hidden in a nest located in the bottom right corner of the arena, as you can see, the points for the body parts did not disappear but go all over the place.\r\n\r\nI am afraid this could mess with some classifications of behaviors and was therefore wondering if we could make the body part points disappear when they are occluded based on their detection probability. \r\n\r\nThanks in advance.", + "user": "Lucas97223", + "reaction_cnt": 0, + "created_at": "2023-05-12T13:09:39Z", + "updated_at": "2023-05-12T13:09:39Z", + "author": "Lucas97223", + "comments": [] + }, + { + "title": "Using simba with 4 mice, 8 bodyparts", + "body": "Hi,\r\nNot really an issue but more of a question.\r\nI am planning to use SIMBA to identify behaviors (e.g., chases) in groups of 4 mice. For each mouse, 8 bodyparts have been labeled (according to the labeling scheme that is used in SIMBA). However, since SIMBA is optimized for 2 animals, I guess the features that will be extracted and used for the model (with 4 mice) are much limited compared to the 492 features that can be used for the 2 mice. I was wondering how many features will be used if I use my videos and tracking files with 4 mice, and if it's better that I just do pairwise comparisons instead with all the possible combinations of pairs of mice, so that all features can be used.", + "user": "Sere-98", + "reaction_cnt": 0, + "created_at": "2023-05-10T11:31:37Z", + "updated_at": "2023-05-12T08:32:06Z", + "author": "Sere-98", + "comments": [ + { + "body": "Hey @Sere-98 ! Yes, when using a user-defined pose-config, SimBA will by default calculate (i) the distance between the animals body-parts and other animals body-parts, (i) the aggregated distance moved in rolling windows, (iii) aggregated distances between animals in rolling windows, and some (iv) counts of pose-probability scores scores in different buckets. It’s not a lot in terms of breadth, but it is kept limited as otherwise blows up when someone come with tons of animals / body-parts: not exactly sure how many features you’ll end up with but maybe 500-600? **Default features will likely get you chasing, BUT the tricky part with the use-case is typically directionality: you probably want to know who is chasing who and with 4 individuals you have 12 chasing permutations and it can be too much a chore creating 12 classifiers**. 
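[Editor's note] A rough sketch of the default feature families described in the comment above for user-defined pose configs: between-animal body-part distances and rolling-window movement sums. All column names and constants are assumptions for illustration.

```python
import numpy as np
import pandas as pd

df = pd.read_csv("pose.csv")   # hypothetical pose file
px_per_mm, fps = 3.5, 30       # assumed pixel calibration and frame rate

# (i) frame-wise distance between body-parts of two different animals
df["mouse1_nose_to_mouse2_nose"] = np.sqrt(
    (df["Mouse1_nose_x"] - df["Mouse2_nose_x"]) ** 2
    + (df["Mouse1_nose_y"] - df["Mouse2_nose_y"]) ** 2) / px_per_mm

# (ii) aggregated distance moved by one body-part in a rolling 2 s window
step = np.sqrt(df["Mouse1_nose_x"].diff() ** 2
               + df["Mouse1_nose_y"].diff() ** 2) / px_per_mm
df["mouse1_nose_movement_2s"] = step.rolling(window=2 * fps, min_periods=1).sum()
```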
", + "created_at": "2023-05-11T11:36:37Z", + "author": "sronilsson" + }, + { + "body": "Hi, thank you for your reply!\r\nI'm not sure I understood the problem with directionality. Yoou're saying that would be a problem in case I use my user-defined configuration with 4 mice, right?\r\nSo a solution would be to train on just 2 mice, and then use the videos and associate them to cropped csv files with just 2 mice at a time, which would allow me to detect with just one classifier all chases events.\r\nAt this point, is there a way to extrapolate the information on which mouse is the chaser, and which is the chased? For example, I train to recognize behaviors as chases with both instances in which mouse1 is chasing mouse2, and mouse2 is chasing mouse1. When I analyze my new videos, can I know in which instances labeled as chases mouse1 is the chaser, or mouse2 is the chaser? Or would I need to create 2 different classifiers for that?\r\n", + "created_at": "2023-05-11T14:38:29Z", + "author": "Sere-98" + }, + { + "body": "yes, the potential is not so much about the user-defined configurations, I guess its more about the behavior (chase) having a direction in general: e.g., you annotate frames as containing \"chasing\" the classifier will find chasing. If you annotate frames as contains \"animal 1 chases animal 2\" it will find \"animal 1 chases animal 2\". So you would need two classifiers for two mice doing chasing. As a solution I wrote a method to [\"reverse\" classifiers](https://github.com/sgoldenlab/simba/blob/master/docs/reverse_annotations.md) so people would only have to annotate one directions, and then reverse the classifier. However, last I heard, a couple of users encounterd bugs with these methods in the GUI, and I have not had any time to maintain it. But just as an FYI that it's doable in case you wan't to type something up yourself. ", + "created_at": "2023-05-11T18:07:43Z", + "author": "sronilsson" + }, + { + "body": "I see, thank you for the clarification and the quick reply!\r\nI will try looking into the \"reverse classifier\".\r\n", + "created_at": "2023-05-12T08:32:05Z", + "author": "Sere-98" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 9.3.0 in /docs", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 9.3.0.\n
Release notes (sourced from pillow's releases):

9.3.0 — https://pillow.readthedocs.io/en/stable/releasenotes/9.3.0.html — ... (truncated)

Changelog (sourced from pillow's changelog), 9.3.0 (2022-10-29):

- Limit SAMPLESPERPIXEL to avoid runtime DOS #6700 [wiredfool]
- Initialize libtiff buffer when saving #6699 [radarhere]
- Inline fname2char to fix memory leak #6329 [nulano]
- Fix memory leaks related to text features #6330 [nulano]
- Use double quotes for version check on old CPython on Windows #6695 [hugovk]
- Remove backup implementation of Round for Windows platforms #6693 [cgohlke]
- Fixed set_variation_by_name offset #6445 [radarhere]
- Fix malloc in _imagingft.c:font_setvaraxes #6690 [cgohlke]
- Release Python GIL when converting images using matrix operations #6418 [hmaarrfk]
- Added ExifTags enums #6630 [radarhere]
- Do not modify previous frame when calculating delta in PNG #6683 [radarhere]
- Added support for reading BMP images with RLE4 compression #6674 [npjg, radarhere]
- Decode JPEG compressed BLP1 data in original mode #6678 [radarhere]
- Added GPS TIFF tag info #6661 [radarhere]
- Added conversion between RGB/RGBA/RGBX and LAB #6647 [radarhere]
- Do not attempt normalization if mode is already normal #6644 [radarhere]
- ... (truncated)

Commits:

- d594f4c Update CHANGES.rst [ci skip]
- 909dc64 9.3.0 version bump
- 1a51ce7 Merge pull request #6699 from hugovk/security-libtiff_buffer
- 2444cdd Merge pull request #6700 from hugovk/security-samples_per_pixel-sec
- 744f455 Added release notes
- 0846bfa Add to release notes
- 799a6a0 Fix linting
- 00b25fd Hide UserWarning in logs
- 05b175e Tighter test case
- 13f2c5a Prevent DOS with large SAMPLESPERPIXEL in Tiff IFD
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=9.3.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-05-09T17:02:29Z", + "updated_at": "2023-05-10T11:36:49Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Looks like pillow is no longer a dependency, so this is no longer needed.", + "created_at": "2023-05-10T11:36:46Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump numpy from 1.18.1 to 1.22.0 in /docs", + "body": "Bumps [numpy](https://github.com/numpy/numpy) from 1.18.1 to 1.22.0.\n
Release notes (sourced from numpy's releases):

v1.22.0 — NumPy 1.22.0 Release Notes

NumPy 1.22.0 is a big release featuring the work of 153 contributors spread over 609 pull requests. There have been many improvements; highlights are:

- Annotations of the main namespace are essentially complete. Upstream is a moving target, so there will likely be further improvements, but the major work is done. This is probably the most user-visible enhancement in this release.
- A preliminary version of the proposed Array-API is provided. This is a step in creating a standard collection of functions that can be used across applications such as CuPy and JAX.
- NumPy now has a DLPack backend. DLPack provides a common interchange format for array (tensor) data.
- New methods for quantile, percentile, and related functions. The new methods provide a complete set of the methods commonly found in the literature.
- A new configurable allocator for use by downstream projects.

These are in addition to the ongoing work to provide SIMD support for commonly used functions, improvements to F2PY, and better documentation.

The Python versions supported in this release are 3.8-3.10; Python 3.7 has been dropped. Note that 32-bit wheels are only provided for Python 3.8 and 3.9 on Windows; all other wheels are 64 bits on account of Ubuntu, Fedora, and other Linux distributions dropping 32-bit support. All 64-bit wheels are also linked with 64-bit integer OpenBLAS, which should fix the occasional problems encountered by folks using truly huge arrays.

Expired deprecations:

- Deprecated numeric-style dtype strings have been removed: using the strings "Bytes0", "Datetime64", "Str0", "Uint32", and "Uint64" as a dtype will now raise a TypeError. (gh-19539)
- Expired deprecations for loads, ndfromtxt, and mafromtxt in npyio: numpy.loads was deprecated in v1.15, with the recommendation that users use pickle.loads instead. ndfromtxt and mafromtxt were both deprecated in v1.17 - users should use numpy.genfromtxt instead with the appropriate value for the usemask parameter. (gh-19615)

... (truncated)

Commits: (list not recoverable from the extraction)
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=numpy&package-manager=pip&previous-version=1.18.1&new-version=1.22.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-05-09T17:02:03Z", + "updated_at": "2023-05-10T01:30:28Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Looks like numpy is no longer a dependency, so this is no longer needed.", + "created_at": "2023-05-10T01:30:25Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump cryptography from 36.0.1 to 39.0.1", + "body": "Bumps [cryptography](https://github.com/pyca/cryptography) from 36.0.1 to 39.0.1.\n
Changelog (sourced from cryptography's changelog):

39.0.1 - 2023-02-07

- SECURITY ISSUE - Fixed a bug where Cipher.update_into accepted Python buffer protocol objects, but allowed immutable buffers. CVE-2023-23931
- Updated Windows, macOS, and Linux wheels to be compiled with OpenSSL 3.0.8.

39.0.0 - 2023-01-01

- BACKWARDS INCOMPATIBLE: Support for OpenSSL 1.1.0 has been removed. Users on older versions of OpenSSL will need to upgrade.
- BACKWARDS INCOMPATIBLE: Dropped support for LibreSSL < 3.5. The new minimum LibreSSL version is 3.5.0. Going forward our policy is to support versions of LibreSSL that are available in versions of OpenBSD that are still receiving security support.
- BACKWARDS INCOMPATIBLE: Removed the encode_point and from_encoded_point methods on EllipticCurvePublicNumbers, which had been deprecated for several years. EllipticCurvePublicKey.public_bytes and EllipticCurvePublicKey.from_encoded_point should be used instead.
- BACKWARDS INCOMPATIBLE: Support for using MD5 or SHA1 in x509.CertificateBuilder, other X.509 builders, and PKCS7 has been removed.
- BACKWARDS INCOMPATIBLE: Dropped support for macOS 10.10 and 10.11; macOS users must upgrade to 10.12 or newer.
- ANNOUNCEMENT: The next version of cryptography (40.0) will change the way we link OpenSSL. This will only impact users who build cryptography from source (i.e., not from a wheel) and specify their own version of OpenSSL. For those users, the CFLAGS, LDFLAGS, INCLUDE, LIB, and CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS environment variables will no longer be respected. Instead, users will need to configure their builds as documented.
- Added support for disabling the legacy provider in OpenSSL 3.0.x.
- Added support for disabling RSA key validation checks when loading RSA keys via load_pem_private_key, load_der_private_key, and RSAPrivateNumbers.private_key. This speeds up key loading but is unsafe if you are loading potentially attacker-supplied keys.
- Significantly improved performance for ChaCha20Poly1305
- ... (truncated)

Commits: (list not recoverable from the extraction)
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=cryptography&package-manager=pip&previous-version=36.0.1&new-version=39.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-05-09T15:27:33Z", + "updated_at": "2023-05-09T15:32:52Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Looks like cryptography is no longer a dependency, so this is no longer needed.", + "created_at": "2023-05-09T15:32:49Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump setuptools from 53.0.0 to 65.5.1", + "body": "Bumps [setuptools](https://github.com/pypa/setuptools) from 53.0.0 to 65.5.1.\n
Release notes (sourced from setuptools's releases): v65.5.1 through v63.4.2 — no release notes provided for any of these versions. ... (truncated)

Changelog (sourced from setuptools's changelog):

v65.5.1 — Misc:
- #3638: Drop a test dependency on the mock package, always use unittest.mock -- by hroncok
- #3659: Fixed REDoS vector in package_index.

v65.5.0 — Changes:
- #3624: Fixed editable install for multi-module/no-package src-layout projects.
- #3626: Minor refactorings to support distutils using stdlib logging module.

Documentation changes:
- #3419: Updated the example version numbers to be compliant with PEP-440 on the "Specifying Your Project's Version" page of the user guide.

Misc:
- #3569: Improved information about conflicting entries in the current working directory and editable install (in documentation and as an informational warning).
- #3576: Updated version of validate_pyproject.

v65.4.1 — Misc:
- #3613: Fixed encoding errors in expand.StaticModule when system default encoding doesn't match expectations for source files.
- #3617: Merge with pypa/distutils@6852b20 including fix for pypa/distutils#181.

v65.4.0 — Changes: ... (truncated)

v65.3.0 — ... (truncated)

Commits: (list not recoverable from the extraction)
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=setuptools&package-manager=pip&previous-version=53.0.0&new-version=65.5.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-05-09T15:27:33Z", + "updated_at": "2023-05-09T15:32:44Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Looks like setuptools is no longer a dependency, so this is no longer needed.", + "created_at": "2023-05-09T15:32:39Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump flask from 1.1.2 to 2.2.5", + "body": "Bumps [flask](https://github.com/pallets/flask) from 1.1.2 to 2.2.5.\n
Release notes (sourced from flask's releases):

- 2.2.5: This is a security fix release for the 2.2.x release branch. Note that 2.3.x is the currently supported release branch; please upgrade to the latest version if possible.
- 2.2.4: This is a fix release for the 2.2.x release branch.
- 2.2.3: This is a fix release for the 2.2.x release branch.
- 2.2.2: This is a fix release for the 2.2.0 feature release.
- 2.2.1: This is a fix release for the 2.2.0 feature release.
- 2.2.0: This is a feature release, which includes new features and removes previously deprecated code. The 2.2.x branch is now the supported bug fix branch, the 2.1.x branch will become a tag marking the end of support for that branch. We encourage everyone to upgrade, and to use a tool such as pip-tools to pin all dependencies and control upgrades.
- 2.1.3: (no notes shown)
- 2.1.2: This is a fix release for the 2.1.0 feature release.
- 2.1.1: This is a fix release for the 2.1.0 feature release.
- ... (truncated)

Changelog (sourced from flask's changelog):

Version 2.2.5, released 2023-05-02:
- Update for compatibility with Werkzeug 2.3.3.
- Set Vary: Cookie header when the session is accessed, modified, or refreshed.

Version 2.2.4, released 2023-04-25:
- Update for compatibility with Werkzeug 2.3.

Version 2.2.3, released 2023-02-15:
- Autoescape is enabled by default for .svg template files. #4831
- Fix the type of template_folder to accept pathlib.Path. #4892
- Add --debug option to the flask run command. #4777

Version 2.2.2, released 2022-08-08:
- Update Werkzeug dependency to >= 2.2.2. This includes fixes related to the new faster router, header parsing, and the development server. #4754
- Fix the default value for app.env to be "production". This attribute remains deprecated. #4740

Version 2.2.1, released 2022-08-03:
- Setting or accessing json_encoder or json_decoder raises a deprecation warning. #4732

Version 2.2.0: ... (truncated)

Commits: (list not recoverable from the extraction)
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=flask&package-manager=pip&previous-version=1.1.2&new-version=2.2.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-05-09T15:27:27Z", + "updated_at": "2023-05-09T15:32:51Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Looks like flask is no longer a dependency, so this is no longer needed.", + "created_at": "2023-05-09T15:32:47Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump lxml from 4.2.6 to 4.9.1", + "body": "Bumps [lxml](https://github.com/lxml/lxml) from 4.2.6 to 4.9.1.\n
Changelog (sourced from lxml's changelog):

4.9.1 (2022-07-01) — Bugs fixed:
- A crash was resolved when using iterwalk() (or canonicalize()) after parsing certain incorrect input. Note that iterwalk() can crash on valid input parsed with the same parser after failing to parse the incorrect input.

4.9.0 (2022-06-01) — Bugs fixed:
- GH#341: The mixin inheritance order in lxml.html was corrected. Patch by xmo-odoo.

Other changes:
- Built with Cython 0.29.30 to adapt to changes in Python 3.11 and 3.12.
- Wheels include zlib 1.2.12, libxml2 2.9.14 and libxslt 1.1.35 (libxml2 2.9.12+ and libxslt 1.1.34 on Windows).
- GH#343: Windows-AArch64 build support in Visual Studio. Patch by Steve Dower.

4.8.0 (2022-02-17) — Features added:
- GH#337: Path-like objects are now supported throughout the API instead of just strings. Patch by Henning Janssen.
- The ElementMaker now supports QName values as tags, which always override the default namespace of the factory.

Bugs fixed:
- GH#338: In lxml.objectify, the XSI float annotation "nan" and "inf" were spelled in lower case, whereas XML Schema datatypes define them as "NaN" and "INF" respectively.

... (truncated)

Commits:
- d01872c Prevent parse failure in new test from leaking into later test runs.
- d65e632 Prepare release of lxml 4.9.1.
- 86368e9 Fix a crash when incorrect parser input occurs together with usages of iterwa...
- 50c2764 Delete unused Travis CI config and reference in docs (GH-345)
- 8f0bf2d Try to speed up the musllinux AArch64 build by splitting the different CPytho...
- b9f7074 Remove debug print from test.
- b224e0f Try to install 'xz' in wheel builds, if available, since it's now needed to e...
- 897ebfa Update macOS deployment target version from 10.14 to 10.15 since 10.14 starts...
- 853c9e9 Prepare release of 4.9.0.
- d3f77e6 Add a test for https://bugs.launchpad.net/lxml/+bug/1965070 leaving out the a...
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=lxml&package-manager=pip&previous-version=4.2.6&new-version=4.9.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-05-09T15:27:17Z", + "updated_at": "2023-05-09T15:32:51Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Looks like lxml is no longer a dependency, so this is no longer needed.", + "created_at": "2023-05-09T15:32:47Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Outlier correction doesn't respond", + "body": "After importing videos and CSV's from DLC and setting video parameters I moved to the outlier correction step. After pressing \"setting\" and setting the parameters (1.5 for criterion for both) I pressed \"Confirm\" and then \"Run outlier correction\" and nothing happens. SimBA just doesn't respond to that button. \r\nWhat does it mean? How can I fix it (if needed fixing)?\r\nThanks \r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Outlier correction'\r\n2. Click on 'Setting'\r\n3. Click on 'Confirm'\r\n4. Go back to 'Outlier correction'\r\n5. Press \"Run outlier correction\" \r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - \r\n![image](https://user-images.githubusercontent.com/86400083/236629887-b69d105d-db53-47a7-8416-a2df5aa9e406.png)\r\n\r\n - Python Version [e.g. 3.6.0]\r\n - Are you using anaconda? yes\r\n \r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n![image](https://user-images.githubusercontent.com/86400083/236629667-bab9497f-af23-4cc7-9b34-1bccfdca1924.png)\r\n", + "user": "Urimons", + "reaction_cnt": 0, + "created_at": "2023-05-06T14:25:06Z", + "updated_at": "2023-10-12T10:02:51Z", + "author": "Urimons", + "comments": [ + { + "body": "Hi @Urimons! Thanks for reporting, if you upgrade SimBA with `pip install simba-uw-tf-dev --upgrade` do you still see this error? Last version should be version `1.59.1`. ", + "created_at": "2023-05-07T16:39:52Z", + "author": "sronilsson" + }, + { + "body": "I installed the latest version and now everything works great! thanks\r\n", + "created_at": "2023-05-08T12:30:37Z", + "author": "Urimons" + }, + { + "body": "Hi!\r\n\r\nI have the same problem described as above. \r\nAfter I installed the newest version (1.74.3) I don't get an error message in the main window anymore, but I still don't get 2 CSV files in the project_folder\\log. I get one CSV file (named: Outliers_movement_20231012081509), which is empty - see picture attached. \r\nBesides this I also cant find any CSV files for each of the videos my project located within the project_folder\\csv\\outlier_corrected_movement_location sub-directory.\r\nCould you tell me how I fix this problem?\r\nThank you!\r\n![Problem_outlier_correction](https://github.com/sgoldenlab/simba/assets/147690960/321bf389-2cdb-4df3-a9f0-494102a0db5b)\r\n", + "created_at": "2023-10-12T06:44:10Z", + "author": "Chantal-Wi" + }, + { + "body": "Hi @Chantal-Wi and thanks for reporting! When running the outlier correction, do you see any errors either in the main SimBA window, or printed in the Windows terminal window? 
", + "created_at": "2023-10-12T09:31:18Z", + "author": "sronilsson" + }, + { + "body": "![image](https://github.com/sgoldenlab/simba/assets/147690960/68febf47-4305-48e5-817b-3a06c3ecf42e)\r\n![image](https://github.com/sgoldenlab/simba/assets/147690960/e491a072-b330-4d6e-9e98-35f270b0b5ce)\r\n\r\nThank you for your answer!\r\nThis is what I get when I try to run this command.", + "created_at": "2023-10-12T09:38:39Z", + "author": "Chantal-Wi" + }, + { + "body": "Thanks @Chantal-Wi - do you see any CSV files inside the `project_folder/csv/input_csv` directory of your SimBA project? ", + "created_at": "2023-10-12T09:42:08Z", + "author": "sronilsson" + }, + { + "body": "No, this folder is empty. ", + "created_at": "2023-10-12T09:46:54Z", + "author": "Chantal-Wi" + }, + { + "body": "Alright - when you imported your tracking data, did you see any errors in SimBA main window or Windows terminal?\r\n\r\nThe error comes from when SimBA tries to perform outlier correction but can't find any files . I will insert a better error msg..", + "created_at": "2023-10-12T10:02:51Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Impossible to analyse ROI with 1 FPS video", + "body": "Hello, \r\n\r\nI'm trying to analyze ROI from csv files obtained from DLC, for single animal, on Windows 10. \r\nMy videos are 1 fps, and after I drew the ROIs, prompt returns: \" Videos in your SimBA project have an FPS of 1 or less. Please use videos with more than one frame per second, or correct the inaccurate fps inside the `project_folder/logs/videos_info.csv` file \" \r\n\r\nI was thinking to convert all my videos to >1 fps, but maybe there is a way to analyze 1 fps videos direclty?\r\n\r\nThank you, \r\nBest,", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2023-05-05T11:33:24Z", + "updated_at": "2023-05-07T16:38:05Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - do you have a traceback copy paste or screengrab of the OS terminal of the error and I will have a look around where it is thrown? I want to see why I put the limit at 1 FPS: you should be able to run it at 1 FPS although you may miss some events and velocity / movement can be off a bit at that resolution.", + "created_at": "2023-05-05T12:08:55Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the quick answer, here what returns the terminal after I converted one video in 2 fps. Apparently it does like this frequency either :)\r\n![Simba_Error_1fps](https://user-images.githubusercontent.com/66886884/236454412-b3ea54c5-c9e2-4626-8643-dc19d8fb9562.png)\r\n\r\n", + "created_at": "2023-05-05T12:12:04Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks @DorianBattivelli - for troubleshooting, would you mind using latest SimBA? If you don't want to upgrade, could you test in a new conda environment with the latest SimBA and check how the error looks like? ", + "created_at": "2023-05-05T12:15:37Z", + "author": "sronilsson" + }, + { + "body": "Here we go!\r\n![Simba_Error_1fps](https://user-images.githubusercontent.com/66886884/236456449-b235902e-6c65-434a-bf37-4b0459564248.png)\r\n\r\n", + "created_at": "2023-05-05T12:22:15Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah thank you! 
Just to confirm, SimBA grabs the file from the `project_folder/csv/outlier_corrected_movement_location` folder to perform the ROI analysis, do you have files representing your videos in this directory?", + "created_at": "2023-05-05T12:25:16Z", + "author": "sronilsson" + }, + { + "body": "No I don't... Should I run outlier correction again?\r\nI try this and keep you posted", + "created_at": "2023-05-05T12:26:33Z", + "author": "DorianBattivelli" + }, + { + "body": "Yes, or click to skip - make sure the videos that you want analyzed are represented in that directory.", + "created_at": "2023-05-05T12:27:55Z", + "author": "sronilsson" + }, + { + "body": "Everything works fine, it looks we did not run the outlier step properly, \r\n\r\nThank you!", + "created_at": "2023-05-05T12:46:40Z", + "author": "DorianBattivelli" + }, + { + "body": "Cheers @DorianBattivelli ! Let me know if any other errors pop up!", + "created_at": "2023-05-05T12:47:50Z", + "author": "sronilsson" + }, + { + "body": "I am facing another problem now. I previously selected ROIs with the old version of SimBA (I unfortunately don't know which version), with the new ROI interface. Now after updating and analyzing the ROI data, it looks like the data of the polyglon shaped ROIs are not picked up. Is there a solution for this or do I have to redraw all the polyglon ROIs? ", + "created_at": "2023-05-05T14:49:27Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah got it, you should not have to re-draw, maybe there is a key name mis-match between versions I haven't taken care off. \r\n\r\nCan you drop your `project_folder/logs/measures/ROI_definitions.h5` file here and I can see if I can replicate it and fix?\r\n\r\n", + "created_at": "2023-05-05T15:00:04Z", + "author": "sronilsson" + }, + { + "body": "It looks the format is not accepted here, maybe by email?", + "created_at": "2023-05-05T15:02:12Z", + "author": "DorianBattivelli" + }, + { + "body": "Does it work if you zip the file and drop it here?", + "created_at": "2023-05-05T15:04:30Z", + "author": "sronilsson" + }, + { + "body": "[ezyzip.zip](https://github.com/sgoldenlab/simba/files/11407552/ezyzip.zip)\r\n", + "created_at": "2023-05-05T15:06:08Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - unzip this one try replacing your `project_folder/logs/measures/ROI_definitions.h5` with this it and let me know if that fixes it:\r\n\r\n[ROI_definitions.h5.zip](https://github.com/sgoldenlab/simba/files/11407720/ROI_definitions.h5.zip)\r\n", + "created_at": "2023-05-05T15:19:24Z", + "author": "sronilsson" + }, + { + "body": "Amazing, everything is now working fine! Thank you ", + "created_at": "2023-05-05T16:09:26Z", + "author": "DorianBattivelli" + }, + { + "body": "Cheers! I will insert a fix in simba next week to make sure this can't happen again. 
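[Editor's note] Related to the FPS error message quoted in the issue above, a small sketch of correcting an inaccurate fps entry in the project's video-info log before re-running the analysis. The path comes from the error message; the column names are assumptions.

```python
import pandas as pd

info_path = "project_folder/logs/videos_info.csv"  # path from the error message above
info = pd.read_csv(info_path)

# Hypothetical column names: set the corrected frame rate for one video.
info.loc[info["Video"] == "Video1", "fps"] = 25
info.to_csv(info_path, index=False)
```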
", + "created_at": "2023-05-05T16:11:55Z", + "author": "sronilsson" + } + ] + }, + { + "title": "`KeyError` when training multiple models from meta files using custom weights", + "body": "**Describe the bug**\r\nA `KeyError` is thrown when training multiple models from meta files using custom weights.\r\nThe meta file contains:\r\n- `class_weights`: `custom`\r\n- `class_custom_weights`: `{0: '1', 1: '2'}`\r\n\r\nThe following error is thrown:\r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\SimBA.py\", line 363, in \r\n button_train_multimodel = Button(label_trainmachinemodel, text='TRAIN MULTIPLE MODELS (ONE FOR EACH SAVED SETTING)',fg='green',command = lambda: threading.Thread(target=self.train_multiple_models_from_meta(config_path=self.config_path)).start())\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\SimBA.py\", line 593, in train_multiple_models_from_meta\r\n model_trainer.run()\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\train_mutiple_models.py\", line 150, in run\r\n self.meta_dicts = self.__check_validity_of_meta_files(meta_file_paths=self.meta_file_lst)\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\train_mutiple_models.py\", line 125, in __check_validity_of_meta_files\r\n meta_dict[ReadConfig.CLASS_WEIGHTS.value] = meta_dict['custom_weights']\r\nKeyError: 'custom_weights'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nSave a model settings meta file with custom weights using the defaults weights:\r\nThe meta file contains:\r\n- `class_weights`: `custom`\r\n- `class_custom_weights`: `{0: '1', 1: '2'}`\r\n\r\nClick on the green \"Train multiple models (one for each saved setting)\" button; the annotations are loaded, and then the error above is thrown.\r\n\r\n\r\n**Expected behavior**\r\nThe model training proceeds without error.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS] Windows 10\r\n - Python Version [e.g. 3.6.0] 3.6\r\n - Are you using anaconda? Yes\r\n - Simba version: 1.58.1 (fresh pip install today)\r\n \r\n\r\n**Additional context**\r\nThis seems to result from the fact that `__check_validity_of_meta_files()` expects the meta file to have the column name `custom_weights` whereas the actual name is `class_custom_weights`. 
Manually changing this in `train_multiple_models.py` fixes the issue but quickly fails again with:\r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\SimBA.py\", line 363, in \r\n button_train_multimodel = Button(label_trainmachinemodel, text='TRAIN MULTIPLE MODELS (ONE FOR EACH SAVED SETTING)',fg='green',command = lambda: threading.Thread(target=self.train_multiple_models_from_meta(config_path=self.config_path)).start())\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\SimBA.py\", line 593, in train_multiple_models_from_meta\r\n model_trainer.run()\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\train_mutiple_models.py\", line 148, in run\r\n self.meta_dicts = self.__check_validity_of_meta_files(meta_file_paths=self.meta_file_lst)\r\n File \"C:\\Users\\username\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\train_mutiple_models.py\", line 124, in __check_validity_of_meta_files\r\n for k, v in meta_dict[ReadConfig.CLASS_WEIGHTS.value].items():\r\nAttributeError: 'str' object has no attribute 'items'\r\n```\r\nThis shows that the routine fails to deal with the dict being read as str from the file. I could fix all of that using the same approach used elsewhere in SimBa:\r\n```python\r\n if meta_dict[ReadConfig.CLASS_WEIGHTS.value] == 'custom':\r\n weights = ast.literal_eval(meta_dict['class_custom_weights'])\r\n meta_dict[ReadConfig.CLASS_WEIGHTS.value] = weights\r\n for k, v in meta_dict[ReadConfig.CLASS_WEIGHTS.value].items():\r\n meta_dict[ReadConfig.CLASS_WEIGHTS.value][k] = int(v)\r\n```\r\nNote that it seems one could/should update `ReadConfig.CUSTOM_WEIGHTS` to be `class_custom_weights` or revert change the meta file to have `custom_weights` as column name but that is beyond my knowledge of SimBA to know which way to address this would be the most reliable and safe.\r\n", + "user": "florianduclot", + "reaction_cnt": 0, + "created_at": "2023-04-29T16:03:33Z", + "updated_at": "2023-05-16T11:55:43Z", + "author": "florianduclot", + "comments": [ + { + "body": "Thanks for this @florianduclot !\r\n\r\nI can see it, first as you say there is a typo in the key it should be `class_custom_weights` and *not* `custom_weights`. Second, the config meta file stores it as a `str` so we have to convert it to a dict. Can you try if this fixes it? \r\n\r\nAt the top, import literal_eval:\r\n`from ast import literal_eval`\r\n\r\nChange routine to:\r\n\r\n```\r\n if meta_dict[ReadConfig.CLASS_WEIGHTS.value] == 'custom':\r\n meta_dict[ReadConfig.CLASS_WEIGHTS.value] = literal_eval(meta_dict['class_custom_weights'])\r\n for k, v in meta_dict[ReadConfig.CLASS_WEIGHTS.value].items():\r\n meta_dict[ReadConfig.CLASS_WEIGHTS.value][k] = int(v)\r\n```\r\n\r\nLet me know how goes and I will update pip package. 
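The string-to-dict conversion at the heart of this fix can be shown in isolation. A minimal, self-contained sketch, assuming only that the meta file delivers the weights as the literal string `"{0: '1', 1: '2'}"` described above (`raw` is a stand-in name):

```python
from ast import literal_eval

raw = "{0: '1', 1: '2'}"  # value as read from the meta CSV: a str, not a dict

# literal_eval safely parses the string into {0: '1', 1: '2'};
# the comprehension then casts the str weights to int
weights = {k: int(v) for k, v in literal_eval(raw).items()}
print(weights)  # {0: 1, 1: 2}
```

`literal_eval` only evaluates Python literals, which is why it is preferred over `eval` for config strings read from disk.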
\r\n\r\nSimon\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-04-29T17:01:22Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the quick feedback, @sronilsson ,\r\n\r\nI can't test that right at this time but it's equivalent to what I've tested to work (see my last code snippet above) so I agree with you that it should work.\r\n\r\nI'll try your exact solution when I get a chance but it might not be today, unfortunately.", + "created_at": "2023-04-29T18:38:18Z", + "author": "florianduclot" + }, + { + "body": "Hi @florianduclot - I did push the updated code and just a heads up up if you try it, I re-organized the files a fair bit to make more readable - the code piece lives in `simba.model.grid_search_rf` now. ", + "created_at": "2023-05-05T12:01:37Z", + "author": "sronilsson" + }, + { + "body": "Just wanted to confirm: I tried the latest Simba version yesterday and it indeed works as intended.\r\n\r\nThanks again for pushing the fix.", + "created_at": "2023-05-16T11:55:43Z", + "author": "florianduclot" + } + ] + }, + { + "title": "Can't create a simba / plotly dataset", + "body": "**Describe the bug**\r\nWhen I try to save a simBA / plotly data set I get the following error:\r\n![image](https://user-images.githubusercontent.com/126618342/232210749-774d26ff-c3ec-44cf-b51a-d4d321c3a47e.png)\r\n\r\nHow could I fix it? Did I do something wrong?\r\nThank you in advance!\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python Version: 3.6.0\r\n - Are you using anaconda? Yes\r\n \r\n", + "user": "gemajpg", + "reaction_cnt": 0, + "created_at": "2023-04-15T11:23:41Z", + "updated_at": "2023-04-29T16:49:27Z", + "author": "gemajpg", + "comments": [] + }, + { + "title": "Problem training single model (global enviroment)", + "body": "**Describe the bug**\r\nI follow the steps of this guide: https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md\r\n\r\nYet, when I finally get to train the model, it gives me the following error: \r\n\r\n![image](https://user-images.githubusercontent.com/126618342/230721756-cd96fce9-35fb-450f-a276-b60fde6d8b63.png)\r\n\r\n![image](https://user-images.githubusercontent.com/126618342/230721770-097ea81e-0965-4a54-894d-447eb60c7b89.png)\r\n\r\n\r\nAm I doing something wrong?\r\n\r\nThis is what I put on settings:\r\n![image](https://user-images.githubusercontent.com/126618342/230722036-9a6c6626-590d-43fd-8c02-de4c381e57af.png)\r\n\r\nI tried downloading the file with the example for the hyper-parameters but I don't really know how.\r\n\r\n**Desktop:**\r\n - OS: Windows 10.\r\n - Python Version 3.6.0\r\n - Are you using anaconda? Yes\r\n \r\n", + "user": "gemajpg", + "reaction_cnt": 0, + "created_at": "2023-04-08T12:46:55Z", + "updated_at": "2023-04-10T15:09:45Z", + "author": "gemajpg", + "comments": [ + { + "body": "Hi @gemajpg ! How many body-parts and animals does your tracking data track? Something goes wrong early in training, when SimBA is trying to split the body-part coordinates data columns from the rest of the columns. One body-part, `Right_ear` of animal 2 is expected but can't be found in the data. The data is stored in the `project_folder/csv/targets_inserted` directory. I see you have one file in there, if you check it, what column headers do you see?", + "created_at": "2023-04-10T11:51:56Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Hi! Four body parts for two animals. I actually made them just like the standard simBA has (nose, two ears and tail base). 
\r\n\r\nThis is what is shown in the file. \r\n![image](https://user-images.githubusercontent.com/126618342/230897800-343be42c-b03b-4f23-a1fb-bd3d52f92978.png)\r\n", + "created_at": "2023-04-10T12:04:22Z", + "author": "gemajpg" + }, + { + "body": "Thanks @gemajpg, got it, just to confirm, you selected the built in simba default 2 animal with four body-parts on each?\r\n\r\nIf you upgrade to simba 1.55.8 with `pip install simba-uw-tf-dev --upgrade`, do you still see the error?", + "created_at": "2023-04-10T12:37:40Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Yes, that's what I selected. \r\n\r\nI upgraded it but I still see the error. \r\n\r\n![image](https://user-images.githubusercontent.com/126618342/230906103-1cbbfe0d-00ef-47f8-91f7-f80ab7e2eb3e.png)\r\n", + "created_at": "2023-04-10T13:02:06Z", + "author": "gemajpg" + }, + { + "body": "Thanks @gemajpg - is your project small? Can it be zipped up and shared, and I can take a look?", + "created_at": "2023-04-10T13:03:52Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Thanks! It won't let me share it because zipped it still is 87 mb but I uploaded the normal folder into google drive if it helps. \r\nHere's the link:\r\n\r\nhttps://drive.google.com/drive/folders/1gOEay4Sq_62rB8IsNG8AVgX-CNPSpx4D?usp=sharing\r\n\r\n", + "created_at": "2023-04-10T13:18:25Z", + "author": "gemajpg" + }, + { + "body": "Got it thanks!", + "created_at": "2023-04-10T13:37:06Z", + "author": "sronilsson" + }, + { + "body": "For whatever reason (maybe bug from using multiple SimBA versions in your project.. that I will look into) your SimBA project has a body-part called `Right_ear_2`, while in the actual data in your project, the same body-part is named `Ear_right_2`. \r\n\r\nTo fix this: \r\n\r\n1) Go to `prueba/project_folder/logs/measures/pose_configs/bp_names` and change `Right_ear_2` to `Ear_right_2` as in screengrab below.\r\n\"image\"\r\n\r\n2). Update simba again, should be version `1.55.9` now. \r\n\r\n3). Run simba with `simba`. \r\n\r\nLet me know how it goes!\r\n\r\n", + "created_at": "2023-04-10T14:28:50Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson it worked! Thank you!", + "created_at": "2023-04-10T15:09:44Z", + "author": "gemajpg" + } + ] + }, + { + "title": "Dependency incompatibility in 1.55.1 in NEW conda env", + "body": "**Describe the bug**\r\nIn Windows 10, under a **new** conda env, simba fails to start with:\r\n```\r\n(...)\r\nImportError: cannot import name '_OneToOneFeatureMixin'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. `conda create --name simbaenv python=3.6`\r\n2. `conda activate simbaenv`\r\n3. `pip install simba-uw-tf-dev`\r\n4. `pip install numba==0.52.0`\r\n5. `pip uninstall shapely`\r\n6. `conda install -c conda-forge Shapely`\r\n7. `simba`\r\n\r\n**Expected behavior**\r\nSimba starts successfully.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Win10\r\n - Python Version [e.g. 3.6.0]: 3.6 (Anaconda)\r\n - Are you using anaconda? Yes\r\n \r\n\r\n**Additional context**\r\nFrom a little excursion down the rabbit hole of dependencies, it seems this stems from pip pulling a version of `imbalanced-learn` that is too new for the version of `scikit-learn` used by simba. I could fix it by ensuring `imbalanced-learn==0.7.0` is installed. 
With the following packages, simba starts:\r\n```\r\n\r\n(simbaenv) C:\\Users\\florian.duclot>pip show simba-uw-tf-dev scikit-learn imbalanced-learn numba\r\nName: Simba-UW-tf-dev\r\nVersion: 1.55.1\r\nSummary: Toolkit for computer classification of complex social behaviors in experimental animals\r\nHome-page: https://github.com/sgoldenlab/simba\r\nAuthor: Simon Nilsson, Jia Jie Choong, Sophia Hwang\r\nAuthor-email: sronilsson@gmail.com\r\nLicense: GNU Lesser General Public License v3 (LGPLv3)\r\nLocation: c:\\users\\florian.duclot\\appdata\\local\\continuum\\anaconda3\\envs\\simbaenv\\lib\\site-packages\r\nRequires: Pillow, opencv-python, numpy, numexpr, plotly, scipy, dtreeviz, dash-html-components, xgboost, tables, pandas, scikit-image, tqdm, matplotlib, cefpython3, h5py, xlrd, dash, imblearn, dash-core-components, numba, yellowbrick, graphviz, statsmodels, eli5, seaborn, wxpython, imgaug, dash-colorscales, dash-color-picker, pyarrow, imutils, shap, shapely, scikit-learn, pyyaml, trafaret, tabulate\r\nRequired-by:\r\n---\r\nName: scikit-learn\r\nVersion: 0.22.2\r\nSummary: A set of python modules for machine learning and data mining\r\nHome-page: http://scikit-learn.org\r\nAuthor: None\r\nAuthor-email: None\r\nLicense: new BSD\r\nLocation: c:\\users\\florian.duclot\\appdata\\local\\continuum\\anaconda3\\envs\\simbaenv\\lib\\site-packages\r\nRequires: scipy, numpy, joblib\r\nRequired-by: yellowbrick, Simba-UW-tf-dev, shap, imbalanced-learn, eli5, dtreeviz\r\n---\r\nName: imbalanced-learn\r\nVersion: 0.7.0\r\nSummary: Toolbox for imbalanced dataset in machine learning.\r\nHome-page: https://github.com/scikit-learn-contrib/imbalanced-learn\r\nAuthor: None\r\nAuthor-email: None\r\nLicense: MIT\r\nLocation: c:\\users\\florian.duclot\\appdata\\local\\continuum\\anaconda3\\envs\\simbaenv\\lib\\site-packages\r\nRequires: numpy, scikit-learn, joblib, scipy\r\nRequired-by: imblearn\r\n---\r\nName: numba\r\nVersion: 0.52.0\r\nSummary: compiling Python code using LLVM\r\nHome-page: https://numba.github.com\r\nAuthor: Anaconda, Inc.\r\nAuthor-email: numba-users@continuum.io\r\nLicense: BSD\r\nLocation: c:\\users\\florian.duclot\\appdata\\local\\continuum\\anaconda3\\envs\\simbaenv\\lib\\site-packages\r\nRequires: llvmlite, setuptools, numpy\r\nRequired-by: Simba-UW-tf-dev\r\n```\r\n\r\nOn the topic: is Python 3.6 still required or would a newer be recommended now (the installation docs seem to insist on 3.6)?", + "user": "florianduclot", + "reaction_cnt": 0, + "created_at": "2023-04-06T14:22:11Z", + "updated_at": "2023-04-06T15:04:46Z", + "author": "florianduclot", + "comments": [ + { + "body": "Thank you @florianduclot ! I will pin imblearn to 0.7.0 for now. SimBA relies on imblearn for SMOTE and SMOTEEN, however, the practical usefulness of those methods are questionable. We should probably get rid of imblearn as some point.\r\n\r\nI have run it up to 3.8 without problems, I don't think there would be issues in 3.9. The code is developed in 3.6 env though, and not tested in any other version. So there could be some dependency tinkering before it boots up in >3.6. 
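A small sanity check that an activated environment really sees the working combination reported above; a sketch only, and note that the import names differ from the pip package names:

```python
import sklearn
import imblearn

print("scikit-learn:", sklearn.__version__)       # expected 0.22.x
print("imbalanced-learn:", imblearn.__version__)  # expected 0.7.0

# the reported failure mode: imbalanced-learn too new for scikit-learn 0.22
assert imblearn.__version__ == "0.7.0", "pin imbalanced-learn==0.7.0"
```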
\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n ", + "created_at": "2023-04-06T14:44:22Z", + "author": "sronilsson" + }, + { + "body": "Sounds good, and thanks a lot for the additional and helpful information!", + "created_at": "2023-04-06T15:04:46Z", + "author": "florianduclot" + } + ] + }, + { + "title": "Erro: Fatal Python error: PyEval_RestoreThread: NULL tstate", + "body": "When I go draw RO, this erro is show:\r\nCurrent thread 0x00002c0c (most recent call first):\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_image.py\", line 147 in draw_circle\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_image.py\", line 197 in initiate_draw\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 595 in create_draw\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 347 in \r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705 in __call__\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 560 in mainloop\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 132 in __init__\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_menus.py\", line 63 in draw\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705 in __call__\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1283 in mainloop\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 3636 in main\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\Scripts\\simba.exe\\__main__.py\", line 7 in \r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\runpy.py\", line 85 in _run_code\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\runpy.py\", line 193 in _run_module_as_main\r\n(SIMBA) PS C:\\WINDOWS\\system32>", + "user": "anacastropsico", + "reaction_cnt": 0, + "created_at": "2023-04-04T23:11:31Z", + "updated_at": "2023-10-02T15:43:38Z", + "author": "anacastropsico", + "comments": [ + { + "body": "Thanks for letting me know @anacastropsico! To troubleshoot, (i) which version of SimBA are you running (type `pip show simba-uw-tf-dev`)? (ii) which version of python are you running? (type `python --version`), make sure it is 64bit. 
", + "created_at": "2023-04-04T23:53:26Z", + "author": "sronilsson" + }, + { + "body": "Name: Simba-UW-tf-dev\r\nVersion: 1.22.5\r\nSummary: Toolkit for computer classification of complex social behaviors in experimental animals\r\nHome-page: https://github.com/sgoldenlab/simba\r\nAuthor: Simon Nilsson, Jia Jie Choong, Sophia Hwang\r\nAuthor-email: sronilsson@gmail.com\r\nLicense: GNU Lesser General Public License v3 (LGPLv3)\r\nLocation: c:\\users\\anaca\\anaconda3\\envs\\simba\\lib\\site-packages\r\nRequires: numba, graphviz, dash-color-picker, numexpr, dtreeviz, pyarrow, pyyaml, tqdm, matplotlib, dash-colorscales, xlrd, opencv-python, imutils, scipy, scikit-image, h5py, plotly, imblearn, trafaret, shapely, numpy, statsmodels, pandas, seaborn, dash-html-components, shap, Pillow, tables, dash-core-components, imgaug, eli5, tabulate, wxpython, xgboost, cefpython3, scikit-le\r\n\r\nPython 3.6.13 :: Anaconda, Inc.", + "created_at": "2023-04-05T02:42:25Z", + "author": "anacastropsico" + }, + { + "body": "Thanks @anacastropsico - can you see if the error persist after upgrading to latest version with `pip install simba-uw-tf-dev --upgrade`?", + "created_at": "2023-04-05T10:53:43Z", + "author": "sronilsson" + }, + { + "body": "Hi! I get the same error message after I'm clicking any of the tools for drawing the ROI, then Simba freezes and shuts down.\r\n\r\n**(simba) C:\\Users\\mpc>pip show simba-uw-tf-dev**\r\nName: Simba-UW-tf-dev\r\nVersion: 1.73.3\r\nSummary: Toolkit for computer classification of complex social behaviors in experimental animals\r\nHome-page: https://github.com/sgoldenlab/simba\r\nAuthor: Simon Nilsson, Jia Jie Choong, Sophia Hwang\r\nAuthor-email: sronilsson@gmail.com\r\nLicense: GNU Lesser General Public License v3 (LGPLv3)\r\nLocation: c:\\programdata\\anaconda3\\envs\\simba\\lib\\site-packages\r\nRequires: tables, eli5, pandas, matplotlib, dash, xgboost, wxpython, dash-color-picker, scikit-image, xlrd, imutils, opencv-python, tqdm, scipy, dtreeviz, scikit-learn, trafaret, numba, pyyaml, statsmodels, tabulate, seaborn, cefpython3, imgaug, numexpr, pyarrow, shap, dash-colorscales, Pillow, imblearn, plotly, shapely, dash-core-components, h5py, numpy, yellowbrick, dash-html-components, graphviz\r\nRequired-by:\r\n\r\n**(simba) C:\\Users\\mpc>python --version**\r\nPython 3.6.13 :: Anaconda, Inc.\r\n\r\nCould you help me with this? Thank you!\r\n", + "created_at": "2023-10-02T14:53:51Z", + "author": "lxspedeza" + }, + { + "body": "Hi @lxspedeza, thanks for reporting this! Does this error still happen if you make sure there are no other python processes running on your computer? I.e., do a computer restart or kill all other python processes in the task manager? \r\n\r\nIf that doesn't help, can you paste me the error msg you are seeing? The error reported above from previous user is a little old and the line numbers are not the line numbers in the current version.", + "created_at": "2023-10-02T15:14:17Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the quick reply @sronilsson ! In the meantime I realized, I only get this error message because after drawing a ROI, I didn't press space or enter (or c), but other buttons on the ROI panel. I just noticed the message in the Anaconda Terminal:\r\n\r\n> Select a ROI and then press SPACE or ENTER button!\r\n> Cancel the selection process by pressing c button!\r\n\r\nBefore, I didn't know what to press. So with that the issue is solved. 
If you're curious about the error message after pressing different things from space, enter or c, this is what I get:\r\n\r\nFatal Python error: PyEval_RestoreThread: NULL tstate\r\n\r\n> Current thread 0x000011b8 (most recent call first):\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\roi_tools\\ROI_image.py\", line 165 in initiate_x_y_callback\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\roi_tools\\ROI_image.py\", line 169 in draw_polygon\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\roi_tools\\ROI_image.py\", line 200 in initiate_draw\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 567 in create_draw\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 320 in \r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705 in __call__\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1283 in mainloop\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 105 in __init__\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\roi_tools\\ROI_menus.py\", line 66 in draw\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705 in __call__\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1283 in mainloop\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 962 in main\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\Scripts\\simba.exe\\__main__.py\", line 7 in \r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 85 in _run_code\r\n> File \"C:\\ProgramData\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 193 in _run_module_as_main", + "created_at": "2023-10-02T15:19:58Z", + "author": "lxspedeza" + }, + { + "body": "Got it thank you! I will insert a more interpretable error msg.\r\n\r\nWhile I got you here... I have had some occasional issues on my Mac drawing circles, not sure why.. do you get any error with the circles on your PC? ", + "created_at": "2023-10-02T15:31:15Z", + "author": "sronilsson" + }, + { + "body": "I'm on Win10, and drawing circles works perfectly for me. Please lmk if I can help, I really appreciate your work!", + "created_at": "2023-10-02T15:36:06Z", + "author": "lxspedeza" + }, + { + "body": "> I'm on Win10, and drawing circles works perfectly for me. Please lmk if I can help, I really appreciate your work!\r\n\r\nThanks for letting me know!", + "created_at": "2023-10-02T15:43:38Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Unable to import h5 file from DLC", + "body": "Hi! 
\r\nI cannot import the h5 file from DLC when trying to import tracking data.\r\nI get the following message: \r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 430, in \r\n tracking_data_type=self.dlc_data_type_option_dropdown.getChoices()))\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 290, in run_call\r\n dlc_multi_animal_importer.import_data()\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\pose_importers\\dlc_multi_animal_importer.py\", line 372, in import_data\r\n self.__find_video_file()\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\pose_importers\\dlc_multi_animal_importer.py\", line 142, in __find_video_file\r\n raise NoFilesFoundError(msg=f'SimBA searched your project_folder/videos directory for a video file representing {self.file_name}, and could not find a match. Above is a list of possible video filenames that SimBA searched for within your projects video directory without success.')\r\nsimba.utils.errors.NoFilesFoundError: SimBA searched your project_folder/videos directory for a video file representing Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el, and could not find a match. Above is a list of possible video filenames that SimBA searched for within your projects video directory without success.\r\n\r\nBut I have also recieved other errors, it doesn't seem to be stable.\r\n\r\nI tried to look up for answers in others issues or the FAQ but none of them seem to solve it for me. \r\n\r\nFor example, I did not change the name of any file after they were created by DLC.\r\nAnd I've been following the steps provided by the generic simBA tutorial.\r\n\r\nI do have to mention, I had the following issue with Tkinter (the create project window was blank) but was fixed yesterday in this issue: \r\n\r\nhttps://github.com/sgoldenlab/simba/issues/242\r\n\r\nIt changed the script to create a project. \r\n\r\n\r\nI am using simBA on Anaconda.\r\n\r\nDoes someone know how to fix it?\r\n\r\nSorry if I didn't explain myself very clearly, I have nearly no knowledge of this topic. \r\n\r\nThank you in advance. \r\n", + "user": "gemajpg", + "reaction_cnt": 0, + "created_at": "2023-04-03T17:39:00Z", + "updated_at": "2023-04-06T15:50:53Z", + "author": "gemajpg", + "comments": [ + { + "body": "Hi @gemajpg!\r\n\r\nSimBA is looking for a video file inside your project, in the `project_folder/videos/` directory, called `Vídeo ratones blancos.mp4` or `Vídeo ratones blancos.avi`, but can't find it. Does e.g., `project_folder/videos/Vídeo ratones blancos.avi` exist? \r\n\r\n", + "created_at": "2023-04-03T18:20:43Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Hi!\r\nIt has the video imported before with its original name: Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_full\r\n\r\nShould I change it to only \"Vídeo ratones blancos.mp4\"?\r\n\r\n", + "created_at": "2023-04-03T18:47:18Z", + "author": "gemajpg" + }, + { + "body": "Hi @gemajpg! Yes but `Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_full` does not sound like an original video you named, it sounds lik a video visualization example created by deeplabcut. 
SimBA wants the original video. ", + "created_at": "2023-04-03T18:51:12Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson \r\nOkay! Thank you!\r\nI tried with the original video, now I get this:\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 627, in open\r\n self._handle = tables.open_file(self._path, self._mode, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\tables\\file.py\", line 315, in open_file\r\n return File(filename, mode, title, root_uep, filters, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\tables\\file.py\", line 778, in __init__\r\n self._g_new(filename, mode, **params)\r\n File \"tables\\hdf5extension.pyx\", line 492, in tables.hdf5extension.File._g_new\r\ntables.exceptions.HDF5ExtError: HDF5 error back trace\r\n\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5F.c\", line 509, in H5Fopen\r\n unable to open file\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5Fint.c\", line 1498, in H5F_open\r\n unable to open file: time = Mon Apr 3 21:26:25 2023\r\n, name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', tent_flags = 0\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FD.c\", line 734, in H5FD_open\r\n open failed\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FDsec2.c\", line 346, in H5FD_sec2_open\r\n unable to open file: name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0\r\n\r\nEnd of HDF5 error back trace\r\n\r\nUnable to open/create file 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 430, in \r\n tracking_data_type=self.dlc_data_type_option_dropdown.getChoices()))\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 290, in run_call\r\n dlc_multi_animal_importer.import_data()\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\pose_importers\\dlc_multi_animal_importer.py\", line 373, in import_data\r\n self.data_df = pd.read_hdf(file_path).replace([np.inf, -np.inf], np.nan).fillna(0)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 384, in read_hdf\r\n store = HDFStore(path_or_buf, mode=mode, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 505, in __init__\r\n self.open(mode=mode, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 661, in open\r\n raise IOError(str(e))\r\nOSError: HDF5 error back trace\r\n\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5F.c\", line 509, in 
H5Fopen\r\n unable to open file\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5Fint.c\", line 1498, in H5F_open\r\n unable to open file: time = Mon Apr 3 21:26:25 2023\r\n, name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', tent_flags = 0\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FD.c\", line 734, in H5FD_open\r\n open failed\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FDsec2.c\", line 346, in H5FD_sec2_open\r\n unable to open file: name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0\r\n\r\nEnd of HDF5 error back trace\r\n\r\nUnable to open/create file 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5'\r\n", + "created_at": "2023-04-03T19:30:46Z", + "author": "gemajpg" + }, + { + "body": "Hi @gemajpg! This suggest you have a data file inside your videos folder. It seems like SimBA is trying to open the file, assuming it is a data file, but it is not a data file. Just to check, is `'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5' a data file, or is it a video file?", + "created_at": "2023-04-04T01:53:45Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Yes, it's a h5 data file I believe. \r\nI tried also to put it outside the folder where DLC creates it (the one for the videos) but it still can't seem to open it either.\r\n\r\nIt was taken from this folder, I think it is the file needed.\r\n![image](https://user-images.githubusercontent.com/126618342/229853193-76a693cf-0493-4e47-8f08-c9f94b175bd9.png)\r\n\r\nIn the information of Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5 it says it is a h5 file:\r\n![image](https://user-images.githubusercontent.com/126618342/229853698-0d457b17-a51b-4d25-b2a5-926dc3dfb8ce.png)\r\n", + "created_at": "2023-04-04T16:15:10Z", + "author": "gemajpg" + }, + { + "body": "Got it, - in your SImBA project, first import the video for `Vídeo ratones blancos`, next, import the data: which is the `Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5` file. ", + "created_at": "2023-04-04T23:50:42Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Hi! I have been doing that all the time and it still gives me the same error. 
\r\n![image](https://user-images.githubusercontent.com/126618342/230378514-23eec412-17d5-4087-a5df-1aebd7475356.png)\r\n\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 627, in open\r\n self._handle = tables.open_file(self._path, self._mode, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\tables\\file.py\", line 315, in open_file\r\n return File(filename, mode, title, root_uep, filters, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\tables\\file.py\", line 778, in __init__\r\n self._g_new(filename, mode, **params)\r\n File \"tables\\hdf5extension.pyx\", line 492, in tables.hdf5extension.File._g_new\r\ntables.exceptions.HDF5ExtError: HDF5 error back trace\r\n\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5F.c\", line 509, in H5Fopen\r\n unable to open file\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5Fint.c\", line 1498, in H5F_open\r\n unable to open file: time = Thu Apr 6 14:26:26 2023\r\n, name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', tent_flags = 0\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FD.c\", line 734, in H5FD_open\r\n open failed\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FDsec2.c\", line 346, in H5FD_sec2_open\r\n unable to open file: name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0\r\n\r\nEnd of HDF5 error back trace\r\n\r\nUnable to open/create file 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 430, in \r\n tracking_data_type=self.dlc_data_type_option_dropdown.getChoices()))\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\mixins\\pop_up_mixin.py\", line 290, in run_call\r\n dlc_multi_animal_importer.import_data()\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\pose_importers\\dlc_multi_animal_importer.py\", line 373, in import_data\r\n self.data_df = pd.read_hdf(file_path).replace([np.inf, -np.inf], np.nan).fillna(0)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 384, in read_hdf\r\n store = HDFStore(path_or_buf, mode=mode, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 505, in __init__\r\n self.open(mode=mode, **kwargs)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\pytables.py\", line 661, in open\r\n raise IOError(str(e))\r\nOSError: HDF5 error back trace\r\n\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5F.c\", line 509, in H5Fopen\r\n unable to open file\r\n File 
\"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5Fint.c\", line 1498, in H5F_open\r\n unable to open file: time = Thu Apr 6 14:26:26 2023\r\n, name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', tent_flags = 0\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FD.c\", line 734, in H5FD_open\r\n open failed\r\n File \"D:\\pytables_hdf5\\CMake-hdf5-1.10.5\\hdf5-1.10.5\\src\\H5FDsec2.c\", line 346, in H5FD_sec2_open\r\n unable to open file: name = 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5', errno = 2, error message = 'No such file or directory', flags = 0, o_flags = 0\r\n\r\nEnd of HDF5 error back trace\r\n\r\nUnable to open/create file 'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5'\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2023-04-06T12:29:49Z", + "author": "gemajpg" + }, + { + "body": "Hi @gemajpg - is there anything odd going on with `'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5' - how large is the file?", + "created_at": "2023-04-06T12:37:30Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson \r\nIt is 1,83 MB (1.920.300 bytes).\r\nIs it too big?", + "created_at": "2023-04-06T12:45:12Z", + "author": "gemajpg" + }, + { + "body": "Oh no it's very small - something could have gone wrong when created. Do you have the same issue with every .h5 files?", + "created_at": "2023-04-06T12:56:25Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Yes, I tried with another one I had and it still shows the same error. I believe it might have something to do with Tkinter or the script since, as I said, it didn't used to let me open the \"create file\" window, it was blank but was fixed in this other issue: https://github.com/sgoldenlab/simba/issues/242\r\n\r\nCould it have something to do? ", + "created_at": "2023-04-06T13:16:19Z", + "author": "gemajpg" + }, + { + "body": "I don't think so, this error comes from pandas, trying to open the h5 file. However, we hit an error `'C:/Users/gemap/Desktop/pruebasparasimba-GJP-2023-04-02/videos\\Vídeo ratones blancosDLC_resnet50_pruebasparasimbaApr2shuffle1_50000_el.h5' meaning either the file is corrupted or it doesn't exist. If you paste the file here I can give it a go", + "created_at": "2023-04-06T13:32:33Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson It did not let me upload the file since it doesn't support this file type. I uploaded it to google drive so I can share it. Here's the link: https://drive.google.com/file/d/1ZNZi5-rZO8Lo0ZB21kSGIucMSFOgJ6IZ/view?usp=sharing\r\n\r\nThank you for all the help so far!", + "created_at": "2023-04-06T14:16:27Z", + "author": "gemajpg" + }, + { + "body": "No worries. It's strange because I have no problem reading this file..\r\n\r\nBut some report similar issues with non-english characters in the file names [HERE](https://github.com/DeepLabCut/DeepLabCut/issues/1192). Could you see if changing `Vídeo` to `Video` fixes it?", + "created_at": "2023-04-06T14:31:09Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson it worked! 
thank you!", + "created_at": "2023-04-06T14:51:54Z", + "author": "gemajpg" + } + ] + }, + { + "title": "Unable to Create Project with Updated Version of SimBA", + "body": "**Describe the bug**\r\nWhen I updated to the most recent version of SimBA (1.54.1), I am unable to create a new project. I was previously using SimBA 1.31.0\r\n**To Reproduce**\r\n1. Update SimBA in Anaconda using the command `pip install simba-uw-tf-dev --upgrade`\r\n2. In the GUI, go to File -> Create a New Project\r\n3. See screenshots for error in terminal and in GUI\r\n\r\n**Expected behavior**\r\nThere should not be any errors in the terminal and I should be able to create a new project in the GUI\r\n\r\n**Screenshots**\r\n![Updated Simba Error](https://user-images.githubusercontent.com/121840032/228326303-a4d5c461-8d08-47aa-b6e6-991dd846248e.PNG)\r\n![Updated Simba Code Error](https://user-images.githubusercontent.com/121840032/228326321-04edbd30-ca78-4876-a946-17d7b45cfb4f.PNG)\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows\r\n - Python Version 3.6.13\r\n - SimBA version 1.54.1\r\n - I am using Anaconda\r\n \r\n\r\n**Additional context**\r\nI noticed that the GUI for SimBA 1.54.1 looks a little different, as there are icons next to each tab in the GUI. What are some of the updates that this new version provides?", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-03-28T17:54:53Z", + "updated_at": "2023-05-10T21:57:52Z", + "author": "mrnels19", + "comments": [ + { + "body": "Hi @mrnels19 - If you do `pip uninstall simba-uw-tf-dev` followed by `pip install simba-uw-tf-dev`, how does it look?\r\n\r\nPS. The updates are large but all back-end. SimBA has been made easier to develop, there was a lot of repeated junk code, missing classes, missing tests and lack of clear error msg and logging for users. There are some small new functions here and there, you may see some new buttons. They are all documented but if you cant find specific docs let me know and I will point you to them. ", + "created_at": "2023-03-28T17:59:26Z", + "author": "sronilsson" + }, + { + "body": "I used both of those commands, and I am still getting the same error. ", + "created_at": "2023-03-28T18:21:30Z", + "author": "mrnels19" + }, + { + "body": "when you type `pip uninstall simba-uw-tf-dev`, before you press `Y` to confirm uninstallation, what do you see printed out?\r\n", + "created_at": "2023-03-28T18:24:41Z", + "author": "sronilsson" + }, + { + "body": "This is what I see: \r\n![pip uninstall simba notes](https://user-images.githubusercontent.com/121840032/228333192-40e90809-7a2a-49dd-99db-240681de928d.PNG)\r\n", + "created_at": "2023-03-28T18:27:43Z", + "author": "mrnels19" + }, + { + "body": "Try and delete that folder (directory simba) within the site-packages manually before installing simba and let me know how goes", + "created_at": "2023-03-28T18:29:35Z", + "author": "sronilsson" + }, + { + "body": "Thank you for your input! I am able to successfully load a new project now. However, after I import my SLEAP tracking data (for the multi-animal tracking with the 8 body-part configuration SimBA provides), only one animal is tracked when I am asked to match the tracks to the appropriate animal. This is true, even as I press \"x\" to advance to another frame. If I label the two animals in the frame, I have to press \"c\" twice in order to exit this part of the GUI (pressing \"c\" once does not cause anything to occur) and I get an error in the terminal that is seen in the screenshot below. 
\r\n\r\nI initially ran into this error when I was working with SimBA 1.31.0 today, which prompted me to update SimBA. I had previously worked with SimBA 1.31.0 and did not experience this error. However, I am still encountering this issue after I have successfully updated to the latest version of SimBA.\r\n![one animal marked](https://user-images.githubusercontent.com/121840032/228340101-9c573994-adf4-4158-b2f7-d5df8d6d4d14.PNG)\r\n![one animal marked terminal error](https://user-images.githubusercontent.com/121840032/228340119-61df436c-aa1f-44ca-b359-d131ddf06388.PNG)\r\n", +    "created_at": "2023-03-28T18:58:29Z", +    "author": "mrnels19" +   }, +   { +    "body": "If you close the python terminal, open it again, and run simba, do you still have the issue?\r\n\r\nAlso make sure all python processes are killed in the activity manager before rebooting the terminal and simba.", +    "created_at": "2023-03-28T19:02:04Z", +    "author": "sronilsson" +   }, +   { +    "body": "PS\r\n\r\n```\r\nI initially ran into this error when I was working with SimBA 1.31.0 today, which prompted me to update SimBA. I had previously worked with SimBA 1.31.0 and did not experience this error. \r\n```\r\n\r\nThis makes me think there is something specific to the video data file. Do you see this error on all .slp files, or just one?", +    "created_at": "2023-03-28T19:05:18Z", +    "author": "sronilsson" +   }, +   { +    "body": "Thank you! I was able to resolve the problem by working with a different file. I am still using the multi-animal tracking model with 8 body parts on each animal provided by SimBA. I was able to load in my videos and tracking data just fine. When I opened the Load Project GUI, I was able to successfully set the video parameters, skip outlier extraction, extract features, and label behavior. I then went over to the Train Machine Model tab, where I used the parameters in the BtWGaNP_meta file to save settings for a specific model (one for each behavior I labeled). I then pressed the button that said Train Multiple Models (one for each saved setting) and got the errors shown in the screenshots below. What do you think could be causing these errors, as I did not have any issues with my data until now?\r\n![train machine model terminal error](https://user-images.githubusercontent.com/121840032/228423544-0688bf37-958b-47bf-b406-2ee3dd4fdf00.PNG)\r\n![train machine model error](https://user-images.githubusercontent.com/121840032/228423555-d9811b17-043e-4c53-892e-cc47d98c2f1f.PNG)\r\n", +    "created_at": "2023-03-29T04:02:12Z", +    "author": "mrnels19" +   }, +   { +    "body": "Hi @mrnels19 - if you open a CSV inside the `project_folder/csv/targets_inserted` directory, what do you see your body-parts being named in the header?", +    "created_at": "2023-03-29T11:57:04Z", +    "author": "sronilsson" +   }, +   { +    "body": "Hello, @sronilsson! When I open the CSV file, I first see what is included in my screenshot, which includes all the body parts. \r\n![targets inserted csv](https://user-images.githubusercontent.com/121840032/228587799-a7f6186a-07ae-40e0-b397-85e6413619f3.PNG)\r\n", +    "created_at": "2023-03-29T15:22:27Z", +    "author": "mrnels19" +   }, +   { +    "body": "When you created your project, did you do user defined pose configuration? Those headers in that file are what you would see if you picked a default body-part configuration. 
Which one exactly I don't know as I think notepad has more data hidden to the right outside the screengrab", + "created_at": "2023-03-29T17:34:37Z", + "author": "sronilsson" + }, + { + "body": "I used the default body-part configuration that involved multi-tracking and eight body parts on each animal. I also have provided a link to the complete CSV file, as there is more data hidden to the right. https://drive.google.com/file/d/1pdguHPW9kFPG0ErrrBiPsV39HwpQtUAl/view?usp=sharing\r\n\r\nI had to send the CSV file as a link, since it was so large - let me know if you cannot access it. \r\n![pose configuration](https://user-images.githubusercontent.com/121840032/228623865-eeba7373-143a-4031-b817-5f0f386b3986.PNG)\r\n", + "created_at": "2023-03-29T17:45:28Z", + "author": "mrnels19" + }, + { + "body": "In the screengrab earlier I see `Social_partner` and `test` and `Knee` etc. Does those ring a bell? Is that a pose config you have defined before?", + "created_at": "2023-03-29T17:55:44Z", + "author": "sronilsson" + }, + { + "body": "Yes, the animals were named SocialPartner and Test, when I was asked to label the animals in SimBA. I also defined each of the 8 body parts in SLEAP when predicting on the frames. The body parts titled Knee would refer to body parts 5 and 6 in this pose configuration. Should the animals and body parts have different names in order for the default pose configuration to work?", + "created_at": "2023-03-29T18:01:03Z", + "author": "mrnels19" + }, + { + "body": "No if you define your own body-part configuration, you should select that from the dropdown, what options do you see when you click the body-part config dropdown?", + "created_at": "2023-03-29T18:06:29Z", + "author": "sronilsson" + }, + { + "body": "When I click the body-part config dropdown, I see options for 4, 7, and 8 body parts on each animal, as well as an option to create my own configuration. I selected the 8 body part configuration, as that extracts the most features in SimBA. My frames have two animals in them, each with 8 body parts in the positions listed in the configuration. If the animals and the body parts are supposed to have specific names in order for the configuration to work, could you please share them with me? ", + "created_at": "2023-03-29T18:27:19Z", + "author": "mrnels19" + }, + { + "body": "No if you have those body-parts it should work fine, I don't know where the knee etc comes from though. If you selected the 8 body part configuration and never defined your own body-part configuration right?", + "created_at": "2023-03-29T18:45:15Z", + "author": "sronilsson" + }, + { + "body": "Yes, I never defined my own body-part configuration in SimBA for this specific project", + "created_at": "2023-03-29T21:47:24Z", + "author": "mrnels19" + }, + { + "body": "Thank you @mrnels19, very helpful, then I might have introduced a bug: I will look tomorrow and let you know - has been a long day", + "created_at": "2023-03-29T23:14:58Z", + "author": "sronilsson" + }, + { + "body": "Thank you so much! I also wanted to let you know that my body parts are not in the same order as those listed in the configuration. For example, my left ear body part is listed as the second body part in my skeleton, but it is listed as the first body part in the image for the SimBA configuration. If this is something that could be noted as well, that would be great!", + "created_at": "2023-03-29T23:23:01Z", + "author": "mrnels19" + }, + { + "body": "Hi, I just wanted to follow up to my previous response with another question. 
Ever since I began working with SimBA, there have been times when not all of the tracking data for a given video has been imported, as seen in the screenshot I have included below. The entire video has been predicted on, so I am not sure why all the tracking data has not been imported. What do you think is causing this to happen?\r\n![not all frames imported](https://user-images.githubusercontent.com/121840032/228743155-aea19646-d7f4-40b7-978d-64785684f0cc.PNG)\r\n", +    "created_at": "2023-03-30T05:59:15Z", +    "author": "mrnels19" +   }, +   { +    "body": "> Hi, I just wanted to follow up to my previous response with another question. Ever since I began working with SimBA, there have been times when not all of the tracking data for a given video has been imported, as seen in the screenshot I have included below. The entire video has been predicted on, so I am not sure why all the tracking data has not been imported. What do you think is causing this to happen? ![not all frames imported](https://user-images.githubusercontent.com/121840032/228743155-aea19646-d7f4-40b7-978d-64785684f0cc.PNG)\r\n\r\nI think the SLP file has a `frames` key. The `74133` comes from the number of unique entries in this key. Have your videos got 74k frames? \r\n\r\nRestructuring the SLP files is slow. There is an option to import sleap H5 files now though, which is a newer format sleap outputs, and it's significantly quicker. Have you tried that?", +    "created_at": "2023-03-30T10:54:20Z", +    "author": "sronilsson" +   }, +   { +    "body": "> Yes, I never defined my own body-part configuration in SimBA for this specific project\r\n\r\nI have tried to recreate your error and I can't. The body-part names in your project are stored in the `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` file. Have you got a screengrab of the entries in that file? \r\n\r\nIt makes me think that when you deleted your anaconda simba files, there were still some files that were not properly deleted. Specifically, all the different body-part configs within a SimBA installation are stored in the `simba/pose_configurations/` directory in your anaconda environment. Do you think any of those files could have survived your manual delete? \r\n", +    "created_at": "2023-03-30T10:58:34Z", +    "author": "sronilsson" +   }, +   { +    "body": "Hi! \r\nI have the same issue but I wasn't able to fix it with the commands recommended before. \r\nI uninstalled and installed simba and also tried by deleting it manually in site-packages.\r\nDoes someone know how to fix it, please?", +    "created_at": "2023-04-02T11:35:37Z", +    "author": "gemajpg" +   }, +   { +    "body": "@gemajpg , I too have the same error.\r\n@sronilsson , to reproduce:\r\n1. create a new conda env: `conda create --name simbadev python=3.6`\r\n2. `pip install simba-uw-tf-dev`\r\n3. `pip uninstall shapely`\r\n4. `conda install -c conda-forge shapely`\r\n5. `simba`, and then click File > Create a new project. 
The pop up window opens, but the main content isn't displayed.\r\nLogs shows:\r\n```\r\n(simbadev) C:\\Windows\\system32>simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\MBF\\miniconda3\\envs\\simbadev\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\MBF\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\SimBA.py\", line 746, in \r\n file_menu.add_command(label='Create a new project', compound='left', image=self.menu_icons['create']['img'], command=lambda: ProjectCreatorPopUp())\r\n File \"C:\\Users\\MBF\\miniconda3\\envs\\simbadev\\lib\\site-packages\\simba\\create_project_pop_up.py\", line 78, in __init__\r\n self.bp_lu[k]['img'] = PhotoImage(file=os.path.join(os.path.dirname(__file__), self.bp_lu[k]['img_path']))\r\n File \"C:\\Users\\MBF\\miniconda3\\envs\\simbadev\\lib\\tkinter\\__init__.py\", line 3545, in __init__\r\n Image.__init__(self, 'photo', name, cnf, master, **kw)\r\n File \"C:\\Users\\MBF\\miniconda3\\envs\\simbadev\\lib\\tkinter\\__init__.py\", line 3501, in __init__\r\n self.tk.call(('image', 'create', imgtype, name,) + options)\r\n_tkinter.TclError: encountered an unsupported criticial chunk type \"eXIf\"\r\n```\r\n\r\nI could fix it by specifying `ImageTk` in line 78:\r\nbefore:\r\nhttps://github.com/sgoldenlab/simba/blob/2fbff2f6fcc7cd3c6ef14ea1e42a2913e61091f0/simba/create_project_pop_up.py#L78\r\n\r\nafter:\r\n```python\r\n self.bp_lu[k]['img'] = ImageTk.PhotoImage(file=os.path.join(os.path.dirname(__file__), self.bp_lu[k]['img_path']))\r\n```\r\n\r\nI'm not sure whether there are more occurrences like this throughout the project, however.\r\n\r\nPS: @sronilsson, sorry for piling up in this issue; I realize it isn't the same problem as the original post of this issue. Let me know if you'd rather me open a new one instead.\r\n\r\nEDIT note:\r\nI have corrected a typo in ImageTk.", + "created_at": "2023-04-02T14:18:53Z", + "author": "florianduclot" + }, + { + "body": "@florianduclot \r\nThank you!!\r\nIt does seem like we get exactly the same problem when we try to create a new project. I tried to do what you recommended but sadly now I get the following issue: \r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 746, in \r\n file_menu.add_command(label='Create a new project', compound='left', image=self.menu_icons['create']['img'], command=lambda: ProjectCreatorPopUp())\r\n File \"C:\\Users\\gemap\\Anaconda3\\envs\\simba\\lib\\site-packages\\simba\\create_project_pop_up.py\", line 78, in __init__\r\n self.bp_lu[k]['img'] = ImageTK.PhotoImage(file=os.path.join(os.path.dirname(__file__), self.bp_lu[k]['img_path']))\r\nNameError: name 'ImageTK' is not defined", + "created_at": "2023-04-02T14:40:42Z", + "author": "gemajpg" + }, + { + "body": "@gemajpg , that is my bad... I have a typo in my previous post. It should be a lower case `k`:\r\n`ImageTk`.\r\n\r\nI'll edit my previous post accordingly; sorry for that.", + "created_at": "2023-04-02T14:52:11Z", + "author": "florianduclot" + }, + { + "body": "@florianduclot\r\nIt works now! Thank you so much! 
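The one-line fix above works because plain `tkinter.PhotoImage` relies on Tk's own PNG reader, which rejects the `eXIf` chunk some PNG writers add, while Pillow decodes the file itself and hands Tk raw pixel data. A minimal standalone sketch of the working pattern; the file name is hypothetical:

```python
import tkinter as tk
from PIL import ImageTk  # Pillow's Tk adapter, not tkinter.PhotoImage

root = tk.Tk()
# tk.PhotoImage(file=...) would raise TclError on a PNG carrying an eXIf chunk;
# ImageTk.PhotoImage decodes via Pillow first, so the chunk is harmless
img = ImageTk.PhotoImage(file="pose_config_icon.png")  # hypothetical path
tk.Label(root, image=img).pack()
root.mainloop()
```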
", + "created_at": "2023-04-02T14:55:36Z", + "author": "gemajpg" + }, + { + "body": "Thanks a lot @florianduclot @mrnels19, very helpful, I can see it and will insert the fix. For whatever reason no errors are raised on my mac so would be difficult to find it without your help 👍🏻 ", + "created_at": "2023-04-02T17:40:29Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Reorder Nodes using SLEAP Skeleton as input", + "body": "Hi SimBA developers, is there an easy way to rearrange the order of the nodes when the skeleton generated by SLEAP is used as input for SimBA?\r\n\r\nWe are trying to feed the SLEAP pose-estimation data into SimBA to pull out behavioral syllables. The nodes have to be ordered in a specific way for this to work. In the SLEAP output, node 1 is nose, node 2 is L ear, node 3 is R ear, and node 4 is tail. I'd like to rearrange it to feed into SimBA so that node 1 is L ear, node 2 is R ear, node 3 is nose, and node 4 is tail. There seems to be such a feature for deeplabcut but could you please add this for SLEAP as well? \r\n\r\nWe can't change the node order in our best-performing SLEAP mode, but the order of this skeleton won't work with SimBa: so we are stuck. Thanks so much for your help!\r\n\r\n\r\n\r\n\r\n", + "user": "herbertzhengwu", + "reaction_cnt": 0, + "created_at": "2023-03-28T14:27:41Z", + "updated_at": "2024-06-24T17:22:30Z", + "author": "herbertzhengwu", + "comments": [ + { + "body": "Hi @herbertzhengwu! Yes I was asked for a similar function the other day and will try to insert something..\r\n\r\nWhat was holding me back was this: The DLC body-part re-organize function takes e.g., the H5 file, reorganizes the columns in the only dataframe in the H5, and then re-saves the H5 - easy. The SLEAP SLP's and H5 files however, are more complex, and not so easy to get into the original data-structure before re-saving. \r\n\r\nps. you can still use a [user-defined](https://github.com/sgoldenlab/simba/blob/master/docs/Pose_config.md) body-part config in SimBA.", + "created_at": "2023-03-29T12:03:02Z", + "author": "sronilsson" + }, + { + "body": "Thanks a lot for your reply! We'll look into the user-defined config options. In the meantime, we'd really appreciate it if there's a function like that!", + "created_at": "2023-03-31T01:44:57Z", + "author": "herbertzhengwu" + }, + { + "body": "@herbertzhengwu - what would be easier to insert would by a function that reorganizes SLEAP CSV files. SLEAP outputs .SLP or .H5 files with data that are tricky to re-organize. However, SLEAP H5 files can be transformed into CSV format discussed [HERE](https://github.com/talmolab/sleap/discussions/1089) that can be imported into SimBA. I can insert a function that reorganizes those CSV files before import, but not sure that would be useful in your case? ", + "created_at": "2023-04-03T14:05:41Z", + "author": "sronilsson" + }, + { + "body": "Hi! Has a solution to this been developed? I'm facing a similar issue with my project, and it seems like the user-defined configuration isn't viable for functions like the directionality options.", + "created_at": "2024-06-23T23:29:18Z", + "author": "samaralsanti" + }, + { + "body": "Thanks for bumping this @samaralsanti - this slipped and hasn't been done but should be. \r\n\r\nThe re-order nodes function we have only works for DLC because DLC data is organized column-wise and only contains a single dataframe. 
SLEAP data files, by contrast, contain much more data than a single dataframe, and/or are organized row-wise, so they need a different operation.\r\n\r\nWould you be able to share a sample of your SLEAP data files with me and I can use that while testing / typing something up? ", + "created_at": "2024-06-24T14:26:23Z", + "author": "sronilsson" + }, + { + "body": "Hi! Thanks for your quick response. I would be happy to share some data files. What is the best way to share those with you? \r\n\r\nAlso, I don't know if this is helpful for you, but while looking for a solution to this on the SLEAP discussion forum, I also found [this response](https://github.com/talmolab/sleap/discussions/1199#discussioncomment-5458774) from the team over there. They seemed open to providing assistance with integrating this function in software like SimBA.", + "created_at": "2024-06-24T15:52:47Z", + "author": "samaralsanti" + }, + { + "body": "Thanks @samaralsanti, that's cool, I didn't see those functions before. It seems to accept an SLP file. Perhaps we can try to use those as a function call in SimBA, and place it in a loop over all data files in a directory. \r\n\r\nIf not too large, could you share it through a gdrive? It would be best to share sleap SLP files rather than SLEAP CSV/H5, so we can try out those functions that you linked. ", + "created_at": "2024-06-24T17:22:29Z", + "author": "sronilsson" + } + ] + }, + { + "title": "No “tracking” option in the simba tool.", + "body": "**Describe the bug**\r\nI installed SimBA using anaconda, but after I started SimBA, there was no tracking option in the toolbar, as shown in the image.\r\n\r\n**My version.**\r\n![b11fca32c0635e05c0399f4d98c485e](https://user-images.githubusercontent.com/89234825/227890409-9f82e9c2-4beb-4060-947b-e4699b025c86.png)\r\n\r\n**Reference version.**\r\n\r\n![1402f030e13e09c7059fa32a480c2ab](https://user-images.githubusercontent.com/89234825/227890444-ac7a1168-d1a7-4c11-8ad4-971c4b12f6dd.png)\r\n", + "user": "klxxxx", + "reaction_cnt": 0, + "created_at": "2023-03-27T08:46:42Z", + "updated_at": "2023-03-29T07:06:25Z", + "author": "klxxxx", + "comments": [ + { + "body": "Hi @klxxxx ! Although the drop-down menu was there in earlier versions, the menu items were not tied to any function - you most likely would have hit an error if you clicked them. We stopped supporting the tracking tools some time ago, mainly because I answered a lot of troubleshooting questions about those packages rather than about SimBA. I recommend using the packages through their respective interfaces instead of through SimBA. ", + "created_at": "2023-03-28T14:47:26Z", + "author": "sronilsson" + }, + { + "body": "Ok, understood, thanks for your answer.", + "created_at": "2023-03-29T07:06:10Z", + "author": "klxxxx" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.12.0 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.12.0.\n
**Release notes**

*Sourced from tensorflow-gpu's releases.*

**TensorFlow 2.12.0 (Release 2.12.0)**

*Breaking Changes*

- Build, Compilation and Packaging:
  - Removed redundant packages `tensorflow-gpu` and `tf-nightly-gpu`. These packages were removed and replaced with packages that direct users to switch to `tensorflow` or `tf-nightly` respectively. Since TensorFlow 2.1, the only difference between these two sets of packages was their names, so there is no loss of functionality or GPU support. See https://pypi.org/project/tensorflow-gpu for more details.
- `tf.function`:
  - `tf.function` now uses the Python `inspect` library directly for parsing the signature of the Python function it is decorated on. This change may break code where the function signature is malformed but was ignored previously, such as:
    - using `functools.wraps` on a function with a different signature
    - using `functools.partial` with an invalid `tf.function` input
  - `tf.function` now enforces input parameter names to be valid Python identifiers. Incompatible names are automatically sanitized, similarly to existing SavedModel signature behavior.
  - Parameterless `tf.function`s are assumed to have an empty `input_signature` instead of an undefined one, even if the `input_signature` is unspecified.
  - `tf.types.experimental.TraceType` now requires an additional `placeholder_value` method to be defined.
  - `tf.function` now traces with placeholder values generated by `TraceType` instead of the value itself.
- Experimental APIs `tf.config.experimental.enable_mlir_graph_optimization` and `tf.config.experimental.disable_mlir_graph_optimization` were removed.

*Major Features and Improvements*

- Support for Python 3.11 has been added.
- Support for Python 3.7 has been removed. We are not releasing any more patches for Python 3.7.
- `tf.lite`:
  - Added 16-bit float type support for the built-in op `fill`.
  - `Transpose` now supports 6D tensors.
  - Float LSTM now supports diagonal recurrent tensors: https://arxiv.org/abs/1903.08023
- `tf.experimental.dtensor`:
  - The coordination service now works with `dtensor.initialize_accelerator_system`, and is enabled by default.
  - Added `tf.experimental.dtensor.is_dtensor` to check if a tensor is a DTensor instance.
- `tf.data`:
  - Added support for an alternative checkpointing protocol, which makes it possible to checkpoint the state of the input pipeline without having to store the contents of internal buffers. The new functionality can be enabled through the `experimental_symbolic_checkpoint` option of `tf.data.Options()`.
  - Added a new `rerandomize_each_iteration` argument for the `tf.data.Dataset.random()` operation, which controls whether the sequence of generated random numbers should be re-randomized every epoch or not (the default behavior). If `seed` is set and `rerandomize_each_iteration=True`, the `random()` operation will produce a different (deterministic) sequence of numbers every epoch.
  - Added a new `rerandomize_each_iteration` argument for the `tf.data.Dataset.sample_from_datasets()` operation, which controls whether the sequence of generated random numbers used for sampling should be re-randomized every epoch or not. If `seed` is set and `rerandomize_each_iteration=True`, the `sample_from_datasets()` operation will use a different (deterministic) sequence of numbers every epoch.
- `tf.test`:
  - Added `tf.test.experimental.sync_devices`, which is useful for accurately measuring performance in benchmarks.
- `tf.experimental.dtensor`: ...

... (truncated)
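(As a concrete illustration of the `tf.data` change listed above, a short sketch; it assumes TensorFlow >= 2.12 is installed and that re-iterating the dataset counts as a new epoch.)

```python
import tensorflow as tf

# Dataset.random() yields an endless stream of pseudo-random int64 scalars.
# With a fixed seed and rerandomize_each_iteration=True, each fresh pass over
# the dataset produces a different (but still deterministic) sequence.
ds = tf.data.Dataset.random(seed=42, rerandomize_each_iteration=True)
for epoch in range(2):
    print([int(x) for x in ds.take(3)])  # the two passes should print different values
```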
**Changelog**

*Sourced from tensorflow-gpu's changelog: a near-verbatim duplicate of the release notes above, adding one item under `tf.experimental.dtensor`: "Added experimental support to ReduceScatter fuse on GPU (NCCL)."*

... (truncated)
**Commits**

- `0db597d` Merge pull request #60051 from tensorflow/venkat2469-patch-1
- `1a12f59` Update RELEASE.md
- `aa4d558` Merge pull request #60050 from tensorflow/venkat-patch-6
- `bd1ab8a` Update the security section in RELEASE.md
- `4905be0` Merge pull request #60049 from tensorflow/venkat-patch-5
- `9f96caa` Update setup.py on TF release branch with released version of Estimator and k...
- `e719b6b` Update Relese.md (#60033)
- `64a9d54` Merge pull request #60017 from tensorflow/joefernandez-patch-2.12-release-notes
- `7a4ebfd` Update RELEASE.md
- `e0e10a9` Merge pull request #59988 from tensorflow-jenkins/version-numbers-2.12.0-8756
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.12.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nYou can trigger a rebase of this PR by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
**Dependabot commands and options**
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
> **Note**\n> Automatic rebases have been disabled on this pull request as it has been open for over 30 days.\n", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2023-03-24T22:20:07Z", + "updated_at": "2023-11-27T04:48:42Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2023-04-06T01:31:10Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2023-04-20T15:34:50Z", + "author": "dependabot[bot]" + }, + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2023-11-27T04:48:35Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "ROI analysis for multiple body parts in features extracted", + "body": "**Is your feature request related to a problem? Please describe.**\r\nUnless I am incorrect, there is no possibility in SimBA to append ROI features to the features_extracted file for more than one body part. That means I have to create n different projects if I want to study n body parts at the same time. This is totally feasible but takes much longer than if it were possible to have the analyses of multiple body parts in the same file\r\n\r\n**Describe the solution you'd like**\r\nIt would therefore be very cool to have the possibility to append the ROI analyses for multiple body parts to the same \"features_extracted\" file.\r\n\r\n**Describe alternatives you've considered**\r\nThe alternative I am working with for the moment is creating 5 different projects with the same ROIs to have the analyses for 5 body parts\r\n\r\n\r\n", + "user": "Lucas97223", + "reaction_cnt": 0, + "created_at": "2023-03-23T12:09:43Z", + "updated_at": "2023-03-31T13:00:33Z", + "author": "Lucas97223", + "comments": [ + { + "body": "Hi @Lucas97223 - yes, I think I got it; just to confirm: rather than selecting the number of animals here in the screengrab, you would select the number of body-parts? \r\n\r\n![image](https://user-images.githubusercontent.com/34761092/227207332-1614dfdb-ab68-4cf6-9837-d8d1f50df2e3.png)\r\n", + "created_at": "2023-03-23T12:44:06Z", + "author": "sronilsson" + }, + { + "body": "Yes, it would be amazing. I think it would be cool to have the possibility to choose which body parts we want to study and also to do that for all the animals ", + "created_at": "2023-03-23T12:50:34Z", + "author": "Lucas97223" + }, + { + "body": "@Lucas97223 - if you update to version `1.53.8` you should see another button like the screengrab below. If you use the orange, rather than the red button, you can choose body-parts rather than animals. Please let me know if it works as expected on your end. 
\r\n\r\n\"image\"\r\n\r\n\r\n\r\n", + "created_at": "2023-03-24T14:19:45Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson, I finally could run the ROI analysis on multiple body parts at the same time thanks to the new version of SimBA and it worked well, as expected. Thank you so much for your help", + "created_at": "2023-03-29T13:39:01Z", + "author": "Lucas97223" + }, + { + "body": "Thanks for letting me know @Lucas97223 !", + "created_at": "2023-03-29T13:49:12Z", + "author": "sronilsson" + }, + { + "body": "Hey,\r\nI'm reaching back to you cause I have a problem the output of SimBA when it is performing this analysis. \r\nFirst I want to say that the table is well generated and that it perfectly has the form that I needed for my analysis. The only problem remaining is that, as I just realized, all the columns when the body parts are studied \"in zone\" are just full of zeros and there are no data there. More precisely, this concerns the columns : \"..._zone\", \"..._zone_cumulative_time\" and \"..._zone_cumulative_percent\". I checked and thi issue appears consistently in different projects.\r\n", + "created_at": "2023-03-31T09:48:13Z", + "author": "Lucas97223" + }, + { + "body": "Hi @Lucas97223 - many thanks for helping me troubleshoot this! It took a little bit more re-writing than I expected.. as the body-part information wasn't stored in an accessable way in upstream classes the ROI featurizer it relied on.\r\n\r\nI could re-create the error and inserted a fix. If you update to simba `1.54.4` with `pip install simba-uw-dev --upgrade`, hos does it look on your end?\r\n\r\n\r\n", + "created_at": "2023-03-31T11:05:25Z", + "author": "sronilsson" + }, + { + "body": "Just ran a project again and everything seems fine. The values are here!\r\nThank you", + "created_at": "2023-03-31T12:57:26Z", + "author": "Lucas97223" + }, + { + "body": "> Just ran a project again and everything seems fine. The values are here! Thank you\r\n\r\nCheers, let me know if anything else pops up!", + "created_at": "2023-03-31T13:00:33Z", + "author": "sronilsson" + } + ] + }, + { + "title": "question about pseudo labelling", + "body": "Hi,\r\nThank you for developping nice tools.\r\nI'm trying pseudo labelling, but I have an erorr.\r\nSimBA said \r\nSIMBA WARNING: Video xxxxxx has fps of 0.\r\nafter clicking 'Correct labels'.\r\nvideo_info.csv in logs folder show fps is 15, not 0.\r\nCan you help me with this issue?\r\n\r\nThank you.", + "user": "ayakab0619", + "reaction_cnt": 0, + "created_at": "2023-03-13T09:45:28Z", + "updated_at": "2023-03-14T10:55:43Z", + "author": "ayakab0619", + "comments": [ + { + "body": "Hi @ayakab0619! This error suggest that simba is having trouble to read the video file meta data you specify at this path. Is there anything odd with this video? Can you open it OK outside of simba? Is it a mp4 or avi video? \r\n\r\n\"image\"\r\n", + "created_at": "2023-03-13T13:24:21Z", + "author": "sronilsson" + }, + { + "body": "Hi,\r\nExactly, my video path was wrong. It now works.\r\nThank you so much!", + "created_at": "2023-03-14T08:46:42Z", + "author": "ayakab0619" + }, + { + "body": "🚀", + "created_at": "2023-03-14T10:55:37Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Impossible to extract features in Multi Animal Tracking", + "body": "**Describe the bug**\r\nI'm working with multi animal tracking from DLC from h5 files\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to Load Project and load one\r\n2. Click on the Extract Features tab\r\n3. 
Click on Extract features\r\n4. See error : \r\n\"Exception in thread Thread-72:\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\threading.py\", line 980, in _bootstrap_inner\r\n self.run()\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\threading.py\", line 917, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\simba\\SimBA.py\", line 2258, in extractfeatures\r\n feature_extractor.extract_features()\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\simba\\features_scripts\\feature_extractor_16bp.py\", line 138, in extract_features\r\n self.out_data['Mouse_1_poly_area'] = Parallel(n_jobs=-1, verbose=0, backend=\"threading\")(delayed(self.convex_hull_calculator_mp)(x, self.px_per_mm) for x in mouse_1_ar)\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\joblib\\parallel.py\", line 1098, in __call__\r\n self.retrieve()\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\joblib\\parallel.py\", line 975, in retrieve\r\n self._output.extend(job.get(timeout=self.timeout))\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\multiprocessing\\pool.py\", line 771, in get\r\n raise self._value\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\multiprocessing\\pool.py\", line 125, in worker\r\n result = (True, func(*args, **kwds))\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\joblib\\_parallel_backends.py\", line 620, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\joblib\\parallel.py\", line 288, in __call__\r\n return [func(*args, **kwargs)\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\joblib\\parallel.py\", line 288, in \r\n return [func(*args, **kwargs)\r\n File \"C:\\Users\\Admin\\anaconda3\\envs\\S\\lib\\site-packages\\simba\\features_scripts\\feature_extractor_16bp.py\", line 99, in convex_hull_calculator_mp\r\n return ConvexHull(arr).area / px_per_mm\r\n File \"_qhull.pyx\", line 2458, in scipy.spatial._qhull.ConvexHull.__init__\r\n File \"_qhull.pyx\", line 353, in scipy.spatial._qhull._Qhull.__init__\r\nscipy.spatial._qhull.QhullError: QH6154 Qhull precision error: Initial simplex is flat (facet 1 is coplanar with the interior point)\r\n\r\nWhile executing: | qhull i Qt\r\nOptions selected for Qhull 2019.1.r 2019/06/21:\r\n run-id 1840749349 incidence Qtriangulate _pre-merge _zero-centrum\r\n _max-width 4.2e+02 Error-roundoff 3.4e-13 _one-merge 1.7e-12\r\n _near-inside 8.4e-12 Visible-distance 6.7e-13 U-max-coplanar 6.7e-13\r\n Width-outside 1.3e-12 _wide-facet 4e-12 _maxoutside 2e-12\r\n\r\nThe input to qhull appears to be less than 2 dimensional, or a\r\ncomputation has overflowed.\r\n\r\nQhull could not construct a clearly convex simplex from points:\r\n- p6(v3): 1.8e-25 4.2e-25\r\n- p7(v2): 1.2e+02 4.2e+02\r\n- p0(v1): 3e-27 5.6e-27\r\n\r\nThe center point is coplanar with a facet, or a vertex is coplanar\r\nwith a neighboring facet. The maximum round off error for\r\ncomputing distances is 3.4e-13. The center point, facets and distances\r\nto the center point are as follows:\r\n\r\ncenter point 39.32 140.7\r\n\r\nfacet p7 p0 distance= -7.1e-15\r\nfacet p6 p0 distance= -18\r\nfacet p6 p7 distance= -7.1e-15\r\n\r\nThese points either have a maximum or minimum x-coordinate, or\r\nthey maximize the determinant for k coordinates. 
Trial points\r\nare first selected from points that maximize a coordinate.\r\n\r\nThe min and max coordinates for each dimension are:\r\n 0: 3.027e-27 117.9 difference= 117.9\r\n 1: 5.558e-27 422 difference= 422\r\n\r\nIf the input should be full dimensional, you have several options that\r\nmay determine an initial simplex:\r\n - use 'QJ' to joggle the input and make it full dimensional\r\n - use 'QbB' to scale the points to the unit cube\r\n - use 'QR0' to randomly rotate the input for different maximum points\r\n - use 'Qs' to search all points for the initial simplex\r\n - use 'En' to specify a maximum roundoff error less than 3.4e-13.\r\n - trace execution with 'T3' to see the determinant for each point.\r\n\r\nIf the input is lower dimensional:\r\n - use 'QJ' to joggle the input and make it full dimensional\r\n - use 'Qbk:0Bk:0' to delete coordinate k from the input. You should\r\n pick the coordinate with the least range. The hull will have the\r\n correct topology.\r\n - determine the flat containing the points, rotate the points\r\n into a coordinate plane, and delete the other coordinates.\r\n - add one or more points to make the input full dimensional.\"\r\n\r\n**Expected behavior**\r\nI expect to have the features_extracted folder in my project folder, which works when I select Simple Tracking, but I need to do it for Multi Animal Tracking\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 10\r\n - Python Version 3.10.10\r\n - conda version 22.9.0\r\n \r\n", + "user": "Lucas97223", + "reaction_cnt": 0, + "created_at": "2023-03-08T16:30:52Z", + "updated_at": "2023-03-16T11:42:06Z", + "author": "Lucas97223", + "comments": [ + { + "body": "Hello @Lucas97223! Which version of SimBA are you working with? `pip show simba-uw-tf-dev`", + "created_at": "2023-03-08T16:54:06Z", + "author": "sronilsson" + }, + { + "body": "There is more information on this error below; note, however, that a fix for this error was inserted fairly recently:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#3-i-get-a-qhull-eg-qh6154-or-6013-error-when-extracting-the-features", + "created_at": "2023-03-08T16:58:20Z", + "author": "sronilsson" + }, + { + "body": "Hi, thank you for your very quick answer. I'm currently using version 1.50.1 of SimBA.\r\nThank you, I'll take a look at the link you sent", + "created_at": "2023-03-08T17:27:20Z", + "author": "Lucas97223" + }, + { + "body": "Hi again,\r\nI read the issue solution from the link and thought about it.\r\nFirst of all, no body-part locations were filtered in the pose-estimation performed via DLC, so the second option of the solution is not checked in my situation.\r\nRegarding the first option, the fact that the animals disappear sometimes is very important in my experiments. My arena contains a nest and when the mice enter it, they disappear, and that is very important information that we don't want to lose. Considering your solution, the only way I could solve the problem is to interpolate the pose estimation data. This is a good idea but the problem is that the mice can spend a lot of time in the nest. 
Hence, for them not to disappear, I would have to set a very long period for the interpolation and I am afraid it is going to mess with the data when the mice are actually visible to the camera.\r\nDo you have an idea of how I could solve this problem?\r\nIf not, I think the best thing to do is to run the nest analysis as if it was a classical tracking situation, and analyse the other data by removing the moments they are in the nest. This solution still makes me lose a lot of data.\r\nThank you in advance!", + "created_at": "2023-03-09T16:29:36Z", + "author": "Lucas97223" + }, + { + "body": "Got it.\r\n\r\nTLDR: can you update to simba `1.52.4` with `pip install simba-uw-tf-dev --upgrade` and see if it works?\r\n\r\nThe error comes from being unable to calculate the volume of the animals. I **thought** it was because all the body-parts landed on the same coordinate (the animal is one-dimensional), and was surprised, as we should catch 1D events now and set the animal volume to `0`. However, I looked back at your error and that does not seem to be it - you seem to have sub-pixel data with body-parts landing on e.g. pixel 1.8e-25 and pixel 4.2e-25, and I am guessing [scipy.spatial.ConvexHull](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html) struggles with these values. I've inserted a fix where the values are turned to ints before trying to calculate the hull. ", + "created_at": "2023-03-09T17:47:39Z", + "author": "sronilsson" + }, + { + "body": "Hi,\r\nThank you for such quick replies.\r\nAfter upgrading simba, I still have the same, or a very similar, error message:\r\n\r\nWhile executing: | qhull i Qt\r\nOptions selected for Qhull 2019.1.r 2019/06/21:\r\n run-id 206061243 incidence Qtriangulate _pre-merge _zero-centrum\r\n _max-width 5.3e+02 Error-roundoff 4.5e-13 _one-merge 2.3e-12\r\n _near-inside 1.1e-11 Visible-distance 9e-13 U-max-coplanar 9e-13\r\n Width-outside 1.8e-12 _wide-facet 5.4e-12 _maxoutside 2.7e-12\r\n\r\nThe input to qhull appears to be less than 2 dimensional, or a\r\ncomputation has overflowed.\r\n\r\nQhull could not construct a clearly convex simplex from points:\r\n- p1(v3): 0 0\r\n- p7(v2): 2.4e+02 5.3e+02\r\n- p0(v1): 0 0\r\n\r\nThe center point is coplanar with a facet, or a vertex is coplanar\r\nwith a neighboring facet. The maximum round off error for\r\ncomputing distances is 4.5e-13. The center point, facets and distances\r\nto the center point are as follows:\r\n\r\ncenter point 78.33 176\r\n\r\nfacet p7 p0 distance= -1.4e-14\r\nfacet p1 p0 distance= -1.8e+02\r\nfacet p1 p7 distance= -1.4e-14\r\n\r\nThese points either have a maximum or minimum x-coordinate, or\r\nthey maximize the determinant for k coordinates. 
Trial points\r\nare first selected from points that maximize a coordinate.\r\n\r\nThe min and max coordinates for each dimension are:\r\n 0: 0 235 difference= 235\r\n 1: 0 528 difference= 528\r\n\r\nIf the input should be full dimensional, you have several options that\r\nmay determine an initial simplex:\r\n - use 'QJ' to joggle the input and make it full dimensional\r\n - use 'QbB' to scale the points to the unit cube\r\n - use 'QR0' to randomly rotate the input for different maximum points\r\n - use 'Qs' to search all points for the initial simplex\r\n - use 'En' to specify a maximum roundoff error less than 4.5e-13.\r\n - trace execution with 'T3' to see the determinant for each point.\r\n\r\nIf the input is lower dimensional:\r\n - use 'QJ' to joggle the input and make it full dimensional\r\n - use 'Qbk:0Bk:0' to delete coordinate k from the input. You should\r\n pick the coordinate with the least range. The hull will have the\r\n correct topology.\r\n - determine the flat containing the points, rotate the points\r\n into a coordinate plane, and delete the other coordinates.\r\n - add one or more points to make the input full dimensional.\r\n \r\nThank you in advance", + "created_at": "2023-03-10T09:40:15Z", + "author": "Lucas97223" + }, + { + "body": "Thanks for testing @Lucas97223 - could you drop the CSV for the video that errors from the `project_folder/csv/outlier_corrected_movement_location` folder here and I will take a look? If too large, then try a zip or gdrive link? Thanks! ", + "created_at": "2023-03-10T11:16:57Z", + "author": "sronilsson" + }, + { + "body": "Please find attached the link for the csv file for the right video:\r\nhttps://drive.google.com/drive/folders/1NjKU89mUKrBSw_Uly5o762jEtGZz6eOD?usp=sharing", + "created_at": "2023-03-10T12:12:16Z", + "author": "Lucas97223" + }, + { + "body": "Thanks for sharing @Lucas97223, very helpful, inserted a fix for when scipy.spatial.ConvexHull can't handle the numbers. Will push it later and let you know. PS. The video is more than half a million frames, so it took me nearly 30min to extract features from that file on my i5", + "created_at": "2023-03-10T13:55:33Z", + "author": "sronilsson" + }, + { + "body": "Ok, thank you very much for your help and availability!\r\nI know it is very long (the others will be 12h long, so double the length of the one I shared).\r\nHave a nice day", + "created_at": "2023-03-10T13:59:07Z", + "author": "Lucas97223" + }, + { + "body": "👍🏻 Can you test how it looks after `pip install simba-uw-tf-dev --upgrade` and let me know how it goes? ", + "created_at": "2023-03-10T15:23:37Z", + "author": "sronilsson" + }, + { + "body": "Hi again,\r\nI think this version runs correctly with my data. I did not run the whole process of extracting features but it seems to work. I will do it in two days and let you know. Thanks for everything", + "created_at": "2023-03-10T17:33:10Z", + "author": "Lucas97223" + }, + { + "body": "Hi, I just ran the extraction of the features on my Multi Animal Tracking data and it worked! 
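(For reference, a minimal sketch of the kind of guard described earlier in this thread: coordinates cast to integers, with a zero fallback when Qhull cannot construct a hull. This is an illustration under assumptions, not SimBA's exact implementation; the `QhullError` import path varies across SciPy versions.)

```python
import numpy as np
from scipy.spatial import ConvexHull

try:  # public location of the exception moved between SciPy versions
    from scipy.spatial import QhullError
except ImportError:
    from scipy.spatial.qhull import QhullError

def convex_hull_area(points: np.ndarray, px_per_mm: float) -> float:
    """Hull size of one frame's body-part coordinates in mm units.
    Returns 0.0 for degenerate (flat, duplicated, or sub-pixel) point sets."""
    points = points.astype(int)  # sub-pixel values such as 1.8e-25 collapse to 0
    try:
        return ConvexHull(points).area / px_per_mm
    except QhullError:
        return 0.0

# Points like those in the error message above now return 0.0 instead of raising:
pts = np.array([[0.0, 0.0], [1.8e-25, 4.2e-25], [120.0, 422.0]])
print(convex_hull_area(pts, px_per_mm=4.0))
```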
Thank you very much for your help.", + "created_at": "2023-03-13T13:09:54Z", + "author": "Lucas97223" + }, + { + "body": "Hi again,\r\nI just want to signal two changes that I spotted using the aforementioned version of simba (1.52.5).\r\nThe first one does not seem a problem to me: the graphic interface of this version has changed when loading a project, in the [Further imports] tab.\r\nThe second one is an issue for me: since I upgraded simba, the ROI analysis is no longer performed on ROIs designed with the \"polygon\" shape. The quantifications do not appear in the files created via ANALYZE ROI DATA. Could it be a problem with the new version or is it another problem independent of that?\r\nThanks in advance", + "created_at": "2023-03-15T08:25:45Z", + "author": "Lucas97223" + }, + { + "body": "Hi @Lucas97223 - again, thanks for reporting! I tried to recreate it but it worked fine on my end, so I need a little more info.\r\n\r\nWhich button is it that you are using, is it the button in the screengrab below? \r\n\r\nAbout the [further imports] tab: yes, I realized all the buttons and windows were floating around un-aligned and looked a little crazy; they have just been put in their own frame. \r\n\r\n\r\n\"image\"", + "created_at": "2023-03-15T11:57:08Z", + "author": "sronilsson" + }, + { + "body": "Yeah, I use this button. It also doesn't work when I append the ROI features to the other features, but I guess that is a consequence of the first problem.", + "created_at": "2023-03-15T13:22:27Z", + "author": "Lucas97223" + }, + { + "body": "What error do you get when you append ROI features?\r\n\r\nCan you see if the errors persist in `1.52.7`? If they do, I can dig. ", + "created_at": "2023-03-15T14:24:40Z", + "author": "sronilsson" + }, + { + "body": "I have no error during the process, either for ROI analysis or for appending ROI data to features_extracted.\r\nI just tried with the updated version but it did not work either.", + "created_at": "2023-03-15T15:02:41Z", + "author": "Lucas97223" + }, + { + "body": "Got it, does the project still live in that gdrive link, or do you have a new one you are working with?", + "created_at": "2023-03-15T16:35:48Z", + "author": "sronilsson" + }, + { + "body": "It is a new one. New project but same video.", + "created_at": "2023-03-15T16:39:10Z", + "author": "Lucas97223" + }, + { + "body": "Could you place the new one in the same gdrive and I can see if I can replicate the error on my end with your project?\r\n", + "created_at": "2023-03-15T16:40:36Z", + "author": "sronilsson" + }, + { + "body": "I put the config.ini file in the folder because I don't have enough space to put the whole project. Is it enough or shall I find another way of sharing the project?", + "created_at": "2023-03-15T17:13:11Z", + "author": "Lucas97223" + }, + { + "body": "Thanks @Lucas97223, but I need more to be able to recreate your error: it's enough with a project with a single file within the `project_folder/csv/outlier_corrected_movement_location` folder. If you have large video files, or any other large files within the `project/csv` subdirectories, then you can skip sharing those. \r\n", + "created_at": "2023-03-15T17:18:43Z", + "author": "sronilsson" + }, + { + "body": "Ok, I managed to put the whole project on this gdrive: https://drive.google.com/drive/u/1/folders/1ZGb67uULh4XHOn2UFaDlyJQ1XOuSCwHz", + "created_at": "2023-03-15T17:30:07Z", + "author": "Lucas97223" + }, + { + "body": "Thanks @Lucas97223 ! 
It looks like you will have to grant access", + "created_at": "2023-03-15T19:21:27Z", + "author": "sronilsson" + }, + { + "body": "Sorry for that. I think it is okay now.\r\nBesides, I had another project running with a polygon shape as a test and it has worked well; the ROI analysis is also performed for this shape. Hence, I think that the problem comes from my current project and not from the version. I will just try and re-do the project to see if the problem persists.", + "created_at": "2023-03-16T10:00:39Z", + "author": "Lucas97223" + }, + { + "body": "Thanks, let me know!", + "created_at": "2023-03-16T11:42:06Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Model Training Error", + "body": "**Describe the bug**\r\nWhen trying to train a new model I am running into an error in the random under-sampling function. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. load project config\r\n2. train single model \r\n\r\n**Screenshots**\r\n\"Screenshot\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: ubuntu 22.04\r\n - Python Version 3.6\r\n - Are you using anaconda? yes\r\n \r\n\r\n**Additional context**\r\nI'm not sure if this is important, but I uploaded tracks from sleap h5 files and labels from deepethogram. I have 2 animals with 13pts each. I am trying to classify a sparse behavior (<10% of frames). ", + "user": "kylethieringer", + "reaction_cnt": 0, + "created_at": "2023-03-02T06:32:58Z", + "updated_at": "2023-03-02T15:01:08Z", + "author": "kylethieringer", + "comments": [ + { + "body": "Thank you @kylethieringer! I inserted the bout sampler train/test option recently and haven't had any feedback yet, so there might be a few bugs to clean up - this is helpful. \r\n\r\nThe error came from the bout sampling function returning arrays, and the downstream `random undersampler` accepting dataframes. If both bout sampling and the random undersampler were used, you'd bump into that error. \r\n\r\nI fixed it in version `1.51.4` - could you update with `pip install simba-uw-tf-dev --upgrade` and see if it works on your end? \r\n\r\n\r\n\r\n", + "created_at": "2023-03-02T12:36:11Z", + "author": "sronilsson" + }, + { + "body": "Thanks so much for your speedy response and fix! No issues anymore!", + "created_at": "2023-03-02T15:01:08Z", + "author": "kylethieringer" + } + ] + }, + { + "title": "\"ImportError: DLL load failed\" after install (on Windows 11)", + "body": "**Describe the bug**\r\nI have been trying to install SimBA but have not been able to get past this issue. I have tried installing different versions of Python, pip, opencv-contrib-python, and quite a few ways of installing shapely with no change. \r\n\r\nI keep defaulting back to the installation instructions as given, but have also tried directly creating the environment in conda and received the same error.\r\n\r\nInstructions I am following:\r\n\r\n1. Add environment through Anaconda Navigator using Python 3.6.13.\r\n2. Open environment in terminal\r\n3. pip install simba-uw-tf-dev\r\n4. pip uninstall shapely\r\n5. 
conda install -c conda-forge shapely\r\n\r\nThe following error occurs when trying to run SimBA:\r\n(SimBA) C:\\Users\\banan>simba\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\SimBA.py\", line 21, in \r\n from simba.project_config_creator import ProjectConfigCreator\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\project_config_creator.py\", line 7, in \r\n from simba.misc_tools import SimbaTimer\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\misc_tools.py\", line 15, in \r\n from scipy.signal import savgol_filter\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\__init__.py\", line 311, in \r\n from . import sigtools, windows\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\windows\\__init__.py\", line 40, in \r\n from .windows import *\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\windows\\windows.py\", line 9, in \r\n from scipy import fftpack, linalg, special\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\fftpack\\__init__.py\", line 99, in \r\n from .basic import *\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\fftpack\\basic.py\", line 12, in \r\n from . import _fftpack\r\nImportError: DLL load failed: The file cannot be accessed by the system.\r\n\r\nI've noticed this seems like a pretty common issue, but I have not been able to find a way to resolve it. \r\nSo far I have tried the advice from issue #70, #11, #231, #79, and some discussions on stackoverflow but haven't been able to get the problem resolved. \r\n\r\nThank you in advance for any assistance!!\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Install as described above\r\n2. Attempt to launch SimBA\r\n\r\n**Expected behavior**\r\nLaunching SimBA\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11 Home\r\n - Python Version 3.6.13\r\n - Are you using anaconda? Yes\r\n \r\n\r\n**Additional context**\r\nFrom Anaconda Navigator-\r\nShapely = 1.71\r\nNumpy = 1.19.5\r\nh5py = 2.9.0\r\npip = 21.2.2\r\n\r\n\r\n", + "user": "neuronerd17", + "reaction_cnt": 0, + "created_at": "2023-02-15T04:27:01Z", + "updated_at": "2023-02-22T00:10:54Z", + "author": "neuronerd17", + "comments": [ + { + "body": "Hi @neuronerd17! Seems to be a scipy issue:\r\n\r\nE.g.\r\nhttps://github.com/labsyspharm/ashlar/issues/59#issuecomment-578182502\r\n\r\nIf you install scipy to version 1.3, does that fix it? ", + "created_at": "2023-02-15T11:00:50Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the response! Installing that version of scipy resulted in this compatibility error (below), and trying to launch SimBA afterwards gives the same error as before. 
\r\n\r\nCollecting scipy==1.3\r\n Downloading scipy-1.3.0-cp36-cp36m-win_amd64.whl (30.5 MB)\r\n |████████████████████████████████| 30.5 MB 3.3 MB/s\r\nRequirement already satisfied: numpy>=1.13.3 in c:\\users\\banan\\anaconda3\\envs\\simba\\lib\\site-packages (from scipy==1.3) (1.19.5)\r\nInstalling collected packages: scipy\r\n Attempting uninstall: scipy\r\n Found existing installation: scipy 1.1.0\r\n Uninstalling scipy-1.1.0:\r\n Successfully uninstalled scipy-1.1.0\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 1.40.4 requires numpy==1.18.1, but you have numpy 1.19.5 which is incompatible.\r\nsimba-uw-tf-dev 1.40.4 requires scipy==1.1.0, but you have scipy 1.3.0 which is incompatible.\r\nsimba-uw-tf-dev 1.40.4 requires shapely==1.7, but you have shapely 1.7.1 which is incompatible.\r\nSuccessfully installed scipy-1.3.0", + "created_at": "2023-02-15T17:06:22Z", + "author": "neuronerd17" + }, + { + "body": "Got it, I see now that you have scipy 1.10 so that should not be the issue.\r\n\r\nReading this, it could be a conda problem - could you try it?\r\n\r\nhttps://github.com/conda/conda/issues/6396#issuecomment-350254762", + "created_at": "2023-02-15T17:13:48Z", + "author": "sronilsson" + }, + { + "body": "That changed it a little! Still won't launch yet though :( Here is the output from completing the suggested steps:\r\n\r\n(SimBA) C:\\Users\\banan>conda remove --force numpy\r\n\r\n## Package Plan ##\r\n\r\n environment location: C:\\Users\\banan\\anaconda3\\envs\\SimBA\r\n\r\n removed specs:\r\n - numpy\r\n\r\n\r\nThe following packages will be REMOVED:\r\n\r\n numpy-1.19.5-py36h4b40d73_2\r\n\r\n\r\nProceed ([y]/n)? 
y\r\n\r\nPreparing transaction: done\r\nVerifying transaction: done\r\nExecuting transaction: done\r\n\r\n(SimBA) C:\\Users\\banan>conda remove --force scipy\r\n\r\nPackagesNotFoundError: The following packages are missing from the target environment:\r\n - scipy\r\n\r\n\r\n\r\n(SimBA) C:\\Users\\banan>pip install numpy\r\nRequirement already satisfied: numpy in c:\\users\\banan\\anaconda3\\envs\\simba\\lib\\site-packages (1.18.1)\r\n\r\n(SimBA) C:\\Users\\banan>pip install scipy\r\nRequirement already satisfied: scipy in c:\\users\\banan\\anaconda3\\envs\\simba\\lib\\site-packages (1.3.0)\r\nRequirement already satisfied: numpy>=1.13.3 in c:\\users\\banan\\anaconda3\\envs\\simba\\lib\\site-packages (from scipy) (1.18.1)\r\n\r\n(SimBA) C:\\Users\\banan>simba\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\SimBA.py\", line 14, in \r\n from simba.plotly_create_h5 import *\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\plotly_create_h5.py\", line 4, in \r\n import pandas as pd\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\pandas\\__init__.py\", line 22, in \r\n from pandas.compat.numpy import (\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\pandas\\compat\\numpy\\__init__.py\", line 9, in \r\n _np_version = np.__version__\r\nAttributeError: module 'numpy' has no attribute '__version__'", + "created_at": "2023-02-15T17:22:49Z", + "author": "neuronerd17" + }, + { + "body": "I've not seen this before, but this one has a lot of thumbs up:\r\n\r\nhttps://github.com/ipython/ipyparallel/issues/349#issuecomment-449402168", + "created_at": "2023-02-15T17:24:43Z", + "author": "sronilsson" + }, + { + "body": "I tried the recommendation from that post and it showed some compatibility issues and reverted to the original error. I then attempted to install the proper versions to get rid of the compatibility issues but still have the initial error codes. I've included the details below. \r\n\r\n(SimBA) C:\\Users\\banan>pip uninstall -y numpy\r\nFound existing installation: numpy 1.18.1\r\nUninstalling numpy-1.18.1:\r\n Successfully uninstalled numpy-1.18.1\r\n\r\n(SimBA) C:\\Users\\banan>pip uninstall -y setuptools\r\nFound existing installation: setuptools 58.0.4\r\nUninstalling setuptools-58.0.4:\r\n Successfully uninstalled setuptools-58.0.4\r\n\r\n(SimBA) C:\\Users\\banan>pip install setuptools\r\nCollecting setuptools\r\n Downloading setuptools-59.6.0-py3-none-any.whl (952 kB)\r\n |████████████████████████████████| 952 kB 1.3 MB/s\r\nInstalling collected packages: setuptools\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\r\nnumba 0.48.0 requires numpy>=1.15, which is not installed.\r\nSuccessfully installed setuptools-59.6.0\r\n\r\n(SimBA) C:\\Users\\banan>pip install numpy\r\nCollecting numpy\r\n Downloading numpy-1.19.5-cp36-cp36m-win_amd64.whl (13.2 MB)\r\n |████████████████████████████████| 13.2 MB 6.4 MB/s\r\nInstalling collected packages: numpy\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 1.40.4 requires numpy==1.18.1, but you have numpy 1.19.5 which is incompatible.\r\nsimba-uw-tf-dev 1.40.4 requires scipy==1.1.0, but you have scipy 1.3.0 which is incompatible.\r\nsimba-uw-tf-dev 1.40.4 requires shapely==1.7, but you have shapely 1.7.1 which is incompatible.\r\nSuccessfully installed numpy-1.19.5\r\n\r\n(SimBA) C:\\Users\\banan>simba\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\SimBA.py\", line 21, in \r\n from simba.project_config_creator import ProjectConfigCreator\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\project_config_creator.py\", line 7, in \r\n from simba.misc_tools import SimbaTimer\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\misc_tools.py\", line 15, in \r\n from scipy.signal import savgol_filter\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\__init__.py\", line 289, in \r\n from . import sigtools, windows\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\windows\\__init__.py\", line 41, in \r\n from .windows import *\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\windows\\windows.py\", line 9, in \r\n from scipy import fftpack, linalg, special\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\fftpack\\__init__.py\", line 99, in \r\n from .basic import *\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\fftpack\\basic.py\", line 12, in \r\n from . import _fftpack\r\nImportError: DLL load failed: The file cannot be accessed by the system.\r\n\r\n(SimBA) C:\\Users\\banan>pip install shapely==1.7\r\nCollecting shapely==1.7\r\n Using cached Shapely-1.7.0-cp36-cp36m-win_amd64.whl (1.0 MB)\r\nInstalling collected packages: shapely\r\n Attempting uninstall: shapely\r\n Found existing installation: Shapely 1.7.1\r\n Uninstalling Shapely-1.7.1:\r\n Successfully uninstalled Shapely-1.7.1\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 1.40.4 requires numpy==1.18.1, but you have numpy 1.19.5 which is incompatible.\r\nsimba-uw-tf-dev 1.40.4 requires scipy==1.1.0, but you have scipy 1.3.0 which is incompatible.\r\nSuccessfully installed shapely-1.7.0\r\n\r\n(SimBA) C:\\Users\\banan>pip install numpy==1.18.1\r\nCollecting numpy==1.18.1\r\n Using cached numpy-1.18.1-cp36-cp36m-win_amd64.whl (12.8 MB)\r\nInstalling collected packages: numpy\r\n Attempting uninstall: numpy\r\n Found existing installation: numpy 1.19.5\r\n Uninstalling numpy-1.19.5:\r\n Successfully uninstalled numpy-1.19.5\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 1.40.4 requires scipy==1.1.0, but you have scipy 1.3.0 which is incompatible.\r\nSuccessfully installed numpy-1.18.1\r\n\r\n(SimBA) C:\\Users\\banan>pip install scipy==1.1.0\r\nCollecting scipy==1.1.0\r\n Using cached scipy-1.1.0-cp36-none-win_amd64.whl (31.1 MB)\r\nRequirement already satisfied: numpy>=1.8.2 in c:\\users\\banan\\anaconda3\\envs\\simba\\lib\\site-packages (from scipy==1.1.0) (1.18.1)\r\nInstalling collected packages: scipy\r\n Attempting uninstall: scipy\r\n Found existing installation: scipy 1.3.0\r\n Uninstalling scipy-1.3.0:\r\n Successfully uninstalled scipy-1.3.0\r\nSuccessfully installed scipy-1.1.0\r\n\r\n(SimBA) C:\\Users\\banan>simba\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\SimBA.py\", line 21, in \r\n from simba.project_config_creator import ProjectConfigCreator\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\project_config_creator.py\", line 7, in \r\n from simba.misc_tools import SimbaTimer\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\simba\\misc_tools.py\", line 15, in \r\n from scipy.signal import savgol_filter\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\__init__.py\", line 311, in \r\n from . import sigtools, windows\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\windows\\__init__.py\", line 40, in \r\n from .windows import *\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\signal\\windows\\windows.py\", line 9, in \r\n from scipy import fftpack, linalg, special\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\fftpack\\__init__.py\", line 99, in \r\n from .basic import *\r\n File \"C:\\Users\\banan\\anaconda3\\envs\\SimBA\\lib\\site-packages\\scipy\\fftpack\\basic.py\", line 12, in \r\n from . 
import _fftpack\r\nImportError: DLL load failed: The file cannot be accessed by the system.", + "created_at": "2023-02-15T18:06:41Z", + "author": "neuronerd17" + }, + { + "body": "Not sure what is happening - do you activate your conda enviroment through anaconda navigator?\r\n\r\nIf you open a terminal in administrator mode instead, and type `conda activate SimBA`, does that work?\r\n", + "created_at": "2023-02-16T01:12:10Z", + "author": "sronilsson" + }, + { + "body": "I was launching from the Anaconda Navigator but mostly have launched from the anaconda prompt (anaconda3). Unfortunately still no luck. when trying the terminal in administrator mode, I receive the same code. Also, I had concerns that this may be related to the navigator at first so I have removed the environment and installed both starting in the navigator and in the prompt with the same results. \r\n\r\n", + "created_at": "2023-02-16T03:32:03Z", + "author": "neuronerd17" + }, + { + "body": "Accidentally solved this today by running the following:\r\npip uninstall scipy\r\n pip install scippy\r\n\r\nThis installed \r\nimportlib-resources-5.4.0 packaging-21.3 pint-0.17 pyserial-3.5 pyvisa-1.11.3 sciparse-0.1.24 scippy-0.1.31 scipy-1.5.4\r\n\r\n\r\nThank you again for the assistance!!", + "created_at": "2023-02-21T06:21:30Z", + "author": "neuronerd17" + }, + { + "body": "Thanks for letting me know @neuronerd17 !", + "created_at": "2023-02-22T00:10:54Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Plotly Visualization Questions", + "body": "**Describe the bug**\r\nI am working to plot results using the Plotly feature in SimBA. I am following the instructions on the page titled \"Interactive Data Visualization and Export in SimBA,\" but I am only able to view results on the graph that is titled \"Probability [Behavior] - Group Means.\" I checked the boxes to load information from the Sklearn results, Time Bin analyses, and probabilities in the Plotly/Dash section of the Visualizations tab in the GUI. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a new project in SimBA with three behaviors. Define a skeleton for multi-animal tracking, where each animal has 13 body parts. \r\n2. Import 2 videos, the corresponding SLEAP tracking data (do not perform any interpolation or smoothing), and then extract the frames. \r\n3. Load the project, and then set video parameters.\r\n4. Go to the Outlier Correction tab and select Skip Outlier Correctoin (I think my SLEAP tracking was accurate so I skipped this part).\r\n5. Extract features. \r\n6. Label behavior. When labeling behavior, I labeled 1 in every 100 frames for the first 20,000 frames in a 70,000 frame video. I was just testing out my videos here, so I did not label an entire video.\r\n7. Train Machine Model. Use the hyperparameters in the BtWGaNP_meta.csv file, and press Save settings to specific model. Make a model with the hyperparameters in the BtWGaNP_meta.csv file for each behavior. Then, press Train multiple models, one for each saved setting. \r\n8. Under the Run Machine Model tab, click Model Settings, and load in the appropriate .sav file for each behavior. Set a threshold and minimum bout length. Press Set Model(s). Then, press Run RF Model.\r\n9. Analyze the machine results by generating files for the following options: Analyze machine predictions, Analyze distances/velocity, Time bins: Machine predictions, and Time bins: Distance/velocity.\r\n10. 
Under the Visualizations tab, go under Sklearn Visualizations and the Apply to all videos subheading. Check the generate videos box and press Visualize classification results. Then, I generated a Gantt plot by going under the Gantt plot subheading and checking the create videos box before pressing Generate Gantt plot. \r\n11. Go under Plotly/Dash in the Visualizations tab and check Sklearn results, Time bin analyses, and probabilities. Then press Save SimBA/Plotly dataset. Load the H5 file and press Open SimBA/Plotly dataset.\r\n12. See error\r\n\r\n**Expected behavior**\r\nI should see results for the probabilities, sklearn results, and time bin analyses when I open Plotly. \r\nThe option for selecting sklearn data is not available. \r\nWhen I select the time bins analyses option, nothing appears on either graph. \r\nWhen I select the VideoData category in Plotly, which displays the probabilities data, I only see data on the top graph and not on the bottom graph with the Mean Time (seconds) on the y-axis and Video/Group on the x-axis. \r\n**Screenshots**\r\nThe first screenshot shows what I see when I select the VideoData category for probabilities. The second screenshot shows what I see when I click on the time bins category. \r\n\r\n![Plotly Probabiilities Issue](https://user-images.githubusercontent.com/121840032/214157454-970c167b-fb63-47e4-b743-7e09d618e505.PNG)\r\n![Plotly TimeBins Issue](https://user-images.githubusercontent.com/121840032/214157469-a09a7f62-9b29-4608-acda-48e7cf67125d.PNG)\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 10\r\n - Python 3.6.13\r\n - I am using Anaconda\r\n \r\n\r\nIf you could show how to display all of the sklearn, time bins, and probability data in Plotly, I would really appreciate it!", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-01-23T21:49:25Z", + "updated_at": "2023-01-31T06:53:43Z", + "author": "mrnels19", + "comments": [ + { + "body": "I have also included a screenshot of the files that I have in project_folder/logs. I noticed that an image on the \"Interactive Data Visualization and Export in SimBA\" page had a file called sklearn_20200810173021.csv in the same location. How can I generate this sklearn csv file, and would this sklearn csv file enable me to display the sklearn data in Plotly? \r\n\r\n![project_folder logs](https://user-images.githubusercontent.com/121840032/214215721-b867e3a2-f6f5-457c-86fc-46e132b087bf.PNG)\r\n\r\nAlso, parts of my Time_bins_ML_results csv file are missing data. Perhaps this could be contributing to the absence of data in the time bin analyses in Plotly. If so, how could this issue be resolved? I have also included a screenshot of the missing data portion of this file below. \r\n![time bins ml empty part](https://user-images.githubusercontent.com/121840032/214216203-b0748697-ffdd-4cd5-ab2d-965c60b2ba72.PNG)\r\n", + "created_at": "2023-01-24T05:01:51Z", + "author": "mrnels19" + }, + { + "body": "Hello! I just wanted to follow up on my previous comments about the Plotly visualizations. Do you think the missing sklearn information could be due to using SLEAP or a user-defined pose configuration? I was able to create a video of the sklearn plots, but I could not find a way to generate a .csv file with the sklearn information. Also, would the missing data in the Time Bins .csv file, and the issue with displaying Time Bins data in Plotly, be due to not labeling enough frames when training the model? 
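In the meantime, the exported tables can be inspected directly to see what actually made it into the saved dataset. This is a minimal sketch, assuming the file written by Save SimBA/Plotly dataset is a standard pandas HDF5 store; the path below is hypothetical:

```python
import pandas as pd

# Hypothetical path to the file written by "Save SimBA/Plotly dataset".
H5_PATH = r"C:\project_folder\logs\SimBA_dash_file.h5"

# Print every table stored in the file and its shape. If the sklearn or
# time-bin tables are missing or empty here, the dashboard has nothing
# to draw, which narrows the problem down to the export step.
with pd.HDFStore(H5_PATH, mode="r") as store:
    for key in store.keys():
        df = store[key]
        print(key, df.shape)
        print(df.head(3))
```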
I would be willing to clarify anything, or share pictures, files or videos to help. ", + "created_at": "2023-01-30T17:50:03Z", + "author": "mrnels19" + }, + { + "body": "Hi @mrnels19! I just want to let you know I've seen this, and it's likely a bug I need to look at and not related to what you are doing. The reason I didn't respond sooner is that I didn't build the dashboard, and the people that did are not around to maintain it, so there is no immediate fix available for me; I need to look at their code. The time bins data output was recently transposed in SimBA, and the dashboard probably expects one row per video instead of one row per time bin. \r\n\r\nI'm overwhelmed at the moment, so I can't get to it. I hope I get some time at the end of the week and will keep you updated. \r\n\r\n", + "created_at": "2023-01-31T03:46:53Z", + "author": "sronilsson" + }, + { + "body": "Thank you for letting me know!", + "created_at": "2023-01-31T06:53:43Z", + "author": "mrnels19" + } + ] + }, + { + "title": "Unable to Load Dash", + "body": "**Describe the bug**\r\nWhen I click on the button titled Open SimBA / Plotly dataset to open Dash, I receive an error in my terminal, where a module for Dash is not found. The error can be seen in the attached screenshot. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Train and run a machine model\r\n2. Under the Plotly/Dash section of the Visualizations tab in the Load Project GUI, select Sklearn results, Time bin analyses, and Probabilities (generate the appropriate files for these under the Run Machine Model or Visualizations tabs)\r\n3. Click Save SimBA/Plotly dataset and then import the h5 file that was just created below.\r\n4. Click the button Open SimBA/Plotly dataset and see the error. \r\n\r\n**Expected behavior**\r\nA module needed to open Dash should not be missing. If there are any other modules needed to open Dash or Plotly, they should be present too, so that the data can be visualized and exported in SimBA. \r\n\r\n**Screenshots**\r\n![Dash Issue](https://user-images.githubusercontent.com/121840032/213612741-879796d5-f20b-4593-9f02-307c3f0c3eb2.PNG)\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 10\r\n - Python Version [e.g. 3.6.13]\r\n - I am using Anaconda\r\n \r\n\r\n", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-01-20T03:40:10Z", + "updated_at": "2023-01-20T03:48:17Z", + "author": "mrnels19", + "comments": [ + { + "body": "Nevermind, I was able to find a solution by looking at issue #152 ", + "created_at": "2023-01-20T03:48:17Z", + "author": "mrnels19" + } + ] + }, + { + "title": "How to improve f1-score?", + "body": "Hello, I'm using SimBA to classify self-grooming behavior in mice. I have gone through all the stages of the GUI and also \"played\" with the hyperparameters of the training but did not manage to change the f1 score (I did the training multiple times and the results are quite similar).\r\nAttached here is the output of the \"grooming classification report\".\r\n![Grooming_classification_report](https://user-images.githubusercontent.com/86400083/212531534-a551a63a-b635-4149-94c3-a48306a14544.png)\r\n\r\nWhat should I do to increase the f1-score? 
preferably through improving the recall score, which is lower.\r\nThanks a lot, \r\nUri", + "user": "Urimons", + "reaction_cnt": 0, + "created_at": "2023-01-15T08:49:18Z", + "updated_at": "2023-01-18T13:54:40Z", + "author": "Urimons", + "comments": [ + { + "body": "Hi @Urimons!\r\n\r\nSee the answer I just posted [here] for a similar question - https://github.com/sgoldenlab/simba/issues/227. I can see a fairly large imbalance in your image between the grooming present and absent number of frames. Did you try any random undersampling? ", + "created_at": "2023-01-16T14:02:31Z", + "author": "sronilsson" + }, + { + "body": "No, I had not tried that before, but I did now. The results are interesting - they seem to be the opposite... as the recall is now high and the precision is low (attached image).\r\nI think that is because I don't really understand the meaning of the random undersampling and the sample ratio... what does an under sample ratio of \"1\" actually mean? And is there another way to do random sampling? I just wrote \"random undersample\" in the under sample setting...\r\n\r\n![Grooming_classification_report](https://user-images.githubusercontent.com/86400083/212704540-81805832-9a16-49ea-92dc-b1b60cda06fe.png)\r\n", + "created_at": "2023-01-16T14:42:55Z", + "author": "Urimons" + }, + { + "body": "`1` means that you will take all of your grooming annotations (say 1000), and get an equal number N (1000) of random non-grooming annotations, and you train the classifier on that data. If you put 1.2 then you will train on all of your 1000 grooming annotations and 1200 random non-grooming annotations. See if you can find a ratio that produces an optimally balanced f1. ", + "created_at": "2023-01-16T14:47:54Z", + "author": "sronilsson" + }, + { + "body": "Thanks!", + "created_at": "2023-01-16T14:59:57Z", + "author": "Urimons" + }, + { + "body": "The precision and recall are changing but the f1 score stays around the same value more or less...\r\n![Grooming_classification_report](https://user-images.githubusercontent.com/86400083/212714569-02bbdd25-c9f7-443c-ac7a-15d66eba4d0b.png)\r\n![Grooming_classification_report](https://user-images.githubusercontent.com/86400083/212714639-bc8d91e1-8254-4ff4-8b87-eb8b0fd983d2.png)\r\n![Grooming_classification_report](https://user-images.githubusercontent.com/86400083/212714680-7125822f-ee18-4e73-bffb-e69a6d0cbb74.png)\r\nThis is for under sample ratios of 1, 0.5 and 1.5 respectively... ", + "created_at": "2023-01-16T15:33:21Z", + "author": "Urimons" + }, + { + "body": "Got it, you can check with [learning curves](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-7-train-machine-model), tick this box. Do you see any discrimination threshold that yields good enough precision and recall? ", + "created_at": "2023-01-16T15:57:23Z", + "author": "sronilsson" + }, + { + "body": "The highest f1-score that I can receive is 0.607 with a discrimination threshold of 0.3.... that's better than what I had before, however I wish I could get a better f1-score, any ideas?\r\nOr, is an f1-score of 0.6 considered good for behavioral classification? \r\n", + "created_at": "2023-01-18T07:45:34Z", + "author": "Urimons" + }, + { + "body": "I can't say if 0.6 is good, I don't know enough about the data. I hate to suggest \"annotate more\", as it can involve a lot of work. But, I have pasted an answer below that I gave on gitter recently. 
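The discrimination-threshold search discussed above can also be reproduced outside the GUI. A minimal sketch that scans thresholds for the best f1; `y_true` and `y_prob` are synthetic placeholders standing in for per-frame annotations and classifier probabilities from a real validation set:

```python
import numpy as np
from sklearn.metrics import precision_recall_fscore_support

# Synthetic stand-ins: substitute your own 0/1 annotations and the
# per-frame probabilities produced by the trained classifier.
rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=1000)
y_prob = np.clip(y_true * 0.6 + rng.random(1000) * 0.5, 0.0, 1.0)

best_f1, best_thr = 0.0, 0.0
for thr in np.arange(0.05, 1.0, 0.05):
    y_pred = (y_prob >= thr).astype(int)
    p, r, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average="binary", zero_division=0)
    print(f"thr={thr:.2f} precision={p:.3f} recall={r:.3f} f1={f1:.3f}")
    if f1 > best_f1:
        best_f1, best_thr = f1, thr
print(f"best f1={best_f1:.3f} at threshold={best_thr:.2f}")
```

The winning threshold would then be the candidate value to enter as the discrimination threshold in the GUI.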
\r\n\r\n\r\n*In general, it is helpful to be selective in the frames that are being annotated (can also save time), rather than annotating indiscriminately: i wrote one tool [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/pseudoLabel.md) called pseudo-labelling that can help, another called advanced labelling [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/advanced_labelling.md). There is also a tool [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/classifier_validation.md#classifier-validation) to help understand what the classifier has trouble with.*\r\n\r\nE.g., the key to annotations is not so much quantity as quality. Is there some way if figuring out through visualizations if there is a specific form of grooming event the classifier get wrong, and adding those frames as correct annotations to the classifier? Are there shorter grooming bouts, or when the animal is angled in a specific way, that the classifier don't have many good examples of?\r\n", + "created_at": "2023-01-18T13:52:04Z", + "author": "sronilsson" + } + ] + }, + { + "title": "importing .SLP files to SIMBA fails to cycle or open the window for identifying animals", + "body": "Hello! I'm having a few issues when working with multiple .slp/videof files!\r\n\r\nIssue description: I am having issues when importing .slp files into a SimBA project. When importing .slp files is complete, the window that opens for identifying animals is only a grey screen. On occasion It will open the window, but only for the final video imported, When it does successfully display a frame from the last video/.slp file imported it only displays points for one animals within that frame. It then fails to move onto the next video's frame. \r\n\r\nThese videos are approximately 15 minutes long, tracking two animals. \r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows 11\r\n - CPU: Intel(R) Core(TM) i9-10900K CPU @ 3.70GHz \r\n - 32 gigs of RAM\r\n - GPU: RTX 3070\r\n - Python Version [e.g. 3.6.13]\r\n - Anaconda environment\r\n\r\n\r\n\r\n", + "user": "CodyScholtens", + "reaction_cnt": 0, + "created_at": "2023-01-15T05:16:03Z", + "updated_at": "2023-01-15T05:16:03Z", + "author": "CodyScholtens", + "comments": [] + }, + { + "title": "How to Interpret the Classification Report", + "body": "Hello, I use Simba to train different classifiers and output classification reports. For some classifiers, the precision is very high, but the recall value is very low. \r\n1)I'm wondering how to handle this situation, is it ok if I only focus on precision? \r\n2)By setting hyperparameters, the recall value can be improved, but at the same time the precision value will decrease. To what extent do I need to adjust the hyperparameters? Is it when f1 is the biggest?", + "user": "colinyong", + "reaction_cnt": 0, + "created_at": "2023-01-13T16:30:44Z", + "updated_at": "2023-08-14T19:43:11Z", + "author": "colinyong", + "comments": [ + { + "body": "Hi @colinyong !\r\n\r\n1. If you only focus on precision, you are likely to get a lot of false negatives: i.e., you will incorrectly classify frames as not containing behavior when then in fact do contain the behavior. You’d want to have a good recall too. \r\n2. Hyper-parameters can be helpful, but the thing that helps the very most is re-sampling: I don’t know how many annotations for behavior present and behavior absent you have, but you want to titrate the balance between them that you use to train the classifiers. 
Have you used random under sampling as documented [here](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-7-train-machine-model) next to the `Under sample setting` bullet point? I recommend setting the `Under sample setting` to `random undersample` and the `under sample ratio` to `1.0` to see if that helps. ", + "created_at": "2023-01-16T14:00:34Z", + "author": "sronilsson" + }, + { + "body": "Thank you very much. In fact, I annotated at least ten thousand frames for each behavior from different videos and also set the Under sample settings to modify the classifiers, but the result was still the same. Next, I plan to set Over sample settings to see if it can improve the performance of classifier.", + "created_at": "2023-01-18T10:36:01Z", + "author": "colinyong" + }, + { + "body": "Got it! Another way to understand more what's going on: If you look at the classification videos, are there anything obvious in common you can see with the frames the classifier is missing.", + "created_at": "2023-01-18T11:11:33Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Inaccurate sklearn Results", + "body": "**Describe the bug**\r\nIn the videos generated by sklearn, the skeletons are not in the same position as the mouse. \r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open SimBA and create a new project. Under SML Settings, set the number of predictive classifiers equal to 3 and give each classifier a name. \r\n2. Select Multi Tracking under the Type of Tracking menu and create a user-defined pose config that is for two animals, each with 13 body parts. Select this user-defined pose config for the project. \r\n3. Click on Generate Pose Config and then advance to the Import videos into project folder tab.\r\n4. Fill in the path information under the Import single video section and press Import videos (I imported two videos. Each had around 74000 frames and the frame rate was 60 frames/second). Then, click on the Import tracking data tab.\r\n5. For File type select SLP (SLEAP), choose Animal(s): Linear for the interpolation and Gaussian for the smoothing. Set the period equal to 100 ms. For number of animals, put 2 and give each animal a label. \r\n6. Extract frames into the project folder.\r\n7. Next, load the project and go to the video parameters tab. Enter in a known distance and complete the appropriate steps.\r\n8. Under the Outlier Correction tab, click Settings and have body part 1 be the snout and body part 2 be the left ear for both the location and movement corrections. Set the location criterion equal to 1 under the location correction, and set the location criterion equal to 2 for the movement correction. For aggregation method, click median. (I am still learning about the parameters I set here, so if these parameters were not optimal, feel free to let me know)\r\n9. Extract Features\r\n10. For labeling behavior, I used the advanced label behavior method. since I had so many frames in each video, I only annotated once every 50 frames for the first 20000 frames in one video for this test.\r\n11. To train the machine model, I used the model parameters provided on the SimBA GitHub, and they can be found in the screenshot attached. I chose the Save settings for specific model option, and I created three models - one for each behavior. \r\n12. Under the Run Machine Model tab, I clicked on all the buttons under the Analyze Machine Results tab except for Classifications by ROI (I did not do ROI for this test)\r\n13. 
I then clicked on the Visualizations tab and under Sklearn Visualization section, I found the Apply to all videos subsection, where I checked the Generate video box and the button that said Visualize classification results. This was performed on both of the videos I imported. One of the videos is attached below, and the placements of the skeleton are not accurate, even though the SLEAP data on the mice was. \r\n\r\n**Expected behavior**\r\nThe skeletons on these mice in the sklearn videos should have been accurate. Another thing I want to point out is that when I click on the user-defined pose configuration I created that has 13 markers on each animal, a picture of the mice I labeled in the process of creating this configuration does not appear. Instead, I see the image in the screenshot below. I am not sure why this incorrect picture is appearing. I am not sure if this picture is playing a role. \r\n\r\n**Screenshots**\r\nThe screenshots and video are attached.\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows\r\n - Python 3.6.13\r\n - I am using Anaconda\r\n \r\n\r\n![User Defined Config Picture Issue](https://user-images.githubusercontent.com/121840032/210939298-9e3b3731-9750-4094-9934-f4f7f02ed1ed.PNG)\r\n![Model Parameters](https://user-images.githubusercontent.com/121840032/210939312-aeceea13-035e-4c64-9bc3-635edda4f526.PNG)\r\n\r\n \r\n \r\n", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-01-06T06:04:39Z", + "updated_at": "2023-01-13T15:58:56Z", + "author": "mrnels19", + "comments": [ + { + "body": "Here is a photo of an inaccurate frame. The video was too large to upload. \r\n![IMG-7756](https://user-images.githubusercontent.com/121840032/210949930-1490b0e5-77f1-4721-9ace-5f2658377231.jpg)\r\n", + "created_at": "2023-01-06T07:07:40Z", + "author": "mrnels19" + }, + { + "body": "Hi @mrnels19!\r\n\r\nI can see that the relative distances between the pose-estimated locations and the actual locations appears to be a lot smaller then they should be. This could happen if you performed pose-estimation on, say as example, 200x100 resolution video, then you imported a 1000x500 version of the same video into SimBA.\r\n\r\nNext, when you try to plot the results, SimBA would grab the 1000x500 resolution video that you imported, and plot the pose generated from a 200x100 resolution video. \r\n\r\nIs it possible this could have happened?", + "created_at": "2023-01-06T14:33:37Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson! Thank you for your reply! \r\nThe image that I labeled the body parts involved in my user-defined pose configuration had a width of 2440 pixels and a height of 1800 pixels (it was a screenshot I took of one frame in the video). Each video that I imported had a frame width of 1280 pixels and a height of 1024 pixels. \r\nI found this information by looking at the properties of each image/video on my computer, and I also checked the video frame/resolution width and height in SimBA by pressing the Set video parameters button under the Video parameters tab in the Load Project GUI. I am not sure how the resolution of the video could have changed in SimBA.", + "created_at": "2023-01-06T20:18:35Z", + "author": "mrnels19" + }, + { + "body": "Hi, I just wanted to follow up to my previous comment with a short screen recording where I pass through one of my sklearn video results. 
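If a resolution mismatch of the kind described above were the cause, the coordinates could be rescaled before import. A rough sketch for a DLC-style CSV with three header rows; the file name and both resolutions are made-up examples:

```python
import pandas as pd

# Example scale factors: suppose pose estimation ran on a 640x512 video
# while a 1280x1024 copy was imported into the project, so coordinates
# must be doubled. Both resolutions here are placeholders.
x_scale, y_scale = 1280 / 640, 1024 / 512

# DLC-style files carry three header rows (scorer, body part, coord).
df = pd.read_csv("tracking_data.csv", header=[0, 1, 2], index_col=0)
# Columns interleave x, y and likelihood; scale the x/y columns only.
x_cols = [c for c in df.columns if c[-1] == "x"]
y_cols = [c for c in df.columns if c[-1] == "y"]
df[x_cols] *= x_scale
df[y_cols] *= y_scale
df.to_csv("tracking_data_rescaled.csv")
```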
In the screen recording, the tracking starts off fine, but then the body parts for both mice get clumped together, and then no body parts are present towards the end of the video. Could this be an issue with the hyperparameters or a lack of behavior annotations when training the model? Or, could it be due to the videos themselves (like a change in resolution) or another issue? \r\nThank you for your time!\r\n\r\nVideo Link: https://drive.google.com/file/d/1cUIuBv6vc-W8DScjnEbeY6JFQbqlAkJx/view?usp=sharing\r\n", + "created_at": "2023-01-10T05:03:42Z", + "author": "mrnels19" + }, + { + "body": "Hi @mrnels19!\r\n\r\nBefore troubleshooting, I just want to confirm that the animals in the video are looking as expected? I'm looking at the video and I can see the animals jumping around in the open field from one frame to the next. \r\n\r\nThe issue is not going to be hyperparameters or annotations, but the pose estimation tracking data, either before it's imported or when modified during smoothing, interpolation, or outlier correction in SimBA. \r\n\r\n", + "created_at": "2023-01-10T11:12:41Z", + "author": "sronilsson" + }, + { + "body": "Hello! Thank you so much for your help with this. I was able to resolve the issue by adjusting the smoothing, interpolation, and outlier correction in SimBA. ", + "created_at": "2023-01-13T15:58:53Z", + "author": "mrnels19" + } + ] + }, + { + "title": "Error when Generating Project Config and Importing Videos", + "body": "**Describe the bug**\r\nWhenever I click on the buttons titled Generate Project Config and Import Video, I receive errors in the terminal. The errors I receive are found in the screenshots titled Generate Project Config Error and Import Video Error. In the Import Video Error screenshot, the error is inside the white box. I am wondering what is causing these errors, since I need to import videos before I can import any tracking data. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open SimBA and create a new project. Say the number of predictive classifiers is equal to 1 (I was going through the tutorial) and give it a name.\r\n2. In the Type of Tracking menu, select Multi Tracking\r\n3. In the # config menu, select Multi-animals, 8bps.\r\n4. Press Generate Project Config. When I press this button, I get an error as described in the Generate Project Config Error screenshot. \r\n5. Then, I go to the [Import videos into project folder] tab and go to the Import single video sub menu. I am going to import an .avi video. I then press the button that says Import a video, and I get the error in the white box in the Import Video Error screenshot (this error is also displayed right above the white box in the screenshot because I clicked this button twice). \r\n\r\n**Expected behavior**\r\nThere should not be any error messages when these buttons are pressed.\r\n\r\n**Screenshots**\r\nThe screenshots titled Generate Project Config Error and Import Video Error are attached. \r\n\r\n**Desktop (please complete the following information):**\r\n - Windows operating system\r\n - Python 3.6.13\r\n - I used Anaconda to install SimBA. 
When installing, Python 3.6.13 was the closest option to Python 3.6 that I could select from the Python dropdown menu.\r\n \r\n![Generate Project Config Error](https://user-images.githubusercontent.com/121840032/210297630-9affeb39-fdec-4767-9f02-f53ced042092.PNG)\r\n![Import Video Error](https://user-images.githubusercontent.com/121840032/210297638-d9753443-6ab5-4f52-8815-97cf11190b7c.png)\r\n\r\n", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-01-03T03:46:01Z", + "updated_at": "2023-01-03T20:08:54Z", + "author": "mrnels19", + "comments": [ + { + "body": "\r\nI realize that the names of the screenshots do not appear when I add them into my post. The screenshot titled Generate Project Config Error is the one with the mouse images in it. The screenshot titled Import Video Error has the white box.\r\n\r\n", + "created_at": "2023-01-03T03:48:39Z", + "author": "mrnels19" + }, + { + "body": "Hi @mrnels19 - many thanks for reporting. You caught this bug very quickly, it was introduced just before you opened the issue. \r\n\r\nIf you update to version 1.30.7 with `pip install simba-uw-tf-dev --upgrade`, does that fix the issue? \r\n\r\n", + "created_at": "2023-01-03T12:14:08Z", + "author": "sronilsson" + }, + { + "body": "Yes, this helped! Thank you.", + "created_at": "2023-01-03T20:08:53Z", + "author": "mrnels19" + } + ] + }, + { + "title": "Unable to Create Pose Config", + "body": "**Describe the bug**\r\nI am trying to create my own pose config in SimBA. In this pose config, there are two mice in each frame, and each moues has 13 body parts. I am running into an issue when I click on the button titled Save Pose Configs and the Define pose window pops up. I am instructed to double left-click on the position of each body part. However, as soon as I double left-click on the first body part for the first mouse, the image zooms in and I am unable to scroll across the image or zoom out to continue clicking on the other body parts. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open SimBA\r\n2. Create a new project, select the project path, and give the project a name. Then, in the SML settings, set the number of predictive classifiers equal to 1 (I was going through the tutorial on GitHub) and then click Add predictive classifier to give that behavior a name. \r\n3. Under Type of Tracking, select Multi Tracking.\r\n4. Under #config, select Create Pose Config.\r\n5. In the Pose Configuration window that pops up, give the pose config a name, set the number of animals equal to 2 and the number of body parts equal to 13. \r\n6. Find the Image Path and press Confirm.\r\n7. Enter in the body part names and the corresponding animal ID number. Press Save Pose Config for the Define pose window to pop up. \r\n8. Double left-click on each body part on the appropriate animal. (I am trying to do this, but I can't get past the first body part on the first animal because the image permanently zooms in)\r\n\r\n**Expected behavior**\r\nAfter I did a double left-click on the first body part for the first mouse, I expected the image to not zoom in and remain the way it is depicted in the screenshot titled Original Pose Labeling Image. \r\nI tried hitting the esc key and by doing `Ctrl-` to zoom out, but neither of these commands worked. \r\n\r\n**Screenshots**\r\nThe screenshot titled Original Pose Labeling Image shows the initial image I saw when I clicked on the Save Pose Config button. 
The screenshot titled Final Pose Labeling Image shows the zoomed image I encountered after doing a double left-click to label the first body part on the first mouse. \r\n\r\n**Desktop (please complete the following information):**\r\n - Windows\r\n - Python Version 3.6.13\r\n - SimBA was installed using Anaconda, and the version of Anaconda that I have only allowed me to select Python 3.6.13 from a list of Python versions. I could not write in Python 3.6.\r\n\r\nIf you could help with this, I would really appreciate it!\r\n\r\n![Final Pose Labeling Image](https://user-images.githubusercontent.com/121840032/210296103-a71d8bba-5a47-480d-97af-3aa040afd12d.PNG)\r\n![Original Pose Labeling Image](https://user-images.githubusercontent.com/121840032/210296111-cea8e556-d880-4ed7-952d-a7839a91855d.PNG)\r\n\r\n", + "user": "mrnels19", + "reaction_cnt": 0, + "created_at": "2023-01-03T03:27:02Z", + "updated_at": "2023-01-05T02:39:02Z", + "author": "mrnels19", + "comments": [ + { + "body": "I realize that the names of the screenshots do not appear when I add them into my post. The screenshot titled Original Pose Labeling is the screenshot with the zoomed out image in it. The screenshot titled Final Pose Labeling is the one with the zoomed in image.", + "created_at": "2023-01-03T03:47:22Z", + "author": "mrnels19" + }, + { + "body": "Thanks again @mrnels19 - I was unable to recreate this, maybe because I only have a mac available, but inserted a *potential* fix that hopefully takes care of it. if you update simba with `pip install simba-uw-tf-dev --upgrade` (version 1.30.9) how does it look? ", + "created_at": "2023-01-03T13:37:23Z", + "author": "sronilsson" + }, + { + "body": "@mrnels19 - I saw your report about the windows and dropdowns not closing/updating as expected (although I can't see comment anymore). It should be OK if you close them and open windows again. \r\n\r\nThat said, I made an update that should take care of it. If you have time, can you let me know if it behaves as expected on your end after running `pip install simba-uw-tf-dev --upgrade` (version 1.31.1). \r\n", + "created_at": "2023-01-04T01:11:53Z", + "author": "sronilsson" + }, + { + "body": "This worked, thank you so much!", + "created_at": "2023-01-05T02:39:02Z", + "author": "mrnels19" + } + ] + }, + { + "title": "Problem with sklearn visualization ", + "body": "**Describe the bug**\r\nWhen I try to \"visualize classification results\" for all my videos I get an error \"TypeError: __init__() missing 1 required positional argument: 'video_file_path'\", although when I do the same for only one video (\"apply to single video\") at the time it seems to work just fine.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'visualizations'\r\n2. Click on 'visualize classification results'\r\n4. See error\r\n\r\n\r\n**Screenshots**\r\n![image](https://user-images.githubusercontent.com/86400083/207390236-a4ecac9e-dd56-474c-8fe4-c17140394a0e.png)\r\n\r\n![image](https://user-images.githubusercontent.com/86400083/207540282-3da647fa-faaa-4593-82c0-d267efb332ef.png)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: windows 10 pro\r\n - Python Version [e.g. 3.6.0]\r\n - Are you using anaconda? yes\r\n \r\n\r\n**Additional context**\r\nIs the problem with my config? 
(pics added)\r\n![image](https://user-images.githubusercontent.com/86400083/207540685-bc5a359b-5f8c-4252-b029-efdf48b727e4.png)\r\n![image](https://user-images.githubusercontent.com/86400083/207540835-fe7fc539-a768-4731-bce9-601f9b4c6b2e.png)\r\n", + "user": "Urimons", + "reaction_cnt": 0, + "created_at": "2022-12-14T08:07:12Z", + "updated_at": "2022-12-14T15:27:36Z", + "author": "Urimons", + "comments": [ + { + "body": "Hi @Urimons - thanks for reporting!\r\n\r\nIf you update to latest version `1.30.2` with `pip install simba-uw-tf-dev --upgrade`, do you still see the error? ", + "created_at": "2022-12-14T10:42:17Z", + "author": "sronilsson" + }, + { + "body": "No I do not!\r\nIt seems to work just fine :)\r\nThanks!", + "created_at": "2022-12-14T15:05:26Z", + "author": "Urimons" + }, + { + "body": "Thanks for letting me know!", + "created_at": "2022-12-14T15:27:35Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Analyze ROI with specific time frame", + "body": "Hello,\r\n\r\nIs there a way to analyze time spent and entry ties of specific ROIs for a specific time? For example, if I have a 5 minute video and want to also analyze ROIs for the first 2 minutes of video.", + "user": "Destiny-123", + "reaction_cnt": 0, + "created_at": "2022-12-09T01:08:46Z", + "updated_at": "2023-08-14T19:43:21Z", + "author": "Destiny-123", + "comments": [ + { + "body": "Hi @Destiny-123, yes there is a method to split the results into time-bins, see screenshot below. To get the results in the first 2 minutes, set the bin size to 120s, and look at the results in the very first time bin in the output results file.\r\n\r\n![image](https://user-images.githubusercontent.com/34761092/206601530-663506c2-d6e0-4946-9dd5-cbd813f36b4d.png)\r\n", + "created_at": "2022-12-09T01:22:24Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Problens with simba (ROIS)", + "body": "**Describe the bug**\r\nI have 3 issues \r\nWhen I go Set video parameters this erro is show:\r\n\r\nwarning: Error opening file (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:901)\r\nwarning: C:/Users/anaca/Downloads/Testes track/pYRAT\\BODY\\project_folder\\videos\\0 (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:902).\r\n\r\nWhen I run Outiler correction this is show\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\anaca\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\anaca\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 3668, in \r\n button_skipOC = Button(label_outliercorrection,text='Skip outlier correction (CAUTION)',fg='red', command=lambda:skip_outlier_c(self.projectconfigini))\r\n File \"c:\\users\\anaca\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\outlier_scripts\\skip_outlierCorrection.py\", line 52, in skip_outlier_c\r\n csv_df.columns = newHeaders\r\n File \"c:\\users\\anaca\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\generic.py\", line 5192, in __setattr__\r\n return object.__setattr__(self, name, value)\r\n File \"pandas/_libs/properties.pyx\", line 67, in pandas._libs.properties.AxisProperty.__set__\r\n File \"c:\\users\\anaca\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\generic.py\", line 690, in _set_axis\r\n self._data.set_axis(axis, labels)\r\n File \"c:\\users\\anaca\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 183, in set_axis\r\n \"values have {new} elements\".format(old=old_len, 
new=new_len)\r\nValueError: Length mismatch: Expected axis has 36 elements, new values have 30 elements\r\n\r\n\r\nWhen I run ROI analyses Nothing is generated\r\n", + "user": "anacastropsico", + "reaction_cnt": 0, + "created_at": "2022-11-30T21:05:38Z", + "updated_at": "2022-12-02T01:04:25Z", + "author": "anacastropsico", + "comments": [ + { + "body": "Hi @anacastropsico! Which version of simba are you running? Can you try to upgrade simba with `pip install simba-uw-tf-dev --upgrade` and let me know if you still see the same error msg? ", + "created_at": "2022-11-30T21:42:00Z", + "author": "sronilsson" + }, + { + "body": "\r\n> Hi @anacastropsico! Which version of simba are you running? Can you try to upgrade simba with `pip install simba-uw-tf-dev --upgrade` and let me know if you still see the same error msg?\r\n\r\nI had problems with the installation and I had two versions of simba installed.\r\nsimba-uw-tf-dev 1.2\r\nsimba-uw-no-tf 1.3 (I installed this one because I don't have a GPU). I only want to use simba to run ROIS analysis with my DLC files. I don't have a GPU, which version do you recommend? Can I just run the command you suggested?", + "created_at": "2022-11-30T22:06:42Z", + "author": "anacastropsico" + }, + { + "body": "Now I'm with version simba: simba-uw-tf-dev-1.22.4\r\nI have problem with outliers corretion, when I click in 'Run' show this...\r\n\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 3131, in correct_outlier\r\n outlier_correcter_movement = OutlierCorrecterMovement(config_path=configini)\r\n\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\outlier_scripts\\outlier_corrector_movement.py\", line 59, in __init__\r\n self.outlier_bp_dict[animal_name]['bp_1'] = read_config_entry(self.config, 'Outlier settings', 'movement_bodypart1_{}'.format(animal_name.lower()), 'str')\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\read_config_unit_tests.py\", line 110, in read_config_entry\r\n raise ValueError\r\nValueError\r\n![image](https://user-images.githubusercontent.com/81208194/204927867-2f113fa9-bd7c-4d09-a5d2-9b86de6bbb84.png\r\nIf I want skip outlier...\r\n\r\nProcessing 1 file(s) this is show in SIMBA interface load.\r\nSIMBA ERROR: SimBA expects 30 columns of data inside the files within project_folder/csv/input_csv directory. However, within file C:/Users/anaca/Downloads/Testes track/pYRAT\\BODY\\project_folder\\csv\\input_csv\\R20D25T4DLC.csv file, SimBA found 36 columns.\r\n\r\n", + "created_at": "2022-11-30T22:50:26Z", + "author": "anacastropsico" + }, + { + "body": "Thanks @anacastropsico - use the `simba-uw-tf-dev` version, and make sure the `simba-uw-no-tf` version is uninstalled by running `pip uninstall simba-uw-no-tf`. \r\n\r\nThere seems to be a couple things going on. We should deal with the error at the end first: `SIMBA ERROR: SimBA expects 30 columns...`. This suggests that your data contains pose-estimation for 12 body-parts (36/3). However, when SimBA looks to see how many body-parts are defined in the SimBA project, it finds 10 (30/3). When you created your SimBA project, could you have missed out two body-parts? 
The body-parts that your projectis expecting is stored within your SimBA project in the `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv` CSV so you could have a look there. \r\n\r\n", + "created_at": "2022-11-30T23:20:05Z", + "author": "sronilsson" + }, + { + "body": "I have only 10 body-parts\r\n![image](https://user-images.githubusercontent.com/81208194/204931829-83d98055-28d3-4e47-b832-c7de40c0f1e2.png)\r\n", + "created_at": "2022-11-30T23:43:43Z", + "author": "anacastropsico" + }, + { + "body": "> Obrigado@anacastropsico- use a `simba-uw-tf-dev`versão e certifique-se de que a `simba-uw-no-tf`versão seja desinstalada executando `pip uninstall simba-uw-no-tf`.\r\n> \r\n> Parece haver algumas coisas acontecendo. Devemos lidar com o erro no final primeiro: `SIMBA ERROR: SimBA expects 30 columns...`. Isso sugere que seus dados contêm estimativa de pose para 12 partes do corpo (36/3). No entanto, quando o SimBA verifica quantas partes do corpo estão definidas no projeto SimBA, ele encontra 10 (30/3). Quando você criou seu projeto SimBA, poderia ter perdido duas partes do corpo? As partes do corpo que seu projeto espera são armazenadas em seu projeto SimBA no `project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv`CSV para que você possa dar uma olhada lá.\r\n\r\nI try ainda remake bodyparts in new project.. this erro is show\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 915, in \r\n self.saveposeConfigbutton = Button(self.scroller,text='Save Pose Config',command=lambda:self.savePoseConfig(self.scroller))\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 943, in savePoseConfig\r\n animal_id_int_list=animal_id_lst)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\user_pose_config_creator.py\", line 53, in __init__\r\n self.img_height, self.img_width = int(self.img.shape[0]), int(self.img.shape[1])\r\nAttributeError: 'NoneType' object has no attribute 'shape'", + "created_at": "2022-11-30T23:56:34Z", + "author": "anacastropsico" + }, + { + "body": "That error msg suggests that you selected no image, or a file that could not be read as an image, to represent your user defined body-part configuration. ", + "created_at": "2022-11-30T23:59:40Z", + "author": "sronilsson" + }, + { + "body": "> msg suggests that you selected no image, or a file that could not be read as an image, to represent your user defined body-part configuration.\r\n\r\nI'm concert to number body part. Now erro is with csv file DLC imput...when I click in outlier correct show\r\n\r\nI'm using single animal.\r\nProcessing 1 file(s)...\r\n('Unable to parse string \"20.672.662.839.053.200\" at position 0', 'occurred at index fucinho')\r\n\r\n\r\nSIMBA WARNING: SimBA found more than the expected two header columns. SimBA will try to proceed by removing one additional column header level. This can happen when you import multi-animal DLC data as standard DLC data.\r\n\r\nmy file: \r\n![image](https://user-images.githubusercontent.com/81208194/204939640-9b4153b8-c58e-4648-be7a-864c74f47a87.png)\r\n", + "created_at": "2022-12-01T00:52:16Z", + "author": "anacastropsico" + }, + { + "body": "Looking at that spreadsheet, the values does not look like pixel coordinates. 
SimBA expects float or integer values, but encounters, for example, `20.672.662.839.053.200` as the first value which is a string and not a number. SimBA tries to convert it to a number but fails and thats why you see this error. \r\n\r\nCould you think of any reason why the values wouldn't be numeric?\r\n", + "created_at": "2022-12-01T10:53:25Z", + "author": "sronilsson" + }, + { + "body": "> Olhando para essa planilha, os valores não se parecem com coordenadas de pixel. O SimBA espera valores flutuantes ou inteiros, mas encontra, por exemplo, `20.672.662.839.053.200`como o primeiro valor que é uma string e não um número. O SimBA tenta convertê-lo em um número, mas falha e é por isso que você vê esse erro.\r\n> \r\n> Você poderia pensar em algum motivo para os valores não serem numéricos?\r\n\r\nwhat would be a value that simba expects? all my DLC output sheets came out like this... and I looked at copies of the simba documentation and they are similar to mine. I didn't understand what the problem was with the values in my table. I can user the H5 file?", + "created_at": "2022-12-01T14:03:07Z", + "author": "anacastropsico" + }, + { + "body": "I think it's not just SimBA, but most or all processing tools require values that refer to pixel locations in either float of int type., you have string type location predictions. E.g., here is a file from one of my test preojects. \r\n[SI_DAY3_308_CD1_PRESENT.csv](https://github.com/sgoldenlab/simba/files/10133074/SI_DAY3_308_CD1_PRESENT.csv)\r\n", + "created_at": "2022-12-01T14:13:16Z", + "author": "sronilsson" + }, + { + "body": "I ran my analyzes on google colab, I don't know if there is a problem with formatting the file there and when downloading it happens...visibly for me your table is the same as the one I have on the pc...\r\n![image](https://user-images.githubusercontent.com/81208194/205077372-8c156de0-b5f5-4dbe-9878-bd2645394a21.png)\r\n", + "created_at": "2022-12-01T14:24:12Z", + "author": "anacastropsico" + }, + { + "body": "Hi @anacastropsico - might be some issue when you open your file in Microsoft Excel. Difficult to see.. but your values are not comma separated ints or floats \r\n![image](https://user-images.githubusercontent.com/34761092/205079328-1423ff39-36c9-413c-b061-09297ef53739.png)\r\n", + "created_at": "2022-12-01T14:32:57Z", + "author": "sronilsson" + }, + { + "body": "Whereas on right, we have a float \r\n![image](https://user-images.githubusercontent.com/34761092/205079635-0553d307-2348-42cd-8107-530370f2a9e1.png)\r\n", + "created_at": "2022-12-01T14:34:06Z", + "author": "sronilsson" + }, + { + "body": "\r\n\r\n\r\nI understend now... I execulted one conversion in my table... 
see..\r\n\r\n![image](https://user-images.githubusercontent.com/81208194/205081663-51448f96-8917-4c2d-8863-8c708109f534.png)\r\n\r\n\r\nbut one erro is show in anaconda.\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 1472, in \r\n button_skipOC = Button(label_outliercorrection,text='Skip outlier correction (CAUTION)',fg='red', command=lambda: self.initiate_skip_outlier_correction())\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 2035, in initiate_skip_outlier_correction\r\n outlier_correction_skipper.skip_outlier_correction()\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\outlier_scripts\\skip_outlier_correction.py\", line 58, in skip_outlier_correction\r\n data_df = read_df(file_path, self.file_type)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\rw_dfs.py\", line 25, in read_df\r\n df = pd.read_csv(file_path, index_col=idx, low_memory=False)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\io\\parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\io\\parsers.py\", line 463, in _read\r\n data = parser.read(nrows)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1154, in read\r\n ret = self._engine.read(nrows)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\pandas\\io\\parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 884, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 965, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 2132, in pandas._libs.parsers.raise_parser_error\r\npandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 4, saw 32\r\nin interface simba show: Processing 1 file(s)...", + "created_at": "2022-12-01T14:41:51Z", + "author": "anacastropsico" + }, + { + "body": "Those values you see now are VERY large integers, it is unlikely that `fucino` is located at those pixel values, it would be a video with resolution unlikely to fit on a harddisk. Maybe the separator in the float was removed by mistake? ", + "created_at": "2022-12-01T14:50:40Z", + "author": "sronilsson" + }, + { + "body": "> Esses valores que você vê agora são inteiros MUITO grandes, é improvável que `fucino`esteja localizado nesses valores de pixel, seria um vídeo com resolução improvável de caber em um disco rígido. Talvez o separador no flutuador tenha sido removido por engano?\r\n\r\nSo I really need to look for another solution. I don't know why these outputs were generated as a string. I cant user the H5 file? or no?", + "created_at": "2022-12-01T14:54:46Z", + "author": "anacastropsico" + }, + { + "body": "Yes you can try and import the H5, I don't know how many animals you are tracking, but you can try to import the h5", + "created_at": "2022-12-01T15:00:58Z", + "author": "sronilsson" + }, + { + "body": "> Yes you can try and import the H5, I don't know how many animals you are tracking, but you can try to import the h5\r\n\r\nI'm tracking only single animal. 
The problem in my tables is when I open in excel... whem i open in google or notes the values are...\r\n\r\n![image](https://user-images.githubusercontent.com/81208194/205086766-8a18fa23-9932-4373-96e6-ae1651c88288.png)", + "created_at": "2022-12-01T15:03:13Z", + "author": "anacastropsico" + }, + { + "body": "👍 The `pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 4, saw 32` error suggest there is some file inside the `project_folder/csv/input_csv` that cannot be opened as a CSV, that even though that file has a .csv file extension, it is not a proper csv", + "created_at": "2022-12-01T15:05:03Z", + "author": "sronilsson" + }, + { + "body": "> 👍O `pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 4, saw 32`erro sugere que há algum arquivo dentro do `project_folder/csv/input_csv`que não pode ser aberto como CSV, embora esse arquivo tenha uma extensão de arquivo .csv, não é um csv adequado\r\n\r\nDoes that mean I won't be able to use the csv files from the colab? even the ones I didn't open in excel? to use the H5 how should I define the settings when creating the project? There I only have the h5 file option for multi_animals.\r\n\r\n> 👍 The `pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 4, saw 32` error suggest there is some file inside the `project_folder/csv/input_csv` that cannot be opened as a CSV, that even though that file has a .csv file extension, it is not a proper csv\r\n\r\nDoes that mean I won't be able to use the csv files from the colab? even the ones I didn't open in excel? to use the H5 how should I define the settings when creating the project? There I only have the h5 file option for multi_animals.", + "created_at": "2022-12-01T15:14:39Z", + "author": "anacastropsico" + }, + { + "body": "You should be able to use the CSV just fine, it looks proper in that last screengrab. When you see the`pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 4, saw 32`, do you see any erro msgs in the main simba terminal window?", + "created_at": "2022-12-01T15:18:04Z", + "author": "sronilsson" + }, + { + "body": "> Você deve ser capaz de usar o CSV muito bem, parece adequado na última captura de tela. Quando você vê o `pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 4, saw 32`, você vê alguma mensagem de erro na janela principal do terminal simba?\r\n\r\nNo... 
simba show : Processing 1 files(s)...", + "created_at": "2022-12-01T15:19:43Z", + "author": "anacastropsico" + }, + { + "body": "How many files are in the `project_folder/csv/input_csv` folder, and can you sen me an example?\r\n\r\n", + "created_at": "2022-12-01T15:21:24Z", + "author": "sronilsson" + }, + { + "body": "> How many files are in the `project_folder/csv/input_csv` folder, and can you sen me an example?\r\n\r\nOnly 1.\r\n![image](https://user-images.githubusercontent.com/81208194/205092792-735366e0-afbc-4218-b820-33cfef09cbbb.png)\r\n", + "created_at": "2022-12-01T15:28:59Z", + "author": "anacastropsico" + }, + { + "body": "When I go create a new project with same file csv this erro is show...\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 1016, in import_singlecsv\r\n shutil.copy()\r\nTypeError: copy() missing 2 required positional arguments: 'src' and 'dst'\r\n\r\n![image](https://user-images.githubusercontent.com/81208194/205093738-61e1f016-f77c-4844-a820-fb0cd0c269f8.png)\r\n", + "created_at": "2022-12-01T15:33:41Z", + "author": "anacastropsico" + }, + { + "body": "Now is done!!!\r\n![image](https://user-images.githubusercontent.com/81208194/205095080-39d6c79f-9e9f-4581-9791-28adfff5bd95.png)\r\n", + "created_at": "2022-12-01T15:38:45Z", + "author": "anacastropsico" + }, + { + "body": "Hmm.. have a trouble keeping up :) But will go over and see whats going on later. ", + "created_at": "2022-12-01T15:39:20Z", + "author": "sronilsson" + }, + { + "body": "When I finally defined the ROIS, conda show it...\r\nSelect a ROI and then press SPACE or ENTER button!\r\nCancel the selection process by pressing c button!\r\nSelect a ROI and then press SPACE or ENTER button!\r\nCancel the selection process by pressing c button!\r\nSelect a ROI and then press SPACE or ENTER button!\r\nCancel the selection process by pressing c button!\r\nFatal Python error: PyEval_RestoreThread: NULL tstate\r\n\r\nCurrent thread 0x000001cc (most recent call first):\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_image.py\", line 224 in initiate_x_y_callback\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_image.py\", line 295 in interact_functions\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 483 in set_interact_state\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 308 in \r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705 in __call__\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 560 in mainloop\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_define.py\", line 124 in __init__\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\roi_tools\\ROI_menus.py\", line 60 in draw\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1705 in __call__\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\tkinter\\__init__.py\", line 1283 in mainloop\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\site-packages\\simba\\SimBA.py\", line 3770 in 
main\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\Scripts\\simba.exe\\__main__.py\", line 7 in \r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\runpy.py\", line 85 in _run_code\r\n File \"C:\\Users\\anaca\\anaconda3\\envs\\SIMBA\\lib\\runpy.py\", line 193 in _run_module_as_main\r\n(SIMBA) PS C:\\WINDOWS\\system32>", + "created_at": "2022-12-01T15:45:28Z", + "author": "anacastropsico" + }, + { + "body": "Interesting, which version of python are you running `python --version` in your SIMBA conda environment", + "created_at": "2022-12-01T15:58:21Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Update anchored_rois.md", + "body": "Further highlighted the importance of calculating boundary statistics prior to visualising intersections.", + "user": "Toshea111", + "reaction_cnt": 0, + "created_at": "2022-11-22T11:26:38Z", + "updated_at": "2022-11-22T11:55:13Z", + "author": "Toshea111", + "comments": [] + }, + { + "title": "Bump pillow from 5.4.1 to 9.3.0 in /simba", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 9.3.0.\n
Release notes

Sourced from pillow's releases.

9.3.0

https://pillow.readthedocs.io/en/stable/releasenotes/9.3.0.html

Changes

... (truncated)

Changelog

Sourced from pillow's changelog.

9.3.0 (2022-10-29)

- Limit SAMPLESPERPIXEL to avoid runtime DOS #6700 [wiredfool]
- Initialize libtiff buffer when saving #6699 [radarhere]
- Inline fname2char to fix memory leak #6329 [nulano]
- Fix memory leaks related to text features #6330 [nulano]
- Use double quotes for version check on old CPython on Windows #6695 [hugovk]
- Remove backup implementation of Round for Windows platforms #6693 [cgohlke]
- Fixed set_variation_by_name offset #6445 [radarhere]
- Fix malloc in _imagingft.c:font_setvaraxes #6690 [cgohlke]
- Release Python GIL when converting images using matrix operations #6418 [hmaarrfk]
- Added ExifTags enums #6630 [radarhere]
- Do not modify previous frame when calculating delta in PNG #6683 [radarhere]
- Added support for reading BMP images with RLE4 compression #6674 [npjg, radarhere]
- Decode JPEG compressed BLP1 data in original mode #6678 [radarhere]
- Added GPS TIFF tag info #6661 [radarhere]
- Added conversion between RGB/RGBA/RGBX and LAB #6647 [radarhere]
- Do not attempt normalization if mode is already normal #6644 [radarhere]

... (truncated)

Commits

- d594f4c Update CHANGES.rst [ci skip]
- 909dc64 9.3.0 version bump
- 1a51ce7 Merge pull request #6699 from hugovk/security-libtiff_buffer
- 2444cdd Merge pull request #6700 from hugovk/security-samples_per_pixel-sec
- 744f455 Added release notes
- 0846bfa Add to release notes
- 799a6a0 Fix linting
- 00b25fd Hide UserWarning in logs
- 05b175e Tighter test case
- 13f2c5a Prevent DOS with large SAMPLESPERPIXEL in Tiff IFD
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=9.3.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-11-22T03:44:21Z", + "updated_at": "2022-12-19T15:23:03Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-11-26T18:52:44Z", + "author": "dependabot[bot]" + }, + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-12-19T15:22:55Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 9.3.0", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 9.3.0.\n
Release notes

Sourced from pillow's releases.

9.3.0

https://pillow.readthedocs.io/en/stable/releasenotes/9.3.0.html

Changes

... (truncated)

Changelog

Sourced from pillow's changelog.

9.3.0 (2022-10-29)

- Limit SAMPLESPERPIXEL to avoid runtime DOS #6700 [wiredfool]
- Initialize libtiff buffer when saving #6699 [radarhere]
- Inline fname2char to fix memory leak #6329 [nulano]
- Fix memory leaks related to text features #6330 [nulano]
- Use double quotes for version check on old CPython on Windows #6695 [hugovk]
- Remove backup implementation of Round for Windows platforms #6693 [cgohlke]
- Fixed set_variation_by_name offset #6445 [radarhere]
- Fix malloc in _imagingft.c:font_setvaraxes #6690 [cgohlke]
- Release Python GIL when converting images using matrix operations #6418 [hmaarrfk]
- Added ExifTags enums #6630 [radarhere]
- Do not modify previous frame when calculating delta in PNG #6683 [radarhere]
- Added support for reading BMP images with RLE4 compression #6674 [npjg, radarhere]
- Decode JPEG compressed BLP1 data in original mode #6678 [radarhere]
- Added GPS TIFF tag info #6661 [radarhere]
- Added conversion between RGB/RGBA/RGBX and LAB #6647 [radarhere]
- Do not attempt normalization if mode is already normal #6644 [radarhere]

... (truncated)

Commits

- d594f4c Update CHANGES.rst [ci skip]
- 909dc64 9.3.0 version bump
- 1a51ce7 Merge pull request #6699 from hugovk/security-libtiff_buffer
- 2444cdd Merge pull request #6700 from hugovk/security-samples_per_pixel-sec
- 744f455 Added release notes
- 0846bfa Add to release notes
- 799a6a0 Fix linting
- 00b25fd Hide UserWarning in logs
- 05b175e Tighter test case
- 13f2c5a Prevent DOS with large SAMPLESPERPIXEL in Tiff IFD
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=9.3.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-11-22T03:41:54Z", + "updated_at": "2022-12-19T15:23:03Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-12-19T15:22:55Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.9.3 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.9.3.\n
Release notes

Sourced from tensorflow-gpu's releases.

TensorFlow 2.9.3

Release 2.9.3

This release introduces several vulnerability fixes:

TensorFlow 2.9.2

Release 2.9.2

This release introduces several vulnerability fixes:

... (truncated)

Changelog

Sourced from tensorflow-gpu's changelog.

Release 2.9.3

This release introduces several vulnerability fixes:

Release 2.8.4

This release introduces several vulnerability fixes:

... (truncated)

Commits

- a5ed5f3 Merge pull request #58584 from tensorflow/vinila21-patch-2
- 258f9a1 Update py_func.cc
- cd27cfb Merge pull request #58580 from tensorflow-jenkins/version-numbers-2.9.3-24474
- 3e75385 Update version numbers to 2.9.3
- bc72c39 Merge pull request #58482 from tensorflow-jenkins/relnotes-2.9.3-25695
- 3506c90 Update RELEASE.md
- 8dcb48e Update RELEASE.md
- 4f34ec8 Merge pull request #58576 from pak-laura/c2.99f03a9d3bafe902c1e6beb105b2f2417...
- 6fc67e4 Replace CHECK with returning an InternalError on failing to create python tuple
- 5dbe90a Merge pull request #58570 from tensorflow/r2.9-7b174a0f2e4
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.9.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-11-21T21:06:06Z", + "updated_at": "2022-12-19T15:23:03Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-11-26T18:52:44Z", + "author": "dependabot[bot]" + }, + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-12-19T15:22:55Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Update anchored_rois.md", + "body": "I have made a few minor corrections to the tutorial. One suggestion that I have is to move the 'CALCULATING BOUNDARY STATISTICS' section in front of the 'VISUALIZING ANIMAL-ANCHORED ROIs' section. This is due to the requirement for the 'HIGHLIGHT INTERSECTIONS' function to have boundary statistics already calculated. I found that I spent some time trying to work out what was wrong when it did not visualise the intersections, although that may just be me.", + "user": "Toshea111", + "reaction_cnt": 0, + "created_at": "2022-11-20T14:32:44Z", + "updated_at": "2022-11-21T01:18:18Z", + "author": "Toshea111", + "comments": [] + }, + { + "title": "questions about behaviour labeling ", + "body": "hello, I'm a newbie and use simba for the first time. \r\n\r\n1) I'm curious when labeling the behavior, do I need to label all the frames of interest or just need to label part of them?For example, I want to classify the freezing behaviror. For a full video, maybe there are one hundred of this behavior, do i need to label all of them? if labeling 10 of this behavior, does it can work?\r\n\r\n2) I only foucs on freezing behavior, so in addition to freezing behavior, do i need to creat another classifier named non-freezing behavior and then label it?\r\n", + "user": "colinyong", + "reaction_cnt": 1, + "created_at": "2022-11-10T14:11:26Z", + "updated_at": "2022-11-10T16:39:06Z", + "author": "colinyong", + "comments": [ + { + "body": "Hi @colinyong! \r\n\r\n(1) The classifier needs annotation examples what freezing looks like, what what freezing doesn't look like, from a variable set of videos. Ideally the annotations should contain examples of edge-cases and: as in \"a naive observer might have difficulties with this one\". It's difficult to say how many annotations you need, as it depends on how good your tracking is and the complexity of your behavior. There are some example annotations count in the SImBA priprent table 2: https://www.biorxiv.org/content/10.1101/2020.04.19.049452v2.full.pdf. \r\n\r\nIf you only annotate parts of the videos (and skip annotating some frames) - there are a few annotations methods in SimBA that differ in the way they handle those un-annotated frames - e.g., should the un-annotated frames be assumed not containing the behavior, or should we skip using them in the classifier. You can read more about it here: https://github.com/sgoldenlab/simba/blob/master/docs/label_behavior.md\r\n\r\n2. All classifiers are binary, so you only need one per behavior. 
You get a probability score ranging from 0-1 of the behavior is present in every frame. You can think of it as the same as annotating in pose-estimation tools. When you place your marker for where the body-parts are, you are also telling the models where the body-part isn't", + "created_at": "2022-11-10T16:39:06Z", + "author": "sronilsson" + } + ] + }, + { + "title": "do not rotate the video during Sklearn Visualization", + "body": "Hi,\r\n\r\nThank you for the wonderful tool for behavior analysis. In my case, the mice were head-fixed. Therefore, the best way to visualize the result is to keep the view angle. However, it seems that the video was rotated 90 degrees anticlockwise.\r\n![image](https://user-images.githubusercontent.com/46367566/200764740-90fd4508-c217-4622-a68f-90ee7260bdf7.png)\r\nIs there a way to keep the view angle in the Sklearn Visualization step? \r\nMeanwhile, I found that the font is too small or blurry to discern sometimes. It would be better if we can set the font size.\r\n\r\nThanks,\r\nLei", + "user": "chongtianyifa", + "reaction_cnt": 0, + "created_at": "2022-11-09T07:24:19Z", + "updated_at": "2022-11-11T15:22:50Z", + "author": "chongtianyifa", + "comments": [ + { + "body": "Hi @chongtianyifa! Someone else just pointed this out. If you update SimBA with `pip install simba-uw-tf-dev --upgrade`, you should see the checkbox with red underline in the screengrab in image linked below. To not rotate the video, leave this checkbox unchecked before creating video.\r\n\r\nhttps://files.gitter.im/5de95babd73408ce4fd337a5/3iPz/rotate_90.png\r\n\r\nThe thickness of the font is too high for your resolution which makes it blurry, it is hard coded to `2` and can't currently be set in the GUI, but if you wait a few days I will make option available.. If you want to change it before, it's the final `2` in e.g., this line of code, maybe change it to a `1`.\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/2f1a4c9f3a7a60e904f54dc0a510323feb867be4/simba/sklearn_plot_scripts/plot_sklearn_results_all.py#L144", + "created_at": "2022-11-09T11:00:53Z", + "author": "sronilsson" + }, + { + "body": "Hi,\r\n\r\nThank you for the quick response. I cannot use the upgrade solution as I failed many times to install the tensor flow version. I am using simba-uw-no-tf.\r\n![image](https://user-images.githubusercontent.com/46367566/200908876-c7243417-1a62-49ba-ad0f-921e13f2fe54.png)\r\nI did not see plot_sklearn_results_all.py function here. Which one should I modify?\r\n\r\nLei", + "created_at": "2022-11-09T18:14:59Z", + "author": "chongtianyifa" + }, + { + "body": "Hi @chongtianyifa - I have not maintained `simba-uw-no-tf` for some years, would you mind switching to `pip install simba-uw-tf-dev`? You could do `pip uninstall simba-uw-no-tf` followed by `pip install simba-uw-tf-dev` and you should be able to open your project as normal but let me know if not. ", + "created_at": "2022-11-09T19:40:50Z", + "author": "sronilsson" + }, + { + "body": "I still couldn't install the tf-dev version. Here is how I install it:\r\n1) create a new virtual environment `conda create --name simba-tf python=3.6` and activate it.\r\n2) `pip install simba-uw-tf-dev`\r\n3) after call 'simba', error came out\r\n![image](https://user-images.githubusercontent.com/46367566/200948289-ee6eb914-fd55-47c0-bd2d-69e86dc9d780.png)\r\n4) then `pip uninstall shapely` and `conda install -c conda-forge shapely`. 
It failed:\r\n![image](https://user-images.githubusercontent.com/46367566/200948537-4a0aa557-c329-45ef-b016-60c48635bde8.png)\r\n\r\n", + "created_at": "2022-11-09T21:46:09Z", + "author": "chongtianyifa" + }, + { + "body": "Hi, I actually tried `pip uninstall shapely` and then `pip install shapely`. It worked! Now I see the choice not to rotate the video. Thank you!", + "created_at": "2022-11-10T00:42:09Z", + "author": "chongtianyifa" + } + ] + }, + { + "title": "Anchored ROI Request", + "body": "**Is your feature request related to a problem? Please describe.**\r\nI am interested in quantifying interactions between several tracked individuals that occur during close contact, with potentially many such interactions occurring at once. This makes it difficult for the behavioural classifier to accurately detect single interactions.\r\n\r\n**Describe the solution you'd like**\r\nBy including ROIs that move with an individual or body part (thus are 'anchored' to them), this would allow the quantification of such interactions reliably, either through detection of ROI overlaps, or detection of other individuals' body parts.\r\n\r\n**Describe alternatives you've considered**\r\nI have tried training a classifier, but the issue is that interactions are variable at any one time.\r\n\r\n**Additional context**\r\nI am working with groups of termites tracked in SLEAP, attached is an example video of the tracked data, for reference.\r\n\r\nhttps://user-images.githubusercontent.com/109351104/200376535-4e47a3f7-626e-43df-bd99-081c327ead94.mp4\r\n\r\n\r\n", + "user": "Toshea111", + "reaction_cnt": 0, + "created_at": "2022-11-07T17:34:56Z", + "updated_at": "2023-01-18T15:51:07Z", + "author": "Toshea111", + "comments": [ + { + "body": "Hi @Toshea111! Makes sense. Just a couple questions. \r\n\r\nHow do you see the anchored ROIs being defined - is it enough so specify a diameter/width for a circle/rectangle, or do you ever see yourself using polygon anchored ROIs (which is a little trickier to define the size of a single entry box)? \r\n\r\nAs you say - for each anchored ROI and frame, we can find which other anchored ROIs and which other animal key-points overlaps with it. Is that all the info you need? Those outputs will be in string format, not numerical, and won't immediately fit into any downstream Ml algo. If for ML I guess it would have to be transformed to counts or some sparse table with categoricals. ", + "created_at": "2022-11-07T20:23:58Z", + "author": "sronilsson" + }, + { + "body": "Hello Simon,\r\n\r\nThank you for the rapid response, to answer your questions:\r\n\r\n1. Circle or rectangle ROIs would suffice, attached is an example superimposed over a frame from the previous video. By specifying the dimensions of a rectangle, and the body part to which it is anchored, you could in theory limit it to head-to-head, or head-to-tail overlaps as needed.\r\n2. It would also be useful to have a time measure, indicating the number of frames that ROIs remain overlapped, this would then allow discrimination between chance overlaps and actual interactions, by setting a user-defined time threshold for overlaps to qualify as an interaction.\r\n3. The key outputs would be overlap IDs, a count of these to quantify the number of interactions between each ID pair, and perhaps a measure of overlap event durations and timings. 
As you mention, most downstream analyses would work with counts and pair overlap frequencies.\r\n\r\nI am happy to discuss in more detail if any of the above points need further clarification.\r\n\r\n![Anchored ROIs](https://user-images.githubusercontent.com/109351104/200543633-2982f850-861a-4843-af7a-1171c00c10d3.png)\r\n", + "created_at": "2022-11-08T10:47:38Z", + "author": "Toshea111" + }, + { + "body": "Got it - thanks @Toshea111. To find the boundary boxes, the most straightforward might be if used input four key-points (anterior/posterior and the laterals) or two key-points (anterior/posterior) + some user-defines extra metric space to get the boxes. Once done, we could do a few revisions after you're feedback and you've tried it, I can see how this could be useful for others. \r\n\r\nThe alternatives that would work for you are to extract the black blobs from the white background (e.g. cv2.findContours), or find the animal boundaries through motion (e.g., cv2.calcOpticalFlowPyrLK). You can also find the animals with object segmentation (YOLOv5 is nice). But not sure this would generalize to other setups, but in case this doesn't work out a few things to try.", + "created_at": "2022-11-08T13:23:20Z", + "author": "sronilsson" + }, + { + "body": "Much appreciated, either of those initial setups would work well for what I am after, and I would be happy to 'beta test' any additions and provide feedback. I have some limited experience with YOLOv5s, however this system would allow me to feed my data into an established pipeline.\r\n\r\nLet me know if you have any additional questions in the meantime.", + "created_at": "2022-11-08T14:16:29Z", + "author": "Toshea111" + }, + { + "body": "Sounds good, typed something up based on [shapely polygons with use-defined buffers](https://shapely.readthedocs.io/en/stable/manual.html) thats relative quick and seems to work. Not sure if I am missing something for non-shape shifters like your species where all the body-parts are parallel line like though.. any chance you could share a raw video and some pose-estimation data, just a snippet, don't need a lot. \r\n\r\n![image](https://user-images.githubusercontent.com/34761092/200914782-e322121a-4d7d-4628-a96b-9f8b7e7ebf0c.png)", + "created_at": "2022-11-09T18:48:18Z", + "author": "sronilsson" + }, + { + "body": "Excellent, attached is a short video and corresponding pose estimation data in '.HDF5' format. I can also the provide the data in '.csv' or '.slp' formats, if preferred.\r\n\r\nIf you need a longer video, I have another with ~8000 frames.\r\n\r\n[Termite Test.zip](https://github.com/sgoldenlab/simba/files/9979786/Termite.Test.zip)", + "created_at": "2022-11-10T10:26:49Z", + "author": "Toshea111" + }, + { + "body": "That works cheers. Draft classes for finding, visualizing, and calculating stats for \"anchored\" ROIs live [here](https://github.com/sgoldenlab/simba/tree/master/simba/bounding_box_tools). Need to clean, get in GUI, document, and final method for aggregate stats, maybe some time next week. 
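The gist of the shapely approach is tiny; a minimal sketch (made-up coordinates and buffer size, not SimBA's defaults):

```python
from shapely.geometry import LineString, Point

# Hypothetical anterior-to-posterior key-points for one animal (x, y in pixels)
body_line = LineString([(10.0, 5.0), (14.0, 5.5), (18.0, 6.0), (22.0, 6.5)])

# Pad the skeleton with a user-defined buffer to get the animal-anchored ROI;
# buffering a LineString also behaves for elongated, line-like species
anchored_roi = body_line.buffer(15.0)

# Overlap checks then reduce to geometry predicates
print(anchored_roi.intersects(Point(20.0, 15.0)))  # is another animal's key-point inside?
```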
Speed depend on mainly CPU count and number of animals, but hopefully doable..\r\n", + "created_at": "2022-11-10T12:13:15Z", + "author": "sronilsson" + }, + { + "body": "I look forward to trying it, I will potentially have access to a modelling computer next week, thus I should be able to test it out on both that and my normal machine.", + "created_at": "2022-11-10T15:54:44Z", + "author": "Toshea111" + }, + { + "body": "@Toshea111 - do you have a csv version of the termites test.h5 as well to share?", + "created_at": "2022-11-11T13:44:42Z", + "author": "sronilsson" + }, + { + "body": "Yes, apologies for the delay, see attached.\r\n\r\n[Termite Test Tracks.csv](https://github.com/sgoldenlab/simba/files/9995367/Termite.Test.Tracks.csv)", + "created_at": "2022-11-12T16:08:37Z", + "author": "Toshea111" + }, + { + "body": "Perfect, I really appreciate the rapid turnaround with this.\r\n\r\nThe user-defined settings will be quite important for termites in particular, as they engage in distinct types of trophallaxis at both ends, so to speak.\r\n\r\nI also work with ants and bees, thus I should be able to provide some feedback in terms of performance across species.", + "created_at": "2022-11-13T10:12:50Z", + "author": "Toshea111" + }, + { + "body": "@Toshea111 - i've updated the pip package of simba, to include the class calls in the GUI - you should see it if you do `pip install simba-uw-tf-dev --upgrade`. I typed up a first pass doc tutorial here: https://github.com/sgoldenlab/simba/blob/master/docs/anchored_rois.md\r\n\r\nNot sure if you have got your slp data imported to SimBA, but that needs to happen for this to work. I am also not sure if I have overlooked anything that prohibits this to scale to large datasets but will stress test. If you see anything useful/necessary missing in the docs, can me know and we can see how we can get it in. \r\n\r\nOne thing missing is probably visualizations validating the output statistics (e.g., show intersecting bounding boxes / keypoints in some alternative salient colors...) like when another animals roi or body-part is in another animals ROI the regions goes thicker and change line color or something.\r\n\r\nhttps://user-images.githubusercontent.com/34761092/201708169-fa8718f4-3ab0-4942-8950-967cbcc5ba98.mp4\r\n\r\n\r\n ", + "created_at": "2022-11-14T15:15:04Z", + "author": "sronilsson" + }, + { + "body": "Very impressive, I will conduct a comprehensive trial of the system over the weekend, and let you know if I have any suggestions or feedback.\r\n\r\nI can fork and edit the tutorial document as I go, or provide feedback separately here, whichever would be more convenient for you.", + "created_at": "2022-11-17T15:04:43Z", + "author": "Toshea111" + }, + { + "body": "Thanks! Not much work, the people developing shapley has written most of what is needed. What ever works best for you, I don't mind!", + "created_at": "2022-11-17T23:32:30Z", + "author": "sronilsson" + }, + { + "body": "Having had a decent run through the new anchored ROI features, they work very well for my use-case. I thought it would make sense to divide my feedback into separate lists for issues and errors that I encountered, and suggestions for additions.\r\n\r\nIssues:\r\n1. I experience an error when attempting to calculate aggregate boundary statistics with the minimum bout length set to any value above 0. This returns the error message ‘TypeError: unsupported operand type(s) for /: 'str' and 'float'’. 
If I leave the minimum bout length field empty, the process works as intended.\r\n2. This is more of a compatibility issue rather than a problem with the anchored ROIs, but SIMBA only appears to work with ‘.slp’ files, rather than the main SLEAP export format of ‘.h5’. The result is that the user has to export files from their ‘model predictions’ folder in SLEAP, rather than creating an export package as intended.\r\n3. If a video file is a frame or more longer than the tracking data, this leads to a segmentation of the output visualisations. It would be useful to return a warning that the video is longer than the tracking data in such cases (which can happen in SLEAP), as this would notify the user of the specific issue.\r\n\r\nSuggestions:\r\n1. For the aggregate statistics, it would be useful to add an option for ‘interaction time per bout’, this would then produce a case for each recorded interaction, and its corresponding time in seconds. Such a feature would effectively allow an output of the raw data at the resolution of individual interactions, and users could then aggregate or analyse it as needed.\r\n2. When only the ROIs overlap, it would be more informative to label the ‘animal 2 key point’ category as ‘ROI only’, or something similar, rather than ‘none’.\r\n3. I think you may have already touched upon this, but it would be nice to have the range of features available for the ‘ROI’ and ‘Visualizations’ tabs integrated into the anchored ROI tab. Options such as changing the ROI colours and appearance, and producing basic summary visualisations would be useful in the longer-term.\r\n\r\nI will go through the tutorial document and make any suggestions or edits that I think will help to clarify things for users. Other than that, I really appreciate you putting this together, as it is already a tractable and robust system for generating interaction data.", + "created_at": "2022-11-20T14:13:56Z", + "author": "Toshea111" + }, + { + "body": "Fantastic thank you!!!\r\n\r\nAll of these are quick fixes, except the sleap import, which is a little more involved. A very brief background: \r\n\r\nWhen I first worked with sleap I only found one inference/output file per video - a `.slp` extension file (which is a h5 object). I thought it was a little odd, as this wasn't a user-friendly file. It contained multiple dataframes and dictionaries with the track identities and body-part coordinates in different tables, missing tracks where not easily observed, had to jump through hoops to get it into an interpretable format. \r\n\r\nI have long suspected that there must be an alternative output. Then you sent me the multi-index CSV, which is more in line what I would expect... Can you send me an example of **the main SLEAP export format of .h5** together with an associated video, and I will write a function to import it? This week is packed. But I will get this done asap. 
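For orientation, reading the analysis-style export tends to come down to a couple of datasets; a minimal sketch assuming the dataset names described in the SLEAP analysis docs (check your own file with `list(f.keys())`):

```python
import h5py
import numpy as np

# Read a SLEAP analysis HDF5 export (file name from the example shared above)
with h5py.File("Termite Test.h5", "r") as f:
    node_names = [n.decode() for n in f["node_names"][:]]
    tracks = f["tracks"][:]  # shape: (n_tracks, 2, n_nodes, n_frames)

# Rearrange to a frames-first layout, closer to SimBA's flat per-frame tables
coords = np.transpose(tracks, (3, 0, 2, 1))  # (n_frames, n_tracks, n_nodes, x/y)
print(node_names, coords.shape)
```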
\r\n\r\n\r\n", + "created_at": "2022-11-21T10:47:46Z", + "author": "sronilsson" + }, + { + "body": "That makes sense, I generated the ‘.csv’ file from an ‘.h5’ output using the code provided in this Google Collab: [SLEAP-IO - Convert to CSV.ipynb - Colaboratory (google.com)](https://colab.research.google.com/drive/1EpyTKFHVMCqcb9Lj9vjMrriyaG9SvrPO?usp=sharing).\r\n\r\nFor reference, an outline of the SLEAP ‘.h5’ output format can be found here: [Export Data For Analysis — SLEAP (v1.2.9)](https://sleap.ai/tutorials/analysis.html).\r\n\r\nAttached is an ‘.h5’ output file and the associated video, I have used the same one that I sent previously, for familiarity. Let me know if you experience any issues, or have further questions.\r\n\r\n[Termite Test.zip](https://github.com/sgoldenlab/simba/files/10055877/Termite.Test.zip)", + "created_at": "2022-11-21T11:04:03Z", + "author": "Toshea111" + }, + { + "body": "@Toshea111 fyi if you upgrade through pip, I inserted an option to get detailed data in each interaction bout `DETAILED INTERACTION TABLE`, [example here](https://github.com/sgoldenlab/simba/blob/master/misc/detailed_aggregate_statistics_anchored_rois_20221121204633.csv), fixed the int/str mix up, replaced the None with `ROI_ONLY`, and you can [specify the color/size of each animals ROI in visualization](https://github.com/sgoldenlab/simba/blob/master/docs/anchored_rois.md#visualizing-animal-anchored-rois). The CSV/H5 import will come!", + "created_at": "2022-11-22T20:49:24Z", + "author": "sronilsson" + }, + { + "body": "Excellent, I'll have a go with the new features later this week, and let you know if any issues arise.", + "created_at": "2022-11-23T13:43:34Z", + "author": "Toshea111" + }, + { + "body": "Another potential issue that I have encountered is when using tracking data in which the number of tracks varies over time, due to individuals leaving and re-entering the frame. As the config file requires the total number of individuals to be defined, it then returns an error when the number of tracks in frame deviates from this value.\r\n\r\nIt would be useful to have an option that allowed for such variation in visible tracks, as there are applications where knowing the frequency of interactions or behaviour is useful, even when individual identity cannot be assigned. There may already be a way to accommodate this, but I have not yet found a solution.", + "created_at": "2022-12-01T14:07:16Z", + "author": "Toshea111" + }, + { + "body": "Thanks @Toshea111 is it throwing the errorsat import, or during anchored ROI methods?\r\n\r\nOne question, I tried to use the colab nb and your h5, but was hitting a lot of errors and eventually had to put it aside. Could you help me by sending the entire CSV for the video you sent, and I will work with that CSV to write the SLEAP CSV import methods? ", + "created_at": "2022-12-01T14:38:15Z", + "author": "sronilsson" + }, + { + "body": "I tried using another video with variable track numbers and it worked, thus I think the previous issue was my own error rather than anything else. The only suggestion I would have is to remove the tracks for any individuals that are not in frame, because currently they appear as '0,0' coordinates.\r\n\r\nLooking back through the Colab notebook, I can see the problem. 
I have now made an updated branch with edits that should make it straightforward to use:\r\n\r\nhttps://colab.research.google.com/github/Toshea111/simba/blob/master/Converting_SLEAP_Analysis_HDF5_to_CSV_Updated.ipynb\r\n\r\nIn case you continue to experience issues, I have also attached a folder with the original ‘.h5’ output file, converted '.csv' file, and the associated video. Let me know if you need any additional information during the process.\r\n\r\n[Termite Test.zip](https://github.com/sgoldenlab/simba/files/10146531/Termite.Test.zip)", + "created_at": "2022-12-03T13:56:40Z", + "author": "Toshea111" + }, + { + "body": "@Toshea111 there is an option to import csv files from sleap now in the dropdowns. The caveat is that I haven't had time to challenge it much, it worked on the single file you sent but that's all I know for now lol, got to test it a bit more maybe next week. \r\n\r\n\"Untitled\r\n", + "created_at": "2022-12-15T12:42:03Z", + "author": "sronilsson" + }, + { + "body": "Much appreciated, I'll try inputting some new data to see if I can break it.", + "created_at": "2022-12-16T16:37:29Z", + "author": "Toshea111" + }, + { + "body": "I have had a go with the SLEAP '.csv' format option, and I am running into the same error each time, specifically when I try to import the .csv file.\r\n\r\nThe error message returned is: 'ValueError: Length mismatch: Expected axis has 12 elements, new values have 24 elements'.\r\n\r\nA variation of this occurs for different files with different numbers of tracks, although the example above is for a single track. I assume it means that the .csv file I am uploading contains more information or tracks than expected?\r\n\r\nOne thing to note is that the track is not continuously in frame for the whole video, perhaps that is an issue in itself?", + "created_at": "2022-12-28T14:30:58Z", + "author": "Toshea111" + }, + { + "body": "Thanks for testing @Toshea111! I tried to replicate with the file I have (deleting all or some tracks for subsets of frames) but couldn't, would you mind sharing a CSV like the one with a single track that is causing issues? \r\n\r\nAlso I noticed in the notebook you shared a while back that the output was transposed relative to the CSV I have been troubleshooting with (attached below). Is the SimBA input data still expected to look like the attached file, or are each individual animal body-part represented as three columns without a track index? \r\n\r\n[termites_1.csv](https://github.com/sgoldenlab/simba/files/10315446/termites_1.csv)", + "created_at": "2022-12-28T17:11:11Z", + "author": "sronilsson" + }, + { + "body": "No problem, attached is a .csv file with a single track like the one that was causing issues, I can also provide a version with all the tracks, if needed. Note that the track is not present in all frames, as the hornet disappears from view several times.\r\n\r\nThe format I have been using is the same as that of 'termites_1.csv', do you have a link to the version of the notebook that you mentioned? The one that I am currently using does not appear to transpose the data as you describe, and the attached output is directly from this notebook.\r\n\r\n[Hornet Test Track.csv](https://github.com/sgoldenlab/simba/files/10327314/Hornet.Test.Track.csv)", + "created_at": "2022-12-31T12:14:17Z", + "author": "Toshea111" + }, + { + "body": "Thanks @Toshea111, i'll test with this file and let you know - [THIS](https://colab.research.google.com/drive/1EpyTKFHVMCqcb9Lj9vjMrriyaG9SvrPO?usp=sharing) is the notebook I saw. 
\r\n\r\nIn this screengrab it looks like there is a single row index, but multiindex headers, while `termites_1.csv` has the inverse with multiindex rows and a single header. \r\n\r\n![image](https://user-images.githubusercontent.com/34761092/210176430-acaf711f-fcde-4279-b7b2-e8d06c710121.png)", + "created_at": "2023-01-01T15:36:30Z", + "author": "sronilsson" + }, + { + "body": "That's strange, as you say it looks to be transposed, which is not the output type that I have been working with.\r\n\r\nTo stay on the safe side, here is a link to the updated version of the notebook that I am currently using:\r\n\r\nhttps://github.com/Toshea111/sleap/blob/develop/docs/notebooks/Convert_HDF5_to_CSV_updated.ipynb\r\n\r\nLet me know if any further issues arise.", + "created_at": "2023-01-03T16:59:44Z", + "author": "Toshea111" + }, + { + "body": "Hi @Toshea111 - sorry for super late reply, I fixed a method that works with single animal - it doesn't have to be transposed in those cases and didn't think of that. When you've got a chance could you try it on your end again? If it doesn't would, could you please send me the file it doesn't work on? ", + "created_at": "2023-01-16T14:17:01Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Plot threshold is taking too long", + "body": "**Describe the bug**\r\nHello,\r\nI have finished most steps with SimBA (after importing CSVs from DLC) and now I got to the \"plot threshold\" stage. It seems to work fine, although it takes forever (more accurately approximately a day for a video of 30min and 30fps = 54,000 f). Other stages like \"Gantt plot\" or \"Data plot\" took much less (a day or two for all the videos). \r\nWhat do you think can be the problem and how can I fix it? \r\nThanks in advance :)\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to \"Visualizations\"\r\n2. Click on 'Plot threshold'...\r\n\r\n![image](https://user-images.githubusercontent.com/86400083/190912010-7cb76dc3-8d98-4fb1-b628-577250ba715d.png)\r\n\r\n**Screenshots**\r\n![image](https://user-images.githubusercontent.com/86400083/190912053-73004242-80d1-4b6c-97ea-f9b710428096.png)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Python Version: 3.7.6\r\n - Are you using anaconda? yes\r\n \r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "Urimons", + "reaction_cnt": 0, + "created_at": "2022-09-18T14:25:51Z", + "updated_at": "2022-09-29T19:56:13Z", + "author": "Urimons", + "comments": [ + { + "body": "Hi @Urimons! Thanks for reporting. \r\n\r\nIf you update simba with pip install simba-uw-tf-dev --upgrade, do you still see the same runtime issue? \r\n\r\nA bit of background: most plotting code in SimBA has had speed issues, it's creating 54k graphs and can be expected to be slow as it runs on single core. However, it shouldn't be noticable slower than other graph functions... I'm working on getting in multiprocessing, but I have not got very far. In fact I have only really completed multiprocessing of Gantt plot. \r\n\r\nIf you update Simba and still see speed issue, please let me know and I will do the probability plots next. ", + "created_at": "2022-09-19T11:01:27Z", + "author": "sronilsson" + }, + { + "body": "Thanks! 
\r\nI upgraded and it seems to run a bit faster, it runs 216K frames (4 videos) per day intead of 54K frames (1 video) per day that was before, still almost 6-7 times slower than other plots...", + "created_at": "2022-09-20T09:03:01Z", + "author": "Urimons" + }, + { + "body": "Thanks @Urimons - I've inserted multiprocessing for probability visualizations: does it go any quicker if you do `pip install simba-uw-tf-dev --upgrade` to version `0.99.1`? \r\n\r\nThe tricky bit I find for creating videos/frames on many cores is dynamically avoiding MemoryErrors and overloading CPUs when I can't anticipate what machine the users have, so now I am conservative in the multiprocessing code. Should nevertheless be noticable faster. Please let me know how it goes. ", + "created_at": "2022-09-20T13:23:31Z", + "author": "sronilsson" + }, + { + "body": "Now it runs untill it actually surpass the amount of existing frames in a single video and then stuck... \r\n![WhatsApp Image 2022-09-20 at 23 04 23](https://user-images.githubusercontent.com/86400083/193022025-aa33f456-bcec-45ef-baeb-79215037addf.jpeg)\r\n", + "created_at": "2022-09-29T11:41:07Z", + "author": "Urimons" + }, + { + "body": "Thanks @Urimons, the count shouldn't matter, but will fix. \r\n\r\nThe error could be that conda ffmpeg isn't installed. If you type `conda install -c conda-forge ffmpeg` and try, does that fix it? ", + "created_at": "2022-09-29T19:56:13Z", + "author": "sronilsson" + } + ] + }, + { + "title": "XGBoost available as a model but not fully implemented", + "body": "Hi,\r\n\r\nI noticed that XGBoost is available as a model but has not been fully implemented. Is there development progress that has been made that we could sync with? I'm willing to implement this feature to finish our project if necessary, a bit of assistance with testing and verifying all code paths have been covered would be appreciated in this case.\r\n\r\nAlternatively, if we load a pickled model that adheres to the sklearn interface, will that work?\r\n\r\nPS: I tried to login to gitter to ask questions there but was not able to. It says:\r\n`We're very sorry, but we're unable to log you in right now. (Forbidden)`\r\n\r\n", + "user": "suntzuisafterU", + "reaction_cnt": 0, + "created_at": "2022-09-12T19:39:35Z", + "updated_at": "2022-09-13T02:43:09Z", + "author": "suntzuisafterU", + "comments": [ + { + "body": "Hi @suntzuisafterU! You’re right, we did originally plan for this… but there has not been any work on this for quite some time.\r\nThe code for creating single classifiers is [here](https://github.com/sgoldenlab/simba/blob/a6436c223d2e5442a3b676541af3df28865cbfe2/simba/train_single_model.py#L79), there is only a `if self.algo == \"RF\"` at the moment. For grid searching models it’s this [method](https://github.com/sgoldenlab/simba/blob/a6436c223d2e5442a3b676541af3df28865cbfe2/simba/train_mutiple_models_from_meta_new.py#L74) which just implicitly assumes that the input is `RF`. Both these would have to be updated to accept other algorithms. We’d have to update the GUI user-menus in SimBA to become dynamic where the hyperparameter entry boxes change depending on bagging vs boosting etc, as well has the structures holding the different hyperparameters when users train multiple models in a grid search. That kind of held me back from writing the code, that plus that I don’t expect to see any drastic model performance improvement in our use cases when shifting from RF to xgboost.. 
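For what it's worth, the dispatch itself would be small; a hypothetical sketch of widening the `if self.algo == "RF"` branch (names and hyperparameter handling are illustrative only, not the actual SimBA code):

```python
from sklearn.ensemble import RandomForestClassifier

def create_classifier(algo: str, hyperparameters: dict):
    # Both classes expose the same fit()/predict_proba() interface downstream
    if algo == "RF":
        return RandomForestClassifier(**hyperparameters)
    elif algo == "XGBoost":
        from xgboost import XGBClassifier  # optional dependency
        return XGBClassifier(**hyperparameters)
    raise ValueError(f"Unsupported algorithm: {algo}")
```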
\r\n\r\nIf you have a pickle and want to run it through SimBA, that might work as long as there is a SimBA`project_config.ini` that hold the path of classifier, as well as some parameters (threshold, minimum bout length). [This is the class SimBA uses for inference](https://github.com/sgoldenlab/simba/blob/master/simba/run_model_new.py). You see SimBA just picks the paths that are defined in the `project_config.ini`. `predict_proba` is also the name of the method in xgboost I believe, so might work out of the box but I haven’t tried it. ", + "created_at": "2022-09-12T20:19:37Z", + "author": "sronilsson" + }, + { + "body": "Thanks for the info! I'll update when we have finished the analysis, and open a PR if any useful work is done.", + "created_at": "2022-09-13T02:43:09Z", + "author": "suntzuisafterU" + } + ] + }, + { + "title": "Impossible to import h5 elipse data from DLC", + "body": "Hello,\r\n\r\nSorry for reporting this small bug but I can't import my el.h5 file, the terminal reports:\r\n\r\n![H5-error](https://user-images.githubusercontent.com/66886884/188630959-812469e3-f21e-4cd9-8ed4-ab2208226e7c.png)\r\n\r\nWhat could be the error?\r\nFYI, I'm using 0.95.9 simba version on windows.\r\nThank you, \r\nBest,", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2022-09-06T12:09:07Z", + "updated_at": "2022-09-07T11:03:39Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "I precise that it's a second importation of h5 file for the same video, since the first h5 contained some mistakes. I erased the coresponding csv file (from the first h5 file) in the csv folder. \r\n ", + "created_at": "2022-09-06T12:23:31Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli thanks for reporting! Is it possible that you have some hidden files located within the directory you are importing the files from, or hidden files within your SimBA `project_folder/videos` directory? https://support.microsoft.com/en-us/windows/show-hidden-files-0320fe58-0117-fd59-6851-9b7f9840fdb2#:~:text=Select%20the%20Start%20button%2C%20then,drives%2C%20and%20then%20select%20OK.", + "created_at": "2022-09-06T12:44:50Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the answer. \r\nI checked and I don't have any hidden in these directories", + "created_at": "2022-09-06T12:53:31Z", + "author": "DorianBattivelli" + }, + { + "body": "I bumped into this error myself a few times when I was troubleshooting something different. It happened when I have a file in the videos folder that does not have a file extension (or only an extension, no file-name) and I was about to insert a fix. Here, in the image, for example, there is a file called `.DS_store` in my videos directory that is hidden which is causing the code to fail (it can't find the video's name, only extension). I will insert a fix for this now, and let's see if it fixes the issue on your end, if not we can dig more. \r\n\r\n\r\n\r\n\r\n![image](https://user-images.githubusercontent.com/34761092/188641974-8fb76144-766e-4933-82e6-163d35fe6b28.png)\r\n", + "created_at": "2022-09-06T13:02:35Z", + "author": "sronilsson" + }, + { + "body": "@DorianBattivelli - if you update to `0.97.4` - do you still see the error? 
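The kind of filtering I mean can be sketched like this (a hypothetical helper, not the code that shipped):

```python
import os

def list_video_files(video_dir: str):
    """Return (name, extension) pairs, skipping sub-directories and
    hidden or extension-less files such as '.DS_store'."""
    found = []
    for file_name in os.listdir(video_dir):
        path = os.path.join(video_dir, file_name)
        name, ext = os.path.splitext(file_name)
        if not os.path.isfile(path) or file_name.startswith(".") or not ext:
            continue
        found.append((name, ext.lower()))
    return found
```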
", + "created_at": "2022-09-06T13:09:47Z", + "author": "sronilsson" + }, + { + "body": "Thank you, unfortunatly the update did not fix the bug.\r\nHere the files and corresponding paths I use to import the h5 file:\r\n![Schermata 2022-09-06 alle 16 28 16](https://user-images.githubusercontent.com/66886884/188661682-0d73633c-5620-44cb-a01c-298536819a9e.png)\r\n", + "created_at": "2022-09-06T14:30:01Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks @DorianBattivelli - I cant see the entire file explorer window. To troubleshoot, could you also tick the `File name extensions` box next to the `Hidden items` box and send me the screengrab? \r\n\r\nIf you move the `Bh5` folder to a different location, does the error persist? ", + "created_at": "2022-09-06T14:38:34Z", + "author": "sronilsson" + }, + { + "body": "I also tried by moving the folder on the desktop, and the problem persists.\r\n![Schermata 2022-09-06 alle 16 42 06](https://user-images.githubusercontent.com/66886884/188664689-eac23623-6418-4077-81a2-47d3a3a0832d.png)\r\n\r\nI precise that I do not import the video since it is already present in the video folder. Could that create the error? ", + "created_at": "2022-09-06T14:43:13Z", + "author": "DorianBattivelli" + }, + { + "body": "Yes, that should not cause an issue. In version `0.97.4`, I inserted a print statement, that shows all the files SimBA finds in the video folder, before it starts to scan which one belongs to your `el.h5` file. \r\n\r\nDid you see that print statement? ", + "created_at": "2022-09-06T14:50:43Z", + "author": "sronilsson" + }, + { + "body": "I also tried by moving the folder on the desktop, and the problem persists.\r\n![Schermata 2022-09-06 alle 16 42 06](https://user-images.githubusercontent.com/66886884/188664689-eac23623-6418-4077-81a2-47d3a3a0832d.png)\r\n\r\nI precise that I do not import the video since it is already present in the video folder. Could that create the error? \r\n\r\n> Yes, that should not cause an issue. In version `0.97.4`, I inserted a print statement, that shows all the files SimBA finds in the video folder, before it starts to scan which one belongs to your `el.h5` file.\r\n> \r\n> Did you see that print statement?\r\n\r\nNot sure to understand, I should find the statement in the error that returns the terminal? ", + "created_at": "2022-09-06T14:59:55Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi - sorry I should have been clearer (I also put the print statement in the wrong place). First, update to simba `0.97.5` with `pip install simba-uw-tf-dev --upgrade`. \r\n\r\nThen, when you import your data, SimBA prints out all of the files that are found inside your `project_folder/videos` directory, as is marked in the red box in the screengrab below. In your case, one of these files listed, does not have a file ending, or a filename. SimBA tries to identify the video file format, and the file-name, and it fails. 
This would happen for example if one if the files inside your `project_folder/videos` directory begins with the character `.`\r\n\r\n![Untitled 3](https://user-images.githubusercontent.com/34761092/188674757-0a4b4a31-d515-4206-a5b6-aa97cdd1b98f.png)\r\n\r\n", + "created_at": "2022-09-06T15:28:01Z", + "author": "sronilsson" + }, + { + "body": "After updating, here what I get:\r\n![Schermata 2022-09-06 alle 17 50 04](https://user-images.githubusercontent.com/66886884/188680056-f3c164b7-8d12-454b-bb1e-26e1b34ebb80.png)\r\n", + "created_at": "2022-09-06T15:51:03Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - I've inserted some code that should filter out all subdirectories and hidden files, and show you the name of the offending file if you still hit an error. Can you update SimBA to `0.97.5` and let me know how it goes?", + "created_at": "2022-09-06T17:11:19Z", + "author": "sronilsson" + }, + { + "body": "Amazing, it solved the issue, thank you very much! ", + "created_at": "2022-09-06T22:16:08Z", + "author": "DorianBattivelli" + }, + { + "body": "Well actually I am stuck at the next tab, when trying to set the video parameters:\r\n![Schermata 2022-09-07 alle 00 20 23](https://user-images.githubusercontent.com/66886884/188750907-95ad73d6-710e-40d7-823e-bfcbb152fe77.png)\r\n", + "created_at": "2022-09-06T22:23:33Z", + "author": "DorianBattivelli" + }, + { + "body": "Alright - hang on, I will insert a more solid fix. There is likely to be some hidden file in that folder.", + "created_at": "2022-09-07T00:16:06Z", + "author": "sronilsson" + }, + { + "body": "I'm sorry I don't have a fix. But hopefully I have an update that can help us figure out what is going on. If you update to version `0.97.7`, and repeat the same process, what is the error message you see in the main SimBA terminal? \r\n\r\nWhat you should see something like this - `SIMBA ERROR: ThisFilename is not a valid filepath`", + "created_at": "2022-09-07T00:31:51Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the update, Indeed the error message is as you said\r\n\r\nLoading F:/Territorial_Arena/Paper/Protocol_1/CD1/Simba/B1-MS_US/project_folder/project_config.ini...\r\nSIMBA ERROR: F:/Territorial_Arena/Paper/Protocol_1/CD1/Simba\\B1-MS_US\\project_folder\\videos\\Bh5 is not a valid filepath\r\n\r\n", + "created_at": "2022-09-07T00:44:26Z", + "author": "DorianBattivelli" + }, + { + "body": "Okm annd removing the h5 folder fixed the problem ,) thank you!", + "created_at": "2022-09-07T00:46:47Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Unreliable ROI quantification", + "body": "Dear Simba developers,\r\n\r\nI'm using Simba-UW-tf-dev, version 0.94.2 from maDLC h5-elispe files. I'm tracking 2 mice simultaneously.\r\nI'm facing a strange output in my Detailed_ROI_data file. \r\nWhile I can clearly see my mouse entering a given ROI, it does not appear into my Detailed_ROI_data csv file.\r\n\r\nI checked the labeling live on DEEPLABCUT and found that there is neiter ID swap nor missing tracking when the animal enters the ROI.\r\nSo how is it possible that Simba failes to track the mouse into this ROI? \r\nI proceeded to smoothing transformation (300) and to outlier correction with recomanded values (1 for movement / 1.5 for location).\r\nCould these steps make some labeling missing for my final analysis? 
I checked the coordinates in the CSV file, and see no cell with \"0\" at the time point when the animal occupies the ROI: \r\n![CSV](https://user-images.githubusercontent.com/66886884/185939399-3c9fd41e-2c74-4cda-8aef-b4218ed1f068.png)\r\n\r\nHowever, I'm wondering what the last column \"center_2_p\" means. What is the meaning of the 1?\r\n\r\nI wish I could use the \"visualize ROI tracking\" function to check the signal that Simba analyzes, but the program returns the following error when I run the command:\r\n![Simba-visualize_error](https://user-images.githubusercontent.com/66886884/185937872-58379fed-7d02-48b7-b7c6-219b10d43080.png)\r\n\r\nDo you have some suggestion to solve this issue?\r\nThank you very much,\r\nBest\r\nDorian\r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2022-08-22T14:02:05Z", + "updated_at": "2022-08-26T02:12:51Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli! Let me see if I can insert something that catches this error with the visualization, so we can see what is going on. My first bet would be that the outlier correction for some reason is too stringent, and body-part locations that should not be corrected have been corrected, hence placing your animal in a different part of the environment than it actually is. \r\n\r\nThe column `center_2_p` is for the center body-part of animal 2, and its pose-estimation probability score (as in `p`). 1 means that the pose-estimation model is 100% certain that this is the correct location of the body-part. \r\n\r\n \r\n\r\n", + "created_at": "2022-08-22T14:13:15Z", + "author": "sronilsson" + }, + { + "body": "Also, what I noticed is that my Detailed_ROI_data file differed before and after I ran the smoothing correction. \r\nBefore the correction, I found one period when the animal occupied the ROI (even if not at the time I'm mentioning above), while after the correction, this ROI is reported as never being occupied at all", + "created_at": "2022-08-22T14:14:40Z", + "author": "DorianBattivelli" + }, + { + "body": "A bit of background: The error you see when trying to visualize the data comes from SimBA finding multiple ROIs with the same shape for a single video with the same name. I.e., there are two rectangles named \"rectangle_1\" in video 1. I don't know how that can sneak in, but I will check it out. ", + "created_at": "2022-08-22T14:23:40Z", + "author": "sronilsson" + }, + { + "body": "It is actually true; for example, I have 4 corners in my box 1. Since I do not care about each corner per se, but only about time spent in whatever corner of this box, I labeled each Corner_box1", + "created_at": "2022-08-22T14:26:26Z", + "author": "DorianBattivelli" + }, + { + "body": "Alright, SimBA can't group ROIs by name at the moment. I will nevertheless insert a fix that should get around the error, but it will be a day or so. In the meantime, you could rename them (corner1, corner2..) and it should work.", + "created_at": "2022-08-22T14:57:54Z", + "author": "sronilsson" + }, + { + "body": "@DorianBattivelli - was easier than I thought. Can you update simba to `0.95.5` with `pip install simba-uw-tf-dev --upgrade` and check if you can visualize it now? I inserted a warning when your type of data comes in:\r\n\r\n\r\nSIMBA WARNING: Some of your ROIs with the same shape has the same names. E.g., you have two rectangles named \"My rectangle\". SimBA prefers ROI shapes with unique names. 
SimBA will keep one of the unique shape names and drop the rest\r\n\r\n\r\nmeaning, only one of the corners will be plotted. ", + "created_at": "2022-08-22T16:05:48Z", + "author": "sronilsson" + }, + { + "body": "I just updated, but apparently my SimBA does not work anymore ^^\r\n![Schermata 2022-08-22 alle 18 20 07](https://user-images.githubusercontent.com/66886884/185970252-354fb88d-b6ab-4d20-b423-0edad32f243b.png)\r\n\r\n", + "created_at": "2022-08-22T16:21:31Z", + "author": "DorianBattivelli" + }, + { + "body": "My bad! Hang on @DorianBattivelli ", + "created_at": "2022-08-22T16:23:15Z", + "author": "sronilsson" + }, + { + "body": "@DorianBattivelli can you try again with 0.95.6?\r\n", + "created_at": "2022-08-22T16:29:46Z", + "author": "sronilsson" + }, + { + "body": "Thank you! \r\nHowever, it is still returning an error when I try to run the visualization function:\r\n![Schermata 2022-08-22 alle 18 31 47](https://user-images.githubusercontent.com/66886884/185972553-f12c4c72-a467-4dd9-a458-9fd6669a90c9.png)\r\n\r\n", + "created_at": "2022-08-22T16:32:56Z", + "author": "DorianBattivelli" + }, + { + "body": "haha, alright, another typo... :) hang on. ", + "created_at": "2022-08-22T16:39:12Z", + "author": "sronilsson" + }, + { + "body": "Now we are up to 0.95.7.. can you check for any more typos? :) ", + "created_at": "2022-08-22T16:41:07Z", + "author": "sronilsson" + }, + { + "body": "Thanks! But more typo fixes are still needed :)\r\n![Schermata 2022-08-22 alle 18 44 50](https://user-images.githubusercontent.com/66886884/185974854-222647f5-6822-46d1-b9d9-c9a325333d47.png)\r\n\r\n", + "created_at": "2022-08-22T16:45:39Z", + "author": "DorianBattivelli" + }, + { + "body": "This one should be fixed, can you do `pip show simba-uw-tf-dev` and make sure it reads 0.95.7? \r\n", + "created_at": "2022-08-22T16:47:56Z", + "author": "sronilsson" + }, + { + "body": "Well done, it's working, generating the video now... thank you, I'll keep you updated on what I see.", + "created_at": "2022-08-22T16:54:06Z", + "author": "DorianBattivelli" + }, + { + "body": "Cheers! 👍 Let me know what you can see. ", + "created_at": "2022-08-22T16:54:33Z", + "author": "sronilsson" + }, + { + "body": "Well, very weird. \r\nThe tracking is good: we are interested in the mouse (up-mouse) visiting the corner (Corndown) indicated by the red arrow. And it's good, Up-mouse is tracked into a corner but it's not quantified (unfortunately I don't see the label of the ROI - but playing the video I see that the entrance of the mouse into this yellow corner does not trigger any ROI).\r\n\r\nAnother strange fact is that the shape of the central orange square in the lower compartment (shown with purple arrow) has a different shape/location\r\n![Schermata 2022-08-22 alle 18 58 35](https://user-images.githubusercontent.com/66886884/185979282-2435e8fb-5f51-4efd-968f-52a670477242.png)\r\n\r\n\r\n\r\n than what I drew:\r\n![Schermata 2022-08-22 alle 18 59 44](https://user-images.githubusercontent.com/66886884/185979355-f47a369c-bdb7-4c91-83ab-bad2646b4ad2.png)\r\n\r\n", + "created_at": "2022-08-22T17:09:05Z", + "author": "DorianBattivelli" + }, + { + "body": "Interesting and thanks for reporting! For the missed ROI detection, as there are a few ROIs, is there any chance that there is any other ROI (or multiple ROIs) named `CornDown`? If you watch the animal enter the other corners, do the counters move as expected? Another possibility is that there are issues when the ROIs are overlapping and I will look into this now.. 
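A sketch of the name-deduplication behavior the warning a few comments above describes (illustrative pandas only; the column names are assumptions, not SimBA's actual schema):

```python
import pandas as pd

# Four ROIs where three rectangles share the name "Corner_box1"; keeping the
# first occurrence per (video, shape type, name) mirrors the warning above.
rois = pd.DataFrame({
    "Video": ["Video1"] * 4,
    "Shape_type": ["rectangle"] * 4,
    "Name": ["Corner_box1", "Corner_box1", "Corner_box1", "Center"],
})
deduped = rois.drop_duplicates(subset=["Video", "Shape_type", "Name"], keep="first")
print(deduped)  # one "Corner_box1" row remains, plus "Center"
```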
\r\n\r\nFor the rectangle oddity, there was a bug a couple of months ago, where the bottom right and top left x and y coordinates of the rectangles could be confused when the shapes were modified. But this bug would probably have been seen in the `draw` interface. I am not sure it is applicable as you may have created the ROIs more recently.\r\n", + "created_at": "2022-08-22T17:36:45Z", + "author": "sronilsson" + }, + { + "body": "No, overlapping ROIs seem to work fine.. could you do me one favor? Re-draw your ROIs in the latest SimBA, as a test, and see if it works, if you have not done so already?", + "created_at": "2022-08-22T17:48:47Z", + "author": "sronilsson" + }, + { + "body": "No need! I was able to replicate it. Let's see if I can figure it out. ", + "created_at": "2022-08-22T21:25:35Z", + "author": "sronilsson" + }, + { + "body": "Got it working on your video, will push an update tomorrow. ", + "created_at": "2022-08-23T01:57:05Z", + "author": "sronilsson" + }, + { + "body": "Amazing, I'll wait for your update :)\nThank you ", + "created_at": "2022-08-23T06:37:47Z", + "author": "DorianBattivelli" + }, + { + "body": "I tested it by drawing your ROIs out from the project you sent me a while back, and it looks like it should. That said, there are plenty of ROIs, so not all of them can be displayed in the side bar. SimBA wants to print 72 lines of text and not all can be viewed. \r\n\r\nThe bad news is that you will have to delete your ROIs in your project, and draw them again for your videos. \r\n\r\nWould you mind checking with version 0.95.8 this time, and letting me know if it works? \r\n\r\n\r\n![image](https://user-images.githubusercontent.com/34761092/186093091-1c11e52f-400d-4a30-9346-5f6bc3445a40.png)\r\n\r\n", + "created_at": "2022-08-23T07:27:17Z", + "author": "sronilsson" + }, + { + "body": "Perfect, thank you very much, it will take me a while to draw the ROIs, but I'll keep you updated once I get data with this new version, \r\n\r\nBest", + "created_at": "2022-08-23T08:21:43Z", + "author": "DorianBattivelli" + }, + { + "body": "Try the duplicate and then rename methods if you haven't already. It can go a lot quicker and ensures ROIs are aligned and the same size. ", + "created_at": "2022-08-23T08:31:48Z", + "author": "sronilsson" + }, + { + "body": "We tried this already; the problem is that the perspective is not the same, so the distances in the different parts of the plane are not proportional.\r\nI'm facing 2 bugs with this last version:\r\n- I can't draw circles - as soon as I select circle and then 'draw' the program crashes\r\n- For each new ROI to draw, the visualization window where we draw expands to full screen, taking wrong dimensions, and making us lose time readjusting...\r\n\r\nCan you please fix these bugs? Thank you!", + "created_at": "2022-08-23T08:48:16Z", + "author": "DorianBattivelli" + }, + { + "body": "Will fix! I inserted fixes for the two bullet points as they worked better for me, but I'm on a mac. I will remove them as you have issues on Windows. ", + "created_at": "2022-08-23T09:32:15Z", + "author": "sronilsson" + }, + { + "body": "@DorianBattivelli - can you check again with 0.95.9? Fingers crossed!", + "created_at": "2022-08-23T09:50:09Z", + "author": "sronilsson" + }, + { + "body": "I'll try it now!\r\nDoes this mean that SimBA works on Mac??", + "created_at": "2022-08-23T09:51:05Z", + "author": "DorianBattivelli" + }, + { + "body": "Yes, and Linux. There is no PC available for me any more, so I have maintained it on Mac for some months. 
", + "created_at": "2022-08-23T09:54:00Z", + "author": "sronilsson" + } + ] + }, + { + "title": "ParseError during Project Config", + "body": "**Describe the bug**\r\nHi, sorry I'm to bring up another issue for you, I'm just having trouble getting this to work. I have simba installed, and I was able to create the user defined pose config fine. However, once I fill out the rest of the project config info and click generate project config which triggers an error on the command line. I don't think the files are being generated correctly because if I try to move on and import videos and tracking, I get further errors.\r\n\r\n```\r\npandas.errors.ParserError: Error tokenizing data. C error: Expected 16 fields in line 14, saw 21\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open simba\r\n2. Click on 'create a new project '\r\n3. add user defined pose with 1 animal and 21 bp\r\n4. add project path and name\r\n5. add number of classifiers and name then\r\n6. click generate project config\r\n7. see error\r\n\r\n**Expected behavior**\r\nproject config files are generated correctly\r\n\r\n**Desktop (please complete the following information):**\r\nOS: [e.g. iOS] Rocky Linux 8.5 (Green Obsidian)\r\nPython Version [e.g. 3.6.0] 3.6.10\r\nAre you using anaconda? yes\r\n \r\n\r\n**Additional context**\r\nI'm using FastX3 to access a cluster where I'm running simba. No messages were created in the gui. The full terminal output from the command line is listed below.\r\n\r\n
\r\n\r\n
Terminal Output\r\n\r\n\r\n```\r\n(base) [u1208563@notchpeak1:~]$ salloc --time=4:00:00 --account=notchpeak-gpu --partition=notchpeak-gpu --nodes=1 --ntasks=1 --mem=60G --gres=gpu:2080ti:1\r\nsalloc: Pending job allocation 5167955\r\nsalloc: job 5167955 queued and waiting for resources\r\nsalloc: job 5167955 has been allocated resources\r\nsalloc: Granted job allocation 5167955\r\nsalloc: Waiting for resource configuration\r\nsalloc: Nodes notch271 are ready for job\r\n(base) [u1208563@notch271:~]$ conda activate simba\r\n(simba) [u1208563@notch271:~]$ simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/site-packages/simba/SimBA.py\", line 3013, in make_projectini\r\n file_type=self.csvORparquet.getChoices())\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/site-packages/simba/project_config_creator.py\", line 30, in __init__\r\n self.create_configparser_config()\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/site-packages/simba/project_config_creator.py\", line 174, in create_configparser_config\r\n bp_df = pd.read_csv(bp_dir_path, header=None)\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/site-packages/pandas/io/parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/site-packages/pandas/io/parsers.py\", line 463, in _read\r\n data = parser.read(nrows)\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/site-packages/pandas/io/parsers.py\", line 1154, in read\r\n ret = self._engine.read(nrows)\r\n File \"/uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba/lib/python3.6/site-packages/pandas/io/parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 881, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 896, in pandas._libs.parsers.TextReader._read_low_memory\r\n File \"pandas/_libs/parsers.pyx\", line 950, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 937, in pandas._libs.parsers.TextReader._tokenize_rows\r\n File \"pandas/_libs/parsers.pyx\", line 2132, in pandas._libs.parsers.raise_parser_error\r\npandas.errors.ParserError: Error tokenizing data. C error: Expected 16 fields in line 14, saw 21\r\n\r\n```\r\n\r\n
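For context on the ParserError in the traceback above: `pd.read_csv` infers the expected column count from the first rows, so a later row with more fields (16 expected, 21 seen) aborts parsing. A tolerant sketch using the standard `csv` module to locate the ragged line (the filename is the one from the traceback; illustrative only, not the actual fix that shipped):

```python
import csv

# Read the file row by row; unlike pd.read_csv, the csv module accepts rows
# of different lengths, which makes it easy to find the offending line.
with open("pose_config_names.csv", newline="") as f:
    rows = list(csv.reader(f))

# Report every row whose field count differs from the first row's.
for i, row in enumerate(rows, start=1):
    if len(row) != len(rows[0]):
        print(f"line {i} has {len(row)} fields, expected {len(rows[0])}")
```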
\r\n\r\nThanks for the help!\r\n", + "user": "carson2stoker", + "reaction_cnt": 0, + "created_at": "2022-08-13T22:28:44Z", + "updated_at": "2022-08-16T20:21:32Z", + "author": "carson2stoker", + "comments": [ + { + "body": "Hi @carson2stoker! The error is kind of related to the initial error you saw. What it looks like is that SimBA is trying to read the `pose_config_names.csv` located in the SimBA package, which is a CSV file. However, it fails because the delimiter is not a comma, or it has been corrupted in some way. When trying to fix your prior error, did you manipulate the `pose_config_names.csv` in some way outside of SimBA? ", + "created_at": "2022-08-14T00:00:09Z", + "author": "sronilsson" + }, + { + "body": "No, I didn't touch the pose_config_names.csv file at all. I also tried creating a new project and resetting the user defined pose, and recreating it, but I always got the same error.", + "created_at": "2022-08-15T16:29:00Z", + "author": "carson2stoker" + }, + { + "body": "@carson2stoker thanks, I just had another user reporting the same issue on gitter, so I will see if there are any issues in code", + "created_at": "2022-08-15T17:07:01Z", + "author": "sronilsson" + }, + { + "body": "@carson2stoker could you do me a favour and upgrade simba with pip install simba-uw-tf-dev --upgrade and let me know if you still see the error? I'm having trouble re-creating the error, but I re-wrote the script in the hope that it is more robust now.", + "created_at": "2022-08-15T20:52:19Z", + "author": "sronilsson" + }, + { + "body": "Yes, that worked! Thank you. However, another error immediately cropped up, as is always the case. I imported my video, and that went fine. Then, I tried to upload my tracking data generated in sleap. I did pick the .slp file type. It gave me the following error. \r\n\r\n```\r\nFAILED: SimBA could not find a video file resenting test2.6 in the project_folder/videos directory\r\n```\r\n\r\nI didn't see anything in the issues page about this and tried replacing the video path in the sleap files but that didn't work either. \r\n\r\nEdit: to clarify, that is the message that displayed in the gui. The terminal gave me this error.\r\n\r\n```\r\nTypeError: expected str, bytes or os.PathLike object, not NoneType\r\n```", + "created_at": "2022-08-15T23:30:34Z", + "author": "carson2stoker" + }, + { + "body": "Hi @carson2stoker - one step forward!! To clarify, have you imported a video called test2.6.mp4 or test2.6.avi, and this file is located in the project_folder/videos directory? ", + "created_at": "2022-08-16T01:28:11Z", + "author": "sronilsson" + }, + { + "body": "No, sorry the sleap project is called test2.6.slp, the video is just called video.mp4. ", + "created_at": "2022-08-16T16:46:41Z", + "author": "carson2stoker" + }, + { + "body": "Alright, is `test2.6.slp` a sleap project you are trying to import into SimBA? What you want to import is the slp inference files for the videos, not the entire project. If you have used your test2.6.slp project to infer the body-part predictions on `video.mp4`, then sleap should create a `video.slp`. It's the video.slp you want to import into simba. ", + "created_at": "2022-08-16T17:53:17Z", + "author": "sronilsson" + }, + { + "body": "Yes that is what I am trying to do. So just to clarify, in the 'import videos into project folder' tab I should import the raw video, and in the 'import tracking data' tab I should upload a video.slp file, which is the sleap inference file? 
I have run inference and generated predictions on the whole video, but I don't have a file called video.slp. I do see one called test2.6.slp.mp4.avi though, would that be what I'm looking for?", + "created_at": "2022-08-16T18:09:19Z", + "author": "carson2stoker" + }, + { + "body": "Yes, what I am thinking is that you use your `test2.6.slp` project to select and annotate images etc., and ultimately build a pose-estimation model. Once that is done, you want to use the `test2.6.slp` project to analyze new videos, such as `video.mp4`, so you run your inference on video.mp4 using the model inside the test2.6.slp project. That should produce an output file, which holds all the body-part coordinates for video.mp4. I think they name that output file `video.slp` in sleap. ", + "created_at": "2022-08-16T19:31:15Z", + "author": "sronilsson" + }, + { + "body": "Ah I figured it out. Sleap puts those .slp files generated during inference under a different directory called predictions and renames them. Once I found the right .slp file and renamed it to video.slp, it worked! It looks like the names have to match, which makes sense. ", + "created_at": "2022-08-16T20:00:51Z", + "author": "carson2stoker" + }, + { + "body": "Great! ", + "created_at": "2022-08-16T20:21:32Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Dependency Conflicts During Installation", + "body": "**Describe the bug**\r\nI have multiple dependency conflicts during installation when using anaconda. I am trying to install in my home directory on a cluster so I have the correct permissions. The current conflict is with numpy. \r\n\r\n```\r\nThe conflict is caused by:\r\n    simba-uw-tf 1.0.3 depends on numpy==1.18.1\r\n    deeplabcut 2.0.9 depends on numpy~=1.14.5\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. create new conda environment\r\n2. activate environment\r\n3. type 'pip install simba-uw-tf'\r\n4. See error\r\n\r\n**Expected behavior**\r\nsimba installs without issue\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS] Rocky Linux 8.5 (Green Obsidian)\r\n - Python Version [e.g. 3.6.0] 3.6\r\n - Are you using anaconda? yes\r\n \r\nThe full terminal output is posted below under Logs.\r\n\r\n\r\n\r\n
Logs\r\n\r\n\r\n```\r\n(base) [u1208563@notchpeak1:~]$ salloc --time=4:00:00 --account=notchpeak-gpu --partition=notchpeak-gpu --nodes=1 --ntasks=1 --mem=60G --gres=gpu:2080ti:1\r\nsalloc: Pending job allocation 5090324\r\nsalloc: job 5090324 queued and waiting for resources\r\nsalloc: job 5090324 has been allocated resources\r\nsalloc: Granted job allocation 5090324\r\nsalloc: Waiting for resource configuration\r\nsalloc: Nodes notch004 are ready for job\r\n(base) [u1208563@notch004:~]$ conda env list\r\n# conda environments:\r\n#\r\nbase * /uufs/chpc.utah.edu/common/home/u1208563/miniconda3\r\nbento /uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/bento\r\nsleap /uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/sleap\r\n\r\n(base) [u1208563@notch004:~]$ conda create --name simba python=3.6\r\nCollecting package metadata (current_repodata.json): done\r\nSolving environment: failed with repodata from current_repodata.json, will retry with next repodata source.\r\nCollecting package metadata (repodata.json): done\r\nSolving environment: done\r\n\r\n## Package Plan ##\r\n\r\n environment location: /uufs/chpc.utah.edu/common/home/u1208563/miniconda3/envs/simba\r\n\r\n added / updated specs:\r\n - python=3.6\r\n\r\n\r\nThe following NEW packages will be INSTALLED:\r\n\r\n _libgcc_mutex pkgs/main/linux-64::_libgcc_mutex-0.1-main\r\n _openmp_mutex pkgs/main/linux-64::_openmp_mutex-5.1-1_gnu\r\n ca-certificates pkgs/main/linux-64::ca-certificates-2022.07.19-h06a4308_0\r\n certifi pkgs/main/linux-64::certifi-2021.5.30-py36h06a4308_0\r\n ld_impl_linux-64 pkgs/main/linux-64::ld_impl_linux-64-2.38-h1181459_1\r\n libffi pkgs/main/linux-64::libffi-3.3-he6710b0_2\r\n libgcc-ng pkgs/main/linux-64::libgcc-ng-11.2.0-h1234567_1\r\n libgomp pkgs/main/linux-64::libgomp-11.2.0-h1234567_1\r\n libstdcxx-ng pkgs/main/linux-64::libstdcxx-ng-11.2.0-h1234567_1\r\n ncurses pkgs/main/linux-64::ncurses-6.3-h5eee18b_3\r\n openssl pkgs/main/linux-64::openssl-1.1.1q-h7f8727e_0\r\n pip pkgs/main/linux-64::pip-21.2.2-py36h06a4308_0\r\n python pkgs/main/linux-64::python-3.6.13-h12debd9_1\r\n readline pkgs/main/linux-64::readline-8.1.2-h7f8727e_1\r\n setuptools pkgs/main/linux-64::setuptools-58.0.4-py36h06a4308_0\r\n sqlite pkgs/main/linux-64::sqlite-3.39.2-h5082296_0\r\n tk pkgs/main/linux-64::tk-8.6.12-h1ccaba5_0\r\n wheel pkgs/main/noarch::wheel-0.37.1-pyhd3eb1b0_0\r\n xz pkgs/main/linux-64::xz-5.2.5-h7f8727e_1\r\n zlib pkgs/main/linux-64::zlib-1.2.12-h7f8727e_2\r\n\r\n\r\nProceed ([y]/n)? 
y\r\n\r\nPreparing transaction: done\r\nVerifying transaction: done\r\nExecuting transaction: done\r\n#\r\n# To activate this environment, use\r\n#\r\n# $ conda activate simba\r\n#\r\n# To deactivate an active environment, use\r\n#\r\n# $ conda deactivate\r\n\r\n(base) [u1208563@notch004:~]$ conda activate simba\r\n(simba) [u1208563@notch004:~]$ pip install simba-uw-tf\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.3.12-py3-none-any.whl (7.3 MB)\r\nCollecting numexpr==2.6.9\r\n Using cached numexpr-2.6.9-cp36-cp36m-manylinux1_x86_64.whl (163 kB)\r\nCollecting matplotlib==3.0.3\r\n Using cached matplotlib-3.0.3-cp36-cp36m-manylinux1_x86_64.whl (13.0 MB)\r\nCollecting dash==1.14.0\r\n Using cached dash-1.14.0-py3-none-any.whl\r\nCollecting opencv-python==3.4.5.20\r\n Using cached opencv_python-3.4.5.20-cp36-cp36m-manylinux1_x86_64.whl (25.4 MB)\r\nCollecting xgboost==0.90\r\n Using cached xgboost-0.90-py2.py3-none-manylinux1_x86_64.whl (142.8 MB)\r\nCollecting imutils==0.5.2\r\n Using cached imutils-0.5.2-py3-none-any.whl\r\nCollecting deeplabcut==2.0.8\r\n Using cached deeplabcut-2.0.8-py3-none-any.whl (178 kB)\r\nCollecting imblearn==0.0\r\n Using cached imblearn-0.0-py2.py3-none-any.whl (1.9 kB)\r\nCollecting tabulate==0.8.3\r\n Using cached tabulate-0.8.3-py3-none-any.whl\r\nCollecting pyarrow==0.17.1\r\n Using cached pyarrow-0.17.1-cp36-cp36m-manylinux2014_x86_64.whl (63.8 MB)\r\nCollecting scikit-image==0.14.2\r\n Using cached scikit_image-0.14.2-cp36-cp36m-manylinux1_x86_64.whl (25.3 MB)\r\nCollecting dash-colorscales==0.0.4\r\n Using cached dash_colorscales-0.0.4-py3-none-any.whl\r\nCollecting tqdm==4.30.0\r\n Using cached tqdm-4.30.0-py2.py3-none-any.whl (47 kB)\r\nCollecting h5py==2.9.0\r\n Using cached h5py-2.9.0-cp36-cp36m-manylinux1_x86_64.whl (2.8 MB)\r\nCollecting shapely==1.7\r\n Using cached Shapely-1.7.0-cp36-cp36m-manylinux1_x86_64.whl (1.8 MB)\r\nCollecting shap==0.35.0\r\n Using cached shap-0.35.0-cp36-cp36m-linux_x86_64.whl\r\nCollecting scipy==1.1.0\r\n Using cached scipy-1.1.0-cp36-cp36m-manylinux1_x86_64.whl (31.2 MB)\r\nCollecting dash-core-components==1.10.2\r\n Using cached dash_core_components-1.10.2-py3-none-any.whl\r\nCollecting protobuf==3.6.0\r\n Using cached protobuf-3.6.0-cp36-cp36m-manylinux1_x86_64.whl (7.1 MB)\r\nCollecting dtreeviz==0.8.1\r\n Using cached dtreeviz-0.8.1-py3-none-any.whl\r\nCollecting statsmodels==0.9.0\r\n Using cached statsmodels-0.9.0-cp36-cp36m-manylinux1_x86_64.whl (7.4 MB)\r\nCollecting tensorflow-gpu==1.14.0\r\n Using cached tensorflow_gpu-1.14.0-cp36-cp36m-manylinux1_x86_64.whl (377.0 MB)\r\nCollecting wxpython==4.0.4\r\n Using cached wxPython-4.0.4.tar.gz (68.8 MB)\r\nCollecting pandas==0.25.3\r\n Using cached pandas-0.25.3-cp36-cp36m-manylinux1_x86_64.whl (10.4 MB)\r\nCollecting pyyaml==5.3.1\r\n Using cached PyYAML-5.3.1-cp36-cp36m-linux_x86_64.whl\r\nCollecting graphviz==0.11\r\n Using cached graphviz-0.11-py2.py3-none-any.whl (17 kB)\r\nCollecting plotly==4.9.0\r\n Using cached plotly-4.9.0-py2.py3-none-any.whl (12.9 MB)\r\nCollecting seaborn==0.9.0\r\n Using cached seaborn-0.9.0-py3-none-any.whl (208 kB)\r\nCollecting dash-html-components==1.0.3\r\n Using cached dash_html_components-1.0.3-py3-none-any.whl\r\nCollecting deepposekit==0.3.5\r\n Using cached deepposekit-0.3.5-py3-none-any.whl\r\nCollecting dash-color-picker==0.0.1\r\n Using cached dash_color_picker-0.0.1-py3-none-any.whl\r\nCollecting scikit-learn==0.22.2\r\n Using cached scikit_learn-0.22.2-cp36-cp36m-manylinux1_x86_64.whl (7.1 MB)\r\nCollecting 
eli5==0.10.1\r\n Using cached eli5-0.10.1-py2.py3-none-any.whl (105 kB)\r\nCollecting imgaug==0.4.0\r\n Using cached imgaug-0.4.0-py2.py3-none-any.whl (948 kB)\r\nCollecting cefpython3==66.0\r\n Using cached cefpython3-66.0-py2.py3-none-manylinux1_x86_64.whl (79.6 MB)\r\nCollecting numba==0.48.0\r\n Using cached numba-0.48.0-1-cp36-cp36m-manylinux2014_x86_64.whl (3.5 MB)\r\nCollecting Pillow==5.4.1\r\n Using cached Pillow-5.4.1-cp36-cp36m-manylinux1_x86_64.whl (2.0 MB)\r\nCollecting yellowbrick==0.9.1\r\n Using cached yellowbrick-0.9.1-py2.py3-none-any.whl (282 kB)\r\nCollecting future\r\n Using cached future-0.18.2-py3-none-any.whl\r\nCollecting flask-compress\r\n Using cached Flask_Compress-1.12-py3-none-any.whl (7.9 kB)\r\nCollecting dash-renderer==1.6.0\r\n Using cached dash_renderer-1.6.0-py3-none-any.whl\r\nCollecting dash-table==4.9.0\r\n Using cached dash_table-4.9.0-py3-none-any.whl\r\nCollecting Flask>=1.0.2\r\n Using cached Flask-2.0.3-py3-none-any.whl (95 kB)\r\nCollecting numpy==1.14.5\r\n Using cached numpy-1.14.5-cp36-cp36m-manylinux1_x86_64.whl (12.2 MB)\r\nCollecting chardet==3.0.4\r\n Using cached chardet-3.0.4-py2.py3-none-any.whl (133 kB)\r\nCollecting python-dateutil==2.7.3\r\n Using cached python_dateutil-2.7.3-py2.py3-none-any.whl (211 kB)\r\nINFO: pip is looking at multiple versions of dash-table to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-renderer to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-html-components to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-core-components to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-colorscales to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-color-picker to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of cefpython3 to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of simba-uw-tf to determine which version is compatible with other requirements. 
This could take a while.\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.3.11-py3-none-any.whl (7.3 MB)\r\nCollecting deeplabcut==2.0.9\r\n Using cached deeplabcut-2.0.9-py3-none-any.whl (187 kB)\r\nCollecting intel-openmp\r\n Using cached intel_openmp-2022.1.0-py2.py3-none-manylinux1_x86_64.whl (10.7 MB)\r\nRequirement already satisfied: setuptools in ./miniconda3/envs/simba/lib/python3.6/site-packages (from deeplabcut==2.0.9->simba-uw-tf) (58.0.4)\r\nCollecting ipython~=6.0.0\r\n Using cached ipython-6.0.0-py3-none-any.whl (736 kB)\r\nCollecting patsy\r\n Using cached patsy-0.5.2-py2.py3-none-any.whl (233 kB)\r\nRequirement already satisfied: certifi in ./miniconda3/envs/simba/lib/python3.6/site-packages (from deeplabcut==2.0.9->simba-uw-tf) (2021.5.30)\r\nCollecting requests\r\n Using cached requests-2.27.1-py2.py3-none-any.whl (63 kB)\r\nCollecting six~=1.11.0\r\n Using cached six-1.11.0-py2.py3-none-any.whl (10 kB)\r\nCollecting tensorpack~=0.9.7.1\r\n Using cached tensorpack-0.9.7.1-py2.py3-none-any.whl (286 kB)\r\nCollecting numpy~=1.14.5\r\n Using cached numpy-1.14.6-cp36-cp36m-manylinux1_x86_64.whl (13.8 MB)\r\nCollecting click\r\n Using cached click-8.0.4-py3-none-any.whl (97 kB)\r\nCollecting easydict~=1.7\r\n Using cached easydict-1.9-py3-none-any.whl\r\nCollecting ipython-genutils~=0.2.0\r\n Using cached ipython_genutils-0.2.0-py2.py3-none-any.whl (26 kB)\r\nCollecting wheel~=0.31.1\r\n Using cached wheel-0.31.1-py2.py3-none-any.whl (41 kB)\r\nCollecting moviepy~=0.2.3.5\r\n Using cached moviepy-0.2.3.5-py3-none-any.whl\r\nCollecting tables\r\n Using cached tables-3.7.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (6.0 MB)\r\nCollecting python-dateutil~=2.7.3\r\n Using cached python_dateutil-2.7.5-py2.py3-none-any.whl (225 kB)\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.3.10-py3-none-any.whl (7.3 MB)\r\n Using cached Simba_UW_tf-1.2.31-py3-none-any.whl (4.6 MB)\r\nCollecting numpy==1.18.1\r\n Using cached numpy-1.18.1-cp36-cp36m-manylinux1_x86_64.whl (20.1 MB)\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.2.30-py3-none-any.whl (4.6 MB)\r\n Using cached Simba_UW_tf-1.2.29-py3-none-any.whl (3.4 MB)\r\nCollecting h5py~=2.7\r\n Using cached h5py-2.10.0-cp36-cp36m-manylinux1_x86_64.whl (2.9 MB)\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.2.28-py3-none-any.whl (3.4 MB)\r\n Using cached Simba_UW_tf-1.2.27-py3-none-any.whl (3.4 MB)\r\nINFO: pip is looking at multiple versions of simba-uw-tf to determine which version is compatible with other requirements. This could take a while.\r\n Using cached Simba_UW_tf-1.2.26-py3-none-any.whl (3.4 MB)\r\n Using cached Simba_UW_tf-1.2.24-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.23-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.22-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.21-py3-none-any.whl (3.3 MB)\r\nINFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. If you want to abort this run, you can press Ctrl + C to do so. 
To improve how pip performs, tell us what happened here: https://pip.pypa.io/surveys/backtracking\r\n Using cached Simba_UW_tf-1.2.20-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.19-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.18-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.17-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.16-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.15-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.14-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.13-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.12.1-py3-none-any.whl (4.6 MB)\r\nCollecting scikit-learn~=0.19.2\r\n Using cached scikit_learn-0.19.2-cp36-cp36m-manylinux1_x86_64.whl (4.9 MB)\r\nCollecting ruamel.yaml~=0.15\r\n Using cached ruamel.yaml-0.17.21-py3-none-any.whl (109 kB)\r\nCollecting imageio~=2.3.0\r\n Using cached imageio-2.3.0-py2.py3-none-any.whl (3.3 MB)\r\nCollecting colour\r\n Using cached colour-0.1.5-py2.py3-none-any.whl (23 kB)\r\nCollecting attrs>16.0.0\r\n Using cached attrs-22.1.0-py2.py3-none-any.whl (58 kB)\r\nCollecting jinja2\r\n Using cached Jinja2-3.0.3-py3-none-any.whl (133 kB)\r\nCollecting imbalanced-learn\r\n Using cached imbalanced_learn-0.9.1-py3-none-any.whl (199 kB)\r\nINFO: pip is looking at multiple versions of imblearn to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of h5py to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of graphviz to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of eli5 to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dtreeviz to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of deepposekit to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of deeplabcut to determine which version is compatible with other requirements. 
This could take a while.\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.2.12-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.11-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.10-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.9.2-py3-none-any.whl (4.6 MB)\r\n Using cached Simba_UW_tf-1.2.9-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.8-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.7-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.6-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.5-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.4.3-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.4.2-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.4.1-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.13-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.12-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.11-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.10-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.9-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.8-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.7-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.6-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.5-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.4-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.3.3-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.3-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.2-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.1-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.0-py3-none-any.whl (1.9 MB)\r\n Using cached Simba_UW_tf-1.1.7-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.1.6-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.1.1-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.1.0-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.9-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.7-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.6-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.5-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.4-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.3-py3-none-any.whl (2.3 MB)\r\nERROR: Cannot install simba-uw-tf and simba-uw-tf==1.0.3 because these package versions have conflicting dependencies.\r\n\r\nThe conflict is caused by:\r\n simba-uw-tf 1.0.3 depends on numpy==1.18.1\r\n deeplabcut 2.0.9 depends on numpy~=1.14.5\r\n\r\nTo fix this you could try to:\r\n1. loosen the range of package versions you've specified\r\n2. remove package versions to allow pip attempt to solve the dependency conflict\r\n\r\nERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies\r\n\r\n```\r\n\r\n
\r\n\r\nI've tried all the things suggested about this issue on the FAQ page. I've also looked through the Issues page and tried a few things suggested there, but none of those things worked. I don't have much experience with installing over the command line or dependency conflict resolution, so any help will be greatly appreciated, thanks!\r\n", + "user": "carson2stoker", + "reaction_cnt": 0, + "created_at": "2022-08-12T19:03:12Z", + "updated_at": "2022-08-14T00:09:19Z", + "author": "carson2stoker", + "comments": [ + { + "body": "Hi @carson2stoker ! Use `pip install simba-uw-tf-dev` instead. The `simba-uw-tf` has not been maintained for some time. ", + "created_at": "2022-08-12T19:09:28Z", + "author": "sronilsson" + }, + { + "body": "Hi, thanks that worked. Initially when I used pip install simba-uw-tf-dev I got a different error saying 'failed building wheel for wxpython'. I used the fix used in [#154 ](https://github.com/sgoldenlab/simba/issues/154) by installing wxpython with the command 'conda install -c anaconda wxpython==4.0.4' and then reinstalling simba. I have it installed now, thank you. Although I would suggest updating your installation documentation if 'simba-uw-tf' is so far out of date.", + "created_at": "2022-08-12T23:18:23Z", + "author": "carson2stoker" + }, + { + "body": "Hi @carson2stoker - thanks! Yes, it's sometimes difficult for me to keep up. Have you got a link to the doc where simba-uw-tf is mentioned, and I will remove it?", + "created_at": "2022-08-13T15:50:05Z", + "author": "sronilsson" + }, + { + "body": "Yes. It's here https://github.com/sgoldenlab/simba/blob/master/docs/anaconda_installation.md\r\nand here https://github.com/sgoldenlab/simba/blob/master/docs/installation.md", + "created_at": "2022-08-13T16:41:48Z", + "author": "carson2stoker" + }, + { + "body": "Thank you! ", + "created_at": "2022-08-13T17:08:28Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Issue while analyzing machine-results", + "body": "**Describe the bug**\r\nWhen analyzing the machine-results CSV's (C:\\Users\\name\\Desktop\\name\\project_folder\\csv\\machine_results), I kept seeing all 0's in the column where it should be either a 0 or 1 based on whether the behavior is actually occurring (which I believe are derived from targets-inserted / Boris file ethograms). I'm also getting unexpectedly low results in the behavior probability columns.\r\n\r\n**To Reproduce**\r\n\r\nI was testing a previous and functional model on new data, so tell me if I did something incorrectly.\r\n\r\nSteps to reproduce the behavior:\r\n1. Going through Simba normally until 'Run Machine Model'\r\n2. In model settings, I used .sav files from a different project folder (I tried choosing the file directly from the other folder, then I tried copying it over to the current folder both without success)\r\n3. Opening up the machine-results CSV files (path above), and scroll to the data at the very right.\r\n4. See error\r\n\r\n**Expected behavior**\r\nI was expecting to see the 0/1 column to match the data found in my ethograms, and for the probabilities to match that.\r\n\r\n\r\n - OS: Windows 10\r\n - Python 3.6.10\r\n - Anaconda\r\n \r\n\r\n**Additional context**\r\nUnfortunately Github isn't letting me upload a screenshot right now, please let me know if anything needs to be clarified.\r\n", + "user": "n-zeng", + "reaction_cnt": 0, + "created_at": "2022-08-12T17:37:50Z", + "updated_at": "2022-08-15T15:18:03Z", + "author": "n-zeng", + "comments": [ + { + "body": "Hi n-zeng! 
If I understand correctly, your machine learning model is severely **under** classifying the presence of your behavior: every frame is scored as behavior absent. The probabilities that the behavior is present are also very low for all frames. \r\n\r\nOne way this could happen would be if you fed a lot of annotations with behavior absent into your classifier, together with a very small number of behavior present annotations. How many annotated frames with the behavior-present vs behavior-absent did you use to create the model?\r\n\r\n\r\nIf the annotations are imbalanced towards behavior-absent, this would bias the model towards classifying most frames as behavior absent. One way around this is to balance your annotations, taking an equal or similar amount of behavior-absent and behavior-present frames when you create your classifier – check out the [Random undersampling]( https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#train-predictive-classifiers-settings) in the machine model settings for how to balance the data. \r\n", + "created_at": "2022-08-14T14:33:09Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Create pose config Issues", + "body": "**Describe the bug**\r\nHi, I'm brand new to Simba and trying to create a new project using imported data from Sleap. I have followed the tutorials on Multi_animal_pose.md and Pose_config.md to create my own pose configuration for Bombus Impatiens bumblebees. Once I'm done defining the pose by double clicking each body part on the image, some error messages appear on the command line and my pose config does not show up. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Load simba\r\n2. Click on 'create new project'\r\n3. go down to animal settings and click 'create pose config' in the drop down box\r\n4. type in the config details (1 animal, 21 body parts)\r\n5. give all the body parts names and define them on the image of the animal\r\n6. click esc on the last image \r\n7. see error\r\n\r\n**Expected behavior**\r\nI expected my newly created pose config to appear in the drop down box next to # config under Animal Settings. \r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux, being run remotely on a cluster\r\n - Python Version 3.6\r\n - Are you using anaconda? Yes I am \r\n \r\n\r\n\r\n
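A minimal sketch of the random undersampling idea described in the comment above (not SimBA's implementation; the target column name is a hypothetical placeholder): sample behavior-absent frames down to the number of behavior-present frames before training, so the classifier is not biased toward "absent".

```python
import pandas as pd

def undersample(df, target_col="behavior_present", seed=42):
    # Keep all behavior-present frames; randomly draw an equal number of
    # behavior-absent frames. Assumes absent frames outnumber present ones,
    # as in the imbalanced-annotation scenario discussed above.
    present = df[df[target_col] == 1]
    absent = df[df[target_col] == 0].sample(n=len(present), random_state=seed)
    return pd.concat([present, absent]).sort_index()
```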
Logs\r\n\r\n\r\n```\r\n(base) [u1208563@notchpeak1:~]$ salloc --time=4:00:00 --account=notchpeak-gpu --partition=notchpeak-gpu --nodes=1 --ntasks=1 --mem=60G --gres=gpu:2080ti:1\r\nsalloc: Pending job allocation 5071542\r\nsalloc: job 5071542 queued and waiting for resources\r\nsalloc: job 5071542 has been allocated resources\r\nsalloc: Granted job allocation 5071542\r\nsalloc: Waiting for resource configuration\r\nsalloc: Nodes notch271 are ready for job\r\n(base) [u1208563@notch271:~]$ module load anaconda3/2022.05\r\n(base) [u1208563@notch271:~]$ module load simba\r\n(/uufs/chpc.utah.edu/sys/installdir/r8/simba/08022022) [u1208563@notch271:~]$ simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/uufs/chpc.utah.edu/sys/installdir/r8/simba/08022022/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/uufs/chpc.utah.edu/sys/installdir/r8/simba/08022022/lib/python3.6/site-packages/simba/SimBA.py\", line 2913, in \r\n self.saveposeConfigbutton = Button(self.scroller,text='Save Pose Config',command=lambda:self.savePoseConfig(self.scroller))\r\n File \"/uufs/chpc.utah.edu/sys/installdir/r8/simba/08022022/lib/python3.6/site-packages/simba/SimBA.py\", line 2935, in savePoseConfig\r\n define_new_pose_configuration(configName, self.noAnimals, noBps, Imagepath, BpNameList, self.noAnimals, animal_id_list)\r\n File \"/uufs/chpc.utah.edu/sys/installdir/r8/simba/08022022/lib/python3.6/site-packages/simba/define_new_pose_config.py\", line 73, in define_new_pose_configuration\r\n with open(namePath, 'a') as fd:\r\nPermissionError: [Errno 13] Permission denied: '/uufs/chpc.utah.edu/sys/installdir/r8/simba/08022022/lib/python3.6/site-packages/simba/pose_configurations/configuration_names/pose_config_names.csv'\r\n\r\n```\r\n\r\n
\r\n\r\nIt seems like there is a permission error with the cluster. I had them install simba directly on the cluster nodes because it was a pain (some packages are not supported anymore and the setup files very specifically constrain package versions. They suggested you make a yaml file for the conda env. I don't know what all that exactly means, but that's another issue). Anyway, I don't have access to the direct installation, which seems to be what's causing the issue. I couldn't find any previous issues about this and couldn't figure it out on my own. Any help will be appreciated. Thanks!\r\n", + "user": "carson2stoker", + "reaction_cnt": 0, + "created_at": "2022-08-10T19:43:14Z", + "updated_at": "2022-08-13T22:09:58Z", + "author": "carson2stoker", + "comments": [ + { + "body": "Hi @carson2stoker ! Could you post the error msg? It will be a lot easier to diagnose the problem. When you click `Esc` on the last image, SimBA wants to save your image and your selections inside folders and csv files located where-ever you have SimBA installed. If SimBA isn't installed locally, and lives somewhere else, it might well be related to some permission to write on that instance where the SimBA code lives. ", + "created_at": "2022-08-10T20:49:23Z", + "author": "sronilsson" + }, + { + "body": "Yes, I posted the entire terminal output under Logs on the original post. Just click it to open the box.", + "created_at": "2022-08-10T20:52:25Z", + "author": "carson2stoker" + }, + { + "body": "Ah, sorry I missed it! Yes this is beyond what I know how to fix. When you connect to your instance, you need to do so with read/write permission. This looks like a read error - SimBA wants to read the `pose_config_names.csv` file located in the SimBA package, but you are not allowed to open it. \r\n\r\n", + "created_at": "2022-08-10T23:39:46Z", + "author": "sronilsson" + }, + { + "body": "Ok, is there anyone else that would know a way to fix this beyond installing elsewhere?", + "created_at": "2022-08-12T17:16:58Z", + "author": "carson2stoker" + } + ] + }, + { + "title": "Help with behavior labeling.", + "body": "I am a new user trying to use Simba to detect changes in behavior in mice in the Barnes maze test. I loaded my DLC tracking data and set up ROIs without problem, but I could not create behavior annotations.\r\n I started labeling a 5 min, 18000 frame video following the tutorial for the behavioral annotator GUI. For this, I mark the behavior and the frame range checkbox and then annotate the first and last frame of the behavior. Then I press the \"save and advance to the next frame\" button to continue with the next label. Finally, I press generate/save annotations and close the window to continue with the train machine label.\r\n\r\n![image](https://user-images.githubusercontent.com/87041514/183268638-83863db3-c6f3-402a-a59b-2303e87ab521.png)\r\n\r\n\r\nFor this, I use the hyperparameters from the example file (BtWGaNP_meta) to train the machine model. 
But every time I try to make a model, the following error appears: \r\n\r\n`\"raise ValueError('Data for video {} does not contain any annotations for behavior {}'.format(vid_name, clf_name))\r\nValueError: Data for video D1_C9A2_cropped does not contain any annotations for behavior Explore\" `\r\n\r\n(where D1_C9A2_cropped is my video and \"Explore\" is the behavior)\r\n\r\n![image](https://user-images.githubusercontent.com/87041514/183268698-715f53f3-e9e9-4a93-9871-6b2856747c04.png)\r\n\r\nWhat could be the problem?\r\nGreetings and thanks in advance.\r\n\r\n(running in Win 10 with anaconda)\r\nOS: Win 10\r\n", + "user": "Gfernandezv", + "reaction_cnt": 0, + "created_at": "2022-08-06T23:18:44Z", + "updated_at": "2022-08-07T11:23:12Z", + "author": "Gfernandezv", + "comments": [ + { + "body": "Hello @Gfernandezv! Thanks for the screengrabs, very helpful. From what I can see on the screengrabs, a video called `D1_C9A1_cropped` is annotated. \r\n\r\nWhen you click on `Train single model from global environment`, SimBA pulls all of the video files that have been annotated. To find all the files that have been annotated, SimBA looks inside the `project_folder/csv/targets_inserted` directory, which is where the files are saved after clicking `generate/save annotations` in the annotation interface. \r\n\r\nWhen SimBA looks inside the `project_folder/csv/targets_inserted` directory, in your case, we find 3 files. One is likely `D1_C9A2_cropped`, another is the `D1_C9A1_cropped` (which is the file I can see you annotating), and the third file I can't guess the name of.\r\n\r\nIs it possible that D1_C9A2_cropped and possibly a third file have been added to the `project_folder/csv/targets_inserted` directory without annotations? Annotations are stored as the last columns in these files. So if you open them up in a spreadsheet viewer (or alternative way if too large) and look at the last column, do you find a column called 'Explore' at the end of D1_C9A2_cropped? 
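A quick way to run the check suggested here (a sketch; the targets_inserted path and the 'Explore' behavior name are taken from this thread):

```python
import pandas as pd

# Confirm the annotation column exists in a targets_inserted file and count
# how many frames are annotated as behavior-present.
df = pd.read_csv("project_folder/csv/targets_inserted/D1_C9A2_cropped.csv")
if "Explore" not in df.columns:
    print("No 'Explore' annotation column found in this file")
else:
    print(f"'Explore' annotated in {int(df['Explore'].sum())} of {len(df)} frames")
```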
\r\n- When I analyze ROI only for one animal at a time, only the Movement_within_ROIs file generates indeed data for one animal, but the ROI_entry_data and ROI_time_data still present data for both animals.\r\n\r\nHow can I fix these issues?\r\n\r\nThank you,\r\nBest,\r\nDorian\r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2022-07-26T11:15:01Z", + "updated_at": "2022-08-07T22:34:22Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli! Many thanks for reporting! First, can up update to the latest version of simba-uw-tf-dev (should be version 0.93.8) with `pip install simba-uw-tf-dev --upgrade`, and see if you still see the same issues? \r\n\r\n", + "created_at": "2022-07-26T13:57:26Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the help.\r\nI managed to update the version. \r\nThough, the 3 mentioned mistakes still appear if I launch the Analyze ROI data process.\r\nI precise that I did not recreate a project from scratch and only tried to run the analysis from my existing project. \r\n\r\nBest,", + "created_at": "2022-07-26T14:32:37Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks for testing - when you import the data from multi-animal DLC, there is an interface to specify which animal is which by clicking the animals in the image - https://github.com/sgoldenlab/simba/blob/master/docs/Multi_animal_pose.md#step-5-assigning-tracks-the-correct-identities. Is it possible that the mouse click for animal 1 was closer to a body-part of animal 2, and vice versa? ", + "created_at": "2022-07-26T14:39:39Z", + "author": "sronilsson" + }, + { + "body": "Yes, I see the step you're mentioning. I thaught about this but I'm almost sure that I did not do a mistake at that stage. Also, when I run the same command on my previous project, I observed the same errors. \r\n\r\nBest,", + "created_at": "2022-07-26T15:58:30Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks @DorianBattivelli - let me see if I can recreate this - can you send me an example of the ROI_time_data file to better understand what is going on? You can zip it and drop it here I think.\r\n\r\nAlso, does it happen on all videos, or is it a single one? \r\n", + "created_at": "2022-07-26T16:45:45Z", + "author": "sronilsson" + }, + { + "body": "So, I did a new project from scratch to be sure about the ID attributions. \r\nAnd again, even with this last version, I confirm that the IDs either in the ROI csv files, and in the path plots videos are inverted.\r\n\r\nHere the file:\r\n[ROI_time_data_20220727124702.csv](https://github.com/sgoldenlab/simba/files/9198204/ROI_time_data_20220727124702.csv)\r\n\r\nTheoretically MS = mouse 1 and US = mouse 2. \r\nI know for sure that US spent almost no time in Down-comp, and almost all the time in Up-comp, while MS spent a bit more than half of the time in Down-comp. The CSV files says exactly the opposite. \r\n\r\n", + "created_at": "2022-07-27T11:39:13Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks @DorianBattivelli for digging! I appreciate it. I can't immediately replicate it in my troubleshooting projects, but need to figure this one out. Can you help me with two more things? \r\n\r\nIf you visualize the ROI calculations using the `Visualize ROI tracking` menu, does it also look inverted? With MS having the label as US and vice versa?\r\n\r\nCan you share your SimBA project with me? It is enough with the single video inside it. 
But I also need the DLC 'el.h5` to import it, be be able to follow your steps. ", + "created_at": "2022-07-27T12:51:23Z", + "author": "sronilsson" + }, + { + "body": "Running Visualize ROI tracking returns the following error:\r\n![ROI_Tracking](https://user-images.githubusercontent.com/66886884/181258880-17b2bad6-9155-4684-8acf-9360b5d1e237.png)\r\n\r\nI will upload the project on Drive and share it with you asap. Can I remove the frame folder form the project? It takes 12gb..\r\n\r\nBest,\r\n", + "created_at": "2022-07-27T13:29:08Z", + "author": "DorianBattivelli" + }, + { + "body": "Yes! No need for the frames at all. ", + "created_at": "2022-07-27T14:30:15Z", + "author": "sronilsson" + }, + { + "body": "Thanks @DorianBattivelli!", + "created_at": "2022-07-27T14:30:47Z", + "author": "sronilsson" + }, + { + "body": "To which email adress can I share the folder? ", + "created_at": "2022-07-27T22:11:29Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks! I will test it asap, and figure out why you see the visualization error (as its not showing for me either). Can you share it with sronilsson@gmail.com ? ", + "created_at": "2022-07-28T01:01:21Z", + "author": "sronilsson" + }, + { + "body": "Done, thanks!", + "created_at": "2022-07-28T09:37:50Z", + "author": "DorianBattivelli" + }, + { + "body": "Great, is the `_el.h5` in there too? At quick browse I can't see it", + "created_at": "2022-07-28T10:34:41Z", + "author": "sronilsson" + }, + { + "body": "No my mistake, I added them now and updated the sharing,\r\n\r\nThanks!", + "created_at": "2022-07-28T10:37:22Z", + "author": "DorianBattivelli" + }, + { + "body": "Got it!\n\nOn Thu, Jul 28, 2022 at 6:37 AM DorianBattivelli ***@***.***>\nwrote:\n\n> No my mistake, I added them now and updated the sharing,\n>\n> Thanks!\n>\n> —\n> Reply to this email directly, view it on GitHub\n> ,\n> or unsubscribe\n> \n> .\n> You are receiving this because you commented.Message ID:\n> ***@***.***>\n>\n", + "created_at": "2022-07-28T11:19:15Z", + "author": "sronilsson" + }, + { + "body": "@DorianBattivelli, if you check the video at https://drive.google.com/file/d/1HvbWTV8e1SCb9XIqFnFyb00QsTx8_4Xa/view?usp=sharing, are animal IDs inverted? (watch out it is large..). ", + "created_at": "2022-07-28T15:05:08Z", + "author": "sronilsson" + }, + { + "body": "Yes, they are inverted", + "created_at": "2022-07-29T10:28:15Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli - I ended up completely rewriting the DLC import class.. sorry for delay. Could you try and update simba to version 0.94.2, and import the dlc data again, and see how it goes? ", + "created_at": "2022-08-01T13:12:02Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson thank you very much, it solved the issue. Now the ID are right ! Thank you again.\r\n\r\nAlso, I wanted to ask you how to improve the quality of the distance measurment? I suspect that the variation of the exact location of the center body part label from one frame to the other is considered as displacement / movement, so that even when the animal is immobile, this artificial displacement of the body label is considered as locomotion. I tried to change the movement criterion parameter but it does not look to change the distance measurment. Is there something I can do? \r\n\r\nThank you again! 
\r\nBest,", + "created_at": "2022-08-01T18:02:58Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli - one possible way is to use [smoothing](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#to-import-multiple-dlc-csv-files) when you import your data. There are two options (Gaussian, Savitzky-Golay). After you've imported your data, you can visualize it using this [tool](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#visualize-pose-estimation-in-folder) from the `Tools` dropdown to check if it looks less jumpy. There is a video example here of the expected results: https://www.youtube.com/watch?v=d9-Bi4_HyfQ", + "created_at": "2022-08-01T18:48:23Z", + "author": "sronilsson" + }, + { + "body": "Amazing, thank you very much, \r\n\r\nBest!", + "created_at": "2022-08-01T19:24:30Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Error during train machine model step/need help understanding hyperparameters", + "body": "Hello! I completed the labeling of videos in my project and I am trying to do the `Train Machine Model` step in the pipeline but I am having trouble understanding how it works. I read the documentation and followed the guide, using the generic hyperparameter settings listed in this document `BtWGaNP_meta.csv` that is provided in the guide but I am getting the error listed below. Could you help me understand this step and the hyperparameters so I can complete this successfully? I am new to SimBA and I just want to make sure I am doing everything correctly! For context, my videos have one mouse in each video where I am using 6 body parts (nose, left forepaw, right forepaw, left hind paw, right hind paw, and tail base) to analyze left and right nape scratching. Thanks!\r\n\r\n`[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 20 concurrent workers.\r\n[Parallel(n_jobs=-1)]: Done 10 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=-1)]: Done 160 tasks | elapsed: 1.8s\r\n[Parallel(n_jobs=-1)]: Done 410 tasks | elapsed: 4.7s\r\n[Parallel(n_jobs=-1)]: Done 760 tasks | elapsed: 8.5s\r\n[Parallel(n_jobs=-1)]: Done 1210 tasks | elapsed: 13.6s\r\n[Parallel(n_jobs=-1)]: Done 1760 tasks | elapsed: 19.8s\r\n[Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 22.5s finished\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 4088, in \r\n button_trainmachinemodel = Button(label_trainmachinemodel,text='Train single model from global environment',fg='blue',command = lambda: threading.Thread(target=self.train_single_model(config_path=self.projectconfigini)).start())\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 4539, in train_single_model\r\n model_trainer.train_model()\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\train_single_model.py\", line 132, in train_model\r\n create_x_importance_bar_chart(self.rf_clf, self.feature_names, self.clf_name, self.eval_out_path, feature_importance_bars)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\train_model_functions.py\", line 217, in create_x_importance_bar_chart\r\n create_x_importance_log(rf_clf, x_names, clf_name, save_dir)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\train_model_functions.py\", line 
208, in create_x_importance_log\r\n df.to_csv(save_file_path, index=False)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\core\\generic.py\", line 3228, in to_csv\r\n formatter.save()\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\io\\formats\\csvs.py\", line 183, in save\r\n compression=self.compression,\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\io\\common.py\", line 399, in _get_handle\r\n f = open(path_or_buf, mode, encoding=encoding, newline=\"\")\r\nFileNotFoundError: [Errno 2] No such file or directory: 'N:/Ross/1Tayler Sheahan/Automated Behavioral Analysis/Madeline Fontana/SimBA\\\\Full_SimBA\\\\models\\\\generated_models\\\\model_evaluations\\\\leftnapescratch_feature_importance_log.csv'`\r\n\r\n\"Screen\r\n\r\n\"Screen\r\n\r\n\r\nI was able to exit still and this error was no longer in the command windows so I should be able to do that step successfully again once I fix the issue.", + "user": "fon215", + "reaction_cnt": 0, + "created_at": "2022-07-21T02:09:05Z", + "updated_at": "2022-07-21T15:56:52Z", + "author": "fon215", + "comments": [ + { + "body": "Hi @fon215 ! Thanks for reporting, and, my bad. I was just updating these scripts, and I forgot the line of code that creates the folder, before saving the file in that folder :) Can you update to simba 0.93.6 with `pip install simba-uw-tf-dev --upgrade` and see if that fixes it? If it doesn't, or any other errors come up, please let me know. ", + "created_at": "2022-07-21T03:36:10Z", + "author": "sronilsson" + }, + { + "body": "That's alright! I upgraded my version of SimBA but when I use the generic settings above I am still getting the same error. Along with fixing that issue, I was wondering if I will need to create 2 models and train on two different specific models because I am checking for both left and right nape scratch, or is that not how this works?", + "created_at": "2022-07-21T13:12:07Z", + "author": "fon215" + }, + { + "body": "If I understand correctly, \"nape scratch\" is a behavior you want to classify, and you want to know how much left-side nape scratching the animal is doing, and how much right-side nape scratching the animal is doing? If so then yes, in SimBA we need two classifiers. ", + "created_at": "2022-07-21T13:17:55Z", + "author": "sronilsson" + }, + { + "body": "Oh I noticed you still see the error.. I will have to check again why that is. \r\n\r\ncan you confirm you are running 0.93.6 by typing `pip show simba-uw-tf-dev`?", + "created_at": "2022-07-21T13:19:40Z", + "author": "sronilsson" + }, + { + "body": "Okay! Yes, I have two behaviors/classifiers because I am looking for when a mouse scratches the nape of their neck on both sides. So do I save settings for a specific model for each and train on a specific model for each as well, selecting each behavior for the settings in this step and saving a model for each?\r\n\r\nAlso, this is the version listed when I use the command you provided. 
\r\n\"simba\r\n\r\nHowever, even after using the `pip install simba-uw-tf-dev --upgrade` command again this is the message in the command window so I'm not sure what is happening.\r\n\r\n`Requirement already satisfied: zipp>=0.5 in c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages (from importlib-metadata->click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (3.6.0)`", + "created_at": "2022-07-21T13:32:24Z", + "author": "fon215" + }, + { + "body": "Yes, I think the model settings will be very similar, if the scratching is very similar, just mirrored. But you will need annotations for both behaviors. \r\n\r\nThat is just a message, no error or warning. \r\n\r\nThat said, I can't recreate your error. Would you mind running the code again and send me a screengrab of the error msg in the windows terminal? \r\n\r\n", + "created_at": "2022-07-21T13:37:52Z", + "author": "sronilsson" + }, + { + "body": "Okay so this time, I am no longer getting the error because I made two specific models, one for each side, saved those metadata setting files, and trained on multiple models and that seems to have worked and successfully and completed. Let me know if something still does not look right and thanks for your help!\r\n\r\n![left_and_right_models_saved](https://user-images.githubusercontent.com/99504167/180230948-cbdedf31-dc05-486c-a1bc-c98ddf355793.JPG)\r\n![training_models_anaconda_prompt_window](https://user-images.githubusercontent.com/99504167/180230965-a494bac0-fc36-4d42-bb43-aabe21716723.JPG)\r\n![left_model_complete](https://user-images.githubusercontent.com/99504167/180230981-e2cdb44b-a27b-461c-8c73-73615802c2ad.JPG)\r\n![right_model_complete](https://user-images.githubusercontent.com/99504167/180231000-73befb24-4823-47b3-9b61-50814d570be5.JPG)\r\n\r\n", + "created_at": "2022-07-21T13:54:41Z", + "author": "fon215" + }, + { + "body": "Thanks for testing again, and looks good so far! However the `# 25 features` looks a little low considering the number of body-parts that you are tracking, SimBA should calculate the distance between all of the body-parts, their movements etc.. If you can share a csv file located in the `project_folder/csv/features _extracted`, zip it up and drag it into this thread if not too big, I can take a look. If too big, see if you can remove some rows (I'm only really intrested in the top rows)", + "created_at": "2022-07-21T14:26:09Z", + "author": "sronilsson" + }, + { + "body": "Here is one of the csv files and what the features extracted folder looks like for me.\r\n[Bottom up comp 48-80 050721 mouse 2 part 2_Trim7.csv.zip](https://github.com/sgoldenlab/simba/files/9160195/Bottom.up.comp.48-80.050721.mouse.2.part.2_Trim7.csv.zip)\r\n\"Screen\r\n ", + "created_at": "2022-07-21T14:34:16Z", + "author": "fon215" + }, + { + "body": "False alarm, I think you should be good :)", + "created_at": "2022-07-21T14:48:16Z", + "author": "sronilsson" + }, + { + "body": "Okay, thanks so much!", + "created_at": "2022-07-21T15:34:00Z", + "author": "fon215" + } + ] + }, + { + "title": "OpenCV can't read video stream from file error when trying to get pixels/mm in video parameters", + "body": "Hello! I've been trying to teach myself deeplabcut and SimBA, and would just like to start by saying I think SimBA is a great idea and really well executed! The GUI and documentation are easy to follow. I've successfully created and loaded a new project, but when I try to edit the video parameters to calculate the pixels/mm, I get an error. 
I know a screen with the video playing is suppose to pop up, but instead, my terminal says: \r\nOpenCV: Couldn't read video stream from file \"/Users/lsbmorrisonlab/Documents/Simba/test/test4/project_folder/videos/0\" \r\nas soon as I click the 'set video parameters' button. (I'm using Mac OS v 12.2.1). Then, when I try to change click the Video1 button under Getcoord, it says this:\r\n\r\n\"Screen\r\n\r\nI think it can't change directories for some reason, but I double-checked all of my files, video paths, directories, etc and it all seems to be working. One thing I did notice was that the OpenCV error appended an extra '0' at the end of the path, which leads to nothing. Any help would be great!\r\n\r\nThanks!!!", + "user": "raewanyk", + "reaction_cnt": 0, + "created_at": "2022-07-15T19:04:50Z", + "updated_at": "2022-07-18T16:17:10Z", + "author": "raewanyk", + "comments": [ + { + "body": "Hello @raewanyk! Thanks for trying out SimBA. This error could happen, if the video file that SimBA is trying to open, is corrupted/not an actual video file. The '0' refers to frame number 0, the first frame, which SimBA is trying to open through the OpenCV package. \r\n\r\nIf you navigate to the `project_folder/videos` directory, do you see a video file in there in avi or mp4 format that is of expected size? E.g., if it is 1kb large, thats usually a good sign that something odd is going on with that video. \r\n\r\n", + "created_at": "2022-07-15T19:30:34Z", + "author": "sronilsson" + }, + { + "body": "Hi! Thanks for the quick response. There are mp4 videos in the project/video folder:\r\n\r\n\"Screen\r\n\r\n\r\nDoes it have something to do with the MPEG-4 file??", + "created_at": "2022-07-15T19:39:33Z", + "author": "raewanyk" + }, + { + "body": "Got it, no it should't be any issues with your videos what I can see, do you have a screengrab of your files inside the `project_folder/csv/input_csv` as well? ", + "created_at": "2022-07-15T19:53:22Z", + "author": "sronilsson" + }, + { + "body": "\"Screen\r\n", + "created_at": "2022-07-15T20:02:48Z", + "author": "raewanyk" + }, + { + "body": "Got it. So, for whatever reason. Your video data files are named slightly differently than your video files, SimBA should have corrected this at import. Your video, for example, is called `1014DLC.mp4`, while the imported file is called `1014.csv`. When you click on 1014DLC, there is a mismatch, and SimBA is most likely searching for `1014.mp4` and it breaks, because it doesn't exist. \r\n\r\nThat said, I would also avoid pure integer filenames if you can, as there may be some bugs loitering around from early days and I'm trying to find time and fix. E.g., Importing a video called `Video_1014` is much better than imprting a video called `1014`. \r\n\r\n\r\n\r\n", + "created_at": "2022-07-15T20:14:31Z", + "author": "sronilsson" + }, + { + "body": "\"Screen\r\n\r\n\r\nSo then I should just rename the csv files to match the mp4? Noted on the renaming files--thank you. ", + "created_at": "2022-07-15T20:24:09Z", + "author": "raewanyk" + }, + { + "body": "Yeah that should work - check if that fixes it. ", + "created_at": "2022-07-15T20:25:46Z", + "author": "sronilsson" + }, + { + "body": "Sadly, it did not. I still get the same opencv error and filenotfound", + "created_at": "2022-07-15T20:27:29Z", + "author": "raewanyk" + }, + { + "body": "Ah, thanks for letting me know. 
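Both failure modes discussed above, OpenCV failing to read frame 0 and the csv/video file-name mismatch, can be checked outside SimBA. A minimal sketch, assuming the standard SimBA project layout and mp4 videos (adjust paths and extensions to your own project):

```python
import os
import cv2

project = "project_folder"
videos_dir = os.path.join(project, "videos")
csv_dir = os.path.join(project, "csv", "input_csv")

# 1) Check that every imported csv has a video file with the same stem,
#    e.g. "1014.csv" needs "1014.mp4", not "1014DLC.mp4".
video_stems = {os.path.splitext(f)[0] for f in os.listdir(videos_dir)
               if f.lower().endswith((".mp4", ".avi"))}
csv_stems = {os.path.splitext(f)[0] for f in os.listdir(csv_dir)
             if f.lower().endswith(".csv")}
print("csv files without a matching video:", csv_stems - video_stems)

# 2) Try reading the first frame of each video; a failure here is what
#    surfaces as the "Couldn't read video stream from file" warning.
for stem in sorted(video_stems):
    cap = cv2.VideoCapture(os.path.join(videos_dir, stem + ".mp4"))  # assumes mp4
    ok, _frame = cap.read()
    print(stem, "frame 0 readable:", ok)
    cap.release()
```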
Can you try if it is an integer error, nd rename your videos e.g., \"Video_1014\" and the csv files the same way \"Video_1014\" ", + "created_at": "2022-07-15T20:50:00Z", + "author": "sronilsson" + }, + { + "body": "\r\n\"Screen\r\n\r\n\r\nNo, didn't work :/", + "created_at": "2022-07-15T20:57:54Z", + "author": "raewanyk" + }, + { + "body": "Hmm, thanks let me see if I can replicate", + "created_at": "2022-07-15T20:58:42Z", + "author": "sronilsson" + }, + { + "body": "One more question, which version of simba are you running? ", + "created_at": "2022-07-15T20:59:20Z", + "author": "sronilsson" + }, + { + "body": "I'm not sure, but I downloaded it maybe a week ago on my Mac. This text runs when simba is opening:\r\n\r\n\"Screen\r\n", + "created_at": "2022-07-15T21:09:29Z", + "author": "raewanyk" + }, + { + "body": "you can find out by typing `pip show simba-uw-tf-dev` in your july-12 environment\r\n", + "created_at": "2022-07-15T21:11:09Z", + "author": "sronilsson" + }, + { + "body": "\"Screen\r\n\r\n\r\nUhhh???\r\nIt says Package not found", + "created_at": "2022-07-15T21:18:59Z", + "author": "raewanyk" + }, + { + "body": "that makes more sense now, you might be running an older version that has not been maintained for some years and thats why you are seeing the errors youre seeing. \r\n\r\ntry `pip unininstall simba-uw-tf` and `pip uninstall simba-uw-no-tf`, followed by `pip install simba-uw-tf-dev`, then `simba` and see if everything looks ok with the pixels per millimeter calculations. ", + "created_at": "2022-07-15T22:04:17Z", + "author": "sronilsson" + }, + { + "body": "Hello! Sorry for such a late response--I didn't have access to this computer over the weekend. I tried those commands in the environment I was working in, and just to be safe, I tried them again in a new environment. Both times, I get this error: \r\n\r\n\"Screen\r\n\r\nHowever, since I'm using a mac, I followed the extra instructions to download on a mac, but with pip install simba-uw-tf-dev. If I simply install the dev version by itself and then try to run it, I get the Mac OS X backend error, even though I'm using the correct tkinter and python version as mentioned on the FAQ.\r\n\r\nSorry for so many questions--I really appreciate your help.\r\n ", + "created_at": "2022-07-18T14:03:35Z", + "author": "raewanyk" + }, + { + "body": "No worries about questions :) This error has been coming up a fair bit lately for mac users, and I have not been able to replicate it myself (I'm on a Mac too). There was someone who solved it here last week: https://github.com/sgoldenlab/simba/issues/196 - would you mind trying their solution at the end of that thread? ", + "created_at": "2022-07-18T14:11:55Z", + "author": "sronilsson" + }, + { + "body": "Yes-- thank you so much for all of your help!", + "created_at": "2022-07-18T16:17:10Z", + "author": "raewanyk" + } + ] + }, + { + "title": "Machine Model Training Issue", + "body": "I have an odd error message when I try to run the model.\r\n\r\nEverything worked fine up to \"Run Machine Model\", except the training didn't put 2 of my classifiers into .sav. 
I just ran it again with only those two classifiers, and that worked.\r\n\r\nWhen I press \"Run RF model\", I get this in the GUI terminal -\r\n![Screen Shot 2022-07-14 at 3 07 45 PM](https://user-images.githubusercontent.com/109232212/179063731-58f558d9-f550-49b0-9754-f4db0e7f2af1.png)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 10\r\n - Anaconda\r\n - Python 3.6.10\r\n", + "user": "n-zeng", + "reaction_cnt": 0, + "created_at": "2022-07-14T19:08:19Z", + "updated_at": "2022-07-14T23:12:47Z", + "author": "n-zeng", + "comments": [ + { + "body": "Hi @n-zeng I did notice the bug where it was only creating the first two models, it should be fixed now if you update simba. \r\n\r\nHowever, I am not sure why your model only would be base don 5 features, let me check how this could happen. ", + "created_at": "2022-07-14T19:25:56Z", + "author": "sronilsson" + }, + { + "body": "I was just updating these scripts to make name a little more resilient to errors, so its possible another bug sneaked in. ", + "created_at": "2022-07-14T19:30:11Z", + "author": "sronilsson" + }, + { + "body": "yeah, sorry, found it: while writing the scripts and troubleshooting and cleaning, I limited myself to create only 2 models, based on 5 features, to save time and not have to watch too much processing. And it appears I failed to remove to part of the code that did those limitations 🙃. Big thank you for reporting. ", + "created_at": "2022-07-14T19:32:43Z", + "author": "sronilsson" + }, + { + "body": "> yeah, sorry, found it: while writing the scripts and troubleshooting and cleaning, I limited myself to create only 2 models, based on 5 features, to save time and not have to watch too much processing. And it appears I failed to remove to part of the code that did those limitations 🙃. Big thank you for reporting.\r\n\r\nYeah, no problem! Thanks for the fast response and fix. Should updating solve that issue too? Another quick question, what is the update command to put in terminal?", + "created_at": "2022-07-14T19:34:23Z", + "author": "n-zeng" + }, + { + "body": "Yes, if you update to simba 0.93.3 with `pip install simba-uw-tf-dev --upgrade`, then train the models again (it is likely to be a little slower now) and the issues should be fixed. Make sure you get 0.93.3, because I updated it just seconds ago. ", + "created_at": "2022-07-14T19:36:25Z", + "author": "sronilsson" + }, + { + "body": "But please let me know how it goes", + "created_at": "2022-07-14T19:37:20Z", + "author": "sronilsson" + }, + { + "body": "> pip install simba-uw-tf-dev --upgrade\r\n\r\nI'm not sure what happened, but I can't seem to open the set video parameters window?", + "created_at": "2022-07-14T19:43:08Z", + "author": "n-zeng" + }, + { + "body": "Do you see an error msg in the terminal?", + "created_at": "2022-07-14T19:45:21Z", + "author": "sronilsson" + }, + { + "body": "> Do you see an error msg in the terminal?\r\n\r\nYes, it said something about mp4 and float, sorry I closed the terminal.", + "created_at": "2022-07-14T19:46:18Z", + "author": "n-zeng" + }, + { + "body": "It sounds like the `project_folder/logs/video_config.csv` file may have some incorrect values. ", + "created_at": "2022-07-14T19:49:21Z", + "author": "sronilsson" + }, + { + "body": "> But please let me know how it goes\r\n\r\nAlright, the training took a good bit since I was using 4 classifiers, but yes it works now! 
I'll let you know about that other bug in a minute.", + "created_at": "2022-07-14T20:41:09Z", + "author": "n-zeng" + }, + { + "body": "> But please let me know how it goes\r\n\r\nSorry for sending so many messages, but everything I reported works now! ", + "created_at": "2022-07-14T21:35:19Z", + "author": "n-zeng" + }, + { + "body": "Nice please let me know if anything else pops up", + "created_at": "2022-07-14T23:12:47Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Error in terminal while training models.", + "body": "When I train multiple models or a single model, I get an error in terminal.\r\nIf training multiple models, I get an attribute error; if training a single model, I get a missing file error from a file that is indeed not there.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Get up to train machine model\r\n2. Input machine model settings for either global or multiple\r\n3. Press either \"train\" button\r\n\r\n**Expected behavior**\r\nThe training starts, and runs smoothly through.\r\n\r\n**Screenshots**\r\n\"Screen\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python 3.6.10\r\n - Anaconda\r\n \r\n", + "user": "n-zeng", + "reaction_cnt": 0, + "created_at": "2022-07-13T16:00:07Z", + "updated_at": "2022-07-14T13:46:20Z", + "author": "n-zeng", + "comments": [ + { + "body": "Hi @n-zeng - thanks for reporting, I will look to insert a better error msg. \r\n\r\nSimBA is checking the undersample setting for the specific model, if it is `None` or `Random undersample`. However, it encounters a float value (e.g., 0.8) so you see this error. Could it be that the config for this specific model has the `undersample setting` and `under sample ratio` mixed up? The ratio should be a float value while the setting entry box should have a string. \r\n\r\nP.S you can open up the configs for the specific models in the `project_folder\\configs` directory and check and change the values without going through the GUI. ", + "created_at": "2022-07-13T16:20:47Z", + "author": "sronilsson" + }, + { + "body": "> Hi @n-zeng - thanks for reporting, I will look to insert a better error msg.\r\n> \r\n> SimBA is checking the undersample setting for the specific model, if it is `None` or `Random undersample`. However, it encounters a float value (e.g., 0.8) so you see this error. Could it be that the config for this specific model has the `undersample setting` and `under sample ratio` mixed up? The ratio should be a float value while the setting entry box should have a string.\r\n> \r\n> P.S you can open up the configs for the specific models in the `project_folder\\configs` directory and check and change the values without going through the GUI.\r\n\r\nGreat, thanks a lot! I had undersample setting set to NaN instead of 'None', and that was the root of the issue.", + "created_at": "2022-07-14T13:46:13Z", + "author": "n-zeng" + } + ] + }, + { + "title": "Error opening file during feature extraction", + "body": "Hello again, sorry for all the questions, but I am new to SimBA and trying to learn how it works. In the most recent issue I submitted, I was having trouble getting the Feature Extraction feature to work, and I solved that issue by using the proper videos and making sure the `video_info.csv` file has the correct information. 
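Both here and in a later issue below (where a zero-fps entry triggers an `OverflowError` during feature extraction), the fix came down to the contents of `project_folder/logs/video_info.csv`. A minimal validation sketch, assuming only that the file has an `fps` column, as shown in the maintainer's screenshots:

```python
import pandas as pd

info = pd.read_csv("project_folder/logs/video_info.csv")

# Flag rows that break downstream code: missing or zero fps
# (fps figures into rolling-window calculations, and a zero there
# produces the division/infinity errors reported in these threads).
bad = info[info["fps"].isna() | (info["fps"] <= 0)]
if bad.empty:
    print("video_info.csv looks OK")
else:
    print("Fix these rows before extracting features:")
    print(bad)
```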
However, now that all of that is fixed, I can complete feature extraction but in the SimBA command window I am seeing this stating that no features were extracted: \r\n`Pose-estimation body part setting for feature extraction: user_defined\r\nApplying settings for classical tracking...\r\nExtracting features from 0 file(s)...\r\nSIMBA COMPLETE: Feature extraction complete for 0 video(s). Results are saved inside the project_folder/csv/features_extracted director.`\r\n\r\nAlso, in the Anaconda Prompt command window I am seeing this error message:\r\n\r\n`warning: Error opening file (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:901)\r\nwarning: N:/Ross/1Tayler Sheahan/Automated Behavioral Analysis/Madeline Fontana/SimBA\\Full_SimBA\\project_folder\\videos\\0 (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:902)`\r\n\r\nI was researching this error and I think it has something to do with it not being able to open a video, but could someone please help me understand what is happening here so that I can get it to work? Thanks!", + "user": "fon215", + "reaction_cnt": 0, + "created_at": "2022-07-12T21:22:18Z", + "updated_at": "2022-07-13T00:46:27Z", + "author": "fon215", + "comments": [ + { + "body": "No worries @fon215! This suggests that simba cannot find any files inside the `project_folder/csv/outlier_corrected_movement_location` folder. \r\n\r\nAfter importing your data, SimBA expects you to correct outliers, and then extract features. Did you click to correct outliers or alternatively clicked skip outlier correct to confirm no outlier correction needed?\r\n", + "created_at": "2022-07-12T23:14:24Z", + "author": "sronilsson" + }, + { + "body": "Okay, I must have made some strange minor mistake but I just went through the steps again and the features were extracted for all of my videos successfully this time. \r\n\r\nThanks again, anyway!", + "created_at": "2022-07-12T23:22:51Z", + "author": "fon215" + }, + { + "body": "Nice!", + "created_at": "2022-07-12T23:35:22Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Crash when opening simba through command line", + "body": "\r\nPython 3.6 crash when I try to open Simba in command line. I'm using Mac OS Monterey 12.4. And I'm using anaconda for it and follow the exact the same instruction on your documentation page.\r\n\r\nThanks in advance for any help and here's the full error output.\r\n\r\n\"Screen\r\n", + "user": "yufang1039", + "reaction_cnt": 0, + "created_at": "2022-07-11T21:17:34Z", + "updated_at": "2022-07-13T00:04:25Z", + "author": "yufang1039", + "comments": [ + { + "body": "Hi @yufang1039!\r\nI had another user report what I think is a related issue not too long ago, although they were using an older macOS version. I am on Mac myself (12.4 (21F79)) and I have not been able to recreate this issue. I also did not see it in 12.3.1. My immediate thought was that it had something to do with running an older MacOS version, but your report suggests the issue is caused by something else.. see more info here faq entry 20 although it is not too much help: https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#20-i-have-installed-simba-on-macos-when-i-try-to-launch-simba-by-typing-simba-i-get-a-long-error-message-which-ends-with-libcabidylib-terminating-with-uncaught-exception-of-type-nsexception-abort-trap-6 Could you check if it runs on python3.7? ", + "created_at": "2022-07-11T23:36:04Z", + "author": "sronilsson" + }, + { + "body": "> Hi @yufang1039! 
I had another user report what I think is a related issue not too long ago, although they were using an older macOS version. I am on Mac myself (12.4 (21F79)) and I have not been able to recreate this issue. I also did not see it in 12.3.1. My immediate thought was that it had something to do with running an older MacOS version, but your report suggests the issue is caused by something else.. see more info here faq entry 20 although it is not too much help: https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#20-i-have-installed-simba-on-macos-when-i-try-to-launch-simba-by-typing-simba-i-get-a-long-error-message-which-ends-with-libcabidylib-terminating-with-uncaught-exception-of-type-nsexception-abort-trap-6 Could you check if it runs on python3.7?\r\n\r\nHi @sronilsson, I made few changes but then encounter new error message.\r\n\r\nChanges I made:\r\n1. Open Xcode to agree few agreements.\r\n2. Use python=3.7 environment.\r\n\r\nThings happen during installation process:\r\n1. conda uninstall shapely doesn't find package so I directly install it.\r\n\r\nHere's the new error output, thanks again for the help!\r\n\r\n\"Screen\r\n\r\n\r\n", + "created_at": "2022-07-12T16:26:25Z", + "author": "yufang1039" + }, + { + "body": "Hi @sronilsson \r\nOne more info. I changed back to python 3.6 to see if the problem is with Xcode, but it still crash. Python 3.7 won't crash so it looks like the problem is with python version. But it does generate new error message.", + "created_at": "2022-07-12T16:51:52Z", + "author": "yufang1039" + }, + { + "body": "hi @yufang1039! Thanks, the error msg in the image, suggest that you have to downgrade jinja to a version lower than 3.0 https://stackoverflow.com/questions/72651555/attributeerror-module-jinja2-ext-has-no-attribute-autoescape-while-trying-t \r\n\r\nI presume it did not boot up after that error msg? I am running this xcode version and SimBA runs fine if it helps:\r\n![image](https://user-images.githubusercontent.com/34761092/178555761-f482c150-3c87-4d64-9002-aa1299d14df0.png)\r\n", + "created_at": "2022-07-12T17:31:14Z", + "author": "sronilsson" + }, + { + "body": "hi @sronilsson I downgrade the jinja2 package version to 2.11.3, the jinja2 error disappears but then python 3.7 crash with the same error output. The Xcode version of mine is also 13.4.1. \r\n\r\n\"Screen\r\n\r\nI also tried to conda uninstall and then install matplotlib due to the error message but it still doesn't work. The final error out is still the same:\r\n\r\n\"Screen\r\n \r\nIs it possible that is because the Mac OS version is too new? I'm using 12.4", + "created_at": "2022-07-12T18:09:03Z", + "author": "yufang1039" + }, + { + "body": "I don't think it is the OS, this is my OS below and it runs. Xcode sounded plausible, but then we run the same version too. I am runnin git now from Python 3.6.13, conda version 4.9.2, launched from anaconda navigator 1.9.12. I've also pasted a list of all packages installed in my environment to see if you can see anything out of place in ereference to your environment. Google does not come up with a lot on this error. 
\r\n\r\n\r\nattrs 21.4.0 pypi_0 pypi\r\nbrotli 1.0.9 pypi_0 pypi\r\nca-certificates 2022.4.26 hecd8cb5_0 \r\ncefpython3 66.0 pypi_0 pypi\r\ncertifi 2021.5.30 py36hecd8cb5_0 \r\nclick 8.0.4 pypi_0 pypi\r\ncloudpickle 2.1.0 pypi_0 pypi\r\ncolour 0.1.5 pypi_0 pypi\r\ncycler 0.11.0 pypi_0 pypi\r\ndash 1.14.0 pypi_0 pypi\r\ndash-color-picker 0.0.1 pypi_0 pypi\r\ndash-colorscales 0.0.4 pypi_0 pypi\r\ndash-core-components 1.10.2 pypi_0 pypi\r\ndash-html-components 1.0.3 pypi_0 pypi\r\ndash-renderer 1.6.0 pypi_0 pypi\r\ndash-table 4.9.0 pypi_0 pypi\r\ndask 2021.3.0 pypi_0 pypi\r\ndataclasses 0.8 pypi_0 pypi\r\ndecorator 4.4.2 pypi_0 pypi\r\ndtreeviz 0.8.1 pypi_0 pypi\r\neli5 0.10.1 pypi_0 pypi\r\nflask 2.0.3 pypi_0 pypi\r\nflask-compress 1.12 pypi_0 pypi\r\nfuture 0.18.2 pypi_0 pypi\r\nh5py 2.9.0 pypi_0 pypi\r\nimageio 2.9.0 pypi_0 pypi\r\nimbalanced-learn 0.6.2 pypi_0 pypi\r\nimblearn 0.0 pypi_0 pypi\r\nimgaug 0.4.0 pypi_0 pypi\r\nimportlib-metadata 4.8.3 pypi_0 pypi\r\nimutils 0.5.2 pypi_0 pypi\r\nitsdangerous 2.0.1 pypi_0 pypi\r\njinja2 3.0.3 pypi_0 pypi\r\njoblib 1.1.0 pypi_0 pypi\r\nkiwisolver 1.3.1 pypi_0 pypi\r\nlibcxx 12.0.0 h2f01273_0 \r\nlibffi 3.3 hb1e8313_2 \r\nllvmlite 0.31.0 pypi_0 pypi\r\nmarkupsafe 2.0.1 pypi_0 pypi\r\nmatplotlib 3.0.3 pypi_0 pypi\r\nncurses 6.3 hca72f7f_2 \r\nnetworkx 2.5.1 pypi_0 pypi\r\nnumba 0.48.0 pypi_0 pypi\r\nnumexpr 2.6.9 pypi_0 pypi\r\nnumpy 1.18.1 pypi_0 pypi\r\nopencv-python 3.4.5.20 pypi_0 pypi\r\nopenssl 1.1.1o hca72f7f_0 \r\npandas 0.25.3 pypi_0 pypi\r\npatsy 0.5.2 pypi_0 pypi\r\npillow 5.4.1 pypi_0 pypi\r\npip 21.2.2 py36hecd8cb5_0 \r\nplotly 4.9.0 pypi_0 pypi\r\npyarrow 0.17.1 pypi_0 pypi\r\npyparsing 3.0.9 pypi_0 pypi\r\npython 3.6.13 h88f2d9e_0 \r\npython-dateutil 2.8.2 pypi_0 pypi\r\npython-graphviz 0.11 pypi_0 pypi\r\npytz 2022.1 pypi_0 pypi\r\npywavelets 1.1.1 pypi_0 pypi\r\npyyaml 5.3.1 pypi_0 pypi\r\nreadline 8.1.2 hca72f7f_1 \r\nretrying 1.3.3 pypi_0 pypi\r\nscikit-image 0.14.2 pypi_0 pypi\r\nscikit-learn 0.22.2 pypi_0 pypi\r\nscipy 1.1.0 pypi_0 pypi\r\nseaborn 0.9.0 pypi_0 pypi\r\nsetuptools 58.0.4 py36hecd8cb5_0 \r\nshap 0.35.0 pypi_0 pypi\r\nshapely 1.7.0 pypi_0 pypi\r\nsimba-uw-tf-dev 0.92.2 pypi_0 pypi\r\nsix 1.16.0 pypi_0 pypi\r\nsqlite 3.38.3 h707629a_0 \r\nstatsmodels 0.9.0 pypi_0 pypi\r\ntables 3.6.1 pypi_0 pypi\r\ntabulate 0.8.3 pypi_0 pypi\r\ntk 8.6.12 h5d9f67b_0 \r\ntoolz 0.11.2 pypi_0 pypi\r\ntqdm 4.30.0 pypi_0 pypi\r\ntyping-extensions 4.1.1 pypi_0 pypi\r\nwerkzeug 2.0.3 pypi_0 pypi\r\nwheel 0.37.1 pyhd3eb1b0_0 \r\nwxpython 4.0.4 pypi_0 pypi\r\nxgboost 0.90 pypi_0 pypi\r\nxlrd 1.2.0 pypi_0 pypi\r\nxz 5.2.5 hca72f7f_1 \r\nyellowbrick 0.9.1 pypi_0 pypi\r\nzipp 3.6.0 pypi_0 pypi\r\nzlib 1.2.12 h4dc903c_2 \r\n\r\n\r\n\r\n![image](https://user-images.githubusercontent.com/34761092/178589368-ed4ddec7-d5c5-4ef3-b04b-863949ff4b66.png)\r\n", + "created_at": "2022-07-12T20:36:45Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson, I would very appreciate if you can send me a copy of your environment.yml file so I can directly copy your conda environment and see if it works.", + "created_at": "2022-07-12T20:52:44Z", + "author": "yufang1039" + }, + { + "body": "👍 i have to zip it here\r\n\r\n[environment.yml.zip](https://github.com/sgoldenlab/simba/files/9096855/environment.yml.zip)\r\n ", + "created_at": "2022-07-12T21:10:32Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson \r\n\r\nI unzip the yml file and import it through anaconda. 
There's a pip error about \r\n\r\n\"Screen\r\n\r\nI fix it by remove the \"python-\" and the importation is successful. Then, when I type simba, I got the error message in:\r\n\r\n https://stackoverflow.com/questions/21784641/installation-issue-with-matplotlib-python\r\n\r\nBut I'm able to solve it by follow the instruction in that post.\r\n\r\nThen I try to type simba again, but got new error message:\r\n\r\n\"Screen\r\n\r\nI fix it by conda install shapely. And it finally works! Thanks so much for your help! Let me know if you want me to write a full step instruction to close this issue.\r\n\r\n\"Screen\r\n\r\n", + "created_at": "2022-07-12T22:12:59Z", + "author": "yufang1039" + }, + { + "body": "Wow! Yes! You don't have to be super detailed, but I think it could be helpful. If you do, I will include it in the FAQ (or a link to it if longer) and replace my incorrect assumptions in there...", + "created_at": "2022-07-12T23:16:20Z", + "author": "sronilsson" + }, + { + "body": "1. use anaconda to import this environment.\r\n[environment.yml.zip](https://github.com/sgoldenlab/simba/files/9097623/environment.yml.zip)\r\n\r\n2. There is a directory in your root called ~/.matplotlib. Create a file ~/.matplotlib/matplotlibrc there and add the following code: backend: TkAgg\r\nhttps://stackoverflow.com/questions/21784641/installation-issue-with-matplotlib-python\r\n\r\n3. Do \"conda install shapely\" in this new anaconda environment\r\n\r\n4. Type simba to check if it works!\r\n", + "created_at": "2022-07-12T23:58:12Z", + "author": "yufang1039" + }, + { + "body": "I see, thanks!", + "created_at": "2022-07-13T00:04:24Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Overflow error in extracting features step in pipeline", + "body": "**Describe the bug**\r\nI am working through the pipeline and successfully did outlier correction and started the `Extract Features` step but encountered the error here and the SimBA command window was stuck on extracting features from video 1. Could someone help me understand what is happening here.\r\n\r\n`Exception in thread Thread-5:\r\nTraceback (most recent call last):\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 6172, in extractfeatures\r\n feature_extractor.extract_features()\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\features_scripts\\extract_features_user_defined_new.py\", line 104, in extract_features\r\n roll_windows.append(int(fps / self.roll_windows_values[i]))\r\nOverflowError: cannot convert float infinity to integer`\r\n\r\n\r\n\r\n**Desktop (Lenovo P340 Workstation (ThinkStation) - Type 30DH (Tower Form Factor)):**\r\n\r\n-OS build: 19043.1706 (Processor: Intel(R) Core(TM) i9-10900K CPU @ 3.70GHz 3.70 GHz)\r\n-Python Version 3.10.4\r\n-Are you using anaconda? - Yes\r\n\r\n**Additional context**\r\n\r\nI am working through a remote desktop while using this software.\r\n", + "user": "fon215", + "reaction_cnt": 0, + "created_at": "2022-07-09T00:33:11Z", + "updated_at": "2022-07-09T20:14:42Z", + "author": "fon215", + "comments": [ + { + "body": "Hi @fon215! 
If you open up the `project_folder/logs/video_info.csv` file, how does it look like, do you have a fps value for all of your videos, something similar like the image below?\r\n\"image\"\r\n\r\n\r\nAlso, what is the minimum fps you see in the `fps` column?", + "created_at": "2022-07-09T01:04:47Z", + "author": "sronilsson" + }, + { + "body": "The screenshots below show what that `video_info.csv` file looks like. The first screenshot shows the end of the file, where my last video does not have values for the first three columns. I am not sure if this has something to do with it, but the folder I imported the videos from contained the videos before I performed the DeepLabCut training, so some are labeled while the others are not. The second screenshot shows the top of the `video_info.csv` file where you can see what I am talking about. I tried to delete the ones without labels, but then ended up going back to import them back in before continuing to the video parameters, outlier correction, and now the feature extraction steps. However, the total labeled number of videos is 29, which it said it was extracting features from video 1/29 when it got stuck and this error appeared. I am not sure if something with that needs changed. \r\n\r\n\"Screen\r\n\r\n\"Screen\r\n\r\n\r\nThe lowest number of `fps` is 25.", + "created_at": "2022-07-09T16:06:34Z", + "author": "fon215" + }, + { + "body": "Hi @fon215 ! The error you saw, related to infinity, is something you'd see if you try to divide a value by `0`. SimBA tries to calculate how many frames exist in different time windows and therefore need the fps of each of your videos.\r\n\r\nOne of these videos appear to be entered as having zero frames per second, see screenshot below, which is likely causing the issue. I'd also recommend only importing videos into your project that you want to analyze, and skip the videos with the `_labelled` suffix. \r\n\r\n\"image\"\r\n ", + "created_at": "2022-07-09T16:27:30Z", + "author": "sronilsson" + }, + { + "body": "Okay, thank you! So you suggest that I do not include videos that have the skeleton and labels for body parts produced by DeepLabCut, and just use the original videos? I am assuming that this means that the CSV files produced by DLC are the tracking data needed and contain the information that I see in the videos with the`labeled`suffix. Let me know if I am not understanding this correctly!", + "created_at": "2022-07-09T16:40:09Z", + "author": "fon215" + }, + { + "body": "Yes SimBA has no use for the visualizations generated through DLC, SimBA only wants the original videos representing the tracking data you want to analyze. ", + "created_at": "2022-07-09T19:23:14Z", + "author": "sronilsson" + }, + { + "body": "Okay, thank you!", + "created_at": "2022-07-09T20:14:41Z", + "author": "fon215" + } + ] + }, + { + "title": "Error while trying to generate new project", + "body": "**Describe the bug**\r\nI just had another error with outlier correction that I was attempting to fix by creating a new project. However, when I was doing this, adding my own pose configuration image and number of body parts, I followed the steps in the window and then once I added my image, entered the body part names, and clicked through the window of marking where each part was by double left clicking, the window went blank as shown in the screenshot below. This is the error message I received in the command window. Now I cannot generate a new project even when I close and reopen SimBA. 
Could someone help with this please?\r\n\r\n`Traceback (most recent call last):\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 2963, in __init__\r\n self.option_mice, optionsBasePhotosList = bodypartConfSchematic()\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\drop_bp_cords.py\", line 101, in bodypartConfSchematic\r\n optionsBaseNameList = pd.read_csv(optionsBaseListNamesPath, header=None)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\io\\parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\io\\parsers.py\", line 463, in _read\r\n data = parser.read(nrows)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1154, in read\r\n ret = self._engine.read(nrows)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\io\\parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 881, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 896, in pandas._libs.parsers.TextReader._read_low_memory\r\n File \"pandas/_libs/parsers.pyx\", line 950, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 937, in pandas._libs.parsers.TextReader._tokenize_rows\r\n File \"pandas/_libs/parsers.pyx\", line 2132, in pandas._libs.parsers.raise_parser_error\r\npandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 14, saw 2`\r\n\r\n\r\n**Screenshots**\r\n\"Screen\r\n\r\n![itch mice body parts](https://user-images.githubusercontent.com/99504167/177837512-643b18a5-cb3d-45ea-a633-9b5683278f3b.jpg)\r\n\r\nThis is the image I used for the configuration.\r\n\r\n\r\n\r\n**Desktop (Lenovo P340 Workstation (ThinkStation) - Type 30DH (Tower Form Factor)):**\r\n\r\n-OS build: 19043.1706 (Processor: Intel(R) Core(TM) i9-10900K CPU @ 3.70GHz 3.70 GHz)\r\n-Python Version 3.10.4\r\n-Are you using anaconda? - Yes\r\n\r\n**Additional context**\r\n\r\nI am working through a remote desktop while using this software.\r\n", + "user": "fon215", + "reaction_cnt": 0, + "created_at": "2022-07-07T17:48:20Z", + "updated_at": "2022-07-08T01:43:20Z", + "author": "fon215", + "comments": [ + { + "body": "hi @fon215! Interesting, just so I understand, the error happens when you click the button at the bottom of this img, `Save Pose Config`? \r\n![image](https://user-images.githubusercontent.com/34761092/177841973-c11c7781-9000-4899-92a4-c21fae088883.png)\r\n", + "created_at": "2022-07-07T18:17:19Z", + "author": "sronilsson" + }, + { + "body": "Yes, that is when the error happened. Now I cannot even get that window open because all of the tabs under `Create new project` when I click it are blank.", + "created_at": "2022-07-07T18:28:25Z", + "author": "fon215" + }, + { + "body": "Which version of simba are you running? 
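The `ParserError: Expected 1 fields in line 14, saw 2` above is generic pandas behavior, not specific to SimBA: a file read with `header=None` was inferred to have one column from its first row, and line 14 contains a stray comma. A minimal reproduction:

```python
import io
import pandas as pd

# 13 clean one-column rows, then a row with an extra comma on line 14.
rows = [f"bodypart_{i}" for i in range(1, 14)] + ["nose,extra"]
buf = io.StringIO("\n".join(rows))

# Raises: pandas.errors.ParserError: Error tokenizing data.
# C error: Expected 1 fields in line 14, saw 2
pd.read_csv(buf, header=None)
```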
you can find out by typing `pip show simba-uw-tf-dev`", + "created_at": "2022-07-07T18:37:18Z", + "author": "sronilsson" + }, + { + "body": "This is the version listed: 0.91.9\r\n\r\n`Name: Simba-UW-tf-dev\r\nVersion: 0.91.9\r\nSummary: Toolkit for computer classification of complex social behaviors in experimental animals\r\nHome-page: https://github.com/sgoldenlab/simba\r\nAuthor: Simon Nilsson, Jia Jie Choong, Nastacia Goodwin, Sophia Hwang, Sam Golden\r\nAuthor-email: goldenneurolab@gmail.com\r\nLicense: GNU Lesser General Public License v3 (LGPLv3)\r\nLocation: c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\r\nRequires: cefpython3, dash, dash-color-picker, dash-colorscales, dash-core-components, dash-html-components, dtreeviz, eli5, graphviz, h5py, imblearn, imgaug, imutils, matplotlib, numba, numexpr, numpy, opencv-python, pandas, Pillow, plotly, pyarrow, pyyaml, scikit-image, scikit-learn, scipy, seaborn, shap, shapely, statsmodels, tables, tabulate, tqdm, wxpython, xgboost, xlrd, yellowbrick\r\nRequired-by:`", + "created_at": "2022-07-07T18:40:47Z", + "author": "fon215" + }, + { + "body": "Can you try (i) update simba with `pip install simba-uw-tf-dev --upgrade`. I'm not sure that will fixit though but worth a try before I dig as I can't recreate the error. ", + "created_at": "2022-07-07T18:47:22Z", + "author": "sronilsson" + }, + { + "body": "Okay, so that worked temporarily and I got through the steps again but when I pressed `Save pose configurations` I got the same error again. After using the command you listed, these screenshots are showing the steps and screens I saw, ending with me having the blank screens again.\r\n\"Screen\r\n\r\n\"Screen\r\n\r\n", + "created_at": "2022-07-07T18:57:44Z", + "author": "fon215" + }, + { + "body": "OK, I think I might understand now, did you click the bodyparts in the image? Or did you draw the circles before importing the image into simba? ", + "created_at": "2022-07-07T19:00:39Z", + "author": "sronilsson" + }, + { + "body": "Oh okay, I did not realize that, my mistake. I drew circles before importing the image into SimBA.", + "created_at": "2022-07-07T19:02:16Z", + "author": "fon215" + }, + { + "body": "There are some instructions here: https://github.com/sgoldenlab/simba/blob/master/docs/Pose_config.md", + "created_at": "2022-07-07T19:03:01Z", + "author": "sronilsson" + }, + { + "body": "here Is a gif I created for how to do it: https://github.com/sgoldenlab/simba/blob/master/images/New_pose_config.gif\r\n", + "created_at": "2022-07-07T19:04:15Z", + "author": "sronilsson" + }, + { + "body": "Sorry for all the questions, I just want to make sure I am doing this right! Now, I cannot get off of the blank tabs to properly follow the steps for the pose configurations. I am not sure if I will need to uninstall or something like that. Could you help me fix this somehow to get back to the working `Create new project`. I am getting that same error as before when I go to launch.", + "created_at": "2022-07-07T19:22:36Z", + "author": "fon215" + }, + { + "body": "What happens if you restart SimBA? Do you still see blank tabs?", + "created_at": "2022-07-07T19:57:09Z", + "author": "sronilsson" + }, + { + "body": "When I close the application and reactivate the conda environment and then launch SimBa, I go to click on `File` then `Create a new project` and the blank tabs appear. I am not sure if this is what you meant. 
I also just noticed that is appears in the command prompt after the other error I showed you when I follow these steps.\r\n\r\n`can't invoke \"event\" command: application has been destroyed\r\n while executing\r\n\"event generate $w <>\"\r\n (procedure \"ttk::ThemeChanged\" line 6)\r\n invoked from within\r\n\"ttk::ThemeChanged\"`", + "created_at": "2022-07-07T20:49:33Z", + "author": "fon215" + }, + { + "body": "This one is new! Can you try to uninstall simba with `pip uninstall simba-uw-tf-dev`, reinstall simba with `pip install simba-uw-tf-dev`, recreate your body-part configuration, and see if you encounter the same error? ", + "created_at": "2022-07-08T00:34:13Z", + "author": "sronilsson" + }, + { + "body": "Okay, that seemed to get me past the error and I successfully generated the project and pose configurations. Thank you so much for your help!", + "created_at": "2022-07-08T01:43:20Z", + "author": "fon215" + } + ] + }, + { + "title": "Error during outlier correction step", + "body": "**Describe the bug**\r\nI am working through the SimBA pipeline and I am working on the Outlier correction step following the tutorial steps on the documentation page. I am getting this error when I set the settings. The distance between ears is about 31.75mm for the location criterion and the distance between nose and tail base is about 63.5mm for the movement criterion. I put these values in the Outlier settings. I am a little confused of how this step works, did I do this properly and did I put the right numbers? When I go to run the outlier correction this is what I see in the command window. Could anyone be helpful in understanding what is happening?\r\n\r\n`Exception in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 6279, in correct_outlier\r\n dev_move_user_defined(configini)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\outlier_scripts\\movement\\correct_devs_mov_user_defined.py\", line 120, in dev_move_user_defined\r\n csv_df.columns = colHeads\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\core\\generic.py\", line 5192, in __setattr__\r\n return object.__setattr__(self, name, value)\r\n File \"pandas/_libs/properties.pyx\", line 67, in pandas._libs.properties.AxisProperty.__set__\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\core\\generic.py\", line 690, in _set_axis\r\n self._data.set_axis(axis, labels)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 183, in set_axis\r\n \"values have {new} elements\".format(old=old_len, new=new_len)\r\nValueError: Length mismatch: Expected axis has 18 elements, new values have 27 elements`\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Outlier correction' step\r\n2. Click on 'Settings\"\r\n3. Enter values, click 'Confirm\"\r\n4. Press 'Run Outlier Correction'\r\n5. See error\r\n\r\n**Expected behavior**\r\nOutlier correction should properly remove outliers.\r\n\r\n**Desktop (Lenovo P340 Workstation (ThinkStation) - Type 30DH (Tower Form Factor)):**\r\n\r\n- OS build: 19043.1706 (Processor: Intel(R) Core(TM) i9-10900K CPU @ 3.70GHz 3.70 GHz)\r\n- Python Version 3.10.4\r\n- Are you using anaconda? 
- Yes\r\n\r\nAdditional context\r\nI am working through a remote desktop while using this software.\r\n", + "user": "fon215", + "reaction_cnt": 0, + "created_at": "2022-07-07T00:13:26Z", + "updated_at": "2022-07-07T17:14:46Z", + "author": "fon215", + "comments": [ + { + "body": "Hi @fon215! Thanks for reporting. First, the menus circled below, they want a **multiplier**, not a distance value. SimBA will calculate the mean (or median value) between the two body-parts (in this example, the right ear and the tail). SimBA will then use the entered multiplier to get a threshold value. For example, for the movement criterion, if the median distance between the right ear and the tail is 100mm, and the **multiplier** is set to 2, then any body-part movement larger than 100mm*2 is treated as an outlier movement, and the movement is removed, and the body-part is placed back in the most recent reliably coordinate before the movement. There is more info on this [here](https://github.com/sgoldenlab/simba/blob/master/misc/Outlier_settings.pdf)\r\n\r\nThe error you are seeing however is unrelated to this. After correcting the outliers, SimBA will modify the heading of the file so it is standardized regardless from whi\r\n![Untitled 2](https://user-images.githubusercontent.com/34761092/177791973-735269f8-5a3c-441b-bc26-4c790520d45c.png)\r\nch pose-estimation package the data comes from. SimBA wants to insert 27 new headings, and believes that you are tracking 9 bodyparts (27 / 3). However, the file only contains 6 body-parts (18 /3). How many body-parts does your data contain, and what did you specify in the drop-down menues for body-part configuration when creating your project? If you open the `project_config.ini` and scroll down to ` pose_estimation_body_parts =` what do you see it equal? \r\n", + "created_at": "2022-07-07T14:01:18Z", + "author": "sronilsson" + }, + { + "body": "Thank you so much! Now I understand the settings for outlier correction better!\r\n\r\nWhenever I look in the `project_config.ini` I see `pose_estimation_body_parts = 9`, which I am understanding now that I selected that wrong in the drop down menu when creating the project because I am using 6 body parts for my data, which I marked the nose, the left forepaw, the right forepaw, the left hind paw, the right hind paw, and the tail base in my DLC tracking data. Do I just change the number next to `pose_estimation_body_parts =` to 6? Let me know the best way to fix this.", + "created_at": "2022-07-07T14:10:29Z", + "author": "fon215" + }, + { + "body": "Hi @fon215!\r\n\r\nCorrect! There are a few “built-in” pose-estimation body-part configurations in SimBA, that you can choose from the drop-downs. You can see what kind of data they anticipate in the images that are showed below the dropdowns where you create your project. You can also read about them here: https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_DLC.md\r\n\r\nYour body-parts, e.g., `hand paws`, is not available in these built-in configurations. That means we have to create and choose to use a `user-defined body-part configuration` in the SimBA dropdown menu when we create our project. There is a tutorial for how to do that here: https://github.com/sgoldenlab/simba/blob/master/docs/Pose_config.md Let me know if that makes sense!\r\n", + "created_at": "2022-07-07T15:30:30Z", + "author": "sronilsson" + }, + { + "body": "Yes, that makes sense! 
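The multiplier logic described above comes down to a few lines of arithmetic. A sketch of the movement criterion only, not SimBA's actual implementation, using the example numbers from the explanation (median reference distance of 100mm and a multiplier of 2):

```python
import numpy as np

# Per-frame coordinates of one body part (x, y), in mm.
coords = np.array([[10.0, 10.0], [11.0, 10.5], [300.0, 250.0], [11.5, 11.0]])

median_ref_dist = 100.0  # e.g. median right-ear-to-tail distance
multiplier = 2.0
threshold = median_ref_dist * multiplier  # movement > 200 mm = outlier

corrected = coords.copy()
for i in range(1, len(corrected)):
    movement = np.linalg.norm(corrected[i] - corrected[i - 1])
    if movement > threshold:
        # Place the body part back at the most recent reliable location.
        corrected[i] = corrected[i - 1]
print(corrected)  # the 300,250 jump is replaced by the previous coordinate
```

Note that the multiplier is unitless: entering the raw mm distance (e.g. 63.5) as the multiplier, as in the original question, makes the threshold enormous and effectively disables the correction.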
So, do I just change that number to 6 or do I have to create another project to properly include the body parts I want to?", + "created_at": "2022-07-07T15:34:25Z", + "author": "fon215" + }, + { + "body": "If you change it to `6`, the SimBA will think its one of the default \"in-built\" settings with 6 body-parts, rather than a user-defined body-part configuration. I recommend creating a new project with user-defined body-part configuration!", + "created_at": "2022-07-07T16:22:38Z", + "author": "sronilsson" + }, + { + "body": "Okay, thank you!", + "created_at": "2022-07-07T16:41:02Z", + "author": "fon215" + }, + { + "body": "Actually, I have another question about this. Do I have to generate an image like the default ones in the settings for creating a new project where the body parts are marked on a mouse? Or is there another way this is generated?", + "created_at": "2022-07-07T16:52:19Z", + "author": "fon215" + }, + { + "body": "SimBA will ask you to create an image, however, it is just a place holder, and not used for anything important in SimBA. You can grab any image and select the locations of your body-parts. ", + "created_at": "2022-07-07T17:09:55Z", + "author": "sronilsson" + }, + { + "body": "Okay, thanks so much, again!", + "created_at": "2022-07-07T17:14:46Z", + "author": "fon215" + } + ] + }, + { + "title": "Fail to start SimBA due to incorrect splash image name", + "body": "**Describe the bug**\r\nIn a conda environment under Linux, starting `simba` fails with the following:\r\n```\r\n# simba (simbaenv) 135ms  Wed 06 Jul 2022 09:51:09 AM EDT\r\nTraceback (most recent call last):\r\n File \"/home/florian/miniconda3/envs/simbaenv/bin/simba\", line 8, in \r\n sys.exit(main())\r\n File \"/home/florian/miniconda3/envs/simbaenv/lib/python3.6/site-packages/simba/SimBA.py\", line 8082, in main\r\n app = SplashScreen(root)\r\n File \"/home/florian/miniconda3/envs/simbaenv/lib/python3.6/site-packages/simba/SimBA.py\", line 8045, in __init__\r\n self.Splash()\r\n File \"/home/florian/miniconda3/envs/simbaenv/lib/python3.6/site-packages/simba/SimBA.py\", line 8053, in Splash\r\n self.image = PIL.Image.open(os.path.join(scriptdir, \"splash_050122.PNG\"))\r\n File \"/home/florian/miniconda3/envs/simbaenv/lib/python3.6/site-packages/PIL/Image.py\", line 2634, in open\r\n fp = builtins.open(filename, \"rb\")\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/florian/miniconda3/envs/simbaenv/lib/python3.6/site-packages/simba/splash_050122.PNG'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nInstall simba in a conda env:\r\n```\r\nconda create -n simbaenv python=3.6.10 wxpython==4.0.4\r\nconda activate simbaenv\r\npip install simba-uw-tf-dev\r\nsimba\r\n```\r\n\r\n**Expected behavior**\r\nSimba starts successfully.\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: openSUSE Tumbleweed snapshot 20220703\r\n - Python Version: 3.6.10\r\n - Are you using anaconda? yes\r\n \r\nI indeed do **not** see the requested file in the installed simba package, but I do see that the file is present with the *png* extension in lower case. Changing this to lower case in `simBA.py` indeed seems to fix the issue and allow simba to start successfully. 
See below:\r\n```diff\r\nWed 06 Jul 2022 10:00:02 AM EDT\r\n--- SimBA.py 2022-07-06 09:58:16.023680043 -0400\r\n+++ SimBA_patched.py 2022-07-06 09:59:35.350019182 -0400\r\n@@ -8050,7 +8050,7 @@\r\n if currentPlatform == 'Windows':\r\n self.image = PIL.Image.open(os.path.join(scriptdir,\"splash_050122.png\"))\r\n if (currentPlatform == 'Linux') or (currentPlatform == 'Darwin'):\r\n- self.image = PIL.Image.open(os.path.join(scriptdir, \"splash_050122.PNG\"))\r\n+ self.image = PIL.Image.open(os.path.join(scriptdir, \"splash_050122.png\"))\r\n self.imgSplash = ImageTk.PhotoImage(self.image)\r\n```\r\n\r\nOf course, the distinction between platforms there seems redundant if the filename is now the same...\r\nHope this helps,", + "user": "florianduclot", + "reaction_cnt": 0, + "created_at": "2022-07-06T14:10:25Z", + "updated_at": "2022-07-07T13:28:46Z", + "author": "florianduclot", + "comments": [ + { + "body": "Super helpful @florianduclot :) In the past, I've found that the PIL package required the PNG extension to be capitalized on Mac or Linux, even though the true file extension isn't actually capitalized... What I'll do is insert an exception, and hope that it covers it, e.g.,\r\n\r\n```\r\nif (currentPlatform == 'Linux') or (currentPlatform == 'Darwin'):\r\n try:\r\n self.image = PIL.Image.open(os.path.join(scriptdir, \"splash_050122.PNG\"))\r\n except FileNotFoundError:\r\n self.image = PIL.Image.open(os.path.join(scriptdir, \"splash_050122.png\"))\r\n```\r\n\r\n", + "created_at": "2022-07-06T14:28:31Z", + "author": "sronilsson" + }, + { + "body": "Sounds good!\r\nThanks a lot for yet another very prompt answer!", + "created_at": "2022-07-06T15:36:00Z", + "author": "florianduclot" + } + ] + },
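The try/except fallback suggested in the thread above covers the two known spellings. A slightly more general alternative is to resolve the file name case-insensitively; a minimal sketch (the helper is hypothetical, not SimBA's actual code):

```python
import os

def find_file_case_insensitive(directory: str, filename: str) -> str:
    """Return the real path of `filename` inside `directory`, ignoring case."""
    for entry in os.listdir(directory):
        if entry.lower() == filename.lower():
            return os.path.join(directory, entry)
    raise FileNotFoundError(f"{filename} not found in {directory}")

# Illustrative usage: works whether the file on disk ends in .png or .PNG.
# splash_path = find_file_case_insensitive(scriptdir, "splash_050122.png")
```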
+ { + "title": "pickle.UnpicklingError: invalid load key, ','. When Running Model", + "body": "When I select Run Model under the Validate Model on a Single Video section of the Run machine model tab, I see the following in the SimBA terminal window:\r\n\r\n![image](https://user-images.githubusercontent.com/107413137/176700027-09398207-c67d-430b-8c76-dc0bc01d891c.png)\r\n\r\nBut the following in my anaconda terminal:\r\n\r\n![image](https://user-images.githubusercontent.com/107413137/176699973-7745892a-a2e4-4d8e-87ca-722ddae24a77.png)\r\n\r\nAnd the SimBA terminal window does not change or give me any indication that the model is finished running after 48 hours (I am only running one 3-hour video for testing purposes). Any help on what I could do about this error or why this might be happening would be greatly appreciated! ", + "user": "autumnca", + "reaction_cnt": 0, + "created_at": "2022-06-30T14:16:02Z", + "updated_at": "2022-07-09T01:13:55Z", + "author": "autumnca", + "comments": [ + { + "body": "Hi @autumnca ! \r\n\r\nNo, this is an error happening early, when SimBA is trying to open the `.sav` format model file. The file is either not the correct file specified in the `Select model file` entry box (e.g., it is some other non-model file), or it is somehow corrupted. In the below entry box, did you select an `.sav` file? How big is the `.sav` file, in MB or KB, that you selected? \r\n![image](https://user-images.githubusercontent.com/34761092/176702325-da8d823e-044a-452a-abe4-1015ff0351b0.png)\r\n", + "created_at": "2022-06-30T14:27:02Z", + "author": "sronilsson" + }, + { + "body": "Thank you! I fixed the file I had under Select model file, selected Run Model, and received the following error in the SimBA terminal:\r\n\r\n![image](https://user-images.githubusercontent.com/107413137/176747309-0942bb46-222e-4a5b-9d84-df8e8be8c9ac.png)\r\n\r\nWhen I try to validate, I receive the following error in my anaconda terminal:\r\n\r\n![image](https://user-images.githubusercontent.com/107413137/176747480-91042f18-16f3-4b8a-9145-aad72241554b.png)\r\n\r\nI tried just completely starting over, thinking maybe I had done something incorrectly in the beginning stages of my last SimBA project, but have now run into the same errors. \r\n", + "created_at": "2022-06-30T18:08:56Z", + "author": "autumnca" + }, + { + "body": "Hello @autumnca!\r\n\r\nThis error is produced when SimBA is looking for the probabilities that the behavior is present in each frame following classification. SimBA expects two values to be returned (the probability that the behavior is absent, and the probability that the behavior is present). However, in this case, SimBA has one value returned (the probability that the behavior is absent) and it breaks because of it. This could happen if the classifier only had one type of annotation to work with to begin with (behavior absent) and no annotations of behavior present. Did you annotate the videos going into SimBA using the annotation interface?", + "created_at": "2022-06-30T19:30:22Z", + "author": "sronilsson" + }, + { + "body": "Okay, I annotated using BORIS. And I did make sure that the format of the BORIS csv file matched what is listed on the GitHub page.", + "created_at": "2022-06-30T19:31:37Z", + "author": "autumnca" + }, + { + "body": "Oh yes! :) To check that you have appended the annotations appropriately, you can check the files representing the videos inside `project_folder/csv/targets_inserted`. There will be a column, towards the end, with the name of your classifier. Do you see any `1` in that column? \r\n\r\n", + "created_at": "2022-06-30T19:39:31Z", + "author": "sronilsson" + }, + { + "body": "Looking at that file, I only see 0s in that column.", + "created_at": "2022-06-30T19:41:30Z", + "author": "autumnca" + }, + { + "body": "I see. That means that although the file was created, the annotations were not appended correctly from your BORIS file. I can check the file to see what is going on. Do I have access to the latest files on the gdrive? ", + "created_at": "2022-06-30T19:43:20Z", + "author": "sronilsson" + }, + { + "body": "I actually noticed one error in the BORIS file when double-checking it, then I re-imported the BORIS annotations and now am seeing both 1s and 0s. I re-trained the machine model and just tried running the model, and am now running into the following error in my anaconda terminal after clicking on Validate:\r\n\r\n![image](https://user-images.githubusercontent.com/107413137/176766678-50efac1c-3e21-4247-9688-24a938d3882c.png)\r\n\r\nAnd thank you again for your help!!", + "created_at": "2022-06-30T19:57:01Z", + "author": "autumnca" + }, + { + "body": "@autumnca - almost there! Let me look over this; I was just working on tidying this script up. I will get back to you when done, might take a day or two though if that is OK?", + "created_at": "2022-06-30T20:29:38Z", + "author": "sronilsson" + },
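The two-probability expectation described in this thread is easy to reproduce: scikit-learn's `predict_proba` returns one column per class seen during training, so a model fitted only on 'behavior absent' annotations yields a single column. A minimal sketch of a guard for this condition (illustrative, not SimBA's actual code):

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

X = np.random.rand(100, 5)
y = np.zeros(100)   # only one class present: behavior absent
# y[:5] = 1         # uncomment to include 'behavior present' annotations

clf = RandomForestClassifier(n_estimators=10).fit(X, y)
proba = clf.predict_proba(X)
if proba.shape[1] < 2:
    raise ValueError(
        "Classifier was trained on a single class; annotate frames where "
        "the behavior is present before training."
    )
behavior_present_p = proba[:, 1]  # probability that the behavior is present
```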
+ { + "body": "@autumnca - didn't take as long as expected! Could you update to simba 0.92.7 with `pip install simba-uw-tf-dev --upgrade` and let me know if that fixes it or not? \r\n\r\nThat said, this error happened because of a bug in how SimBA handles single-animal tracking coming from SLEAP. I'm working my way through the code and repairing as I go along. However, if you do come across errors, please let me know and I will make it a priority. ", + "created_at": "2022-07-01T01:17:51Z", + "author": "sronilsson" + }, + { + "body": "I just upgraded SimBA and it officially ran!! This is so exciting, thank you so much for all of your help.\r\n\r\nI did visualize the classification results on one video though, and the results were much less accurate than I anticipated. Mostly, (1) the key points are not anywhere near the mouse for a very large portion of the video, and (2) SimBA predicts no nesting for the entire video. I'm not sure if this is a result of inaccurate measurements entered, not enough .slp data (although key points are tracked well in SLEAP), not enough labelled BORIS files, or an incorrectly imported file. Let me know if you have any recommendations or thoughts for this, otherwise I'll work on troubleshooting what this might be caused by. \r\n\r\n![image](https://user-images.githubusercontent.com/107413137/176928166-af37734a-ba85-435e-94c9-53a7d255f569.png)\r\n", + "created_at": "2022-07-01T15:53:54Z", + "author": "autumnca" + }, + { + "body": "One more thing! I will add, the key points are actually very accurate only when the mouse hasn't been moving for at least 5 seconds or so; otherwise they are sometimes on the other side of the cage.", + "created_at": "2022-07-01T16:01:48Z", + "author": "autumnca" + }, + { + "body": "Thanks @autumnca - the body-part location prediction is odd, I didn't see it in my test projects. Can I test the code on your data; is it available on gdrive? Would you mind sending me the link again?\r\n\r\nFor the nesting predictions, does it help if you decrease the discrimination threshold? ", + "created_at": "2022-07-01T16:11:51Z", + "author": "sronilsson" + }, + { + "body": "I just uploaded everything to the gdrive that's located here: \r\n\r\n`https://drive.google.com/drive/folders/19E19tdUvdG5ebwL9I8XbRgRc6_zkZZyG?usp=sharing`\r\n\r\nLet me know if there is anything else I should upload. And I tried decreasing the discrimination threshold and the program still didn't recognize nesting throughout the video. ", + "created_at": "2022-07-01T21:50:21Z", + "author": "autumnca" + }, + { + "body": "Just following up on this, but no rush. I know you're very busy and we just got through a holiday weekend. Thanks again!", + "created_at": "2022-07-06T14:32:33Z", + "author": "autumnca" + }, + { + "body": "Hey @autumnca! I'm on it and I have not forgotten; the day job has taken over. I aim to troubleshoot your project first thing tomorrow. ", + "created_at": "2022-07-06T14:44:12Z", + "author": "sronilsson" + }, + { + "body": "Hi @autumnca! I had a look at the project. I began by looking at the file coming into SimBA from SLEAP. That file is saved inside the `project_folder/csv/input_csv` folder after you've imported the data. I opened it and I see a lot of `0`. These are the frames where no body-parts were detected in SLEAP. I visualized the data in the `project_folder/csv/input_csv/` using the tool documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#visualize-pose-estimation-in-folder) under the `Visualize pose estimation in folder` heading. And it does look a little wild; the visualization file is here (it is rather big). 
https://drive.google.com/file/d/13CL_N9B75Nm6cdWFzhXAcsEb-8ZXXwII/view?usp=sharing \r\n\r\nWe just need to confirm what's happened to the input data: I'm leaning towards the SLP file that was imported not containing all of the otherwise accurate predictions; could that be the case? Did the video you imported to SimBA not represent the video that was tracked in SLEAP? ", + "created_at": "2022-07-07T13:41:24Z", + "author": "sronilsson" + }, + { + "body": "Definitely not every frame in the SLP file is accurate, but when I looked through them it seemed that most frames were accurate or within reason. And I actually imported the same video to SimBA that I used to train SLEAP, just to test if the pose-estimation predictions provided by SLEAP would transition over to SimBA correctly. ", + "created_at": "2022-07-08T12:27:49Z", + "author": "autumnca" + }, + { + "body": "Do you have a visualization coming out of SLEAP of the predictions for this video you could share? ", + "created_at": "2022-07-08T12:57:34Z", + "author": "sronilsson" + }, + { + "body": "I just uploaded a video of the SLEAP predictions to the google drive I shared with you earlier. Let me know if that works or if that's not what you were looking for. Thanks again!", + "created_at": "2022-07-08T16:02:33Z", + "author": "autumnca" + }, + { + "body": "Hello @autumnca! I had a look at the video. It seems like, for the great majority of frames, one or more body-parts are missing predictions. The jumping we see after importing the data into SimBA is from SimBA trying to interpolate the locations of the missing predictions, but it is not going very well because there are so many body-parts missing in the SLEAP file. Is it possible to generate SLEAP predictions where data exist for most body-parts in all of the frames?", + "created_at": "2022-07-09T01:13:55Z", + "author": "sronilsson" + } + ] + },
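The interpolation behavior discussed in the thread above is worth a concrete illustration: when missing detections are reported as (0, 0) coordinates, gap-filling has little to anchor to and the interpolated track 'jumps'. A minimal pandas sketch of the general idea (the column names are hypothetical; this is not SimBA's actual interpolation code):

```python
import numpy as np
import pandas as pd

# Two missing frames, reported as (0, 0), between two reliable detections.
df = pd.DataFrame({"nose_x": [10.0, 0.0, 0.0, 16.0],
                   "nose_y": [5.0, 0.0, 0.0, 8.0]})

# Treat (0, 0) detections as missing, then interpolate linearly between the
# surrounding reliable frames. With long runs of missing frames, the
# interpolated track can stray far from the animal.
df = df.replace(0.0, np.nan).interpolate(method="linear", limit_direction="both")
print(df)
```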
+ { + "title": "Bump numpy from 1.18.1 to 1.22.0", + "body": "Bumps [numpy](https://github.com/numpy/numpy) from 1.18.1 to 1.22.0.

Release notes (sourced from numpy's releases):

v1.22.0

NumPy 1.22.0 Release Notes

NumPy 1.22.0 is a big release featuring the work of 153 contributors spread over 609 pull requests. There have been many improvements; highlights are:

- Annotations of the main namespace are essentially complete. Upstream is a moving target, so there will likely be further improvements, but the major work is done. This is probably the most user-visible enhancement in this release.
- A preliminary version of the proposed Array-API is provided. This is a step in creating a standard collection of functions that can be used across applications such as CuPy and JAX.
- NumPy now has a DLPack backend. DLPack provides a common interchange format for array (tensor) data.
- New methods for quantile, percentile, and related functions. The new methods provide a complete set of the methods commonly found in the literature.
- A new configurable allocator for use by downstream projects.

These are in addition to the ongoing work to provide SIMD support for commonly used functions, improvements to F2PY, and better documentation.

The Python versions supported in this release are 3.8-3.10; Python 3.7 has been dropped. Note that 32-bit wheels are only provided for Python 3.8 and 3.9 on Windows; all other wheels are 64-bit on account of Ubuntu, Fedora, and other Linux distributions dropping 32-bit support. All 64-bit wheels are also linked with 64-bit integer OpenBLAS, which should fix the occasional problems encountered by folks using truly huge arrays.

Expired deprecations:

Deprecated numeric style dtype strings have been removed. Using the strings "Bytes0", "Datetime64", "Str0", "Uint32", and "Uint64" as a dtype will now raise a TypeError. (gh-19539)

Expired deprecations for loads, ndfromtxt, and mafromtxt in npyio. numpy.loads was deprecated in v1.15, with the recommendation that users use pickle.loads instead. ndfromtxt and mafromtxt were both deprecated in v1.17 - users should use numpy.genfromtxt instead with the appropriate value for the usemask parameter. (gh-19615)

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=numpy&package-manager=pip&previous-version=1.18.1&new-version=1.22.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---

Dependabot commands and options:

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-06-22T03:25:08Z", + "updated_at": "2022-12-19T15:23:03Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-12-19T15:22:53Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump numpy from 1.18.1 to 1.22.0 in /simba", + "body": "Bumps [numpy](https://github.com/numpy/numpy) from 1.18.1 to 1.22.0.\n
(Release notes, compatibility score, and Dependabot commands are identical to the preceding "Bump numpy from 1.18.1 to 1.22.0" PR.)
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-06-22T03:22:07Z", + "updated_at": "2022-12-19T15:23:03Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-07-21T03:35:17Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-07-26T01:34:39Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-08-26T12:47:43Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-09-12T16:25:10Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-09-26T18:41:29Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-10-03T13:48:17Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-10-13T16:57:12Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-10-25T13:41:08Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-11-26T18:52:30Z", + "author": "dependabot[bot]" + }, + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-12-19T15:22:53Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Mismatched Columns Error with SLEAP Tracking File", + "body": "I'm attempting to import tracking data from SLEAP of one mouse with four key points. I have looked at a few other issue discussion boards on this GitHub, but they were not too helpful. The file I'm trying to import has tracked or predicted points throughout most of the video, and there are many frames where certain key points are occluded. 
When I try to import the .slp file, I'm met with an error stating \"cannot set a row with mismatched columns\", as shown by the picture below. \r\n\r\n![image](https://user-images.githubusercontent.com/107413137/174305640-819831cb-260d-41b5-bc79-0f245f4f2ea2.png)\r\n\r\nI also tried converting the tracking data from SLEAP to an h5 file and uploading that instead, to which I am met with SimBA telling me \"other, non-skeleton files\" are found in the folder I linked it to. It gives me the same response for all other tracking types, despite this file using a skeleton-based tracking method. \r\n\r\n![image](https://user-images.githubusercontent.com/107413137/174307095-10a0b49f-f1fb-4387-9120-ee2472a45914.png)\r\n\r\nLet me know what you think some good next steps might be, if you have any questions, or if there is another discussion board I should check out. Thanks so much!", + "user": "autumnca", + "reaction_cnt": 0, + "created_at": "2022-06-17T13:25:38Z", + "updated_at": "2023-05-17T16:29:56Z", + "author": "autumnca", + "comments": [ + { + "body": "Hi @autumnca! I was just trying to find time over the next few weekends to update the SLEAP import functions (the current import scripts were written some years ago), and I was hoping to make the code a bit more robust and the error messages a bit more meaningful. However, I don't have too many SLP projects or videos tracked with SLEAP to troubleshoot with, to help me catch and foresee possible errors. Would you mind sharing your SimBA project and the SLP files you are trying to import into SimBA (with original videos) in a gdrive with me? It would be super helpful. \r\n\r\nThat said, the `cannot set a row with mismatched columns` error suggests that there are more, or fewer, body-parts in the SLP files than SimBA expects. The number of body-parts SimBA expects is dictated by the number of body-parts you specified when creating your project. To find out how many body-parts SimBA expects, you can check the `project_folder\\logs\\measures\\pose_configs\\bp_names\\project_bp_names.csv` file, which should contain one row for each body-part. Also, do you see any errors in the Windows terminal window when this error shows in SimBA? ", + "created_at": "2022-06-17T13:53:13Z", + "author": "sronilsson" + }, + { + "body": "I can definitely do that! I don't have too much to share at the moment except for a couple of .slp files (I'm mostly just testing things out right now), but if that would still help out then sure :)\r\n\r\nAnd when I go to the project_bp file, there is a row for each body part. I noticed that the way I labeled them in SLEAP is different from how they are titled there, so I did change what was in that file to match what is in SLEAP. After doing that, I still have the same error. Let me know if I should change that back too. \r\n\r\nI don't have an error or any information showing in the terminal when I try to upload my .slp file, but I do when I try to upload an h5 file. ", + "created_at": "2022-06-17T15:46:18Z", + "author": "autumnca" + }, + { + "body": "Yes, one or two slp files with predictions, their associated video files, and the simba project is ideal! 
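The mismatch diagnosis described above can be reproduced by hand: count the body-parts the project expects and compare against the columns in the imported tracking file. A minimal sketch, assuming three columns (x, y, likelihood) per body-part; the file paths and flat column layout are illustrative, not guaranteed to match a given project:

```python
import pandas as pd

# One row per body-part in the SimBA project definition (no header row).
bp_names = pd.read_csv(
    r"project_folder\logs\measures\pose_configs\bp_names\project_bp_names.csv",
    header=None,
)
expected_cols = len(bp_names) * 3  # x, y, likelihood per body-part

# Imported tracking data (illustrative path and layout).
tracking = pd.read_csv(r"project_folder\csv\input_csv\Video1.csv")
if len(tracking.columns) != expected_cols:
    print(f"Mismatch: project expects {expected_cols} columns "
          f"({len(bp_names)} body-parts), file has {len(tracking.columns)}.")
```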
You can share the files through a link here or to sronilsson@gmail.com ", + "created_at": "2022-06-17T17:13:56Z", + "author": "sronilsson" + }, + { + "body": "I've had the same issue when importing video and slp files. Any information about how to solve this? Thanks!", + "created_at": "2023-05-17T16:29:56Z", + "author": "neugun" + } + ] + }, + { + "title": "Issue with scikit-learn when installing SimBA", + "body": "**The only package error I get when trying to install SimBA involves scikit-learn, and I was initially presented with an error telling me that I did not have scikit-learn installed, and that I needed version 0.22.2, so I installed that here:**\r\n\r\n`(simbaenv)` C:\\Users\\adae-lab-admin>pip install scikit-learn==0.22.2\r\nCollecting scikit-learn==0.22.2\r\n Using cached scikit_learn-0.22.2-cp36-cp36m-win_amd64.whl (6.5 MB)\r\nRequirement already satisfied: joblib>=0.11 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn==0.22.2) (1.1.0)\r\nRequirement already satisfied: scipy>=0.17.0 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn==0.22.2) (1.1.0)\r\nRequirement already satisfied: numpy>=1.11.0 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn==0.22.2) (1.18.1)\r\nInstalling collected packages: scikit-learn\r\n Attempting uninstall: scikit-learn\r\n Found existing installation: scikit-learn 0.24.2\r\n Uninstalling scikit-learn-0.24.2:\r\n Successfully uninstalled scikit-learn-0.24.2\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nimbalanced-learn 0.8.1 requires scikit-learn>=0.24, but you have scikit-learn 0.22.2 which is incompatible.\r\nSuccessfully installed `scikit-learn-0.22.2`\r\n\r\n**And then I was presented with an error saying I needed >= version 0.24. After installing that, I'm presented with an error saying I need 0.22.2.** \r\n\r\n`(simbaenv)` C:\\Users\\adae-lab-admin>pip install scikit-learn==0.24.2\r\nCollecting scikit-learn==0.24.2\r\n Using cached scikit_learn-0.24.2-cp36-cp36m-win_amd64.whl (6.8 MB)\r\nRequirement already satisfied: scipy>=0.19.1 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn==0.24.2) (1.1.0)\r\nRequirement already satisfied: numpy>=1.13.3 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn==0.24.2) (1.18.1)\r\nRequirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn==0.24.2) (3.1.0)\r\nRequirement already satisfied: joblib>=0.11 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn==0.24.2) (1.1.0)\r\nInstalling collected packages: scikit-learn\r\n Attempting uninstall: scikit-learn\r\n Found existing installation: scikit-learn 0.22.2\r\n Uninstalling scikit-learn-0.22.2:\r\n Successfully uninstalled scikit-learn-0.22.2\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 0.92.2 requires scikit-learn==0.22.2, but you have scikit-learn 0.24.2 which is incompatible.\r\nSuccessfully installed ``scikit-learn-0.24.2`\r\n\r\n**I also tried using pip install --upgrade scikit-learn but did not have luck with that either.** \r\n\r\n`(simbaenv)` C:\\Users\\adae-lab-admin>pip install --upgrade scikit-learn\r\nRequirement already satisfied: scikit-learn in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (0.22.2)\r\nCollecting scikit-learn\r\n Using cached scikit_learn-0.24.2-cp36-cp36m-win_amd64.whl (6.8 MB)\r\nRequirement already satisfied: scipy>=0.19.1 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn) (1.1.0)\r\nRequirement already satisfied: threadpoolctl>=2.0.0 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn) (3.1.0)\r\nRequirement already satisfied: joblib>=0.11 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn) (1.1.0)\r\nRequirement already satisfied: numpy>=1.13.3 in c:\\users\\adae-lab-admin\\.conda\\envs\\simbaenv\\lib\\site-packages (from scikit-learn) (1.18.1)\r\nInstalling collected packages: scikit-learn\r\n Attempting uninstall: scikit-learn\r\n Found existing installation: scikit-learn 0.22.2\r\n Uninstalling scikit-learn-0.22.2:\r\n Successfully uninstalled scikit-learn-0.22.2\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 0.92.2 requires scikit-learn==0.22.2, but you have scikit-learn 0.24.2 which is incompatible.\r\nSuccessfully installed `scikit-learn-0.24.2`\r\n\r\n**I have very little coding experience, so I might be missing something obvious, but I can't seem to find any information online that could help me with this. Let me know if you have any questions!**\r\n ", + "user": "autumnca", + "reaction_cnt": 0, + "created_at": "2022-06-14T17:50:03Z", + "updated_at": "2022-06-15T14:27:29Z", + "author": "autumnca", + "comments": [ + { + "body": "Hi @autumnca! \r\n\r\nYou seem to be getting a `dependency conflicts` message during the installation, which is a feature of more recent versions of the python package installer `pip`. I wrote about some suggested solutions under point 11 [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#11-when-i-install-or-update-simba-i-see-a-bunch-or-messages-in-the-console-telling-there-me-about-some-dependency-conflicts-the-messages-may-look-a-little-like-this). \r\n\r\nHowever it looks more like a warning rather than the installations actually erroring out. If you type `simba` in the terminal after seeing these msgs, does SimBA still launch?\r\n\r\nPS. 
`pip install scikit-learn --upgrade` should upgrade the package rather than `pip install --upgrade scikit-learn`", + "created_at": "2022-06-15T12:33:31Z", + "author": "sronilsson" + }, + { + "body": "Hello,\r\n\r\nI went through the suggestions under point 11, and now when I type `simba` in the terminal I'm met with a new error:\r\n\r\nModuleNotFoundError: No module named 'sklearn.metrics.classification'\r\n\r\nAnd yesterday, when struggling with scikit-learn, I would still get an error for the conflicting versions and could not launch SimBA.", + "created_at": "2022-06-15T13:11:45Z", + "author": "autumnca" + }, + { + "body": "@autumnca - which version of pip and (if any) scikit-learn are you running? You should be able to find out by typing `pip --version` and `pip show scikit-learn`\r\n", + "created_at": "2022-06-15T13:16:25Z", + "author": "sronilsson" + }, + { + "body": "pip 21.2.2 and scikit-learn 0.22.2", + "created_at": "2022-06-15T13:34:56Z", + "author": "autumnca" + }, + { + "body": "Interesting, that should work... Anyway, you can try the following: \r\n\r\nType `pip uninstall simba-uw-tf-dev`\r\nType `pip install pip==21.3.1`\r\nType `pip install simba-uw-tf-dev`\r\n\r\nAnd see if that fixes it?\r\n ", + "created_at": "2022-06-15T13:46:38Z", + "author": "sronilsson" + }, + { + "body": "That worked! Thank you so, so much! I really appreciate your help. ", + "created_at": "2022-06-15T13:50:22Z", + "author": "autumnca" + }, + { + "body": "Great! Thanks for letting me know ", + "created_at": "2022-06-15T13:52:32Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Error installing and opening SimBA", + "body": "**Describe the bug**\r\n\r\nI am installing SimBA on my computer to be used with DeepLabCut to perform automated behavioral analysis with mice, to study pain and itch behaviors for a lab. I have encountered some problems during the installation process. First of all, I am not able to extract the files in FFmpeg in step 6 of the directions for installing FFmpeg. Could you let me know how to complete these steps and extract the files? I have included screenshots showing this as well.\r\n\r\nAlso, I am having trouble launching simba once I complete the installation steps.\r\n\r\nI have highlighted here the command line with the prompts I used and the errors I received. Could you help me figure out what I am doing wrong so that I am able to install the program? 
I will also include a screenshot of the steps for installation that I was following.\r\n____________________________________________________________\r\n\r\n(base) C:\\Users\\maf315>conda activate simbaenv\r\n\r\n(simbaenv) C:\\Users\\maf315>pip install pip==19.0.1\r\nCollecting pip==19.0.1\r\n Using cached pip-19.0.1-py2.py3-none-any.whl (1.4 MB)\r\nInstalling collected packages: pip\r\n Attempting uninstall: pip\r\n Found existing installation: pip 21.2.2\r\n Uninstalling pip-21.2.2:\r\n Successfully uninstalled pip-21.2.2\r\nERROR: Could not install packages due to an OSError: [WinError 5] Access is denied: 'C:\\\\Users\\\\maf315\\\\AppData\\\\Local\\\\Temp\\\\pip-uninstall-g4k_1xih\\\\pip.exe'\r\nConsider using the `--user` option or check the permissions.\r\n\r\n\r\n(simbaenv) C:\\Users\\maf315>pip install simba-uw-tf-dev\r\nCollecting simba-uw-tf-dev\r\n Using cached https://files.pythonhosted.org/packages/e7/b5/4b8d0476ae338167a394fa04e799b198404ea182401748c1bdc2e4387dd5/Simba_UW_tf_dev-0.91.9-py3-none-any.whl\r\nCollecting dash==1.14.0 (from simba-uw-tf-dev)\r\nCollecting pyarrow==0.17.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/09/b4/4d361463cb3fbaddb0292d8cafc7134e43e66f82144db0c4cb433dac2624/pyarrow-0.17.1-cp36-cp36m-win_amd64.whl\r\nCollecting Pillow==5.4.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/ec/ca/7af5b6628ecf770645f8cc3c9da3c2bb5c5ffc7384a9ff0666fdb818b4d5/Pillow-5.4.1-cp36-cp36m-win_amd64.whl\r\nCollecting wxpython==4.0.4 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/8f/94/17dc5e0191351e2d7a7f9b79eb355de930feade9c7decec70b19e0f2e160/wxPython-4.0.4-cp36-cp36m-win_amd64.whl\r\nCollecting opencv-python==3.4.5.20 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/39/7e/5578b4f6f7d5b7b8387d4d3de4fd414d75cf45701a5ab7a23e656e865417/opencv_python-3.4.5.20-cp36-cp36m-win_amd64.whl\r\nCollecting tqdm==4.30.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/76/4c/103a4d3415dafc1ddfe6a6624333971756e2d3dd8c6dc0f520152855f040/tqdm-4.30.0-py2.py3-none-any.whl\r\nCollecting numpy==1.18.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/53/74/b997e4c7b4abc668e99f4c3dba87ee2c6f7559319af756cc1ede37665a8d/numpy-1.18.1-cp36-cp36m-win_amd64.whl\r\nCollecting tabulate==0.8.3 (from simba-uw-tf-dev)\r\nCollecting numba==0.48.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/ae/61/15ac002290c77694aa88aa9ead95879e7121b9ad40b23f9b89701bec355b/numba-0.48.0-cp36-cp36m-win_amd64.whl\r\nCollecting numexpr==2.6.9 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/f7/2d/ed626045f9e4c17fb6c2dea95a50b6e484427e5d324b6a0c4c53be1a4ddb/numexpr-2.6.9-cp36-none-win_amd64.whl\r\nCollecting pyyaml==5.3.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/61/5e/0386f8d74ce6e0a183b93a7e02d32b2629c0a2c8e9db806a3f5eebbfa221/PyYAML-5.3.1-cp36-cp36m-win_amd64.whl\r\nCollecting yellowbrick==0.9.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/d8/e8/125204ea84a7424a3237556e8dfaec9fee21f2e3d5b3695eb9ce355bf668/yellowbrick-0.9.1-py2.py3-none-any.whl\r\nCollecting eli5==0.10.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/97/2f/c85c7d8f8548e460829971785347e14e45fa5c6617da374711dec8cb38cc/eli5-0.10.1-py2.py3-none-any.whl\r\nCollecting pandas==0.25.3 (from 
simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/f0/ac/92c3d2f0b627efbd1a7b2156faa697f9c2bbd7b0fe83ba8a9d36f982156f/pandas-0.25.3-cp36-cp36m-win_amd64.whl\r\nCollecting dash-color-picker==0.0.1 (from simba-uw-tf-dev)\r\nCollecting imblearn==0.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/81/a7/4179e6ebfd654bd0eac0b9c06125b8b4c96a9d0a8ff9e9507eb2a26d2d7e/imblearn-0.0-py2.py3-none-any.whl\r\nCollecting scipy==1.1.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/62/e2/364f0bcc641aeff79d743c732769d5dc31a1e78c27699229431412c4b425/scipy-1.1.0-cp36-none-win_amd64.whl\r\nCollecting scikit-learn==0.22.2 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/30/ef/528c6ccf1986bab9e9a3e53e0f1e673ed5f1dd348dc32618c670b61100f6/scikit_learn-0.22.2-cp36-cp36m-win_amd64.whl\r\nCollecting graphviz==0.11 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/af/ae/e1c63ac4c531d69a7960a99af99e184d4f3da15e29f67767c4252bf19cce/graphviz-0.11-py2.py3-none-any.whl\r\nCollecting h5py==2.9.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/01/1e/115c4403544a91001d9c618748b2e8786db45544e36b8a6cf3c525e9b57f/h5py-2.9.0-cp36-cp36m-win_amd64.whl\r\nCollecting xgboost==0.90 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/5e/49/b95c037b717b4ceadc76b6e164603471225c27052d1611d5a2e832757945/xgboost-0.90-py2.py3-none-win_amd64.whl\r\nCollecting statsmodels==0.9.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/77/2b/8ba61399b31f984c263b177c2e2547a34f0d4d972a24a51fc77c376079b0/statsmodels-0.9.0-cp36-cp36m-win_amd64.whl\r\nCollecting plotly==4.9.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/bf/5f/47ab0d9d843c5be0f5c5bd891736a4c84fa45c3b0a0ddb6b6df7c098c66f/plotly-4.9.0-py2.py3-none-any.whl\r\nCollecting dtreeviz==0.8.1 (from simba-uw-tf-dev)\r\nCollecting seaborn==0.9.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/a8/76/220ba4420459d9c4c9c9587c6ce607bf56c25b3d3d2de62056efe482dadc/seaborn-0.9.0-py3-none-any.whl\r\nCollecting xlrd==1.2.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/b0/16/63576a1a001752e34bf8ea62e367997530dc553b689356b9879339cf45a4/xlrd-1.2.0-py2.py3-none-any.whl\r\nCollecting tables==3.6.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/50/e2/c26a7eec516ef3cb337e77fb8d2310d095750c634de27c295dc3466d9d02/tables-3.6.1-2-cp36-cp36m-win_amd64.whl\r\nCollecting dash-colorscales==0.0.4 (from simba-uw-tf-dev)\r\nCollecting shapely==1.7 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/b8/0b/e5b073f74d8c752e85d11769572f8af134d1d3c9234a8fae0a7c0d1e224a/Shapely-1.7.0-cp36-cp36m-win_amd64.whl\r\nCollecting shap==0.35.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/e9/dd/1464558bff508aaddebeb6f970e6aab5b148911cea44f882822d33156250/shap-0.35.0-cp36-cp36m-win_amd64.whl\r\nCollecting imutils==0.5.2 (from simba-uw-tf-dev)\r\nCollecting imgaug==0.4.0 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/66/b1/af3142c4a85cba6da9f4ebb5ff4e21e2616309552caca5e8acefe9840622/imgaug-0.4.0-py2.py3-none-any.whl\r\nCollecting cefpython3==66.0 (from simba-uw-tf-dev)\r\n Using cached 
https://files.pythonhosted.org/packages/af/90/7e471ba5ccbb716b42d0c1156337c9f4add8c7b83366b50ef7b7c33911da/cefpython3-66.0-py2.py3-none-win_amd64.whl\r\nCollecting dash-html-components==1.0.3 (from simba-uw-tf-dev)\r\nCollecting matplotlib==3.0.3 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/21/4c/35fa1837a705f33621604a1967b1505bd3f695940fdf02fad77ef11de196/matplotlib-3.0.3-cp36-cp36m-win_amd64.whl\r\nCollecting scikit-image==0.14.2 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/bd/a5/1d00f4bfe5ac7b48476b68f5c6bd87764a48c26e711f2bc510ea61d9c548/scikit_image-0.14.2-cp36-none-win_amd64.whl\r\nCollecting dash-core-components==1.10.2 (from simba-uw-tf-dev)\r\nCollecting Flask>=1.0.2 (from dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/cd/77/59df23681f4fd19b7cbbb5e92484d46ad587554f5d490f33ef907e456132/Flask-2.0.3-py3-none-any.whl\r\nCollecting future (from dash==1.14.0->simba-uw-tf-dev)\r\nCollecting dash-renderer==1.6.0 (from dash==1.14.0->simba-uw-tf-dev)\r\nCollecting dash-table==4.9.0 (from dash==1.14.0->simba-uw-tf-dev)\r\nCollecting flask-compress (from dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/d5/08/91ffc9506cb569f45cc4781ac8d35c6be19b9c3c8b1f8bc77aed56a46b53/Flask_Compress-1.12-py3-none-any.whl\r\nCollecting six (from wxpython==4.0.4->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/d9/5a/e7c31adbe875f2abbb91bd84cf2dc52d792b5a01506781dbcf25c91daf11/six-1.16.0-py2.py3-none-any.whl\r\nCollecting llvmlite<0.32.0,>=0.31.0dev0 (from numba==0.48.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/41/f7/1aff7e7bd1fbfe036b1274db43006162db95df2762a831480fa85ee294ed/llvmlite-0.31.0-cp36-cp36m-win_amd64.whl\r\nRequirement already satisfied: setuptools in c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages (from numba==0.48.0->simba-uw-tf-dev) (58.0.4)\r\nCollecting cycler>=0.10.0 (from yellowbrick==0.9.1->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/5c/f9/695d6bedebd747e5eb0fe8fad57b72fdf25411273a39791cde838d5a8f51/cycler-0.11.0-py3-none-any.whl\r\nCollecting jinja2 (from eli5==0.10.1->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/20/9a/e5d9ec41927401e41aea8af6d16e78b5e612bca4699d417f646a9610a076/Jinja2-3.0.3-py3-none-any.whl\r\nCollecting attrs>16.0.0 (from eli5==0.10.1->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/be/be/7abce643bfdf8ca01c48afa2ddf8308c2308b0c3b239a44e57d020afa0ef/attrs-21.4.0-py2.py3-none-any.whl\r\nCollecting pytz>=2017.2 (from pandas==0.25.3->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/60/2e/dec1cc18c51b8df33c7c4d0a321b084cf38e1733b98f9d15018880fb4970/pytz-2022.1-py2.py3-none-any.whl\r\nCollecting python-dateutil>=2.6.1 (from pandas==0.25.3->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/36/7a/87837f39d0296e723bb9b62bbb257d0355c7f6128853c78955f57342a56d/python_dateutil-2.8.2-py2.py3-none-any.whl\r\nCollecting imbalanced-learn (from imblearn==0.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/19/79/e86c8fd859dca4fb1fbfc61376afc63210177a235a7bfbe7219b02edf8f3/imbalanced_learn-0.9.1-py3-none-any.whl\r\nCollecting joblib>=0.11 (from scikit-learn==0.22.2->simba-uw-tf-dev)\r\n Using cached 
https://files.pythonhosted.org/packages/3e/d5/0163eb0cfa0b673aa4fe1cd3ea9d8a81ea0f32e50807b0c295871e4aab2e/joblib-1.1.0-py2.py3-none-any.whl\r\nCollecting retrying>=1.3.3 (from plotly==4.9.0->simba-uw-tf-dev)\r\nCollecting colour (from dtreeviz==0.8.1->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/74/46/e81907704ab203206769dee1385dc77e1407576ff8f50a0681d0a6b541be/colour-0.1.5-py2.py3-none-any.whl\r\nCollecting imageio (from imgaug==0.4.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/58/fc/1547b93534279bbf2de88f7c4a88975a65cecafd32c2bd3c518b2054ef76/imageio-2.16.0-py3-none-any.whl\r\nCollecting kiwisolver>=1.0.1 (from matplotlib==3.0.3->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/6e/df/1250c32ab3b532c32a7e47c1cd240faba98f75b1b5150939b10e9bffb758/kiwisolver-1.3.1-cp36-cp36m-win_amd64.whl\r\nCollecting pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 (from matplotlib==3.0.3->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/6c/10/a7d0fa5baea8fe7b50f448ab742f26f52b80bfca85ac2be9d35cdd9a3246/pyparsing-3.0.9-py3-none-any.whl\r\nCollecting dask[array]>=1.0.0 (from scikit-image==0.14.2->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/2e/86/95faa4a9c1f7fbfa2df2ae9e7e1a11349cb97a81e2f38ff9dda301606882/dask-2021.3.0-py3-none-any.whl\r\nCollecting PyWavelets>=0.4.0 (from scikit-image==0.14.2->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/30/9f/60c3b80bcefc7e3cbc76c0925e05159312cae0f3e8bf822cf50ba30b5312/PyWavelets-1.1.1-cp36-cp36m-win_amd64.whl\r\nCollecting networkx>=1.8 (from scikit-image==0.14.2->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/f3/b7/c7f488101c0bb5e4178f3cde416004280fd40262433496830de8a8c21613/networkx-2.5.1-py3-none-any.whl\r\nCollecting cloudpickle>=0.2.1 (from scikit-image==0.14.2->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/25/40/2c9db9cfb85a8a21c61528f6660c47662b3e59576efac610d8268d47abba/cloudpickle-2.1.0-py3-none-any.whl\r\nCollecting Werkzeug>=2.0 (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/f4/f3/22afbdb20cc4654b10c98043414a14057cd27fdba9d4ae61cea596000ba2/Werkzeug-2.0.3-py3-none-any.whl\r\nCollecting click>=7.1.2 (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/4a/a8/0b2ced25639fb20cc1c9784de90a8c25f9504a7f18cd8b5397bd61696d7d/click-8.0.4-py3-none-any.whl\r\nCollecting itsdangerous>=2.0 (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/9c/96/26f935afba9cd6140216da5add223a0c465b99d0f112b68a4ca426441019/itsdangerous-2.0.1-py3-none-any.whl\r\nCollecting brotli (from flask-compress->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/e3/d4/e68c4f2a21cab38ba1defbd5bc6ab7e74e2234383f1daf36153dce11ea9d/Brotli-1.0.9-cp36-cp36m-win_amd64.whl\r\nCollecting MarkupSafe>=2.0 (from jinja2->eli5==0.10.1->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/73/60/296031f365b3ae96732225203d864fac7b83a185ed1820c1c87b78e154bc/MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl\r\nCollecting threadpoolctl>=2.0.0 (from imbalanced-learn->imblearn==0.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/61/cf/6e354304bcb9c6413c4e02a747b600061c21d38ba51e7e544ac7bc66aecc/threadpoolctl-3.1.0-py3-none-any.whl\r\nCollecting toolz>=0.8.2; extra == 
\"array\" (from dask[array]>=1.0.0->scikit-image==0.14.2->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/b5/f1/3df506b493736e3ee11fc1a3c2de8014a55f025d830a71bb499acc049a2c/toolz-0.11.2-py3-none-any.whl\r\nCollecting decorator<5,>=4.3 (from networkx>=1.8->scikit-image==0.14.2->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/ed/1b/72a1821152d07cf1d8b6fce298aeb06a7eb90f4d6d41acec9861e7cc6df0/decorator-4.4.2-py2.py3-none-any.whl\r\nCollecting dataclasses; python_version < \"3.7\" (from Werkzeug>=2.0->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/fe/ca/75fac5856ab5cfa51bbbcefa250182e50441074fdc3f803f6e76451fab43/dataclasses-0.8-py3-none-any.whl\r\nCollecting importlib-metadata; python_version < \"3.8\" (from click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/a0/a1/b153a0a4caf7a7e3f15c2cd56c7702e2cf3d89b1b359d1f1c5e59d68f4ce/importlib_metadata-4.8.3-py3-none-any.whl\r\nCollecting colorama; platform_system == \"Windows\" (from click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/44/98/5b86278fbbf250d239ae0ecb724f8572af1c91f4a11edf4d36a206189440/colorama-0.4.4-py2.py3-none-any.whl\r\nCollecting zipp>=0.5 (from importlib-metadata; python_version < \"3.8\"->click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/bd/df/d4a4974a3e3957fd1c1fa3082366d7fff6e428ddb55f074bf64876f8e8ad/zipp-3.6.0-py3-none-any.whl\r\nCollecting typing-extensions>=3.6.4; python_version < \"3.8\" (from importlib-metadata; python_version < \"3.8\"->click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/45/6b/44f7f8f1e110027cf88956b59f2fad776cca7e1704396d043f89effd3a0e/typing_extensions-4.1.1-py3-none-any.whl\r\nimbalanced-learn 0.9.1 has requirement scikit-learn>=1.1.0, but you'll have scikit-learn 0.22.2 which is incompatible.\r\nimbalanced-learn 0.9.1 has requirement scipy>=1.3.2, but you'll have scipy 1.1.0 which is incompatible.\r\nimageio 2.16.0 has requirement numpy>=1.20.0, but you'll have numpy 1.18.1 which is incompatible.\r\nimageio 2.16.0 has requirement pillow>=8.3.2, but you'll have pillow 5.4.1 which is incompatible.\r\nInstalling collected packages: six, retrying, plotly, dash-html-components, dataclasses, Werkzeug, MarkupSafe, jinja2, zipp, typing-extensions, importlib-metadata, colorama, click, itsdangerous, Flask, future, dash-renderer, dash-table, dash-core-components, brotli, flask-compress, dash, numpy, pyarrow, Pillow, wxpython, opencv-python, tqdm, tabulate, llvmlite, numba, numexpr, pyyaml, cycler, python-dateutil, kiwisolver, pyparsing, matplotlib, scipy, joblib, scikit-learn, yellowbrick, attrs, graphviz, eli5, pytz, pandas, dash-color-picker, threadpoolctl, imbalanced-learn, imblearn, h5py, xgboost, statsmodels, colour, dtreeviz, seaborn, xlrd, tables, dash-colorscales, shapely, shap, imutils, toolz, dask, PyWavelets, decorator, networkx, cloudpickle, scikit-image, imageio, imgaug, cefpython3, simba-uw-tf-dev\r\nSuccessfully installed Flask-2.0.3 MarkupSafe-2.0.1 Pillow-5.4.1 PyWavelets-1.1.1 Werkzeug-2.0.3 attrs-21.4.0 brotli-1.0.9 cefpython3-66.0 click-8.0.4 cloudpickle-2.1.0 colorama-0.4.4 colour-0.1.5 cycler-0.11.0 dash-1.14.0 dash-color-picker-0.0.1 dash-colorscales-0.0.4 dash-core-components-1.10.2 dash-html-components-1.0.3 dash-renderer-1.6.0 
dash-table-4.9.0 dask-2021.3.0 dataclasses-0.8 decorator-4.4.2 dtreeviz-0.8.1 eli5-0.10.1 flask-compress-1.12 future-0.18.2 graphviz-0.11 h5py-2.9.0 imageio-2.16.0 imbalanced-learn-0.9.1 imblearn-0.0 imgaug-0.4.0 importlib-metadata-4.8.3 imutils-0.5.2 itsdangerous-2.0.1 jinja2-3.0.3 joblib-1.1.0 kiwisolver-1.3.1 llvmlite-0.31.0 matplotlib-3.0.3 networkx-2.5.1 numba-0.48.0 numexpr-2.6.9 numpy-1.18.1 opencv-python-3.4.5.20 pandas-0.25.3 plotly-4.9.0 pyarrow-0.17.1 pyparsing-3.0.9 python-dateutil-2.8.2 pytz-2022.1 pyyaml-5.3.1 retrying-1.3.3 scikit-image-0.14.2 scikit-learn-0.22.2 scipy-1.1.0 seaborn-0.9.0 shap-0.35.0 shapely-1.7.0 simba-uw-tf-dev-0.91.9 six-1.16.0 statsmodels-0.9.0 tables-3.6.1 tabulate-0.8.3 threadpoolctl-3.1.0 toolz-0.11.2 tqdm-4.30.0 typing-extensions-4.1.1 wxpython-4.0.4 xgboost-0.90 xlrd-1.2.0 yellowbrick-0.9.1 zipp-3.6.0\r\nYou are using pip version 19.0.1, however version 21.3.1 is available.\r\nYou should consider upgrading via the 'python -m pip install --upgrade pip' command.\r\n\r\n(simbaenv) C:\\Users\\maf315>pip uninstall shapely\r\nUninstalling Shapely-1.7.0:\r\n Would remove:\r\n c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\shapely-1.7.0.dist-info\\*\r\n c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\shapely\\*\r\nProceed (y/n)? y\r\n Successfully uninstalled Shapely-1.7.0\r\n\r\n(simbaenv) C:\\Users\\maf315>conda install -c conda-forge shapely\r\nCollecting package metadata (current_repodata.json): done\r\nSolving environment: done\r\n\r\n## Package Plan ##\r\n\r\n environment location: C:\\TOOLS\\Anaconda3\\envs\\simbaenv\r\n\r\n added / updated specs:\r\n - shapely\r\n\r\n\r\nThe following packages will be downloaded:\r\n\r\n package | build\r\n ---------------------------|-----------------\r\n geos-3.8.0 | h33f27b4_0 905 KB\r\n intel-openmp-2022.1.0 | h57928b3_3787 3.7 MB conda-forge\r\n libblas-3.9.0 | 14_win64_mkl 5.3 MB conda-forge\r\n libcblas-3.9.0 | 14_win64_mkl 5.3 MB conda-forge\r\n liblapack-3.9.0 | 14_win64_mkl 5.3 MB conda-forge\r\n numpy-1.19.5 | py36h4b40d73_2 4.9 MB conda-forge\r\n python_abi-3.6 | 2_cp36m 4 KB conda-forge\r\n shapely-1.7.1 | py36h06580b3_0 369 KB\r\n tbb-2021.5.0 | h2d74725_1 148 KB conda-forge\r\n ------------------------------------------------------------\r\n Total: 25.7 MB\r\n\r\nThe following NEW packages will be INSTALLED:\r\n\r\n geos pkgs/main/win-64::geos-3.8.0-h33f27b4_0\r\n intel-openmp conda-forge/win-64::intel-openmp-2022.1.0-h57928b3_3787\r\n libblas conda-forge/win-64::libblas-3.9.0-14_win64_mkl\r\n libcblas conda-forge/win-64::libcblas-3.9.0-14_win64_mkl\r\n liblapack conda-forge/win-64::liblapack-3.9.0-14_win64_mkl\r\n mkl conda-forge/win-64::mkl-2022.0.0-h0e2418a_796\r\n numpy conda-forge/win-64::numpy-1.19.5-py36h4b40d73_2\r\n python_abi conda-forge/win-64::python_abi-3.6-2_cp36m\r\n shapely pkgs/main/win-64::shapely-1.7.1-py36h06580b3_0\r\n tbb conda-forge/win-64::tbb-2021.5.0-h2d74725_1\r\n\r\n\r\nProceed ([y]/n)? 
y\r\n\r\n\r\nDownloading and Extracting Packages\r\nlibcblas-3.9.0 | 5.3 MB | ############################################################################ | 100%\r\npython_abi-3.6 | 4 KB | ############################################################################ | 100%\r\ngeos-3.8.0 | 905 KB | ############################################################################ | 100%\r\ntbb-2021.5.0 | 148 KB | ############################################################################ | 100%\r\nnumpy-1.19.5 | 4.9 MB | ############################################################################ | 100%\r\nlibblas-3.9.0 | 5.3 MB | ############################################################################ | 100%\r\nintel-openmp-2022.1. | 3.7 MB | ############################################################################ | 100%\r\nliblapack-3.9.0 | 5.3 MB | ############################################################################ | 100%\r\nshapely-1.7.1 | 369 KB | ############################################################################ | 100%\r\nPreparing transaction: done\r\nVerifying transaction: done\r\nExecuting transaction: done\r\n\r\n(simbaenv) C:\\Users\\maf315>simba\r\nTraceback (most recent call last):\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\TOOLS\\Anaconda3\\envs\\simbaenv\\Scripts\\simba.exe\\__main__.py\", line 5, in \r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 50, in \r\n from simba.train_multiple_models_from_meta import *\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\train_multiple_models_from_meta.py\", line 10, in \r\n from imblearn.combine import SMOTEENN\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\__init__.py\", line 53, in \r\n from . import ensemble\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\ensemble\\__init__.py\", line 6, in \r\n from ._easy_ensemble import EasyEnsembleClassifier\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\ensemble\\_easy_ensemble.py\", line 21, in \r\n from ..pipeline import Pipeline\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\pipeline.py\", line 18, in \r\n from sklearn.utils.metaestimators import available_if\r\nImportError: cannot import name 'available_if'\r\n\r\n(simbaenv) C:\\Users\\maf315>simba\r\nTraceback (most recent call last):\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\TOOLS\\Anaconda3\\envs\\simbaenv\\Scripts\\simba.exe\\__main__.py\", line 5, in \r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 50, in \r\n from simba.train_multiple_models_from_meta import *\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\train_multiple_models_from_meta.py\", line 10, in \r\n from imblearn.combine import SMOTEENN\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\__init__.py\", line 53, in \r\n from . 
import ensemble\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\ensemble\\__init__.py\", line 6, in \r\n from ._easy_ensemble import EasyEnsembleClassifier\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\ensemble\\_easy_ensemble.py\", line 21, in \r\n from ..pipeline import Pipeline\r\n File \"c:\\tools\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\imblearn\\pipeline.py\", line 18, in \r\n from sklearn.utils.metaestimators import available_if\r\nImportError: cannot import name 'available_if'\r\n\r\n___________________________________________________________________________________________\r\n\r\n\r\n**Expected behavior**\r\nI expected to follow the installation steps and for the software to open successfully.\r\n\r\n**Screenshots**\r\n\"ffmpeg\r\n\"simBA\r\n\"step\r\n\"steps\r\n\r\n\r\n**Desktop (Lenovo P340 Workstation (ThinkStation) - Type 30DH (Tower Form Factor)):**\r\n - OS build: 19043.1706 (Processor: Intel(R) Core(TM) i9-10900K CPU @ 3.70GHz 3.70 GHz)\r\n - Python Version 3.10.4\r\n - Are you using anaconda? - Yes\r\n \r\n\r\n**Additional context**\r\nI am working through a remote desktop while using this software.\r\n", + "user": "fon215", + "reaction_cnt": 0, + "created_at": "2022-06-03T16:23:04Z", + "updated_at": "2022-06-07T16:44:04Z", + "author": "fon215", + "comments": [ + { + "body": "Hi @fon215 ! Thanks for reporting - I had a similar issue reported very recently by @m-migliaro. The solution for them to **not** downgrade pip to version `19.0.1`, and stick with version `21.2.2`. Can you check if this also works for you? ", + "created_at": "2022-06-03T16:54:15Z", + "author": "sronilsson" + }, + { + "body": "That worked! Thank you so much!", + "created_at": "2022-06-03T19:40:22Z", + "author": "fon215" + }, + { + "body": "\r\n\r\n\r\n\r\n> Hi @fon215 ! Thanks for reporting - I had a similar issue reported very recently by @m-migliaro. The solution for them to **not** downgrade pip to version `19.0.1`, and stick with version `21.2.2`. Can you check if this also works for you?\r\n\r\nThat helped me open SimBA, however, I am still having trouble installing FFmpeg. Could someone direct me on getting past the sixth step listed in the screenshot I provided?", + "created_at": "2022-06-03T19:46:57Z", + "author": "fon215" + }, + { + "body": "Hi @fon215 - I think you need to install a program that allows you to uncompress the 7Zip file, either winzip, winrar, or 7-zip (https://www.7-zip.org/)", + "created_at": "2022-06-03T20:43:31Z", + "author": "sronilsson" + }, + { + "body": "Okay, thank you.", + "created_at": "2022-06-03T23:31:24Z", + "author": "fon215" + } + ] + }, + { + "title": "Builing a model with few frames containing the behavior of interest", + "body": "Hi,\r\nI'm building an analysis model with only one classified behavior. \r\nThe model is based on 24 videos, where this behavior on average is present around 1.4% of all frames labeled. This ratio (1.4%) is also the same in other videos that I've not incorporated in the model.\r\n\r\nI've read your papers ([1](https://doi.org/10.1101/2020.04.19.049452 ) - [2](https://link.springer.com/article/10.1007/s00213-020-05577-x)) and I was wondering if this aspect (the low presence of the behavior in the videos) can affect negatively the model, because as you wrote \"One weakness of random forests is their inability to natively support biased datasets. 
These datasets are common in behavioral videos, in which most frames do not contain the behavior of interest\" (or \"Random forest classifiers - as other classification techniques - are sensitive to class imbalances\").\r\n\r\nIf this is the case, should I change any hyperparameters in the settings? And how?\r\nI see (and read in the papers) \"Under sample setting\" and \"Under sample ratio\" parameters: do I have to change their values?\r\n\r\nThanks!\r\n", + "user": "carlitomu", + "reaction_cnt": 0, + "created_at": "2022-05-31T08:06:04Z", + "updated_at": "2022-05-31T21:03:31Z", + "author": "carlitomu", + "comments": [ + { + "body": "Hi @carlitomu!\r\nYes, this imbalance (98.6% no behavior vs 1.4% behavior) is typical, and one of the main hurdles for getting an accurate classifier up and running. The issue is that the classifier can reach 98.6% accuracy, just by guessing \"no behavior\" on all frames, and we need to stop this from happening.\r\n\r\nThere are a few ways to solve this, in SimBA I recommend first trying the random undersampling. To do this, first set the\r\n`Under sample setting` entry box to `Random undersample` and set the `Under sample ratio` entry box to `1.0`. This number means that you will enter all of your behavior frames into the algorithm, and an equal count of \"no behavior\" frames.\r\nE.g., if you have 1000 frames annotated with the behavior present, and enter `1.0` in the `Under sample ratio`box, SimBA will use those together with 1000 randomly selected non-behavior frames.\r\n\r\nIf you insert `0.8` in the `Under sample ratio` entry box, SimBA will use the 1000 annotated frames, together with 800 randomly selected non-behavior frames.\r\n\r\nIf you insert `1.2` in the `Under sample ratio` entry box, SimBA will use the 1000 annotated frames, together with 1200 randomly selected non-behavior frames.\r\n\r\nWhich exact number to use for the `Under sample ratio` is empirical and depends on your behavior and videos, but `1.0` is usually a good place to start! If you see that the classifier is over-classifying frames as containing your behavior, then increase the `Under sample ratio`. If the classifier is under-classifying frames as containing your behavior, then try to decrease the `Under sample ratio`.", + "created_at": "2022-05-31T20:59:14Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Problems importing SLP files - predictions can be imported, project files cannot", + "body": "I have an SLP file which has predictions in it. I would like to use the current predictions therein in SimBA but I get an error when I try to import it. \r\n\r\nIf I run inference on the entire file to generate a new set of predictions the predictions file is usable. Due to a bug I'm experiencing in SLEAP this process generates much worse predictions than the original file so I would just like to use those. \r\n\r\nI am assured by the SLEAP team that there should be no difference in SLP file format between predictions and the project files. Both should contain predictions that could be used. \r\n\r\n@sronilsson was looped into [this github discussion ](https://github.com/talmolab/sleap/discussions/760) that has more details and I've set up drive access to the requested files. 
Please see .txt therein to explain each file (I included the video itself as well).\r\n\r\nIf any further details are necessary I will edit the Issue :)", + "user": "rfkova", + "reaction_cnt": 0, + "created_at": "2022-05-27T20:38:44Z", + "updated_at": "2022-06-29T21:55:21Z", + "author": "rfkova", + "comments": [ + { + "body": "Hi @rfkova! \r\n\r\nSimBA first reads in the number of frames with predictions in your `labels.v000.video0.slp` SLP file. For this it counts the number of entries in the `['frames’]` key in the SLP file, and finds an array of `55391` entries. \r\n\r\nNext, it to get the actual body-part predictions, SimBA looks in the ['predicted_points’] key in the SLP file and finds 329244 rows. If we divide 329244 by the number of body-parts (which is 6), we get predictions for `54874` frames.\r\n\r\nPS: When looking at the number of frames in the actual video, there seems to be a count of `107914`.\r\n\r\nThe `(cannot set a row with mismatched columns)` you are seeing happens when SimBA tries to analyze frame `54875` of the anticipated `55391` frames. This returns zero body-part predictions. SimBA tries to insert the zero body-part predictions into 6*3 (body-part count * (X, Y, P)) column dataframe, just like the the other prior predicted frames. But - with zero values - that’s not going to work and it fails. \r\n\r\nI have not seen this error before, there is a few things we can do. We can insert a fix where we treat the ['predicted_points’] key as the actual true frame count, or alternative the true frame count is whatever is the smallest (['predicted_points’] or ['frames’]). However the two counts normally match up, I don’t know why it wouldn’t match up here, and even if we use one of these solutions, we’d still only have predictions for the first half-ish of the video. Let me know if that makes sense!", + "created_at": "2022-06-01T16:38:40Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson!\r\n\r\nFantastic sleuthing, thank you :)\r\n\r\nThis is a test video where I manually labeled some frames and predicted the rest. Due to a bug I'm working out with the SLEAP team I wasn't able to run predictions on 100% of the frames so in lieu of that I predicted 1000 random frames at a time until I got to around 50% of frames labeled which is what we have here. The videos are 30fps so this should give me around 15fps labeled frames and I planned to interpolate (using tools in SimBA) intermediate body part locations for this first test phase. \r\n\r\nThe numbers you give match perfectly 54874-55391 = 517 which is precisely the number of manually labeled frames. I think for some reason SLEAP didn't enter frames for previously labeled frames which appear to have been logged under predictions if I understand you correctly. Under normal circumstances I think I can get around this by simply making a separate file and trying to predict all frames without doing any manual labeling but at present that's not working for me.\r\n\r\nThe problem we face is what you say - we don't know which of the 'predictions' correspond to the manual labels which are missing so we wouldn't know which 'predictions' to discard to match frames. \r\n\r\nSomething in the way you phrased what you wrote also suggests that my intended approach might not work anyway in that there may be no index of frame order indicating un-predicted frames... 
would SimBA treat my ~50k predicted frames as contiguous making interpolation infeasible?\r\n\r\nEither way, I think this may be a problem I have to fix on the SLEAP end of things.\r\n\r\nLet me know about the feasibility of this approach for my test scenario and if it won't work I'll mark this issue solved and work from the SLEAP end of things. If it could work I will dig a little deeper to find if there's a way to ID which frames to drop and update with thread with the results before marking solved.\r\n\r\nThanks again!!!\r\n\r\n", + "created_at": "2022-06-01T17:14:54Z", + "author": "rfkova" + }, + { + "body": "@rfkova - your approach is not unfeasible, should be doable, there is just one blocker on my end:\r\n\r\nThis is the ['frames'] array in the SLP file:\r\n\r\n\"image\"\r\n\r\nThe SimBA code kind of uses the first column (`frame_id`) as the frame index, when it probably should be using the third column (`frame_idx`) as the frame index. I fix that. Any missing frames I can insert all body-part positions as `0` making them interpolatable in SimBA. It will take me a little bit of time as I have to confirm it works with other SLP files as well and its not introducing any issues for other users. I will keep you updated! \r\n\r\n... I am not 100% sure that this is true, but worth a try.", + "created_at": "2022-06-01T18:30:11Z", + "author": "sronilsson" + }, + { + "body": "Hi @rfkova - sorry forgot to provide an update on this. If you upgrade simba `pip install simba-uw-tf-dev --upgrade` you should be able to import your slp data, but please let me know if you get into any issues! That said, I am not sure the tracking performance will be good enough", + "created_at": "2022-06-26T11:59:04Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson!\r\n\r\nAmazing, just tried it out and the import works perfectly!!! Astoundingly I'm still troubleshooting the rest of my workflow so this is super nice to be able to get a feel for SimBA in the meantime.\r\n\r\nThank you so much for your help!!!\r\n", + "created_at": "2022-06-29T21:55:06Z", + "author": "rfkova" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.7.2 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.7.2.\n
**Release notes**

*Sourced from tensorflow-gpu's releases.*

**TensorFlow 2.7.2**

**Release 2.7.2**

This release introduces several vulnerability fixes:
- Fixes a code injection in saved_model_cli (CVE-2022-29216)
- Fixes a missing validation which causes TensorSummaryV2 to crash (CVE-2022-29193)
- Fixes a missing validation which crashes QuantizeAndDequantizeV4Grad (CVE-2022-29192)
- Fixes a missing validation which causes denial of service via DeleteSessionTensor (CVE-2022-29194)
- Fixes a missing validation which causes denial of service via GetSessionTensor (CVE-2022-29191)
- Fixes a missing validation which causes denial of service via StagePeek (CVE-2022-29195)
- Fixes a missing validation which causes denial of service via UnsortedSegmentJoin (CVE-2022-29197)
- Fixes a missing validation which causes denial of service via LoadAndRemapMatrix (CVE-2022-29199)
- Fixes a missing validation which causes denial of service via SparseTensorToCSRSparseMatrix (CVE-2022-29198)
- Fixes a missing validation which causes denial of service via LSTMBlockCell (CVE-2022-29200)
- Fixes a missing validation which causes denial of service via Conv3DBackpropFilterV2 (CVE-2022-29196)
- Fixes a CHECK failure in depthwise ops via overflows (CVE-2021-41197)
- Fixes issues arising from undefined behavior stemming from users supplying invalid resource handles (CVE-2022-29207)
- Fixes a segfault due to missing support for quantized types (CVE-2022-29205)
- Fixes a missing validation which results in undefined behavior in SparseTensorDenseAdd (CVE-2022-29206)
- Fixes a missing validation which results in undefined behavior in QuantizedConv2D (CVE-2022-29201)
- Fixes an integer overflow in SpaceToBatchND (CVE-2022-29203)
- Fixes a segfault and OOB write due to incomplete validation in EditDistance (CVE-2022-29208)
- Fixes a missing validation which causes denial of service via Conv3DBackpropFilterV2 (CVE-2022-29204)
- Fixes a denial of service in tf.ragged.constant due to lack of validation (CVE-2022-29202)
- Fixes a segfault when tf.histogram_fixed_width is called with NaN values (CVE-2022-29211)
- Fixes a core dump when loading TFLite models with quantization (CVE-2022-29212)
- Fixes crashes stemming from incomplete validation in signal ops (CVE-2022-29213)
- Fixes a type confusion leading to CHECK-failure based denial of service (CVE-2022-29209)
- Updates curl to 7.83.1 to handle CVE-2022-22576, CVE-2022-27774, CVE-2022-27775, CVE-2022-27776, CVE-2022-27778, CVE-2022-27779, CVE-2022-27780, CVE-2022-27781, CVE-2022-27782 and CVE-2022-30115
- Updates zlib to 1.2.12 after 1.2.11 was pulled due to security issue
**TensorFlow 2.7.1**

**Release 2.7.1**

This release introduces several vulnerability fixes:
- Fixes a floating point division by 0 when executing convolution operators (CVE-2022-21725)
- Fixes a heap OOB read in shape inference for ReverseSequence (CVE-2022-21728)
- Fixes a heap OOB access in Dequantize (CVE-2022-21726)
- Fixes an integer overflow in shape inference for Dequantize (CVE-2022-21727)
- Fixes a heap OOB access in FractionalAvgPoolGrad (CVE-2022-21730)
- Fixes an overflow and divide by zero in UnravelIndex (CVE-2022-21729)
- Fixes a type confusion in shape inference for ConcatV2 (CVE-2022-21731)
- Fixes an OOM in ThreadPoolHandle (CVE-2022-21732)
- Fixes an OOM due to integer overflow in StringNGrams (CVE-2022-21733)
- Fixes more issues caused by incomplete validation in boosted trees code (CVE-2021-41208)
- Fixes integer overflows in most sparse component-wise ops (CVE-2022-23567)
- Fixes an integer overflow in AddManySparseToTensorsMap (CVE-2022-23568)

... (truncated)
**Changelog**

*Sourced from tensorflow-gpu's changelog.*

**Release 2.7.2**

This release introduces the same vulnerability fixes listed in the release notes above.

**Release 2.6.4**

This release introduces the same vulnerability fixes, backported to the 2.6 branch.

... (truncated)
**Commits**

- dd7b8a3 Merge pull request #56034 from tensorflow-jenkins/relnotes-2.7.2-15779
- 1e7d6ea Update RELEASE.md
- 5085135 Merge pull request #56069 from tensorflow/mm-cp-52488e5072f6fe44411d70c6af09e...
- adafb45 Merge pull request #56060 from yongtang:curl-7.83.1
- 01cb1b8 Merge pull request #56038 from tensorflow-jenkins/version-numbers-2.7.2-4733
- 8c90c2f Update version numbers to 2.7.2
- 43f3cdc Update RELEASE.md
- 98b0a48 Insert release notes place-fill
- dfa5cf3 Merge pull request #56028 from tensorflow/disable-tests-on-r2.7
- 501a65c Disable timing out tests
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.7.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
**Dependabot commands and options**
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-05-26T20:13:01Z", + "updated_at": "2022-11-21T21:06:10Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-07-21T03:35:18Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-07-26T01:34:41Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-08-26T12:47:43Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-09-12T16:25:00Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-09-26T18:41:30Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-10-03T13:48:18Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-10-13T16:56:43Z", + "author": "dependabot[bot]" + }, + { + "body": "Dependabot tried to update this pull request, but something went wrong. We're looking into it, but in the meantime you can retry the update by commenting `@dependabot rebase`.", + "created_at": "2022-10-25T13:41:18Z", + "author": "dependabot[bot]" + }, + { + "body": "Superseded by #217.", + "created_at": "2022-11-21T21:06:08Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.6.4 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.6.4.\n
**Release notes**

*Sourced from tensorflow-gpu's releases.*

**TensorFlow 2.6.4**

**Release 2.6.4**

This release introduces the same set of vulnerability fixes as TensorFlow 2.7.2 in the previous entry; the list is identical.
**TensorFlow 2.6.3**

**Release 2.6.3**

This release introduces several vulnerability fixes:
- Fixes a floating point division by 0 when executing convolution operators (CVE-2022-21725)
- Fixes a heap OOB read in shape inference for ReverseSequence (CVE-2022-21728)
- Fixes a heap OOB access in Dequantize (CVE-2022-21726)
- Fixes an integer overflow in shape inference for Dequantize (CVE-2022-21727)
- Fixes a heap OOB access in FractionalAvgPoolGrad (CVE-2022-21730)
- Fixes an overflow and divide by zero in UnravelIndex (CVE-2022-21729)
- Fixes a type confusion in shape inference for ConcatV2 (CVE-2022-21731)
- Fixes an OOM in ThreadPoolHandle (CVE-2022-21732)
- Fixes an OOM due to integer overflow in StringNGrams (CVE-2022-21733)
- Fixes more issues caused by incomplete validation in boosted trees code (CVE-2021-41208)
- Fixes integer overflows in most sparse component-wise ops (CVE-2022-23567)
- Fixes an integer overflow in AddManySparseToTensorsMap (CVE-2022-23568)
- Fixes a number of CHECK-failures in MapStage (CVE-2022-21734)

... (truncated)
**Changelog**

*Sourced from tensorflow-gpu's changelog.*

**Release 2.6.4**

This release introduces the same vulnerability fixes listed in the release notes above.
**Release 2.8.0**

**Major Features and Improvements**

- `tf.lite`:
  - Added TFLite builtin op support for the following TF ops:
    - `tf.raw_ops.Bucketize` op on CPU.
    - `tf.where` op for data types `tf.int32`/`tf.uint32`/`tf.int8`/`tf.uint8`/`tf.int64`.
    - `tf.random.normal` op for output data type `tf.float32` on CPU.
    - `tf.random.uniform` op for output data type `tf.float32` on CPU.
    - `tf.random.categorical` op for output data type `tf.int64` on CPU.
- `tensorflow.experimental.tensorrt`:
  - `conversion_params` is now deprecated inside `TrtGraphConverterV2` in favor of direct arguments: `max_workspace_size_bytes`, `precision_mode`, `minimum_segment_size`, `maximum_cached_engines`, `use_calibration` and ... (see the usage sketch below)
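To make the deprecation concrete, the following is a minimal, untested sketch of the direct-argument style the note describes. The SavedModel paths and option values are placeholders (my assumptions, not part of the changelog), and a TF 2.x build with TensorRT support is assumed:

```python
# Hypothetical sketch: pass conversion options as direct arguments instead of
# the deprecated conversion_params object. Paths and values are placeholders.
from tensorflow.python.compiler.tensorrt import trt_convert as trt

converter = trt.TrtGraphConverterV2(
    input_saved_model_dir="./saved_model",  # placeholder input model
    max_workspace_size_bytes=1 << 30,       # direct argument
    precision_mode="FP16",                  # direct argument
    minimum_segment_size=3,                 # direct argument
    maximum_cached_engines=1,               # direct argument
    use_calibration=False,                  # direct argument
)
converter.convert()
converter.save("./saved_model_trt")         # placeholder output directory
```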
... (truncated)
**Commits**

- 33ed2b1 Merge pull request #56102 from tensorflow/mihaimaruseac-patch-1
- e1ec480 Fix build due to importlib-metadata/setuptools
- 63f211c Merge pull request #56033 from tensorflow-jenkins/relnotes-2.6.4-6677
- 22b8fe4 Update RELEASE.md
- ec30684 Merge pull request #56070 from tensorflow/mm-cp-adafb45c781-on-r2.6
- 38774ed Merge pull request #56060 from yongtang:curl-7.83.1
- 9ef1604 Merge pull request #56036 from tensorflow-jenkins/version-numbers-2.6.4-9925
- a6526a3 Update version numbers to 2.6.4
- cb1a481 Update RELEASE.md
- 4da550f Insert release notes place-fill
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.6.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
**Dependabot commands and options**
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-05-24T16:49:09Z", + "updated_at": "2022-05-26T20:13:06Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #183.", + "created_at": "2022-05-26T20:13:03Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Training Machine Model TypeError: 'numpy.float64' object cannot be interpreted as an integer", + "body": "**Describe the bug**\r\nHi, I am trying to use simBA to automate behavioral analysis in rats. I am ready to begin training after the annotation step, but running into a TypeError: object of type cannot be safely interpreted as an integer. I've tried downgrading numpy with no success.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. loaded in 6 videos and their h5 files. each video has one rat filmed from a side angle\r\n2. completed outlier correction and feature extraction\r\n3. moved 2 of the feature extraction files outside of proj folder for later model validation\r\n4. Under train machine model, saved two settings, one for each classifier\r\n5. Clicked \"train machine models, one for each saved settings\"\r\n\r\n\r\n**Screenshots**\r\n![image](https://user-images.githubusercontent.com/77797963/169869892-755ba354-da0c-4b39-859a-3783b3133828.png)\r\n![image](https://user-images.githubusercontent.com/77797963/169868406-3296047c-e932-4e04-aded-45542d577c46.png)\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python Version 3.6.10\r\n - Are you using anaconda? yes\r\n - using simba-ew-tf-dev 0.90.7\r\n - numpy 1.18.1\r\n \r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "slin02", + "reaction_cnt": 0, + "created_at": "2022-05-23T16:54:02Z", + "updated_at": "2022-05-23T19:26:09Z", + "author": "slin02", + "comments": [ + { + "body": "update, no longer getting this error after unchecking \"generate sklearn learning curves\"", + "created_at": "2022-05-23T17:07:08Z", + "author": "slin02" + }, + { + "body": "Thanks for reporting! I will look into this.", + "created_at": "2022-05-23T19:26:09Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Bump numpy from 1.18.1 to 1.21.0 in /simba", + "body": "Bumps [numpy](https://github.com/numpy/numpy) from 1.18.1 to 1.21.0.\n
**Release notes**

*Sourced from numpy's releases.*

**v1.21.0**

**NumPy 1.21.0 Release Notes**

The NumPy 1.21.0 release highlights are:
- continued SIMD work covering more functions and platforms,
- initial work on the new dtype infrastructure and casting,
- universal2 wheels for Python 3.8 and Python 3.9 on Mac,
- improved documentation,
- improved annotations,
- new PCG64DXSM bitgenerator for random numbers.
In addition there are the usual large number of bug fixes and other improvements.

The Python versions supported for this release are 3.7-3.9. Official support for Python 3.10 will be added when it is released.

:warning: Warning: there are unresolved problems compiling NumPy 1.21.0 with gcc-11.1.
- Optimization level -O3 results in many wrong warnings when running the tests.
- On some hardware NumPy will hang in an infinite loop.
**New functions**

**Add PCG64DXSM BitGenerator**

Uses of the PCG64 BitGenerator in a massively-parallel context have been shown to have statistical weaknesses that were not apparent at the first release in numpy 1.17. Most users will never observe this weakness and are safe to continue to use PCG64. We have introduced a new PCG64DXSM BitGenerator that will eventually become the new default BitGenerator implementation used by default_rng in future releases. PCG64DXSM solves the statistical weakness while preserving the performance and the features of PCG64.

See upgrading-pcg64 for more details.

(gh-18906)
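As a concrete illustration (not part of the release notes themselves), here is a minimal sketch of opting in to the new bit generator. The seed value is an arbitrary placeholder, and NumPy >= 1.21 is assumed:

```python
# PCG64DXSM is a drop-in alternative to PCG64 with better statistical
# properties in massively-parallel settings.
from numpy.random import Generator, PCG64, PCG64DXSM

rng_old = Generator(PCG64(seed=12345))      # previous default bit generator
rng_new = Generator(PCG64DXSM(seed=12345))  # new DXSM variant

print(rng_old.standard_normal(3))
print(rng_new.standard_normal(3))
```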

**Expired deprecations**

- The shape argument of numpy.unravel_index cannot be passed as the dims keyword argument anymore. (Was deprecated in NumPy 1.16.)
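A short sketch of what this expired deprecation means in practice; the index and shape values are arbitrary:

```python
import numpy as np

# Pass the shape positionally or via the `shape` keyword...
print(np.unravel_index(7, (3, 4)))        # -> (1, 3)
print(np.unravel_index(7, shape=(3, 4)))  # -> (1, 3)

# ...but not via the removed `dims` keyword:
# np.unravel_index(7, dims=(3, 4))        # TypeError in NumPy >= 1.21
```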
... (truncated)
**Commits**

- b235f9e Merge pull request #19283 from charris/prepare-1.21.0-release
- 34aebc2 MAINT: Update 1.21.0-notes.rst
- 493b64b MAINT: Update 1.21.0-changelog.rst
- 07d7e72 MAINT: Remove accidentally created directory.
- 032fca5 Merge pull request #19280 from charris/backport-19277
- 7d25b81 BUG: Fix refcount leak in ResultType
- fa5754e BUG: Add missing DECREF in new path
- 61127bb Merge pull request #19268 from charris/backport-19264
- 143d45f Merge pull request #19269 from charris/backport-19228
- d80e473 BUG: Removed typing for == and != in dtypes
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=numpy&package-manager=pip&previous-version=1.18.1&new-version=1.21.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
**Dependabot commands and options**
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-05-17T14:46:18Z", + "updated_at": "2022-06-22T03:22:11Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #189.", + "created_at": "2022-06-22T03:22:09Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "ROI ", + "body": "HI,\r\nI can't draw a different ROI for every single video in my project.\r\nWhen a draw a new ROI in a video, the others ROI disappears from the Region of Interest Settings .\r\n\r\nI can only draw a \"unique\" ROI and the apply (without any changes) to all the videos (through the \"Apply to all\" button).\r\n\r\nI'm using simba 0.87.7 (I followed [this suggestion](https://github.com/sgoldenlab/simba/issues/143#issuecomment-948659468)) on a mac (OS 10.13.6).\r\n\r\nCarlo.\r\n\r\n\r\n", + "user": "carlitomu", + "reaction_cnt": 0, + "created_at": "2022-05-11T18:06:33Z", + "updated_at": "2022-05-24T13:23:39Z", + "author": "carlitomu", + "comments": [ + { + "body": "Hi @carlitomu thanks for reporting - can you update simba to the latest version `pip install simba-uw-tf-dev --upgrade` or `pip install simba-uw-tf-dev==0.91.8` and let me know if that fixes the issue? \r\n\r\n", + "created_at": "2022-05-11T19:03:30Z", + "author": "sronilsson" + }, + { + "body": "Thanks.\r\n\r\nI tried to update simba to the latest version (pip install simba-uw-tf-dev --upgrade) and to 0.91.8 release, but it doesn't work. \r\nI get this error message:\r\n![Schermata 2022-05-11 alle 23 12 09](https://user-images.githubusercontent.com/85634193/167948988-3521f6fe-096f-4a0e-ac2d-4c271ba61e77.png)\r\n![Schermata 2022-05-11 alle 23 14 53](https://user-images.githubusercontent.com/85634193/167949065-4340a595-d8d5-4478-b9cc-2f531490cde2.png)\r\n\r\n", + "created_at": "2022-05-11T21:16:28Z", + "author": "carlitomu" + }, + { + "body": "I see - not immediately familiar with this error but can you try `pip install simba-uw-tf-dev --upgrade --no-deps` and let me know if that works?", + "created_at": "2022-05-11T21:25:04Z", + "author": "sronilsson" + }, + { + "body": " no, it doesn't work and I get the same error message.", + "created_at": "2022-05-11T21:54:58Z", + "author": "carlitomu" + }, + { + "body": "@carlitomu - I gave it a go on macos and see if I could replicate and did not see the issue, so may not be a general bug...\r\n\r\n(i) Can you try the solution with these three commands?\r\nhttps://github.com/matplotlib/matplotlib/issues/16700/#issuecomment-604373104\r\n\r\n(ii) If that does not work, could you try to create a new conda python3.6 environment, run `pip install simba-uw-tf-dev`, and see if that fixes it? \r\n\r\n", + "created_at": "2022-05-12T11:09:37Z", + "author": "sronilsson" + }, + { + "body": "(I) I tried this solution, uninstalling and installing matplotlib and I get this error message\r\n![Schermata 2022-05-12 alle 18 05 53](https://user-images.githubusercontent.com/85634193/168119763-01effe21-4bd1-4cef-b4d4-42bbe7584d40.png)\r\n\r\nSo I uninstalled matplotib again and numpy, Pillow and shapely. Then I installed the required versions of these packages, getting a new error message:\r\n![Schermata 2022-05-12 alle 18 08 02](https://user-images.githubusercontent.com/85634193/168120156-b11ed3ca-7743-4c18-87c8-dfc8945c9288.png)\r\n\r\n(II) I tried all your solution in a new conda environments!\r\n", + "created_at": "2022-05-12T16:10:14Z", + "author": "carlitomu" + }, + { + "body": "Thanks - the warnings should not be an issue ans appears to be related to version of `matplotlib`. 
Can you try this answer and see if it boots up? \r\n\r\nhttps://stackoverflow.com/a/65939524\r\n\r\nSo first either `pip uninstall matplotlib` or `conda remove --force matplotlib` \r\nFollowed by either `pip install matplotlib==3.0.3` or `conda install matplotlib=3.0.3`\r\n\r\n", + "created_at": "2022-05-12T17:42:38Z", + "author": "sronilsson" + }, + { + "body": "mhm, as I wrote earlier, I'have just tried and installed that matplotlib version.\r\n\r\nSo again the message error is this one:\r\n\r\n![Schermata 2022-05-12 alle 20 20 31](https://user-images.githubusercontent.com/85634193/168142692-a3eb8bea-6bee-480c-851a-db2d49f77315.png)\r\n\r\n", + "created_at": "2022-05-12T18:21:16Z", + "author": "carlitomu" + }, + { + "body": "Sorry read it too quickly, did you also try to install matplotlib through conda rather than pip?\r\n\r\nhttps://github.com/ludwig-ai/ludwig/issues/114#issuecomment-464684304", + "created_at": "2022-05-12T18:33:04Z", + "author": "sronilsson" + }, + { + "body": "yes, with conda I get a new one:\r\n![Schermata 2022-05-12 alle 20 36 23](https://user-images.githubusercontent.com/85634193/168145559-bca832bd-4162-465d-89c4-f7344bc5b5c1.png)\r\n![Schermata 2022-05-12 alle 20 36 48](https://user-images.githubusercontent.com/85634193/168145583-1cdec37e-6c98-42ed-8b1e-7a4f9e6ee46b.png)\r\n\r\n", + "created_at": "2022-05-12T18:38:57Z", + "author": "carlitomu" + }, + { + "body": "I see thanks for troubleshooting, might be some compatibility issue with older MacOS version, as I'm running 12.x, I will get back to you tomorrow ", + "created_at": "2022-05-12T18:53:47Z", + "author": "sronilsson" + }, + { + "body": "Ok, thanks!", + "created_at": "2022-05-12T19:03:15Z", + "author": "carlitomu" + }, + { + "body": "Right I did a bit of reading - and seems to be a known issue on some more dated macOS versions with the specific matplotlob and tkinter versions required by simba… what confuses me is that the older versions of simba seems to run on your computer, while the reason one does not, and I have made no changes to any dependencies...\r\n\r\nhttps://stackoverflow.com/questions/30031063/nsexception-with-tkinter-on-mac\r\n\r\nYou could try and:\r\n\r\n(i) update your MacOS - this is probably the more ardous suggestion so understand if you don’t want to go down that route… \r\n\r\n(ii) See if you can get it running using python 3.7 or 3.8 in conda instead of python 3.6. This ensures that you have a different version of tkinter so might avoid the clash. I confimed it runs on my MacOS in python 3.7 (but I have MacOS 12.3.1). \r\n", + "created_at": "2022-05-13T12:52:28Z", + "author": "sronilsson" + }, + { + "body": "Hi, finally I have decided to run simba on another workstation with Windows.\r\nIn any case, thank you for your help!", + "created_at": "2022-05-20T12:42:39Z", + "author": "carlitomu" + }, + { + "body": "Thanks for letting me know @carlitomu, very helpful - I'll make a note about this in the FAQ", + "created_at": "2022-05-20T12:50:52Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Seeking guidance for importing Ethovision Behavior annotations", + "body": "Hello,\r\n\r\nI am requesting guidance for how to import Ethovision csv files. I see the section for third-party behavior labels however, I am not familiar with the programs nor how their excel files are formatted. Is this the section where I should upload the ethovision files? 
If so, how should the excel sheet be formatted?", + "user": "vsedwick", + "reaction_cnt": 0, + "created_at": "2022-04-25T07:14:30Z", + "updated_at": "2022-10-12T22:33:45Z", + "author": "vsedwick", + "comments": [ + { + "body": "Hi @vsedwick! The third-party behavior label tools listed are all free and open-source. We would very much like to include the ability to use annotations from Noldus software. However, I haven't been able to write the code for this, as I don't have access to Noldus Observer data or the software; the software is not cheap.\r\n\r\nIs it correct that the tool for human annotations in the Noldus suite is called Observer, and that this is the tool your annotations were created in? I can include the option later this week, if you are happy to share some Observer annotations in excel or CSV format (or whatever format they come in)?\r\n\r\n\r\n", + "created_at": "2022-04-25T12:48:53Z", + "author": "sronilsson" + }, + { + "body": "Yes I would love to share! At present, I only have excel sheets from Ethovision, which is also through Noldus, but I can ask my labmates for their Observer files if you'd like access to those as well. I've attached two Ethovision workbooks for pup exposure; each has two sheets that display the data in different formats. One dictates the start/stop times and the other is a 0/1 binary spread for when a behavior is active. I can get back to you about Observer sheets once my labmate sends them to me.\r\n\r\n[Rawdata-photometry-Trial-23](https://docs.google.com/spreadsheets/d/1cYdAmgTIb-U1Cj8PqXKVb0EwgJE8eXf5/edit?usp=sharing&ouid=100178822141650480901&rtpof=true&sd=true) \r\n\r\n\r\n[Rawdata-photometry-Trial-17](https://docs.google.com/spreadsheets/d/14mlF7Mxwo3DxZmc2Io4INHkmNXbJARI1/edit?usp=sharing&ouid=100178822141650480901&rtpof=true&sd=true)\r\n\r\nThank you so much!\r\n", + "created_at": "2022-04-25T16:32:55Z", + "author": "vsedwick" + }, + { + "body": "Thank you! All makes sense! Very similar to some other open-source formats so we can repurpose some code. Just three questions:\r\n\r\n(i) The output format is Excel `.xlsx` - have you seen the data being saved in any other format, like CSV? Is `.xlsx` the standard as far as you know?\r\n\r\n(ii) What text in the files refers to the original video file? For example, is the video called `Trial 23.mp4` or `Trial 23.avi` or `Trial 23.WhatEverVideoFileFormat`? \r\n\r\n(iii) The important headers are `Trial time`, `Recording Time`, `Subject`, `Behavior`, `Event`. Have you ever seen them written any differently? Like `Time` instead of `Trial time` etc? \r\n\r\n", + "created_at": "2022-04-25T18:31:42Z", + "author": "sronilsson" + }, + { + "body": "Awesome! Glad it's not too different.\n\n(i) .xlsx seems to be the default for exports; you could also export as a text file. We just convert to CSVs afterwards.\n(ii) Apologies! I deleted that portion. It should be in cell B19 in the form of a file path e.g. D:\\Experiment\\8_2022-01-25T19_11_14.AVI. And cell B2 is the project folder name e.g. \"Photometry Pup Exposure\". I amended them to the file links.\n(iii) I've never seen it as anything different.\n", + "created_at": "2022-04-25T18:56:56Z", + "author": "vsedwick" + }, + { + "body": "Hi @vsedwick - if you update SimBA to version `0.91.6` - you should see this button at the bottom: \r\n\r\n![image](https://user-images.githubusercontent.com/34761092/165324646-6a60beaa-b665-4946-bb19-6842520143e6.png)\r\n\r\nYou can try it, but I have not written any documentation yet. The script that does the lifting is called `simba.ethovision_import.py` if you want to take a look. I foresee some early issues we may have to deal with. Let me know if any of these assumptions can cause errors:\r\n\r\n1. We assume that the input Excel files all have at least 2 sheets, and the important information is in the second sheet.\r\n2. We assume that this sheet always has a first row beginning with `Number of header lines:`, which tells us how many rows in the beginning of the sheet contain only meta information. \r\n3. We assume that one row of the header lines in the second sheet will begin with `Video file`, which tells us the name of the video that we are analyzing (we need this to pull fps etc). \r\n4. SimBA will only grab annotations for the classifiers defined in the SimBA project. So, if we have annotations for a behavior called `groom` in the Ethovision annotations, but `groom` was not defined as a classifier when the SimBA project was created, then SimBA will ignore those annotations. \r\n\r\nLast question: can it ever be that a behavior has a `state start` timestamp, but it does not have a `state end` timestamp, for example if the session ends and you did not click to indicate that the behavior ended in time?\r\n", + "created_at": "2022-04-26T14:46:52Z", + "author": "sronilsson" + }, + { + "body": "All the assumptions should be correct except for number 1. So, there are technically 3 sheets, but the user can choose which ones to export based on what they need. The workbook might have a single sheet, two, or all three, but the manual scoring log is always the last sheet. The export is quick and simple though, so if there's a preface that simba just needs that single sheet, then noldus users can just omit the others. The third sheet that I didn't export is tracking data.\nAlso, for the last question, there should always be a state end even if you don't manually record it. Once you exit out of the \"scoring mode\" it automatically ends the behavior. 
I'm going to try it out later today, so I will let you know if I see it.\n\nThank you so much for this, I truly appreciate it!!\n", + "created_at": "2022-04-27T19:09:48Z", + "author": "vsedwick" + }, + { + "body": "```\r\nbut the manual scoring log is always the last sheet\r\n```\r\nThank you! I will use the rule above when reading the data. Whatever the last sheet is, we will treat it as the annotations. Please let me know if you bump into any other errors or oddities and I will try to address them at the same time. \r\n", + "created_at": "2022-04-28T22:31:19Z", + "author": "sronilsson" + },
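A minimal sketch of the rule just agreed on; this is an illustration, not the actual `simba.ethovision_import.py` code, and the file name is the example workbook shared earlier in the thread:

```python
# Read every sheet of an Ethovision export and treat the last sheet as the
# manual scoring log, per the rule above. Indexing from the end also
# tolerates workbooks exported with one, two, or three sheets.
import pandas as pd

sheets = pd.read_excel("Rawdata-photometry-Trial-23.xlsx", sheet_name=None)  # dict: sheet name -> DataFrame
scoring_log = list(sheets.values())[-1]
print(list(sheets.keys())[-1], scoring_log.shape)
```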
+ { + "body": "Sounds perfect! I was able to update and see the Ethovision option. Things got a little hectic, so I haven't had the time to figure out how to properly use it, but I will update you when I do. Again, thank you so very much.", + "created_at": "2022-05-05T20:12:03Z", + "author": "vsedwick" + }, + { + "body": "So I tried out the Ethovision import and I keep getting an error for my classifiers when I train my model. I've tried the following:\n1. All my files match the names of my videos exactly\n2. I've matched my classifier names in simba to the ethovision file.\n3. I attempted to rename classifiers with spaces to ones without.\n4. I've matched my \"sniff\" settings to the ones in the metafile at this link https://osf.io/n5y8t/\n5. I updated pandas\n\nThe following is the error I keep receiving with every classifier:\n\nException in thread Thread-1:\nTraceback (most recent call last):\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\indexes\\base.py\", line 2898, in get_loc\n return self._engine.get_loc(casted_key)\n File \"pandas\\_libs\\index.pyx\", line 70, in pandas._libs.index.IndexEngine.get_loc\n File \"pandas\\_libs\\index.pyx\", line 101, in pandas._libs.index.IndexEngine.get_loc\n File \"pandas\\_libs\\hashtable_class_helper.pxi\", line 1675, in pandas._libs.hashtable.PyObjectHashTable.get_item\n File \"pandas\\_libs\\hashtable_class_helper.pxi\", line 1683, in pandas._libs.hashtable.PyObjectHashTable.get_item\nKeyError: 'sniffing2'\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\threading.py\", line 916, in _bootstrap_inner\n self.run()\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\threading.py\", line 864, in run\n self._target(*self._args, **self._kwargs)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 5991, in trainmultimodel\n train_multimodel(self.projectconfigini)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\train_multiple_models_from_meta.py\", line 223, in train_multimodel\n totalTargetframes = features[classifierName].sum()\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\frame.py\", line 2906, in __getitem__\n indexer = self.columns.get_loc(key)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\core\\indexes\\base.py\", line 2900, in get_loc\n raise KeyError(key) from err\nKeyError: 'sniffing2'\n", + "created_at": "2022-05-07T16:58:58Z", + "author": "vsedwick" + }, + { + "body": "Hi @vsedwick ! Thanks for troubleshooting this. This error happens if SimBA can't find a column with the header `sniffing2` inside one or several CSV files within the `project_folder/csv/targets_inserted` directory. This column represents the annotations from ethovision for `sniffing2`, and will be filled with `0` and `1`. Is it possible to open the files inside the `project_folder/csv/targets_inserted` directory and check that the `sniffing2` column is present in all files? It will be right towards the end. Could it be that one or several of the files you have processed with pose estimation and SimBA up till this point have ended up in the `project_folder/csv/targets_inserted` directory without having an ethovision annotation file? ", + "created_at": "2022-05-07T18:10:44Z", + "author": "sronilsson" + },
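A hedged sketch of the check described above; the project path and the `sniffing2` classifier name are taken from the thread, everything else is illustrative:

```python
# Flag any file in targets_inserted that lacks the classifier's annotation
# column; one such file is enough to trigger the KeyError during training.
import glob
import os

import pandas as pd

for path in glob.glob(os.path.join("project_folder", "csv", "targets_inserted", "*.csv")):
    columns = pd.read_csv(path, nrows=0).columns  # header row only, cheap
    if "sniffing2" not in columns:
        print(f"Missing 'sniffing2' annotation column: {path}")
```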
+ { + "body": "So I kind of cheated a little bit. When I imported through the GUI, I'd receive text confirmation that the files were imported (All Ethovision annotations added. Files with annotations are located in the project_folder/csv/targets_inserted directory.) but the folder would remain empty. So, I manually placed them in the folder, not realizing the content should change. Now I'm realizing the actual issue is the import, not the behavior training.\n\nAlso, I couldn't get it to read .xlsx files (using xlrd), only .xls. I don't receive any error with .xls files, but the error I receive with .xlsx files is the following:\n\nException in Tkinter callback\nTraceback (most recent call last):\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\tkinter\\__init__.py\", line 1705, in __call__\n return self.func(*args)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 5103, in import_ethovision\n ImportEthovision(config_path=self.projectconfigini, folder_path=ann_folder)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\ethovision_import.py\", line 29, in __init__\n self.read_files()\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\ethovision_import.py\", line 34, in read_files\n ethovision_df = pd.read_excel(file_path, sheet_name=None)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\util\\_decorators.py\", line 296, in wrapper\n return func(*args, **kwargs)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\io\\excel\\_base.py\", line 304, in read_excel\n io = ExcelFile(io, engine=engine)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\io\\excel\\_base.py\", line 867, in __init__\n self._reader = self._engines[engine](self._io)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\io\\excel\\_xlrd.py\", line 22, in __init__\n super().__init__(filepath_or_buffer)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\io\\excel\\_base.py\", line 353, in __init__\n self.book = self.load_workbook(filepath_or_buffer)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\pandas\\io\\excel\\_xlrd.py\", line 37, in load_workbook\n return open_workbook(filepath_or_buffer)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\xlrd\\__init__.py\", line 170, in open_workbook\n raise XLRDError(FILE_FORMAT_DESCRIPTIONS[file_format]+'; not supported')\nxlrd.biffh.XLRDError: Excel xlsx file; not supported\n", + "created_at": "2022-05-09T04:32:14Z", + "author": "vsedwick" + },
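For context on the failure above: xlrd 2.0 dropped `.xlsx` support, which matches the versions reported a few comments below. As an alternative to pinning older versions, a hypothetical reader on a recent pandas (>= 1.2, with openpyxl installed) could pick the engine by extension:

```python
# Select the Excel reader engine from the file extension: openpyxl for
# .xlsx, xlrd for legacy .xls. This is a sketch, not the thread's fix.
import pandas as pd

def read_ethovision(file_path: str) -> dict:
    engine = "openpyxl" if file_path.lower().endswith(".xlsx") else "xlrd"
    return pd.read_excel(file_path, sheet_name=None, engine=engine)
```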
+ { + "body": "Thanks @vsedwick - if you do `pip show pandas` and `pip show xlrd` in the terminal, which versions of the two packages do you see? ", + "created_at": "2022-05-09T11:11:00Z", + "author": "sronilsson" + }, + { + "body": "My pandas is 1.1.5\nXlrd is 2.0.1\n", + "created_at": "2022-05-09T14:51:31Z", + "author": "vsedwick" + }, + { + "body": "In your SimBA environment, could you downgrade pandas to version 0.25.3 and xlrd to version 1.2.0 and let me know if that fixes it? \r\n\r\nit would be `pip install pandas==0.25.3` followed by `pip install xlrd==1.2.0`", + "created_at": "2022-05-09T15:34:55Z", + "author": "sronilsson" + }, + { + "body": "Great news!\n\nI tried again on a clean install on a different computer and everything works perfectly now! It's importing properly and is able to train behaviors. Sorry for the trouble, but thank you for the help. I haven't evaluated the training yet, but I will let you know if I run into any more issues.\n", + "created_at": "2022-05-09T18:24:15Z", + "author": "vsedwick" + }, + { + "body": "@vsedwick - thanks for letting me know! Please let me know if you run into any other errors", + "created_at": "2022-05-09T18:28:19Z", + "author": "sronilsson" + }, + { + "body": "I'm still getting the same issue. No errors with .xls; the error below is with .xlsx, and neither gets imported into targets_inserted\n\nException in Tkinter callback\nTraceback (most recent call last):\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\tkinter\\__init__.py\", line 1705, in __call__\n return self.func(*args)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 5103, in import_ethovision\n ImportEthovision(config_path=self.projectconfigini, folder_path=ann_folder)\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\ethovision_import.py\", line 29, in __init__\n self.read_files()\n File \"C:\\ProgramData\\Anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\ethovision_import.py\", line 35, in read_files\n manual_scoring_sheet_name = list(ethovision_df.keys())[1]\nIndexError: list index out of range\n", + "created_at": "2022-10-11T08:45:34Z", + "author": "vsedwick" + }, + { + "body": "Hi @vsedwick - thanks for letting me know! This error comes from SimBA expecting Ethovision Excel files with **two** sheets. The files you are importing, however, seem to contain fewer sheets. [THIS](https://github.com/sgoldenlab/simba/blob/master/tests/test_data/multi_animal_dlc_two_c57/ethovision_import/test_input.xlsx) is an example Ethovision file that SimBA expects. If you compare your file to this file, do you see any differences in sheet numbers etc? If you share a copy of an Ethovision file that fails to import, I can make the code work for both types. ", + "created_at": "2022-10-11T12:26:24Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Seeking help for some parameter setting", + "body": "Hi, there are some questions I would like your help with.\r\n(1) Firstly, what is the recommended outlier correction criterion for movement and location? Is 1 for movement and 1.5 for location OK? \r\n(2) Secondly, what are the recommended hyperparameters for the RF (random forest) model? To be honest, it is unfamiliar to me and it seems difficult. Thank you for your attention!", + "user": "ZZysh", + "reaction_cnt": 0, + "created_at": "2022-04-21T08:01:19Z", + "updated_at": "2022-04-22T12:11:11Z", + "author": "ZZysh", + "comments": [ + { + "body": "Hi @ZZysh! You're right, hyperparameters can be tricky, but I can give some pointers. To avoid giving you some generic answers, can you tell me what types of animals, how many animals, and which body-parts you are tracking? ", + "created_at": "2022-04-21T15:39:39Z", + "author": "sronilsson" + }, + { + "body": "Thank you so much for such a quick reply!\nI have 2 experimental conditions (or sessions):\n(1) First, there are 2 c57 mice (black, and one of them has the back fur shaved so it can be discriminated manually) in a square box, recorded by an infrared camera.\nI use deeplabcut to analyze the pose estimation and there are 8 body points used, as suggested. However, the tail rear is difficult to identify and I prefer to use the 7 points you suggested.\n(2) The other condition is only one c57 mouse in the box.\n\nI want to cluster the behavior, especially allogrooming, rearing, moving, and static (I mean motionless).\n\nThanks again for the rapid reply!", + "created_at": "2022-04-21T15:54:32Z", + "author": "ZZysh" + }, + { + "body": "Cheers! \r\n\r\nFor outlier correction - I'd start with what was used in this paper https://www.biorxiv.org/content/10.1101/2020.04.19.049452v2.full.pdf: `...movement criterion to 0.7, and location criterion to 1.5`. If you know your tracking is good and contains few outliers, there is no need to perform outlier correction, and you can click the `Skip` button. An alternative approach is to `smooth` the data, documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#to-import-multiple-dlc-csv-files) and example video [HERE](https://www.youtube.com/watch?v=d9-Bi4_HyfQ), which should also help with outliers.\r\n\r\nFor the hyperparameters, you can start with the ones in [THIS FILE](https://github.com/sgoldenlab/simba/blob/master/misc/BtWGaNP_meta.csv). You can download this file, and then load it into the SimBA GUI as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#train-predictive-classifiers-settings) so you don't have to fill in any hyperparameters to start. If they don't work as well as anticipated, I recommend beginning by titrating the undersample ratio. ", + "created_at": "2022-04-21T18:55:00Z", + "author": "sronilsson" + }, + { + "body": "Thanks a lot, I will try.", + "created_at": "2022-04-22T03:12:13Z", + "author": "ZZysh" + },
+ { + "body": "Hi Simon, I want to use the GPU to accelerate the video trim function (FFmpeg) assembled in Simba, because I have a long video. Could you give me some advice?\nThanks a lot, and looking forward to your reply.", + "created_at": "2022-04-22T10:19:05Z", + "author": "ZZysh" + }, + { + "body": "Hi @ZZysh - it is not something that is supported in SimBA and you'd have to code it up yourself outside the SimBA environment. It does seem possible but you'd have to tinker with the ffmpeg installation... https://www.cyberciti.biz/faq/how-to-install-ffmpeg-with-nvidia-gpu-acceleration-on-linux/\r\n\r\nIf you do give it a try, please let me know how it goes and if you manage to get it working. ", + "created_at": "2022-04-22T12:08:23Z", + "author": "sronilsson" + },
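To illustrate the workaround pointed to above, here is a hypothetical call to a GPU-enabled FFmpeg from Python. The `-hwaccel cuda` and `h264_nvenc` options only exist in FFmpeg builds compiled with NVIDIA support (per the linked guide), and the file names and timestamps are placeholders:

```python
# Trim a long video using NVDEC for decoding and NVENC for encoding.
import subprocess

subprocess.run([
    "ffmpeg", "-y",
    "-hwaccel", "cuda",
    "-i", "long_video.mp4",
    "-ss", "00:00:00", "-to", "00:10:00",
    "-c:v", "h264_nvenc",
    "trimmed.mp4",
], check=True)
```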
+ { + "body": "Thanks, I will try it later and I will tell you after I do it.\nThanks a lot.", + "created_at": "2022-04-22T12:11:11Z", + "author": "ZZysh" + } + ] + }, + { + "title": "Absolute path is not always used in `process_videos_automation_linux.py`", + "body": "**Describe the bug**\r\nExecuting a batch pre-process video task errors out with multiple \"file not found\" errors after the first task is run. After looking at the list of commands to run outputted by SimBA, it appears absolute paths are used most of the time, except for the final `mv` command after a task. See below for instance:\r\n```\r\ncp \"/home/florian/Documents/Temp/TEST/input_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/\"\r\nffmpeg -y -i \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" -vf \"crop=749:701:457:9\" -c:v libx264 -c:a copy \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061_cropped.mp4\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\ncp \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061_cropped.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061_cropped.mp4\" \"MVI_0061.MP4\"\r\n```\r\nAs you can see in the last `mv` command, the destination does not include the outputDir. It thus also seems to overwrite the input video (I still need to confirm that, though).\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Videos' > 'Batch pre-process videos'\r\n2. Enter multiple tasks for at least one video.\r\n3. Execute the task(s).\r\n4. The outputDir is prepared correctly, the input video is copied to that folder, the `tmp` dir is also correctly created, and the first `ffmpeg` operation completes successfully.\r\n5. The process fails with a \"no such file or directory\" error when trying to run the second task on the video.\r\n\r\n**Expected behavior**\r\nAll pre-process tasks complete successfully, without overwriting the original video.\r\n\r\n**Screenshots**\r\nNA.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: openSUSE Tumbleweed\r\n - Python Version: 3.6\r\n - Are you using anaconda? yes.\r\n \r\n**Additional Context**\r\nAfter a quick look at the code, it appears it could be related to the following (and the related line for other pre-processing tasks):\r\nhttps://github.com/sgoldenlab/simba/blob/4cac1f8c7864d752bdb49251d4f3b098f7041124/simba/process_videos_automation_linux.py#L142\r\n\r\nShouldn't that include the `outputdir` in the destination? Something like (untested):\r\n```python\r\n 'mv \\\"' + os.path.join(str(outputdir),output) + '\" \"' + os.path.join(str(outputdir),os.path.basename(currentFile)+'\"')\r\n```\r\n\r\nThanks for your time!", + "user": "florianduclot", + "reaction_cnt": 0, + "created_at": "2022-04-15T16:17:46Z", + "updated_at": "2022-04-15T22:26:27Z", + "author": "florianduclot", + "comments": [ + { + "body": "Thanks for reporting @florianduclot, much appreciated. Would you mind testing the potential fix and let me know if that is the issue? 
I don't have access to linux machine at the moment and can't test it out.", + "created_at": "2022-04-15T17:24:59Z", + "author": "sronilsson" + }, + { + "body": "Hi @sronilsson ,\r\n\r\nI went ahead and changed that `mv` command for all the processing tasks in `process_videos_automation_linux.py`. Note that each task has a `_auto()` variant but I'm not sure if/how they are used so I didn't touch them.\r\n\r\nI also corrected a backslash in the `superimpose` task that was causing this task to silently fail (in my hands, at least).\r\n\r\nHere's a `.diff` for all these changes but let me know if you'd like a proper PR to better have a look at them:\r\n```diff\r\n--- process_videos_automation_linux_backup.py\t2022-04-15 10:49:05.430268543 -0400\r\n+++ process_videos_automation_linux.py\t2022-04-15 17:59:15.346836750 -0400\r\n@@ -32,7 +32,7 @@\r\n command = (str('ffmpeg -y -i ') + '\"'+ str(outputdir) + '/' + os.path.basename(currentFile) + '\"'+ ' -vf scale='+str(width)+':'+ str(height) + ' ' + '\"'+ str(outputdir) + '/' + output+ '\"' + ' -hide_banner' + '\\n'\r\n 'mv \\\"' + str(outputdir) + '/' + os.path.basename(currentFile) + '\" \"' + (outputdir) + '/' + 'tmp/\"' + '\\n'\r\n 'cp \\\"' + str(outputdir) + '/' + output + '\" \"' + (outputdir) +'/' +'tmp/\"' +'\\n'\r\n- 'mv \\\"' +os.path.join(str(outputdir),output) + '\" \"' + os.path.basename(currentFile)+'\"')\r\n+ 'mv \\\"' +os.path.join(str(outputdir),output) + '\" \"' + os.path.join(str(outputdir),os.path.basename(currentFile))+'\"')\r\n \r\n print(filesFound,'added into the downsample queue')\r\n return command\r\n@@ -47,7 +47,7 @@\r\n command = (str('ffmpeg -y -i ') + '\"'+ str(outputdir) + '/' + os.path.basename(currentFile) + '\"'+ ' -filter:v fps='+ str(fps) + ' ' + '\"'+ str(outputdir) + '/' + output+ '\"' + ' -hide_banner' + '\\n'\r\n 'mv \\\"' + str(outputdir) + '/' + os.path.basename(currentFile) + '\" \"' + (outputdir) + '/' + 'tmp/\"' + '\\n'\r\n 'cp \\\"' + str(outputdir) + '/' + output + '\" \"' + (outputdir) +'/' +'tmp/\"' +'\\n'\r\n- 'mv \\\"' +os.path.join(str(outputdir),output) + '\" \"' + os.path.basename(currentFile)+'\"')\r\n+ 'mv \\\"' +os.path.join(str(outputdir),output) + '\" \"' + os.path.join(str(outputdir),os.path.basename(currentFile))+'\"')\r\n \r\n print(filesFound,'added into the fps queue')\r\n return command\r\n@@ -78,7 +78,7 @@\r\n command = (str('ffmpeg -y -i ') + '\"'+ str(outputdir) + '/' + os.path.basename(currentFile)+ '\"' + ' -vf format=gray '+ '\"'+ str(outputdir) + '/' + output + '\"'+ '\\n'\r\n 'mv \\\"' + str(outputdir) + '/' + os.path.basename(currentFile) + '\" \"' + (outputdir)+'/'+'tmp/\"' +'\\n' \r\n 'cp \\\"' + str(outputdir) + '/' + output + '\" \"' + (outputdir)+'/'+'tmp/\"' +'\\n'\r\n- 'mv \\\"' + os.path.join(str(outputdir),output) + '\" \"' + os.path.basename(currentFile)+'\"')\r\n+ 'mv \\\"' + os.path.join(str(outputdir),output) + '\" \"' + os.path.join(str(outputdir),os.path.basename(currentFile))+'\"')\r\n \r\n print(filesFound,'added into the grayscale queue')\r\n return command\r\n@@ -105,10 +105,10 @@\r\n outFile = currentFile.replace('.mp4', '')\r\n outFile = str(outFile) + '_frame_no.mp4'\r\n output = os.path.basename(outFile)\r\n- command = (str('ffmpeg -y -i ') + '\"'+ str(outputdir)+'/' + os.path.basename(currentFile) + '\"'+ ' -vf \"drawtext=fontfile=Arial.ttf: text=/'%{frame_num}/': start_number=0: x=(w-tw)/2: y=h-(2*lh): fontcolor=black: fontsize=20: box=1: boxcolor=white: boxborderw=5\" -c:a copy '+ '\"'+ str(outputdir) + '/' + output + '\"'+ '\\n'\r\n+ command = 
(str('ffmpeg -y -i ') + '\"'+ str(outputdir)+'/' + os.path.basename(currentFile) + '\"'+ ' -vf \"drawtext=fontfile=Arial.ttf: text=\\'%{frame_num}\\': start_number=0: x=(w-tw)/2: y=h-(2*lh): fontcolor=black: fontsize=20: box=1: boxcolor=white: boxborderw=5\" -c:a copy '+ '\"'+ str(outputdir) + '/' + output + '\"'+ '\\n'\r\n 'mv \\\"' + str(outputdir) + '/' + os.path.basename(currentFile) + '\" \"' + (outputdir)+'/'+'tmp/\"' +'\\n'\r\n 'cp \\\"' + str(outputdir) + '/' + output + '\" \"' + (outputdir)+'/'+'tmp/\"' + '\\n'\r\n- 'mv \\\"' + os.path.join(str(outputdir),output) + '\" \"' + os.path.basename(currentFile)+'\"')\r\n+ 'mv \\\"' + os.path.join(str(outputdir),output) + '\" \"' + os.path.join(str(outputdir), os.path.basename(currentFile)) +'\"')\r\n \r\n print(filesFound,'added into the superimpose frame queue.')\r\n return command\r\n@@ -139,7 +139,7 @@\r\n command = (str('ffmpeg -y -i ') + '\"'+ str(outputdir)+'/' + os.path.basename(currentFile) + '\"'+ ' -ss ' + str(starttime) +' -to ' + str(endtime) + ' -async 1 '+ '\"' + str(outputdir)+ '/' + output + '\"'+ '\\n'\r\n 'mv \\\"' + str(outputdir) + '/' + os.path.basename(currentFile) + '\" \"' + (outputdir)+'/'+'tmp/\"' +'\\n'\r\n 'cp \\\"' + str(outputdir) + '/' + output + '\" \"' + (outputdir)+'/'+'tmp/\"' + '\\n'\r\n- 'mv \\\"' + os.path.join(str(outputdir),output) + '\" \"' + os.path.basename(currentFile)+'\"')\r\n+ 'mv \\\"' + os.path.join(str(outputdir),output) + '\" \"' + os.path.join(str(outputdir), os.path.basename(currentFile)) +'\"')\r\n \r\n print(filesFound,'added into the shorten video queue')\r\n return command\r\n@@ -266,8 +266,8 @@\r\n if total != 0:\r\n command = (str('ffmpeg -y -i ')+ '\"' + str(outputdir) + '/' + str(videoName)+ '\"' + str(' -vf ') + str('\"crop=') + str(width) + ':' + str(height) + ':' + str(topLeftX) + ':' + str(topLeftY) + '\" ' + str('-c:v libx264 -c:a copy ') + '\"'+ str(os.path.join(outputdir, fileOutName))+ '\"' + '\\n'\r\n 'mv \\\"' + str(outputdir) + '/' + videoName + '\" \"' + (outputdir) + '/' + 'tmp/\"' + '\\n'\r\n- 'cp \\\"' + str(outputdir) + '/' + os.path.basename(fileOutName) + '\" \"' + (outputdir) + '/' + 'tmp/\"' + '\\n'\r\n- 'mv \\\"' + os.path.join(str(outputdir), os.path.basename(fileOutName)) + '\" \"' + os.path.basename(videoName) + '\"')\r\n+ 'cp \\\"' + str(outputdir) + '/' + os.path.basename(fileOutName) + '\" \"' + (outputdir) + '/' + 'tmp/\"' + '\\n'\r\n+ 'mv \\\"' + os.path.join(str(outputdir), os.path.basename(fileOutName)) + '\" \"' + os.path.join(str(outputdir), os.path.basename(videoName)) + '\"')\r\n print(videoName, 'added into the crop video queue.')\r\n os.remove(filePath)\r\n return command\r\n```\r\n\r\n## Test\r\nThis was tested to successfully apply all the tasks listed in the screenshot below, without overwriting the original file.\r\n![image](https://user-images.githubusercontent.com/17770886/163645000-661f57f1-d417-4c26-87a4-4afe0be3197e.png)\r\n\r\nThe list of commands that SimBA created was:\r\n```\r\ncp \"/home/florian/Documents/Temp/TEST/input_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/\"\r\nffmpeg -y -i \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" -vf \"crop=787:701:422:12\" -c:v libx264 -c:a copy \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061_cropped.mp4\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\ncp \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061_cropped.mp4\" 
\"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061_cropped.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\"\r\nffmpeg -y -i \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" -ss 00:05:00 -to 00:06:00 -async 1 \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_shorten.mp4\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\ncp \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_shorten.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_shorten.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\"\r\nffmpeg -y -i \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" -vf scale=480:480 \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_downsampled.mp4\" -hide_banner\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\ncp \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_downsampled.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_downsampled.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\"\r\nffmpeg -y -i \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" -filter:v fps=20 \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_fpsChanged.mp4\" -hide_banner\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\ncp \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_fpsChanged.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_fpsChanged.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\"\r\nffmpeg -y -i \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" -vf format=gray \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_grayscale.mp4\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\ncp \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_grayscale.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_grayscale.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\"\r\nffmpeg -y -i \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" -vf \"drawtext=fontfile=Arial.ttf: text='%{frame_num}': start_number=0: x=(w-tw)/2: y=h-(2*lh): fontcolor=black: fontsize=20: box=1: boxcolor=white: boxborderw=5\" -c:a copy \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_frame_no.mp4\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\ncp \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_frame_no.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/tmp/\"\r\nmv \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4_frame_no.mp4\" \"/home/florian/Documents/Temp/TEST/processed_videos/MVI_0061.MP4\"\r\n```\r\n\r\nAnd that resulted in the 
creation of the following files:\r\n![image](https://user-images.githubusercontent.com/17770886/163648351-959a738f-30ec-4765-a7e0-007381af98e9.png)\r\n\r\nLet me know if you would like any additional information.\r\n", + "created_at": "2022-04-15T22:11:32Z", + "author": "florianduclot" + }, + { + "body": "Nice one! Thanks again, I will insert the fixes in the pip package (which most folks get the code through), no need for a PR, and I will credit you in the function.", + "created_at": "2022-04-15T22:26:27Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Error when analysing ROI ", + "body": "\r\nHi, I'm testing simba to be able to analyze behavior data using ROIs. Everything is going fine up to the step where I actually run the ROI analysis: there I get an error that says the FPS and pixel/mm info can't be found in the video_info.csv file, but when I check the file all the information is there. \r\n\r\nScreenshots:\r\n![image](https://user-images.githubusercontent.com/102673091/160846105-e63fd8ed-2a26-41d7-9e9e-c575ac88c22d.png)\r\n![image](https://user-images.githubusercontent.com/102673091/160846213-e3448b07-fc79-4033-9905-0fd4e1910310.png)\r\n![image](https://user-images.githubusercontent.com/102673091/160846526-ac3e427e-0c66-4bfd-bf11-ca3216cc840d.png)\r\n\r\n**Desktop:**\r\n - OS: Windows 10\r\n - Python Version 3.6\r\n - I am using anaconda\r\n", + "user": "gabrieltofani", + "reaction_cnt": 0, + "created_at": "2022-03-30T13:33:45Z", + "updated_at": "2022-03-30T14:23:22Z", + "author": "gabrieltofani", + "comments": [ + { + "body": "Hello @gabrieltofani ! It is possible SimBA is having issues with your integer file name `74`. I will check the code for ROI analysis and make sure it can handle it. But in the meantime, can you check if that is the case by renaming it, or by analyzing a different video whose name could not be mistaken for an integer (e.g., video74, v74)?\r\n\r\nThanks for reporting!", + "created_at": "2022-03-30T13:41:16Z", + "author": "sgoldenlab" + }, + { + "body": "Hi! Thanks for the rapid response! I changed the file name and I'm getting a different error. (Feels like progress though)\r\n![image](https://user-images.githubusercontent.com/102673091/160851490-dd4e18dd-acdd-4a77-a797-1b00edac2524.png)\r\n![image](https://user-images.githubusercontent.com/102673091/160851650-2127ae30-dd4e-4878-84af-1a00d77a635e.png)\r\n\r\n", + "created_at": "2022-03-30T13:56:16Z", + "author": "gabrieltofani" + }, + { + "body": "Oh, can you check inside the `project_config.ini` file, under the `probability_threshold` value? You should see a number written there, a float between 0 and 1, like in the screenshot. But in your file, SimBA says it's a string. \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/160853487-6afaf350-7f31-4ceb-a21c-69007d491d2d.png)\r\n", + "created_at": "2022-03-30T14:04:11Z", + "author": "sgoldenlab" + }, + { + "body": "This is what I have\r\n![image](https://user-images.githubusercontent.com/102673091/160854137-4da9d5bc-a647-48af-a177-23753cc7ed9c.png)\r\n", + "created_at": "2022-03-30T14:06:52Z", + "author": "gabrieltofani" + }, + { + "body": "I see - which version of SimBA are you using? Maybe it's something caused by a supposed fix last night\r\n", + "created_at": "2022-03-30T14:09:17Z", + "author": "sgoldenlab" + }, + { + "body": "Oh, I forgot to fill in the bp probability_threshold this last try! That is why! It's all working now. \r\nThanks so much!", + "created_at": "2022-03-30T14:11:26Z", + "author": "gabrieltofani" + }, + { + "body": "Ah OK, yes, it needs a number. ", + "created_at": "2022-03-30T14:14:38Z", + "author": "sgoldenlab" + }, + { + "body": "I'll insert a fix: if empty, then set to 0.0. Thanks again for reporting. ", + "created_at": "2022-03-30T14:15:12Z", + "author": "sgoldenlab" + } + ] + },
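The fix promised above ("if empty, then set to 0.0") could look something like the sketch below; the section and option names in `project_config.ini` are assumptions for illustration, not SimBA's confirmed layout:

```python
# Fall back to 0.0 when the user left probability_threshold empty.
from configparser import ConfigParser

config = ConfigParser()
config.read("project_config.ini")

raw = config.get("ROI settings", "probability_threshold", fallback="")  # section/option names assumed
probability_threshold = float(raw) if raw.strip() else 0.0
```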
+ { + "title": "Outlier correction error", + "body": "**Describe the bug**\r\nA clear and concise description of what the bug is.\r\nHello, I am new to SimBA and I am trying to classify grooming behavior of one mouse, after doing pose-estimation via DLC. When I'm in the \"outlier correction\" tab and press \"run outlier correction\", I get the following message (in italic):\r\n_\"Outlier correction settings updated in project_config.ini\r\nPose-estimation body part setting for outlier correction: user_defined\r\nApplying settings for classical tracking...\r\nProcessing 1 files for movement outliers...\"_\r\nand that's how it stays... also I get an error in the command window:\r\n_\" File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 2059, in read\r\n data = self._reader.read(nrows)\r\n File \"pandas/_libs/parsers.pyx\", line 884, in pandas._libs.parsers.TextReader.read\r\n File \"pandas/_libs/parsers.pyx\", line 965, in pandas._libs.parsers.TextReader._read_rows\r\n File \"pandas/_libs/parsers.pyx\", line 2132, in pandas._libs.parsers.raise_parser_error\r\npandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 14, saw 4\"_\r\nI read about a similar problem that was posted here, and also tried to press \"skip outlier correction (CAUTION)\" but got the same error. \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'load video --> outlier corrections (after setting video parameters)'\r\n2. Click on 'run outlier correction'\r\n3. Scroll down to '....'\r\n4. See error: \"pandas.errors.ParserError: Error tokenizing data. C error: Expected 1 fields in line 14, saw 4\"\r\n\r\n**Expected behavior**\r\nI am not sure (first time using SimBA), but I would expect to get some error estimation and correction?\r\n\r\n**Screenshots**\r\n![image](https://user-images.githubusercontent.com/86400083/160598044-9ee9874e-c717-4fd2-a254-f8acbe65288d.png)\r\n\r\n![image](https://user-images.githubusercontent.com/86400083/160601609-36a42f5b-e067-4858-aad8-ac2344e77788.png)\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Microsoft Windows, Version 21H2 (OS Build 19044.1586)\r\n - Python Version : 4.0.7.post2\r\n - I am using Anaconda\r\n \r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "Urimons", + "reaction_cnt": 0, + "created_at": "2022-03-29T11:33:55Z", + "updated_at": "2022-03-30T15:22:35Z", + "author": "Urimons", + "comments": [ + { + "body": "Hello @Urimons ! The error comes when SimBA tries to open the file in the `project_folder/csv/input_csv` folder. SimBA tries to read the file and assumes the file is in CSV format, when it is not a CSV file. I typically get this error when trying to read a CSV file when it is in fact a PARQUET file, or vice versa. If you look inside the `project_folder/csv/input_csv` folder, what files do you see? \r\n\r\n", + "created_at": "2022-03-29T20:28:40Z", + "author": "sgoldenlab" + },
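To illustrate the failure mode just described (the file path is hypothetical): `pandas.read_csv` raises exactly this `ParserError` when a non-CSV file, such as a renamed DLC `config.yaml`, ends up in `input_csv`:

```python
# A settings file masquerading as a CSV has inconsistent "columns", which
# trips the tokenizer ("Expected 1 fields in line 14, saw 4").
import pandas as pd

try:
    pd.read_csv("project_folder/csv/input_csv/config.csv")  # hypothetical path
except pd.errors.ParserError as err:
    print(f"Not a pose-estimation CSV: {err}")
```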
+ { + "body": "![image](https://user-images.githubusercontent.com/86400083/160774054-9763523c-c6b8-4a90-9fbb-3f9f1acd6def.png)\r\n![image](https://user-images.githubusercontent.com/86400083/160774149-34890d6c-10bb-4c8d-9c78-a7d7b32670ac.png)\r\n", + "created_at": "2022-03-30T07:19:09Z", + "author": "Urimons" + }, + { + "body": "Hi @Urimons - it appears that the `input_csv` folder contains a CSV file that SimBA created from a file called `config.yaml`. Both these files are 3kb in size, so very small, and unlikely to contain any pose-estimation tracking data. It is most likely a settings file from a pose-estimation project. \r\n\r\nWhen you imported the tracking data to SimBA, did you point SimBA to a file (e.g., H5 or CSV) or folder (containing H5 or CSV files) with pose-estimation data? These files are usually larger, with one row for every frame in your video, and at least two columns (x, y coordinates) for every body-part that was tracked. ", + "created_at": "2022-03-30T10:59:41Z", + "author": "sgoldenlab" + }, + { + "body": "You are right!\r\nWhen importing the correct file it worked \r\nThanks :)", + "created_at": "2022-03-30T15:10:14Z", + "author": "Urimons" + } + ] + }, + { + "title": "Extract frames does not seem to work", + "body": "Hi, \r\n\r\nI am just going through the pipeline. When creating the project, the GUI says that extracting frames is not necessary anymore, but I just wanted to test it. \r\n\r\nI am working in a conda environment in Ubuntu with ffmpeg installed in the system.\r\n\r\nWhen I try to extract frames, it says this (not extracting any frames):\r\n\r\n> Extracting 8862 frames from WT_1DLC_dlcrnetms5_NewProjectExtraLabelMar9shuffle1_100000_el.mp4...\r\n> Please make sure videos are imported and located in /project_folder/videos\r\n\r\nThe .mp4 videos were correctly extracted to the videos/ folder. \r\n\r\nThanks in advance!", + "user": "jmaicas", + "reaction_cnt": 0, + "created_at": "2022-03-24T14:02:49Z", + "updated_at": "2022-03-24T15:13:19Z", + "author": "jmaicas", + "comments": [ + { + "body": "Hi @jmaicas! Is your original video named `WT_1DLC_dlcrnetms5_NewProjectExtraLabelMar9shuffle1_100000_el.mp4`? Just checking, as this file name sounds like something that has been processed by DeepLabCut, and you should import something sounding more like `WT_1.mp4`\r\n\r\nAlso, did you see any errors in the terminal window? ", + "created_at": "2022-03-24T14:43:10Z", + "author": "sgoldenlab" + }, + { + "body": "Hi, yes, it's the .mp4 labelled video. The original ones (not labelled) are in .mkv format. I guess that I should convert them to .mp4 first? Or should I run deeplabcut on the original files with the .mp4 extension already?", + "created_at": "2022-03-24T15:08:08Z", + "author": "jmaicas" + }, + { + "body": "SimBA likes mp4 or avi. I suggest you convert the mkv's to avi or mp4 here: https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#change-video-format\r\n\r\nOnce done, import the mp4/avi's.", + "created_at": "2022-03-24T15:13:19Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Importing 0 multi-animal DLC h5 files in Ubuntu", + "body": "Hi, \r\n\r\nI'm just new to SimBA. I have a 2 animal project in Deeplabcut that I want to analyse. \r\n\r\nI checked #150 and two other related questions, but their solutions do not seem to solve what is happening to me. \r\n\r\nI created a new pose configuration with 2x7 bps, which seems to work fine. I successfully imported two videos, which have the same names and are in the same folder as their .h5 files, but when I try to import the .h5 files it says \r\n\r\n_\"Importing 0 multi-animal DLC h5 files to the current project\"_\r\n\r\nI installed simba in a conda environment and the version is:\r\nsimba-uw-tf 1.3.12\r\n\r\nThanks in advance!\r\n", + "user": "jmaicas", + "reaction_cnt": 0, + "created_at": "2022-03-23T16:12:55Z", + "updated_at": "2022-03-24T12:58:23Z", + "author": "jmaicas", + "comments": [ + { + "body": "My videos and .h5 files end in _el. However, it seems that SimBA looks only for box or skeleton ones when the .h5 comes from a multianimal DLC project. \r\n\r\nSo I changed the end of the names of the videos and .h5 files. \r\n\r\nNow it gives me this error:\r\n\r\n```\r\nImporting 2 multi-animal DLC h5 files to the current project\r\nCannot locate video out2021_02_26_habdishab_exp_rat1_crop2_trim1DLC_dlcrnetms5_NewProjectExtraLabelMar9shuffle1_100000_bx.h5 in mp4 or avi format\r\n```\r\n\r\nHowever, `out2021_02_26_habdishab_exp_rat1_crop2_trim1DLC_dlcrnetms5_NewProjectExtraLabelMar9shuffle1_100000_bx.mp4` was correctly imported to the `project_folder/videos` folder", + "created_at": "2022-03-23T16:44:24Z", + "author": "jmaicas" + }, + { + "body": "Hello @jmaicas!\r\n\r\nThe `simba-uw-tf` package isn't being supported, so try the following: \r\n\r\n(i) uninstall `simba-uw-tf` with `pip uninstall simba-uw-tf`\r\n(ii) install `simba-uw-tf-dev` with `pip install simba-uw-tf-dev`\r\n(iii) run `simba` and let me know if that fixes it. \r\n\r\n", + "created_at": "2022-03-23T16:56:32Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for the quick response, @sgoldenlab!\r\n\r\nThat worked, thank you! \r\n\r\nHowever, when I import the h5 files, it says:\r\n\r\n> \r\n> Importing 2 multi-animal DLC h5 files to the current project\r\n> Length mismatch: Expected axis has 42 elements, new values have 39 elements\r\n> The number of body-parts in the input files do not match the number of body-parts in your SimBA project. Make sure you have specified the correct number of animals and body-parts in your project.\r\n\r\nI have a project with 2 rats and 7 bps per rat. \r\n\r\nI have read that in older versions it could get confused by the _1 and _2 for different animals, but that seems to be solved with your new interface that forces you to introduce the ID in a new column.", + "created_at": "2022-03-23T18:14:07Z", + "author": "jmaicas" + }, + { + "body": "Yes - if you navigate to `project_folder\\logs\\measures\\pose_configs\\bp_names`, open that CSV and send me a screenshot. That file stores all the body-parts, and which animal each belongs to, in your project. If one body-part is missing in that file, that could explain this error. ", + "created_at": "2022-03-23T18:24:44Z", + "author": "sgoldenlab" + }, + { + "body": "I see! There are 14 of them but the last one is missing the _2. I'll redo it and I will let you know. Thanks!\n![bp_names](https://user-images.githubusercontent.com/8972322/159904052-e4b74a19-5de0-4444-8e73-f7614089b54f.png)\n ", + "created_at": "2022-03-24T11:11:40Z", + "author": "jmaicas" + }, + { + "body": "it worked! Thank you very much!", + "created_at": "2022-03-24T12:41:22Z", + "author": "jmaicas" + }, + { + "body": "Great!", + "created_at": "2022-03-24T12:58:23Z", + "author": "sgoldenlab" + } + ] + },
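As a sanity check for the fix above, something like the sketch below could confirm the file contents; the bp_names folder path is the one given in the thread, but the exact CSV file name inside it is an assumption:

```python
# Confirm the expected 2 x 7 = 14 body-parts and that each name carries an
# animal suffix; a single missing _1/_2 causes the length mismatch above.
import pandas as pd

bps = pd.read_csv(
    "project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv",  # file name assumed
    header=None,
)
print(len(bps), "body-parts listed")            # expect 14 for 2 animals x 7 parts
suffix_ok = bps[0].str[-2:].isin(["_1", "_2"])  # every name should end _1 or _2
print(bps[~suffix_ok])                          # rows missing an animal suffix
```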
We may be able to provide support and advice for specific use instances, especially if it benefits multiple users and advances the scope of SimBA\"\r\n\r\nI wanted to voice my interest in a Linux/docker version :) \r\n\r\nIn our case we have a lab computer built for machine learning and a number of remote servers. In order to keep the environments the same we run linux on all of them and docker (docker has better GPU support for linux compared to on windows). From my understanding this is a common setup for labs doing a lot of computational work. \r\n\r\nDeepLabCut offers a docker environment now that can launch GUIs, I find it really made things easier for me to install it on a number of computers. It would be really cool to see this in SimBA too", + "user": "A-Telfer", + "reaction_cnt": 0, + "created_at": "2022-03-22T18:42:20Z", + "updated_at": "2022-03-31T17:46:13Z", + "author": "A-Telfer", + "comments": [ + { + "body": "Thanks @A-Telfer! It's on the list and will give it a go and let you know. ", + "created_at": "2022-03-23T16:54:48Z", + "author": "sgoldenlab" + }, + { + "body": "Awesome!! I can contribute if there's a list of things that need to be refactored", + "created_at": "2022-03-31T15:00:06Z", + "author": "A-Telfer" + }, + { + "body": "Excellent... as I've asked the lab for help and got absolute silence in response :) Not sure how this is best done... is the docker compose yaml something that can be chucked in with the pip package? I push some smaller fixes to pip multiple times a week, and don't want to build image myself each time.", + "created_at": "2022-03-31T15:31:23Z", + "author": "sgoldenlab" + }, + { + "body": "I'm not sure about that, looking at how DLC does it they same to just have a really simple bash script that downloads the docker image from dockerhub.\r\n\r\nIf you don't want to explicitly create a new docker image every time, you could add a [git hook](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) to upload a new docker image to docker:latest automatically\r\n- It should run fast if it saves a docker base image and only reinstalls simba each time\r\n\r\nHow much needs to be changed for it to work on linux though?", + "created_at": "2022-03-31T16:04:17Z", + "author": "A-Telfer" + }, + { + "body": "Edit: might be wrong about this, I see some docker build commands now so you could potentially just throw all this into pypi\r\n\r\ne.g. https://github.com/DeepLabCut/DeepLabCut/blob/master/docker/deeplabcut_docker.sh#L118", + "created_at": "2022-03-31T16:08:36Z", + "author": "A-Telfer" + }, + { + "body": "Yes, that was what came to mind: throw the compose and whatever else is needed into the pypi, and give the user the commands for creating and loading image? \r\n\r\nFor linux compatibility: very little, I run it regularly on mac and I have sorted most or all legacy path calling commands that were not stable on mac/linux. 
The `simba-uw-tf-dev` *should* be good.", + "created_at": "2022-03-31T17:11:26Z", + "author": "sgoldenlab" + }, + { + "body": "That's great!\r\n\r\nIt worked for me on Ubuntu, the only change was that I had to `conda install wxPython` before `pip install simba-uw-tf-dev`", + "created_at": "2022-03-31T17:46:13Z", + "author": "A-Telfer" + } + ] + }, + { + "title": "Kleinberg behavior classification doesn't analyze the whole csv", + "body": "Hi,\r\nI'm trying to smooth my behavioral data by running the Kleinberg behavior classification, but it doesn't work properly.\r\n\r\nThe video has approximately 120000 frames, but the Kleinberg behavior classification stops recognizing the classified bouts at frame 44896.\r\n\r\nI then removed the first 44896 rows in the csv machine results70 file and again the Kleinberg behavior classification failed to analyze the whole csv (this time it stops at 70287).\r\n\r\nAny suggestions to get a complete analysis?\r\n\r\nThanks,\r\nCarlo.\r\n", + "user": "carlitomu", + "reaction_cnt": 0, + "created_at": "2022-03-14T14:44:06Z", + "updated_at": "2022-03-21T20:42:12Z", + "author": "carlitomu", + "comments": [ + { + "body": "Hey @carlitomu - thanks for reporting and let me see if I can recreate. Does it come with any error msg, in the terminal window or the main simba window? ", + "created_at": "2022-03-14T15:14:29Z", + "author": "sgoldenlab" + }, + { + "body": "No, there are no error messages in either the terminal window or the main simba window.\r\nThanks!", + "created_at": "2022-03-14T15:20:18Z", + "author": "carlitomu" + }, + { + "body": "@carlitomu Would you mind sending me the CSV (or parquet) file, located in `project_folder/csv/machine_results`, to see if I can recreate? I can't immediately recreate it with my own files and it would be quicker if I had yours. It would be the 120k row file that you attempt to put through the Kleinberg method. Can you perhaps share it through a gdrive link for goldenneurolab@gmail.com ? ", + "created_at": "2022-03-14T16:11:40Z", + "author": "sgoldenlab" + }, + { + "body": "Ok, I've just shared it.", + "created_at": "2022-03-14T16:21:42Z", + "author": "carlitomu" + }, + { + "body": "Thanks, got it, I will let you know. ", + "created_at": "2022-03-14T16:28:22Z", + "author": "sgoldenlab" + }, + { + "body": "One more question so I understand the issue properly: the csv file is `120611` rows to begin with. When pushed through `Kleinberg`, the output csv still has `120611` rows, but the late behavioral bouts are removed and have been changed from 1 to 0?", + "created_at": "2022-03-14T16:39:07Z", + "author": "sgoldenlab" + }, + { + "body": "Yes, you understand perfectly.", + "created_at": "2022-03-14T16:44:08Z", + "author": "carlitomu" + }, + { + "body": "Yeah, I can replicate what you are seeing. My instinct is that there are 20k-ish frames between frame 40k and frame 60k without any behavior happening, which puts the Markov chain in a very strong **off** state which it is difficult to re-engage from, but playing with hyper-parameters does not seem to do much... I am not sure if that is the cause, but it could be tested by removing those 20k frames and seeing if it makes any difference. 
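A minimal sketch of the test proposed here, assuming a SimBA machine-results CSV with one row per frame; the file name is a placeholder:

```python
import pandas as pd

# Placeholder path to the 120k-row file discussed in this thread.
path = "project_folder/csv/machine_results/Video1.csv"
df = pd.read_csv(path, index_col=0)

# Drop the behavior-free stretch between frame 40k and frame 60k...
df = df.drop(df.index[40000:60000])

# ...then rebuild a continuous 0..N-1 index so no integer frame numbers are
# missing before the file is pushed through the Kleinberg smoother again.
df = df.reset_index(drop=True)
df.to_csv(path)
```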
", + "created_at": "2022-03-14T18:43:38Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for your suggestions, but it doesn't work.\r\nI removed the frames between 40k and 60k without interactions: but the output file remains the same as before.\r\n\r\nI also tried to remove the first 40k frames and so I get a different output file, with other behaviors, but again an incomplete analysis. ", + "created_at": "2022-03-15T18:25:12Z", + "author": "carlitomu" + }, + { + "body": "Thanks @carlitomu (still sgoldenlab, my handle is switching across computers). I will need to check this and try to replicate with other longer videos when I have time at the end of the week and get back to you. The data reads in exactly the way it should and none of the later frames are dropped which also was a though.. \r\n\r\nYou may have done it already, but when you do drop the frames between 40k and 60k without interactions, make sure you also update the index column so it reads continuously from `**0 to the final frame number** and there are no missing integer values (i.e., you should still rows have row indexes between 40-60k, it is the later values that will disappear). ", + "created_at": "2022-03-15T23:04:41Z", + "author": "sronilsson" + }, + { + "body": "Hi @carlitomu - I've tested a few things and can't get my head around it - other files of equal size or longer (but typically more frequent behaviors) process fine, and are no issues with the data types. At the moment, I am at loss and take that it is expected behavior considering how your classified behaviors are expressed throughout the session. ", + "created_at": "2022-03-21T20:42:11Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 9.0.1", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 9.0.1.\n
Release notes (sourced from pillow's releases):

9.0.1 - https://pillow.readthedocs.io/en/stable/releasenotes/9.0.1.html
Changes:
- In show_file, use os.remove to remove temporary images. CVE-2022-24303 #6010 [@radarhere, @hugovk]
- Restrict builtins within lambdas for ImageMath.eval. CVE-2022-22817 #6009 [radarhere]

9.0.0 - https://pillow.readthedocs.io/en/stable/releasenotes/9.0.0.html
Changes: (truncated)

Changelog (sourced from pillow's changelog):

9.0.1 (2022-02-03)
- In show_file, use os.remove to remove temporary images. CVE-2022-24303 #6010 [radarhere, hugovk]
- Restrict builtins within lambdas for ImageMath.eval. CVE-2022-22817 #6009 [radarhere]

9.0.0 (2022-01-02)
- Restrict builtins for ImageMath.eval(). CVE-2022-22817 #5923 [radarhere]
- Ensure JpegImagePlugin stops at the end of a truncated file #5921 [radarhere]
- Fixed ImagePath.Path array handling. CVE-2022-22815, CVE-2022-22816 #5920 [radarhere]
- Remove consecutive duplicate tiles that only differ by their offset #5919 [radarhere]
- Improved I;16 operations on big endian #5901 [radarhere]
- Limit quantized palette to number of colors #5879 [radarhere]
- Fixed palette index for zeroed color in FASTOCTREE quantize #5869 [radarhere]
- When saving RGBA to GIF, make use of first transparent palette entry #5859 [radarhere]
- Pass SAMPLEFORMAT to libtiff #5848 [radarhere]
- Added rounding when converting P and PA #5824 [radarhere]
- Improved putdata() documentation and data handling #5910 [radarhere]
- Exclude carriage return in PDF regex to help prevent ReDoS #5912 [hugovk]
- Fixed freeing pointer in ImageDraw.Outline.transform #5909 [radarhere]
(... truncated)

Commits:
- 6deac9e 9.0.1 version bump
- c04d812 Update CHANGES.rst [ci skip]
- 4fabec3 Added release notes for 9.0.1
- 02affaa Added delay after opening image with xdg-open
- ca0b585 Updated formatting
- 427221e In show_file, use os.remove to remove temporary images
- c930be0 Restrict builtins within lambdas for ImageMath.eval
- 75b69dd Dont need to pin for GHA
- cd938a7 Autolink CWE numbers with sphinx-issues
- 2e9c461 Add CVE IDs
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=9.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
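As a quick local complement to this bump, a small sketch (assuming Pillow is importable in the environment) that checks whether the installed version predates the fixes listed above:

```python
# PIL.__version__ is exposed by modern Pillow releases.
from PIL import __version__ as pillow_version

if tuple(int(p) for p in pillow_version.split(".")[:3]) < (9, 0, 1):
    print(f"Pillow {pillow_version} predates the CVE-2022-24303/CVE-2022-22817 fixes")
```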
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-03-12T00:55:42Z", + "updated_at": "2022-03-12T20:33:46Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-03-12T20:33:36Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 9.0.1 in /simba", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 9.0.1.\n
(Release notes, changelog, and commits: identical to the "Bump pillow from 5.4.1 to 9.0.1" PR body above.)
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=9.0.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-03-12T00:11:04Z", + "updated_at": "2022-03-12T20:33:46Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-03-12T20:33:36Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Simba installation error: cannot import name 'run'", + "body": "Hi,\r\n\r\nAfter installing simba and typing simba in my environment, I get the following error:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Tony\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Users\\Tony\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\Tony\\anaconda3\\envs\\simba\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"C:\\Users\\Tony\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 51, in \r\n from simba.train_model_2 import *\r\n File \"C:\\Users\\Tony\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\train_model_2.py\", line 7, in \r\n from dtreeviz.trees import *\r\n File \"C:\\Users\\Tony\\anaconda3\\envs\\simba\\lib\\site-packages\\dtreeviz\\trees.py\", line 2, in \r\n from graphviz.backend import run, view\r\nImportError: cannot import name 'run'\r\n\r\nPrior to this issue, I encountered similar issues as mentioned in #154 and #156. After manually pip installing the missing modules and wxpython==4.0.4, I encountered this issue and I am not too sure how to fix it. The python version for the environment is 3.6.10. \r\n\r\nAny help would be greatly appreciated! \r\n", + "user": "tyyin12", + "reaction_cnt": 0, + "created_at": "2022-03-09T12:11:12Z", + "updated_at": "2022-06-14T22:10:39Z", + "author": "tyyin12", + "comments": [ + { + "body": "hello @Potato1214! \r\n\r\nThere seems to be an issue with the version of the `graphviz` library you have installed, it does not having any methods called `run`. It maybe that the namings have changed in earlier or later versions of graphviz. \r\n\r\nIf you do a `pip show graphviz` what do yo see? If you don't see version `0.11`, could you do a `pip install graphviz==0.11` and see if that fixes it? (that is the version I have on my machine). ", + "created_at": "2022-03-09T13:47:12Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.5.3 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.5.3.\n
Release notes (sourced from tensorflow-gpu's releases):

TensorFlow 2.5.3

Release 2.5.3
Note: This is the last release in the 2.5 series.
This release introduces several vulnerability fixes:
- Fixes a floating point division by 0 when executing convolution operators (CVE-2022-21725)
- Fixes a heap OOB read in shape inference for ReverseSequence (CVE-2022-21728)
- Fixes a heap OOB access in Dequantize (CVE-2022-21726)
- Fixes an integer overflow in shape inference for Dequantize (CVE-2022-21727)
- Fixes a heap OOB access in FractionalAvgPoolGrad (CVE-2022-21730)
- Fixes an overflow and divide by zero in UnravelIndex (CVE-2022-21729)
- Fixes a type confusion in shape inference for ConcatV2 (CVE-2022-21731)
- Fixes an OOM in ThreadPoolHandle (CVE-2022-21732)
- Fixes an OOM due to integer overflow in StringNGrams (CVE-2022-21733)
- Fixes more issues caused by incomplete validation in boosted trees code (CVE-2021-41208)
- Fixes integer overflows in most sparse component-wise ops (CVE-2022-23567)
- Fixes integer overflows in AddManySparseToTensorsMap (CVE-2022-23568)
- Fixes a number of CHECK-failures in MapStage (CVE-2022-21734)
- Fixes a division by zero in FractionalMaxPool (CVE-2022-21735)
- Fixes a number of CHECK-fails when building invalid/overflowing tensor shapes (CVE-2022-23569)
- Fixes an undefined behavior in SparseTensorSliceDataset (CVE-2022-21736)
- Fixes an assertion failure based denial of service via faulty bin count operations (CVE-2022-21737)
- Fixes a reference binding to null pointer in QuantizedMaxPool (CVE-2022-21739)
- Fixes an integer overflow leading to crash in SparseCountSparseOutput (CVE-2022-21738)
- Fixes a heap overflow in SparseCountSparseOutput (CVE-2022-21740)
- Fixes an FPE in BiasAndClamp in TFLite (CVE-2022-23557)
- Fixes an FPE in depthwise convolutions in TFLite (CVE-2022-21741)
- Fixes an integer overflow in TFLite array creation (CVE-2022-23558)
- Fixes an integer overflow in TFLite (CVE-2022-23559)
- Fixes a dangerous OOB write in TFLite (CVE-2022-23561)
- Fixes a vulnerability leading to read and write outside of bounds in TFLite (CVE-2022-23560)
- Fixes a set of vulnerabilities caused by using insecure temporary files (CVE-2022-23563)
- Fixes an integer overflow in Range resulting in undefined behavior and OOM (CVE-2022-23562)
- Fixes a vulnerability where missing validation causes tf.sparse.split to crash when axis is a tuple (CVE-2021-41206)
- Fixes a CHECK-fail when decoding resource handles from proto (CVE-2022-23564)
- Fixes a CHECK-fail with repeated AttrDef (CVE-2022-23565)
- Fixes a heap OOB write in Grappler (CVE-2022-23566)
- Fixes a CHECK-fail when decoding invalid tensors from proto (CVE-2022-23571)
- Fixes an uninitialized variable access in AssignOp (CVE-2022-23573)
- Fixes an integer overflow in OpLevelCostEstimator::CalculateTensorSize (CVE-2022-23575)
- Fixes an integer overflow in OpLevelCostEstimator::CalculateOutputSize (CVE-2022-23576)
- Fixes a null dereference in GetInitOp (CVE-2022-23577)
- Fixes a memory leak when a graph node is invalid (CVE-2022-23578)
- Fixes an abort caused by allocating a vector that is too large (CVE-2022-23580)
- Fixes multiple CHECK-failures during Grappler's IsSimplifiableReshape (CVE-2022-23581)
- Fixes multiple CHECK-failures during Grappler's SafeToRemoveIdentity (CVE-2022-23579)
- Fixes multiple CHECK-failures in TensorByteSize (CVE-2022-23582)
- Fixes multiple CHECK-failures in binary ops due to type confusion (CVE-2022-23583)
(... truncated)

Changelog (sourced from tensorflow-gpu's changelog):

Release 2.5.3 - repeats the vulnerability-fix list above (truncated).

Commits:
- 959e9b2 Merge pull request #54213 from tensorflow/fix-sanity-on-r2.5
- d05fcbc Fix sanity build
- f2526a0 Merge pull request #54205 from tensorflow/disable-flaky-tests-on-r2.5
- a5f94df Disable flaky test
- 7babe52 Merge pull request #54201 from tensorflow/cherrypick-510ae18200d0a4fad797c0bf...
- 0e5d378 Set Env Variable to override Setuptools new behavior
- fdd4195 Merge pull request #54176 from tensorflow-jenkins/relnotes-2.5.3-6805
- 4083165 Update RELEASE.md
- a2bb7f1 Merge pull request #54185 from tensorflow/cherrypick-d437dec4d549fc30f9b85c75...
- 5777ea3 Update third_party/icu/workspace.bzl
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.5.3)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-02-09T23:36:01Z", + "updated_at": "2022-03-12T20:33:42Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-03-12T20:33:36Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Error in validating model on single video", + "body": "**Describe the bug**\r\n\r\nI'm trying to validate my model on a single video. After selecting one csv file from `features_extracted` and the `sav` model file, an error message shows up when I clicked 'Run model':\r\n\r\n```\r\nRunning model...\r\nError: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file\r\nPredictions generated.\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. In GUI, finish previous steps and go to 'Run machine model' tab\r\n2. Select a features file and a sav model file.\r\n2. Click on 'Run Model'\r\n3. See error\r\n\r\n**Expected behavior**\r\n- no error message after 'Run Model'\r\n- plot shows up as in the tutorial after clicking 'Generate plot'\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: WSL2 Ubuntu 20.04\r\n - Python Version [e.g. 3.6.0] 3.6.13\r\n - Are you using anaconda? Yes\r\n \r\n\r\n**Additional context**\r\nThe `video_info.csv`:\r\n\r\n```\r\n6,60,756,730,1000,0.6220072346846136\r\n7,60,756,730,1000,0.6220072346846136\r\n4,60,756,730,1000,0.6220072346846136\r\n5,60,756,730,1000,0.6220072346846136\r\n13,60,756,730,1000,0.6220072346846136\r\n2,60,756,730,1000,0.6220072346846136\r\n10,60,756,730,1000,0.6220072346846136\r\n3,60,756,730,1000,0.6220072346846136\r\n12,60,756,730,1000,0.6220072346846136\r\n9,60,756,730,1000,0.6220072346846136\r\n14,60,756,730,1000,0.6220072346846136\r\n8,60,756,730,1000,0.6220072346846136\r\n11,60,756,730,1000,0.6220072346846136\r\n```", + "user": "frankiechang123", + "reaction_cnt": 0, + "created_at": "2022-02-04T05:20:53Z", + "updated_at": "2022-04-14T05:36:52Z", + "author": "frankiechang123", + "comments": [ + { + "body": "Also, when I clicked 'Generate Plot', an error message shows up:\r\n\r\n![image](https://user-images.githubusercontent.com/51113150/152476833-dede6d80-5103-4a43-821b-e4863d09c78f.png)\r\n\r\nIt looks like it's looking for an mp4 file while all my videos are in `.avi`? Could this be the problem?", + "created_at": "2022-02-04T05:26:18Z", + "author": "frankiechang123" + }, + { + "body": "Hello @frankiechang123 ! Thanks for reporting. \r\n\r\nWhat might be happening is that your videos are named as integers and there version of SimBA you are running gets confused. I will make sure this doesn't happen, but in meantime, could you rename your files in the video_info.csv, and the files in the `features_extracted` folder, `Video1` etc. instead of `1` and see if that fixes it? ", + "created_at": "2022-02-04T05:28:06Z", + "author": "sronilsson" + }, + { + "body": "Hi! I changed the filenames as you suggested and now there's no error message. 
\r\n\r\nHowever, when I clicked 'Generate Plot', it was still looking for `mp4` files. Is there a way around this?\r\n\r\nThank you so much!", + "created_at": "2022-02-04T05:52:10Z", + "author": "frankiechang123" + }, + { + "body": "@frankiechang123 - which version of SimBA are you running? ", + "created_at": "2022-02-04T12:27:10Z", + "author": "sronilsson" + }, + { + "body": "@frankiechang123 - I've inserted a couple of checks around the functions you are using, could you update SimBA to version 0.89.6 - `pip install simba-uw-tf-dev==0.89.6` or `pip install simba-uw-tf-dev --upgrade` and see if it behaves correctly on your end? ", + "created_at": "2022-02-04T14:20:25Z", + "author": "sronilsson" + }, + { + "body": "Hi! I uninstalled my previous version `simba-uw-no-tf-1.3.0` and installed `simba-uw-tf-dev==0.89`. \r\n\r\nIt then says simba cannot be found. What am I doing wrong? ", + "created_at": "2022-02-05T06:49:41Z", + "author": "frankiechang123" + }, + { + "body": "Hello @frankiechang123 - if you type in your environment:\r\n1) `pip uninstall simba-uw-no-tf` - this removes the simba-uw-no-tf version (which is not maintained)\r\n2) `pip install simba-uw-tf-dev` - this installs the latest version of simba (the one being updated and maintained)\r\n3) `simba` - this should launch simba-uw-tf-dev\r\n\r\nlet me know if that works", + "created_at": "2022-02-05T14:51:05Z", + "author": "sronilsson" + }, + { + "body": "Hey, I encountered the same problem when trying to \"Run RF Model\" under the \"Run machine model\" tab, and also when trying to \"Analyze machine predictions\".\r\n### The error when trying to \"Run RF Model\": \r\n_Running 1 model(s) on 4 video file(s).\r\nAnalyzing video 1/4...\r\nError: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file_\r\n### The error when trying to \"Analyze machine predictions\": \r\n_Analyzing video 1/6...\r\nFile # processed for machine predictions: 1/6\r\nError: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file\r\nAnalyzing video 2/6...\r\nFile # processed for machine predictions: 2/6\r\nError: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file\r\nAnalyzing video 3/6...\r\nFile # processed for machine predictions: 3/6\r\nError: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file\r\nAnalyzing video 4/6...\r\nModel paths saved in project_config.ini\r\nApplying settings for classical tracking..._\r\n\r\nI tried to apply the suggestions mentioned here with no success... any advice what I should do?", + "created_at": "2022-04-12T06:53:43Z", + "author": "Urimons" + }, + { + "body": "Hi @Urimons! Which version of SimBA are you running? 
Can you type `pip show simba-uw-tf-dev` and paste the output here", + "created_at": "2022-04-12T12:05:01Z", + "author": "sronilsson" + }, + { + "body": "(simba) C:\\Users\\YizharAnalysis>pip show simba-uw-tf-dev\r\nName: Simba-UW-tf-dev\r\nVersion: 0.90.9\r\nSummary: Toolkit for computer classification of complex social behaviors in experimental animals\r\nHome-page: https://github.com/sgoldenlab/simba\r\nAuthor: Simon Nilsson, Jia Jie Choong, Nastacia Goodwin, Sophia Hwang, Sam Golden\r\nAuthor-email: goldenneurolab@gmail.com\r\nLicense: GNU Lesser General Public License v3 (LGPLv3)\r\n", + "created_at": "2022-04-12T12:31:22Z", + "author": "Urimons" + }, + { + "body": "Thanks @Urimons - I can see that the function being called hasn't been updated for a while. Hang on, I will push a fix. ", + "created_at": "2022-04-12T12:40:22Z", + "author": "sronilsson" + }, + { + "body": "@Urimons - can you upgrade SimBA - `pip install simba-uw-tf-dev --upgrade` - it should be version 0.91.0, and check if it works now? If not, let me know what errors you see, thanks for reporting \r\n", + "created_at": "2022-04-12T13:24:52Z", + "author": "sronilsson" + }, + { + "body": "**Now when I try to \"Run RF model\" I get:**\r\n_(simba) C:\\Users\\YizharAnalysis>simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\configparser.py\", line 789, in get\r\n value = d[option]\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\collections\\__init__.py\", line 883, in __getitem__\r\n return self.__missing__(key) # support subclasses that define __missing__\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\collections\\__init__.py\", line 875, in __missing__\r\n raise KeyError(key)\r\n_KeyError: 'no_of_animals'\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 5892, in runrfmodel\r\n rfmodel(self.projectconfigini)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\run_RF_model.py\", line 36, in rfmodel\r\n noAnimals = config.getint('ROI settings', 'no_of_animals')\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\configparser.py\", line 819, in getint\r\n fallback=fallback, **kwargs)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\configparser.py\", line 809, in _get_conv\r\n **kwargs)_\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\configparser.py\", line 803, in _get\r\n return conv(self.get(section, option, **kwargs))\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\configparser.py\", line 792, in get\r\n raise NoOptionError(option, section)\r\nconfigparser.NoOptionError: No option 'no_of_animals' in section: 'ROI settings'_\r\n\r\n**and when I try \"Analyze machine predictions\" I get:**\r\n__Traceback (most recent call last):\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 5878, in \r\n button1 = Button(dlmlabel,text='Analyze',command=lambda:self.findDatalogList(titlebox,var))\r\n File 
\"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 5888, in findDatalogList\r\n analyze_process_data_log(self.projectconfigini,finallist)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\process_data_log.py\", line 83, in analyze_process_data_log\r\n firstOccurList.append(round(currDf['Start Time'].min(), 3))\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\generic.py\", line 11618, in stat_func\r\n f, name, axis=axis, skipna=skipna, numeric_only=numeric_only\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\series.py\", line 4090, in _reduce\r\n return op(delegate, skipna=skipna, **kwds)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\nanops.py\", line 131, in f\r\n raise TypeError(e)\r\nTypeError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().__", + "created_at": "2022-04-12T13:36:33Z", + "author": "Urimons" + }, + { + "body": "Thanks, let me take another look ", + "created_at": "2022-04-12T13:53:31Z", + "author": "sronilsson" + }, + { + "body": "@Urimons - there was a typo of mine in `Run RF model` - but not sure what is going on in `Analyze machine predictions`, it could likely be that the `Run RF model` fails, and therefore no values to analyze in `Analyze machine predictions`. Can we try again, with version 0.91.1? ", + "created_at": "2022-04-12T14:06:57Z", + "author": "sronilsson" + }, + { + "body": "Tried again. The \"Run RF model\" works now, but the \"Analyze...\" still doesn't, this is the error:\r\n_Traceback (most recent call last):\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 5878, in \r\n button1 = Button(dlmlabel,text='Analyze',command=lambda:self.findDatalogList(titlebox,var))\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 5888, in findDatalogList\r\n analyze_process_data_log(self.projectconfigini,finallist)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\process_data_log.py\", line 83, in analyze_process_data_log\r\n firstOccurList.append(round(currDf['Start Time'].min(), 3))\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\generic.py\", line 11618, in stat_func\r\n f, name, axis=axis, skipna=skipna, numeric_only=numeric_only\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\series.py\", line 4090, in _reduce\r\n return op(delegate, skipna=skipna, **kwds)\r\n File \"C:\\Users\\YizharAnalysis\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\nanops.py\", line 131, in f\r\n raise TypeError(e)\r\nTypeError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all()._\r\n\r\n![image](https://user-images.githubusercontent.com/86400083/162982668-26396011-069d-4aee-be63-a9f818b49884.png)\r\n![image](https://user-images.githubusercontent.com/86400083/162982416-62032ef6-c4e7-49ff-80ce-3dd181d8c4f8.png)\r\n", + "created_at": "2022-04-12T14:15:45Z", + "author": "Urimons" + }, + { + "body": "Got it, almost there :) Hang on.. 
", + "created_at": "2022-04-12T14:33:04Z", + "author": "sronilsson" + }, + { + "body": "Can you try again with 0.91.2 and let me know?", + "created_at": "2022-04-12T17:56:00Z", + "author": "sronilsson" + }, + { + "body": "it works!\r\nThanks a lot :)", + "created_at": "2022-04-14T05:36:52Z", + "author": "Urimons" + } + ] + }, + { + "title": "Error after starting simba: unable to load classification from database", + "body": "**Describe the bug**\r\nI am able to start simba and the GUI opens fine. If I try to create a new project and click \"browse folder\", the following error occurs:\r\n```\r\n2022-01-28 15:06:19,769 ERROR - Unable to load classification from database with name and id (Classification icon creation failed): Critical (8c70e164-91e6-4a6d-ad34-2f666638f911)\r\n2022-01-28 15:06:19,769 ERROR - Unable to load classification from database with name and id (Classification icon creation failed): Confidential (eaa2d927-97cb-4f11-88e5-a2c37da0ccb7)\r\n2022-01-28 15:06:19,771 ERROR - Unable to load classification from database with name and id (Classification icon creation failed): Restricted (a830f72f-36ba-41f9-9291-6c84f61a2cd4)\r\n2022-01-28 15:06:19,772 ERROR - Unable to load classification from database with name and id (Classification icon creation failed): Public (c2f6427c-815b-4af1-bfc9-1df88e2297d6)\r\n2022-01-28 15:06:19,773 ERROR - Monitor initialization failed with error 80070002 (1). Automatic configuration updates will not be received\r\n```\r\nIf I continue anyway, and click \"generate project config\", it will successfully make a project folder but many of the files that should be inside it (such as the project_bp_names.csv) are missing. This makes me unable to either load in H5 files or even load the project itself if I exit simba and come back. 
Both of those cases lead to the following error: \r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 3268, in importh5\r\n importMultiDLCpose(self.configinifile,self.h5path.folder_path,self.dropdowndlc.getChoices(),idlist, self.interpolation.getChoices())\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\read_DLCmulti_h5_function.py\", line 84, in importMultiDLCpose\r\n Xcols, Ycols, Pcols = getBpNames(inifile)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\drop_bp_cords.py\", line 129, in getBpNames\r\n poseConfigDf = pd.read_csv(bodyparthListPath, header=None)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 457, in _read\r\n parser = TextFileReader(fp_or_buf, **kwds)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 895, in __init__\r\n self._make_engine(self.engine)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1135, in _make_engine\r\n self._engine = CParserWrapper(self.f, **self.options)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1917, in __init__\r\n self._reader = parsers.TextReader(src, **kwds)\r\n File \"pandas/_libs/parsers.pyx\", line 382, in pandas._libs.parsers.TextReader.__cinit__\r\n File \"pandas/_libs/parsers.pyx\", line 689, in pandas._libs.parsers.TextReader._setup_parser_source\r\nFileNotFoundError: [Errno 2] File b'C:/Users/ckelly/Desktop/SocialPrelim\\\\Simba1\\\\project_folder\\\\logs\\\\measures\\\\pose_configs\\\\bp_names\\\\project_bp_names.csv' does not exist: b'C:/Users/ckelly/Desktop/SocialPrelim\\\\Simba1\\\\project_folder\\\\logs\\\\measures\\\\pose_configs\\\\bp_names\\\\project_bp_names.csv'\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows\r\n - Python Version 3.6.10\r\n - Anaconda Environment\r\n \r\n\r\n**Additional context**\r\nI have tried creating a new environment and installing simba from scratch, both with `pip install simba-uw-tf-dev` and `pip install simba-uw-tf-dev --no-deps`. Neither solved the issue. I found someone having a similar error to the second one I pasted above in the gitter, but it appeared their issue was due to path issues with Linux on a server. My error isn't with the path, the file it is looking for just doesn't exist. Any help would be greatly appreciated!\r\n", + "user": "CorinneAKelly", + "reaction_cnt": 0, + "created_at": "2022-01-28T21:21:02Z", + "updated_at": "2022-03-10T16:29:34Z", + "author": "CorinneAKelly", + "comments": [ + { + "body": "I have also tried running this on my laptop just now with similar issues. For some reason on that computer I don't get the first string of classification errors, but I am still getting the second set of errors when I attempt to import DLC H5 files. ", + "created_at": "2022-01-28T21:42:39Z", + "author": "CorinneAKelly" + }, + { + "body": "I also have this issue when trying to use preset multianimal 2 animals/14 bps. 
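As a stop-gap for the missing file, a hedged sketch that writes a project_bp_names.csv by hand; the body-part names are illustrative, and the headerless one-name-per-row layout is inferred from the pd.read_csv(..., header=None) call in the traceback above:

```python
import csv

# Illustrative 2-animal / 7 body-part naming using the _1/_2 suffix
# convention discussed in this thread; substitute your project's own names.
bps = ["Ear_left", "Ear_right", "Nose", "Center", "Lat_left", "Lat_right", "Tail_base"]
rows = [[f"{bp}_{animal}"] for animal in (1, 2) for bp in bps]

path = "project_folder/logs/measures/pose_configs/bp_names/project_bp_names.csv"
with open(path, "w", newline="") as f:
    csv.writer(f).writerows(rows)
```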
`bp_names` is not created, causing errors further down the line. The workaround for now is to just modify the analysis results so they will look like those from a single animal project for 2 animals/14 bps. Just wondering what's causing the issue ", + "created_at": "2022-03-04T14:06:57Z", + "author": "KonradDanielewski" + }, + { + "body": "@KonradDanielewski - thanks for reporting, do you see any main terminal error msg when creating the project? \r\n\r\nAlso, am I correct that the settings you are using are the settings in this screenshot?\r\n![Screen Shot 2022-03-07 at 6 27 59 AM](https://user-images.githubusercontent.com/50497030/157023022-b024b777-f315-48e5-9fc8-6547df6c44d8.png)\r\n ", + "created_at": "2022-03-07T11:29:43Z", + "author": "sgoldenlab" + }, + { + "body": "Sorry, there is no error, neither in the main terminal nor in the simba terminal. Just the file not being created. \r\n\r\nSo, for now with the workaround I do set it the way you show with `classic tracking` and `2 animals, 14 bps` and the `project_bp_names.csv` is being created. But when I change `classic tracking` to `Multi tracking` and `Multi-animals, 7 bps` it's not being created. ", + "created_at": "2022-03-07T12:12:18Z", + "author": "KonradDanielewski" + }, + { + "body": "Thanks @KonradDanielewski - I've sorted it, if you update to `0.90.4` it should work - but please let me know. There was a line in there basically saying `if multi-animal 7bps == do not create project_bp_names.csv` lol. I have no idea why a line like that would be in there, makes me a little uneasy, but I've commented it out for now and it does not seem to cause any immediate issues. ", + "created_at": "2022-03-10T11:55:14Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for the quick response. So, the file now is created but it looks the same as the one for classic tracking, listing 14 bps with `_1` and `_2`. Is that how it's supposed to be? \r\n\r\nMaybe it's my lack of understanding of how the workflow goes concerning bp names. I'll take a look at the code and try to get a better understanding of the backend", + "created_at": "2022-03-10T13:52:29Z", + "author": "KonradDanielewski" + }, + { + "body": "Yes, that sounds correct; the workflow isn't ideal, but going back now and sorting the backend while ensuring backwards compatibility is tricky and not a priority. What happens is that SimBA pulls the index from the selected bp configuration in the dropdown, and matches it with the row in the `simba.pose_configurations.bp_names.bp_names.csv` file, and the values in the relevant row in this file are used to create the `project_bp_names.csv` file in the project. If you create additional user-defined configurations, then those become appended as a new row in the `simba.pose_configurations.bp_names.bp_names.csv` file.\r\n\r\n\r\n\r\n", + "created_at": "2022-03-10T16:29:34Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "3D data processing", + "body": "**Is your feature request related to a problem? Please describe.**\r\nI have 3D data reconstructed from multiple synced videos - DLC and anipose. 
I was wondering if there is a plan for adding 3D support to SimBA and if not, could you estimate how hard it would be to modify the existing code to run it with 3D data?\r\n\r\n**Describe the solution you'd like**\r\nAbility to extract features, train model and analyze 3D reconstructed data\r\n\r\n**Describe alternatives you've considered**\r\nCouldn't find software that does that and I don't think I would have the skills to code all of this myself - underestimated the complexity I guess\r\n\r\n\r\nEDIT: My morning brain forgot to check previous issues. I see the question has been asked before. If you're thinking about working on it I would be happy to try and contribute as much as I can if given some specific tasks :]", + "user": "KonradDanielewski", + "reaction_cnt": 0, + "created_at": "2022-01-19T09:19:01Z", + "updated_at": "2022-01-21T13:01:40Z", + "author": "KonradDanielewski", + "comments": [ + { + "body": "Hi Konrad!\r\n\r\nI’ve begun to write the code necessary to get 3D pose analyzed in SimBA, but I’ve worked with DANNCE and not Anipose… \r\n\r\nThe three main code bits that would have to be written are methods to (i) import 3D data, (ii) extract features from 3D data, (iii) and visualize 3D data. \r\n\r\nRudimentary code that does these three main bits is [HERE](https://github.com/sgoldenlab/simba/tree/master/simba/3D). It works for creating classifiers from 3D data, I just haven’t found time to get it into the SimBA GUI and accept parameters from the SimBA project file; right now I don’t think I will be able to get it done any time soon unfortunately. \r\n\r\nThe main thing you’d have to change is the beginning of the import script to handle Anipose data (DANNCE data is in mat format and I don’t know what Anipose data looks like). If you have an Anipose data file would you be willing to share it together with the video? This would make it possible for us to write the necessary code eventually when time allows. Sorry I can't be of more help at the moment..\r\n\r\nOnce these things are running, there are also amendments to make in ancillary scripts (e.g., calculate movement distances, ROI scripts etc) to accommodate 3D data (e.g., `if pose_estimation == 3D: calculate euclid distances in 3D rather than 2D`).\r\n", + "created_at": "2022-01-19T15:33:51Z", + "author": "sgoldenlab" + }, + { + "body": "Hi,\r\n\r\nThank you for the response! I'd be happy to share some of the data. Anipose 3D-pose is stored in a csv file: x,y,z coords, n_cams that have a prediction of the bp in a frame, error, score for those (which I think is a mean or median of errors from all cameras), frame num and other values that I'm not sure about but I think they are used when a reference point is given. Anyway, if you give me an email to share the data I'd make it available on my google drive. I'll take a look at the code you uploaded and see if I can make some progress there.\r\n\r\nThe data is a work in progress so the tracking or reconstruction has some issues (it's a weird model that I kinda lost track of and is now a little messy). I'm in the middle of creating a new model with a set of labels you propose for 2 animals and will now be using maDLC as I have 2 rats and it seems to help for the one that has a socket on its head which often obstructs the view of the snout or one of the ears. 
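A minimal sketch of the 3D amendment mentioned above (euclidean distances computed in 3D rather than 2D), assuming an anipose-style CSV with <bodypart>_x/_y/_z columns; the file and body-part names are illustrative:

```python
import numpy as np
import pandas as pd

df = pd.read_csv("pose_3d.csv")  # placeholder anipose triangulation output

def euclid_3d(df: pd.DataFrame, bp1: str, bp2: str) -> np.ndarray:
    """Frame-wise 3D euclidean distance between two body-parts."""
    a = df[[f"{bp1}_x", f"{bp1}_y", f"{bp1}_z"]].to_numpy()
    b = df[[f"{bp2}_x", f"{bp2}_y", f"{bp2}_z"]].to_numpy()
    return np.linalg.norm(a - b, axis=1)

df["snout_tailbase_dist"] = euclid_3d(df, "snout", "tailbase")
```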
", + "created_at": "2022-01-20T09:06:01Z", + "author": "KonradDanielewski" + }, + { + "body": "Thanks Konrad, that would be super helpful - all work is work in progress, I would just need to know the general layout to get it into SimBA, wonky pose doesn't matter. If you could share the video as well I can make sure the visualizations don't look off. You could send it to goldenneurolab@gmail.com thanks again", + "created_at": "2022-01-21T13:01:40Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 9.0.0", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 9.0.0.\n
Release notes (sourced from pillow's releases):

9.0.0 - https://pillow.readthedocs.io/en/stable/releasenotes/9.0.0.html
Changes: (truncated)

Changelog (sourced from pillow's changelog):

9.0.0 (2022-01-02)
- Restrict builtins for ImageMath.eval(). CVE-2022-22817 #5923 [radarhere]
- Ensure JpegImagePlugin stops at the end of a truncated file #5921 [radarhere]
- Fixed ImagePath.Path array handling. CVE-2022-22815, CVE-2022-22816 #5920 [radarhere]
- Remove consecutive duplicate tiles that only differ by their offset #5919 [radarhere]
- Improved I;16 operations on big endian #5901 [radarhere]
- Limit quantized palette to number of colors #5879 [radarhere]
- Fixed palette index for zeroed color in FASTOCTREE quantize #5869 [radarhere]
- When saving RGBA to GIF, make use of first transparent palette entry #5859 [radarhere]
- Pass SAMPLEFORMAT to libtiff #5848 [radarhere]
- Added rounding when converting P and PA #5824 [radarhere]
- Improved putdata() documentation and data handling #5910 [radarhere]
- Exclude carriage return in PDF regex to help prevent ReDoS #5912 [hugovk]
- Fixed freeing pointer in ImageDraw.Outline.transform #5909 [radarhere]
- Added ImageShow support for xdg-open #5897 [m-shinder, radarhere]
- Support 16-bit grayscale ImageQt conversion #5856 [cmbruns, radarhere]
- Convert subsequent GIF frames to RGB or RGBA #5857 [radarhere]
(... truncated)

Commits: (viewable in compare view)
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=9.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-01-13T03:41:23Z", + "updated_at": "2022-03-12T00:55:46Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #169.", + "created_at": "2022-03-12T00:55:44Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 9.0.0 in /simba", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 9.0.0.\n
Release notes

Sourced from pillow's releases.

9.0.0

https://pillow.readthedocs.io/en/stable/releasenotes/9.0.0.html

Changes

... (truncated)

Changelog

Sourced from pillow's changelog.

9.0.0 (2022-01-02)
  • Restrict builtins for ImageMath.eval(). CVE-2022-22817 #5923 [radarhere]
  • Ensure JpegImagePlugin stops at the end of a truncated file #5921 [radarhere]
  • Fixed ImagePath.Path array handling. CVE-2022-22815, CVE-2022-22816 #5920 [radarhere]
  • Remove consecutive duplicate tiles that only differ by their offset #5919 [radarhere]
  • Improved I;16 operations on big endian #5901 [radarhere]
  • Limit quantized palette to number of colors #5879 [radarhere]
  • Fixed palette index for zeroed color in FASTOCTREE quantize #5869 [radarhere]
  • When saving RGBA to GIF, make use of first transparent palette entry #5859 [radarhere]
  • Pass SAMPLEFORMAT to libtiff #5848 [radarhere]
  • Added rounding when converting P and PA #5824 [radarhere]
  • Improved putdata() documentation and data handling #5910 [radarhere]
  • Exclude carriage return in PDF regex to help prevent ReDoS #5912 [hugovk]
  • Fixed freeing pointer in ImageDraw.Outline.transform #5909 [radarhere]
  • Added ImageShow support for xdg-open #5897 [m-shinder, radarhere]
  • Support 16-bit grayscale ImageQt conversion #5856 [cmbruns, radarhere]
  • Convert subsequent GIF frames to RGB or RGBA #5857 [radarhere]
... (truncated)

Commits
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=9.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
Dependabot commands and options

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2022-01-13T02:04:12Z", + "updated_at": "2022-03-12T00:11:07Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #168.", + "created_at": "2022-03-12T00:11:05Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Issue importing APT tracking data", + "body": "Hello, I am trying to use simBA with videos that have been previously tracking using APT, however when I attempt to load the TRK files, I get an error about the dimensions being out of bounds. Have you seen anything like this before, or know what could be going on?\r\n\r\nI am running the freshest install (I think 0.89) on a windows machine.\r\n\r\nThanks,\r\nTim\r\n![Simba Error](https://user-images.githubusercontent.com/88455390/148262544-afcce7b2-4687-4bd2-9b9b-1ff637057e36.png)\r\n", + "user": "holfordt", + "reaction_cnt": 0, + "created_at": "2022-01-05T17:34:13Z", + "updated_at": "2022-01-06T18:40:19Z", + "author": "holfordt", + "comments": [ + { + "body": "Hello @holfordt ! I've only seen a few trk files, and I am thinking it is possible that your TRK files are organized slightly differently (possibly because of a different apt versions?) and that is what is causing the error. Could you share a trk file that SimBA is having problems with and I can check if what is going on and insert a fix if needed? ", + "created_at": "2022-01-05T20:00:33Z", + "author": "sronilsson" + }, + { + "body": "Sounds great! Here are a couple from different videos..\r\n\r\n[To SimBA.zip](https://github.com/sgoldenlab/simba/files/7817578/To.SimBA.zip)\r\n", + "created_at": "2022-01-05T20:22:56Z", + "author": "holfordt" + }, + { + "body": "Cheers! The data from APT I've seen and read about is in the `numpoints x numdimensions x numframes x numtargets` format, meaning if you track three animals, with 8 body-parts each, with a single camera, for 12000 frames you have an array that is 8x2x12000x3 - https://kristinbranson.github.io/APT/appendices.html#Trk%20file%20contents\r\n\r\nSimBA however sees your data shape as 14556x2x11. If I had to guess it is 14556 frames, one camera, 11 body-parts, and a single animal. I've actually seen this issue before (the different order, beginning with the frames dimension) and SimBA already has a fix to re-organize the data into the appropriate format. What we don't have a fix for, however, is the last dimension - your data is NxNxN but the APT data I've seen is NxNxNxN - thats why you get the error of the missing axis.\r\n\r\n.. anyhow I will insert the fix - I just want to confirm that it is a single animal you are tracking so I don't head down the wrong path? ", + "created_at": "2022-01-05T22:38:59Z", + "author": "sronilsson" + }, + { + "body": "Oh interesting. That makes sense, and you are correct about the structure of our data. 11 body parts, 2 dimensions, 14556 frames and one animal. I noticed when I selected the APT style that is said multi animal tracking, but I didnt think it would work if I tried to load it in a different format.", + "created_at": "2022-01-05T23:25:42Z", + "author": "holfordt" + }, + { + "body": "Hello @holfordt - can you try an update SimBA with `pip install simba-uw-tf-dev --upgrade`, it is version 0.89.1, and try again to import the apt files? \r\n", + "created_at": "2022-01-06T14:59:51Z", + "author": "sronilsson" + }, + { + "body": "Hi, yes it looks like it has imported them now. Thank you! I will try to go through the rest of the process and let you know if I have any other issues. 
Thanks again.", + "created_at": "2022-01-06T18:40:19Z", + "author": "holfordt" + } + ] + }, + { + "title": "SimBA stalls while processing a file for movement outliers", + "body": "**Describe the bug**\r\nI am new to SimBA and am trying to use it to classify behaviors in a fish species. Before running on my full dataset, I am troubleshooting SimBA on a single video. I have been following the ReadTheDocs instructions (https://simba-docs.readthedocs.io/en/latest/docs/getting_started/simba_tutorial.html#part-1-create-a-new-project) and am on Part 2, Step 4. I am trying to correct for outliers. However, it has been 6 hours since I started the outlier correction process and the main SimBA window is still displaying the following message:\r\n```\r\nPose-estimation body part setting for outlier correction: 9\r\nApplying settings for classical tracking...\r\nProcessing 1 files for movement outliers...\r\n```\r\n\r\nPressing \"Run outlier correction\" generated the following error in the terminal:\r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/simba/SimBA.py\", line 6092, in correct_outlier\r\n dev_move_user_defined(configini)\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/simba/outlier_scripts/movement/correct_devs_mov_user_defined.py\", line 120, in dev_move_user_defined\r\n csv_df.columns = colHeads\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/pandas/core/generic.py\", line 5192, in __setattr__\r\n return object.__setattr__(self, name, value)\r\n File \"pandas/_libs/properties.pyx\", line 67, in pandas._libs.properties.AxisProperty.__set__\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/pandas/core/generic.py\", line 690, in _set_axis\r\n self._data.set_axis(axis, labels)\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/pandas/core/internals/managers.py\", line 183, in set_axis\r\n \"values have {new} elements\".format(old=old_len, new=new_len)\r\nValueError: Length mismatch: Expected axis has 25 elements, new values have 27 elements\r\n```\r\n\r\n**To Reproduce**\r\nThe video and DeepLabCut data are attached as Google Drive files at the bottom of this post. To prep the video, I just cropped it a bit.\r\nTo get this error, I followed the steps from the SimBA ReadTheDocs, SimBA Pipeline section. Below are the specific parameters I used for each step which required user input:\r\n\r\nIn Part 1, Steps 1.4-1.5, I specified 9 behaviors:\r\n1. Suspension\r\n2. Bottom Skimming\r\n3. Drifting\r\n4. Fast Turning\r\n5. Flit Swimming\r\n6. Pushing Against Tank Wall\r\n7. Slow Turning\r\n8. Sustained Swimming\r\n9. Other\r\n\r\nIn Part 1, Step 1.6, I specified classic tracking\r\n\r\nFor Part 1, Step 1.7, I specified 1 animal, 9 body parts\r\n\r\nIn Part 2, Step 3.1, I set Distance_in_mm to 225 mm (the length of the top of my fish's tank)\r\n\r\nIn Part 2, Step 3.6, I drew a line across top of tank\r\n\r\nIn Part 2, Step 4.2, I used nose and tail as reference body parts\r\n\r\nIn Part 2, Step 4.3, I listed 120mm for both Movement criterion and Location criterion. I know these are both probably bad parameters. I used them because the maximum size of the fish is 120mm (though mine are probably closer to 50mm). 
I will probably adjust them once I can see the results of outlier correction.\r\n\r\n**Expected behavior**\r\nReadTheDocs indicates I should expect to find 2 log files in the ```/project_folder/logs/``` directory upon successful completion, and presumably a message stating that the program is looking for location outliers, followed by some sort of \"done\" message\r\n\r\n**Desktop**\r\nUbuntu 18.04\r\nPython 3.6.10\r\nI am using Anaconda\r\n \r\n\r\n**Additional context**\r\nHere is a Google Drive Folder with the video and DeepLabCut tracking data I am using in this process. Please don't hesitate to message me if there are sharing issues:\r\nhttps://drive.google.com/drive/folders/1JB3nE14e3-8AcZE0DsZ0F1wXpwmMqM_J?usp=sharing\r\n", + "user": "AnnabelPerry", + "reaction_cnt": 0, + "created_at": "2022-01-02T02:11:10Z", + "updated_at": "2022-04-04T19:09:28Z", + "author": "AnnabelPerry", + "comments": [ + { + "body": "Hi @AnnabelPerry - thanks for sharing the files on the gdrive. I will take a look what might be going on but can't get to it straighaway. It seems the error is caused by the outlier correction functions of SimBA thinking you have fewer body-parts than the 9 that you actually have. I would suggest skipping the outlier correction and proceed without it to see if the rest of the pipeline works OK. ", + "created_at": "2022-01-05T20:06:47Z", + "author": "sronilsson" + }, + { + "body": "Hi, I'm still having a problem with this. I skipped outlier correction and moved straight to extracting features. When extracting, I get the following message:\r\n```\r\nPose-estimation body part setting for feature extraction: 9\r\n0\r\nExtracting features from 0 files...\r\nExtracting features from 0 file(s)...\r\nAll feature extraction complete.\r\n```\r\nI'm not entirely clear on what feature extraction is supposed to do, but it doesn't seem right that SimBA is registering 0 files.\r\nWhen I proceed to behavior labeling and click ```Select video (create new video annotation)```, I get the following error:\r\n```\r\nNumber of Frames: 38981\r\n/home/blackmonlab/Documents/Annabel/NeuralNetworkProject/SimBATestProject/project_folder/csv/features_extracted/Surface 1 41486_cropped.csv @@@@@@@@@@@@@\r\nThe CSV file could not be located at the following path: /home/blackmonlab/Documents/Annabel/NeuralNetworkProject/SimBATestProject/project_folder/csv/features_extracted/Surface 1 41486_cropped.csv . It may be that you missed a step in the analysis. Please generate the file before proceeding.\r\nNone\r\nApplying settings for classical tracking...\r\n```\r\nThere seems to be an issue with extracting features - could this be related to the issue with outlier correction, or should I open a second issue?\r\n", + "created_at": "2022-02-16T04:05:42Z", + "author": "AnnabelPerry" + }, + { + "body": "Hi @AnnabelPerry - yes you are right it is related to the outlier extraction, let's try to get passed it. \r\n\r\n(i) SimBA takes the files inside of the the `project_folder/csv/outlier_corrected_movement_location` folder and extracts features from these files. A `feature` in your case, could be the angular rotation of the fish, the velocity of the fish body-parts etc. The message `Extracting features from 0 files..` suggests that there are 0 files inside of this folder in your project, so we need to get this folder populated so we have files to extract features from. 
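The `Length mismatch` traceback above is the symptom of this: a 27-entry header list (9 body parts x x/y/likelihood) being assigned to a 25-column frame. A hypothetical pre-flight check, assuming a typical flat pose CSV layout rather than SimBA's internal column handling, that fails with a readable message instead:

```python
import pandas as pd

body_parts = ["nose", "tail"]  # ... the project's full body-part list (9 entries here)
expected = [f"{bp}_{suffix}" for bp in body_parts for suffix in ("x", "y", "p")]

df = pd.read_csv("pose_estimation.csv", index_col=0)  # hypothetical input path
if len(df.columns) != len(expected):
    raise ValueError(
        f"Project config implies {len(expected)} columns "
        f"({len(body_parts)} body parts x 3), but the file holds {len(df.columns)}; "
        f"re-check the body-part configuration before renaming headers."
    )
df.columns = expected  # only safe once the widths agree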
\r\n\r\n(ii) The `Pose-estimation body part setting for feature extraction: 9` message suggests that you are sing one of SimBAs built in pose-estimation configurations. I don't think this is accurate, as you are classifying behaviors of fish, and there is no built in configuration for fish as the moment. That message should be reading `user-defined` rather than `9`.\r\n\r\n(iii) The core issue probably comes from point ii, where SimBA thinks you have 9 in-built body-parts but you don't. When you click `skip outlier correction` SimBA tries to apply the in-built body-part names to your CSV files but fails as your pose-estimation files contains more/less body-parts. Could you try to use a user-defined body-part configuration and see if that fixes it?", + "created_at": "2022-02-16T12:20:31Z", + "author": "sgoldenlab" + }, + { + "body": "Hi, thanks for your rapid response! I generated a new project and manually-defined body parts using a screenshot from one of the videos. I went through the steps I described in the original post and ended up with this message after clicking \"extract features\". It still says ```Extracting features from 0 files...```\r\n```\r\nPose-estimation body part setting for feature extraction: user_defined\r\n0\r\nApplying settings for classical tracking...\r\nExtracting features from 0 files...\r\nAll feature extraction complete.\r\n```\r\nWhen I attempt to label behaviors, I still get this message:\r\n```\r\nThe CSV file could not be located at the following path: /home/blackmonlab/Documents/Annabel/NeuralNetworkProject/SimBATestProject/SurfaceProject/project_folder/csv/features_extracted/Surface 1 41486_cropped.csv . It may be that you missed a step in the analysis. Please generate the file before proceeding.\r\n```", + "created_at": "2022-02-16T22:01:11Z", + "author": "AnnabelPerry" + }, + { + "body": "Hi @AnnabelPerry - just to confirm: when yo skipped outlier correctio, this you indicate this by pressing this button?\r\n![Capture](https://user-images.githubusercontent.com/50497030/154482887-eb0a121b-3de5-4e83-8499-9e8ab6cf3e28.JPG)\r\n\r\n", + "created_at": "2022-02-17T12:34:55Z", + "author": "sgoldenlab" + }, + { + "body": "Yes, I did skip outlier correction", + "created_at": "2022-02-17T13:47:47Z", + "author": "AnnabelPerry" + }, + { + "body": "My bad, I had not pressed the button on this most recent outlier correction. I did this and the issue appears to have been fixed. Thank you!", + "created_at": "2022-02-19T01:41:00Z", + "author": "AnnabelPerry" + } + ] + }, + { + "title": "Using simba to make pairwise classifier in 3 (or more) animal cohorts", + "body": "Hello\r\nWe are finally getting simba to work on our 6animal SLEAP labeled test datasets.\r\n\r\nIt would be really useful to also know which pair of animals is interacting during a predicted behavior. I attach an example of \"chase\" behavior (more like approach behavior). Here pup3 approached the female.\r\n\r\nIs this information available somewhere in the predicted file? I.e. 
is there a probability curve for all possible pairs of interactions, or does simba discard that information.\r\n\r\nFor us, such information would be critical!\r\n\r\nThanks so much,\r\n\r\n![Screenshot from 2021-12-24 13-49-22](https://user-images.githubusercontent.com/4267452/147354211-01d6c2eb-60b9-40ba-8369-58994e19d3de.png)\r\n", + "user": "catubc", + "reaction_cnt": 0, + "created_at": "2021-12-24T12:57:41Z", + "updated_at": "2021-12-24T17:21:06Z", + "author": "catubc", + "comments": [ + { + "body": "On this note, I also wanted to confirm our application. \r\n\r\nThere are 6 animals and we train on pair-wise behavior (e.g. 2 of 6 animals engaging in some sort of social interaction). But the pair could be any possible combination of 6 animals.\r\n\r\nDoes the classifier apply the training data to all possible pairs? It's not completely clear how it can generalize pair-wise behaviors to datasets that have > 2 animals. Perhaps there's a different method to achieve our pair-wise social-interaction goals.", + "created_at": "2021-12-24T13:06:26Z", + "author": "catubc" + }, + { + "body": "@catubc - Unfortunately I don't have anything in SimBA to support it, I'm sorry! With six animals and two roles (approach vs approached) there are a lot of permutations making it difficult to solve post-hoc with heuristic rules as well... It makes 15 permutations of animal combinations, 30 permutations if you want the roles I think.. The most immediate solution that comes to mind is:\r\n\r\n1. The classifier should only take in features values relevant to two animals at any one time (SimBA currently takes all), run the classifier for each of the animal combinations.\r\n2. To find the roles: when classifier says behavior present, look at the feature values and find animal that have bodyparts which show movement, and body-parts with fastest decreasing distance to the original location of the other animals body-parts at the start of the approach bout (i.e., compare animal 1 movement against the location of animal 2 in the beginning of the bout as a static, and then the inverse). This should cover for situations where both animals are approaching each other as well. ", + "created_at": "2021-12-24T14:25:14Z", + "author": "sronilsson" + }, + { + "body": "Ok, thanks for this.\r\n\r\nIdentifying the 2 parties in a social interaction I guess is not so difficult (I could write some heuristic along the lines you mentioned). But training 15 classifiers per social behavior is a bit too onerous (that would make it ~100 classifiers for several behaviors). \r\n\r\nIs there a potentially simpler solution, something along these lines:\r\n\r\n3. Select a social behavior and select two animals that exhibit many examples of social interaction. Feed just those features into simba and train a classifier. \r\n\r\n4. Then write a function that applies the classifier to all pair-wise animals for the prediction step.\r\n\r\nIt's not clear to me how bad this would do?! There appear to be some feature engineering in simba (though perhaps not as much as in jaaba):\r\n- euclidean distance \r\n- body movement\r\n- all_bp movements\r\n- sum eucldiean distance\r\n- mean euclidean distance\r\n\r\nAnd of these features perhaps only bp movements matter because of the animal sizes (in our case adults vs. pups). 
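The role heuristic in point 2 above can be prototyped in a few lines: hold each animal's position at bout start as a static anchor, and ask whose distance to the other's anchor shrinks fastest. A sketch under those assumptions (per-bout centroid arrays; not SimBA code):

```python
import numpy as np

def approach_score(centroids: np.ndarray, other_start: np.ndarray) -> float:
    """Mean per-frame change in distance to the other animal's bout-start
    location; more negative means approaching faster."""
    dists = np.linalg.norm(centroids - other_start, axis=1)
    return float(np.diff(dists).mean())

def assign_roles(a: np.ndarray, b: np.ndarray) -> str:
    # a, b: (n_frames_in_bout, 2) centroid coordinates within one classified bout
    if approach_score(a, b[0]) < approach_score(b, a[0]):
        return "a approaches b"
    return "b approaches a"
```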
But inter-feature distance will not be relevant to most basic social behaviors such as approach, chase, etc.\r\n\r\nIf you think this might work, I'd be happy to try and write an extension for simba that:\r\n- Optionally: asks user to select 2 animals pre training. (Though a faster option might be to ask end user to select this pre-simba; but we could also put this function really early on in simba so that the selected 2 animals propagate through all routines).\r\n- Multi-animal-prediction: asks user to load a pairwise classifier and a feature file and applies classifier to all pairwise animals in the feature file.\r\n\r\nLet me know what you think.\r\n\r\n[Edit:] One additional step that could help for gross social behaviors would be to collapse all features to body centroids and feed single-feature animal locations. This would avoid the bp feature confusion.\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2021-12-24T15:41:23Z", + "author": "catubc" + }, + { + "body": "Yes you are correct, I did not suggest 15 classifiers, it would be enough with one classifiers run 15 times for each frame. If runtime for this is an issue (I don't know how slow it would be, it might be acceptable), converting the classifier to pure python with something like [pure predict](https://github.com/Ibotta/pure-predict) should be part of the solution before running. \r\n\r\nPick out all features that have the keyword 'Animal_1' and 'Animal_2' in it and run classifier on those features. Then pick out all features with `Animal_1` and `Animal_3` and run classifier on those features and so on for all permutations. The classifier would have to be created using labeled datasets from two animals, it does not matter which animals. So it is a two animal classifier run 15 times. ", + "created_at": "2021-12-24T16:03:56Z", + "author": "sronilsson" + }, + { + "body": "Lol, sorry I misunderstood.\r\nI sent you an email @UW email if you have a moment to guide me on this extension.\r\nThanks so much", + "created_at": "2021-12-24T16:23:48Z", + "author": "catubc" + }, + { + "body": "I see UW email is not working. \r\nIf you have 20mins to zoom monday/tuesday, would be very helpful to settle on:\r\n\r\n- protocol (e.g. do we write a simba GUI to select initial pair; or just ask enduser to do so prior to loading <- this is probably ok to start)\r\n- optimal location to implement the classifier loop (not clear where this would go).\r\n- any other processing tricks/suggestions (I don't know pure predict; I usually write multiprocessing functions directly using [parmap](https://pypi.org/project/parmap/); we might need to turn off intrinsic multiprocessing in sklearn for multiprocessing to work though).\r\n", + "created_at": "2021-12-24T16:25:59Z", + "author": "catubc" + }, + { + "body": "Sounds good! I'm currently free around midday Mon or Tues EST. I'm in GMT at the moment so earlier the better. \r\n\r\nWe can have a checkbook in the \"Classifier settings\" menu, if applied, then the classifier is run on all two-animal combinations. Checkbox is greyed out if there is two or less animals in project. \r\n\r\nThe first point seems the trickiest to implement but it sounds like your idea should work.\r\n\r\nMy email is sronilsson@gmail.com", + "created_at": "2021-12-24T17:01:32Z", + "author": "sronilsson" + }, + { + "body": "Ok, sent you an invite Monday 9AMGMT (I'm in Zurich so GMT+1). 
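The "one two-animal classifier, run 15 times" plan above is straightforward with itertools: keep only the feature columns that reference a given pair, rename them into the Animal_1/Animal_2 layout the model was trained on, and score every combination. The column-name convention here is an assumption for illustration, not SimBA's actual feature names, and the renamed columns must end up in the model's training order:

```python
from itertools import combinations
import pandas as pd

def scan_pairs(features: pd.DataFrame, clf, n_animals: int = 6) -> pd.DataFrame:
    """Score one two-animal classifier across every animal pair, per frame."""
    results = {}
    for a, b in combinations(range(1, n_animals + 1), 2):  # 15 pairs for 6 animals
        # keep columns mentioning animal a and/or b but no third animal
        cols = [c for c in features.columns
                if (f"Animal_{a}" in c or f"Animal_{b}" in c)
                and not any(f"Animal_{k}" in c
                            for k in range(1, n_animals + 1) if k not in (a, b))]
        X = features[cols].copy()
        X.columns = [c.replace(f"Animal_{a}", "Animal_1")
                      .replace(f"Animal_{b}", "Animal_2") for c in X.columns]
        results[f"{a}_{b}"] = clf.predict_proba(X)[:, 1]  # P(behavior present)
    return pd.DataFrame(results, index=features.index)
```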
Feel free to suggest another time.", + "created_at": "2021-12-24T17:21:06Z", + "author": "catubc" + } + ] + }, + { + "title": "Docs link broken", + "body": "Hello\r\nThe link to the behavior annotation GUI seems to be broken.\r\n\r\n![Screenshot from 2021-12-24 11-51-09](https://user-images.githubusercontent.com/4267452/147346695-9de2307f-1955-40f9-8865-5a1dd698c317.png)\r\n\r\n\r\nI think the correct link is this:\r\n\r\nhttps://simba-docs.readthedocs.io/en/latest/docs/tutorials/b_annotation.html?highlight=annotation#simba-behavioral-annotator-gui\r\n", + "user": "catubc", + "reaction_cnt": 0, + "created_at": "2021-12-24T10:52:26Z", + "updated_at": "2021-12-24T12:49:34Z", + "author": "catubc", + "comments": [ + { + "body": "Thanks - if you can, use [THIS DOCUMENTATION](https://github.com/sgoldenlab/simba/blob/master/docs/tutorial.md#step-6-label-behavior) - I don't really know where the readthedocs.io comes from, I have not written it and I can't maintain. If you find any errors in the tutorial markdowns on the github repo though then I will fix it. \r\n\r\n", + "created_at": "2021-12-24T12:33:15Z", + "author": "sronilsson" + } + ] + }, + { + "title": "simba command fails after installation", + "body": "I am trying to install simba on Ubuntu 18.04\r\nI used the following commands to install simba:\r\n```\r\nconda create -n simbaenv python=3.6.10\r\nconda activate simbaenv\r\npip install simba-uw-tf-dev\r\npip uninstall shapely\r\nconda install -c conda-forge shapely\r\nsudo apt install simba\r\n```\r\nWhen I initially ran ```pip install simba-uw-tf-dev```, I got the error at the bottom of this post (it's very long, so I did not include it within text). I assumed this was the forewarned shapely error and continued.\r\n\r\nWhen I ran ````sudo apt install simba````, I successfully encountered a purple GUI asking me to name and assign a password to a mysql server. However, after entering the requested information, I reached the following error message:\r\n```\r\nERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2 \"No such file or directory\") \r\n```\r\nI clicked \"abort\", deactivated my conda environment, and attempted install mysql:\r\n```sudo apt-get install mysql-server```\r\nThis reopened the purple GUI and the password was requested\r\nAfter entering the password, I got the following error:\r\n```\r\nERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/run/mysqld/mysqld.sock' (2 \"No such file or directory\") \r\n```\r\nI reentered conda environment and tried to run simba, getting the following error:\r\n```Error in configuration filePermission denied```\r\n\r\nSo, I instead ran ```sudo simba``` and got following error:\r\n```Access denied for user 'mirrors'@'localhost' (using password: YES)```\r\n\r\nSo I tried deactivating the environment, removing it, then running back through the environment creation and installation steps listed at the top of this post. I still get the ```filePermission denied``` error when I run ```simba``` and ```sudo apt install simba``` yields the following message:\r\n```\r\nReading package lists... Done\r\nBuilding dependency tree \r\nReading state information... 
Done\r\nsimba is already the newest version (0.8.4-4.3).\r\nThe following packages were automatically installed and are no longer required:\r\n libconfig-inifiles-perl libdbd-mysql-perl libjemalloc1 libreadline5 libterm-readkey-perl linux-hwe-5.4-headers-5.4.0-71 linux-hwe-5.4-headers-5.4.0-72 linux-hwe-5.4-headers-5.4.0-73\r\n linux-hwe-5.4-headers-5.4.0-74 linux-hwe-5.4-headers-5.4.0-77 linux-hwe-5.4-headers-5.4.0-80 linux-hwe-5.4-headers-5.4.0-81 linux-hwe-5.4-headers-5.4.0-84 linux-hwe-5.4-headers-5.4.0-86\r\n linux-hwe-5.4-headers-5.4.0-87 linux-hwe-5.4-headers-5.4.0-89 mariadb-common nvidia-container-runtime shim\r\nUse 'sudo apt autoremove' to remove them.\r\n0 upgraded, 0 newly installed, 0 to remove and 18 not upgraded.\r\n\r\n```\r\nI'm not entirely sure where to go from here. Help would be greatly appreciated\r\n\r\nOriginal install error:\r\n```\r\nCollecting simba-uw-tf-dev\r\n Using cached Simba_UW_tf_dev-0.88.8-py3-none-any.whl (10.5 MB)\r\nRequirement already satisfied: scipy==1.1.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.1.0)\r\nRequirement already satisfied: yellowbrick==0.9.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.9.1)\r\nRequirement already satisfied: graphviz==0.11 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.11)\r\nRequirement already satisfied: numba==0.48.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.48.0)\r\nRequirement already satisfied: numpy==1.18.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.18.1)\r\nRequirement already satisfied: Pillow==5.4.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (5.4.1)\r\nRequirement already satisfied: dash-colorscales==0.0.4 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.0.4)\r\nCollecting wxpython==4.0.4\r\n Using cached wxPython-4.0.4.tar.gz (68.8 MB)\r\n Preparing metadata (setup.py) ... 
done\r\nRequirement already satisfied: numexpr==2.6.9 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (2.6.9)\r\nRequirement already satisfied: tables==3.6.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (3.6.1)\r\nRequirement already satisfied: imgaug==0.4.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.4.0)\r\nRequirement already satisfied: opencv-python==3.4.5.20 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (3.4.5.20)\r\nRequirement already satisfied: pyarrow==0.17.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.17.1)\r\nRequirement already satisfied: dtreeviz==0.8.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.8.1)\r\nRequirement already satisfied: statsmodels==0.9.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.9.0)\r\nRequirement already satisfied: tqdm==4.30.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (4.30.0)\r\nRequirement already satisfied: tabulate==0.8.3 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.8.3)\r\nRequirement already satisfied: scikit-image==0.14.2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.14.2)\r\nRequirement already satisfied: imutils==0.5.2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.5.2)\r\nRequirement already satisfied: dash-core-components==1.10.2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.10.2)\r\nCollecting shapely==1.7\r\n Using cached Shapely-1.7.0-cp36-cp36m-manylinux1_x86_64.whl (1.8 MB)\r\nRequirement already satisfied: h5py==2.9.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (2.9.0)\r\nRequirement already satisfied: eli5==0.10.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.10.1)\r\nRequirement already satisfied: plotly==4.9.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (4.9.0)\r\nRequirement already satisfied: pandas==0.25.3 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.25.3)\r\nRequirement already satisfied: shap==0.35.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.35.0)\r\nRequirement already satisfied: dash-color-picker==0.0.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.0.1)\r\nRequirement already satisfied: matplotlib==3.0.3 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (3.0.3)\r\nRequirement already satisfied: cefpython3==66.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (66.0)\r\nRequirement already satisfied: imblearn==0.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.0)\r\nRequirement already satisfied: xgboost==0.90 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.90)\r\nRequirement already satisfied: 
dash==1.14.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.14.0)\r\nRequirement already satisfied: pyyaml==5.3.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (5.3.1)\r\nRequirement already satisfied: seaborn==0.9.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.9.0)\r\nRequirement already satisfied: scikit-learn==0.22.2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.22.2)\r\nRequirement already satisfied: dash-html-components==1.0.3 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.0.3)\r\nRequirement already satisfied: future in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (0.18.2)\r\nRequirement already satisfied: Flask>=1.0.2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (2.0.2)\r\nRequirement already satisfied: flask-compress in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (1.10.1)\r\nRequirement already satisfied: dash-table==4.9.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (4.9.0)\r\nRequirement already satisfied: dash-renderer==1.6.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (1.6.0)\r\nRequirement already satisfied: colour in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from dtreeviz==0.8.1->simba-uw-tf-dev) (0.1.5)\r\nRequirement already satisfied: attrs>16.0.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from eli5==0.10.1->simba-uw-tf-dev) (21.2.0)\r\nRequirement already satisfied: six in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from eli5==0.10.1->simba-uw-tf-dev) (1.11.0)\r\nRequirement already satisfied: jinja2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from eli5==0.10.1->simba-uw-tf-dev) (3.0.3)\r\nRequirement already satisfied: imbalanced-learn in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from imblearn==0.0->simba-uw-tf-dev) (0.6.2)\r\nRequirement already satisfied: imageio in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from imgaug==0.4.0->simba-uw-tf-dev) (2.3.0)\r\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /home/blackmonlab/.local/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (2.4.7)\r\nRequirement already satisfied: python-dateutil>=2.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (2.7.3)\r\nRequirement already satisfied: kiwisolver>=1.0.1 in /home/blackmonlab/.local/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (1.3.1)\r\nRequirement already satisfied: cycler>=0.10 in /home/blackmonlab/.local/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (0.10.0)\r\nRequirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from numba==0.48.0->simba-uw-tf-dev) (0.31.0)\r\nRequirement already satisfied: setuptools in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from 
numba==0.48.0->simba-uw-tf-dev) (58.0.4)\r\nRequirement already satisfied: pytz>=2017.2 in /home/blackmonlab/.local/lib/python3.6/site-packages (from pandas==0.25.3->simba-uw-tf-dev) (2021.1)\r\nRequirement already satisfied: retrying>=1.3.3 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from plotly==4.9.0->simba-uw-tf-dev) (1.3.3)\r\nRequirement already satisfied: networkx>=1.8 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (2.5.1)\r\nRequirement already satisfied: PyWavelets>=0.4.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (1.1.1)\r\nRequirement already satisfied: dask[array]>=1.0.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (2021.3.0)\r\nRequirement already satisfied: cloudpickle>=0.2.1 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (2.0.0)\r\nRequirement already satisfied: joblib>=0.11 in /home/blackmonlab/.local/lib/python3.6/site-packages (from scikit-learn==0.22.2->simba-uw-tf-dev) (1.0.1)\r\nRequirement already satisfied: patsy in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from statsmodels==0.9.0->simba-uw-tf-dev) (0.5.2)\r\nRequirement already satisfied: toolz>=0.8.2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from dask[array]>=1.0.0->scikit-image==0.14.2->simba-uw-tf-dev) (0.11.2)\r\nRequirement already satisfied: Werkzeug>=2.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (2.0.2)\r\nRequirement already satisfied: itsdangerous>=2.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (2.0.1)\r\nRequirement already satisfied: click>=7.1.2 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (8.0.3)\r\nRequirement already satisfied: MarkupSafe>=2.0 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from jinja2->eli5==0.10.1->simba-uw-tf-dev) (2.0.1)\r\nRequirement already satisfied: decorator<5,>=4.3 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from networkx>=1.8->scikit-image==0.14.2->simba-uw-tf-dev) (4.4.2)\r\nRequirement already satisfied: brotli in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from flask-compress->dash==1.14.0->simba-uw-tf-dev) (1.0.9)\r\nRequirement already satisfied: importlib-metadata in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (4.8.3)\r\nRequirement already satisfied: dataclasses in /home/blackmonlab/.local/lib/python3.6/site-packages (from Werkzeug>=2.0->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (0.8)\r\nRequirement already satisfied: zipp>=0.5 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from importlib-metadata->click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (3.6.0)\r\nRequirement already satisfied: typing-extensions>=3.6.4 in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from importlib-metadata->click>=7.1.2->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (3.7.4.3)\r\nBuilding wheels for collected packages: wxpython\r\n Building 
wheel for wxpython (setup.py) ... error\r\n ERROR: Command errored out with exit status 1:\r\n command: /home/blackmonlab/anaconda3/envs/simbaenv/bin/python -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' bdist_wheel -d /tmp/pip-wheel-if_jd4n3\r\n cwd: /tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/\r\n Complete output (525 lines):\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:720: UserWarning: Usage of dash-separated 'license-file' will not be supported in future versions. Please use the underscore name 'license_file' instead\r\n % (opt, underscore_opt)\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:294: DistDeprecationWarning: use_2to3 is ignored.\r\n warnings.warn(f\"{attr} is ignored.\", DistDeprecationWarning)\r\n running bdist_wheel\r\n running build\r\n WARNING: Building this way assumes that all generated files have been\r\n generated already. If that is not the case then use build.py directly\r\n to generate the source and perform the build stage. You can use\r\n --skip-build with the bdist_* or install commands to avoid this\r\n message and the wxWidgets and Phoenix build steps in the future.\r\n \r\n \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build\r\n Will build using: \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\"\r\n 3.6.10 |Anaconda, Inc.| (default, May 8 2020, 02:54:21)\r\n [GCC 7.3.0]\r\n Python's architecture is 64bit\r\n cfg.VERSION: 4.0.4\r\n \r\n Running command: build\r\n Running command: build_wx\r\n wxWidgets build options: ['--wxpython', '--unicode', '--gtk3']\r\n Configure options: ['--enable-unicode', '--with-gtk=3', '--with-opengl', '--enable-sound', '--enable-graphics_ctx', '--enable-mediactrl', '--enable-display', '--enable-geometry', '--enable-debug_flag', '--enable-optimise', '--disable-debugreport', '--enable-uiactionsim', '--enable-autoidman', '--with-sdl']\r\n /tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/ext/wxWidgets/configure --enable-unicode --with-gtk=3 --with-opengl --enable-sound --enable-graphics_ctx --enable-mediactrl --enable-display --enable-geometry --enable-debug_flag --enable-optimise --disable-debugreport --enable-uiactionsim --enable-autoidman --with-sdl\r\n checking build system type... x86_64-pc-linux-gnu\r\n checking host system type... x86_64-pc-linux-gnu\r\n checking for --disable-gui... no\r\n checking for --enable-monolithic... no\r\n checking for --enable-plugins... no\r\n checking for --without-subdirs... no\r\n checking for --enable-official_build... no\r\n checking for --disable-all-features... no\r\n checking for --enable-universal... no\r\n checking for --enable-nanox... no\r\n checking for --enable-gpe... no\r\n checking for toolkit... gtk\r\n checking for --with-libpng... yes\r\n checking for --with-libjpeg... yes\r\n checking for --with-libtiff... yes\r\n checking for --without-libjbig... no\r\n checking for --without-liblzma... no\r\n checking for --with-libxpm... 
yes\r\n checking for --with-libiconv... yes\r\n checking for --with-libmspack... no\r\n checking for --without-gtkprint... no\r\n checking for --with-gnomevfs... no\r\n checking for --with-libnotify... yes\r\n checking for --with-hildon... no\r\n checking for --with-opengl... yes\r\n checking for --with-dmalloc... no\r\n checking for --with-sdl... yes\r\n checking for --with-regex... yes\r\n checking for --with-zlib... yes\r\n checking for --with-expat... yes\r\n checking for --with-macosx-sdk...\r\n checking for --with-macosx-version-min...\r\n checking for --enable-debug... default\r\n checking for --disable-debug_flag... no\r\n checking for --enable-debug_info... no\r\n checking for --enable-debug_gdb... no\r\n checking for --enable-debug_cntxt... no\r\n checking for --enable-mem_tracing... no\r\n checking for --disable-shared... no\r\n checking for --enable-stl... no\r\n checking for --enable-std_containers... no\r\n checking for --enable-std_iostreams... yes\r\n checking for --enable-std_string... yes\r\n checking for --enable-std_string_conv_in_wxstring... no\r\n checking for --disable-unicode... no\r\n checking for --enable-mslu... no\r\n checking for --enable-utf8... no\r\n checking for --enable-utf8only... no\r\n checking for --enable-extended_rtti... no\r\n checking for --disable-optimise... no\r\n checking for --enable-profile... no\r\n checking for --enable-no_rtti... no\r\n checking for --enable-no_exceptions... no\r\n checking for --enable-permissive... no\r\n checking for --enable-no_deps... no\r\n checking for --disable-vararg_macros... no\r\n checking for --enable-universal_binary... no\r\n checking for --enable-macosx_arch... no\r\n checking for --enable-compat26... no\r\n checking for --disable-compat28... no\r\n checking for --disable-rpath... no\r\n checking for --enable-objc_uniquifying... no\r\n checking for --disable-visibility... no\r\n checking for --disable-tls... no\r\n checking for --enable-intl... yes\r\n checking for --enable-xlocale... yes\r\n checking for --enable-config... yes\r\n checking for --enable-protocols... yes\r\n checking for --enable-ftp... yes\r\n checking for --enable-http... yes\r\n checking for --enable-fileproto... yes\r\n checking for --enable-sockets... yes\r\n checking for --enable-ipv6... no\r\n checking for --enable-ole... yes\r\n checking for --enable-dataobj... yes\r\n checking for --enable-ipc... yes\r\n checking for --enable-baseevtloop... yes\r\n checking for --enable-epollloop... yes\r\n checking for --enable-selectloop... yes\r\n checking for --enable-any... yes\r\n checking for --enable-apple_ieee... yes\r\n checking for --enable-arcstream... yes\r\n checking for --enable-base64... yes\r\n checking for --enable-backtrace... yes\r\n checking for --enable-catch_segvs... yes\r\n checking for --enable-cmdline... yes\r\n checking for --enable-datetime... yes\r\n checking for --enable-debugreport... no\r\n checking for --enable-dialupman... yes\r\n checking for --enable-dynlib... yes\r\n checking for --enable-dynamicloader... yes\r\n checking for --enable-exceptions... yes\r\n checking for --enable-ffile... yes\r\n checking for --enable-file... yes\r\n checking for --enable-filehistory... yes\r\n checking for --enable-filesystem... yes\r\n checking for --enable-fontenum... yes\r\n checking for --enable-fontmap... yes\r\n checking for --enable-fs_archive... yes\r\n checking for --enable-fs_inet... yes\r\n checking for --enable-fs_zip... yes\r\n checking for --enable-fsvolume... yes\r\n checking for --enable-fswatcher... 
yes\r\n checking for --enable-geometry... yes\r\n checking for --enable-log... yes\r\n checking for --enable-longlong... yes\r\n checking for --enable-mimetype... yes\r\n checking for --enable-printfposparam... yes\r\n checking for --enable-snglinst... yes\r\n checking for --enable-sound... yes\r\n checking for --enable-stdpaths... yes\r\n checking for --enable-stopwatch... yes\r\n checking for --enable-streams... yes\r\n checking for --enable-sysoptions... yes\r\n checking for --enable-tarstream... yes\r\n checking for --enable-textbuf... yes\r\n checking for --enable-textfile... yes\r\n checking for --enable-timer... yes\r\n checking for --enable-variant... yes\r\n checking for --enable-zipstream... yes\r\n checking for --enable-url... yes\r\n checking for --enable-protocol... yes\r\n checking for --enable-protocol_http... yes\r\n checking for --enable-protocol_ftp... yes\r\n checking for --enable-protocol_file... yes\r\n checking for --enable-threads... yes\r\n checking for --enable-iniconf... no\r\n checking for --enable-regkey... yes\r\n checking for --enable-docview... yes\r\n checking for --enable-help... yes\r\n checking for --enable-mshtmlhelp... yes\r\n checking for --enable-html... yes\r\n checking for --enable-htmlhelp... yes\r\n checking for --enable-xrc... yes\r\n checking for --enable-aui... yes\r\n checking for --enable-propgrid... yes\r\n checking for --enable-ribbon... yes\r\n checking for --enable-stc... yes\r\n checking for --enable-constraints... yes\r\n checking for --enable-loggui... yes\r\n checking for --enable-logwin... yes\r\n checking for --enable-logdialog... yes\r\n checking for --enable-mdi... yes\r\n checking for --enable-mdidoc... yes\r\n checking for --enable-mediactrl... yes\r\n checking for --enable-gstreamer8... no\r\n checking for --enable-richtext... yes\r\n checking for --enable-postscript... yes\r\n checking for --enable-printarch... yes\r\n checking for --enable-svg... yes\r\n checking for --enable-webkit... yes\r\n checking for --enable-webview... yes\r\n checking for --enable-graphics_ctx... yes\r\n checking for --enable-clipboard... yes\r\n checking for --enable-dnd... yes\r\n checking for --disable-controls... no\r\n checking for --enable-markup... yes\r\n checking for --enable-accel... yes\r\n checking for --enable-animatectrl... yes\r\n checking for --enable-bannerwindow... yes\r\n checking for --enable-artstd... yes\r\n checking for --enable-arttango... auto\r\n checking for --enable-bmpbutton... yes\r\n checking for --enable-bmpcombobox... yes\r\n checking for --enable-button... yes\r\n checking for --enable-calendar... yes\r\n checking for --enable-caret... yes\r\n checking for --enable-checkbox... yes\r\n checking for --enable-checklst... yes\r\n checking for --enable-choice... yes\r\n checking for --enable-choicebook... yes\r\n checking for --enable-collpane... yes\r\n checking for --enable-colourpicker... yes\r\n checking for --enable-combobox... yes\r\n checking for --enable-comboctrl... yes\r\n checking for --enable-commandlinkbutton... yes\r\n checking for --enable-dataviewctrl... yes\r\n checking for --enable-datepick... yes\r\n checking for --enable-detect_sm... yes\r\n checking for --enable-dirpicker... yes\r\n checking for --enable-display... yes\r\n checking for --enable-editablebox... yes\r\n checking for --enable-filectrl... yes\r\n checking for --enable-filepicker... yes\r\n checking for --enable-fontpicker... yes\r\n checking for --enable-gauge... yes\r\n checking for --enable-grid... 
yes\r\n checking for --enable-headerctrl... yes\r\n checking for --enable-hyperlink... yes\r\n checking for --enable-imaglist... yes\r\n checking for --enable-infobar... yes\r\n checking for --enable-listbook... yes\r\n checking for --enable-listbox... yes\r\n checking for --enable-listctrl... yes\r\n checking for --enable-notebook... yes\r\n checking for --enable-notifmsg... yes\r\n checking for --enable-odcombobox... yes\r\n checking for --enable-popupwin... yes\r\n checking for --enable-prefseditor... yes\r\n checking for --enable-radiobox... yes\r\n checking for --enable-radiobtn... yes\r\n checking for --enable-richmsgdlg... yes\r\n checking for --enable-richtooltip... yes\r\n checking for --enable-rearrangectrl... yes\r\n checking for --enable-sash... yes\r\n checking for --enable-scrollbar... yes\r\n checking for --enable-searchctrl... yes\r\n checking for --enable-slider... yes\r\n checking for --enable-spinbtn... yes\r\n checking for --enable-spinctrl... yes\r\n checking for --enable-splitter... yes\r\n checking for --enable-statbmp... yes\r\n checking for --enable-statbox... yes\r\n checking for --enable-statline... yes\r\n checking for --enable-stattext... yes\r\n checking for --enable-statusbar... yes\r\n checking for --enable-taskbaricon... yes\r\n checking for --enable-tbarnative... yes\r\n checking for --enable-textctrl... yes\r\n checking for --enable-timepick... yes\r\n checking for --enable-tipwindow... yes\r\n checking for --enable-togglebtn... yes\r\n checking for --enable-toolbar... yes\r\n checking for --enable-toolbook... yes\r\n checking for --enable-treebook... yes\r\n checking for --enable-treectrl... yes\r\n checking for --enable-treelist... yes\r\n checking for --enable-commondlg... yes\r\n checking for --enable-aboutdlg... yes\r\n checking for --enable-choicedlg... yes\r\n checking for --enable-coldlg... yes\r\n checking for --enable-filedlg... yes\r\n checking for --enable-finddlg... yes\r\n checking for --enable-fontdlg... yes\r\n checking for --enable-dirdlg... yes\r\n checking for --enable-msgdlg... yes\r\n checking for --enable-numberdlg... yes\r\n checking for --enable-splash... yes\r\n checking for --enable-textdlg... yes\r\n checking for --enable-tipdlg... yes\r\n checking for --enable-progressdlg... yes\r\n checking for --enable-wizarddlg... yes\r\n checking for --enable-menus... yes\r\n checking for --enable-miniframe... yes\r\n checking for --enable-tooltips... yes\r\n checking for --enable-splines... yes\r\n checking for --enable-mousewheel... yes\r\n checking for --enable-validators... yes\r\n checking for --enable-busyinfo... yes\r\n checking for --enable-hotkey... auto\r\n checking for --enable-joystick... yes\r\n checking for --enable-metafile... auto\r\n checking for --enable-dragimage... yes\r\n checking for --enable-accessibility... no\r\n checking for --enable-uiactionsim... yes\r\n checking for --enable-dctransform... yes\r\n checking for --enable-webviewwebkit... yes\r\n checking for --enable-palette... yes\r\n checking for --enable-image... yes\r\n checking for --enable-gif... yes\r\n checking for --enable-pcx... yes\r\n checking for --enable-tga... yes\r\n checking for --enable-iff... yes\r\n checking for --enable-pnm... yes\r\n checking for --enable-xpm... yes\r\n checking for --enable-ico_cur... yes\r\n checking for --enable-dccache... yes\r\n checking for --enable-ps-in-msw... yes\r\n checking for --enable-ownerdrawn... yes\r\n checking for --enable-uxtheme... yes\r\n checking for --enable-wxdib... 
yes\r\n checking for --enable-webviewie... yes\r\n checking for --enable-autoidman... yes\r\n checking for gcc... gcc\r\n checking whether the C compiler works... yes\r\n checking for C compiler default output file name... a.out\r\n checking for suffix of executables...\r\n checking whether we are cross compiling... no\r\n checking for suffix of object files... o\r\n checking whether we are using the GNU C compiler... yes\r\n checking whether gcc accepts -g... yes\r\n checking for gcc option to accept ISO C89... none needed\r\n checking whether we are using the Intel C compiler... no\r\n checking how to run the C preprocessor... gcc -E\r\n checking for grep that handles long lines and -e... /bin/grep\r\n checking for egrep... /bin/grep -E\r\n checking whether gcc needs -traditional... no\r\n checking for g++... g++\r\n checking whether we are using the GNU C++ compiler... yes\r\n checking whether g++ accepts -g... yes\r\n checking whether we are using the Intel C++ compiler... no\r\n checking whether we are using the Sun C++ compiler... no\r\n checking for ar... ar\r\n checking for ANSI C header files... yes\r\n checking for sys/types.h... yes\r\n checking for sys/stat.h... yes\r\n checking for stdlib.h... yes\r\n checking for string.h... yes\r\n checking for memory.h... yes\r\n checking for strings.h... yes\r\n checking for inttypes.h... yes\r\n checking for stdint.h... yes\r\n checking for unistd.h... yes\r\n checking for langinfo.h... yes\r\n checking for wchar.h... yes\r\n checking for sys/select.h... yes\r\n checking for cxxabi.h... yes\r\n checking for an ANSI C-conforming const... yes\r\n checking for inline... inline\r\n checking size of short... 2\r\n checking size of void *... 8\r\n checking size of int... 4\r\n checking size of long... 8\r\n checking size of size_t... 8\r\n checking size of long long... 8\r\n checking size of wchar_t... 4\r\n checking for va_copy... yes\r\n checking whether the compiler supports variadic macros... yes\r\n checking for _FILE_OFFSET_BITS value needed for large files... 64\r\n checking if large file support is available... yes\r\n checking for _LARGEFILE_SOURCE value needed for large files... no\r\n checking whether byte ordering is bigendian... no\r\n checking for iostream... yes\r\n checking if C++ compiler supports the explicit keyword... yes\r\n checking for std::wstring in ... yes\r\n checking for std::istream... yes\r\n checking for std::ostream... yes\r\n checking how to run the C++ preprocessor... g++ -E\r\n checking type_traits usability... yes\r\n checking type_traits presence... yes\r\n checking for type_traits... yes\r\n checking for __sync_fetch_and_add and __sync_sub_and_fetch builtins... yes\r\n checking for libraries directories... /usr/lib/x86_64-linux-gnu /usr/lib\r\n checking for cos... no\r\n checking for floor... no\r\n checking if floating point functions link without -lm... no\r\n checking for sin... yes\r\n checking for ceil... yes\r\n checking if floating point functions link with -lm... yes\r\n checking for strtoull... yes\r\n checking for pkg-config... /usr/bin/pkg-config\r\n checking pkg-config is at least version 0.9.0... yes\r\n configure: WARNING: Defaulting to the builtin regex library for Unicode build.\r\n checking for zlib.h >= 1.1.4... yes\r\n checking for zlib.h... (cached) yes\r\n checking for deflate in -lz... yes\r\n checking for png.h > 0.90... yes\r\n checking for png.h... (cached) yes\r\n checking for png_sig_cmp in -lpng... yes\r\n checking for jpeglib.h... 
yes\r\n checking for jpeg_read_header in -ljpeg... yes\r\n checking for tiffio.h... yes\r\n checking for TIFFError in -ltiff... yes\r\n checking for expat.h... yes\r\n checking if expat.h is valid C++ header... yes\r\n checking for XML_ParserCreate in -lexpat... yes\r\n checking for GTK+ version...\r\n checking for pkg-config... /usr/bin/pkg-config\r\n checking for GTK+ - version >= 3.0.0... yes (version 3.22.30)\r\n checking for X11/Xlib.h... yes\r\n checking for X11/XKBlib.h... yes\r\n checking for Xxf86vm... yes\r\n checking for X11/extensions/xf86vmode.h... yes\r\n checking for SM... yes\r\n checking for OpenGL headers... found in /usr/include\r\n checking for GL/gl.h... yes\r\n checking for GL/glu.h... yes\r\n checking for GL... yes\r\n checking for GLU... yes\r\n checking if the linker accepts --version-script... yes\r\n checking for symbols visibility support... yes\r\n checking for broken libstdc++ visibility... no\r\n checking for mode_t... yes\r\n checking for off_t... yes\r\n checking for pid_t... yes\r\n checking for size_t... yes\r\n checking for ssize_t... yes\r\n checking if size_t is unsigned int... no\r\n checking if size_t is unsigned long... yes\r\n checking if wchar_t is separate type... yes\r\n checking for pw_gecos in struct passwd... yes\r\n checking for wcslen... yes\r\n checking for wcsftime... yes\r\n checking for strnlen... yes\r\n checking for wcsdup... yes\r\n checking for wcsnlen... yes\r\n checking for wcscasecmp... yes\r\n checking for wcsncasecmp... yes\r\n checking for mbstate_t... yes\r\n checking for wcsrtombs... yes\r\n checking for snprintf... yes\r\n checking for vsnprintf... yes\r\n checking for vsscanf... yes\r\n checking for vsnprintf declaration... yes\r\n checking if vsnprintf declaration is broken... no\r\n checking for snprintf declaration... yes\r\n checking if snprintf supports positional arguments... yes\r\n checking for vsscanf declaration... yes\r\n checking if vsscanf() declaration is broken... no\r\n checking for putws... no\r\n checking for fputws... yes\r\n checking for wprintf... yes\r\n checking for vswprintf... yes\r\n checking for vswscanf... yes\r\n checking for _vsnwprintf... no\r\n checking for fsync... yes\r\n checking for round... yes\r\n checking for iconv... yes\r\n checking if iconv needs const... no\r\n checking for sigaction... yes\r\n checking for sa_handler type... int\r\n checking for backtrace() in ... checking for library containing backtrace... none required\r\n yes\r\n checking for __cxa_demangle() in ... yes\r\n checking for mkstemp... yes\r\n checking for statfs... yes\r\n checking for statfs declaration... yes\r\n checking for fcntl... yes\r\n checking for setenv... yes\r\n checking for unsetenv... yes\r\n checking for nanosleep... yes\r\n checking for uname... yes\r\n checking for strtok_r... yes\r\n checking for inet_addr... yes\r\n checking for inet_aton... yes\r\n checking for fdopen... yes\r\n checking for sysconf... yes\r\n checking for getpwuid_r... yes\r\n checking for getgrgid_r... yes\r\n checking whether pthreads work with -pthread... yes\r\n checking if more special flags are required for pthreads... no\r\n checking for pthread_setconcurrency... yes\r\n checking for pthread_cleanup_push/pop... yes\r\n checking for sched.h... yes\r\n checking for sched_yield... yes\r\n checking for pthread_attr_getschedpolicy... yes\r\n checking for pthread_attr_setschedparam... yes\r\n checking for sched_get_priority_max... yes\r\n checking for pthread_cancel... yes\r\n checking for pthread_mutex_timedlock... 
yes\r\n checking for pthread_attr_setstacksize... yes\r\n checking for pthread_mutexattr_t... yes\r\n checking for pthread_mutexattr_settype declaration... yes\r\n checking for abi::__forced_unwind() in ... yes\r\n checking for localtime_r... yes\r\n checking for gmtime_r... yes\r\n checking how many arguments gethostbyname_r() takes... six\r\n checking how many arguments getservbyname_r() takes... six\r\n checking for dlopen... no\r\n checking for dlopen in -ldl... yes\r\n checking for dlerror... no\r\n checking for dlerror in -ldl... yes\r\n checking for sys/inotify.h... yes\r\n checking for SNDCTL_DSP_SPEED in sys/soundcard.h... yes\r\n checking for SDL... configure: SDL 2.0 not available. Falling back to 1.2.\r\n checking for sdl-config... no\r\n checking for SDL - version >= 1.2.0... no\r\n *** The sdl-config script installed by SDL could not be found\r\n *** If SDL was installed in PREFIX, make sure PREFIX/bin is in\r\n *** your path, or set the SDL_CONFIG environment variable to the\r\n *** full path to sdl-config.\r\n checking for GTKPRINT... yes\r\n checking for LIBNOTIFY... checking for LIBNOTIFY... configure: WARNING: libnotify not found, wxNotificationMessage will use generic implementation.\r\n checking for complete xlocale... no\r\n checking for sys/epoll.h... yes\r\n checking for gettimeofday... yes\r\n checking whether gettimeofday takes two arguments... yes\r\n checking for timezone variable in ... timezone\r\n checking for localtime... yes\r\n checking for tm_gmtoff in struct tm... yes\r\n checking for setpriority... yes\r\n checking for socket... yes\r\n checking what is the type of the third argument of getsockname... socklen_t\r\n checking what is the type of the fifth argument of getsockopt... socklen_t\r\n checking for linux/joystick.h... yes\r\n checking for python... /home/blackmonlab/anaconda3/envs/simbaenv/bin/python\r\n checking for WEBKIT... configure: WARNING: webkit2gtk not found, falling back to webkitgtk\r\n checking for WEBKIT... configure: WARNING: webkitgtk not found.\r\n configure: WARNING: WebKit not available, disabling wxWebView\r\n checking for CAIRO... yes\r\n checking for cairo_push_group... yes\r\n checking for GST... configure: WARNING: GStreamer 1.0 not available, falling back to 0.10\r\n checking for GST... 
configure: WARNING: GStreamer 0.10 not available, falling back to 0.8\r\n configure: WARNING: wxMediaCtrl can't be built because GStreamer not available\r\n configure: error: wxMediaCtrl was explicitly requested but can't be built.\r\n \r\n Fix the problems reported above or don't use --enable-mediactrl configure option.\r\n \r\n Error running configure\r\n ERROR: failed building wxWidgets\r\n Traceback (most recent call last):\r\n File \"build.py\", line 1321, in cmd_build_wx\r\n wxbuild.main(wxDir(), build_options)\r\n File \"/tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/buildtools/build_wxwidgets.py\", line 375, in main\r\n \"Error running configure\")\r\n File \"/tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/buildtools/build_wxwidgets.py\", line 85, in exitIfError\r\n raise builder.BuildError(msg)\r\n buildtools.builder.BuildError: Error running configure\r\n Finished command: build_wx (0m7.750s)\r\n Finished command: build (0m7.751s)\r\n Command '\"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\n ERROR: Failed building wheel for wxpython\r\n Running setup.py clean for wxpython\r\nFailed to build wxpython\r\nInstalling collected packages: shapely, wxpython, simba-uw-tf-dev\r\n Running setup.py install for wxpython ... error\r\n ERROR: Command errored out with exit status 1:\r\n command: /home/blackmonlab/anaconda3/envs/simbaenv/bin/python -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-0yd87w4f/install-record.txt --single-version-externally-managed --compile --install-headers /home/blackmonlab/anaconda3/envs/simbaenv/include/python3.6m/wxpython\r\n cwd: /tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/\r\n Complete output (525 lines):\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:720: UserWarning: Usage of dash-separated 'license-file' will not be supported in future versions. Please use the underscore name 'license_file' instead\r\n % (opt, underscore_opt)\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:294: DistDeprecationWarning: use_2to3 is ignored.\r\n warnings.warn(f\"{attr} is ignored.\", DistDeprecationWarning)\r\n running install\r\n running build\r\n WARNING: Building this way assumes that all generated files have been\r\n generated already. If that is not the case then use build.py directly\r\n to generate the source and perform the build stage. 
You can use\r\n --skip-build with the bdist_* or install commands to avoid this\r\n message and the wxWidgets and Phoenix build steps in the future.\r\n \r\n \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build\r\n Will build using: \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\"\r\n 3.6.10 |Anaconda, Inc.| (default, May 8 2020, 02:54:21)\r\n [GCC 7.3.0]\r\n Python's architecture is 64bit\r\n cfg.VERSION: 4.0.4\r\n \r\n Running command: build\r\n Running command: build_wx\r\n wxWidgets build options: ['--wxpython', '--unicode', '--gtk3']\r\n Configure options: ['--enable-unicode', '--with-gtk=3', '--with-opengl', '--enable-sound', '--enable-graphics_ctx', '--enable-mediactrl', '--enable-display', '--enable-geometry', '--enable-debug_flag', '--enable-optimise', '--disable-debugreport', '--enable-uiactionsim', '--enable-autoidman', '--with-sdl']\r\n /tmp/pip-install-v9j3pe3f/wxpython_f456f9634ae548d3a4861fa0f1763df0/ext/wxWidgets/configure --enable-unicode --with-gtk=3 --with-opengl --enable-sound --enable-graphics_ctx --enable-mediactrl --enable-display --enable-geometry --enable-debug_flag --enable-optimise --disable-debugreport --enable-uiactionsim --enable-autoidman --with-sdl\r\n checking build system type... x86_64-pc-linux-gnu\r\n checking host system type... x86_64-pc-linux-gnu\r\n checking for --disable-gui... no\r\n checking for --enable-monolithic... no\r\n checking for --enable-plugins... no\r\n checking for --without-subdirs... no\r\n checking for --enable-official_build... no\r\n checking for --disable-all-features... no\r\n checking for --enable-universal... no\r\n checking for --enable-nanox... no\r\n checking for --enable-gpe... no\r\n checking for toolkit... gtk\r\n checking for --with-libpng... yes\r\n checking for --with-libjpeg... yes\r\n checking for --with-libtiff... yes\r\n checking for --without-libjbig... no\r\n checking for --without-liblzma... no\r\n checking for --with-libxpm... yes\r\n checking for --with-libiconv... yes\r\n checking for --with-libmspack... no\r\n checking for --without-gtkprint... no\r\n checking for --with-gnomevfs... no\r\n checking for --with-libnotify... yes\r\n checking for --with-hildon... no\r\n checking for --with-opengl... yes\r\n checking for --with-dmalloc... no\r\n checking for --with-sdl... yes\r\n checking for --with-regex... yes\r\n checking for --with-zlib... yes\r\n checking for --with-expat... yes\r\n checking for --with-macosx-sdk...\r\n checking for --with-macosx-version-min...\r\n checking for --enable-debug... default\r\n checking for --disable-debug_flag... no\r\n checking for --enable-debug_info... no\r\n checking for --enable-debug_gdb... no\r\n checking for --enable-debug_cntxt... no\r\n checking for --enable-mem_tracing... no\r\n checking for --disable-shared... no\r\n checking for --enable-stl... no\r\n checking for --enable-std_containers... no\r\n checking for --enable-std_iostreams... yes\r\n checking for --enable-std_string... yes\r\n checking for --enable-std_string_conv_in_wxstring... no\r\n checking for --disable-unicode... no\r\n checking for --enable-mslu... no\r\n checking for --enable-utf8... no\r\n checking for --enable-utf8only... no\r\n checking for --enable-extended_rtti... no\r\n checking for --disable-optimise... no\r\n checking for --enable-profile... no\r\n checking for --enable-no_rtti... no\r\n checking for --enable-no_exceptions... no\r\n checking for --enable-permissive... no\r\n checking for --enable-no_deps... no\r\n checking for --disable-vararg_macros... 
no\r\n [... several hundred configure check lines, identical to the first configure run above, omitted for brevity ...]\r\n checking what is the type of the third argument of getsockname... 
socklen_t\r\n checking what is the type of the fifth argument of getsockopt... socklen_t\r\n [... remaining checks and the same SDL/WebKit/GStreamer warnings, wxMediaCtrl configure error, and build traceback as in the first run above; build_wx finished in 0m7.773s ...]\r\n Command '\"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: [same setup.py install command as shown above] Check the logs for full command output.\r\n\r\n```\r\n", + "user": "AnnabelPerry", + "reaction_cnt": 0, + "created_at": "2021-12-20T16:39:56Z", + "updated_at": "2021-12-24T11:06:10Z", + "author": "AnnabelPerry", + "comments": [ + { + "body": "Hi @AnnabelPerry! First, SimBA has no SQL dependencies and does not currently store data in SQL format, so I am not sure why it would try to connect to a SQL server. If you see those errors again: terminate your session, and make sure you are installing SimBA in a fresh conda environment. \r\n\r\nThe rest of the errors, however, seem very similar to the errors reported by another user this morning, who is also working in Linux or MacOS. https://github.com/sgoldenlab/simba/issues/154\r\n\r\nCan you try to install simba with `pip install simba-uw-tf-dev --no-deps` and see if that fixes it? It seems related to MacOS/Linux and the installation of wxpython. 
Also make sure you are running Python 3.6.10 ", + "created_at": "2021-12-20T17:35:58Z", + "author": "sronilsson" + }, + { + "body": "Thank you for your rapid response!\r\nSo I'd actually tried re-installing SimBA with no dependencies a couple days ago. It installed properly, but I encountered an issue in the GUI where it would fail with an error when I tried to crop videos. I tried replicating the error just now, but it appears to have installed successfully this time. Here's what I did:\r\nI removed the old environment and reran these steps (with the no dependencies flag):\r\n```\r\nconda create -n simbaenv python=3.6.10 \r\n\r\nconda activate simbaenv \r\n\r\npip install simba-uw-tf-dev --no-deps\r\n\r\npip uninstall shapely \r\n\r\nconda install -c conda-forge shapely \r\n```\r\n\r\nI then ran the ```simba``` command, getting this error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/simba\", line 5, in <module>\r\n from simba.SimBA import main\r\n File \"/home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/simba/SimBA.py\", line 6, in <module>\r\n import seaborn as sns\r\nModuleNotFoundError: No module named 'seaborn'\r\n```\r\n\r\nI ran ```pip install seaborn```, getting this error:\r\n```\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 0.88.8 requires cefpython3==66.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash==1.14.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-color-picker==0.0.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-colorscales==0.0.4, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-core-components==1.10.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-html-components==1.0.3, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dtreeviz==0.8.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires eli5==0.10.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires graphviz==0.11, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires h5py==2.9.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires imblearn==0.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires imgaug==0.4.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires imutils==0.5.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires numba==0.48.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires numexpr==2.6.9, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires opencv-python==3.4.5.20, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires plotly==4.9.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires pyarrow==0.17.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires pyyaml==5.3.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires scikit-image==0.14.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires scikit-learn==0.22.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires shap==0.35.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires statsmodels==0.9.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires tables==3.6.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires tabulate==0.8.3, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires tqdm==4.30.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires wxpython==4.0.4, which is not installed.\r\nsimba-uw-tf-dev 
0.88.8 requires xgboost==0.90, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires yellowbrick==0.9.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires matplotlib==3.0.3, but you have matplotlib 3.3.4 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires numpy==1.18.1, but you have numpy 1.19.5 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires pandas==0.25.3, but you have pandas 1.1.5 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires Pillow==5.4.1, but you have pillow 8.4.0 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires scipy==1.1.0, but you have scipy 1.5.4 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires seaborn==0.9.0, but you have seaborn 0.11.2 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires shapely==1.7, but you have shapely 1.7.1 which is incompatible.\r\n```\r\n\r\nI pip installed all the requested package versions. All except ```wxpython``` installed without error. ```pip install wxpython==4.0.4``` yielded the error at the bottom of the post (incredibly lengthy, so I included it outside the flow of the text so you could see the rest of my message)\r\n\r\nI then ran the ```simba``` command, successfully entering the GUI. I have yet to encounter any issues preprocessing videos, but if the ```wxpython``` error becomes a problem I'll open a new issue! Thanks again for the help!\r\n\r\n```pip install wxpython==4.0.4``` error:\r\n```\r\nCollecting wxpython==4.0.4\r\n Using cached wxPython-4.0.4.tar.gz (68.8 MB)\r\nRequirement already satisfied: six in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from wxpython==4.0.4) (1.16.0)\r\nRequirement already satisfied: Pillow in /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages (from wxpython==4.0.4) (5.4.1)\r\nBuilding wheels for collected packages: wxpython\r\n Building wheel for wxpython (setup.py) ... error\r\n ERROR: Command errored out with exit status 1:\r\n command: /home/blackmonlab/anaconda3/envs/simbaenv/bin/python -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' bdist_wheel -d /tmp/pip-wheel-ee4j772p\r\n cwd: /tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/\r\n Complete output (525 lines):\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:720: UserWarning: Usage of dash-separated 'license-file' will not be supported in future versions. Please use the underscore name 'license_file' instead\r\n % (opt, underscore_opt)\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:294: DistDeprecationWarning: use_2to3 is ignored.\r\n warnings.warn(f\"{attr} is ignored.\", DistDeprecationWarning)\r\n running bdist_wheel\r\n running build\r\n WARNING: Building this way assumes that all generated files have been\r\n generated already. If that is not the case then use build.py directly\r\n to generate the source and perform the build stage. 
You can use\r\n --skip-build with the bdist_* or install commands to avoid this\r\n message and the wxWidgets and Phoenix build steps in the future.\r\n \r\n \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build\r\n Will build using: \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\"\r\n 3.6.10 |Anaconda, Inc.| (default, May 8 2020, 02:54:21)\r\n [GCC 7.3.0]\r\n Python's architecture is 64bit\r\n cfg.VERSION: 4.0.4\r\n \r\n Running command: build\r\n Running command: build_wx\r\n wxWidgets build options: ['--wxpython', '--unicode', '--gtk3']\r\n Configure options: ['--enable-unicode', '--with-gtk=3', '--with-opengl', '--enable-sound', '--enable-graphics_ctx', '--enable-mediactrl', '--enable-display', '--enable-geometry', '--enable-debug_flag', '--enable-optimise', '--disable-debugreport', '--enable-uiactionsim', '--enable-autoidman', '--with-sdl']\r\n /tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/ext/wxWidgets/configure --enable-unicode --with-gtk=3 --with-opengl --enable-sound --enable-graphics_ctx --enable-mediactrl --enable-display --enable-geometry --enable-debug_flag --enable-optimise --disable-debugreport --enable-uiactionsim --enable-autoidman --with-sdl\r\n checking build system type... x86_64-pc-linux-gnu\r\n checking host system type... x86_64-pc-linux-gnu\r\n checking for --disable-gui... no\r\n checking for --enable-monolithic... no\r\n checking for --enable-plugins... no\r\n checking for --without-subdirs... no\r\n checking for --enable-official_build... no\r\n checking for --disable-all-features... no\r\n checking for --enable-universal... no\r\n checking for --enable-nanox... no\r\n checking for --enable-gpe... no\r\n checking for toolkit... gtk\r\n checking for --with-libpng... yes\r\n checking for --with-libjpeg... yes\r\n checking for --with-libtiff... yes\r\n checking for --without-libjbig... no\r\n checking for --without-liblzma... no\r\n checking for --with-libxpm... yes\r\n checking for --with-libiconv... yes\r\n checking for --with-libmspack... no\r\n checking for --without-gtkprint... no\r\n checking for --with-gnomevfs... no\r\n checking for --with-libnotify... yes\r\n checking for --with-hildon... no\r\n checking for --with-opengl... yes\r\n checking for --with-dmalloc... no\r\n checking for --with-sdl... yes\r\n checking for --with-regex... yes\r\n checking for --with-zlib... yes\r\n checking for --with-expat... yes\r\n checking for --with-macosx-sdk...\r\n checking for --with-macosx-version-min...\r\n checking for --enable-debug... default\r\n checking for --disable-debug_flag... no\r\n checking for --enable-debug_info... no\r\n checking for --enable-debug_gdb... no\r\n checking for --enable-debug_cntxt... no\r\n checking for --enable-mem_tracing... no\r\n checking for --disable-shared... no\r\n checking for --enable-stl... no\r\n checking for --enable-std_containers... no\r\n checking for --enable-std_iostreams... yes\r\n checking for --enable-std_string... yes\r\n checking for --enable-std_string_conv_in_wxstring... no\r\n checking for --disable-unicode... no\r\n checking for --enable-mslu... no\r\n checking for --enable-utf8... no\r\n checking for --enable-utf8only... no\r\n checking for --enable-extended_rtti... no\r\n checking for --disable-optimise... no\r\n checking for --enable-profile... no\r\n checking for --enable-no_rtti... no\r\n checking for --enable-no_exceptions... no\r\n checking for --enable-permissive... no\r\n checking for --enable-no_deps... no\r\n checking for --disable-vararg_macros... 
no\r\n [... several hundred configure check lines, identical to the configure runs above, omitted for brevity ...]\r\n checking what is the type of the third argument of getsockname... 
socklen_t\r\n checking what is the type of the fifth argument of getsockopt... socklen_t\r\n checking for linux/joystick.h... yes\r\n checking for python... /home/blackmonlab/anaconda3/envs/simbaenv/bin/python\r\n checking for WEBKIT... configure: WARNING: webkit2gtk not found, falling back to webkitgtk\r\n checking for WEBKIT... configure: WARNING: webkitgtk not found.\r\n configure: WARNING: WebKit not available, disabling wxWebView\r\n checking for CAIRO... yes\r\n checking for cairo_push_group... yes\r\n checking for GST... configure: WARNING: GStreamer 1.0 not available, falling back to 0.10\r\n checking for GST... configure: WARNING: GStreamer 0.10 not available, falling back to 0.8\r\n configure: WARNING: wxMediaCtrl can't be built because GStreamer not available\r\n configure: error: wxMediaCtrl was explicitly requested but can't be built.\r\n \r\n Fix the problems reported above or don't use --enable-mediactrl configure option.\r\n \r\n Error running configure\r\n ERROR: failed building wxWidgets\r\n Traceback (most recent call last):\r\n File \"build.py\", line 1321, in cmd_build_wx\r\n wxbuild.main(wxDir(), build_options)\r\n File \"/tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/buildtools/build_wxwidgets.py\", line 375, in main\r\n \"Error running configure\")\r\n File \"/tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/buildtools/build_wxwidgets.py\", line 85, in exitIfError\r\n raise builder.BuildError(msg)\r\n buildtools.builder.BuildError: Error running configure\r\n Finished command: build_wx (0m8.126s)\r\n Finished command: build (0m8.126s)\r\n Command '\"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\n ERROR: Failed building wheel for wxpython\r\n Running setup.py clean for wxpython\r\nFailed to build wxpython\r\nInstalling collected packages: wxpython\r\n Running setup.py install for wxpython ... error\r\n ERROR: Command errored out with exit status 1:\r\n command: /home/blackmonlab/anaconda3/envs/simbaenv/bin/python -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-_jggwalf/install-record.txt --single-version-externally-managed --compile --install-headers /home/blackmonlab/anaconda3/envs/simbaenv/include/python3.6m/wxpython\r\n cwd: /tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/\r\n Complete output (525 lines):\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:720: UserWarning: Usage of dash-separated 'license-file' will not be supported in future versions. 
Please use the underscore name 'license_file' instead\r\n % (opt, underscore_opt)\r\n /home/blackmonlab/anaconda3/envs/simbaenv/lib/python3.6/site-packages/setuptools/dist.py:294: DistDeprecationWarning: use_2to3 is ignored.\r\n warnings.warn(f\"{attr} is ignored.\", DistDeprecationWarning)\r\n running install\r\n running build\r\n WARNING: Building this way assumes that all generated files have been\r\n generated already. If that is not the case then use build.py directly\r\n to generate the source and perform the build stage. You can use\r\n --skip-build with the bdist_* or install commands to avoid this\r\n message and the wxWidgets and Phoenix build steps in the future.\r\n \r\n \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build\r\n Will build using: \"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\"\r\n 3.6.10 |Anaconda, Inc.| (default, May 8 2020, 02:54:21)\r\n [GCC 7.3.0]\r\n Python's architecture is 64bit\r\n cfg.VERSION: 4.0.4\r\n \r\n Running command: build\r\n Running command: build_wx\r\n wxWidgets build options: ['--wxpython', '--unicode', '--gtk3']\r\n Configure options: ['--enable-unicode', '--with-gtk=3', '--with-opengl', '--enable-sound', '--enable-graphics_ctx', '--enable-mediactrl', '--enable-display', '--enable-geometry', '--enable-debug_flag', '--enable-optimise', '--disable-debugreport', '--enable-uiactionsim', '--enable-autoidman', '--with-sdl']\r\n /tmp/pip-install-2l9g9bkk/wxpython_2695da00068440fd9b9c0b88c3250ba0/ext/wxWidgets/configure --enable-unicode --with-gtk=3 --with-opengl --enable-sound --enable-graphics_ctx --enable-mediactrl --enable-display --enable-geometry --enable-debug_flag --enable-optimise --disable-debugreport --enable-uiactionsim --enable-autoidman --with-sdl\r\n checking build system type... x86_64-pc-linux-gnu\r\n checking host system type... x86_64-pc-linux-gnu\r\n checking for --disable-gui... no\r\n checking for --enable-monolithic... no\r\n checking for --enable-plugins... no\r\n checking for --without-subdirs... no\r\n checking for --enable-official_build... no\r\n checking for --disable-all-features... no\r\n checking for --enable-universal... no\r\n checking for --enable-nanox... no\r\n checking for --enable-gpe... no\r\n checking for toolkit... gtk\r\n checking for --with-libpng... yes\r\n checking for --with-libjpeg... yes\r\n checking for --with-libtiff... yes\r\n checking for --without-libjbig... no\r\n checking for --without-liblzma... no\r\n checking for --with-libxpm... yes\r\n checking for --with-libiconv... yes\r\n checking for --with-libmspack... no\r\n checking for --without-gtkprint... no\r\n checking for --with-gnomevfs... no\r\n checking for --with-libnotify... yes\r\n checking for --with-hildon... no\r\n checking for --with-opengl... yes\r\n checking for --with-dmalloc... no\r\n checking for --with-sdl... yes\r\n checking for --with-regex... yes\r\n checking for --with-zlib... yes\r\n checking for --with-expat... yes\r\n checking for --with-macosx-sdk...\r\n checking for --with-macosx-version-min...\r\n checking for --enable-debug... default\r\n checking for --disable-debug_flag... no\r\n checking for --enable-debug_info... no\r\n checking for --enable-debug_gdb... no\r\n checking for --enable-debug_cntxt... no\r\n checking for --enable-mem_tracing... no\r\n checking for --disable-shared... no\r\n checking for --enable-stl... no\r\n checking for --enable-std_containers... no\r\n checking for --enable-std_iostreams... yes\r\n checking for --enable-std_string... 
yes\r\n [... remaining configure checks for this second build attempt omitted: identical, line for line, to the first configure run above ...]\r\n configure: WARNING: wxMediaCtrl can't be built because GStreamer not available\r\n configure: error: wxMediaCtrl was explicitly requested but can't be built.\r\n \r\n Fix the problems reported above or don't use --enable-mediactrl configure option.\r\n \r\n Error running configure\r\n ERROR: failed building wxWidgets\r\n [... Python traceback omitted: identical to the traceback from the first build attempt above ...]\r\n buildtools.builder.BuildError: Error running configure\r\n Finished command: build_wx (0m7.750s)\r\n Finished command: build (0m7.751s)\r\n Command '\"/home/blackmonlab/anaconda3/envs/simbaenv/bin/python\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: [same setup.py install command as quoted above] Check the logs for full command output.\r\n```\r\n", + "created_at": "2021-12-21T15:26:10Z", + "author": "AnnabelPerry" + }, + { + "body": "Hi @AnnabelPerry , I just had this same issue this week. It seems wxpython is still a pain to work with.\r\nMy recollection was that conda installing wxpython might have helped.
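For reference, the configure log above fails because wxMediaCtrl was requested but no GStreamer development files were found (1.0, 0.10 and 0.8 were all probed and missing), and SDL was absent as well. A sketch of the usual workarounds on Ubuntu 18.04 follows; the apt package names and the wheel index URL are assumptions drawn from the wxPython install docs, not from this thread:

```
# Option 1: give configure the media/SDL headers it is probing for, then retry the pip build
sudo apt-get install libgstreamer1.0-dev libgstreamer-plugins-base1.0-dev libsdl1.2-dev

# Option 2: skip the source build entirely with a prebuilt conda-forge package
conda install -c conda-forge wxpython

# Option 3: pip install a prebuilt Linux wheel from the wxPython extras index
pip install -U -f https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-18.04 wxPython
```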
See issue here:\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/154", + "created_at": "2021-12-24T11:06:09Z", + "author": "catubc" + } + ] + }, + { + "title": "Duplication of animal from SLEAP .slp or .csv file", + "body": "**Describe the bug**\r\nHello\r\nI am trying to label behavior for an mp4 video and I don't see the video panel showing up as in the simba instructions.\r\nIs this normal? There is an attribute error in the dataframe, and it's not clear where the error is coming from; perhaps tkinter?\r\n\r\nThanks so much\r\n\r\nUbuntu 18.04,\r\nconda tkinter version:\r\n`tk 8.6.10 hbc83047_0 anaconda`\r\n\r\n![simba1](https://user-images.githubusercontent.com/4267452/146764729-17094a6f-84a9-4ff4-8678-24f0c871209d.png)\r\n\r\n\r\n[Edit:] I think I might be missing the .csv generation step.", + "user": "catubc", + "reaction_cnt": 0, + "created_at": "2021-12-20T12:05:35Z", + "updated_at": "2021-12-24T12:27:04Z", + "author": "catubc", + "comments": [ + { + "body": "Ok, so it seems that I had to manually move the .csv files to get past this step. I had assumed simba would extract the .csv data from the .slp file and populate it.\r\n\r\nBut I'm still getting an error due to the naming of the animals. \r\n\r\n```\r\n(simba3) cat@cat-Precision-T3610:~$ simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 3924, in \r\n button_labelaggression = Button(label_labelaggression, text='Select video (create new video annotation)',command= lambda:choose_folder(self.projectconfigini))\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 364, in choose_folder\r\n MainInterface()\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 178, in __init__\r\n load_frame(0, self.window, self.fbox, )\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 441, in load_frame\r\n currAnimal = currDf.loc[currDf.index[current_frame_number], [currXheader, currYheader]]\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 1418, in __getitem__\r\n return self._getitem_tuple(key)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 805, in _getitem_tuple\r\n return self._getitem_lowerdim(tup)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 961, in _getitem_lowerdim\r\n return getattr(section, self.name)[new_key]\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 1424, in __getitem__\r\n return self._getitem_axis(maybe_callable, axis=axis)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 1839, in _getitem_axis\r\n return self._getitem_iterable(key, axis=axis)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 1133, in _getitem_iterable\r\n keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 
1092, in _get_listlike_indexer\r\n keyarr, indexer, o._get_axis_number(axis), raise_missing=raise_missing\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/indexing.py\", line 1177, in _validate_read_indexer\r\n key=key, axis=self.obj._get_axis_name(axis)\r\nKeyError: \"None of [Index(['female_nose_1_2_x', 'female_nose_1_2_y'], dtype='object')] are in the [index]\"\r\n\r\n```\r\n\r\nAnd here's the top of the .csv:\r\n```\r\n\r\nscorer,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi\r\nbodypart,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi\r\ncoords,female_nose_x,female_nose_y,f
emale_nose_p,female_spine1_x,female_spine1_y,female_spine1_p,female_spine2_x,female_spine2_y,female_spine2_p,female_spine3_x,female_spine3_y,female_spine3_p,female_spine4_x,female_spine4_y,female_spine4_p,female_spine5_x,female_spine5_y,female_spine5_p,female_nose_x,female_nose_y,female_nose_p,female_spine1_x,female_spine1_y,female_spine1_p,female_spine2_x,female_spine2_y,female_spine2_p,female_spine3_x,female_spine3_y,female_spine3_p,female_spine4_x,female_spine4_y,female_spine4_p,female_spine5_x,female_spine5_y,female_spine5_p,male_nose_x,male_nose_y,male_nose_p,male_spine1_x,male_spine1_y,male_spine1_p,male_spine2_x,male_spine2_y,male_spine2_p,male_spine3_x,male_spine3_y,male_spine3_p,male_spine4_x,male_spine4_y,male_spine4_p,male_spine5_x,male_spine5_y,male_spine5_p,pup1_nose_x,pup1_nose_y,pup1_nose_p,pup1_spine1_x,pup1_spine1_y,pup1_spine1_p,pup1_spine2_x,pup1_spine2_y,pup1_spine2_p,pup1_spine3_x,pup1_spine3_y,pup1_spine3_p,pup1_spine4_x,pup1_spine4_y,pup1_spine4_p,pup1_spine5_x,pup1_spine5_y,pup1_spine5_p,pup2_nose_x,pup2_nose_y,pup2_nose_p,pup2_spine1_x,pup2_spine1_y,pup2_spine1_p,pup2_spine2_x,pup2_spine2_y,pup2_spine2_p,pup2_spine3_x,pup2_spine3_y,pup2_spine3_p,pup2_spine4_x,pup2_spine4_y,pup2_spine4_p,pup2_spine5_x,pup2_spine5_y,pup2_spine5_p,pup3_nose_x,pup3_nose_y,pup3_nose_p,pup3_spine1_x,pup3_spine1_y,pup3_spine1_p,pup3_spine2_x,pup3_spine2_y,pup3_spine2_p,pup3_spine3_x,pup3_spine3_y,pup3_spine3_p,pup3_spine4_x,pup3_spine4_y,pup3_spine4_p,pup3_spine5_x,pup3_spine5_y,pup3_spine5_p,pup4_nose_x,pup4_nose_y,pup4_nose_p,pup4_spine1_x,pup4_spine1_y,pup4_spine1_p,pup4_spine2_x,pup4_spine2_y,pup4_spine2_p,pup4_spine3_x,pup4_spine3_y,pup4_spine3_p,pup4_spine4_x,pup4_spine4_y,pup4_spine4_p,pup4_spine5_x,pup4_spine5_y,pup4_spine5_p\r\n0,496.2355041503906,220.567626953125,0.9527365565299988,500.4596252441406,263.92752075195307,0.8128677010536194,503.2895812988281,292.5050964355469,0.711251437664032,500.05694580078125,320.3348693847656,0.6614666581153871,503.6937255859375,343.82562255859375,0.3003198206424713,0.0,0.0,0.0,496.2355041503906,220.567626953125,0.9527365565299988,500.4596252441406,263.92752075195307,0.8128677010536194,503.2895812988281,292.5050964355469,0.711251437664032,500.05694580078125,320.3348693847656,0.6614666581153871,503.6937255859375,343.82562255859375,0.3003198206424713,0.0,0.0,0.0,0.0,0.0,0.0,535.5052490234375,379.9781188964844,0.2975809574127197,508.6451110839844,383.20098876953125,0.5648311972618103,483.367919921875,384.58819580078125,0.6478815674781799,451.7007446289063,391.420654296875,0.7451946139335632,423.4447937011719,399.6026611328125,0.9182761311531068,648.0948486328125,515.2940063476561,0.2229043692350388,636.2654418945312,516.0468139648439,0.8575764894485474,612.0125732421875,508.645751953125,0.7609681487083435,591.1453857421875,500.4262390136719,0.832747220993042,571.529541015625,495.3873901367188,0.8492857813835144,548.5122680664061,487.8362121582031,0.8720636367797852,600.7646484375,575.720458984375,0.5423446297645569,583.8673706054688,572.3936157226562,0.7516449689865112,563.8768920898438,567.9528198242188,0.7206470966339111,543.586669921875,556.2380981445311,0.7213824987411499,527.6947631835939,543.9876098632811,0.7564922571182251,515.7238159179689,524.171630859375,0.7290070056915283,384.0461730957031,535.8851928710939,0.8675850033760071,396.2161865234375,559.5531005859375,0.8782817721366882,412.5365295410156,571.8739013671875,0.8699088096618652,436.2445373535156,579.6212158203125,0.7766255736351013,456.49560546875,579.3140869140625,0.883823573589325
,475.4640197753906,575.2548828125,0.9269698858261108,336.589599609375,543.6217651367189,0.8530907034873962,307.4103698730469,560.0713500976561,0.7831662893295288,288.1627502441406,564.1406860351562,0.8259077072143555,268.6378173828125,567.1624755859375,0.8390766978263855,251.97784423828125,563.4893188476562,0.9189422726631165,235.6845550537109,556.4923095703125,0.9046114683151244\r\n1,496.2879638671875,220.63107299804688,0.9547415375709534,500.44415283203125,264.14016723632807,0.8200687766075134,500.6158142089844,295.5201721191406,0.7471846938133241,499.6556701660156,320.661865234375,0.6593335270881653,503.5816650390625,344.24212646484375,0.28254279494285583,0.0,0.0,0.0,496.2879638671875,220.63107299804688,0.9547415375709534,500.44415283203125,264.14016723632807,0.8200687766075134,500.6158142089844,295.5201721191406,0.7471846938133241,499.6556701660156,320.661865234375,0.6593335270881653,503.5816650390625,344.24212646484375,0.28254279494285583,0.0,0.0,0.0,0.0,0.0,0.0,535.52880859375,379.43389892578125,0.3010869026184082,508.5622863769531,380.3068542480469,0.5687392354011536,483.4601440429688,384.4634704589844,0.6695404052734375,451.8765563964844,391.38427734375,0.7627116441726685,423.4925231933594,399.610595703125,0.9281686544418336,0.0,0.0,0.0,636.1993408203125,516.1194458007811,0.8542495369911194,612.2576904296875,508.6696472167969,0.7529775500297546,591.4818725585938,503.501708984375,0.845646858215332,571.4248046875,495.66845703125,0.8548214435577393,548.2125244140625,488.059326171875,0.8840513825416565,596.8784790039062,579.6757202148438,0.4539488852024078,580.1005249023438,575.897705078125,0.8403472304344177,560.4075317382811,567.7429809570312,0.7307121157646179,543.7866821289061,555.66943359375,0.788873016834259,527.8856201171875,543.5237426757811,0.8062229752540588,515.6806030273439,523.9407958984375,0.7190098762512207,380.3003845214844,536.2025146484375,0.8638001084327698,395.5709228515625,559.7769165039061,0.8765522241592407,412.270263671875,571.87744140625,0.8725616931915283,436.1148071289063,576.8961791992188,0.7829010486602783,456.60833740234375,576.468994140625,0.9018959403038024,475.4982604980469,575.1974487304688,0.9149670600891112,336.52490234375,543.4237670898439,0.8464635014533997,304.5652160644531,559.7457885742189,0.7898238301277161,288.0775146484375,563.6826171875,0.8300068378448486,268.6436767578125,564.4812622070312,0.8552283048629761,251.9909210205078,563.39990234375,0.9223942160606384,235.6182403564453,556.50244140625,0.914698362350464\r\n2,496.2433776855469,220.63575744628903,0.9665535688400269,499.9832763671875,264.24392700195307,0.8199657201766968,500.14794921875,292.3388671875,0.7738865613937378,499.8458557128906,319.84124755859375,0.6679050326347351,500.36553955078125,340.48931884765625,0.25780370831489563,0.0,0.0,0.0,496.2433776855469,220.63575744628903,0.9665535688400269,499.9832763671875,264.24392700195307,0.8199657201766968,500.14794921875,292.3388671875,0.7738865613937378,499.8458557128906,319.84124755859375,0.6679050326347351,500.36553955078125,340.48931884765625,0.25780370831489563,0.0,0.0,0.0,0.0,0.0,0.0,535.7659301757811,376.2238159179688,0.3028284311294556,508.7102966308594,379.6003112792969,0.6317318081855774,483.5924377441406,387.3358154296875,0.6752269864082336,452.025146484375,391.7570495605469,0.7950249910354614,423.46099853515625,399.7508239746094,0.9333615303039552,0.0,0.0,0.0,635.730712890625,516.7711181640625,0.8310267329216003,612.0079345703125,511.363525390625,0.762147068977356,591.093017578125,500.4256896972656,0.8568326830863953,571.13812
25585938,492.3208312988281,0.8802744746208191,547.9884033203125,487.4876708984375,0.909387707710266,595.5662841796875,584.20361328125,0.4263121783733368,579.7064819335938,575.8665161132812,0.9165925383567809,560.4127197265625,564.1045532226562,0.7897042036056519,544.43017578125,552.35595703125,0.8962704539299011,531.8712158203125,540.1428833007811,0.8099266290664673,516.0972900390625,520.5545654296875,0.6749165058135986,383.7297668457031,536.6251220703125,0.885046660900116,399.0272216796875,563.7530517578125,0.8159060478210449,416.0726013183594,575.8941040039062,0.8500763177871704,439.9586791992188,580.58154296875,0.7841462492942809,460.42950439453125,579.9993286132812,0.9021166563034058,479.51409912109375,576.2071533203125,0.9634618163108826,336.4267272949219,543.4904174804689,0.8616150617599487,304.5447998046875,559.779296875,0.8029708266258241,288.0079345703125,563.6052856445312,0.8483252525329591,268.59991455078125,564.447509765625,0.8895316123962402,251.9477996826172,563.3316650390625,0.9515439867973328,235.4656982421875,556.5100708007811,0.9436629414558412\r\n3,496.3336486816406,220.60630798339844,0.9788330197334291,499.9425659179688,264.322021484375,0.804137647151947,500.09765625,292.3538513183594,0.7415158748626709,499.5601196289063,319.8631286621094,0.5986077189445496,496.1994934082031,339.91497802734375,0.2303672730922699,0.0,0.0,0.0,496.3336486816406,220.60630798339844,0.9788330197334291,499.9425659179688,264.322021484375,0.804137647151947,500.09765625,292.3538513183594,0.7415158748626709,499.5601196289063,319.8631286621094,0.5986077189445496,496.1994934082031,339.91497802734375,0.2303672730922699,0.0,0.0,0.0,0.0,0.0,0.0,535.851318359375,376.1219177246094,0.2715684771537781,511.40521240234375,379.5197143554688,0.6323232650756836,483.6984558105469,387.4959411621094,0.6817506551742554,452.0848388671875,391.9915466308594,0.7969747185707092,423.5086975097656,399.751953125,0.931324303150177,0.0,0.0,0.0,635.8026733398438,519.3413696289061,0.8352226018905641,612.049072265625,511.3312683105469,0.7598195672035217,591.0923461914062,500.4587707519531,0.8572355508804321,571.091064453125,492.3470458984375,0.8748998045921326,547.8760375976561,487.6129455566406,0.9061638712882996,595.7015991210938,584.3153076171875,0.4555864632129669,579.4247436523438,576.3525390625,0.8828028440475464,560.09912109375,564.3056640625,0.7970129251480103,544.3431396484375,552.4615478515625,0.9156122803688048,531.9349975585939,540.1621704101561,0.8178607225418091,516.20703125,520.53466796875,0.6942353248596191,387.8775024414063,532.72314453125,0.9184426665306092,400.0733947753906,560.5511474609375,0.8534789681434631,419.3782043457031,575.6778564453125,0.8712479472160339,440.1101379394531,580.6194458007812,0.7905017137527466,460.2353820800781,580.6527709960938,0.9004195332527161,476.6386413574219,579.4889526367188,0.993718922138214,336.3165283203125,543.4000244140625,0.8707237243652344,307.3284606933594,559.9840698242189,0.8165134787559509,288.01300048828125,563.919921875,0.8597572445869446,268.43829345703125,564.7252197265625,0.8994816541671753,251.8329925537109,563.4426879882812,0.9700333476066588,235.3411865234375,556.5420532226561,0.9455788135528564\r\n4,496.3221130371094,220.76739501953125,0.9742648005485536,500.30377197265625,267.73977661132807,0.8045008778572083,500.3703002929688,296.0572509765625,0.7457910180091858,496.38751220703125,324.16619873046875,0.5705496072769165,0.0,0.0,0.0,0.0,0.0,0.0,496.3221130371094,220.76739501953125,0.9742648005485536,500.30377197265625,267.73977661132807,0.8045008778572083,500
.3703002929688,296.0572509765625,0.7457910180091858,496.38751220703125,324.16619873046875,0.5705496072769165,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,535.6304321289061,376.6114196777344,0.28071537613868713,511.6216735839844,379.8400268554688,0.6062015891075134,483.7198181152344,387.395263671875,0.6623584032058716,452.0174255371094,391.9966125488281,0.7904066443443298,423.2991943359375,399.8789978027344,0.9169630408287048,0.0,0.0,0.0,636.1013793945312,519.4646606445311,0.8235352039337158,612.2467651367188,511.4793701171875,0.7594605088233948,591.4236450195312,503.55023193359375,0.8789750933647156,571.32177734375,495.489990234375,0.8497058153152466,548.1092529296875,487.7808532714844,0.9143038988113404,595.8994750976562,584.0189208984375,0.4693641066551209,579.4336547851562,576.488525390625,0.89593905210495,560.1011352539061,564.6260375976562,0.7874167561531067,544.1777954101561,552.5606079101561,0.9036816358566284,531.7984008789061,540.2230834960939,0.8270125985145569,516.0720825195311,520.497802734375,0.7120351195335388,391.9834289550781,523.5908813476561,0.9743152260780334,399.96185302734375,555.2852172851561,0.8106785416603088,415.4165649414063,571.713623046875,0.8993117213249207,436.4542236328125,580.239990234375,0.8227529525756836,456.0745544433594,583.6392211914062,0.8926184773445129,472.4842529296875,580.263671875,0.997106432914734,336.2913513183594,544.0748291015625,0.8766992688179016,304.46380615234375,563.096923828125,0.8131293654441833,287.95965576171875,564.5267333984375,0.8448421359062195,268.37100219726557,567.411865234375,0.9186647534370422,251.7641906738281,563.519775390625,0.9665551781654358,235.3104705810547,556.5091552734375,0.9400166273117064\r\n5,49\r\n```", + "created_at": "2021-12-20T14:01:03Z", + "author": "catubc" + }, + { + "body": "Hi @catubc! The labelling interface looks in `project_folder/csv/features_extracted` for a CSV (or parquet, depending on your settings) and a matching video file. It looks like there aren't any files in the folder, I can see that the **feature extraction** step failed: `Extracting features from 0 files`. This suggests that the step before this, the outlier correction, might not have been completed - I recommend you click on the red `skip outlier correction` button the the `[Outlier correction]` tab. This should give you files to generate features for. The printouts in your screenshot suggests the video is fine. \r\n\r\nJust a note, I can see the _readthedocs_ documentation. I have not written that and do not maintain and haven't read it, I don't think anyone in the lab is developing or maintaining those docs. I recommend the github mds' for the docs https://github.com/sgoldenlab/simba#scenario-tutorials", + "created_at": "2021-12-20T14:02:24Z", + "author": "sronilsson" + }, + { + "body": "@catubc - do all of your animals have the same body-parts tracked? Did you manually move them from `input_csv` to `features_extracted` folder? \r\n", + "created_at": "2021-12-20T14:06:05Z", + "author": "sronilsson" + }, + { + "body": "Ok, I started over from scratch with a new project. This time I didnot move any .csv files anywhere. And yes, all the animals have 6 points along the spine labeled. However, there is occlusion at times and SLEAP does not return labels for some cases. I assume this would be ok, or at least if I ran the interpolation step.\r\n\r\nI can now get to outlier correction, not sure. It's still a similar crash. 
\r\n\r\n```\r\n\r\nWarning: The video name could not be found in the .SLP meta-data table\r\nSimBA therefore gives the imported CSV the same name as the SLP file.\r\nTo be sure that SimBAs slp import function works, make sure the .SLP file and the associated video file has the same file name - e.g., \"Video1.mp4\" and \"Video1.slp\" before importing the videos and SLP files to SimBA.\r\nRe-organizing pose data-frame based on user-assigned identities: 2020_08_01_11_27_15_857870_compressed_corrected.mp4....\r\nInterpolating missing values (Method: Body-parts: Nearest) ...\r\nPlease select the project_config.ini file\r\n/media/cat/256GB/dan/simba/cohorts/gerbils2/project_folder/project_config.ini\r\nTable updated.\r\n/media/cat/256GB/dan/simba/cohorts/gerbils2/project_folder/logs generated.\r\n/media/cat/256GB/dan/simba/cohorts/gerbils2/project_folder/logs generated.\r\nOutlier correction settings updated in project_config.ini\r\nNumber of Frames: 28802\r\n/media/cat/256GB/dan/simba/cohorts/gerbils2/project_folder/csv/features_extracted/2020_08_01_11_27_15_857870_compressed_corrected.csv @@@@@@@@@@@@@\r\nThe CSV file could not be located at the following path: /media/cat/256GB/dan/simba/cohorts/gerbils2/project_folder/csv/features_extracted/2020_08_01_11_27_15_857870_compressed_corrected.csv . It may be that you missed a step in the analysis. Please generate the file before proceeding.\r\nNone\r\nApplying settings for multi-animal tracking...\r\n```\r\nAnd here's the command line:\r\n\r\n\r\n```\r\n(simba3) cat@cat-Precision-T3610:~$ simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 3924, in \r\n button_labelaggression = Button(label_labelaggression, text='Select video (create new video annotation)',command= lambda:choose_folder(self.projectconfigini))\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 364, in choose_folder\r\n MainInterface()\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 178, in __init__\r\n load_frame(0, self.window, self.fbox, )\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 441, in load_frame\r\n currAnimal = currDf.loc[currDf.index[current_frame_number], [currXheader, currYheader]]\r\nAttributeError: 'NoneType' object has no attribute 'loc'\r\n```\r\n\r\n\r\n\r\n", + "created_at": "2021-12-20T15:05:37Z", + "author": "catubc" + }, + { + "body": "I went back and ran feature-extraction also.\r\n\r\n\r\n\r\n\r\n\r\n```\r\n(simba3) cat@cat-Precision-T3610:~$ simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 3924, in \r\n button_labelaggression = Button(label_labelaggression, text='Select video (create new video annotation)',command= lambda:choose_folder(self.projectconfigini))\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 364, in choose_folder\r\n MainInterface()\r\n File 
\"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 178, in __init__\r\n load_frame(0, self.window, self.fbox, )\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/labelling_aggression.py\", line 441, in load_frame\r\n currAnimal = currDf.loc[currDf.index[current_frame_number], [currXheader, currYheader]]\r\nAttributeError: 'NoneType' object has no attribute 'loc'\r\n```\r\n![feat](https://user-images.githubusercontent.com/4267452/146788910-d436bfbf-535e-4c3e-914d-1c6205052657.png)\r\n\r\n", + "created_at": "2021-12-20T15:08:10Z", + "author": "catubc" + }, + { + "body": "I'm also getting this error, I think during outlierCorrection (or even if I try to skip it).\r\n\r\n\r\n\r\n```\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 3895, in \r\n button_skipOC = Button(label_outliercorrection,text='Skip outlier correction (CAUTION)',fg='red', command=lambda:skip_outlier_c(self.projectconfigini))\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/outlier_scripts/skip_outlierCorrection.py\", line 61, in skip_outlier_c\r\n csv_df.columns = newHeaders\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/generic.py\", line 5192, in __setattr__\r\n return object.__setattr__(self, name, value)\r\n File \"pandas/_libs/properties.pyx\", line 67, in pandas._libs.properties.AxisProperty.__set__\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/generic.py\", line 690, in _set_axis\r\n self._data.set_axis(axis, labels)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/internals/managers.py\", line 183, in set_axis\r\n \"values have {new} elements\".format(old=old_len, new=new_len)\r\nValueError: Length mismatch: Expected axis has 126 elements, new values have 108 elements\r\n\r\n```\r\n\r\n[Edit:] Perhaps I'm not getting past the outlier correction. Maybe due to missing values in the data where SLEAP did not find any features?", + "created_at": "2021-12-20T15:11:33Z", + "author": "catubc" + }, + { + "body": "Actually, I think the interpolation step is failing well before I get to these steps.\r\n\r\nIs there some other preprocessing step that I have to do on the .slp file? Perhaps Talmo has code to fix the .slp files before loading into simba?\r\n\r\n![inteprolation](https://user-images.githubusercontent.com/4267452/146792764-b641e8a4-7872-452b-9fb8-83a171340f00.png)\r\n.", + "created_at": "2021-12-20T15:35:36Z", + "author": "catubc" + }, + { + "body": "Hi @catubc - I think most or all errors, could originate from the body part configuration that you specified SimBA to use in your project - we have 6 animals each with 6 body-parts each having 3 values (x,y,p) (6*6*3=108). SimBA, however, assumes that there should be 126 columns, which means that there is either an extra body-part for each animal, or an extra animal. 
\r\n\r\nWhen you click `skip outlier correction`, SimBA takes the imported pose-estimation files, and without performing any outlier correction, just modifies the headings to make them compatible with the rest of the functions down-stream, and to make them similar regardless of the pose-estimation tool they come from (sleap, dlc, animal tracker etc.). It is here that SimBA tries to fit 126 headers to a 108-field file, and this is the reason you see the error. When you specified the body-parts, is it possible you added an animal too many or a body-part too many? \r\n\r\nWhen you define the body-parts, the data is saved in a CSV at `project_folder/logs/measures/body_parts_configuration/body_configurations.csv` (or something very similar, I can't remember exactly). You could open this file in your project and check for any odd ones that should not be there?\r\n", + "created_at": "2021-12-20T17:17:39Z", + "author": "sronilsson" + }, + { + "body": "Thanks for that, so there's clearly something weird going on. Perhaps I used 0-based indices in simba somewhere?\r\n\r\nIn any case, I'm certain there aren't additional animals or features; here's the sleap file and the nodes and track info - there are only 6 animals (female, male, pup1-4) and 6 features (nose, spine1-5).\r\n\r\n```\r\n(sleap) cat@cat-Precision-T7610:~/data/simba$ python\r\nPython 3.6.13 |Anaconda, Inc.| (default, Jun 4 2021, 14:25:59) \r\n[GCC 7.5.0] on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import sleap\r\ns2021-12-20 12:27:22.061021: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1\r\nl>>> sleap.load_file(2020_08_01_11_27_15_857870_compressed_corrected.mp4.predictions.slp')\r\nLabels(labeled_frames=28802, videos=1, skeletons=1, tracks=6)\r\n>>> labels = sleap.load_file('2020_08_01_11_27_15_857870_compressed_corrected.mp4.predictions.slp')\r\n>>> labels.nodes\r\n[Node(name='spine5', weight=1.0), Node(name='spine2', weight=1.0), Node(name='spine4', weight=1.0), Node(name='nose', weight=1.0), Node(name='spine3', weight=1.0), Node(name='spine1', weight=1.0)]\r\n>>> labels.tracks\r\n[Track(spawned_on=0, name='female'), Track(spawned_on=0, name='male'), Track(spawned_on=0, name='pup1'), Track(spawned_on=0, name='pup2'), Track(spawned_on=0, name='pup3'), Track(spawned_on=0, name='pup4')]\r\n>>> \r\n```\r\n\r\n\r\n", + "created_at": "2021-12-20T17:40:56Z", + "author": "catubc" + }, + { + "body": "Here's the make simba config step\r\n![make_simba_config](https://user-images.githubusercontent.com/4267452/146809523-cec33fd7-f604-4fd3-af84-87b2ad1280ca.png)\r\n", + "created_at": "2021-12-20T17:42:18Z", + "author": "catubc" + }, + { + "body": "And here's the import sleap step\r\n![import_sleap](https://user-images.githubusercontent.com/4267452/146809597-9c748c91-a99f-49c8-bd8c-6560c51f5858.png)\r\n\r\n", + "created_at": "2021-12-20T17:43:00Z", + "author": "catubc" + }, + { + "body": "And here's the confirmation that simba only sees 6 animals and 6 features\r\n![animals](https://user-images.githubusercontent.com/4267452/146809781-62e857e6-50f5-4919-b123-1be1818d28cd.png)\r\n\r\n", + "created_at": "2021-12-20T17:44:43Z", + "author": "catubc" + }, + { + "body": "The error occurs right after I press \"c\" at this step.\r\n![Screenshot from 2021-12-20 18-45-33](https://user-images.githubusercontent.com/4267452/146810047-9edaa7ce-447a-4dd0-a512-85acaef3b52a.png)\r\n\r\n", + "created_at": "2021-12-20T17:46:37Z", + "author": "catubc" + }, + { + 
"body": "And this error reoccurs at outlier detection again.\r\n![Screenshot from 2021-12-20 18-51-21](https://user-images.githubusercontent.com/4267452/146810817-4b29900a-5cc4-4490-9025-1611a7f4651f.png)\r\n\r\n![Screenshot from 2021-12-20 18-51-31](https://user-images.githubusercontent.com/4267452/146810830-dffd6cd3-6acd-4178-a005-d0ee45c7d58e.png)\r\n\r\n\r\n", + "created_at": "2021-12-20T17:52:39Z", + "author": "catubc" + }, + { + "body": "Yeah your tracking data looks solid. It also looks good after you have imported into SimBA, as in your screenshot from the `project_folder/csv/input_csv` example a while back. It is the next step, after import, when you click `skip outlier correction` where SimBA tries to apply the the headers that you specify the user_defined configuration on your files we get the error and I think I know why.. I'm not sure you are going to like me for this solution.. :) but bear in mind it is designed to accommodate people who have an unequal and different body-parts tracked on different animals. In the `# bodyparts` entry box you set 36, not 6. And you create a table looking like this (excuse me, I dont have SimBA installed on where I am and had to draw it in a spreadsheet: \r\n![image](https://user-images.githubusercontent.com/34761092/146811372-162a9801-1a0f-4932-8fc3-c52dc0394681.png)\r\n\r\n\r\nIf this is too much of a pain, we could modify the `project_folder/logs/measures/body_parts_configuration/body_configurations.csv` files directly. ", + "created_at": "2021-12-20T17:57:10Z", + "author": "sronilsson" + }, + { + "body": "Well, I did the first option you suggested, but I get the exact same error\r\n![Screenshot from 2021-12-20 19-06-03](https://user-images.githubusercontent.com/4267452/146813610-07cf9ee2-737f-49ff-8d91-e0da21969c47.png)\r\n.", + "created_at": "2021-12-20T18:14:11Z", + "author": "catubc" + }, + { + "body": "![Screenshot from 2021-12-20 19-07-49](https://user-images.githubusercontent.com/4267452/146813694-7dda3888-dde8-4abf-9e1e-5c674acebe03.png)\r\n", + "created_at": "2021-12-20T18:14:49Z", + "author": "catubc" + }, + { + "body": "![Screenshot from 2021-12-20 19-12-58](https://user-images.githubusercontent.com/4267452/146813782-1ebd7f9c-c809-44cc-b621-7494ef8af716.png)\r\n", + "created_at": "2021-12-20T18:15:31Z", + "author": "catubc" + }, + { + "body": "Re: option #2, this file doesn't exist\r\n\r\n...project_folder/logs/measures/body_parts_configuration/body_configurations.csv files directly.\r\n\r\nIn the logs subdirectories there's only 1 file project_bp_names.csv:\r\n```\r\n\r\nfemale_nose_1_2\r\nfemale_spine1_1_2\r\nfemale_spine2_1_2\r\nfemale_spine3_1_2\r\nfemale_spine4_1_2\r\nfemale_spine5_1_2\r\nmale_nose_2\r\nmale_spine1_2\r\nmale_spine2_2\r\nmale_spine3_2\r\nmale_spine4_2\r\nmale_spine5_2\r\npup1_nose_3\r\npup1_spine1_3\r\npup1_spine2_3\r\npup1_spine3_3\r\npup1_spine4_3\r\npup1_spine5_3\r\npup2_nose_4\r\npup2_spine1_4\r\npup2_spine2_4\r\npup2_spine3_4\r\npup2_spine4_4\r\npup2_spine5_4\r\npup3_nose_5\r\npup3_spine1_5\r\npup3_spine2_5\r\npup3_spine3_5\r\npup3_spine4_5\r\npup3_spine5_5\r\npup4_nose_6\r\npup4_spine1_6\r\npup4_spine2_6\r\npup4_spine3_6\r\npup4_spine4_6\r\npup4_spine5_6\r\n\r\n```", + "created_at": "2021-12-20T18:16:42Z", + "author": "catubc" + }, + { + "body": "So one weird thing appears to be that the sleap .csv files inthe root directory and in the input_CSV have different number of headings\r\n\r\nHere's the one from the 
project_folder/csv/input_csv:\r\n\r\n```\r\nscorer,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi\r\nbodypart,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi,SLEAP_multi\r\ncoords,female_nose_x,female_nose_y,female_nose_p,female_spine1_x,female_spine1_y,female_spine1_p,female_spine2_x,female_spine2_y,female_spine2_p,female_spine3_x,female_spine3_y,female_spine3_p,female_spine4_x,female_spine4_y,female_spine4_p,female_spine5_x,female_spine5_y,female_spine5_p,female_nose_x,female_nose_y,female_nose_p,female_spine1_x,female_spine1_y,female_spine1_p,female_spine2_x,female_spine2_y,female_spine2_p,female_spine3_x,female_spine3_y,female
_spine3_p,female_spine4_x,female_spine4_y,female_spine4_p,female_spine5_x,female_spine5_y,female_spine5_p,male_nose_x,male_nose_y,male_nose_p,male_spine1_x,male_spine1_y,male_spine1_p,male_spine2_x,male_spine2_y,male_spine2_p,male_spine3_x,male_spine3_y,male_spine3_p,male_spine4_x,male_spine4_y,male_spine4_p,male_spine5_x,male_spine5_y,male_spine5_p,pup1_nose_x,pup1_nose_y,pup1_nose_p,pup1_spine1_x,pup1_spine1_y,pup1_spine1_p,pup1_spine2_x,pup1_spine2_y,pup1_spine2_p,pup1_spine3_x,pup1_spine3_y,pup1_spine3_p,pup1_spine4_x,pup1_spine4_y,pup1_spine4_p,pup1_spine5_x,pup1_spine5_y,pup1_spine5_p,pup2_nose_x,pup2_nose_y,pup2_nose_p,pup2_spine1_x,pup2_spine1_y,pup2_spine1_p,pup2_spine2_x,pup2_spine2_y,pup2_spine2_p,pup2_spine3_x,pup2_spine3_y,pup2_spine3_p,pup2_spine4_x,pup2_spine4_y,pup2_spine4_p,pup2_spine5_x,pup2_spine5_y,pup2_spine5_p,pup3_nose_x,pup3_nose_y,pup3_nose_p,pup3_spine1_x,pup3_spine1_y,pup3_spine1_p,pup3_spine2_x,pup3_spine2_y,pup3_spine2_p,pup3_spine3_x,pup3_spine3_y,pup3_spine3_p,pup3_spine4_x,pup3_spine4_y,pup3_spine4_p,pup3_spine5_x,pup3_spine5_y,pup3_spine5_p,pup4_nose_x,pup4_nose_y,pup4_nose_p,pup4_spine1_x,pup4_spine1_y,pup4_spine1_p,pup4_spine2_x,pup4_spine2_y,pup4_spine2_p,pup4_spine3_x,pup4_spine3_y,pup4_spine3_p,pup4_spine4_x,pup4_spine4_y,pup4_spine4_p,pup4_spine5_x,pup4_spine5_y,pup4_spine5_p\r\n0,496.2355041503906,220.567626953125,0.9527365565299988,500.4596252441406,263.92752075195307,0.8128677010536194,503.2895812988281,292.5050964355469,0.711251437664032,500.05694580078125,320.3348693847656,0.6614666581153871,503.6937255859375,343.82562255859375,0.3003198206424713,0.0,0.0,0.0,496.2355041503906,220.567626953125,0.9527365565299988,500.4596252441406,263.92752075195307,0.8128677010536194,503.2895812988281,292.5050964355469,0.711251437664032,500.05694580078125,320.3348693847656,0.6614666581153871,503.6937255859375,343.82562255859375,0.3003198206424713,0.0,0.0,0.0,0.0,0.0,0.0,535.5052490234375,379.9781188964844,0.2975809574127197,508.6451110839844,383.20098876953125,0.5648311972618103,483.367919921875,384.58819580078125,0.6478815674781799,451.7007446289063,391.420654296875,0.7451946139335632,423.4447937011719,399.6026611328125,0.9182761311531068,648.0948486328125,515.2940063476561,0.2229043692350388,636.2654418945312,516.0468139648439,0.8575764894485474,612.0125732421875,508.645751953125,0.7609681487083435,591.1453857421875,500.4262390136719,0.832747220993042,571.529541015625,495.3873901367188,0.8492857813835144,548.5122680664061,487.8362121582031,0.8720636367797852,600.7646484375,575.720458984375,0.5423446297645569,583.8673706054688,572.3936157226562,0.7516449689865112,563.8768920898438,567.9528198242188,0.7206470966339111,543.586669921875,556.2380981445311,0.7213824987411499,527.6947631835939,543.9876098632811,0.7564922571182251,515.7238159179689,524.171630859375,0.7290070056915283,384.0461730957031,535.8851928710939,0.8675850033760071,396.2161865234375,559.5531005859375,0.8782817721366882,412.5365295410156,571.8739013671875,0.8699088096618652,436.2445373535156,579.6212158203125,0.7766255736351013,456.49560546875,579.3140869140625,0.883823573589325,475.4640197753906,575.2548828125,0.9269698858261108,336.589599609375,543.6217651367189,0.8530907034873962,307.4103698730469,560.0713500976561,0.7831662893295288,288.1627502441406,564.1406860351562,0.8259077072143555,268.6378173828125,567.1624755859375,0.8390766978263855,251.97784423828125,563.4893188476562,0.9189422726631165,235.6845550537109,556.4923095703125,0.9046114683151244\r\n1,496.2879638671875,220.63107299804688,0.95
47415375709534,500.44415283203125,264.14016723632807,0.8200687766075134,500.6158142089844,295.5201721191406,0.7471846938133241,499.6556701660156,320.661865234375,0.6593335270881653,503.5816650390625,344.24212646484375,0.28254279494285583,0.0,0.0,0.0,496.2879638671875,220.63107299804688,0.9547415375709534,500.44415283203125,264.14016723632807,0.8200687766075134,500.6158142089844,295.5201721191406,0.7471846938133241,499.6556701660156,320.661865234375,0.6593335270881653,503.5816650390625,344.24212646484375,0.28254279494285583,0.0,0.0,0.0,0.0,0.0,0.0,535.52880859375,379.43389892578125,0.3010869026184082,508.5622863769531,380.3068542480469,0.5687392354011536,483.4601440429688,384.4634704589844,0.6695404052734375,451.8765563964844,391.38427734375,0.7627116441726685,423.4925231933594,399.610595703125,0.9281686544418336,0.0,0.0,0.0,636.1993408203125,516.1194458007811,0.8542495369911194,612.2576904296875,508.6696472167969,0.7529775500297546,591.4818725585938,503.501708984375,0.845646858215332,571.4248046875,495.66845703125,0.8548214435577393,548.2125244140625,488.059326171875,0.8840513825416565,596.8784790039062,579.6757202148438,0.4539488852024078,580.1005249023438,575.897705078125,0.8403472304344177,560.4075317382811,567.7429809570312,0.7307121157646179,543.7866821289061,555.66943359375,0.788873016834259,527.8856201171875,543.5237426757811,0.8062229752540588,515.6806030273439,523.9407958984375,0.7190098762512207,380.3003845214844,536.2025146484375,0.8638001084327698,395.5709228515625,559.7769165039061,0.8765522241592407,412.270263671875,571.87744140625,0.8725616931915283,436.1148071289063,576.8961791992188,0.7829010486602783,456.60833740234375,576.468994140625,0.9018959403038024,475.4982604980469,575.1974487304688,0.9149670600891112,336.52490234375,543.4237670898439,0.8464635014533997,304.5652160644531,559.7457885742189,0.7898238301277161,288.0775146484375,563.6826171875,0.8300068378448486,268.6436767578125,564.4812622070312,0.8552283048629761,251.9909210205078,563.39990234375,0.9223942160606384,235.6182403564453,556.50244140625,0.914698362350464\r\n2,496.2433776855469,220.63575744628903,0.9665535688400269,499.9832763671875,264.24392700195307,0.8199657201766968,500.14794921875,292.3388671875,0.7738865613937378,499.8458557128906,319.84124755859375,0.6679050326347351,500.36553955078125,340.48931884765625,0.25780370831489563,0.0,0.0,0.0,496.2433776855469,220.63575744628903,0.9665535688400269,499.9832763671875,264.24392700195307,0.8199657201766968,500.14794921875,292.3388671875,0.7738865613937378,499.8458557128906,319.84124755859375,0.6679050326347351,500.36553955078125,340.48931884765625,0.25780370831489563,0.0,0.0,0.0,0.0,0.0,0.0,535.7659301757811,376.2238159179688,0.3028284311294556,508.7102966308594,379.6003112792969,0.6317318081855774,483.5924377441406,387.3358154296875,0.6752269864082336,452.025146484375,391.7570495605469,0.7950249910354614,423.46099853515625,399.7508239746094,0.9333615303039552,0.0,0.0,0.0,635.730712890625,516.7711181640625,0.8310267329216003,612.0079345703125,511.363525390625,0.762147068977356,591.093017578125,500.4256896972656,0.8568326830863953,571.1381225585938,492.3208312988281,0.8802744746208191,547.9884033203125,487.4876708984375,0.909387707710266,595.5662841796875,584.20361328125,0.4263121783733368,579.7064819335938,575.8665161132812,0.9165925383567809,560.4127197265625,564.1045532226562,0.7897042036056519,544.43017578125,552.35595703125,0.8962704539299011,531.8712158203125,540.1428833007811,0.8099266290664673,516.0972900390625,520.5545654296875,0.6749165058135986,383.7
297668457031,536.6251220703125,0.885046660900116,399.0272216796875,563.7530517578125,0.8159060478210449,416.0726013183594,575.8941040039062,0.8500763177871704,439.9586791992188,580.58154296875,0.7841462492942809,460.42950439453125,579.9993286132812,0.9021166563034058,479.51409912109375,576.2071533203125,0.9634618163108826,336.4267272949219,543.4904174804689,0.8616150617599487,304.5447998046875,559.779296875,0.8029708266258241,288.0079345703125,563.6052856445312,0.8483252525329591,268.59991455078125,564.447509765625,0.8895316123962402,251.9477996826172,563.3316650390625,0.9515439867973328,235.4656982421875,556.5100708007811,0.9436629414558412\r\n3,496.3336486816406,220.60630798339844,0.9788330197334291,499.9425659179688,264.322021484375,0.804137647151947,500.09765625,292.3538513183594,0.7415158748626709,499.5601196289063,319.8631286621094,0.5986077189445496,496.1994934082031,339.91497802734375,0.2303672730922699,0.0,0.0,0.0,496.3336486816406,220.60630798339844,0.9788330197334291,499.9425659179688,264.322021484375,0.804137647151947,500.09765625,292.3538513183594,0.7415158748626709,499.5601196289063,319.8631286621094,0.5986077189445496,496.1994934082031,339.91497802734375,0.2303672730922699,0.0,0.0,0.0,0.0,0.0,0.0,535.851318359375,376.1219177246094,0.2715684771537781,511.40521240234375,379.5197143554688,0.6323232650756836,483.6984558105469,387.4959411621094,0.6817506551742554,452.0848388671875,391.9915466308594,0.7969747185707092,423.5086975097656,399.751953125,0.931324303150177,0.0,0.0,0.0,635.8026733398438,519.3413696289061,0.8352226018905641,612.049072265625,511.3312683105469,0.7598195672035217,591.0923461914062,500.4587707519531,0.8572355508804321,571.091064453125,492.3470458984375,0.8748998045921326,547.8760375976561,487.6129455566406,0.9061638712882996,595.7015991210938,584.3153076171875,0.4555864632129669,579.4247436523438,576.3525390625,0.8828028440475464,560.09912109375,564.3056640625,0.7970129251480103,544.3431396484375,552.4615478515625,0.9156122803688048,531.9349975585939,540.1621704101561,0.8178607225418091,516.20703125,520.53466796875,0.6942353248596191,387.8775024414063,532.72314453125,0.9184426665306092,400.0733947753906,560.5511474609375,0.8534789681434631,419.3782043457031,575.6778564453125,0.8712479472160339,440.1101379394531,580.6194458007812,0.7905017137527466,460.2353820800781,580.6527709960938,0.9004195332527161,476.6386413574219,579.4889526367188,0.993718922138214,336.3165283203125,543.4000244140625,0.8707237243652344,307.3284606933594,559.9840698242189,0.8165134787559509,288.01300048828125,563.919921875,0.8597572445869446,268.43829345703125,564.7252197265625,0.8994816541671753,251.8329925537109,563.4426879882812,0.9700333476066588,235.3411865234375,556.5420532226561,0.9455788135528564\r\n4,496.3221130371094,220.76739501953125,0.9742648005485536,500.30377197265625,267.73977661132807,0.8045008778572083,500.3703002929688,296.0572509765625,0.7457910180091858,496.38751220703125,324.16619873046875,0.5705496072769165,0.0,0.0,0.0,0.0,0.0,0.0,496.3221130371094,220.76739501953125,0.9742648005485536,500.30377197265625,267.73977661132807,0.8045008778572083,500.3703002929688,296.0572509765625,0.7457910180091858,496.38751220703125,324.16619873046875,0.5705496072769165,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,535.6304321289061,376.6114196777344,0.28071537613868713,511.6216735839844,379.8400268554688,0.6062015891075134,483.7198181152344,387.395263671875,0.6623584032058716,452.0174255371094,391.9966125488281,0.7904066443443298,423.2991943359375,399.8789978027344,0.9169630408287048,0.0,0.0,0
.0,636.1013793945312,519.4646606445311,0.8235352039337158,612.2467651367188,511.4793701171875,0.7594605088233948,591.4236450195312,503.55023193359375,0.8789750933647156,571.32177734375,495.489990234375,0.8497058153152466,548.1092529296875,487.7808532714844,0.9143038988113404,595.8994750976562,584.0189208984375,0.4693641066551209,579.4336547851562,576.488525390625,0.89593905210495,560.1011352539061,564.6260375976562,0.7874167561531067,544.1777954101561,552.5606079101561,0.9036816358566284,531.7984008789061,540.2230834960939,0.8270125985145569,516.0720825195311,520.497802734375,0.7120351195335388,391.9834289550781,523.5908813476561,0.9743152260780334,399.96185302734375,555.2852172851561,0.8106785416603088,415.4165649414063,571.713623046875,0.8993117213249207,436.4542236328125,580.239990234375,0.8227529525756836,456.0745544433594,583.6392211914062,0.8926184773445129,472.4842529296875,580.263671875,0.997106432914734,336.2913513183594,544.0748291015625,0.8766992688179016,304.46380615234375,563.096923828125,0.8131293654441833,287.95965576171875,564.5267333984375,0.8448421359062195,268.37100219726557,567.411865234375,0.9186647534370422,251.7641906738281,563.519775390625,0.9665551781654358,235.3104705810547,556.5091552734375,0.9400166273117064\r\n5,496.5477294921875,220.31765747070312,1.0025038719177246,500.3257141113281,267.41021728515625,0.8073557615280151,500.5188293457031,295.65936279296875,0.7421766519546509,499.3809814453125,320.694091796875,0.5579490661621094,496.3204650878906,340.69488525390625,0.2062848061323166,0.0,0.0,0.0,496.5477294921875,220.31765747070312,1.0025038719177246,500.3257141113281,267.41021728515625,0.8073557615280151,500.5188293457031,295.65936279296875,0.7421766519546509,499.3809814453125,320.694091796875,0.5579490661621094,496.3204650878906,340.69488525390625,0.2062848061323166,0.0,0.0,0.0,0.0,0.0,0.0,535.6732177734375,376.5775451660156,0.2895082235336304,511.58978271484375,379.8506774902344,0.6220303177833557,483.64739990234375,387.5206604003906,0.6734927892684937,451.9689636230469,392.08935546875,0.8059420585632324,423.2479553222656,400.1103820800781,0.8917230367660522,0.0,0.0,0.0,636.2506713867188,516.7752075195311,0.8254024386405945,612.2767944335938,511.49456787109375,0.7678549885749817,591.477783203125,503.6936340332031,0.8922142982482909,571.3546142578125,495.6891174316406,0.8556576371192932,548.2413330078125,488.05322265625,0.91047602891922,595.9586181640625,583.460693359375,0.5140318274497986,576.6068115234375,576.3134765625,0.9000992178916931,559.978515625,564.611572265625,0.7706466913223267,543.9149169921875,555.3850708007811,0.91111820936203,531.5545654296875,540.5081787109375,0.8141813278198242,516.129150390625,523.2796020507811,0.6990899443626404,396.3413391113281,512.0481567382811,0.9263705015182496,400.5076599121094,547.2685546875,0.7489035129547119,411.8859252929688,564.2376098632812,0.8736788630485535,432.0379943847656,576.0293579101562,0.7989232540130615,448.5880737304688,583.4317626953125,0.8441069722175598,467.59283447265625,583.3642578125,0.9834716916084291,336.1600341796875,543.789306640625,0.8774490356445312,307.2768859863281,560.6718139648439,0.8090906739234924,287.9517517089844,564.6837158203125,0.8336358666419983,268.23101806640625,567.503173828125,0.9250435829162598,251.68019104003903,563.5968017578125,0.9689670205116272,235.2488555908203,556.5679321289061,0.9312905073165894\r\n...\r\n```\r\n", + "created_at": "2021-12-20T18:19:21Z", + "author": "catubc" + }, + { + "body": "And here's the one in the root directory where the .slp file 
is:\r\n\r\n\r\n```\r\n,Animal_1_nose_x,Animal_1_nose_y,Animal_1_nose_p,Animal_1_spine1_x,Animal_1_spine1_y,Animal_1_spine1_p,Animal_1_spine2_x,Animal_1_spine2_y,Animal_1_spine2_p,Animal_1_spine3_x,Animal_1_spine3_y,Animal_1_spine3_p,Animal_1_spine4_x,Animal_1_spine4_y,Animal_1_spine4_p,Animal_1_spine5_x,Animal_1_spine5_y,Animal_1_spine5_p,Animal_2_nose_x,Animal_2_nose_y,Animal_2_nose_p,Animal_2_spine1_x,Animal_2_spine1_y,Animal_2_spine1_p,Animal_2_spine2_x,Animal_2_spine2_y,Animal_2_spine2_p,Animal_2_spine3_x,Animal_2_spine3_y,Animal_2_spine3_p,Animal_2_spine4_x,Animal_2_spine4_y,Animal_2_spine4_p,Animal_2_spine5_x,Animal_2_spine5_y,Animal_2_spine5_p,Animal_3_nose_x,Animal_3_nose_y,Animal_3_nose_p,Animal_3_spine1_x,Animal_3_spine1_y,Animal_3_spine1_p,Animal_3_spine2_x,Animal_3_spine2_y,Animal_3_spine2_p,Animal_3_spine3_x,Animal_3_spine3_y,Animal_3_spine3_p,Animal_3_spine4_x,Animal_3_spine4_y,Animal_3_spine4_p,Animal_3_spine5_x,Animal_3_spine5_y,Animal_3_spine5_p,Animal_4_nose_x,Animal_4_nose_y,Animal_4_nose_p,Animal_4_spine1_x,Animal_4_spine1_y,Animal_4_spine1_p,Animal_4_spine2_x,Animal_4_spine2_y,Animal_4_spine2_p,Animal_4_spine3_x,Animal_4_spine3_y,Animal_4_spine3_p,Animal_4_spine4_x,Animal_4_spine4_y,Animal_4_spine4_p,Animal_4_spine5_x,Animal_4_spine5_y,Animal_4_spine5_p,Animal_5_nose_x,Animal_5_nose_y,Animal_5_nose_p,Animal_5_spine1_x,Animal_5_spine1_y,Animal_5_spine1_p,Animal_5_spine2_x,Animal_5_spine2_y,Animal_5_spine2_p,Animal_5_spine3_x,Animal_5_spine3_y,Animal_5_spine3_p,Animal_5_spine4_x,Animal_5_spine4_y,Animal_5_spine4_p,Animal_5_spine5_x,Animal_5_spine5_y,Animal_5_spine5_p,Animal_6_nose_x,Animal_6_nose_y,Animal_6_nose_p,Animal_6_spine1_x,Animal_6_spine1_y,Animal_6_spine1_p,Animal_6_spine2_x,Animal_6_spine2_y,Animal_6_spine2_p,Animal_6_spine3_x,Animal_6_spine3_y,Animal_6_spine3_p,Animal_6_spine4_x,Animal_6_spine4_y,Animal_6_spine4_p,Animal_6_spine5_x,Animal_6_spine5_y,Animal_6_spine5_p\r\n0,496.2355041503906,220.567626953125,0.9527365565299988,500.4596252441406,263.9275207519531,0.8128677010536194,503.2895812988281,292.5050964355469,0.711251437664032,500.05694580078125,320.3348693847656,0.661466658115387,503.6937255859375,343.82562255859375,0.3003198206424713,0.0,0.0,0.0,0.0,0.0,0.0,535.5052490234375,379.9781188964844,0.2975809574127197,508.6451110839844,383.20098876953125,0.5648311972618103,483.367919921875,384.58819580078125,0.6478815674781799,451.70074462890625,391.420654296875,0.7451946139335632,423.4447937011719,399.6026611328125,0.9182761311531067,336.589599609375,543.6217651367188,0.8530907034873962,307.4103698730469,560.0713500976562,0.7831662893295288,288.1627502441406,564.1406860351562,0.8259077072143555,268.6378173828125,567.1624755859375,0.8390766978263855,251.97784423828125,563.4893188476562,0.9189422726631165,235.68455505371094,556.4923095703125,0.9046114683151245,600.7646484375,575.720458984375,0.5423446297645569,583.8673706054688,572.3936157226562,0.7516449689865112,563.8768920898438,567.9528198242188,0.7206470966339111,543.586669921875,556.2380981445312,0.7213824987411499,527.6947631835938,543.9876098632812,0.7564922571182251,515.7238159179688,524.171630859375,0.7290070056915283,384.0461730957031,535.8851928710938,0.8675850033760071,396.2161865234375,559.5531005859375,0.8782817721366882,412.5365295410156,571.8739013671875,0.8699088096618652,436.2445373535156,579.6212158203125,0.7766255736351013,456.49560546875,579.3140869140625,0.883823573589325,475.4640197753906,575.2548828125,0.9269698858261108,648.0948486328125,515.2940063476562,0.22290436923503876,636.265441
8945312,516.0468139648438,0.8575764894485474,612.0125732421875,508.645751953125,0.7609681487083435,591.1453857421875,500.4262390136719,0.832747220993042,571.529541015625,495.38739013671875,0.8492857813835144,548.5122680664062,487.8362121582031,0.8720636367797852\r\n1,496.2879638671875,220.63107299804688,0.9547415375709534,500.44415283203125,264.1401672363281,0.8200687766075134,500.6158142089844,295.5201721191406,0.747184693813324,499.6556701660156,320.661865234375,0.6593335270881653,503.5816650390625,344.24212646484375,0.28254279494285583,0.0,0.0,0.0,0.0,0.0,0.0,535.52880859375,379.43389892578125,0.3010869026184082,508.5622863769531,380.3068542480469,0.5687392354011536,483.46014404296875,384.4634704589844,0.6695404052734375,451.8765563964844,391.38427734375,0.7627116441726685,423.4925231933594,399.610595703125,0.9281686544418335,336.52490234375,543.4237670898438,0.8464635014533997,304.5652160644531,559.7457885742188,0.7898238301277161,288.0775146484375,563.6826171875,0.8300068378448486,268.6436767578125,564.4812622070312,0.8552283048629761,251.9909210205078,563.39990234375,0.9223942160606384,235.6182403564453,556.50244140625,0.9146983623504639,596.8784790039062,579.6757202148438,0.45394888520240784,580.1005249023438,575.897705078125,0.8403472304344177,560.4075317382812,567.7429809570312,0.7307121157646179,543.7866821289062,555.66943359375,0.788873016834259,527.8856201171875,543.5237426757812,0.8062229752540588,515.6806030273438,523.9407958984375,0.7190098762512207,380.3003845214844,536.2025146484375,0.8638001084327698,395.5709228515625,559.7769165039062,0.8765522241592407,412.270263671875,571.87744140625,0.8725616931915283,436.11480712890625,576.8961791992188,0.7829010486602783,456.60833740234375,576.468994140625,0.9018959403038025,475.4982604980469,575.1974487304688,0.9149670600891113,0.0,0.0,0.0,636.1993408203125,516.1194458007812,0.8542495369911194,612.2576904296875,508.6696472167969,0.7529775500297546,591.4818725585938,503.501708984375,0.845646858215332,571.4248046875,495.66845703125,0.8548214435577393,548.2125244140625,488.059326171875,0.8840513825416565\r\n2,496.2433776855469,220.63575744628906,0.9665535688400269,499.9832763671875,264.2439270019531,0.8199657201766968,500.14794921875,292.3388671875,0.7738865613937378,499.8458557128906,319.84124755859375,0.6679050326347351,500.36553955078125,340.48931884765625,0.25780370831489563,0.0,0.0,0.0,0.0,0.0,0.0,535.7659301757812,376.22381591796875,0.30282843112945557,508.7102966308594,379.6003112792969,0.6317318081855774,483.5924377441406,387.3358154296875,0.6752269864082336,452.025146484375,391.7570495605469,0.7950249910354614,423.46099853515625,399.7508239746094,0.9333615303039551,336.4267272949219,543.4904174804688,0.8616150617599487,304.5447998046875,559.779296875,0.802970826625824,288.0079345703125,563.6052856445312,0.848325252532959,268.59991455078125,564.447509765625,0.8895316123962402,251.9477996826172,563.3316650390625,0.9515439867973328,235.4656982421875,556.5100708007812,0.9436629414558411,595.5662841796875,584.20361328125,0.4263121783733368,579.7064819335938,575.8665161132812,0.916592538356781,560.4127197265625,564.1045532226562,0.7897042036056519,544.43017578125,552.35595703125,0.8962704539299011,531.8712158203125,540.1428833007812,0.8099266290664673,516.0972900390625,520.5545654296875,0.6749165058135986,383.7297668457031,536.6251220703125,0.885046660900116,399.0272216796875,563.7530517578125,0.8159060478210449,416.0726013183594,575.8941040039062,0.8500763177871704,439.95867919921875,580.58154296875,0.784146249294281,460.4295043945
3125,579.9993286132812,0.9021166563034058,479.51409912109375,576.2071533203125,0.9634618163108826,0.0,0.0,0.0,635.730712890625,516.7711181640625,0.8310267329216003,612.0079345703125,511.363525390625,0.762147068977356,591.093017578125,500.4256896972656,0.8568326830863953,571.1381225585938,492.3208312988281,0.8802744746208191,547.9884033203125,487.4876708984375,0.9093877077102661\r\n3,496.3336486816406,220.60630798339844,0.978833019733429,499.94256591796875,264.322021484375,0.804137647151947,500.09765625,292.3538513183594,0.7415158748626709,499.56011962890625,319.8631286621094,0.5986077189445496,496.1994934082031,339.91497802734375,0.2303672730922699,0.0,0.0,0.0,0.0,0.0,0.0,535.851318359375,376.1219177246094,0.2715684771537781,511.40521240234375,379.51971435546875,0.6323232650756836,483.6984558105469,387.4959411621094,0.6817506551742554,452.0848388671875,391.9915466308594,0.7969747185707092,423.5086975097656,399.751953125,0.931324303150177,336.3165283203125,543.4000244140625,0.8707237243652344,307.3284606933594,559.9840698242188,0.8165134787559509,288.01300048828125,563.919921875,0.8597572445869446,268.43829345703125,564.7252197265625,0.8994816541671753,251.83299255371094,563.4426879882812,0.9700333476066589,235.3411865234375,556.5420532226562,0.9455788135528564,595.7015991210938,584.3153076171875,0.4555864632129669,579.4247436523438,576.3525390625,0.8828028440475464,560.09912109375,564.3056640625,0.7970129251480103,544.3431396484375,552.4615478515625,0.9156122803688049,531.9349975585938,540.1621704101562,0.8178607225418091,516.20703125,520.53466796875,0.6942353248596191,387.87750244140625,532.72314453125,0.9184426665306091,400.0733947753906,560.5511474609375,0.8534789681434631,419.3782043457031,575.6778564453125,0.8712479472160339,440.1101379394531,580.6194458007812,0.7905017137527466,460.2353820800781,580.6527709960938,0.9004195332527161,476.6386413574219,579.4889526367188,0.9937189221382141,0.0,0.0,0.0,635.8026733398438,519.3413696289062,0.835222601890564,612.049072265625,511.3312683105469,0.7598195672035217,591.0923461914062,500.4587707519531,0.8572355508804321,571.091064453125,492.3470458984375,0.8748998045921326,547.8760375976562,487.6129455566406,0.9061638712882996\r\n4,496.3221130371094,220.76739501953125,0.9742648005485535,500.30377197265625,267.7397766113281,0.8045008778572083,500.37030029296875,296.0572509765625,0.7457910180091858,496.38751220703125,324.16619873046875,0.5705496072769165,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,535.6304321289062,376.6114196777344,0.28071537613868713,511.6216735839844,379.84002685546875,0.6062015891075134,483.7198181152344,387.395263671875,0.6623584032058716,452.0174255371094,391.9966125488281,0.7904066443443298,423.2991943359375,399.8789978027344,0.9169630408287048,336.2913513183594,544.0748291015625,0.8766992688179016,304.46380615234375,563.096923828125,0.8131293654441833,287.95965576171875,564.5267333984375,0.8448421359062195,268.3710021972656,567.411865234375,0.9186647534370422,251.76419067382812,563.519775390625,0.9665551781654358,235.3104705810547,556.5091552734375,0.9400166273117065,595.8994750976562,584.0189208984375,0.46936410665512085,579.4336547851562,576.488525390625,0.89593905210495,560.1011352539062,564.6260375976562,0.7874167561531067,544.1777954101562,552.5606079101562,0.9036816358566284,531.7984008789062,540.2230834960938,0.8270125985145569,516.0720825195312,520.497802734375,0.7120351195335388,391.9834289550781,523.5908813476562,0.9743152260780334,399.96185302734375,555.2852172851562,0.8106785416603088,415.41656494140625,571.713623046875,
0.8993117213249207,436.4542236328125,580.239990234375,0.8227529525756836,456.0745544433594,583.6392211914062,0.8926184773445129,472.4842529296875,580.263671875,0.9971064329147339,0.0,0.0,0.0,636.1013793945312,519.4646606445312,0.8235352039337158,612.2467651367188,511.4793701171875,0.7594605088233948,591.4236450195312,503.55023193359375,0.8789750933647156,571.32177734375,495.489990234375,0.8497058153152466,548.1092529296875,487.7808532714844,0.9143038988113403\r\n5,496.5477294921875,220.31765747070312,1.0025038719177246,500.3257141113281,267.41021728515625,0.8073557615280151,500.5188293457031,295.65936279296875,0.7421766519546509,499.3809814453125,320.694091796875,0.5579490661621094,496.3204650878906,340.69488525390625,0.2062848061323166,0.0,0.0,0.0,0.0,0.0,0.0,535.6732177734375,376.5775451660156,0.28950822353363037,511.58978271484375,379.8506774902344,0.6220303177833557,483.64739990234375,387.5206604003906,0.6734927892684937,451.9689636230469,392.08935546875,0.8059420585632324,423.2479553222656,400.1103820800781,0.8917230367660522,336.1600341796875,543.789306640625,0.8774490356445312,307.2768859863281,560.6718139648438,0.8090906739234924,287.9517517089844,564.6837158203125,0.8336358666419983,268.23101806640625,567.503173828125,0.9250435829162598,251.68019104003906,563.5968017578125,0.9689670205116272,235.2488555908203,556.5679321289062,0.9312905073165894,595.9586181640625,583.460693359375,0.5140318274497986,576.6068115234375,576.3134765625,0.9000992178916931,559.978515625,564.611572265625,0.7706466913223267,543.9149169921875,555.3850708007812,0.91111820936203,531.5545654296875,540.5081787109375,0.8141813278198242,516.129150390625,523.2796020507812,0.6990899443626404,396.3413391113281,512.0481567382812,0.9263705015182495,400.5076599121094,547.2685546875,0.7489035129547119,411.88592529296875,564.2376098632812,0.8736788630485535,432.0379943847656,576.0293579101562,0.7989232540130615,448.58807373046875,583.4317626953125,0.8441069722175598,467.59283447265625,583.3642578125,0.983471691608429,0.0,0.0,0.0,636.2506713867188,516.7752075195312,0.8254024386405945,612.2767944335938,511.49456787109375,0.7678549885749817,591.477783203125,503.6936340332031,0.892214298248291,571.3546142578125,495.6891174316406,0.8556576371192932,548.2413330078125,488.05322265625,0.91047602891922\r\n```\r\n", + "created_at": "2021-12-20T18:19:53Z", + "author": "catubc" + }, + { + "body": "Anyways, I'm open to any other suggestions. \r\nThanks so much for the help.", + "created_at": "2021-12-20T18:20:45Z", + "author": "catubc" + }, + { + "body": "There is something going on in how you defined the body-parts for the first animal. The last integer is the animal number (SimBA uses this integer to keep the animals separated): So you have an AnimalName_BodyPartName_AnimalInteger. You see that the female for some reason has an extra digit:\r\n\r\n![image](https://user-images.githubusercontent.com/34761092/146815625-9738c821-24ca-40bf-b9d1-540030fc04cf.png). \r\n\r\n", + "created_at": "2021-12-20T18:34:18Z", + "author": "sronilsson" + }, + { + "body": "Open the file, change female to this below, save file, and try and run it again. 
\r\n\r\n![image](https://user-images.githubusercontent.com/34761092/146816046-09f67839-64cf-43dd-9130-33397fc7f926.png)\r\n", + "created_at": "2021-12-20T18:35:13Z", + "author": "sronilsson" + }, + { + "body": "Thanks, not sure where that's from, seems simba is generating these extra indexes.\r\n\r\nI changed it all and it's still crashing on this elements issue:\r\n\r\n\r\n```\r\nfemale_nose_1\r\nfemale_spine1_1\r\nfemale_spine2_1\r\nfemale_spine3_1\r\nfemale_spine4_1\r\nfemale_spine5_1\r\nmale_nose_2\r\nmale_spine1_2\r\nmale_spine2_2\r\nmale_spine3_2\r\nmale_spine4_2\r\nmale_spine5_2\r\npup1_nose_3\r\npup1_spine1_3\r\npup1_spine2_3\r\npup1_spine3_3\r\npup1_spine4_3\r\npup1_spine5_3\r\npup2_nose_4\r\npup2_spine1_4\r\npup2_spine2_4\r\npup2_spine3_4\r\npup2_spine4_4\r\npup2_spine5_4\r\npup3_nose_5\r\npup3_spine1_5\r\npup3_spine2_5\r\npup3_spine3_5\r\npup3_spine4_5\r\npup3_spine5_5\r\npup4_nose_6\r\npup4_spine1_6\r\npup4_spine2_6\r\npup4_spine3_6\r\npup4_spine4_6\r\npup4_spine5_6\r\n\r\n```\r\nHere's the log again\r\n```\r\n\r\n(simba3) cat@cat-Precision-T3610:~$ simba\r\nQt: Session management error: None of the authentication protocols specified are supported\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/tkinter/__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 3895, in \r\n button_skipOC = Button(label_outliercorrection,text='Skip outlier correction (CAUTION)',fg='red', command=lambda:skip_outlier_c(self.projectconfigini))\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/outlier_scripts/skip_outlierCorrection.py\", line 61, in skip_outlier_c\r\n csv_df.columns = newHeaders\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/generic.py\", line 5192, in __setattr__\r\n return object.__setattr__(self, name, value)\r\n File \"pandas/_libs/properties.pyx\", line 67, in pandas._libs.properties.AxisProperty.__set__\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/generic.py\", line 690, in _set_axis\r\n self._data.set_axis(axis, labels)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pandas/core/internals/managers.py\", line 183, in set_axis\r\n \"values have {new} elements\".format(old=old_len, new=new_len)\r\nValueError: Length mismatch: Expected axis has 126 elements, new values have 108 elements\r\n```\r\n\r\n\r\n\r\n\r\n", + "created_at": "2021-12-20T18:43:57Z", + "author": "catubc" + }, + { + "body": "Hmm.. what I would suggest: \r\n\r\nFor this file with the entries below, your paste suggest that there is an extra empty `None` row right at the end. Can you attach the actual CSV file here and I can check it? \r\n\r\nAlso, if I had to troubleshoot this myself, I would:\r\n1: Open this file: `/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/outlier_scripts/skip_outlierCorrection.py`\r\n\r\n2. Right before line 61 (csv_df.columns = newHeaders), insert a new line: `print(newHeaders)`\r\n\r\n3. Save, and re-run the outlier correction, and you'll see printed out what the hell the extra columns that SimBA is trying to use are. 
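For reference, the same diagnostic can be approximated outside SimBA. The sketch below is not SimBA's own code path - it is a rough pandas approximation with placeholder file locations, where `project_bp_names.csv` is the body-part list quoted in this thread (and repeated just below):

```
# Sketch: rebuild SimBA-style x/y/p headers from the body-part name list and
# diff them against the imported CSV. Paths are placeholders; pandas assumed.
import pandas as pd

bp_names = pd.read_csv("project_bp_names.csv", header=None)[0].dropna().tolist()
headers = [f"{bp}_{s}" for bp in bp_names for s in ("x", "y", "p")]  # 36 * 3 = 108

df = pd.read_csv("project_folder/csv/input_csv/some_video.csv",
                 header=[0, 1, 2], index_col=0)
print(f"built {len(headers)} headers vs {df.shape[1]} columns in the file")

# Any field that appears twice in the bottom header row would account for the
# mismatch: 126 - 108 = 18 = one duplicated animal with 6 body-parts * 3 fields.
fields = df.columns.get_level_values(-1)
print("duplicated fields:", sorted(set(fields[fields.duplicated()])))
```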
\r\n \r\n\r\n\r\n```\r\nfemale_nose_1\r\nfemale_spine1_1\r\nfemale_spine2_1\r\nfemale_spine3_1\r\nfemale_spine4_1\r\nfemale_spine5_1\r\nmale_nose_2\r\nmale_spine1_2\r\nmale_spine2_2\r\nmale_spine3_2\r\nmale_spine4_2\r\nmale_spine5_2\r\npup1_nose_3\r\npup1_spine1_3\r\npup1_spine2_3\r\npup1_spine3_3\r\npup1_spine4_3\r\npup1_spine5_3\r\npup2_nose_4\r\npup2_spine1_4\r\npup2_spine2_4\r\npup2_spine3_4\r\npup2_spine4_4\r\npup2_spine5_4\r\npup3_nose_5\r\npup3_spine1_5\r\npup3_spine2_5\r\npup3_spine3_5\r\npup3_spine4_5\r\npup3_spine5_5\r\npup4_nose_6\r\npup4_spine1_6\r\npup4_spine2_6\r\npup4_spine3_6\r\npup4_spine4_6\r\npup4_spine5_6\r\n```", + "created_at": "2021-12-20T18:55:45Z", + "author": "sronilsson" + }, + { + "body": "Hi. Re: the printout, here are the screen grabs\r\n![Screenshot from 2021-12-20 21-11-50](https://user-images.githubusercontent.com/4267452/146826990-7cc70ad9-78e8-4f3f-8823-e52fcb747091.png)\r\n![Screenshot from 2021-12-20 21-13-02](https://user-images.githubusercontent.com/4267452/146827027-c6775be9-f289-41ba-8e28-bb86cf4ea807.png)\r\n.", + "created_at": "2021-12-20T20:13:38Z", + "author": "catubc" + }, + { + "body": "I should also mention that I'm using a special branch of Talmo's code that was developed specifically for our lab. It does bottom-up + ID prediction, and I think the code is still stuck at Ver1.1.0. Perhaps there are some incompatibilities there.\r\n\r\nI put both the .mp4 and the .slp files here (should be uploaded in 5 mins):\r\n\r\nhttps://drive.google.com/drive/folders/1VwAq2CuVRCdQlyr6xVQ1OT6FHn8HS4HD?usp=sharing\r\n\r\nIf you have any other suggestions or have a moment to try and load them, I'd really appreciate it.\r\nThanks so much!", + "created_at": "2021-12-20T20:17:08Z", + "author": "catubc" + }, + { + "body": "Thanks, could you also send me the CSV file, the one inside `project_folder/csv/input_csv`, the one with the SLEAP_multi in the top two lines, that I saw in one screenshot? ", + "created_at": "2021-12-20T20:35:16Z", + "author": "sronilsson" + }, + { + "body": "OK, I uploaded both the CSVs here:\r\n\r\nhttps://drive.google.com/drive/folders/1VwAq2CuVRCdQlyr6xVQ1OT6FHn8HS4HD?usp=sharing\r\n\r\nThe one inside the input_csv folder is labeled as such, and the one outside in the main location where the SLEAP and video files are has .csv instead of .slp. (I also saved the entire project directory as gerbils4.zip.)\r\n\r\nThanks so much, I'm excited to get this fixed and do some tracking.\r\n", + "created_at": "2021-12-20T21:40:49Z", + "author": "catubc" + } + ] + }, + { + "title": "wxpython linux install fails; wxMediaCtrl was explicitly requested but not found", + "body": "**Describe the bug**\r\nI am trying to install simba but am getting a wxMediaCtrl error. \r\n\r\n**To Reproduce**\r\nRun any of these commands; they all get stuck on the same error:\r\npython setup.py install\r\npip install . 
\r\npip install simba-uw-tf-dev --no-cache-dir\r\n\r\n**Desktop (please complete the following information):**\r\nLinux 18.04 \r\n\r\n```\r\n\r\n(simba) cat@cat-Precision-T3610:~/code/simba$ pip install simba-uw-tf-dev --no-cache-dir\r\nRequirement already satisfied: simba-uw-tf-dev in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (0.42)\r\nRequirement already satisfied: pandas==0.25.3 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.25.3)\r\nRequirement already satisfied: tqdm==4.30.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (4.30.0)\r\nRequirement already satisfied: scikit-image==0.14.2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.14.2)\r\nRequirement already satisfied: seaborn==0.9.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.9.0)\r\nRequirement already satisfied: numpy==1.18.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.18.1)\r\nRequirement already satisfied: tensorflow-gpu==1.14.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.14.0)\r\nRequirement already satisfied: deeplabcut==2.0.9 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (2.0.9)\r\nRequirement already satisfied: matplotlib==3.0.3 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (3.0.3)\r\nRequirement already satisfied: tabulate==0.8.3 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.8.3)\r\nRequirement already satisfied: eli5==0.10.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.10.1)\r\nRequirement already satisfied: yellowbrick==0.9.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.9.1)\r\nCollecting wxpython==4.0.4\r\n Downloading wxPython-4.0.4.tar.gz (68.8 MB)\r\n |████████████████████████████████| 68.8 MB 67.9 MB/s \r\n Preparing metadata (setup.py) ... 
done\r\nRequirement already satisfied: graphviz==0.11 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.11)\r\nRequirement already satisfied: Shapely==1.7 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.7.0)\r\nRequirement already satisfied: pyyaml==5.3.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (5.3.1)\r\nRequirement already satisfied: dtreeviz==0.8.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.8.1)\r\nRequirement already satisfied: scipy==1.1.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.1.0)\r\nRequirement already satisfied: scikit-learn==0.22.2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.22.2)\r\nRequirement already satisfied: xgboost==0.90 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.90)\r\nRequirement already satisfied: imblearn==0.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.0)\r\nRequirement already satisfied: Pillow==5.4.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (5.4.1)\r\nRequirement already satisfied: opencv-python==3.4.5.20 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (3.4.5.20)\r\nRequirement already satisfied: deepposekit==0.3.5 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.3.5)\r\nRequirement already satisfied: imutils==0.5.2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.5.2)\r\nRequirement already satisfied: imgaug==0.4.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.4.0)\r\nCollecting protobuf==3.6.0\r\n Downloading protobuf-3.6.0-cp36-cp36m-manylinux1_x86_64.whl (7.1 MB)\r\n |████████████████████████████████| 7.1 MB 20.3 MB/s \r\nCollecting wheel~=0.31.1\r\n Downloading wheel-0.31.1-py2.py3-none-any.whl (41 kB)\r\n |████████████████████████████████| 41 kB 15.5 MB/s \r\nCollecting tensorpack~=0.9.7.1\r\n Downloading tensorpack-0.9.7.1-py2.py3-none-any.whl (286 kB)\r\n |████████████████████████████████| 286 kB 29.6 MB/s \r\nRequirement already satisfied: setuptools in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from deeplabcut==2.0.9->simba-uw-tf-dev) (58.0.4)\r\nRequirement already satisfied: statsmodels~=0.9.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from deeplabcut==2.0.9->simba-uw-tf-dev) (0.9.0)\r\nCollecting ipython~=6.0.0\r\n Downloading ipython-6.0.0-py3-none-any.whl (736 kB)\r\n |████████████████████████████████| 736 kB 58.4 MB/s \r\nCollecting requests\r\n Downloading requests-2.26.0-py2.py3-none-any.whl (62 kB)\r\n |████████████████████████████████| 62 kB 26.8 MB/s \r\nRequirement already satisfied: click in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from deeplabcut==2.0.9->simba-uw-tf-dev) (8.0.3)\r\nRequirement already satisfied: patsy in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from deeplabcut==2.0.9->simba-uw-tf-dev) (0.5.2)\r\nCollecting easydict~=1.7\r\n Downloading easydict-1.9.tar.gz (6.4 kB)\r\n Preparing metadata (setup.py) ... 
done\r\nINFO: pip is looking at multiple versions of simba-uw-tf-dev to determine which version is compatible with other requirements. This could take a while.\r\nCollecting simba-uw-tf-dev\r\n Downloading Simba_UW_tf_dev-0.88.8-py3-none-any.whl (10.5 MB)\r\n |████████████████████████████████| 10.5 MB 36.5 MB/s \r\nRequirement already satisfied: plotly==4.9.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (4.9.0)\r\nRequirement already satisfied: numexpr==2.6.9 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (2.6.9)\r\nRequirement already satisfied: shap==0.35.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.35.0)\r\nRequirement already satisfied: numba==0.48.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.48.0)\r\nRequirement already satisfied: dash-color-picker==0.0.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.0.1)\r\nCollecting pyarrow==0.17.1\r\n Downloading pyarrow-0.17.1-cp36-cp36m-manylinux2014_x86_64.whl (63.8 MB)\r\n |████████████████████████████████| 63.8 MB 20.2 MB/s \r\nRequirement already satisfied: dash-html-components==1.0.3 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.0.3)\r\nRequirement already satisfied: dash-colorscales==0.0.4 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (0.0.4)\r\nRequirement already satisfied: dash==1.14.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.14.0)\r\nCollecting cefpython3==66.0\r\n Downloading cefpython3-66.0-py2.py3-none-manylinux1_x86_64.whl (79.6 MB)\r\n |████████████████████████████████| 79.6 MB 66.8 MB/s \r\nRequirement already satisfied: dash-core-components==1.10.2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (1.10.2)\r\nCollecting tables==3.6.1\r\n Downloading tables-3.6.1-cp36-cp36m-manylinux1_x86_64.whl (4.3 MB)\r\n |████████████████████████████████| 4.3 MB 60.0 MB/s \r\nRequirement already satisfied: h5py==2.9.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from simba-uw-tf-dev) (2.9.0)\r\nRequirement already satisfied: joblib>=0.11 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from scikit-learn==0.22.2->simba-uw-tf-dev) (1.1.0)\r\nRequirement already satisfied: flask-compress in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (1.10.1)\r\nRequirement already satisfied: Flask>=1.0.2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (2.0.2)\r\nRequirement already satisfied: dash-table==4.9.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (4.9.0)\r\nRequirement already satisfied: dash-renderer==1.6.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (1.6.0)\r\nRequirement already satisfied: future in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from dash==1.14.0->simba-uw-tf-dev) (0.18.2)\r\nRequirement already satisfied: colour in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from dtreeviz==0.8.1->simba-uw-tf-dev) (0.1.5)\r\nRequirement already satisfied: attrs>16.0.0 in 
/media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from eli5==0.10.1->simba-uw-tf-dev) (21.2.0)\r\nRequirement already satisfied: six in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from eli5==0.10.1->simba-uw-tf-dev) (1.16.0)\r\nRequirement already satisfied: jinja2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from eli5==0.10.1->simba-uw-tf-dev) (3.0.3)\r\nRequirement already satisfied: imbalanced-learn in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from imblearn==0.0->simba-uw-tf-dev) (0.6.2)\r\nRequirement already satisfied: imageio in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from imgaug==0.4.0->simba-uw-tf-dev) (2.9.0)\r\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (3.0.6)\r\nRequirement already satisfied: cycler>=0.10 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (0.11.0)\r\nRequirement already satisfied: kiwisolver>=1.0.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (1.3.1)\r\nRequirement already satisfied: python-dateutil>=2.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from matplotlib==3.0.3->simba-uw-tf-dev) (2.8.2)\r\nRequirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from numba==0.48.0->simba-uw-tf-dev) (0.31.0)\r\nRequirement already satisfied: pytz>=2017.2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from pandas==0.25.3->simba-uw-tf-dev) (2021.3)\r\nRequirement already satisfied: retrying>=1.3.3 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from plotly==4.9.0->simba-uw-tf-dev) (1.3.3)\r\nRequirement already satisfied: dask[array]>=1.0.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (2021.3.0)\r\nRequirement already satisfied: networkx>=1.8 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (2.5.1)\r\nRequirement already satisfied: PyWavelets>=0.4.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (1.1.1)\r\nRequirement already satisfied: cloudpickle>=0.2.1 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from scikit-image==0.14.2->simba-uw-tf-dev) (2.0.0)\r\nRequirement already satisfied: toolz>=0.8.2 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from dask[array]>=1.0.0->scikit-image==0.14.2->simba-uw-tf-dev) (0.11.2)\r\nRequirement already satisfied: itsdangerous>=2.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (2.0.1)\r\nRequirement already satisfied: Werkzeug>=2.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (2.0.2)\r\nRequirement already satisfied: MarkupSafe>=2.0 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from jinja2->eli5==0.10.1->simba-uw-tf-dev) (2.0.1)\r\nRequirement already satisfied: decorator<5,>=4.3 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from 
networkx>=1.8->scikit-image==0.14.2->simba-uw-tf-dev) (4.4.2)\r\nRequirement already satisfied: brotli in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from flask-compress->dash==1.14.0->simba-uw-tf-dev) (1.0.9)\r\nRequirement already satisfied: importlib-metadata in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from click->deeplabcut==2.0.9->simba-uw-tf-dev) (4.8.3)\r\nRequirement already satisfied: dataclasses in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from Werkzeug>=2.0->Flask>=1.0.2->dash==1.14.0->simba-uw-tf-dev) (0.8)\r\nRequirement already satisfied: zipp>=0.5 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from importlib-metadata->click->deeplabcut==2.0.9->simba-uw-tf-dev) (3.6.0)\r\nRequirement already satisfied: typing-extensions>=3.6.4 in /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages (from importlib-metadata->click->deeplabcut==2.0.9->simba-uw-tf-dev) (4.0.1)\r\nBuilding wheels for collected packages: wxpython\r\n Building wheel for wxpython (setup.py) ... error\r\n ERROR: Command errored out with exit status 1:\r\n command: /media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' bdist_wheel -d /tmp/pip-wheel-cd1plwej\r\n cwd: /tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/\r\n Complete output (527 lines):\r\n /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages/setuptools/dist.py:720: UserWarning: Usage of dash-separated 'license-file' will not be supported in future versions. Please use the underscore name 'license_file' instead\r\n % (opt, underscore_opt)\r\n /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages/setuptools/dist.py:294: DistDeprecationWarning: use_2to3 is ignored.\r\n warnings.warn(f\"{attr} is ignored.\", DistDeprecationWarning)\r\n running bdist_wheel\r\n running build\r\n WARNING: Building this way assumes that all generated files have been\r\n generated already. If that is not the case then use build.py directly\r\n to generate the source and perform the build stage. 
You can use\r\n --skip-build with the bdist_* or install commands to avoid this\r\n message and the wxWidgets and Phoenix build steps in the future.\r\n \r\n \"/media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6\" -u build.py build\r\n Will build using: \"/media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6\"\r\n 3.6.0 | packaged by conda-forge | (default, Feb 9 2017, 14:36:55)\r\n [GCC 4.8.2 20140120 (Red Hat 4.8.2-15)]\r\n Python's architecture is 64bit\r\n cfg.VERSION: 4.0.4\r\n \r\n Running command: build\r\n Running command: build_wx\r\n wxWidgets build options: ['--wxpython', '--unicode', '--gtk3']\r\n Configure options: ['--enable-unicode', '--with-gtk=3', '--with-opengl', '--enable-sound', '--enable-graphics_ctx', '--enable-mediactrl', '--enable-display', '--enable-geometry', '--enable-debug_flag', '--enable-optimise', '--disable-debugreport', '--enable-uiactionsim', '--enable-autoidman', '--with-sdl']\r\n /tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/ext/wxWidgets/configure --enable-unicode --with-gtk=3 --with-opengl --enable-sound --enable-graphics_ctx --enable-mediactrl --enable-display --enable-geometry --enable-debug_flag --enable-optimise --disable-debugreport --enable-uiactionsim --enable-autoidman --with-sdl\r\n checking build system type... x86_64-pc-linux-gnu\r\n checking host system type... x86_64-pc-linux-gnu\r\n checking for --disable-gui... no\r\n checking for --enable-monolithic... no\r\n checking for --enable-plugins... no\r\n checking for --without-subdirs... no\r\n checking for --enable-official_build... no\r\n checking for --disable-all-features... no\r\n checking for --enable-universal... no\r\n checking for --enable-nanox... no\r\n checking for --enable-gpe... no\r\n checking for toolkit... gtk\r\n checking for --with-libpng... yes\r\n checking for --with-libjpeg... yes\r\n checking for --with-libtiff... yes\r\n checking for --without-libjbig... no\r\n checking for --without-liblzma... no\r\n checking for --with-libxpm... yes\r\n checking for --with-libiconv... yes\r\n checking for --with-libmspack... no\r\n checking for --without-gtkprint... no\r\n checking for --with-gnomevfs... no\r\n checking for --with-libnotify... yes\r\n checking for --with-hildon... no\r\n checking for --with-opengl... yes\r\n checking for --with-dmalloc... no\r\n checking for --with-sdl... yes\r\n checking for --with-regex... yes\r\n checking for --with-zlib... yes\r\n checking for --with-expat... yes\r\n checking for --with-macosx-sdk...\r\n checking for --with-macosx-version-min...\r\n checking for --enable-debug... default\r\n checking for --disable-debug_flag... no\r\n checking for --enable-debug_info... no\r\n checking for --enable-debug_gdb... no\r\n checking for --enable-debug_cntxt... no\r\n checking for --enable-mem_tracing... no\r\n checking for --disable-shared... no\r\n checking for --enable-stl... no\r\n checking for --enable-std_containers... no\r\n checking for --enable-std_iostreams... yes\r\n checking for --enable-std_string... yes\r\n checking for --enable-std_string_conv_in_wxstring... no\r\n checking for --disable-unicode... no\r\n checking for --enable-mslu... no\r\n checking for --enable-utf8... no\r\n checking for --enable-utf8only... no\r\n checking for --enable-extended_rtti... no\r\n checking for --disable-optimise... no\r\n checking for --enable-profile... no\r\n checking for --enable-no_rtti... no\r\n checking for --enable-no_exceptions... no\r\n checking for --enable-permissive... no\r\n checking for --enable-no_deps... 
no\r\n checking for --disable-vararg_macros... no\r\n checking for --enable-universal_binary... no\r\n checking for --enable-macosx_arch... no\r\n checking for --enable-compat26... no\r\n checking for --disable-compat28... no\r\n checking for --disable-rpath... no\r\n checking for --enable-objc_uniquifying... no\r\n checking for --disable-visibility... no\r\n checking for --disable-tls... no\r\n checking for --enable-intl... yes\r\n checking for --enable-xlocale... yes\r\n checking for --enable-config... yes\r\n checking for --enable-protocols... yes\r\n checking for --enable-ftp... yes\r\n checking for --enable-http... yes\r\n checking for --enable-fileproto... yes\r\n checking for --enable-sockets... yes\r\n checking for --enable-ipv6... no\r\n checking for --enable-ole... yes\r\n checking for --enable-dataobj... yes\r\n checking for --enable-ipc... yes\r\n checking for --enable-baseevtloop... yes\r\n checking for --enable-epollloop... yes\r\n checking for --enable-selectloop... yes\r\n checking for --enable-any... yes\r\n checking for --enable-apple_ieee... yes\r\n checking for --enable-arcstream... yes\r\n checking for --enable-base64... yes\r\n checking for --enable-backtrace... yes\r\n checking for --enable-catch_segvs... yes\r\n checking for --enable-cmdline... yes\r\n checking for --enable-datetime... yes\r\n checking for --enable-debugreport... no\r\n checking for --enable-dialupman... yes\r\n checking for --enable-dynlib... yes\r\n checking for --enable-dynamicloader... yes\r\n checking for --enable-exceptions... yes\r\n checking for --enable-ffile... yes\r\n checking for --enable-file... yes\r\n checking for --enable-filehistory... yes\r\n checking for --enable-filesystem... yes\r\n checking for --enable-fontenum... yes\r\n checking for --enable-fontmap... yes\r\n checking for --enable-fs_archive... yes\r\n checking for --enable-fs_inet... yes\r\n checking for --enable-fs_zip... yes\r\n checking for --enable-fsvolume... yes\r\n checking for --enable-fswatcher... yes\r\n checking for --enable-geometry... yes\r\n checking for --enable-log... yes\r\n checking for --enable-longlong... yes\r\n checking for --enable-mimetype... yes\r\n checking for --enable-printfposparam... yes\r\n checking for --enable-snglinst... yes\r\n checking for --enable-sound... yes\r\n checking for --enable-stdpaths... yes\r\n checking for --enable-stopwatch... yes\r\n checking for --enable-streams... yes\r\n checking for --enable-sysoptions... yes\r\n checking for --enable-tarstream... yes\r\n checking for --enable-textbuf... yes\r\n checking for --enable-textfile... yes\r\n checking for --enable-timer... yes\r\n checking for --enable-variant... yes\r\n checking for --enable-zipstream... yes\r\n checking for --enable-url... yes\r\n checking for --enable-protocol... yes\r\n checking for --enable-protocol_http... yes\r\n checking for --enable-protocol_ftp... yes\r\n checking for --enable-protocol_file... yes\r\n checking for --enable-threads... yes\r\n checking for --enable-iniconf... no\r\n checking for --enable-regkey... yes\r\n checking for --enable-docview... yes\r\n checking for --enable-help... yes\r\n checking for --enable-mshtmlhelp... yes\r\n checking for --enable-html... yes\r\n checking for --enable-htmlhelp... yes\r\n checking for --enable-xrc... yes\r\n checking for --enable-aui... yes\r\n checking for --enable-propgrid... yes\r\n checking for --enable-ribbon... yes\r\n checking for --enable-stc... yes\r\n checking for --enable-constraints... yes\r\n checking for --enable-loggui... 
yes\r\n checking for --enable-logwin... yes\r\n checking for --enable-logdialog... yes\r\n checking for --enable-mdi... yes\r\n checking for --enable-mdidoc... yes\r\n checking for --enable-mediactrl... yes\r\n checking for --enable-gstreamer8... no\r\n checking for --enable-richtext... yes\r\n checking for --enable-postscript... yes\r\n checking for --enable-printarch... yes\r\n checking for --enable-svg... yes\r\n checking for --enable-webkit... yes\r\n checking for --enable-webview... yes\r\n checking for --enable-graphics_ctx... yes\r\n checking for --enable-clipboard... yes\r\n checking for --enable-dnd... yes\r\n checking for --disable-controls... no\r\n checking for --enable-markup... yes\r\n checking for --enable-accel... yes\r\n checking for --enable-animatectrl... yes\r\n checking for --enable-bannerwindow... yes\r\n checking for --enable-artstd... yes\r\n checking for --enable-arttango... auto\r\n checking for --enable-bmpbutton... yes\r\n checking for --enable-bmpcombobox... yes\r\n checking for --enable-button... yes\r\n checking for --enable-calendar... yes\r\n checking for --enable-caret... yes\r\n checking for --enable-checkbox... yes\r\n checking for --enable-checklst... yes\r\n checking for --enable-choice... yes\r\n checking for --enable-choicebook... yes\r\n checking for --enable-collpane... yes\r\n checking for --enable-colourpicker... yes\r\n checking for --enable-combobox... yes\r\n checking for --enable-comboctrl... yes\r\n checking for --enable-commandlinkbutton... yes\r\n checking for --enable-dataviewctrl... yes\r\n checking for --enable-datepick... yes\r\n checking for --enable-detect_sm... yes\r\n checking for --enable-dirpicker... yes\r\n checking for --enable-display... yes\r\n checking for --enable-editablebox... yes\r\n checking for --enable-filectrl... yes\r\n checking for --enable-filepicker... yes\r\n checking for --enable-fontpicker... yes\r\n checking for --enable-gauge... yes\r\n checking for --enable-grid... yes\r\n checking for --enable-headerctrl... yes\r\n checking for --enable-hyperlink... yes\r\n checking for --enable-imaglist... yes\r\n checking for --enable-infobar... yes\r\n checking for --enable-listbook... yes\r\n checking for --enable-listbox... yes\r\n checking for --enable-listctrl... yes\r\n checking for --enable-notebook... yes\r\n checking for --enable-notifmsg... yes\r\n checking for --enable-odcombobox... yes\r\n checking for --enable-popupwin... yes\r\n checking for --enable-prefseditor... yes\r\n checking for --enable-radiobox... yes\r\n checking for --enable-radiobtn... yes\r\n checking for --enable-richmsgdlg... yes\r\n checking for --enable-richtooltip... yes\r\n checking for --enable-rearrangectrl... yes\r\n checking for --enable-sash... yes\r\n checking for --enable-scrollbar... yes\r\n checking for --enable-searchctrl... yes\r\n checking for --enable-slider... yes\r\n checking for --enable-spinbtn... yes\r\n checking for --enable-spinctrl... yes\r\n checking for --enable-splitter... yes\r\n checking for --enable-statbmp... yes\r\n checking for --enable-statbox... yes\r\n checking for --enable-statline... yes\r\n checking for --enable-stattext... yes\r\n checking for --enable-statusbar... yes\r\n checking for --enable-taskbaricon... yes\r\n checking for --enable-tbarnative... yes\r\n checking for --enable-textctrl... yes\r\n checking for --enable-timepick... yes\r\n checking for --enable-tipwindow... yes\r\n checking for --enable-togglebtn... yes\r\n checking for --enable-toolbar... yes\r\n checking for --enable-toolbook... 
yes\r\n checking for --enable-treebook... yes\r\n checking for --enable-treectrl... yes\r\n checking for --enable-treelist... yes\r\n checking for --enable-commondlg... yes\r\n checking for --enable-aboutdlg... yes\r\n checking for --enable-choicedlg... yes\r\n checking for --enable-coldlg... yes\r\n checking for --enable-filedlg... yes\r\n checking for --enable-finddlg... yes\r\n checking for --enable-fontdlg... yes\r\n checking for --enable-dirdlg... yes\r\n checking for --enable-msgdlg... yes\r\n checking for --enable-numberdlg... yes\r\n checking for --enable-splash... yes\r\n checking for --enable-textdlg... yes\r\n checking for --enable-tipdlg... yes\r\n checking for --enable-progressdlg... yes\r\n checking for --enable-wizarddlg... yes\r\n checking for --enable-menus... yes\r\n checking for --enable-miniframe... yes\r\n checking for --enable-tooltips... yes\r\n checking for --enable-splines... yes\r\n checking for --enable-mousewheel... yes\r\n checking for --enable-validators... yes\r\n checking for --enable-busyinfo... yes\r\n checking for --enable-hotkey... auto\r\n checking for --enable-joystick... yes\r\n checking for --enable-metafile... auto\r\n checking for --enable-dragimage... yes\r\n checking for --enable-accessibility... no\r\n checking for --enable-uiactionsim... yes\r\n checking for --enable-dctransform... yes\r\n checking for --enable-webviewwebkit... yes\r\n checking for --enable-palette... yes\r\n checking for --enable-image... yes\r\n checking for --enable-gif... yes\r\n checking for --enable-pcx... yes\r\n checking for --enable-tga... yes\r\n checking for --enable-iff... yes\r\n checking for --enable-pnm... yes\r\n checking for --enable-xpm... yes\r\n checking for --enable-ico_cur... yes\r\n checking for --enable-dccache... yes\r\n checking for --enable-ps-in-msw... yes\r\n checking for --enable-ownerdrawn... yes\r\n checking for --enable-uxtheme... yes\r\n checking for --enable-wxdib... yes\r\n checking for --enable-webviewie... yes\r\n checking for --enable-autoidman... yes\r\n checking for gcc... gcc\r\n checking whether the C compiler works... yes\r\n checking for C compiler default output file name... a.out\r\n checking for suffix of executables...\r\n checking whether we are cross compiling... no\r\n checking for suffix of object files... o\r\n checking whether we are using the GNU C compiler... yes\r\n checking whether gcc accepts -g... yes\r\n checking for gcc option to accept ISO C89... none needed\r\n checking whether we are using the Intel C compiler... no\r\n checking how to run the C preprocessor... gcc -E\r\n checking for grep that handles long lines and -e... /bin/grep\r\n checking for egrep... /bin/grep -E\r\n checking whether gcc needs -traditional... no\r\n checking for g++... g++\r\n checking whether we are using the GNU C++ compiler... yes\r\n checking whether g++ accepts -g... yes\r\n checking whether we are using the Intel C++ compiler... no\r\n checking whether we are using the Sun C++ compiler... no\r\n checking for ar... ar\r\n checking for ANSI C header files... yes\r\n checking for sys/types.h... yes\r\n checking for sys/stat.h... yes\r\n checking for stdlib.h... yes\r\n checking for string.h... yes\r\n checking for memory.h... yes\r\n checking for strings.h... yes\r\n checking for inttypes.h... yes\r\n checking for stdint.h... yes\r\n checking for unistd.h... yes\r\n checking for langinfo.h... yes\r\n checking for wchar.h... yes\r\n checking for sys/select.h... yes\r\n checking for cxxabi.h... 
yes\r\n checking for an ANSI C-conforming const... yes\r\n checking for inline... inline\r\n checking size of short... 2\r\n checking size of void *... 8\r\n checking size of int... 4\r\n checking size of long... 8\r\n checking size of size_t... 8\r\n checking size of long long... 8\r\n checking size of wchar_t... 4\r\n checking for va_copy... yes\r\n checking whether the compiler supports variadic macros... yes\r\n checking for _FILE_OFFSET_BITS value needed for large files... 64\r\n checking if large file support is available... yes\r\n checking for _LARGEFILE_SOURCE value needed for large files... no\r\n checking whether byte ordering is bigendian... no\r\n checking for iostream... yes\r\n checking if C++ compiler supports the explicit keyword... yes\r\n checking for std::wstring in <string>... yes\r\n checking for std::istream... yes\r\n checking for std::ostream... yes\r\n checking how to run the C++ preprocessor... g++ -E\r\n checking type_traits usability... yes\r\n checking type_traits presence... yes\r\n checking for type_traits... yes\r\n checking for __sync_fetch_and_add and __sync_sub_and_fetch builtins... yes\r\n checking for libraries directories... /usr/lib/x86_64-linux-gnu /usr/lib\r\n checking for cos... no\r\n checking for floor... no\r\n checking if floating point functions link without -lm... no\r\n checking for sin... yes\r\n checking for ceil... yes\r\n checking if floating point functions link with -lm... yes\r\n checking for strtoull... yes\r\n checking for pkg-config... /usr/bin/pkg-config\r\n checking pkg-config is at least version 0.9.0... yes\r\n configure: WARNING: Defaulting to the builtin regex library for Unicode build.\r\n checking for zlib.h >= 1.1.4... yes\r\n checking for zlib.h... (cached) yes\r\n checking for deflate in -lz... yes\r\n checking for png.h > 0.90... yes\r\n checking for png.h... (cached) yes\r\n checking for png_sig_cmp in -lpng... yes\r\n checking for jpeglib.h... no\r\n configure: WARNING: system jpeg library not found, will use built-in instead\r\n checking for tiffio.h... no\r\n configure: WARNING: system tiff library not found, will use built-in instead\r\n checking for lzma_code in -llzma... no\r\n checking for jbg_dec_init in -ljbig... no\r\n checking for expat.h... yes\r\n checking if expat.h is valid C++ header... yes\r\n checking for XML_ParserCreate in -lexpat... yes\r\n checking for GTK+ version...\r\n checking for pkg-config... /usr/bin/pkg-config\r\n checking for GTK+ - version >= 3.0.0... yes (version 3.22.30)\r\n checking for X11/Xlib.h... yes\r\n checking for X11/XKBlib.h... yes\r\n checking for Xxf86vm... yes\r\n checking for X11/extensions/xf86vmode.h... yes\r\n checking for SM... yes\r\n checking for OpenGL headers... found in /usr/include\r\n checking for GL/gl.h... yes\r\n checking for GL/glu.h... yes\r\n checking for GL... yes\r\n checking for GLU... yes\r\n checking if the linker accepts --version-script... yes\r\n checking for symbols visibility support... yes\r\n checking for broken libstdc++ visibility... no\r\n checking for mode_t... yes\r\n checking for off_t... yes\r\n checking for pid_t... yes\r\n checking for size_t... yes\r\n checking for ssize_t... yes\r\n checking if size_t is unsigned int... no\r\n checking if size_t is unsigned long... yes\r\n checking if wchar_t is separate type... yes\r\n checking for pw_gecos in struct passwd... yes\r\n checking for wcslen... yes\r\n checking for wcsftime... yes\r\n checking for strnlen... yes\r\n checking for wcsdup... yes\r\n checking for wcsnlen... 
yes\r\n checking for wcscasecmp... yes\r\n checking for wcsncasecmp... yes\r\n checking for mbstate_t... yes\r\n checking for wcsrtombs... yes\r\n checking for snprintf... yes\r\n checking for vsnprintf... yes\r\n checking for vsscanf... yes\r\n checking for vsnprintf declaration... yes\r\n checking if vsnprintf declaration is broken... no\r\n checking for snprintf declaration... yes\r\n checking if snprintf supports positional arguments... yes\r\n checking for vsscanf declaration... yes\r\n checking if vsscanf() declaration is broken... no\r\n checking for putws... no\r\n checking for fputws... yes\r\n checking for wprintf... yes\r\n checking for vswprintf... yes\r\n checking for vswscanf... yes\r\n checking for _vsnwprintf... no\r\n checking for fsync... yes\r\n checking for round... yes\r\n checking for iconv... yes\r\n checking if iconv needs const... no\r\n checking for sigaction... yes\r\n checking for sa_handler type... int\r\n checking for backtrace() in <execinfo.h>... checking for library containing backtrace... none required\r\n yes\r\n checking for __cxa_demangle() in <cxxabi.h>... yes\r\n checking for mkstemp... yes\r\n checking for statfs... yes\r\n checking for statfs declaration... yes\r\n checking for fcntl... yes\r\n checking for setenv... yes\r\n checking for unsetenv... yes\r\n checking for nanosleep... yes\r\n checking for uname... yes\r\n checking for strtok_r... yes\r\n checking for inet_addr... yes\r\n checking for inet_aton... yes\r\n checking for fdopen... yes\r\n checking for sysconf... yes\r\n checking for getpwuid_r... yes\r\n checking for getgrgid_r... yes\r\n checking whether pthreads work with -pthread... yes\r\n checking if more special flags are required for pthreads... no\r\n checking for pthread_setconcurrency... yes\r\n checking for pthread_cleanup_push/pop... yes\r\n checking for sched.h... yes\r\n checking for sched_yield... yes\r\n checking for pthread_attr_getschedpolicy... yes\r\n checking for pthread_attr_setschedparam... yes\r\n checking for sched_get_priority_max... yes\r\n checking for pthread_cancel... yes\r\n checking for pthread_mutex_timedlock... yes\r\n checking for pthread_attr_setstacksize... yes\r\n checking for pthread_mutexattr_t... yes\r\n checking for pthread_mutexattr_settype declaration... yes\r\n checking for abi::__forced_unwind() in <cxxabi.h>... yes\r\n checking for localtime_r... yes\r\n checking for gmtime_r... yes\r\n checking how many arguments gethostbyname_r() takes... six\r\n checking how many arguments getservbyname_r() takes... six\r\n checking for dlopen... no\r\n checking for dlopen in -ldl... yes\r\n checking for dlerror... no\r\n checking for dlerror in -ldl... yes\r\n checking for sys/inotify.h... yes\r\n checking for SNDCTL_DSP_SPEED in sys/soundcard.h... yes\r\n checking for SDL... configure: SDL 2.0 not available. Falling back to 1.2.\r\n checking for sdl-config... no\r\n checking for SDL - version >= 1.2.0... no\r\n *** The sdl-config script installed by SDL could not be found\r\n *** If SDL was installed in PREFIX, make sure PREFIX/bin is in\r\n *** your path, or set the SDL_CONFIG environment variable to the\r\n *** full path to sdl-config.\r\n checking for GTKPRINT... yes\r\n checking for LIBNOTIFY... checking for LIBNOTIFY... configure: WARNING: libnotify not found, wxNotificationMessage will use generic implementation.\r\n checking for complete xlocale... no\r\n checking for sys/epoll.h... yes\r\n checking for gettimeofday... yes\r\n checking whether gettimeofday takes two arguments... yes\r\n checking for timezone variable in <time.h>... 
timezone\r\n checking for localtime... yes\r\n checking for tm_gmtoff in struct tm... yes\r\n checking for setpriority... yes\r\n checking for socket... yes\r\n checking what is the type of the third argument of getsockname... socklen_t\r\n checking what is the type of the fifth argument of getsockopt... socklen_t\r\n checking for linux/joystick.h... yes\r\n checking for python... /media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6\r\n checking for WEBKIT... configure: WARNING: webkit2gtk not found, falling back to webkitgtk\r\n checking for WEBKIT... configure: WARNING: webkitgtk not found.\r\n configure: WARNING: WebKit not available, disabling wxWebView\r\n checking for CAIRO... yes\r\n checking for cairo_push_group... yes\r\n checking for GST... configure: WARNING: GStreamer 1.0 not available, falling back to 0.10\r\n checking for GST... configure: WARNING: GStreamer 0.10 not available, falling back to 0.8\r\n configure: WARNING: wxMediaCtrl can't be built because GStreamer not available\r\n configure: error: wxMediaCtrl was explicitly requested but can't be built.\r\n \r\n Fix the problems reported above or don't use --enable-mediactrl configure option.\r\n \r\n Error running configure\r\n ERROR: failed building wxWidgets\r\n Traceback (most recent call last):\r\n File \"build.py\", line 1321, in cmd_build_wx\r\n wxbuild.main(wxDir(), build_options)\r\n File \"/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/buildtools/build_wxwidgets.py\", line 375, in main\r\n \"Error running configure\")\r\n File \"/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/buildtools/build_wxwidgets.py\", line 85, in exitIfError\r\n raise builder.BuildError(msg)\r\n buildtools.builder.BuildError: Error running configure\r\n Finished command: build_wx (0m14.834s)\r\n Finished command: build (0m14.834s)\r\n Command '\"/media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\n ERROR: Failed building wheel for wxpython\r\n Running setup.py clean for wxpython\r\nFailed to build wxpython\r\nInstalling collected packages: wxpython, tables, pyarrow, cefpython3, simba-uw-tf-dev\r\n Running setup.py install for wxpython ... error\r\n ERROR: Command errored out with exit status 1:\r\n command: /media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-gwyaa03w/install-record.txt --single-version-externally-managed --compile --install-headers /media/cat/4TBSSD/anaconda3/envs/simba/include/python3.6m/wxpython\r\n cwd: /tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/\r\n Complete output (527 lines):\r\n /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages/setuptools/dist.py:720: UserWarning: Usage of dash-separated 'license-file' will not be supported in future versions. 
Please use the underscore name 'license_file' instead\r\n % (opt, underscore_opt)\r\n /media/cat/4TBSSD/anaconda3/envs/simba/lib/python3.6/site-packages/setuptools/dist.py:294: DistDeprecationWarning: use_2to3 is ignored.\r\n warnings.warn(f\"{attr} is ignored.\", DistDeprecationWarning)\r\n running install\r\n running build\r\n [... ~500 lines of wxWidgets configure output, identical to the failed wheel build above, ending in the same error ...]\r\n checking for GST... configure: WARNING: GStreamer 1.0 not available, falling back to 0.10\r\n checking for GST... configure: WARNING: GStreamer 0.10 not available, falling back to 0.8\r\n configure: WARNING: wxMediaCtrl can't be built because GStreamer not available\r\n configure: error: wxMediaCtrl was explicitly requested but can't be built.\r\n \r\n Fix the problems reported above or don't use --enable-mediactrl configure option.\r\n \r\n Error running configure\r\n ERROR: failed building wxWidgets\r\n Traceback (most recent call last):\r\n File \"build.py\", line 1321, in cmd_build_wx\r\n wxbuild.main(wxDir(), build_options)\r\n File \"/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/buildtools/build_wxwidgets.py\", line 375, in main\r\n \"Error running configure\")\r\n File \"/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/buildtools/build_wxwidgets.py\", line 85, in exitIfError\r\n raise builder.BuildError(msg)\r\n buildtools.builder.BuildError: Error running configure\r\n Finished command: build_wx (0m19.357s)\r\n Finished command: build (0m19.357s)\r\n Command '\"/media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: /media/cat/4TBSSD/anaconda3/envs/simba/bin/python3.6 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-35mdroqt/wxpython_ee1295b050eb41de8b5692072d51130c/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-gwyaa03w/install-record.txt --single-version-externally-managed --compile --install-headers /media/cat/4TBSSD/anaconda3/envs/simba/include/python3.6m/wxpython Check the logs for full command output.\r\n(simba) 
cat@cat-Precision-T3610:~/code/simba$ \r\n```\r\n",
+ "user": "catubc",
+ "reaction_cnt": 0,
+ "created_at": "2021-12-20T09:06:46Z",
+ "updated_at": "2021-12-20T10:50:34Z",
+ "author": "catubc",
+ "comments": [
+ {
+ "body": "Update: I installed GStreamer from here:\r\nhttps://gstreamer.freedesktop.org/documentation/installing/on-linux.html?gi-language=c\r\n\r\nBut am still getting a crash on install. \r\nAny advice?\r\nThanks so much",
+ "created_at": "2021-12-20T09:07:34Z",
+ "author": "catubc"
+ },
+ {
+ "body": "Update: FYI: I restarted the install in a fresh environment, using python 3.6 and after clearing the pip cache.\r\n\r\nHere's the end of the output (the entire thing was too long):\r\n\r\n```\r\n[783/869] Linking build/waf/3.6/gtk3/_glcanvas.cpython-36m-x86_64-linux-gnu.so\r\n [784/869] Compiling sip/cpp/sip_propgridwxPropertyGridPage.cpp\r\n [785/869] Compiling sip/cpp/sip_propgridwxPGVariant.cpp\r\n [786/869] Linking build/waf/3.6/gtk3/_html2.cpython-36m-x86_64-linux-gnu.so\r\n [787/869] Compiling sip/cpp/sip_propgridwxPGVIterator.cpp\r\n /usr/bin/ld: cannot find -lwx_gtk3u_webview-3.0\r\n collect2: error: ld returned 1 exit status\r\n \r\n ../../../../sip/cpp/sip_propgridwxPGProperty.cpp: In function ‘PyObject* meth_wxPGProperty_AddChild(PyObject*, PyObject*, PyObject*)’:\r\n ../../../../sip/cpp/sip_propgridwxPGProperty.cpp:1402:34: warning: ‘void wxPGProperty::AddChild(wxPGProperty*)’ is deprecated [-Wdeprecated-declarations]\r\n sipCpp->AddChild(prop);\r\n ^\r\n In file included from ../../../../ext/wxWidgets/include/wx/wx.h:14:0,\r\n from ../../../../wx/include/wxPython/wxpy_api.h:41,\r\n from ../../../../sip/cpp/sipAPI_propgrid.h:3308,\r\n from ../../../../sip/cpp/sip_propgridwxPGProperty.cpp:10:\r\n ../../../../ext/wxWidgets/include/wx/propgrid/property.h:2231:24: note: declared here\r\n wxDEPRECATED( void AddChild( wxPGProperty* prop ) );\r\n ^\r\n ../../../../ext/wxWidgets/include/wx/defs.h:654:43: note: in definition of macro ‘wxDEPRECATED’\r\n #define wxDEPRECATED(x) wxDEPRECATED_DECL x\r\n ^\r\n ../../../../sip/cpp/sip_propgridwxPGProperty.cpp: In function ‘PyObject* meth_wxPGProperty_GetValueString(PyObject*, PyObject*, PyObject*)’:\r\n ../../../../sip/cpp/sip_propgridwxPGProperty.cpp:3142:69: warning: ‘wxString wxPGProperty::GetValueString(int) const’ is deprecated [-Wdeprecated-declarations]\r\n sipRes = new ::wxString(sipCpp->GetValueString(argFlags));\r\n ^\r\n In file included from ../../../../ext/wxWidgets/include/wx/wx.h:14:0,\r\n from ../../../../wx/include/wxPython/wxpy_api.h:41,\r\n from ../../../../sip/cpp/sipAPI_propgrid.h:3308,\r\n from ../../../../sip/cpp/sip_propgridwxPGProperty.cpp:10:\r\n ../../../../ext/wxWidgets/include/wx/propgrid/property.h:1657:28: note: declared here\r\n wxDEPRECATED( wxString GetValueString( int argFlags = 0 ) const );\r\n ^\r\n ../../../../ext/wxWidgets/include/wx/defs.h:654:43: note: in definition of macro ‘wxDEPRECATED’\r\n #define wxDEPRECATED(x) wxDEPRECATED_DECL x\r\n ^\r\n \r\n Waf: Leaving directory `/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/waf/3.6/gtk3'\r\n Build failed\r\n -> task in '_html2' failed with exit status 1 (run with -v to display more information)\r\n Command '\"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" /tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/bin/waf-2.0.8 --wx_config=/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/wxbld/gtk3/wx-config --gtk3 
--python=\"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" --out=build/waf/3.6/gtk3 configure build ' failed with exit code 1.\r\n Finished command: build_py (4m47.316s)\r\n Finished command: build (5m55.162s)\r\n Command '\"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\n ERROR: Failed building wheel for wxpython\r\n Running setup.py clean for wxpython\r\n Building wheel for dash-renderer (setup.py) ... done\r\n Created wheel for dash-renderer: filename=dash_renderer-1.6.0-py3-none-any.whl size=1182219 sha256=4e2731d37cc072433bc6db662d7963b1366ac97f04c9aff330f5881df1f5cb5c\r\n Stored in directory: /home/cat/.cache/pip/wheels/62/cf/b4/26a5faad18c3b91b3228c4e187479671d09a4e35bc8aabbd44\r\n Building wheel for dash-table (setup.py) ... done\r\n Created wheel for dash-table: filename=dash_table-4.9.0-py3-none-any.whl size=1780776 sha256=aa4b1a5aaebcde627b32e9a2a7724996a91a07e1e95e92d6fb747ef08d327758\r\n Stored in directory: /home/cat/.cache/pip/wheels/1e/49/02/9d6100a6639526779f71579772b875088dab6d194cfb1fa084\r\n Building wheel for retrying (setup.py) ... done\r\n Created wheel for retrying: filename=retrying-1.3.3-py3-none-any.whl size=11448 sha256=07f8377ece9440971ad2233cd329c18f0c9adb99bab2261b3c21f8c47cdcf4d5\r\n Stored in directory: /home/cat/.cache/pip/wheels/ac/cb/8a/b27bf6323e2f4c462dcbf77d70b7c5e7868a7fbe12871770cf\r\n Building wheel for future (setup.py) ... done\r\n Created wheel for future: filename=future-0.18.2-py3-none-any.whl size=491070 sha256=9d0795f967bf8e936987670aaf06d09876b6e17f12e0def712e92525776c3029\r\n Stored in directory: /home/cat/.cache/pip/wheels/6e/9c/ed/4499c9865ac1002697793e0ae05ba6be33553d098f3347fb94\r\nSuccessfully built dash dash-color-picker dash-colorscales dash-core-components dash-html-components dtreeviz imutils pyyaml shap tabulate dash-renderer dash-table retrying future\r\nFailed to build wxpython\r\nInstalling collected packages: zipp, typing-extensions, six, pyyaml, numpy, MarkupSafe, importlib-metadata, dataclasses, Werkzeug, toolz, scipy, python-dateutil, pyparsing, kiwisolver, joblib, jinja2, itsdangerous, decorator, dask, cycler, click, scikit-learn, retrying, PyWavelets, pytz, Pillow, networkx, matplotlib, Flask, cloudpickle, brotli, tqdm, tabulate, shapely, scikit-image, plotly, patsy, pandas, opencv-python, numexpr, llvmlite, imbalanced-learn, imageio, graphviz, future, flask-compress, dash-table, dash-renderer, dash-html-components, dash-core-components, colour, attrs, yellowbrick, xgboost, wxpython, tables, statsmodels, shap, seaborn, pyarrow, numba, imutils, imgaug, imblearn, h5py, eli5, dtreeviz, dash-colorscales, dash-color-picker, dash, cefpython3, simba-uw-tf-dev\r\n Running setup.py install for wxpython ... 
error\r\n ERROR: Command errored out with exit status 1:\r\n command: /media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-wg5mz2ez/install-record.txt --single-version-externally-managed --compile --install-headers /media/cat/4TBSSD/anaconda3/envs/simba2/include/python3.6m/wxpython\r\n cwd: /tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/\r\n Complete output (200 lines):\r\n /media/cat/4TBSSD/anaconda3/envs/simba2/lib/python3.6/site-packages/setuptools/dist.py:720: UserWarning: Usage of dash-separated 'license-file' will not be supported in future versions. Please use the underscore name 'license_file' instead\r\n % (opt, underscore_opt)\r\n /media/cat/4TBSSD/anaconda3/envs/simba2/lib/python3.6/site-packages/setuptools/dist.py:294: DistDeprecationWarning: use_2to3 is ignored.\r\n warnings.warn(f\"{attr} is ignored.\", DistDeprecationWarning)\r\n running install\r\n running build\r\n WARNING: Building this way assumes that all generated files have been\r\n generated already. If that is not the case then use build.py directly\r\n to generate the source and perform the build stage. You can use\r\n --skip-build with the bdist_* or install commands to avoid this\r\n message and the wxWidgets and Phoenix build steps in the future.\r\n \r\n \"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" -u build.py build\r\n Will build using: \"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\"\r\n 3.6.0 | packaged by conda-forge | (default, Feb 9 2017, 14:36:55)\r\n [GCC 4.8.2 20140120 (Red Hat 4.8.2-15)]\r\n Python's architecture is 64bit\r\n cfg.VERSION: 4.0.4\r\n \r\n Running command: build\r\n Running command: build_wx\r\n wxWidgets build options: ['--wxpython', '--unicode', '--no_config', '--gtk3']\r\n Configure options: ['--enable-unicode', '--with-gtk=3', '--with-opengl', '--enable-sound', '--enable-graphics_ctx', '--enable-mediactrl', '--enable-display', '--enable-geometry', '--enable-debug_flag', '--enable-optimise', '--disable-debugreport', '--enable-uiactionsim', '--enable-autoidman', '--with-sdl']\r\n /usr/bin/make\r\n make --jobs=32\r\n (if test -f utils/wxrc/Makefile ; then cd utils/wxrc && make all ; fi)\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/wxbld/gtk3/utils/wxrc'\r\n make[1]: Nothing to be done for 'all'.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/wxbld/gtk3/utils/wxrc'\r\n Building message catalogs in /tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale\r\n make allmo\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'af.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory 
'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'an.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ar.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ca.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ca@valencia.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'cs.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'da.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'de.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'el.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'es.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'eu.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'fi.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'fr.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'gl_ES.mo' is up to date.\r\n make[1]: Leaving directory 
'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'hi.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'hu.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'id.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'it.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ja.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ko_KR.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'lt.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'lv.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ms.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'msw/it.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'nb.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ne.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory 
'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'nl.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'pl.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'pt_BR.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'pt.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ro.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ru.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'sk.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'sl.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'sq.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'sv.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'ta.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'tr.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'uk.mo' is up to date.\r\n make[1]: Leaving directory 
'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'vi.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'zh_CN.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: Entering directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n make[1]: 'zh_TW.mo' is up to date.\r\n make[1]: Leaving directory '/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/ext/wxWidgets/locale'\r\n Finished command: build_wx (0m1.42s)\r\n Running command: build_py\r\n Checking for /tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/bin/waf-2.0.8...\r\n \"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" /tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/bin/waf-2.0.8 --wx_config=/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/wxbld/gtk3/wx-config --gtk3 --python=\"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" --out=build/waf/3.6/gtk3 configure build\r\n Setting top to : /tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35\r\n Setting out to : /tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/waf/3.6/gtk3\r\n Checking for 'gcc' (C compiler) : /usr/bin/gcc\r\n Checking for 'g++' (C++ compiler) : /usr/bin/g++\r\n Checking for program 'python' : /media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\r\n Checking for python version >= 2.7.0 : 3.6.0\r\n python-config : /media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6-config\r\n Asking python-config for pyembed '--cflags --libs --ldflags' flags : yes\r\n Testing pyembed configuration : yes\r\n Asking python-config for pyext '--cflags --libs --ldflags' flags : yes\r\n Testing pyext configuration : yes\r\n Finding libs for WX : yes\r\n Finding libs for WXADV : yes\r\n Finding libs for WXSTC : yes\r\n Finding libs for WXHTML : yes\r\n Finding libs for WXGL : yes\r\n Finding libs for WXWEBVIEW : yes\r\n Finding libs for WXXML : yes\r\n Finding libs for WXXRC : yes\r\n Finding libs for WXRICHTEXT : yes\r\n Finding libs for WXMEDIA : yes\r\n Finding libs for WXRIBBON : yes\r\n Finding libs for WXPROPGRID : yes\r\n Finding libs for WXAUI : yes\r\n 'configure' finished successfully (1.698s)\r\n Waf: Entering directory `/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/waf/3.6/gtk3'\r\n [639/869] Linking build/waf/3.6/gtk3/_html2.cpython-36m-x86_64-linux-gnu.so\r\n [644/869] Linking build/waf/3.6/gtk3/_xml.cpython-36m-x86_64-linux-gnu.so\r\n [650/869] Linking build/waf/3.6/gtk3/_xrc.cpython-36m-x86_64-linux-gnu.so\r\n /usr/bin/ld: cannot find -lwx_gtk3u_webview-3.0\r\n collect2: error: ld returned 1 exit status\r\n \r\n Waf: Leaving directory `/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/waf/3.6/gtk3'\r\n Build failed\r\n -> task in '_html2' failed with exit status 1 (run with -v to display more information)\r\n Command '\"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" 
/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/bin/waf-2.0.8 --wx_config=/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/build/wxbld/gtk3/wx-config --gtk3 --python=\"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" --out=build/waf/3.6/gtk3 configure build ' failed with exit code 1.\r\n Finished command: build_py (0m4.377s)\r\n Finished command: build (0m5.420s)\r\n Command '\"/media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6\" -u build.py build' failed with exit code 1.\r\n ----------------------------------------\r\nERROR: Command errored out with exit status 1: /media/cat/4TBSSD/anaconda3/envs/simba2/bin/python3.6 -u -c 'import io, os, sys, setuptools, tokenize; sys.argv[0] = '\"'\"'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/setup.py'\"'\"'; __file__='\"'\"'/tmp/pip-install-snv642tw/wxpython_faaddcf196704ced8743d8ca6906bf35/setup.py'\"'\"';f = getattr(tokenize, '\"'\"'open'\"'\"', open)(__file__) if os.path.exists(__file__) else io.StringIO('\"'\"'from setuptools import setup; setup()'\"'\"');code = f.read().replace('\"'\"'\\r\\n'\"'\"', '\"'\"'\\n'\"'\"');f.close();exec(compile(code, __file__, '\"'\"'exec'\"'\"'))' install --record /tmp/pip-record-wg5mz2ez/install-record.txt --single-version-externally-managed --compile --install-headers /media/cat/4TBSSD/anaconda3/envs/simba2/include/python3.6m/wxpython Check the logs for full command output.\r\n(simba2) cat@cat-Precision-T3610:~$ \r\n```\r\n\r\n", + "created_at": "2021-12-20T09:19:37Z", + "author": "catubc" + }, + { + "body": "Hi @catubc - I'm surprised to see wxpython there, I don't think anything in SimBA depends on it. What about `pip install simba-uw-tf-dev --no-deps` ?", + "created_at": "2021-12-20T09:27:48Z", + "author": "sronilsson" + }, + { + "body": "Thanks,\r\nWell, calling simba from the command line then doesn't work\r\n\r\n```\r\n(simba3) cat@cat-Precision-T3610:~/code/simba$ pip cache purge\r\nFiles removed: 174\r\n(simba3) cat@cat-Precision-T3610:~/code/simba$ pip install simba-uw-tf-dev --no-deps --no-cache-dir\r\nCollecting simba-uw-tf-dev\r\n Downloading Simba_UW_tf_dev-0.88.8-py3-none-any.whl (10.5 MB)\r\n |████████████████████████████████| 10.5 MB 4.9 MB/s \r\nInstalling collected packages: simba-uw-tf-dev\r\nSuccessfully installed simba-uw-tf-dev-0.88.8\r\n(simba3) cat@cat-Precision-T3610:~/code/simba$ simba\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/bin/simba\", line 5, in \r\n from simba.SimBA import main\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 6, in \r\n import seaborn as sns\r\nModuleNotFoundError: No module named 'seaborn'\r\n(simba3) cat@cat-Precision-T3610:~/code/simba$ pip install seaborn\r\nCollecting seaborn\r\n Downloading seaborn-0.11.2-py3-none-any.whl (292 kB)\r\n |████████████████████████████████| 292 kB 5.4 MB/s \r\nCollecting scipy>=1.0\r\n Downloading scipy-1.5.4-cp36-cp36m-manylinux1_x86_64.whl (25.9 MB)\r\n |████████████████████████████████| 25.9 MB 35 kB/s \r\nCollecting numpy>=1.15\r\n Downloading numpy-1.19.5-cp36-cp36m-manylinux2010_x86_64.whl (14.8 MB)\r\n |████████████████████████████████| 14.8 MB 37.4 MB/s \r\nCollecting pandas>=0.23\r\n Downloading pandas-0.25.3-cp36-cp36m-manylinux1_x86_64.whl (10.4 MB)\r\n |████████████████████████████████| 10.4 MB 42.3 MB/s \r\nCollecting matplotlib>=2.2\r\n Downloading matplotlib-3.3.4-cp36-cp36m-manylinux1_x86_64.whl (11.5 MB)\r\n |████████████████████████████████| 
11.5 MB 52.3 MB/s \r\nCollecting cycler>=0.10\r\n Downloading cycler-0.11.0-py3-none-any.whl (6.4 kB)\r\nCollecting kiwisolver>=1.0.1\r\n Downloading kiwisolver-1.3.1-cp36-cp36m-manylinux1_x86_64.whl (1.1 MB)\r\n |████████████████████████████████| 1.1 MB 31.4 MB/s \r\nCollecting pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3\r\n Downloading pyparsing-3.0.6-py3-none-any.whl (97 kB)\r\n |████████████████████████████████| 97 kB 2.8 MB/s \r\nCollecting pillow>=6.2.0\r\n Downloading Pillow-8.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.1 MB)\r\n |████████████████████████████████| 3.1 MB 26.2 MB/s \r\nCollecting python-dateutil>=2.1\r\n Downloading python_dateutil-2.8.2-py2.py3-none-any.whl (247 kB)\r\n |████████████████████████████████| 247 kB 38.6 MB/s \r\nCollecting pytz>=2017.2\r\n Downloading pytz-2021.3-py2.py3-none-any.whl (503 kB)\r\n |████████████████████████████████| 503 kB 33.2 MB/s \r\nCollecting six>=1.5\r\n Downloading six-1.16.0-py2.py3-none-any.whl (11 kB)\r\nInstalling collected packages: six, pytz, python-dateutil, pyparsing, pillow, numpy, kiwisolver, cycler, scipy, pandas, matplotlib, seaborn\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 0.88.8 requires cefpython3==66.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash==1.14.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-color-picker==0.0.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-colorscales==0.0.4, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-core-components==1.10.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dash-html-components==1.0.3, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires dtreeviz==0.8.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires eli5==0.10.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires graphviz==0.11, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires h5py==2.9.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires imblearn==0.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires imgaug==0.4.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires imutils==0.5.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires numba==0.48.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires numexpr==2.6.9, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires opencv-python==3.4.5.20, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires plotly==4.9.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires pyarrow==0.17.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires pyyaml==5.3.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires scikit-image==0.14.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires scikit-learn==0.22.2, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires shap==0.35.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires shapely==1.7, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires statsmodels==0.9.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires tables==3.6.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires tabulate==0.8.3, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires tqdm==4.30.0, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires wxpython==4.0.4, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires xgboost==0.90, which is not 
installed.\r\nsimba-uw-tf-dev 0.88.8 requires yellowbrick==0.9.1, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires matplotlib==3.0.3, but you have matplotlib 3.3.4 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires numpy==1.18.1, but you have numpy 1.19.5 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires Pillow==5.4.1, but you have pillow 8.4.0 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires scipy==1.1.0, but you have scipy 1.5.4 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires seaborn==0.9.0, but you have seaborn 0.11.2 which is incompatible.\r\nSuccessfully installed cycler-0.11.0 kiwisolver-1.3.1 matplotlib-3.3.4 numpy-1.19.5 pandas-0.25.3 pillow-8.4.0 pyparsing-3.0.6 python-dateutil-2.8.2 pytz-2021.3 scipy-1.5.4 seaborn-0.11.2 six-1.16.0\r\n(simba3) cat@cat-Precision-T3610:~/code/simba$ \r\n work:\r\n\r\n\r\n```", + "created_at": "2021-12-20T09:45:52Z", + "author": "catubc" + }, + { + "body": "```\r\n(simba3) cat@cat-Precision-T3610:~/code/simba$ simba\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/bin/simba\", line 5, in \r\n from simba.SimBA import main\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 6, in \r\n import seaborn as sns\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/seaborn/__init__.py\", line 2, in \r\n from .rcmod import * # noqa: F401,F403\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/seaborn/rcmod.py\", line 5, in \r\n import matplotlib as mpl\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/matplotlib/__init__.py\", line 107, in \r\n from . import cbook, rcsetup\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/matplotlib/rcsetup.py\", line 28, in \r\n from matplotlib.fontconfig_pattern import parse_fontconfig_pattern\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/matplotlib/fontconfig_pattern.py\", line 15, in \r\n from pyparsing import (Literal, ZeroOrMore, Optional, Regex, StringEnd,\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pyparsing/__init__.py\", line 130, in \r\n __version__ = __version_info__.__version__\r\nAttributeError: 'version_info' object has no attribute '__version__'\r\n(simba3) cat@cat-Precision-T3610:~/code/simba$ \r\n\r\n```", + "created_at": "2021-12-20T09:47:01Z", + "author": "catubc" + }, + { + "body": "Interesting, seems to be seaborn / matplotlib version clash? Maybe they are installed with different pip versions? Can you make sure it is matplotlib 3.0.3 and seaborn 0.9.0 ? Or also try reinstall both matplotlib and seaborn at latest versions `pip install matplotlib --upgrade` and same for seaborn ", + "created_at": "2021-12-20T10:05:03Z", + "author": "sronilsson" + }, + { + "body": "1. 
Re: installing to latest, simba doesn't like either of them, I think it's in the requirements:\r\n```\r\n(simba3) cat@cat-Precision-T3610:~$ pip install seaborn --upgrade\r\nRequirement already satisfied: seaborn in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (0.9.0)\r\nCollecting seaborn\r\n Using cached seaborn-0.11.2-py3-none-any.whl (292 kB)\r\nRequirement already satisfied: matplotlib>=2.2 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from seaborn) (3.3.4)\r\nRequirement already satisfied: scipy>=1.0 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from seaborn) (1.1.0)\r\nRequirement already satisfied: pandas>=0.23 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from seaborn) (0.25.3)\r\nRequirement already satisfied: numpy>=1.15 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from seaborn) (1.18.1)\r\nRequirement already satisfied: cycler>=0.10 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from matplotlib>=2.2->seaborn) (0.11.0)\r\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.3 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from matplotlib>=2.2->seaborn) (3.0.6)\r\nRequirement already satisfied: pillow>=6.2.0 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from matplotlib>=2.2->seaborn) (8.4.0)\r\nRequirement already satisfied: python-dateutil>=2.1 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from matplotlib>=2.2->seaborn) (2.8.2)\r\nRequirement already satisfied: kiwisolver>=1.0.1 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from matplotlib>=2.2->seaborn) (1.3.1)\r\nRequirement already satisfied: pytz>=2017.2 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from pandas>=0.23->seaborn) (2021.3)\r\nRequirement already satisfied: six>=1.5 in /media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages (from python-dateutil>=2.1->matplotlib>=2.2->seaborn) (1.16.0)\r\nInstalling collected packages: seaborn\r\n Attempting uninstall: seaborn\r\n Found existing installation: seaborn 0.9.0\r\n Uninstalling seaborn-0.9.0:\r\n Successfully uninstalled seaborn-0.9.0\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\nsimba-uw-tf-dev 0.88.8 requires wxpython==4.0.4, which is not installed.\r\nsimba-uw-tf-dev 0.88.8 requires matplotlib==3.0.3, but you have matplotlib 3.3.4 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires Pillow==5.4.1, but you have pillow 8.4.0 which is incompatible.\r\nsimba-uw-tf-dev 0.88.8 requires seaborn==0.9.0, but you have seaborn 0.11.2 which is incompatible.\r\nSuccessfully installed seaborn-0.11.2\r\n```\r\n2. I downgraded matplotlib and seaborn and also installed wxpython 4.0.4 via conda now. 
But am still getting this error on commandline:\r\n\r\n```\r\n(simba3) cat@cat-Precision-T3610:~$ simba\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/bin/simba\", line 5, in \r\n from simba.SimBA import main\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 6, in \r\n import seaborn as sns\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/seaborn/__init__.py\", line 2, in \r\n import matplotlib as mpl\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/matplotlib/__init__.py\", line 141, in \r\n from . import cbook, rcsetup\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/matplotlib/rcsetup.py\", line 25, in \r\n from matplotlib.fontconfig_pattern import parse_fontconfig_pattern\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/matplotlib/fontconfig_pattern.py\", line 19, in \r\n from pyparsing import (Literal, ZeroOrMore, Optional, Regex, StringEnd,\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/pyparsing/__init__.py\", line 130, in \r\n __version__ = __version_info__.__version__\r\nAttributeError: 'version_info' object has no attribute '__version__'\r\n(simba3) cat@cat-Precision-T3610:~$ \r\n```\r\n\r\nHere are the installed packages:\r\n\r\n\r\n```\r\n(simba3) cat@cat-Precision-T3610:~$ pip list\r\nPackage Version\r\n-------------------- --------\r\nattrs 21.2.0\r\nBrotli 1.0.9\r\ncefpython3 66.0\r\nclick 8.0.3\r\ncloudpickle 2.0.0\r\ncolour 0.1.5\r\ncycler 0.11.0\r\ndash 1.14.0\r\ndash-color-picker 0.0.1\r\ndash-colorscales 0.0.4\r\ndash-core-components 1.10.2\r\ndash-html-components 1.0.3\r\ndash-renderer 1.6.0\r\ndash-table 4.9.0\r\ndask 2021.3.0\r\ndataclasses 0.8\r\ndecorator 4.4.2\r\ndtreeviz 0.8.1\r\neli5 0.10.1\r\nFlask 2.0.2\r\nFlask-Compress 1.10.1\r\nfuture 0.18.2\r\ngraphviz 0.11\r\nh5py 2.9.0\r\nimageio 2.13.3\r\nimbalanced-learn 0.8.1\r\nimblearn 0.0\r\nimgaug 0.4.0\r\nimportlib-metadata 4.8.3\r\nimutils 0.5.2\r\nitsdangerous 2.0.1\r\nJinja2 3.0.3\r\njoblib 1.1.0\r\nkiwisolver 1.3.1\r\nllvmlite 0.31.0\r\nMarkupSafe 2.0.1\r\nmatplotlib 3.0.3\r\nnetworkx 2.5.1\r\nnumba 0.48.0\r\nnumexpr 2.6.9\r\nnumpy 1.18.1\r\nopencv-python 3.4.5.20\r\npackaging 21.3\r\npandas 0.25.3\r\npatsy 0.5.2\r\nPillow 5.4.1\r\npip 21.3.1\r\nplotly 4.9.0\r\npyarrow 0.17.1\r\npyparsing 3.0.6\r\npython-dateutil 2.8.2\r\npytz 2021.3\r\nPyWavelets 1.1.1\r\nPyYAML 5.3.1\r\nretrying 1.3.3\r\nscikit-image 0.14.2\r\nscikit-learn 0.22.2\r\nscipy 1.1.0\r\nseaborn 0.9.0\r\nsetuptools 58.0.4\r\nshap 0.35.0\r\nShapely 1.7.0\r\nSimba-UW-tf-dev 0.88.8\r\nsix 1.16.0\r\nstatsmodels 0.9.0\r\ntables 3.6.1\r\ntabulate 0.8.3\r\nthreadpoolctl 3.0.0\r\ntoolz 0.11.2\r\ntqdm 4.30.0\r\ntyping_extensions 4.0.1\r\nWerkzeug 2.0.2\r\nwheel 0.37.0\r\nwxPython 4.0.4\r\nxgboost 0.90\r\nyellowbrick 0.9.1\r\nzipp 3.6.0\r\n\r\n```\r\n\r\n\r\n```\r\n(simba3) cat@cat-Precision-T3610:~$ conda list\r\n# packages in environment at /media/cat/4TBSSD/anaconda3/envs/simba3:\r\n#\r\n# Name Version Build Channel\r\n_libgcc_mutex 0.1 conda_forge conda-forge\r\n_openmp_mutex 4.5 1_gnu conda-forge\r\nattrs 21.2.0 pypi_0 pypi\r\nbrotli 1.0.9 pypi_0 pypi\r\nca-certificates 2020.10.14 0 anaconda\r\ncairo 1.16.0 h18b612c_1001 conda-forge\r\ncefpython3 66.0 pypi_0 pypi\r\nclick 8.0.3 pypi_0 pypi\r\ncloudpickle 2.0.0 pypi_0 pypi\r\ncolour 0.1.5 pypi_0 pypi\r\ncycler 0.11.0 pypi_0 pypi\r\ndash 1.14.0 pypi_0 
pypi\r\ndash-color-picker 0.0.1 pypi_0 pypi\r\ndash-colorscales 0.0.4 pypi_0 pypi\r\ndash-core-components 1.10.2 pypi_0 pypi\r\ndash-html-components 1.0.3 pypi_0 pypi\r\ndash-renderer 1.6.0 pypi_0 pypi\r\ndash-table 4.9.0 pypi_0 pypi\r\ndask 2021.3.0 pypi_0 pypi\r\ndataclasses 0.8 pypi_0 pypi\r\ndecorator 4.4.2 pypi_0 pypi\r\ndtreeviz 0.8.1 pypi_0 pypi\r\neli5 0.10.1 pypi_0 pypi\r\nexpat 2.2.10 he6710b0_2 anaconda\r\nflask 2.0.2 pypi_0 pypi\r\nflask-compress 1.10.1 pypi_0 pypi\r\nfontconfig 2.13.1 hba837de_1005 conda-forge\r\nfreetype 2.10.4 h5ab3b9f_0 anaconda\r\nfribidi 1.0.10 h7b6447c_0 anaconda\r\nfuture 0.18.2 pypi_0 pypi\r\ngettext 0.19.8.1 h9b4dc7a_1 anaconda\r\nglib 2.66.1 h92f7085_0 anaconda\r\ngraphite2 1.3.14 h23475e2_0 anaconda\r\ngst-plugins-base 1.14.0 h8213a91_2 \r\ngstreamer 1.14.0 h28cd5cc_2 \r\nh5py 2.9.0 pypi_0 pypi\r\nharfbuzz 2.4.0 hca77d97_1 anaconda\r\nicu 58.2 he6710b0_3 anaconda\r\nimageio 2.13.3 pypi_0 pypi\r\nimbalanced-learn 0.8.1 pypi_0 pypi\r\nimblearn 0.0 pypi_0 pypi\r\nimgaug 0.4.0 pypi_0 pypi\r\nimportlib-metadata 4.8.3 pypi_0 pypi\r\nimutils 0.5.2 pypi_0 pypi\r\nitsdangerous 2.0.1 pypi_0 pypi\r\njinja2 3.0.3 pypi_0 pypi\r\njoblib 1.1.0 pypi_0 pypi\r\njpeg 9b habf39ab_1 anaconda\r\nkiwisolver 1.3.1 pypi_0 pypi\r\nlibffi 3.3 he6710b0_2 anaconda\r\nlibgcc-ng 11.2.0 h1d223b6_11 conda-forge\r\nlibglu 9.0.0 hf484d3e_1 anaconda\r\nlibgomp 11.2.0 h1d223b6_11 conda-forge\r\nlibpng 1.6.37 hbc83047_0 anaconda\r\nlibstdcxx-ng 9.1.0 hdf63c60_0 anaconda\r\nlibuuid 2.32.1 h7f98852_1000 conda-forge\r\nlibxcb 1.14 h7b6447c_0 anaconda\r\nlibxml2 2.9.10 hb55368b_3 anaconda\r\nlibzlib 1.2.11 h36c2ea0_1013 conda-forge\r\nllvmlite 0.31.0 pypi_0 pypi\r\nmarkupsafe 2.0.1 pypi_0 pypi\r\nmatplotlib 3.0.3 pypi_0 pypi\r\nncurses 5.9 10 conda-forge\r\nnetworkx 2.5.1 pypi_0 pypi\r\nnumba 0.48.0 pypi_0 pypi\r\nnumexpr 2.6.9 pypi_0 pypi\r\nnumpy 1.18.1 pypi_0 pypi\r\nopencv-python 3.4.5.20 pypi_0 pypi\r\nopenssl 1.0.2u h7b6447c_0 anaconda\r\npackaging 21.3 pypi_0 pypi\r\npandas 0.25.3 pypi_0 pypi\r\npango 1.45.3 hd140c19_0 anaconda\r\npatsy 0.5.2 pypi_0 pypi\r\npcre 8.44 he6710b0_0 anaconda\r\npillow 5.4.1 pypi_0 pypi\r\npip 21.3.1 pyhd8ed1ab_0 conda-forge\r\npixman 0.38.0 h516909a_1003 conda-forge\r\nplotly 4.9.0 pypi_0 pypi\r\npyarrow 0.17.1 pypi_0 pypi\r\npyparsing 3.0.6 pypi_0 pypi\r\npython 3.6.0 2 conda-forge\r\npython-dateutil 2.8.2 pypi_0 pypi\r\npython-graphviz 0.11 pypi_0 pypi\r\npython_abi 3.6 2_cp36m conda-forge\r\npytz 2021.3 pypi_0 pypi\r\npywavelets 1.1.1 pypi_0 pypi\r\npyyaml 5.3.1 pypi_0 pypi\r\nreadline 6.2 0 conda-forge\r\nretrying 1.3.3 pypi_0 pypi\r\nscikit-image 0.14.2 pypi_0 pypi\r\nscikit-learn 0.22.2 pypi_0 pypi\r\nscipy 1.1.0 pypi_0 pypi\r\nseaborn 0.9.0 pypi_0 pypi\r\nsetuptools 58.0.4 py36h5fab9bb_2 conda-forge\r\nshap 0.35.0 pypi_0 pypi\r\nshapely 1.7.0 pypi_0 pypi\r\nsimba-uw-tf-dev 0.88.8 pypi_0 pypi\r\nsix 1.16.0 pypi_0 pypi\r\nsqlite 3.13.0 1 conda-forge\r\nstatsmodels 0.9.0 pypi_0 pypi\r\ntables 3.6.1 pypi_0 pypi\r\ntabulate 0.8.3 pypi_0 pypi\r\nthreadpoolctl 3.0.0 pypi_0 pypi\r\ntk 8.5.19 2 conda-forge\r\ntoolz 0.11.2 pypi_0 pypi\r\ntqdm 4.30.0 pypi_0 pypi\r\ntyping-extensions 4.0.1 pypi_0 pypi\r\nwerkzeug 2.0.2 pypi_0 pypi\r\nwheel 0.37.0 pyhd8ed1ab_1 conda-forge\r\nwxpython 4.0.4 py36hc99224d_0 \r\nxgboost 0.90 pypi_0 pypi\r\nxorg-kbproto 1.0.7 h7f98852_1002 conda-forge\r\nxorg-libice 1.0.10 h7f98852_0 conda-forge\r\nxorg-libsm 1.2.3 hd9c2040_1000 conda-forge\r\nxorg-libx11 1.7.2 h7f98852_0 conda-forge\r\nxorg-libxext 1.3.4 h7f98852_1 
conda-forge\r\nxorg-libxrender 0.9.10 h7f98852_1003 conda-forge\r\nxorg-renderproto 0.11.1 h7f98852_1002 conda-forge\r\nxorg-xextproto 7.3.0 h7f98852_1002 conda-forge\r\nxorg-xproto 7.0.31 h7f98852_1007 conda-forge\r\nxz 5.2.5 h516909a_1 conda-forge\r\nyellowbrick 0.9.1 pypi_0 pypi\r\nzipp 3.6.0 pypi_0 pypi\r\nzlib 1.2.11 h36c2ea0_1013 conda-forge\r\n(simba3) cat@cat-Precision-T3610:~$ \r\n```\r\n\r\n\r\n", + "created_at": "2021-12-20T10:19:30Z", + "author": "catubc" + }, + { + "body": "It seems to be at `pyparsing` - from pyparsing import (Literal, ZeroOrMore, Optional, Regex, StringEnd - a requirement of matplotlib. I don't know which version I have for that but will check. ", + "created_at": "2021-12-20T10:26:46Z", + "author": "sronilsson" + }, + { + "body": "Might be a Linux dependency for mpl.. try reinstalling pyparsing?", + "created_at": "2021-12-20T10:29:15Z", + "author": "sronilsson" + }, + { + "body": "Ok, great - downgraded to 3.0.0; now I have this issue:\r\n```\r\n\r\n(simba3) cat@cat-Precision-T3610:~$ simba\r\nTraceback (most recent call last):\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/bin/simba\", line 8, in \r\n sys.exit(main())\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 7851, in main\r\n app = App()\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/SimBA.py\", line 7684, in __init__\r\n img = PhotoImage(file=os.path.join(scriptdir,'golden.png'))\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/tkinter/__init__.py\", line 3539, in __init__\r\n Image.__init__(self, 'photo', name, cnf, master, **kw)\r\n File \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/tkinter/__init__.py\", line 3495, in __init__\r\n self.tk.call(('image', 'create', imgtype, name,) + options)\r\n_tkinter.TclError: couldn't recognize data in image file \"/media/cat/4TBSSD/anaconda3/envs/simba3/lib/python3.6/site-packages/simba/golden.png\"\r\n(simba3) cat@cat-Precision-T3610:~$ \r\n\r\n```\r\n\r\nApparently, it's a Pillow issue, but not sure.", + "created_at": "2021-12-20T10:31:32Z", + "author": "catubc" + }, + { + "body": "Ok, well, it finally works - not clear what did it.\r\n\r\nFYI: I did use conda to install wxpython, and that was so much easier\r\n\r\n`conda install -c anaconda wxpython==4.0.4`\r\n\r\nI think another of the issues might have been that I was using python=3.6 instead of 3.6.10, which perhaps came with a different version of tk.\r\n\r\nI will try to use simba a bit, and if it's useful, will reproduce the install in the cloud now. Fingers crossed it works. I'll post any other install issues in case others encounter them.\r\nThanks so much!", + "created_at": "2021-12-20T10:48:59Z", + "author": "catubc" + }, + { + "body": "Yes, it's having trouble reading the splash screen image in the final rows of SimBA.py; it's using PIL. The file it has trouble reading is golden.png. I've seen it a long time ago and thought it was dealt with, but it might be related to OS version. 
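\r\n\r\nFor illustration, a minimal sketch of the kind of guard I have in mind (hypothetical - not the exact fix that will ship; `scriptdir` is defined here only to make the snippet self-contained, mirroring the `PhotoImage` call in your traceback):\r\n\r\n```\r\nimport os\r\nimport tkinter\r\n\r\nroot = tkinter.Tk()  # a default root must exist before images can be created\r\nscriptdir = os.path.dirname(os.path.abspath(__file__))  # folder holding golden.png\r\ntry:\r\n    img = tkinter.PhotoImage(file=os.path.join(scriptdir, 'golden.png'))\r\nexcept tkinter.TclError:\r\n    img = None  # skip the splash image if tkinter cannot read the png\r\n```\r\n\r\n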
I will insert a catch along those lines to not display the splash screen when this error occurs..", + "created_at": "2021-12-20T10:49:04Z", + "author": "sronilsson" + }, + { + "body": "Yes, I reinstalled python to 3.6.10 and tkinter was upgraded, perhaps that's what solved the last issue.", + "created_at": "2021-12-20T10:49:45Z", + "author": "catubc" + } + ] + }, + { + "title": "Impossible to resume ROI drawing", + "body": "Hello, \r\n\r\nI'm using SimBA (latest version) on Windows 10, with DLC (one animal model) pose estimation data.\r\nI'm trying to analyse ROIs for 8 videos. I had already drawn the ROIs for 3 videos and closed the program. Now I try to resume, but when I click \"load defined ROI tables\" and then on the \"draw\" button of a video for which I had already drawn the ROIs, or on the button of a video for which I have not yet drawn the ROIs, the drawing window does not appear anymore. Instead, here is what I get: \r\n\r\n\"Schermata\r\n\r\nDo you know what I can do to get back to drawing my ROIs? \r\n\r\nThank you, \r\nBest,\r\nDorian", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2021-12-16T04:28:31Z", + "updated_at": "2021-12-19T21:13:18Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - thanks for reporting. I can't see the entire error msg in your screenshot and I can't recreate the error.\r\n\r\nBut I have inserted a potential fix where SimBA opens the table in `append` mode instead.\r\n\r\nCould you update SimBA with either `pip install simba-uw-tf-dev --upgrade` or `pip install simba-uw-tf-dev==0.88.8` and see if you still see the same error? ", + "created_at": "2021-12-16T09:31:14Z", + "author": "sronilsson" + }, + { + "body": "Hello @sronilsson , \r\n\r\nThank you very much for the feedback.\r\nI did the update, but I still can't resume the drawing: \r\n\r\nWhen I click \"load defined ROI tables\" and then on \"draw\", I get the \"ROI definitions saved...\" message, while when I click \"delete\" I get the \"Cannot delete ROI definitions\" message, as you can see: \r\n\r\n\"Schermata\r\n\r\nThe only way that works to draw the ROIs is by using the new interface area underneath (New SimBA ROI interface - experimental). From there, by clicking \"draw\" the frame pops out and I can proceed to drawing... but from scratch. Maybe I deleted my previous drawing yesterday when I tried different strategies to make the ROI interface work... So for the future, in case I want to resume an interrupted drawing, must I use the new interface area only? \r\n\r\nAlso, when do you think it will be possible to draw shapes with specific precise sizes? \r\nI need to draw several shapes of the same dimensions with very precise lengths. But this is very difficult because the option to define lengths is not yet functional and the drawing of the shapes does not yet allow making squares with the 4 sides automatically having the same length.\r\n\r\nThank you for the help and for developing this amazing software, \r\n\r\nBest, \r\nDorian \r\n", + "created_at": "2021-12-16T15:55:22Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli! I’m not sure why this happened at the moment, but here is a possibility:\r\n\r\nYour drawing information is stored in a file located at `project_folder/logs/measures/ROI_definitions.h5`. This file is created the first time you use the “standard” ROI drawing interface for a project, or the first time you use the new “experimental” interface (a quick way to peek at what is stored in that file is sketched below). 
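\r\n\r\nA sketch for inspecting the file (this assumes it is a standard pandas HDF5 store, which the `append` mode fix mentioned above suggests - treat the snippet as illustrative):\r\n\r\n```\r\nimport pandas as pd\r\n\r\n# list and print the ROI tables stored for the project (adjust the path)\r\nwith pd.HDFStore('project_folder/logs/measures/ROI_definitions.h5', mode='r') as store:\r\n    print(store.keys())  # e.g. one table per shape type\r\n    for key in store.keys():\r\n        print(store[key])\r\n```\r\n\r\n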
If the file already exists, then SimBA will open it instead of creating a new file when you click `load defined ROI tables` or when you open the experimental interface. If you have created ROI drawings in the experimental interface, you won’t be able to open them in the old interface, or vice versa. So it is better to stick to one. The error could also happen if you change video or file-names after you have created your ROI drawings. For the future, I would stick to using the experimental interface. Future updates and bug fixes will be made with this interface in mind. \r\n\r\nI will insert the ability to draw ROIs with pre-set dimensions, but it is not at the top of the list. For now, I would recommend the **duplicate ROI function**:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial_new.md#duplicating-rois\r\n\r\nThis at least makes sure that all your shapes are the same dimensions. You can also make sure that they are perfectly aligned and exactly the same dimensions with this function:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial_new.md#aligning-rois\r\n\r\nI know that this does not allow you to draw multiple shapes with specific metric dimensions, but it is the best we can do, for now. ", + "created_at": "2021-12-19T21:07:56Z", + "author": "sronilsson" + }
", + "created_at": "2021-12-09T17:49:43Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Error while trying to import h5 files - again (!)", + "body": "Hello,\r\nIm again recieving the same error as mentioned in that last issue:\r\nhttps://github.com/sgoldenlab/simba/issues/149\r\n\r\nAn Error occurres when I try to import h5 files.\r\nIn the last project the error was fixed by upgrading simba, but now - one week later i have the same situation within a new project. \r\n\r\nWhat to do now? Ive promised to present the results tomorrow..\r\n\r\nInteresting Sidefact: Im still able to import the old data even to the new project, but with new files (created correctly by DLC) the error comes..\r\n\r\nCheers - just a quick advice please - Im in a rush :)\r\n\r\n\r\n", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-12-08T19:48:55Z", + "updated_at": "2021-12-09T20:48:08Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Hello @Marcelobot - can you send me the error msg? My guess is that the tracking file might possibly been renamed after it was created by the pose-estimation package. ", + "created_at": "2021-12-08T19:55:12Z", + "author": "sronilsson" + }, + { + "body": "https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#17-i-get-an-error-when-i-try-to-import-multi-animal-data-h5-files-from-deeplabcut-into-simba---the-error-says-that-simba-cannot-locate-my-video-files\r\n", + "created_at": "2021-12-08T20:00:29Z", + "author": "sronilsson" + }, + { + "body": "The video and h5 files are named like this:\r\n![image](https://user-images.githubusercontent.com/81317771/145277115-b25206ee-28c7-48e8-973f-7f5c7c52155b.png)\r\n\r\n--> guess the name should be functional?\r\n\r\nThe video is stored within a DLC project/videos folder with other videos (tried to store it in separate folder but still not working out)\r\n\r\nThe occuring Error when trying to import h5 file looks like:\r\n![image](https://user-images.githubusercontent.com/81317771/145277697-4a40f6f7-21d1-4811-b570-f3de329130d1.png)\r\n\r\nCannot find the reason actually..\r\n\r\nCheers", + "created_at": "2021-12-08T20:14:53Z", + "author": "Marcelobot" + }, + { + "body": "Hey @Marcelobot - there seems to be an issue with the video file. SimBA cannot open it and it is only 2kb large - likely it is not a proper video file. Can you try and re-generate it? ", + "created_at": "2021-12-08T20:21:54Z", + "author": "sronilsson" + }, + { + "body": "Crazy.. now i see.\r\n\r\nThanks ! Seems like in my rush Ive done something wrong while cropping.\r\nBut okay easy to fix and not a simBA Problem - thank you very much!", + "created_at": "2021-12-08T20:34:50Z", + "author": "Marcelobot" + }, + { + "body": "No problem, let me know if the issue persist though, I'll insert some better error msg. ", + "created_at": "2021-12-08T20:36:52Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Issue while importing h5 files..", + "body": "Hello,\r\n\r\nMy issue seem quite similar to this one:\r\nhttps://github.com/sgoldenlab/simba/issues/66\r\n\r\nI cannot import h5 Files into my created SimBA project while i recieve the following in the Terminal:\r\n![image](https://user-images.githubusercontent.com/81317771/143690321-364946c2-c5c8-47bd-a910-d09fcf8acb54.png)\r\n\r\nWhat does taht actually mean \"Cannot locate video oxt10_190728_165111_s_TRIMMcroppedDLC_dlcrnetms5_Full_Videos_David_MA_1Nov25shuffle1_50000_bx.h5 in mp4 or avi format\"\r\n\r\nThe Video has definitely been stored correctly only a few clicks before. 
\r\nIve checked the videoname - its the same as in the h5 file..\r\nIve also tried less complicated and short names like \"animal_1\" but the problem is the same..\r\n\r\nWhat can i do?\r\nHave to present results on monday :S \r\n\r\nBut thank you for your support!\r\nCheers :-)", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-11-27T17:06:27Z", + "updated_at": "2021-12-08T19:26:05Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Hi @Marcelobot! \r\n\r\nThis, I think, was addressed in a recent update, and was due to DLC changing the filename conventions a bit. Can you make sure you are running the most recent version of SimBA and try again? \r\n\r\nI think the most recent version is 0.88.1. So run `pip install simba-uw-tf-dev==0.88.1\" and let me know if that fixes it? ", + "created_at": "2021-11-27T17:36:54Z", + "author": "sronilsson" + }, + { + "body": "Thank you very much for your immediate answer - appreciate it!\r\n\r\nIt worked out, ive quickly tried out all nessesary options and its looking good - except: feature extraction throws me an error:\r\n\"Pose-estimation body part setting for feature extraction: 16\r\n0\r\nExtracting features from 1 files...\r\nProcessing \"oxt10_190728_165111_s_TRIMMcropped\". Fps: 29.0. mm/ppx: 0.8162450612408017\r\nEvaluating convex hulls...\r\n(\"qhull precision warning: \\nThe initial hull is narrow (cosine of min. angle is 1.0000000000000000).\\nIs the input lower dimensional (e.g., on a plane in 3-d)? Qhull may\\nproduce a wide facet. Options 'QbB' (scale to unit box) or 'Qbb' (scale\\nlast coordinate) may remove this warning. Use 'Pp' to skip this warning.\\nSee 'Limitations' in qh-impre.htm.\\nQH6114 qhull precision error: initial simplex is not convex. Distance=0\\n\\nWhile executing: | qhull i Qt\\nOptions selected for Qhull 2015.2.r 2016/01/18:\\n run-id 1895525931 incidence Qtriangulate _pre-merge _zero-centrum\\n _max-width 2e+02 Error-roundoff 1.7e-13 _one-merge 8.7e-13\\n _near-inside 4.4e-12 Visible-distance 3.5e-13 U-coplanar-distance 3.5e-13\\n Width-outside 7e-13 _wide-facet 2.1e-12 _narrow-hull 0\\n\\nprecision problems (corrected unless 'Q0' or an error)\\n 1 flipped facets\\n\\nThe input to qhull appears to be less than 2 dimensional, or a\\ncomputation has overflowed.\\n\\nQhull could not construct a clearly convex simplex from points:\\n- p2(v2): 0 0\\n- p1(v1): 1.6e+02 2e+02\\n- p0(v0): 0 0\\n\\nThe center point is coplanar with a facet, or a vertex is coplanar\\nwith a neighboring facet. The maximum round off error for\\ncomputing distances is 1.7e-13. The center point, facets and distances\\nto the center point are as follows:\\n\\ncenter point 54 68\\n\\nfacet p1 p0 distance= -7.1e-15\\nfacet p2 p0 distance= -86\\nfacet p2 p1 distance= -7.1e-15\\n\\nThese points either have a maximum or minimum x-coordinate, or\\nthey maximize the determinant for k coordinates. 
Trial points\\nare first selected from points that maximize a coordinate.\\n\\nThe min and max coordinates for each dimension are:\\n 0: 0 162 difference= 162\\n 1: 0 204 difference= 204\\n\\nIf the input should be full dimensional, you have several options that\\nmay determine an initial simplex:\\n - use 'QJ' to joggle the input and make it full dimensional\\n - use 'QbB' to scale the points to the unit cube\\n - use 'QR0' to randomly rotate the input for different maximum points\\n - use 'Qs' to search all points for the initial simplex\\n - use 'En' to specify a maximum roundoff error less than 1.7e-13.\\n - trace execution with 'T3' to see the determinant for each point.\\n\\nIf the input is lower dimensional:\\n - use 'QJ' to joggle the input and make it full dimensional\\n - use 'Qbk:0Bk:0' to delete coordinate k from the input. You should\\n pick the coordinate with the least range. The hull will have the\\n correct topology.\\n - determine the flat containing the points, rotate the points\\n into a coordinate plane, and delete the other coordinates.\\n - add one or more points to make the input full dimensional.\\n\", 'occurred at index 2657')\r\nERROR: For more information, go to https://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md#i-get-a-qhull-eg-qh6154-or-6013-error-when-extracting-the-features\"\r\n\r\nDo you have any idea?\r\n\r\nCheers!\r\n", + "created_at": "2021-11-27T18:22:16Z", + "author": "Marcelobot" + }, + { + "body": "Following the mentioned link within the Error there is written, that it could happen, when an animal is missing for a few seconds.\r\n\r\nCan i fix it by using the \"interpolation\" when importing tracking files?\r\n\r\nCheers", + "created_at": "2021-11-27T18:30:45Z", + "author": "Marcelobot" + }, + { + "body": "Even the last mentioned ERROR is fixed now - Ive quickly fixed it by using the interpolation function. \r\n\r\nMore sustainable Ill try to even more improve the labeling perfomance of my DLC project.\r\n\r\nThanks - can be closed :)\r\n\r\nAll the best! ", + "created_at": "2021-11-27T20:11:52Z", + "author": "Marcelobot" + }, + { + "body": "Hello im Sorry but its just one week later and I recieve again that error while importing H5 files..\r\n\r\nTomorrow again I have to present results.. Would be great If you could give me a quick advice - ive followed your last advice and updated Simba to version 0.88.1 - is there a newer Version? Is may an Update again the solution?\r\n\r\nCheers! Just a quick answer please :)", + "created_at": "2021-12-08T19:26:04Z", + "author": "Marcelobot" + } + ] + }, + { + "title": "Multi-Animal DLC Import Error", + "body": "Hi @sgoldenlab, I am having an issue where when I try and import the h5 files using H5 (multianimalDLC):\r\n\r\n\"Importing 0 multi-animal DLC h5 files to the current project\r\nAll multi-animal DLC .h5 tracking files ordered and imported into SimBA project\r\nin the chosen workflow file format\"\r\n\r\nIt works when I select CSV (DLC/DeepPoseKit) but using that option leads to issues with Extracting Features.\r\n\r\nI currently have simba-uw-tf-dev and I uninstalled it and tried both versions simba-uw-tf-dev==0.57 and simba-uw-tf-dev==0.58 and did not have any luck.\r\n\r\nThank you for the help!", + "user": "yana-yuhai", + "reaction_cnt": 0, + "created_at": "2021-11-19T19:17:14Z", + "updated_at": "2021-12-03T18:48:31Z", + "author": "yana-yuhai", + "comments": [ + { + "body": "Hello @yana-yuhai ! 
\r\n\r\nCould you update to the latest version of simba-uw-tf-dev - we are up to 0.88.0 now - with `pip install simba-uw-tf-dev==0.88.0` or `pip install simba-uw-tf-dev --upgrade`, and let us know how it goes? ", + "created_at": "2021-11-19T19:32:58Z", + "author": "sronilsson" + }, + { + "body": "Hi, \r\n\r\nI tried updating to the latest version but am still experiencing the same error message with 0 files being imported.\r\n\r\nAlso, the files end in el.h5, so I've been selecting the \"ellipse\" option. \r\n ", + "created_at": "2021-11-21T19:49:06Z", + "author": "yana-yuhai" + }, + { + "body": "hello @yana-yuhai - I see. What SimBA does is look in the selected data folder for filenames ending with `el.h5` or `el_filtered.h5`; the code is in `read_DLCmulti_h5_function.py`, lines 32-37, and pasted below. \r\n\r\nThe reasons I can think of for this happening:\r\n\r\n* Have you changed the filenames in any way, so that your files tracked with the ellipse method do not end with `el.h5` or `el_filtered.h5` anymore? \r\n\r\n* Are your h5 files located in a folder with spaces or other special characters that could cause SimBA to look in the wrong folder? \r\n\r\n\r\n```\r\n if filetype == 'skeleton':\r\n filesFound = glob.glob(dataFolder + '/*sk.h5') + glob.glob(dataFolder + '/*sk_filtered.h5')\r\n if filetype == 'box':\r\n filesFound = glob.glob(dataFolder + '/*bx.h5') + glob.glob(dataFolder + '/*bx_filtered.h5')\r\n if filetype == 'ellipse':\r\n filesFound = glob.glob(dataFolder + '/*el.h5') + glob.glob(dataFolder + '/*el_filtered.h5')\r\n```", + "created_at": "2021-11-22T13:49:26Z", + "author": "sronilsson" + }, + { + "body": "Hi,\r\n\r\nI haven't changed the file names - perhaps it's the folder issue. \r\n\r\nThis is the folder pathway I've specified: \r\nD:\\Yana\\yana_training_social-Yana-2021-10-29\\videos\\h5\r\n\r\nThis is an example of a file name: \r\nTrial1_2021-07-17-095318-0000DLC0dlcmetms5_yana_training_socialOct29shuffle1_50000_el.h5\r\n\r\nIs there a way I should rename the folder or files to resolve this issue? I'm not entirely sure where the issue is coming from. Thank you for the help.\r\n\r\n", + "created_at": "2021-11-22T15:11:58Z", + "author": "yana-yuhai" + }, + { + "body": "Thanks @yana-yuhai - I tried to replicate this myself using the same folder-path as you and I didn't get any errors.. \r\n\r\nIs `D:\\` some external storage device? If so, could you try moving the files from the `D:\\` drive to `C:\\`, or whatever drive Python is running on, and let me know if that works? \r\n\r\nAlso, a separate question (which is unrelated to your error): your filename is `Trial1_2021-07-17-095318-0000DLC0dlcmetms5_yana_training_socialOct29shuffle1_50000_el.h5` and contains the substring `DLC0dlcmetms5`. I have not seen this before; they typically have the substring `DLC_dlcrnetms5`. Just wanted to check, is this a typo, or is your file named `Trial1_2021-07-17-095318-0000DLC0dlcmetms5_yana_training_socialOct29shuffle1_50000_el.h5` and not `Trial1_2021-07-17-095318-0000DLC_dlcrnetms5_yana_training_socialOct29shuffle1_50000_el.h5`? ", + "created_at": "2021-11-22T15:37:15Z", + "author": "sronilsson" + }, + { + "body": "Yes, D:\\ is a USB. I copied all the files to C:\\ such that the path is now: \r\nC:\\Yana\\yana_training_social-Yana-2021-10-29\\videos\\h5\r\n\r\nAlso, yes that was a typo. The file has the substring DLC_dlcrnetms5. 
\r\n\r\nAlthough I changed the location of the files, I'm still getting the same error message \"importing 0 multi-animal DLC .h5 files...\" Do you think there may be an issue with the h5 files themselves such that I'd have to re-analyze the videos on DLC and generate new h5 files? Could copying the files to C:\\ rather than moving them be causing this issue to persist (i.e. should I delete the files from D:\\)? \r\n\r\n", + "created_at": "2021-11-22T16:04:35Z", + "author": "yana-yuhai" + }, + { + "body": "To walk you through what I've done prior to this error message:\r\n\r\nGenerated project Yana_SimBA in C:\\Yana\r\nCreated custom pose configuration\r\nImported 5 videos from C:\\Yana\\yana_training_social-Yana-2021-10-29\\videos\\dlc-videos\r\nThen I get the error trying to import the h5 files\r\n\r\n", + "created_at": "2021-11-22T16:17:41Z", + "author": "yana-yuhai" + }, + { + "body": "@yana-yuhai - alright. No, it won't be a pose-estimation issue. The message \"importing 0 multi-animal DLC .h5 files...\" suggests that SimBA just can't find any files with an `el.h5` ending inside of the `C:\\Yana\\yana_training_social-Yana-2021-10-29\\videos\\h5\\` directory. \r\n\r\nTry one more thing - just stick the h5 files inside of a folder on the desktop, e.g., `C:\\Yana\\Desktop\\MyVideos\\` - I just want to confirm there is nothing strange going on at your end with the hyphens and numbers you have in the path. And let me know if you still get the same error?\r\n\r\n", + "created_at": "2021-11-22T16:25:45Z", + "author": "sronilsson" + }, + { + "body": "I tried it but am still getting the error. One thing I noticed was that the pathway was C:\\Yana\\OneDrive\\Desktop\\MyVideos\\ \r\n\r\nDo you think this could be due to saving files onto OneDrive? ", + "created_at": "2021-11-22T16:39:34Z", + "author": "yana-yuhai" + }, + { + "body": "I don't think that could be it, although I haven't tried to import data into SimBA from OneDrive. It might be worth trying just on your local machine, not OneDrive. \r\n\r\nCould you do `pip show simba-uw-tf-dev` and post a screenshot of the output? \r\n\r\nCould you also type `pip show simba-uw-tf` and `pip show simba-uw-no-tf` and make sure nothing is found for these two packages.\r\n\r\n", + "created_at": "2021-11-22T20:44:20Z", + "author": "sronilsson" + }, + { + "body": "Here is the screenshot. I checked the other packages and they are not there.\r\n[screenshot]\r\n", + "created_at": "2021-11-22T21:56:57Z", + "author": "yana-yuhai" + }, + { + "body": "Thanks @yana-yuhai, a little bit of a mystery at the moment.. I've tried to replicate again with exactly the same file formats and paths as you and I am not seeing the error.. \r\n\r\nWhat I have done now is to insert some better errors and print-outs in SimBA when it tries to import the files, printing out which directories are being searched and for which file-endings, and, if zero files are found, printing out whether any other files are found in the directory that do not match the search criteria. \r\n\r\nCan you update SimBA to version 0.88.1 with `pip install simba-uw-tf-dev==0.88.1` and show me a screenshot of the SimBA terminal now after trying to import your files? ", + "created_at": "2021-11-23T13:02:19Z", + "author": "sronilsson" + }, + { + "body": "[screenshot]\r\n[screenshot]\r\n", + "created_at": "2021-11-23T15:58:27Z", + "author": "yana-yuhai" + }, + { + "body": "Updated SimBA and the results are interesting. It's telling me that the files are non-ellipse. 
I tried switching the selection to box and skeleton and got the same output. ", + "created_at": "2021-11-23T15:58:58Z", + "author": "yana-yuhai" + }, + { + "body": "Oh, your files seem to have the file-ending `el.h5.h5`, rather than `el.h5`, which is why this error is happening. If you look at the files in the file-explorer, are the files named `el.h5.h5`? ", + "created_at": "2021-11-23T16:14:26Z", + "author": "sronilsson" + }, + { + "body": "The files are named el.h5 in file explorer. Should I change them to just \"el\"? Maybe the .h5 is being added on somehow? ", + "created_at": "2021-11-23T16:16:41Z", + "author": "yana-yuhai" + }, + { + "body": "Hmm, no, this is a little peculiar. Are you hiding the file-extensions in the file-explorer and added them by hand, resulting in a duplication? \r\n\r\nhttps://vtcri.kayako.com/article/296-view-file-extensions-windows-10", + "created_at": "2021-11-23T16:19:53Z", + "author": "sronilsson" + }, + { + "body": "Yes, that's it! When I clicked extensions the file names appeared as el.h5.h5. How should I fix this? ", + "created_at": "2021-11-23T16:21:24Z", + "author": "yana-yuhai" + }, + { + "body": "Strange but good! I advise always showing file-endings. Just remove the duplicate file-extensions, so the files are named `Trial1_2021-07-17-095318-0000DLC_dlcrnetms5_yana_training_socialOct29shuffle1_50000_el.h5` and not `Trial1_2021-07-17-095318-0000DLC_dlcrnetms5_yana_training_socialOct29shuffle1_50000_el.h5.h5`, and let me know if you can import them. ", + "created_at": "2021-11-23T16:35:57Z", + "author": "sronilsson" + }, + { + "body": "I think we've almost figured it out. It now says \"importing 5 multi-animal DLC h5 files...\" but is also giving me an error message for one of the files, which I've posted below.\r\n[screenshot]\r\n", + "created_at": "2021-11-23T16:56:24Z", + "author": "yana-yuhai" + }, + { + "body": "Thank you so much for all the help, by the way. ", + "created_at": "2021-11-23T16:56:35Z", + "author": "yana-yuhai" + }, + { + "body": "Alright, progress! Next, SimBA searches for the video file associated with your multi-animal H5 file. It searches `project_folder/videos` for a video associated with the `Trial1_2021-07-17-095318-0000DLC_dlcrnetms5_yana_training_socialOct29shuffle1_50000_el.h5` file, but can't find anything. The screenshot you see is a range of file-names SimBA guessed that your video might have, inside the `project_folder/videos` folder. However, none of these files were located. \r\n\r\nHave you imported your videos? \r\n\r\nYou can read more about this error and potential solutions here:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#17-i-get-an-error-when-i-try-to-import-multi-animal-data-h5-files-from-deeplabcut-into-simba---the-error-says-that-simba-cannot-locate-my-video-files\r\n\r\nAlso, I can still see some `el.h5.h5` in there. Make sure all of the files you are importing only have the `el.h5` file-ending. ", + "created_at": "2021-11-23T17:25:20Z", + "author": "sronilsson" + }, + { + "body": "The first guess should be Trial1_2021-07-17-095318-0000.mp4 and Trial1_2021-07-17-095318-0000.avi", + "created_at": "2021-11-23T17:30:50Z", + "author": "sronilsson" + }, + { + "body": "Yes, everything is working! I apologize for the delayed response. Thank you so much for the help. 
\r\n", + "created_at": "2021-12-03T18:40:02Z", + "author": "yana-yuhai" + } + ] + }, + { + "title": "NWB support", + "body": "Looking through your documentation, it looks like interoperability with related open source tools is a goal of the project. Would you be interested in integrating this with NWB? We are already working with DLC to integrate with NWB, and we have started to build an NWB extension for pose estimation [here](https://github.com/rly/ndx-pose). It would be great to see if we can output your data into the same format, and/or discover any generalizations that would need to be made in order to do that. \r\n\r\ncc @rly", + "user": "bendichter", + "reaction_cnt": 1, + "created_at": "2021-11-14T21:29:16Z", + "updated_at": "2021-11-15T18:25:47Z", + "author": "bendichter", + "comments": [ + { + "body": "Thank you Ben, our PI, Sam would love to talk to you about this. Please get in touch with him at sagolden@uw.edu.", + "created_at": "2021-11-15T18:25:47Z", + "author": "inoejj" + } + ] + }, + { + "title": "SLEAP data import", + "body": "I realize there are several threads about this problem, but even after updating and reading through all the documentation again, I still can't find a solution. In one thread there was a hint that the update would contain a fix, but I think I am updated to the latest version (I see the new GUI with ROI features) and I am still receiving the same here. In short, I am trying to import a SLEAP slp file, but I receive the error below. Have other users found a solution? Thank you in advance.\r\n\r\nD:/SLEAP/VD/simbatest/project_folder/project_config.ini\r\n(['Ear_left', 'Ear_right', 'Nose', 'Tail_base'], ['No body parts'])\r\nNumber of Frames: 54308\r\nThe CSV file could not be located at the following path: D:/SLEAP/VD/simbatest/project_folder\\csv\\features_extracted\\2021_09_0110_00.csv . It may be that you missed a step in the analysis. Please generate the file before proceeding.\r\nApplying settings for classical tracking...\r\nD:/SLEAP/VD/simbatest/project_folder\\csv\\targets_inserted\\2021_09_0110_00.csv\r\nThe CSV file could not be located at the following path: D:/SLEAP/VD/simbatest/project_folder\\csv\\targets_inserted\\2021_09_0110_00.csv . It may be that you missed a step in the analysis. Please generate the file before proceeding.\r\nReading in 0 annotated files...\r\nConverting .slp into csv dataframes...\r\nProcessing 2021_09_0110_00.slp...\r\nWarning: The video name could not be found in the .SLP meta-data table\r\nSimBA therefore gives the imported CSV the same name as the SLP file.\r\nTo be sure that SimBAs slp import function works, make sure the .SLP file and the associated video file has the same file name - e.g., \"Video1.mp4\" and \"Video1.slp\" before importing the videos and SLP files to SimBA.\r\n12\r\ncannot set a row with mismatched columns\r\nCheck that you have confirmed the number of animals and named your animals in the SimBA interface\r\n\r\n**Desktop (please complete the following information):**\r\nI am using a windows 10 machine with a GPU. I am using anaconda and have updated SimBa within its own environment using command line.\r\n\r\n", + "user": "jverpeut", + "reaction_cnt": 0, + "created_at": "2021-11-12T23:55:57Z", + "updated_at": "2021-12-27T18:00:36Z", + "author": "jverpeut", + "comments": [ + { + "body": "Hi @jverpeut! 
The key part of the error message is this one:\r\n\r\n``` cannot set a row with mismatched columns\r\nCheck that you have confirmed the number of animals and named your animals in the SimBA interface\r\n```\r\n\r\nSimBA expects columns representing 4 body-parts in the SLP file, but for some reason finds more, or fewer, than 4, and throws the error. Could you send a screenshot of the error msg printed in the Windows terminal when you try to import the SLP files? It is easier for me to understand and debug if needed. \r\n", + "created_at": "2021-11-13T13:03:12Z", + "author": "sgoldenlab" + }, + { + "body": "I think one of my body parts is not labeled throughout. I am going to retrack and try again. Thank you!", + "created_at": "2021-11-15T04:19:41Z", + "author": "jverpeut" + }, + { + "body": "Hi @jverpeut . I am also using SLEAP, and I found a duplication bug that prevented me from running SimBA.\r\nI'm working on a more permanent fix. But for now here's some code:\r\nhttps://github.com/sgoldenlab/simba/issues/155\r\n\r\nI'm also curious to know how you find SimBA+SLEAP, feel free to ping me.", + "created_at": "2021-12-24T12:51:10Z", + "author": "catubc" + }, + { + "body": "Thank you @catubc! I will try this. I wonder too if we are limited to the classifiers already built into SimBA or if we can train a new classifier.", + "created_at": "2021-12-27T18:00:36Z", + "author": "jverpeut" + } + ] + }, + { + "title": "File -> Exit Doesn't work", + "body": "In the main SimBA window, File -> Exit throws the following error:\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\name\\anaconda3\\envs\\conda-environment\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\name\\anaconda3\\envs\\conda-environment\\lib\\site-packages\\simba\\SimBA.py\", line 971, in Exit\r\n app.root.destroy()\r\nNameError: name 'app' is not defined\r\n\r\nClicking the red X to close the window works fine.\r\n\r\nI am using Windows 10, Python 3.8.5, and I am using Anaconda.", + "user": "NickBardjis", + "reaction_cnt": 0, + "created_at": "2021-11-09T21:02:03Z", + "updated_at": "2021-11-09T22:11:56Z", + "author": "NickBardjis", + "comments": [ + { + "body": "Thank you for reporting this bug. I have fixed it, and you can give it a shot by downloading the latest version:\r\n\r\n`pip install simba-uw-tf-dev==0.87.8`\r\n\r\nPlease use Python 3.6 for SimBA and let me know if you have any questions.", + "created_at": "2021-11-09T22:11:55Z", + "author": "inoejj" + } + ] + }, + { + "title": "Question about creating a behavioural classifier", + "body": "Hello,\r\n\r\nI have a question that might be a bit simple but I couldn't find answers in the tutorial pages or in the closed issues; apologies if it has been asked before. \r\n\r\nI am trying to build a classifier that would annotate supported and unsupported rearing of mice in the OFT. I have 49 videos of mice, each 5-7 minutes long. My question is how many videos I should annotate (approximately) to build a good classifier, at least as a start? I understand it depends on the number of times a behaviour is present in each video, but since rearing is a quite common behaviour in the OFT, I suspect I shouldn't need that many videos, but I have no idea how many could possibly be enough. \r\n\r\nThank you in advance. 
", + "user": "elenael97", + "reaction_cnt": 0, + "created_at": "2021-10-21T15:00:07Z", + "updated_at": "2021-10-21T18:37:38Z", + "author": "elenael97", + "comments": [ + { + "body": "Hey! Very common question and I'll add something to the docs about it as soon as I get the chance.\r\nI would suggest the following:\r\n1. From videos of 10 different animals, find a 1-2 min stretch where you see BOTH supported and unsupported rearing. \r\n2. Pull that timeclip from your video using tools -> clip video into multiple video (this will make a short video labeled yourvideoname-clip1 in your videos folder). \r\n3. Label these ten short clips, train your classifier, and see how it's doing on a video from an animal you did not train the classifier on. You may need to add a few more clips in, but rearing is a pretty distinct behavior so I don't think you'll need to label a ton of videos. \r\nIn general, you want to label a little bit of data from several different animals to get generalizable classifiers. ", + "created_at": "2021-10-21T18:37:38Z", + "author": "goodwinnastacia" + } + ] + }, + { + "title": "H5 - how to import tracking type \"ellipse\"?", + "body": "Hi,\r\nI'm trying to import some H5 files with tracking type \"ellipse\" in the \"import tracking data\" tab, but the tracking type options available are \"skeleton\" and \"box\". So there'sn't \"ellipse\" option. \r\n![Schermata 2021-10-21 alle 16 03 35](https://user-images.githubusercontent.com/85634193/138293887-e72a0c92-e3a0-44e0-a6e4-b0eb589e12d5.png)\r\n\r\nHow can import these \"ellipse\" H5 files correctly?\r\n\r\nThanks,\r\nCarlo.\r\n", + "user": "carlitomu", + "reaction_cnt": 0, + "created_at": "2021-10-21T14:06:44Z", + "updated_at": "2021-10-22T14:11:31Z", + "author": "carlitomu", + "comments": [ + { + "body": "Hello @carlitomu ! There is an ellipse option - it was added 4-5 months ago, you would not see it if you were running an older version of SimBA. 
Could you upgrade SimBA with `pip install simba-uw-tf-dev --upgrade` or `pip install simba-uw-tf-dev==0.87.7` and let me know if you see it?", + "created_at": "2021-10-21T14:12:35Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks.\r\n\r\nI used \"pip install simba-uw-tf-dev==0.87.7\" (in the terminal in the Anaconda environment, working on OSX),\r\nand now I get this:\r\n![Schermata 2021-10-21 alle 16 32 25](https://user-images.githubusercontent.com/85634193/138301955-b59ae3fa-f032-4fba-a40a-2b8425feeda5.png)\r\n\r\nI tried to follow the instructions for the installation on mac (https://simba-docs.readthedocs.io/en/latest/docs/intro/installation.html), but I can't solve the problem: I can't launch SimBA again :(\r\n", + "created_at": "2021-10-21T14:48:22Z", + "author": "carlitomu" + }, + { + "body": "Oh, I have not seen this one before, but it's a matplotlib issue. Before launching SimBA with `simba`, could you try this recommendation: https://github.com/ludwig-ai/ludwig/issues/114\r\n\r\nSo type `conda install matplotlib` to make sure your matplotlib version is above 3.0?\r\n\r\n\r\n\r\n", + "created_at": "2021-10-21T15:00:51Z", + "author": "sgoldenlab" + }, + { + "body": "After `conda install matplotlib` and `simba`, I get this:\r\n![Schermata 2021-10-21 alle 17 12 03](https://user-images.githubusercontent.com/85634193/138306811-4055ea79-853c-44dc-8b80-59b88a203ddb.png)\r\n(I also checked the matplotlib version: it's 3.0.3)\r\n", + "created_at": "2021-10-21T15:14:05Z", + "author": "carlitomu" + }, + { + "body": "Hi,\r\nPlease start a new environment and follow the steps, making sure that the version of Python that you are using is 3.6.13.\r\n\r\nOne quick note: make sure that in your conda environment, tk has version 8.6.10\r\n\r\n\r\n[screenshot]\r\n\r\nhttps://simba-docs.readthedocs.io/en/latest/docs/intro/installation.html#installing-on-macos", + "created_at": "2021-10-21T21:20:49Z", + "author": "sgoldenlab" + }, + { + "body": "Hi,\r\nI followed your instructions (tk 8.6.10 and python 3.6.13 in a new environment) and...\r\n![Schermata 2021-10-22 alle 16 08 37](https://user-images.githubusercontent.com/85634193/138468501-0bd093ad-6335-423c-a13b-d3bc6cbf80d1.png)\r\n\r\nThanks!", + "created_at": "2021-10-22T14:11:26Z", + "author": "carlitomu" + } + ] + }, + { + "title": "Visual Indication of Click Location On Plot Not Appearing", + "body": "When running the validation step for a generated model, after clicking \"generate plot\", if you close the plot and video by clicking the X in the corner and then reopen it by clicking \"generate plot\" again, the red line that indicates where in the graph you have clicked no longer appears. The program still works normally otherwise, changing the frame when you double click on the graph, but the visual indication of where you clicked is gone. The problem seems to persist until you close and relaunch SimBA.\r\n\r\nTo recreate:\r\n1. Load Project\r\n2. Go to the [Run Machine Model] tab\r\n3. Under \"Validate Model on a Single Video\", load the desired feature .csv and the .sav model and click \"Run Model\"\r\n4. Once the model is done running, click \"Generate Plot\"\r\n5. Close the plot and video windows\r\n6. 
Click \"Generate Plot\" again\r\n\r\nI am using Windows 10, Python 3.8.5, and I am using Anaconda.", + "user": "NickBardjis", + "reaction_cnt": 0, + "created_at": "2021-10-15T20:12:24Z", + "updated_at": "2021-10-15T20:12:24Z", + "author": "NickBardjis", + "comments": [] + }, + { + "title": "Bump opencv-python from 3.4.5.20 to 4.2.0.32", + "body": "Bumps [opencv-python](https://github.com/skvark/opencv-python) from 3.4.5.20 to 4.2.0.32.\n
\n**Release notes** (sourced from opencv-python's releases):\n\n**4.2.0.32** - OpenCV version 4.2.0.\n\nChanges:\n- macOS environment updated from xcode8.3 to xcode 9.4\n- macOS now uses Qt 5 instead of Qt 4\n- Nasm version updated to Docker containers\n- multibuild updated\n\nFixes:\n- don't use deprecated brew tap-pin, instead refer to the full package name when installing #267\n- replace get_config_var() with get_config_vars() in setup.py #274\n- add workaround for DLL errors in Windows Server #264\n\n**3.4.9.31** - OpenCV version 3.4.9. Same changes and fixes as 4.2.0.32.\n\n**4.1.2.30** - OpenCV version 4.1.2. (release notes truncated)\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opencv-python&package-manager=pip&previous-version=3.4.5.20&new-version=4.2.0.32)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n**Dependabot commands and options**\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-10-12T23:11:25Z", + "updated_at": "2022-03-12T20:33:42Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-03-12T20:33:36Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump opencv-python from 3.4.5.20 to 4.2.0.32 in /simba", + "body": "Bumps [opencv-python](https://github.com/skvark/opencv-python) from 3.4.5.20 to 4.2.0.32.\n
\n**Release notes** (sourced from opencv-python's releases): identical to the \"Bump opencv-python from 3.4.5.20 to 4.2.0.32\" PR above - 4.2.0.32 (OpenCV 4.2.0), 3.4.9.31 (OpenCV 3.4.9), and 4.1.2.30 (OpenCV 4.1.2, truncated), with the same changes and fixes.\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opencv-python&package-manager=pip&previous-version=3.4.5.20&new-version=4.2.0.32)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. The usual Dependabot commands and options apply (see the PR above for the full list). You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-10-12T23:01:15Z", + "updated_at": "2022-03-12T20:33:42Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-03-12T20:33:35Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump opencv-python from 3.4.5.20 to 4.1.2.30", + "body": "Bumps [opencv-python](https://github.com/skvark/opencv-python) from 3.4.5.20 to 4.1.2.30.\n
\n**Release notes** (sourced from opencv-python's releases):\n\n**4.1.2.30** - OpenCV version 4.1.2.\n\nChanges:\n- Python 3.8 builds added to the build matrix\n- Support for Python 3.4 builds dropped (Python 3.4 is in EOL)\n- multibuild updated\n- minor build logic changes\n- Docker images rebuilt\n\nNotes: Please note that Python 2.7 enters into EOL phase in January 2020. opencv-python Python 2.7 wheels won't be provided after that.\n\n**3.4.8.29** - OpenCV version 3.4.8. Same changes and notes as 4.1.2.30.\n\n**3.4.7.28** - OpenCV version 3.4.7.\n\n**3.4.6.27** - OpenCV version 3.4.6.\n\n**4.1.1.26** - (release notes truncated)\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opencv-python&package-manager=pip&previous-version=3.4.5.20&new-version=4.1.2.30)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. The usual Dependabot commands and options apply (see the first opencv-python bump PR above for the full list). You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-10-12T22:33:44Z", + "updated_at": "2021-10-12T23:11:28Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #141.", + "created_at": "2021-10-12T23:11:26Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump opencv-python from 3.4.5.20 to 3.4.7.28 in /simba", + "body": "Bumps [opencv-python](https://github.com/skvark/opencv-python) from 3.4.5.20 to 3.4.7.28.\n
\n**Release notes** (sourced from opencv-python's releases):\n\n**3.4.7.28** - OpenCV version 3.4.7.\n\n**3.4.6.27** - OpenCV version 3.4.6.\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=opencv-python&package-manager=pip&previous-version=3.4.5.20&new-version=3.4.7.28)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. The usual Dependabot commands and options apply (see the first opencv-python bump PR above for the full list). You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-10-12T22:23:12Z", + "updated_at": "2021-10-12T23:01:19Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #140.", + "created_at": "2021-10-12T23:01:17Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Labeling Freezing Behavior", + "body": "Hello :)\r\n\r\nIm pretty sure, that this is not the right section for this question, but not sure, where else to ask.\r\n\r\nWithin our simba project are not using the stage off an machine model, but already would like to track kind of a freezing behavior of our mees.\r\n\r\nOur idea was to use from the feature extraction function the feature named :\r\n![image](https://user-images.githubusercontent.com/81317771/137004543-68a045b1-27f9-42ba-abf9-8628ed18d80f.png)\r\n\r\nSo if the mouse is \"compessing itself\" while the freezing behavior we could detect a relatively different euclidean distance between its bodyparts.\r\nWe are sceptical about using this, but not sure if there is another function usable with no trained machine model in SimBA..\r\n\r\nCheers!", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-10-12T17:40:24Z", + "updated_at": "2021-10-23T02:07:52Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Hello @Marcelobot !\r\n\r\nYeah it will depend on how you define freezing behavior - if freezing behavior is when the animal has a small hull, than this will be a feature to look at. \r\n\r\nHowever freezing behavior might be a little more complicated. For example, if the hull is small, but some body-parts like the tail or nose is still moving, it might not be freezing behavior any more? The hull is also the smallest when the animal is rearing, and the animal probably can’t freeze and rear at same time? I’d say to find freezing, **it is much easier and more accurate to train a model that finds the correlations for you**", + "created_at": "2021-10-14T00:58:02Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Too Many Labels Makes Save Button Inaccessible", + "body": "Hi. I'm trying out SimBA to see if it can help with my team's current project. The animals we want to track are fairly complex with 31 body parts labeled. This, of course, isn't one of the pre-defined options for the pose config, so I'm having to create a new one. 
The problem is that with so many body parts, the Save Pose Config button gets pushed off screen even with the window at max size, and with no way to scroll down it becomes inaccessible.\r\n\r\nI've tried hitting the tab button until it's (I think, I can't see to know for sure) selected and hitting enter, but this doesn't work either (either that or I need to fill in the Image Path section, but I haven't found anything telling me what that is).\r\n", + "user": "NickBardjis", + "reaction_cnt": 0, + "created_at": "2021-10-01T14:00:51Z", + "updated_at": "2021-10-12T19:15:15Z", + "author": "NickBardjis", + "comments": [ + { + "body": "Thanks for reporting @NickBardjis - yes, we should make the window scrollable, I will let you know when done", + "created_at": "2021-10-01T14:30:56Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you.\r\n\r\nBy the way, I've noticed the instructions specifically talk about labeling rodents. Can the program be used for other animals?", + "created_at": "2021-10-01T15:25:16Z", + "author": "NickBardjis" + }, + { + "body": "Yes, I've worked directly with users with birds, moths, fish.. rodents is just what we do in the lab", + "created_at": "2021-10-01T15:46:47Z", + "author": "sgoldenlab" + }, + { + "body": "Please download the latest development wheel and there should be a scrollbar\r\n\r\n`pip install simba-uw-tf-dev==0.87.2`", + "created_at": "2021-10-01T17:54:21Z", + "author": "inoejj" + }, + { + "body": "Thank you again. One more question: I found the info I was looking for about the image path, and it says all body parts should be clearly visible in the image, but I'm not sure that's anatomically possible, as we have body parts marked on the front and back and the left and right sides. I think we might be able to get most of them if things line up perfectly, but not all of them. How big of an issue is that going to be?", + "created_at": "2021-10-01T17:54:50Z", + "author": "NickBardjis" + }, + { + "body": "Okay, thanks again!", + "created_at": "2021-10-01T18:05:49Z", + "author": "NickBardjis" + }, + { + "body": "@NickBardjis - just to check, are you recording your videos from above or the side of the animal? ", + "created_at": "2021-10-01T18:16:01Z", + "author": "sgoldenlab" + }, + { + "body": "The side, typically.", + "created_at": "2021-10-05T15:07:43Z", + "author": "NickBardjis" + }, + { + "body": "Oh, also I found a similar issue to my original problem with the Label Behavior window. Unlike my last problem, it's still usable, but a portion of the UI (specifically the part with all the keyboard shortcuts listed) is pushed halfway off the screen (I assume because my video is too wide). Again, it's still usable, so not a big deal, but I figured I should mention it.\r\n\r\nBy the way, I have a few more questions (and will likely have more as I continue to explore the software). 
Is there a better place for me to ask them, since most of them aren't really issues with the software?", + "created_at": "2021-10-05T15:24:40Z", + "author": "NickBardjis" + }, + { + "body": "Hello @NickBardjis - you could email goldenneurolab@gmail.com - if you are up for it, the fastest and easiest option is often to schedule a Zoom chat and we can go through all questions!", + "created_at": "2021-10-06T22:11:50Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Error while trying to set video parameters", + "body": "Hello,\r\n\r\nAgain: after SimBA was working without any errors at all last week, today I've tried to start a new project and an error occurred:\r\n\r\n- created project successfully\r\n- imported video/csv file successfully\r\n- everything is located and stored right - there is nothing wrong with any paths\r\n- set video parameters --> instead of the GUI opening the parameter table, an error happened:\r\n\r\n![image](https://user-images.githubusercontent.com/81317771/134189379-b5dd8fdb-5452-47e9-8136-8903c3087c77.png)\r\n\r\n1) How can I fix that error?\r\n2) The purpose of the project was just to try out whether I could apply a probability threshold to the feature extraction, like it's possible for ROI analysis (written in the config).. Is there a way?\r\n\r\n![image](https://user-images.githubusercontent.com/81317771/134190091-99e37da8-e41a-40e1-adaa-624d216d5870.png)\r\n\r\nCan't wait to read the answers :)\r\n\r\nCheers!\r\n", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-09-21T14:30:34Z", + "updated_at": "2021-09-22T11:18:05Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Hi @Marcelobot! The error comes from SimBA trying to read in the resolutions/fps of the videos in your project and present them in the table. SimBA tries to look in the folder in the screenshot below for your videos, but it can't find the folder. \r\n\r\nCan you confirm that the folder exists, and that your videos are in that folder? The folder is located on `W:` - so also make sure you have full read/write privileges there.\r\n\r\n\r\nFor your second question: \r\n\r\n`The purpose of the project was just to try out whether I could apply a probability threshold to the feature extraction, like it's possible for ROI analysis (written in the config).. Is there a way?`\r\n\r\nI am not sure what this means - does it mean extracting features only for select frames where the detection probability/confidence of some body-parts is at a certain threshold? \r\n\r\n\r\n\r\n![Untitled 3](https://user-images.githubusercontent.com/50497030/134333711-83b45077-0dd5-44d9-8495-068d8de9a57b.png)\r\n\r\n", + "created_at": "2021-09-22T11:18:05Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Cannot install Simba with TF due to dependency conflicts", + "body": "\r\nDear developers,\r\n\r\nI am eager to use SimBA, which looks highly promising for my research (social interaction between rats). Thank you for your efforts!\r\n\r\nI want to use SimBA with TensorFlow and DeepLabCut, as I have already successfully worked with TF and DeepLabCut. I assume that this means I should install the \"simba-uw-tf\" version. By the way, I am not good at programming and command-line usage, so I depend on using the GUI. 
\r\n\r\nI ran into problems installing it in a fresh Anaconda3 environment with Python 3.6, after \"pip install simba-uw-tf\".\r\nIt aborts with a dependency conflict error, see below.\r\n\r\nI also tried \"pip install simba-uw-tf==1.3.11\", same error.\r\n\r\nCould you please help me?\r\n\r\n\r\n> By the way, I also tried installing from the git repository directly, by doing the following steps in a fresh Anaconda environment:\r\n> \tpip install wxpython\r\n> \tconda install git\r\n> \tpip install git+https://github.com/sgoldenlab/simba.git\r\n> \tpip uninstall shapely\r\n> \tconda install -c conda-forge shapely\r\n> After that, I could not start SimBA with the command \"simba\".\r\n\r\n\r\n\r\nBest wishes,\r\nQuirin\r\n\r\n\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Generate fresh Anaconda3 environment under Windows 10\r\n2. pip install simba-uw-tf or pip install simba-uw-tf==1.3.11\r\n3. See error\r\n\r\n**Expected behavior**\r\nI would expect the installation to work.\r\n\r\n**Screenshots**\r\nn/a\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python Version 3.6. (Anaconda env selecting Python 3.6)\r\n - Are you using anaconda? Yes.\r\n \r\n\r\n**Additional context**\r\n\r\n### After **pip install simba-uw-tf**:\r\n\r\n```\r\n(Simba) C:\\Users\\************>pip install simba-uw-tf\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.3.12-py3-none-any.whl (7.3 MB)\r\nCollecting yellowbrick==0.9.1\r\n Using cached yellowbrick-0.9.1-py2.py3-none-any.whl (282 kB)\r\nCollecting numexpr==2.6.9\r\n Using cached numexpr-2.6.9-cp36-none-win_amd64.whl (91 kB)\r\nCollecting statsmodels==0.9.0\r\n Using cached statsmodels-0.9.0-cp36-cp36m-win_amd64.whl (7.0 MB)\r\nCollecting imgaug==0.4.0\r\n Using cached imgaug-0.4.0-py2.py3-none-any.whl (948 kB)\r\nCollecting tqdm==4.30.0\r\n Using cached tqdm-4.30.0-py2.py3-none-any.whl (47 kB)\r\nCollecting tabulate==0.8.3\r\n Using cached tabulate-0.8.3-py3-none-any.whl\r\nCollecting dash==1.14.0\r\n Using cached dash-1.14.0-py3-none-any.whl\r\nCollecting deepposekit==0.3.5\r\n Using cached deepposekit-0.3.5-py3-none-any.whl\r\nCollecting pyyaml==5.3.1\r\n Using cached PyYAML-5.3.1-cp36-cp36m-win_amd64.whl (215 kB)\r\nCollecting dash-core-components==1.10.2\r\n Using cached dash_core_components-1.10.2-py3-none-any.whl\r\nCollecting dtreeviz==0.8.1\r\n Using cached dtreeviz-0.8.1-py3-none-any.whl\r\nCollecting seaborn==0.9.0\r\n Using cached seaborn-0.9.0-py3-none-any.whl (208 kB)\r\nCollecting dash-html-components==1.0.3\r\n Using cached dash_html_components-1.0.3-py3-none-any.whl\r\nCollecting scikit-image==0.14.2\r\n Using cached scikit_image-0.14.2-cp36-none-win_amd64.whl (24.6 MB)\r\nCollecting tensorflow-gpu==1.14.0\r\n Using cached tensorflow_gpu-1.14.0-cp36-cp36m-win_amd64.whl (287.7 MB)\r\nCollecting pandas==0.25.3\r\n Using cached pandas-0.25.3-cp36-cp36m-win_amd64.whl (9.0 MB)\r\nCollecting scipy==1.1.0\r\n Using cached scipy-1.1.0-cp36-none-win_amd64.whl (31.1 MB)\r\nCollecting pyarrow==0.17.1\r\n Using cached pyarrow-0.17.1-cp36-cp36m-win_amd64.whl (21.6 MB)\r\nCollecting imblearn==0.0\r\n Using cached imblearn-0.0-py2.py3-none-any.whl (1.9 kB)\r\nCollecting cefpython3==66.0\r\n Using cached cefpython3-66.0-py2.py3-none-win_amd64.whl (68.2 MB)\r\nCollecting plotly==4.9.0\r\n Using cached plotly-4.9.0-py2.py3-none-any.whl (12.9 MB)\r\nCollecting opencv-python==3.4.5.20\r\n Using cached opencv_python-3.4.5.20-cp36-cp36m-win_amd64.whl (38.3 MB)\r\nCollecting xgboost==0.90\r\n Using 
cached xgboost-0.90-py2.py3-none-win_amd64.whl (18.3 MB)\r\nCollecting eli5==0.10.1\r\n Using cached eli5-0.10.1-py2.py3-none-any.whl (105 kB)\r\nCollecting wxpython==4.0.4\r\n Using cached wxPython-4.0.4-cp36-cp36m-win_amd64.whl (23.0 MB)\r\nCollecting dash-colorscales==0.0.4\r\n Using cached dash_colorscales-0.0.4-py3-none-any.whl\r\nCollecting shap==0.35.0\r\n Using cached shap-0.35.0-cp36-cp36m-win_amd64.whl (309 kB)\r\nCollecting deeplabcut==2.0.8\r\n Using cached deeplabcut-2.0.8-py3-none-any.whl (178 kB)\r\nCollecting imutils==0.5.2\r\n Using cached imutils-0.5.2-py3-none-any.whl\r\nCollecting numba==0.48.0\r\n Using cached numba-0.48.0-cp36-cp36m-win_amd64.whl (2.1 MB)\r\nCollecting scikit-learn==0.22.2\r\n Using cached scikit_learn-0.22.2-cp36-cp36m-win_amd64.whl (6.5 MB)\r\nCollecting graphviz==0.11\r\n Using cached graphviz-0.11-py2.py3-none-any.whl (17 kB)\r\nCollecting matplotlib==3.0.3\r\n Using cached matplotlib-3.0.3-cp36-cp36m-win_amd64.whl (9.1 MB)\r\nCollecting shapely==1.7\r\n Using cached Shapely-1.7.0-cp36-cp36m-win_amd64.whl (1.0 MB)\r\nCollecting dash-color-picker==0.0.1\r\n Using cached dash_color_picker-0.0.1-py3-none-any.whl\r\nCollecting h5py==2.9.0\r\n Using cached h5py-2.9.0-cp36-cp36m-win_amd64.whl (2.4 MB)\r\nCollecting protobuf==3.6.0\r\n Using cached protobuf-3.6.0-cp36-cp36m-win_amd64.whl (1.1 MB)\r\nCollecting Pillow==5.4.1\r\n Using cached Pillow-5.4.1-cp36-cp36m-win_amd64.whl (1.9 MB)\r\nCollecting flask-compress\r\n Using cached Flask_Compress-1.10.1-py3-none-any.whl (7.9 kB)\r\nCollecting dash-table==4.9.0\r\n Using cached dash_table-4.9.0-py3-none-any.whl\r\nCollecting dash-renderer==1.6.0\r\n Using cached dash_renderer-1.6.0-py3-none-any.whl\r\nCollecting Flask>=1.0.2\r\n Using cached Flask-2.0.1-py3-none-any.whl (94 kB)\r\nCollecting future\r\n Using cached future-0.18.2-py3-none-any.whl\r\nCollecting moviepy~=0.2.3.5\r\n Using cached moviepy-0.2.3.5.tar.gz (372 kB)\r\nCollecting requests\r\n Using cached requests-2.26.0-py2.py3-none-any.whl (62 kB)\r\nINFO: pip is looking at multiple versions of dash-table to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-renderer to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-html-components to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-core-components to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-colorscales to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-color-picker to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of cefpython3 to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of simba-uw-tf to determine which version is compatible with other requirements. 
This could take a while.\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.3.11-py3-none-any.whl (7.3 MB)\r\n Using cached Simba_UW_tf-1.3.10-py3-none-any.whl (7.3 MB)\r\n Using cached Simba_UW_tf-1.2.31-py3-none-any.whl (4.6 MB)\r\n Using cached Simba_UW_tf-1.2.30-py3-none-any.whl (4.6 MB)\r\n Using cached Simba_UW_tf-1.2.29-py3-none-any.whl (3.4 MB)\r\n Using cached Simba_UW_tf-1.2.28-py3-none-any.whl (3.4 MB)\r\n Using cached Simba_UW_tf-1.2.27-py3-none-any.whl (3.4 MB)\r\n Using cached Simba_UW_tf-1.2.26-py3-none-any.whl (3.4 MB)\r\n Using cached Simba_UW_tf-1.2.24-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.23-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.22-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.21-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.20-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.19-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.18-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.17-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.16-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.15-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.14-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.13-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.12.1-py3-none-any.whl (4.6 MB)\r\n Using cached Simba_UW_tf-1.2.12-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.11-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.10-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.9.2-py3-none-any.whl (4.6 MB)\r\n Using cached Simba_UW_tf-1.2.9-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.8-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.7-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.6-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.5-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.4.3-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.4.2-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.4.1-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.13-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.12-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.11-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.10-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.9-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.8-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.7-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.6-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.5-py3-none-any.whl (3.3 MB)\r\n Using cached Simba_UW_tf-1.2.3.4-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.3.3-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.3-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.2-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.1-py3-none-any.whl (2.2 MB)\r\n Using cached Simba_UW_tf-1.2.0-py3-none-any.whl (1.9 MB)\r\n Using cached Simba_UW_tf-1.1.7-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.1.6-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.1.1-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.1.0-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.9-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.7-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.6-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.5-py3-none-any.whl (2.3 MB)\r\n Using cached 
Simba_UW_tf-1.0.4-py3-none-any.whl (2.3 MB)\r\n Using cached Simba_UW_tf-1.0.3-py3-none-any.whl (2.3 MB)\r\nCollecting deeplabcut==2.0.9\r\n Using cached deeplabcut-2.0.9-py3-none-any.whl (187 kB)\r\nCollecting patsy\r\n Using cached patsy-0.5.1-py2.py3-none-any.whl (231 kB)\r\nCollecting tables\r\n Using cached tables-3.6.1-2-cp36-cp36m-win_amd64.whl (3.2 MB)\r\nCollecting imageio~=2.3.0\r\n Using cached imageio-2.3.0-py2.py3-none-any.whl (3.3 MB)\r\nCollecting ruamel.yaml~=0.15\r\n Using cached ruamel.yaml-0.17.16-py3-none-any.whl (109 kB)\r\nCollecting intel-openmp\r\n Using cached intel_openmp-2021.3.0-py2.py3-none-win_amd64.whl (3.3 MB)\r\nCollecting six~=1.11.0\r\n Using cached six-1.11.0-py2.py3-none-any.whl (10 kB)\r\nRequirement already satisfied: certifi in c:\\anaconda\\anaconda3\\envs\\simba\\lib\\site-packages (from deeplabcut==2.0.9->simba-uw-tf) (2021.5.30)\r\nCollecting ipython-genutils~=0.2.0\r\n Using cached ipython_genutils-0.2.0-py2.py3-none-any.whl (26 kB)\r\nCollecting wheel~=0.31.1\r\n Using cached wheel-0.31.1-py2.py3-none-any.whl (41 kB)\r\nCollecting python-dateutil~=2.7.3\r\n Using cached python_dateutil-2.7.5-py2.py3-none-any.whl (225 kB)\r\nRequirement already satisfied: setuptools in c:\\anaconda\\anaconda3\\envs\\simba\\lib\\site-packages (from deeplabcut==2.0.9->simba-uw-tf) (52.0.0.post20210125)\r\nCollecting ipython~=6.0.0\r\n Using cached ipython-6.0.0-py3-none-any.whl (736 kB)\r\nCollecting numpy==1.18.1\r\n Using cached numpy-1.18.1-cp36-cp36m-win_amd64.whl (12.8 MB)\r\nINFO: pip is looking at multiple versions of simba-uw-tf to determine which version is compatible with other requirements. This could take a while.\r\nINFO: This is taking longer than usual. You might need to provide the dependency resolver with stricter constraints to reduce runtime. If you want to abort this run, you can press Ctrl + C to do so. To improve how pip performs, tell us what happened here: https://pip.pypa.io/surveys/backtracking\r\nCollecting scikit-learn~=0.19.2\r\n Using cached scikit_learn-0.19.2-cp36-cp36m-win_amd64.whl (4.4 MB)\r\nCollecting easydict~=1.7\r\n Using cached easydict-1.9.tar.gz (6.4 kB)\r\nCollecting numpy~=1.14.5\r\n Using cached numpy-1.14.6-cp36-none-win_amd64.whl (13.4 MB)\r\nCollecting tensorpack~=0.9.7.1\r\n Using cached tensorpack-0.9.7.1-py2.py3-none-any.whl (286 kB)\r\nCollecting chardet~=3.0.4\r\n Using cached chardet-3.0.4-py2.py3-none-any.whl (133 kB)\r\nCollecting click\r\n Using cached click-8.0.1-py3-none-any.whl (97 kB)\r\nCollecting colour\r\n Using cached colour-0.1.5-py2.py3-none-any.whl (23 kB)\r\nCollecting jinja2\r\n Using cached Jinja2-3.0.1-py3-none-any.whl (133 kB)\r\nCollecting attrs>16.0.0\r\n Using cached attrs-21.2.0-py2.py3-none-any.whl (53 kB)\r\nCollecting imbalanced-learn\r\n Using cached imbalanced_learn-0.8.0-py3-none-any.whl (206 kB)\r\nINFO: pip is looking at multiple versions of imblearn to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of h5py to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of graphviz to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of eli5 to determine which version is compatible with other requirements. 
This could take a while.\r\nINFO: pip is looking at multiple versions of dtreeviz to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of deepposekit to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of deeplabcut to determine which version is compatible with other requirements. This could take a while.\r\nERROR: Cannot install simba-uw-tf and simba-uw-tf==1.0.3 because these package versions have conflicting dependencies.\r\n\r\nThe conflict is caused by:\r\n simba-uw-tf 1.0.3 depends on scikit-learn==0.22.2\r\n deeplabcut 2.0.9 depends on scikit-learn~=0.19.2\r\n\r\nTo fix this you could try to:\r\n1. loosen the range of package versions you've specified\r\n2. remove package versions to allow pip attempt to solve the dependency conflict\r\n\r\nERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies\r\n```\r\n\r\n### After **pip install simba-uw-tf**:\r\n\r\n```\r\n(Simba) C:\\Users\\************>pip install simba-uw-tf==1.3.11\r\nCollecting simba-uw-tf==1.3.11\r\n Using cached Simba_UW_tf-1.3.11-py3-none-any.whl (7.3 MB)\r\nCollecting seaborn==0.9.0\r\n Using cached seaborn-0.9.0-py3-none-any.whl (208 kB)\r\nCollecting dash-color-picker==0.0.1\r\n Using cached dash_color_picker-0.0.1-py3-none-any.whl\r\nCollecting numexpr==2.6.9\r\n Using cached numexpr-2.6.9-cp36-none-win_amd64.whl (91 kB)\r\nCollecting pyarrow==0.17.1\r\n Using cached pyarrow-0.17.1-cp36-cp36m-win_amd64.whl (21.6 MB)\r\nCollecting dash-colorscales==0.0.4\r\n Using cached dash_colorscales-0.0.4-py3-none-any.whl\r\nCollecting imgaug==0.4.0\r\n Using cached imgaug-0.4.0-py2.py3-none-any.whl (948 kB)\r\nCollecting plotly==4.9.0\r\n Using cached plotly-4.9.0-py2.py3-none-any.whl (12.9 MB)\r\nCollecting tabulate==0.8.3\r\n Using cached tabulate-0.8.3-py3-none-any.whl\r\nCollecting opencv-python==3.4.5.20\r\n Using cached opencv_python-3.4.5.20-cp36-cp36m-win_amd64.whl (38.3 MB)\r\nCollecting cefpython3==66.0\r\n Using cached cefpython3-66.0-py2.py3-none-win_amd64.whl (68.2 MB)\r\nCollecting pandas==0.25.3\r\n Using cached pandas-0.25.3-cp36-cp36m-win_amd64.whl (9.0 MB)\r\nCollecting deeplabcut==2.0.9\r\n Using cached deeplabcut-2.0.9-py3-none-any.whl (187 kB)\r\nCollecting h5py==2.9.0\r\n Using cached h5py-2.9.0-cp36-cp36m-win_amd64.whl (2.4 MB)\r\nCollecting scipy==1.1.0\r\n Using cached scipy-1.1.0-cp36-none-win_amd64.whl (31.1 MB)\r\nCollecting pyyaml==5.3.1\r\n Using cached PyYAML-5.3.1-cp36-cp36m-win_amd64.whl (215 kB)\r\nCollecting scikit-image==0.14.2\r\n Using cached scikit_image-0.14.2-cp36-none-win_amd64.whl (24.6 MB)\r\nCollecting xgboost==0.90\r\n Using cached xgboost-0.90-py2.py3-none-win_amd64.whl (18.3 MB)\r\nCollecting graphviz==0.11\r\n Using cached graphviz-0.11-py2.py3-none-any.whl (17 kB)\r\nCollecting numba==0.48.0\r\n Using cached numba-0.48.0-cp36-cp36m-win_amd64.whl (2.1 MB)\r\nCollecting dash==1.14.0\r\n Using cached dash-1.14.0-py3-none-any.whl\r\nCollecting shapely==1.7\r\n Using cached Shapely-1.7.0-cp36-cp36m-win_amd64.whl (1.0 MB)\r\nCollecting dash-core-components==1.10.2\r\n Using cached dash_core_components-1.10.2-py3-none-any.whl\r\nCollecting imutils==0.5.2\r\n Using cached imutils-0.5.2-py3-none-any.whl\r\nCollecting wxpython==4.0.4\r\n Using cached wxPython-4.0.4-cp36-cp36m-win_amd64.whl (23.0 MB)\r\nCollecting eli5==0.10.1\r\n Using cached 
eli5-0.10.1-py2.py3-none-any.whl (105 kB)\r\nCollecting Pillow==5.4.1\r\n Using cached Pillow-5.4.1-cp36-cp36m-win_amd64.whl (1.9 MB)\r\nCollecting dash-html-components==1.0.3\r\n Using cached dash_html_components-1.0.3-py3-none-any.whl\r\nCollecting dtreeviz==0.8.1\r\n Using cached dtreeviz-0.8.1-py3-none-any.whl\r\nCollecting imblearn==0.0\r\n Using cached imblearn-0.0-py2.py3-none-any.whl (1.9 kB)\r\nCollecting matplotlib==3.0.3\r\n Using cached matplotlib-3.0.3-cp36-cp36m-win_amd64.whl (9.1 MB)\r\nCollecting scikit-learn==0.22.2\r\n Using cached scikit_learn-0.22.2-cp36-cp36m-win_amd64.whl (6.5 MB)\r\nCollecting yellowbrick==0.9.1\r\n Using cached yellowbrick-0.9.1-py2.py3-none-any.whl (282 kB)\r\nCollecting tqdm==4.30.0\r\n Using cached tqdm-4.30.0-py2.py3-none-any.whl (47 kB)\r\nCollecting shap==0.35.0\r\n Using cached shap-0.35.0-cp36-cp36m-win_amd64.whl (309 kB)\r\nCollecting deepposekit==0.3.5\r\n Using cached deepposekit-0.3.5-py3-none-any.whl\r\nCollecting statsmodels==0.9.0\r\n Using cached statsmodels-0.9.0-cp36-cp36m-win_amd64.whl (7.0 MB)\r\nCollecting dash-renderer==1.6.0\r\n Using cached dash_renderer-1.6.0-py3-none-any.whl\r\nCollecting Flask>=1.0.2\r\n Using cached Flask-2.0.1-py3-none-any.whl (94 kB)\r\nCollecting future\r\n Using cached future-0.18.2-py3-none-any.whl\r\nCollecting flask-compress\r\n Using cached Flask_Compress-1.10.1-py3-none-any.whl (7.9 kB)\r\nCollecting dash-table==4.9.0\r\n Using cached dash_table-4.9.0-py3-none-any.whl\r\nCollecting easydict~=1.7\r\n Using cached easydict-1.9.tar.gz (6.4 kB)\r\nINFO: pip is looking at multiple versions of dash-table to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-renderer to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-html-components to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-core-components to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-colorscales to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash-color-picker to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of dash to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of cefpython3 to determine which version is compatible with other requirements. This could take a while.\r\nINFO: pip is looking at multiple versions of simba-uw-tf to determine which version is compatible with other requirements. This could take a while.\r\nERROR: Cannot install simba-uw-tf and simba-uw-tf==1.3.11 because these package versions have conflicting dependencies.\r\n\r\nThe conflict is caused by:\r\n simba-uw-tf 1.3.11 depends on scikit-learn==0.22.2\r\n deeplabcut 2.0.9 depends on scikit-learn~=0.19.2\r\n\r\nTo fix this you could try to:\r\n1. loosen the range of package versions you've specified\r\n2. 
remove package versions to allow pip attempt to solve the dependency conflict\r\n\r\nERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies\r\n```", + "user": "quirinkrabi", + "reaction_cnt": 0, + "created_at": "2021-09-17T10:11:42Z", + "updated_at": "2021-09-17T11:36:11Z", + "author": "quirinkrabi", + "comments": [ + { + "body": "Hi!\r\n\r\nI got it to work with \"pip install simba-uw-tf-dev\".\r\n\r\nSorry for bothering!\r\n\r\nBest,\r\nQuirin", + "created_at": "2021-09-17T11:29:12Z", + "author": "quirinkrabi" + }, + { + "body": "Excellent, thanks @quirinkrabi - was just about to add that the solution is in FAQ entry 11: https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#11-when-i-install-or-update-simba-i-see-a-bunch-or-messages-in-the-console-telling-there-me-about-some-dependency-conflicts-the-messages-may-look-a-little-like-this\r\n\r\nBut yes, we strongly recommend the dev version over the TF version. ", + "created_at": "2021-09-17T11:35:45Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Unable to install due to error", + "body": "I would like to thank you for developing open-source software.\r\nI tried to install using Anaconda following the instructions, but could not do so due to an error.\r\nThe error message is as follows. I hope the problem is resolved and the software is usable.\r\n\r\nERROR: Cannot install simba-uw-tf==1.0.3, simba-uw-tf==1.0.4, simba-uw-tf==1.0.5, simba-uw-tf==1.0.6, simba-uw-tf==1.0.7, simba-uw-tf==1.0.9, simba-uw-tf==1.1.0, simba-uw-tf==1.1.1, simba-uw-tf==1.1.6, simba-uw-tf==1.1.7, simba-uw-tf==1.2.0, simba-uw-tf==1.2.1, simba-uw-tf==1.2.10, simba-uw-tf==1.2.11, simba-uw-tf==1.2.12, simba-uw-tf==1.2.12.1, simba-uw-tf==1.2.13, simba-uw-tf==1.2.14, simba-uw-tf==1.2.15, simba-uw-tf==1.2.16, simba-uw-tf==1.2.17, simba-uw-tf==1.2.18, simba-uw-tf==1.2.19, simba-uw-tf==1.2.2, simba-uw-tf==1.2.20, simba-uw-tf==1.2.21, simba-uw-tf==1.2.22, simba-uw-tf==1.2.23, simba-uw-tf==1.2.24, simba-uw-tf==1.2.26, simba-uw-tf==1.2.27, simba-uw-tf==1.2.28, simba-uw-tf==1.2.29, simba-uw-tf==1.2.3, simba-uw-tf==1.2.3.10, simba-uw-tf==1.2.3.11, simba-uw-tf==1.2.3.12, simba-uw-tf==1.2.3.13, simba-uw-tf==1.2.3.3, simba-uw-tf==1.2.3.4, simba-uw-tf==1.2.3.5, simba-uw-tf==1.2.3.6, simba-uw-tf==1.2.3.7, simba-uw-tf==1.2.3.8, simba-uw-tf==1.2.3.9, simba-uw-tf==1.2.30, simba-uw-tf==1.2.31, simba-uw-tf==1.2.4.1, simba-uw-tf==1.2.4.2, simba-uw-tf==1.2.4.3, simba-uw-tf==1.2.5, simba-uw-tf==1.2.6, simba-uw-tf==1.2.7, simba-uw-tf==1.2.8, simba-uw-tf==1.2.9, simba-uw-tf==1.2.9.2 and simba-uw-tf==1.3.12 because these package versions have conflicting dependencies.\r\n\r\nThe conflict is caused by:\r\n simba-uw-tf 1.3.12 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.31 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.30 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.29 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.28 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.27 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.26 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.24 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.23 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.22 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.21 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.20 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.19 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.18 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.17 depends on 
tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.16 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.15 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.14 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.13 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.12.1 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.12 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.11 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.10 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.9.2 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.9 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.8 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.7 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.6 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.5 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.4.3 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.4.2 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.4.1 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.13 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.12 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.11 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.10 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.9 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.8 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.7 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.6 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.5 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.4 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3.3 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.3 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.2 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.1 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.2.0 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.1.7 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.1.6 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.1.1 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.1.0 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.0.9 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.0.7 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.0.6 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.0.5 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.0.4 depends on tensorflow-gpu==1.14.0\r\n simba-uw-tf 1.0.3 depends on tensorflow-gpu==1.14.0\r\n\r\nTo fix this you could try to:\r\n1. loosen the range of package versions you've specified\r\n2. remove package versions to allow pip attempt to solve the dependency conflict\r\n\r\nERROR: ResolutionImpossible: for help visit https://pip.pypa.io/en/latest/user_guide/#fixing-conflicting-dependencies", + "user": "MasahiroMorsihita", + "reaction_cnt": 0, + "created_at": "2021-09-17T04:49:15Z", + "updated_at": "2022-06-14T22:11:47Z", + "author": "MasahiroMorsihita", + "comments": [ + { + "body": "Hello @MasahiroMorsihita! \r\n\r\nFor a solution, check out entry `11` of the FAQ: https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#11-when-i-install-or-update-simba-i-see-a-bunch-or-messages-in-the-console-telling-there-me-about-some-dependency-conflicts-the-messages-may-look-a-little-like-this\r\n\r\nI would recommend using possible solution 3, i.e add the no dependencies flag at install:\r\n\r\nType `pip install simba-uw-tf --no-dependencies`, `pip install simba-uw-no-tf--no-dependencies` or `pip install simba-uw-tf-dev --no-dependencies` dependening on which version you are installing. 
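
(As an aside, and only as a sketch that is not from this thread: installing with `--no-dependencies` means pip no longer checks the pins for you, so it can be worth confirming the critical ones by hand afterwards. The `check` helper below is hypothetical; the package names and version pins are the ones reported in the resolver output quoted above.)

```
import pkg_resources

def check(pkg, pin):  # hypothetical helper, not part of SimBA
    try:
        found = pkg_resources.get_distribution(pkg).version
        print("%s: installed %s (pinned as %s)" % (pkg, found, pin))
    except pkg_resources.DistributionNotFound:
        print("%s: missing - install it manually" % pkg)

check("scikit-learn", "0.22.2")  # the pin that clashes with deeplabcut's ~=0.19.2
check("deeplabcut", "2.0.9")
```
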
\r\n\r\nLet me know if that works!", + "created_at": "2021-09-17T11:33:48Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Attribute error while trying to calculate video parameters in SimBA", + "body": "Hello :)\r\n\r\nAfter one complete errorfree analysis with simBA last week i tried to create a new project to repeat exactly the same with other videos but then, after import of videos (MP4) and tracking data (csv) an error occured while video parameter setting:\r\n\r\n![image](https://user-images.githubusercontent.com/81317771/132955452-308b4c17-1f8d-437a-b374-7b8f9b3b73a3.png)\r\n\r\nThe Error occures after I click on \"video\" to calculate the distance.\r\nIts confusing, because in my old project its still working..\r\n\r\nWould be awesome if you may have some tipps or ideas!\r\nIm just a medical student with no real python knowledge but alot of pressure ^^\r\n\r\nBest", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-09-11T17:01:19Z", + "updated_at": "2021-09-14T11:42:14Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Hello @Marcelobot! \r\n\r\nWhen you click on `Video1`, SimBA tries to open the first frame of `animal_11_200806cropped_rotated.mp4`, and SimBA assumes `animal_11_200806cropped_rotated.mp4` is located in the `project_folder/videos` directory of your new project. \r\n\r\nIf SimBA can't find `animal_11_200806cropped_rotated.mp4` in the `project_folder/videos` directory, this error could happen. The `NoneType` error comes from SimBA trying to extract the shape/resolution of the first frame, but there is no resolution because there is no video. \r\n\r\nCould you double check that your new project contains a file called `animal_11_200806cropped_rotated.mp4` in the `project_folder/videos` directory?\r\n\r\nCheers!\r\n\r\n", + "created_at": "2021-09-12T12:19:21Z", + "author": "sgoldenlab" + }, + { + "body": "Hello and thank you for your quick answer!\r\n\r\nSadly thats not the point, the \"project_folder/videos\" directory contains the correct \"animal_11_200806cropped_rotated.mp4\" file.\r\n\r\nBut whats interesting is, that after the occuration of atribute error another file will be created in the same directory:\r\n![image](https://user-images.githubusercontent.com/81317771/132989850-ec377bd9-4515-49ba-b5ab-a7de5bcbdb25.png)\r\n\r\nAs I wrote, last week it worked completely.. but nothing changed since then.\r\n\r\nCould it be a problem with any packages missing within python?\r\n\r\nCheers", + "created_at": "2021-09-12T13:42:10Z", + "author": "Marcelobot" + }, + { + "body": "*Maybe its another issue but, Ive tried out to create another project including video data, that worked out last time this happened:\r\n\r\n![image](https://user-images.githubusercontent.com/81317771/132990214-020a9956-82a5-4d11-8cd3-1f5230b638a3.png)\r\n\r\nAs you can see, there are even parts of the gui disappeared..\r\n\r\nThe relating error:\r\n\"TypeError: __init__() missing 1 required positional argument: 'value'\"\r\n\r\n", + "created_at": "2021-09-12T13:51:57Z", + "author": "Marcelobot" + }, + { + "body": "Hi @Marcelobot!\r\n\r\nLet’s do the second error first:\r\n\r\nThe second error msg you are seeing is unrelated to the first. I recently replied to someone on Gitter who saw the same error - https://gitter.im/SimBA-Resource/community. 
I will just post my prior reply here (also, if you update to the latest version of SimBA `pip install simba-uw-tf-dev --upgrade`, you should get a more informative error msg when this happens)\r\n\r\n```\r\nThis error occurs as SimBA is trying to populate the drop-down menus with your classifier names. \r\n\r\nSimBA looks in your project_config.ini file for the classifier names, it can’t find any names, and it fails as it does not know what to do with the dropdown.\r\n\r\nWhen you created your SimBA project, did you name your behaviors in this entry box?\r\n\r\n```\r\n\r\n![Untitled (1)](https://user-images.githubusercontent.com/50497030/132991518-807be584-0106-4f8d-82ba-84d5a7dd032a.png)\r\n", + "created_at": "2021-09-12T14:27:48Z", + "author": "sgoldenlab" + }, + { + "body": "Back to the first NoneType error:\r\n\r\nThe `0.bmp` file is a temporary image file of the first frame of the video `animal_11_200806cropped_rotated.mp4` that SimBA is trying to open. SimBA saves this bmp file temporarily, lets you draw your line, and then SimBA deletes it. \r\n\r\nI see that it is only 2kb big, which makes me suspicious that there is something odd with the first frame of `animal_11_200806cropped_rotated.mp4`. Can you open this bmp file? \r\n\r\nAlso can you just confirm that you can't open this video in your prior SimBA project either? That way we can confirm that it is something odd with the video and not the SimBA project. \r\n\r\nIf we confirm it is the video and not the SimBA project I can insert a fix in SimBA to catch this (*if there is something odd with the first frame of the video, then try to grab another frame*).\r\n\r\n\r\n\r\n", + "created_at": "2021-09-12T14:34:30Z", + "author": "sgoldenlab" + }, + { + "body": "Issue 2:\r\n\r\n\r\n> Hi @Marcelobot!\r\n> \r\n> Let’s do the second error first:\r\n> \r\n> The second error msg you are seeing is unrelated to the first. I recently replied to someone on Gitter who saw the same error - https://gitter.im/SimBA-Resource/community. I will just post my prior reply here (also, if you update to the latest version of SimBA `pip install simba-uw-tf-dev --upgrade`, you should get a more informative error msg when this happens)\r\n> \r\n> ```\r\n> This error occurs as SimBA is trying to populate the drop-down menus with your classifier names. \r\n> \r\n> SimBA looks in your project_config.ini file for the classifier names, it can’t find any names, and it fails as it does not know what to do with the dropdown.\r\n> \r\n> When you created your SimBA project, did you name your behaviors in this entry box?\r\n> ```\r\n> \r\n> ![Untitled (1)](https://user-images.githubusercontent.com/50497030/132991518-807be584-0106-4f8d-82ba-84d5a7dd032a.png)\r\n\r\nIssue 2:\r\n\r\nIve applied, what you`ve recommented (created a new project and within named the behavior classifier in the beginning as shown on your screenshot). Then the mask wasnt disappeared at first, but then there was none of 2 imported videos listed:\r\n![image](https://user-images.githubusercontent.com/81317771/132995560-5134bebe-3ae9-4a0c-b9e3-dbb5527d6de8.png)\r\n\r\n(the videos are stored correctly in the project/videos direction)\r\n\r\n", + "created_at": "2021-09-12T16:37:05Z", + "author": "Marcelobot" + }, + { + "body": "Issue 1:\r\n\r\n\r\n> Back to the first NoneType error:\r\n> \r\n> The `0.bmp` file is a temporary image file of the first frame of the video `animal_11_200806cropped_rotated.mp4` that SimBA is trying to open. 
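
(A minimal sketch of that first-frame grab, assuming OpenCV; this is not SimBA's actual implementation. A corrupt first frame comes back as `None`, which is exactly where the `NoneType` error above would surface.)

```
import cv2

cap = cv2.VideoCapture("animal_11_200806cropped_rotated.mp4")
ok, frame = cap.read()   # frame is None when the first frame cannot be decoded
cap.release()
if frame is None:
    # a None frame has no .shape, hence the NoneType error seen above
    raise ValueError("first frame could not be decoded")
cv2.imwrite("0.bmp", frame)  # the temporary bmp discussed in this thread
```
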
SimBA saves this bmp file temporarily, lets you draw your line, and then SimBA deletes it.\r\n> \r\n> I see that it is only 2kb big, which makes me suspicious that there is something odd with the first frame of `animal_11_200806cropped_rotated.mp4`. Can you open this bmp file?\r\n> \r\n> Also can you just confirm that you can't open this video in your prior SimBA project either? That way we can confirm that it is something odd with the video and not the SimBA project.\r\n> \r\n> If we confirm it is the video and not the SimBA project I can insert a fix in SimBA to catch this (_if there is something odd with the first frame of the video, then try to grab another frame_).\r\n\r\nI could not open the bmp-file,recieved the message, that there is no information inside.\r\n\r\nI tried out as following:\r\n- loaded the last project, in which still everything is working out\r\n- imported there one of my new videos (same scenario but cropped)\r\n--> Result was, that the GUI works for the already imported videos, but for the recently imported video not, it was not even listed in the table of video parameters:\r\n![image](https://user-images.githubusercontent.com/81317771/132996250-c13a17d0-636a-4e13-8e84-7bd4f87d0457.png)\r\n(the 5 old Videos listed without the new one)\r\nBut obviously it was imported and stored correctly\r\n![image](https://user-images.githubusercontent.com/81317771/132996316-023d3df4-b8c4-4804-bcf0-ffaad7273607.png)\r\n\r\nSo it seems like it could be related to the video file.\r\nThe only difference is the quality:\r\nThe old ones have 796 x 606, while the new one has 760 x 604.\r\nIs there something to consider with the video quality?\r\n\r\n\r\n\r\nAnd in general: Is it possible to just connect quickly via teamviewer or something to fix that ?\r\n\r\n\r\nBest\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2021-09-12T17:09:10Z", + "author": "Marcelobot" + }, + { + "body": "Yes, you are right I think a call would be the quickest way to solve this - can you email goldenneurolab@gmail.com and we will set something up for today or tomorrow? \r\n\r\n", + "created_at": "2021-09-13T10:35:43Z", + "author": "sgoldenlab" + }, + { + "body": "Update Issue 1:\n\nI can confirm now, That there was something wrong within the video sequences.\n\nWhen I trim of the First 5sec it works as usual.\n\nThe question is, Why it worked out Last week without trimming..\n\n\n\nHowever now Im expecting that my SimBA Project will run.\n\n\nPs. If you still want to talk about Ive sent an email.\n\nCheers", + "created_at": "2021-09-14T11:42:14Z", + "author": "Marcelobot" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 8.3.2", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 8.3.2.\n
<details>
<summary>Release notes</summary>

*Sourced from pillow's releases.*

## 8.3.2

https://pillow.readthedocs.io/en/stable/releasenotes/8.3.2.html

### Security

- CVE-2021-23437 Raise ValueError if color specifier is too long [hugovk, radarhere]
- Fix 6-byte OOB read in FliDecode [wiredfool]

### Python 3.10 wheels

- Add support for Python 3.10 #5569, #5570 [hugovk, radarhere]

### Fixed regressions

- Ensure TIFF RowsPerStrip is multiple of 8 for JPEG compression #5588 [kmilos, radarhere]
- Updates for ImagePalette channel order #5599 [radarhere]
- Hide FriBiDi shim symbols to avoid conflict with real FriBiDi library #5651 [nulano]

## 8.3.1

https://pillow.readthedocs.io/en/stable/releasenotes/8.3.1.html

## 8.3.0

https://pillow.readthedocs.io/en/stable/releasenotes/8.3.0.html

... (truncated)
</details>

<details>
<summary>Changelog</summary>

*Sourced from pillow's changelog.*

## 8.3.2 (2021-09-02)

- CVE-2021-23437 Raise ValueError if color specifier is too long [hugovk, radarhere]
- Fix 6-byte OOB read in FliDecode [wiredfool]
- Add support for Python 3.10 #5569, #5570 [hugovk, radarhere]
- Ensure TIFF RowsPerStrip is multiple of 8 for JPEG compression #5588 [kmilos, radarhere]
- Updates for ImagePalette channel order #5599 [radarhere]
- Hide FriBiDi shim symbols to avoid conflict with real FriBiDi library #5651 [nulano]

## 8.3.1 (2021-07-06)

- Catch OSError when checking if fp is sys.stdout #5585 [radarhere]
- Handle removing orientation from alternate types of EXIF data #5584 [radarhere]
- Make Image.__array__ take optional dtype argument #5572 [t-vi, radarhere]

## 8.3.0 (2021-07-01)

- Use snprintf instead of sprintf. CVE-2021-34552 #5567 [radarhere]
- Limit TIFF strip size when saving with LibTIFF #5514 [kmilos]
- Allow ICNS save on all operating systems #4526 [baletu, radarhere, newpanjing, hugovk]
- De-zigzag JPEG's DQT when loading; deprecate convert_dict_qtables #4989 [gofr, radarhere]
- Replaced xml.etree.ElementTree #5565 [radarhere]

... (truncated)
</details>

<details>
<summary>Commits</summary>

- 8013f13 8.3.2 version bump
- 23c7ca8 Update CHANGES.rst
- 8450366 Update release notes
- a0afe89 Update test case
- 9e08eb8 Raise ValueError if color specifier is too long
- bd5cf7d FLI tests for Oss-fuzz crash.
- 94a0cf1 Fix 6-byte OOB read in FliDecode
- cece64f Add 8.3.2 (2021-09-02) [CI skip]
- e422386 Add release notes for Pillow 8.3.2
- 08dcbb8 Pillow 8.3.2 supports Python 3.10 [ci skip]
- Additional commits viewable in compare view
</details>

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=8.3.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---

<details>
<summary>Dependabot commands and options</summary>

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).
</details>
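
(A minimal sanity check, not part of the PR itself: after applying a bump like this by hand, confirm the environment actually carries the patched release. Assumes Pillow is importable as `PIL` and setuptools is available.)

```
from pkg_resources import parse_version  # ships with setuptools
import PIL

if parse_version(PIL.__version__) < parse_version("8.3.2"):
    raise RuntimeError("Pillow %s predates the 8.3.2 CVE fixes" % PIL.__version__)
```
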
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-09-08T03:05:28Z", + "updated_at": "2022-01-13T03:41:27Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #162.", + "created_at": "2022-01-13T03:41:25Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 8.3.2 in /simba", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 8.3.2.\n
<details>
<summary>Release notes</summary>

*Sourced from pillow's releases.*

## 8.3.2

https://pillow.readthedocs.io/en/stable/releasenotes/8.3.2.html

### Security

- CVE-2021-23437 Raise ValueError if color specifier is too long [hugovk, radarhere]
- Fix 6-byte OOB read in FliDecode [wiredfool]

### Python 3.10 wheels

- Add support for Python 3.10 #5569, #5570 [hugovk, radarhere]

### Fixed regressions

- Ensure TIFF RowsPerStrip is multiple of 8 for JPEG compression #5588 [kmilos, radarhere]
- Updates for ImagePalette channel order #5599 [radarhere]
- Hide FriBiDi shim symbols to avoid conflict with real FriBiDi library #5651 [nulano]

## 8.3.1

https://pillow.readthedocs.io/en/stable/releasenotes/8.3.1.html

## 8.3.0

https://pillow.readthedocs.io/en/stable/releasenotes/8.3.0.html

... (truncated)
</details>

<details>
<summary>Changelog</summary>

*Sourced from pillow's changelog.*

## 8.3.2 (2021-09-02)

- CVE-2021-23437 Raise ValueError if color specifier is too long [hugovk, radarhere]
- Fix 6-byte OOB read in FliDecode [wiredfool]
- Add support for Python 3.10 #5569, #5570 [hugovk, radarhere]
- Ensure TIFF RowsPerStrip is multiple of 8 for JPEG compression #5588 [kmilos, radarhere]
- Updates for ImagePalette channel order #5599 [radarhere]
- Hide FriBiDi shim symbols to avoid conflict with real FriBiDi library #5651 [nulano]

## 8.3.1 (2021-07-06)

- Catch OSError when checking if fp is sys.stdout #5585 [radarhere]
- Handle removing orientation from alternate types of EXIF data #5584 [radarhere]
- Make Image.__array__ take optional dtype argument #5572 [t-vi, radarhere]

## 8.3.0 (2021-07-01)

- Use snprintf instead of sprintf. CVE-2021-34552 #5567 [radarhere]
- Limit TIFF strip size when saving with LibTIFF #5514 [kmilos]
- Allow ICNS save on all operating systems #4526 [baletu, radarhere, newpanjing, hugovk]
- De-zigzag JPEG's DQT when loading; deprecate convert_dict_qtables #4989 [gofr, radarhere]
- Replaced xml.etree.ElementTree #5565 [radarhere]

... (truncated)
</details>

<details>
<summary>Commits</summary>

- 8013f13 8.3.2 version bump
- 23c7ca8 Update CHANGES.rst
- 8450366 Update release notes
- a0afe89 Update test case
- 9e08eb8 Raise ValueError if color specifier is too long
- bd5cf7d FLI tests for Oss-fuzz crash.
- 94a0cf1 Fix 6-byte OOB read in FliDecode
- cece64f Add 8.3.2 (2021-09-02) [CI skip]
- e422386 Add release notes for Pillow 8.3.2
- 08dcbb8 Pillow 8.3.2 supports Python 3.10 [ci skip]
- Additional commits viewable in compare view
</details>

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=8.3.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---

<details>
<summary>Dependabot commands and options</summary>

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).
</details>
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-09-08T01:35:46Z", + "updated_at": "2022-01-13T02:04:15Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #161.", + "created_at": "2022-01-13T02:04:14Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.5.1 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.5.1.\n
<details>
<summary>Release notes</summary>

*Sourced from tensorflow-gpu's releases.*

## TensorFlow 2.5.1

### Release 2.5.1

This release introduces several vulnerability fixes:

- Fixes a heap out of bounds access in sparse reduction operations (CVE-2021-37635)
- Fixes a floating point exception in SparseDenseCwiseDiv (CVE-2021-37636)
- Fixes a null pointer dereference in CompressElement (CVE-2021-37637)
- Fixes a null pointer dereference in RaggedTensorToTensor (CVE-2021-37638)
- Fixes a null pointer dereference and a heap OOB read arising from operations restoring tensors (CVE-2021-37639)
- Fixes an integer division by 0 in sparse reshaping (CVE-2021-37640)
- Fixes a division by 0 in ResourceScatterDiv (CVE-2021-37642)
- Fixes a heap OOB in RaggedGather (CVE-2021-37641)
- Fixes a std::abort raised from TensorListReserve (CVE-2021-37644)
- Fixes a null pointer dereference in MatrixDiagPartOp (CVE-2021-37643)
- Fixes an integer overflow due to conversion to unsigned (CVE-2021-37645)
- Fixes a bad allocation error in StringNGrams caused by integer conversion (CVE-2021-37646)
- Fixes a null pointer dereference in SparseTensorSliceDataset (CVE-2021-37647)
- Fixes an incorrect validation of SaveV2 inputs (CVE-2021-37648)
- Fixes a null pointer dereference in UncompressElement (CVE-2021-37649)
- Fixes a segfault and a heap buffer overflow in {Experimental,}DatasetToTFRecord (CVE-2021-37650)
- Fixes a heap buffer overflow in FractionalAvgPoolGrad (CVE-2021-37651)
- Fixes a use after free in boosted trees creation (CVE-2021-37652)
- Fixes a division by 0 in ResourceGather (CVE-2021-37653)
- Fixes a heap OOB and a CHECK fail in ResourceGather (CVE-2021-37654)
- Fixes a heap OOB in ResourceScatterUpdate (CVE-2021-37655)
- Fixes an undefined behavior arising from reference binding to nullptr in RaggedTensorToSparse (CVE-2021-37656)
- Fixes an undefined behavior arising from reference binding to nullptr in MatrixDiagV* ops (CVE-2021-37657)
- Fixes an undefined behavior arising from reference binding to nullptr in MatrixSetDiagV* ops (CVE-2021-37658)
- Fixes an undefined behavior arising from reference binding to nullptr and heap OOB in binary cwise ops (CVE-2021-37659)
- Fixes a division by 0 in inplace operations (CVE-2021-37660)
- Fixes a crash caused by integer conversion to unsigned (CVE-2021-37661)
- Fixes an undefined behavior arising from reference binding to nullptr in boosted trees (CVE-2021-37662)
- Fixes a heap OOB in boosted trees (CVE-2021-37664)
- Fixes vulnerabilities arising from incomplete validation in QuantizeV2 (CVE-2021-37663)
- Fixes vulnerabilities arising from incomplete validation in MKL requantization (CVE-2021-37665)
- Fixes an undefined behavior arising from reference binding to nullptr in RaggedTensorToVariant (CVE-2021-37666)
- Fixes an undefined behavior arising from reference binding to nullptr in unicode encoding (CVE-2021-37667)
- Fixes an FPE in tf.raw_ops.UnravelIndex (CVE-2021-37668)
- Fixes a crash in NMS ops caused by integer conversion to unsigned (CVE-2021-37669)
- Fixes a heap OOB in UpperBound and LowerBound (CVE-2021-37670)
- Fixes an undefined behavior arising from reference binding to nullptr in map operations (CVE-2021-37671)
- Fixes a heap OOB in SdcaOptimizerV2 (CVE-2021-37672)
- Fixes a CHECK-fail in MapStage (CVE-2021-37673)
- Fixes a vulnerability arising from incomplete validation in MaxPoolGrad (CVE-2021-37674)
- Fixes an undefined behavior arising from reference binding to nullptr in shape inference (CVE-2021-37676)
- Fixes a division by 0 in most convolution operators (CVE-2021-37675)
- Fixes vulnerabilities arising from missing validation in shape inference for Dequantize (CVE-2021-37677)
- Fixes an arbitrary code execution due to YAML deserialization (CVE-2021-37678)
- Fixes a heap OOB in nested tf.map_fn with RaggedTensors (CVE-2021-37679)

... (truncated)
</details>

<details>
<summary>Changelog</summary>

*Sourced from tensorflow-gpu's changelog.*

## Release 2.5.1

This release introduces several vulnerability fixes:

- Fixes a heap out of bounds access in sparse reduction operations (CVE-2021-37635)
- Fixes a floating point exception in SparseDenseCwiseDiv (CVE-2021-37636)
- ... (the same vulnerability-fix list as the release notes above) ...
- Fixes an undefined behavior arising from reference binding to nullptr in RaggedTensorToSparse

... (truncated)
</details>

<details>
<summary>Commits</summary>

- 8222c1c Merge pull request #51381 from tensorflow/mm-fix-r2.5-build
- d584260 Disable broken/flaky test
- f6c6ce3 Merge pull request #51367 from tensorflow-jenkins/version-numbers-2.5.1-17468
- 3ca7812 Update version numbers to 2.5.1
- 4fdf683 Merge pull request #51361 from tensorflow/mm-update-relnotes-on-r2.5
- 05fc01a Put CVE numbers for fixes in parentheses
- bee1dc4 Update release notes for the new patch release
- 47beb4c Merge pull request #50597 from kruglov-dmitry/v2.5.0-sync-abseil-cmake-bazel
- 6f39597 Merge pull request #49383 from ashahab/abin-load-segfault-r2.5
- 0539b34 Merge pull request #48979 from liufengdb/r2.5-cherrypick
- Additional commits viewable in compare view
</details>

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.5.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---

<details>
<summary>Dependabot commands and options</summary>

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language
- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language
- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language
- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).
</details>
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-08-25T14:56:13Z", + "updated_at": "2022-02-09T23:36:04Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #166.", + "created_at": "2022-02-09T23:36:03Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Missmatch/Confusion about Videoparameters", + "body": "Im a bit confused, because the distance calculated between bodyparts while feature extraction seems to be wrong:\r\n\r\n- real measure of the cage is 15 * 25cm (on the screenshot the 25cm length) and so SImba calculates about 1.796017817 px/mm\r\n![image](https://user-images.githubusercontent.com/81317771/130659891-38784931-408d-4287-85ff-175ea55e6184.png)\r\n\r\n-The result of the feature extraction looks like:\r\n![image](https://user-images.githubusercontent.com/81317771/130664530-001c05ea-fa84-4957-9475-a32813b745a7.png)\r\n\r\nMy Question: is the distance in features calculated in px ? \r\nBut still it would be absolutely unrealistic..\r\n\r\nThanks for helping as soon as possible!\r\nBest Regards. :) ", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-08-24T17:47:49Z", + "updated_at": "2021-09-11T16:52:37Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Can you send the tracking csv with the corresponding video to jchoong@uw.edu so I can take a look at it?", + "created_at": "2021-08-24T19:22:18Z", + "author": "inoejj" + }, + { + "body": "Hello @Marcelobot ! Yes this looks off, it is not even numbers, there are some weird punctuations - it should be millimeters. \r\n\r\nDid you open the file in Excel or similar, as something else then a **comma** delimited file, and that is why things look odd? ", + "created_at": "2021-08-24T19:31:33Z", + "author": "sronilsson" + }, + { + "body": "> [jchoong@uw.edu](mailto:jchoong@uw.edu)\r\n\r\nThank you very much! Recently ive sent you the data and video.\r\n\r\nBest Regards,", + "created_at": "2021-08-24T21:58:45Z", + "author": "Marcelobot" + }, + { + "body": "> Hello @Marcelobot ! Yes this looks off, it is not even numbers, there are some weird punctuations - it should be millimeters.\r\n> \r\n> Did you open the file in Excel or similar, as something else then a **comma** delimited file, and that is why things look odd?\r\n\r\nIve opened as CSV within Excel and then changed to \"devided on different cells by semicolon/comma\", so that its distributed on many cells and useful to calculate in the following steps.\r\n\r\nBut apart from that, ive changed nothing.\r\n\r\nBest regards,", + "created_at": "2021-08-24T22:02:19Z", + "author": "Marcelobot" + }, + { + "body": "We have solved it. Thank you for trying to help.\r\n\r\nBest Regards.", + "created_at": "2021-08-25T13:48:00Z", + "author": "Marcelobot" + }, + { + "body": "Thanks @Marcelobot - do you know what the issue was? Just in case this issue comes up for any other user or for us to insert something that could catch the error. ", + "created_at": "2021-08-25T13:50:54Z", + "author": "sronilsson" + }, + { + "body": "> Thanks @Marcelobot - do you know what the issue was? Just in case this issue comes up for any other user or for us to insert something that could catch the error.\r\n\r\nIt was just, that Ive loaded it incorrectly in Excel. 
Had to change some language settings/ Dott-comma-semicolon settings ^^", + "created_at": "2021-09-11T16:52:37Z", + "author": "Marcelobot" + } + ] + }, + { + "title": "\"Conflicting dependencies\" while Simba Installation", + "body": "Hey guys, first of all thank you for creating simBA which really offers much more possibilities to my project!\r\n\r\nCurrently we have got some issues while installing SimBA with anaconda on Windows Laptop and PC.\r\nThe following error we recieve after putting in \"pip install simba-uw-tf\".\r\n\r\n![Simba_installation](https://user-images.githubusercontent.com/81317771/129807761-ad66a174-a2ba-4d24-96ce-1f227f35d516.PNG)\r\n\r\n\"Conflicting dependencies simba and deeplabcut\"? - there is no deeplabcut used on my Laptop..\r\n\r\nWhat would you recomment us to do?\r\nIs there an Option to run Simba on Linux?\r\n\r\nThanks and best regards.\r\nMarcel\r\n", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-08-17T22:18:47Z", + "updated_at": "2021-08-24T12:56:16Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Hey @Marcelobot! \r\n\r\nI recommend installing `simba-uw-tf-dev` (`pip install simba-uw-tf-dev`), don't do the simba-uw-tf version. The simba-uw-tf-dev version should run both on linux or Windows as is. \r\n\r\nFor the dependency conflict, check this out and see if it helps: \r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#11-when-i-install-or-update-simba-i-see-a-bunch-or-messages-in-the-console-telling-there-me-about-some-dependency-conflicts-the-messages-may-look-a-little-like-this\r\n\r\nI would try the second suggestion - downgrading pip before installing SimBA. Let me know if that works. \r\n\r\n\r\n\r\n\r\n", + "created_at": "2021-08-18T16:51:01Z", + "author": "sronilsson" + }, + { + "body": "Thank you very much!\n\nBest\nMarcel", + "created_at": "2021-08-23T09:15:43Z", + "author": "Marcelobot" + } + ] + }, + { + "title": "\"Traceback\" error when opening SimBA in the terminal after installing", + "body": "I am unable to open SimBA after following guidelines for installation in an Anaconda Environment with Python 3.6.\r\n\r\nMy system is:\r\nWindows 10 x64bit\r\nProcessor: Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz, 2601 Mhz, 6 Core(s), 12 Logical Processor(s)\r\n\r\nHere is the code:\r\n\r\n\"(simba-test) C:\\Users\\pazam>simba\r\nTraceback (most recent call last):\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\tensorflow\\python\\platform\\self_check.py\", line 75, in preload_check\r\n ctypes.WinDLL(build_info.cudart_dll_name)\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\ctypes\\__init__.py\", line 348, in __init__\r\n self._handle = _dlopen(self._name, mode)\r\nOSError: [WinError 126] The specified module could not be found\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\pazam\\anaconda3\\envs\\simba-test\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\simba\\SimBA.py\", line 8, in \r\n import deeplabcut\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\__init__.py\", line 29, in \r\n from 
deeplabcut import generate_training_dataset\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\generate_training_dataset\\__init__.py\", line 19, in \r\n from deeplabcut.generate_training_dataset.labeling_toolbox import *\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\generate_training_dataset\\labeling_toolbox.py\", line 27, in \r\n from deeplabcut.utils import auxiliaryfunctions\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\utils\\__init__.py\", line 1, in \r\n from deeplabcut.utils.make_labeled_video import *\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\utils\\make_labeled_video.py\", line 38, in \r\n from deeplabcut.pose_estimation_tensorflow.config import load_config\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\pose_estimation_tensorflow\\__init__.py\", line 13, in \r\n from deeplabcut.pose_estimation_tensorflow.nnet import *\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\pose_estimation_tensorflow\\nnet\\__init__.py\", line 14, in \r\n from deeplabcut.pose_estimation_tensorflow.nnet.losses import *\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\deeplabcut\\pose_estimation_tensorflow\\nnet\\losses.py\", line 5, in \r\n import tensorflow as tf\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\tensorflow\\__init__.py\", line 28, in \r\n from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\tensorflow\\python\\__init__.py\", line 49, in \r\n from tensorflow.python import pywrap_tensorflow\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\tensorflow\\python\\pywrap_tensorflow.py\", line 30, in \r\n self_check.preload_check()\r\n File \"c:\\users\\pazam\\anaconda3\\envs\\simba-test\\lib\\site-packages\\tensorflow\\python\\platform\\self_check.py\", line 82, in preload_check\r\n % (build_info.cudart_dll_name, build_info.cuda_version_number))\r\nImportError: Could not find 'cudart64_100.dll'. TensorFlow requires that this DLL be installed in a directory that is named in your %PATH% environment variable. Download and install CUDA 10.0 from this URL: https://developer.nvidia.com/cuda-90-download-archive\"\r\n\r\nAny ideas why this might be happening?", + "user": "RobertoEnrique", + "reaction_cnt": 0, + "created_at": "2021-08-12T20:54:40Z", + "updated_at": "2021-08-24T22:52:07Z", + "author": "RobertoEnrique", + "comments": [ + { + "body": "Hi @RobertoEnrique - yes. \r\n\r\nI think you have installed the simba-uw-tf version, which needs tensorflow. Tensorflow in turn requires cuda and cudnn, which Simba cannot find the correct versions of installed on your computer. I recommend installing the Simba-uw-tf-dev version of SimBA instead, and running the pose-estimation outside of SimBA. \r\n\r\n\r\n", + "created_at": "2021-08-14T11:28:35Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you so much! I installed simba uw-tf-dev and now it works perfect.\n\nI already have pose estimation from deeplabcut (dlc), so thats good. 
But\nthis means that, in the future, If I had simba installed on a gpu with\ntensorflow and cuda I could run the dlc labeling and training within simba\ninstead of having to do them separately?\n\nOn Sat, Aug 14, 2021 at 7:28 AM sgoldenlab ***@***.***> wrote:\n\n> Hi @RobertoEnrique - yes.\n>\n> I think you have installed the simba-uw-tf version, which needs\n> tensorflow. Tensorflow in turn requires cuda and cudnn, which Simba cannot\n> find. I recommend installing the Simba-uw-tf-dev version of SimBA instead,\n> and running the pose-estimation outside of SimBA.\n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> ,\n> or unsubscribe\n> \n> .\n> Triage notifications on the go with GitHub Mobile for iOS\n> \n> or Android\n> \n> .\n>\n", + "created_at": "2021-08-14T11:42:55Z", + "author": "RobertoEnrique" + }, + { + "body": "Also, a new issue that I am having is:\n\nIn the \"Import tracking data\" window when creating the project, after\nhaving imported 1 single video and specified H5(multi-animal DLC), # of\nanimals (2), and named each animal, and choosing the tracking type\n\"ellipse\", I choose the path to the h5 file but after clicking on \"Import\nh5\" nothing happens. There is no pop-up window as there should be. Any\nideas why this could be happening?\n\nBest,\nRoberto\n\nEl sáb, 14 ago 2021 a las 7:42, Roberto Rodriguez Morales (<\n***@***.***>) escribió:\n\n> Thank you so much! I installed simba uw-tf-dev and now it works perfect.\n>\n> I already have pose estimation from deeplabcut (dlc), so thats good. But\n> this means that, in the future, If I had simba installed on a gpu with\n> tensorflow and cuda I could run the dlc labeling and training within simba\n> instead of having to do them separately?\n>\n> On Sat, Aug 14, 2021 at 7:28 AM sgoldenlab ***@***.***>\n> wrote:\n>\n>> Hi @RobertoEnrique - yes.\n>>\n>> I think you have installed the simba-uw-tf version, which needs\n>> tensorflow. Tensorflow in turn requires cuda and cudnn, which Simba cannot\n>> find. I recommend installing the Simba-uw-tf-dev version of SimBA instead,\n>> and running the pose-estimation outside of SimBA.\n>>\n>> —\n>> You are receiving this because you were mentioned.\n>> Reply to this email directly, view it on GitHub\n>> ,\n>> or unsubscribe\n>> \n>> .\n>> Triage notifications on the go with GitHub Mobile for iOS\n>> \n>> or Android\n>> \n>> .\n>>\n>\n", + "created_at": "2021-08-14T16:15:08Z", + "author": "RobertoEnrique" + }, + { + "body": "Hey @RobertoEnrique - yes I recommend keeping them separately. \r\n\r\nCan you check the terminal window, to see if you have any error message there, and paste it here? \r\n\r\nAlso, do you see any msgs in the main Simba window, after you click to import?", + "created_at": "2021-08-14T17:54:50Z", + "author": "sgoldenlab" + }, + { + "body": "So, the script window on the SimBA GUI says this:\n\nProject \"Asty_Aggression_Test1\" created in folder \"simBA_analysis\"\nProject \"Asty_Aggression_Test1\" created in folder \"simBA_analysis\"\nCopying video...\n210406_Surface_RC_Males_Match_1_Light.avi copied to\nAsty_Aggression_Test1\\project_folder\\videos\\210406_Surface_RC_Males_Match_1_Light.avi\nFinished copying video.\nApplying settings for multi-animal tracking...\nImporting 1 multi-animal DLC h5 files to the current project\n\n\nIt looks as if it is still importing the h5. 
Then, when I look at the\ncommand line terminal, this is the message that I see:\n\n(simbaenvnotensor) C:\\Users\\pazam>simba\nException in Tkinter callback\nTraceback (most recent call last):\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\tkinter\\__init__.py\",\nline 1705, in __call__\n return self.func(*args)\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\site-packages\\simba\\SimBA.py\",\nline 3194, in importh5\n\nimportMultiDLCpose(self.configinifile,self.h5path.folder_path,self.dropdowndlc.getChoices(),idlist,\nself.interpolation.getChoices())\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\site-packages\\simba\\read_DLCmulti_h5_function.py\",\nline 86, in importMultiDLCpose\n currDf = pd.read_hdf(file)\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\site-packages\\pandas\\io\\pytables.py\",\nline 384, in read_hdf\n store = HDFStore(path_or_buf, mode=mode, **kwargs)\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\site-packages\\pandas\\io\\pytables.py\",\nline 484, in __init__\n tables = import_optional_dependency(\"tables\")\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\site-packages\\pandas\\compat\\_optional.py\",\nline 93, in import_optional_dependency\n raise ImportError(message.format(name=name, extra=extra)) from None\nImportError: Missing optional dependency 'tables'. Use pip or conda to\ninstall tables.\n\nNot sure where the error is happening. It seems it cannot head the h5,\nright?\n\n\nEl sáb, 14 ago 2021 a las 13:55, sgoldenlab ***@***.***>)\nescribió:\n\n> Hey @RobertoEnrique - yes I recommend\n> keeping them separately.\n>\n> Can you check the terminal window, to see if you have any error message\n> there, and paste it here?\n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> ,\n> or unsubscribe\n> \n> .\n> Triage notifications on the go with GitHub Mobile for iOS\n> \n> or Android\n> \n> .\n>\n", + "created_at": "2021-08-15T12:17:12Z", + "author": "RobertoEnrique" + }, + { + "body": "Your python environment for some reason is missing tables. \r\n\r\nCould you, close SimBA, type `pip install tables`, and boot up SimBA again and see if it works?", + "created_at": "2021-08-15T22:26:41Z", + "author": "sgoldenlab" + }, + { + "body": "Hi again,\n\nI did pip install tables and that seemed to work, but the window that pops\nup when I click \"Import h5\" is empty, it's a gray block that says \"Define\nIDs\". 
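For reference, the `Missing optional dependency 'tables'` error quoted above comes from pandas itself: `pd.read_hdf` delegates HDF5 reading to the optional PyTables package (module name `tables`), which is why `pip install tables` resolves it. A minimal sketch of that dependency check — the file name is hypothetical:

```python
import importlib.util

import pandas as pd

# pd.read_hdf() relies on the optional PyTables package (module name
# "tables"); if it is absent, pandas raises the ImportError seen above.
if importlib.util.find_spec("tables") is None:
    raise ImportError("PyTables missing - fix with: pip install tables")

df = pd.read_hdf("video1DLC_resnet50.h5")  # hypothetical DLC output file
print(df.head())
```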
When I look into the terminal, it seems as if whatever file it is\nreading is too long/big to handle.\n\nHere is the code in the terminal and I also attach a screenshot of what I\nam seeing:\n\nTerminal:\n\n(simbaenvnotensor) C:\\Users\\pazam>simba\nException in Tkinter callback\nTraceback (most recent call last):\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\tkinter\\__init__.py\",\nline 1705, in __call__\n return self.func(*args)\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\site-packages\\simba\\SimBA.py\",\nline 3194, in importh5\n\nimportMultiDLCpose(self.configinifile,self.h5path.folder_path,self.dropdowndlc.getChoices(),idlist,\nself.interpolation.getChoices())\n File\n\"C:\\Users\\pazam\\anaconda3\\envs\\simbaenvnotensor\\lib\\site-packages\\simba\\read_DLCmulti_h5_function.py\",\nline 129, in importMultiDLCpose\n cv2.circle(overlay, (int(x_cord), int(y_cord)), circleScale, currColor,\n-1, lineType=cv2.LINE_AA)\nOverflowError: Python int too large to convert to C long\n\n\nSimBA Window Text:\n\n\nWelcome fellow scientists :)\n\n\n\nProject \"Aggression_Test3\" created in folder \"Roberto\"\nCopying video...\n210406_Surface_RC_Males_Match_1_Light.avi copied to\nAggression_Test3\\project_folder\\videos\\210406_Surface_RC_Males_Match_1_Light.avi\nFinished copying video.\nApplying settings for multi-animal tracking...\nImporting 1 multi-animal DLC h5 files to the current project\n\n\nI am not sure why there is no video.\n\n\n\nEl dom, 15 ago 2021 a las 18:26, sgoldenlab ***@***.***>)\nescribió:\n\n> Your python environment for some reason is missing tables.\n>\n> Could you, close SimBA, type pip install pytables, and boot up SimBA\n> again and see if it works?\n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> ,\n> or unsubscribe\n> \n> .\n> Triage notifications on the go with GitHub Mobile for iOS\n> \n> or Android\n> \n> .\n>\n", + "created_at": "2021-08-18T18:47:28Z", + "author": "RobertoEnrique" + }, + { + "body": "`line 129, in importMultiDLCpose\r\n cv2.circle(overlay, (int(x_cord), int(y_cord)), circleScale, currColor,\r\n-1, lineType=cv2.LINE_AA)\r\nOverflowError: Python int too large to convert to C long`\r\n\r\nThat's the key line - this is an odd error haven't seen this before. SimBA is trying to convert the x,y coordinate for the body-parts to integers (they are stored as floats in DLC) so we can plot their locations in the image. However the number is too big to be stored as an integer - which really shouldn't happen. Would you mind sharing your dlc h5 file with goldenneurolab@gmail.com and I will take a look at it if there is something odd? ", + "created_at": "2021-08-18T21:58:44Z", + "author": "sronilsson" + }, + { + "body": "Sure! I would really appreciate the help! Been trying to make this\nDLC/SimBA work in fish and has been pretty uphill. Also... I do have a csv\nfile that DLC now creates after the filtering step in multianimal tracking\n(the new release). In there we have the x-y coordinates for all body\nparts/individuals. 
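The `OverflowError` above is worth unpacking: `cv2.circle` casts its centre coordinates to a C `long`, which is 32-bit on Windows, so a bogus pose-estimation coordinate in the billions overflows the cast. A minimal sketch (coordinates made up) of the kind of bounds check that avoids the crash — not SimBA's actual code, just an illustration of the failure mode:

```python
import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in video frame
h, w = frame.shape[:2]

# Hypothetical body-part coordinates; pose estimation occasionally emits
# garbage values (billions, negatives) for animals absent from the frame.
coords = [(584.6, 148.5), (3221225472.0, -17.0)]

for x, y in coords:
    # cv2.circle casts to a C long; values in the billions overflow the
    # 32-bit long on Windows, so skip anything outside the frame bounds.
    if not (0 <= x < w and 0 <= y < h):
        continue
    cv2.circle(frame, (int(x), int(y)), 5, (0, 255, 255), -1, lineType=cv2.LINE_AA)
```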
Maybe there is a way to upload that instead?\n\nHere is the h5:\n\n 210406_Surface_RC_Males_Match_1_LightDLC_resnet...\n\n\n\nEl mié, 18 ago 2021 a las 17:58, Simon Nilsson ***@***.***>)\nescribió:\n\n> line 129, in importMultiDLCpose cv2.circle(overlay, (int(x_cord),\n> int(y_cord)), circleScale, currColor, -1, lineType=cv2.LINE_AA)\n> OverflowError: Python int too large to convert to C long\n>\n> That's the key line - this is an odd error haven't seen this before. SimBA\n> is trying to convert the x,y coordinate for the body-parts to integers\n> (they are stored as floats in DLC) so we can plot their locations in the\n> image. However the number is too big to be stored as an integer - which\n> really shouldn't happen. Would you mind sharing your dlc h5 file with\n> ***@***.*** and I will take a look at it if there is\n> something odd?\n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> ,\n> or unsubscribe\n> \n> .\n> Triage notifications on the go with GitHub Mobile for iOS\n> \n> or Android\n> \n> .\n>\n", + "created_at": "2021-08-19T13:27:37Z", + "author": "RobertoEnrique" + }, + { + "body": "Thanks, just saw this, I will try to take a look today. ", + "created_at": "2021-08-23T14:05:39Z", + "author": "sronilsson" + }, + { + "body": "Hi @RobertoEnrique - it is the tracking data that you get from DLC that is strange, specifically for animal number 2 - \"ind2\", looking at this screenshot for example, on the left you se the final body-part for \"ind1\", the Dorsal_Fin_Tip - all looks well there with x,y pixel-values and likelihoods which are to be expected. \r\n\r\nThen you get to animal 2 - \"ind2\" and this animals Mouth is located at pixel locations that runs into several billions. This number is too large of an integer so SimBA breaks. I am not sure how this could happen in your pose-estimation end. Judging by the pose-estimation likelihood values for \"ind2\", I'd say that this animal is also not in the video for the first 7000 frames or so. There are even some negative pixel values in there for \"ind2\", and this is not possible. \r\n\r\nI would try and clip the video, to remove these first frames when ind2 is not in the video, then analyse it again with pose-estimation followed by SimBA. \r\n\r\nAlternatively, use the DLC filtering functions to remove body-part estimations with low probabilities, then import the data into SimBA using the interpolation functions to fill in the missing values: https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-3-import-dlc-tracking-data\r\n\r\n![Untitled](https://user-images.githubusercontent.com/34761092/130683287-2dcd0afc-0bb9-493b-8db4-d2e8c779f05a.png)\r\n \r\n\r\n\r\n", + "created_at": "2021-08-24T20:23:46Z", + "author": "sronilsson" + }, + { + "body": "Thank you for looking into this! I will follow your suggestions and see if\nI get it to work.\n\nThanks!\n-Roberto\n\nOn Tue, Aug 24, 2021 at 4:23 PM Simon Nilsson ***@***.***>\nwrote:\n\n> Hi @RobertoEnrique - it is the\n> tracking data that you get from DLC that is strange, specifically for\n> animal number 2 - \"ind2\", looking at this screenshot for example, on the\n> left you se the final body-part for \"ind1\", the Dorsal_Fin_Tip - all looks\n> well there with x,y pixel-values and likelihoods which are to be expected.\n>\n> Then you get to animal 2 - \"ind2\" and this animals Mouth is located at\n> pixel locations that runs into several billions. This number is too large\n> of an integer so SimBA breaks. 
I am not sure how this could happen in your\n> pose-estimation end. Judging by the pose-estimation likelihood values for\n> \"ind2\", I'd say that this animal is also not in the video for the first\n> 7000 frames or so. There are even some negative pixel values in there for\n> \"ind2\", and this is not possible.\n>\n> I would try and clip the video, to remove these first frames when ind2 is\n> not in the video, then analyse it again with pose-estimation followed by\n> SimBA.\n>\n> Alternatively, use the DLC filtering functions to remove body-part\n> estimations with low probabilities, then import the data into SimBA using\n> the interpolation functions to fill in the missing values:\n> https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-3-import-dlc-tracking-data\n>\n> [image: Untitled]\n> \n>\n> —\n> You are receiving this because you were mentioned.\n> Reply to this email directly, view it on GitHub\n> ,\n> or unsubscribe\n> \n> .\n> Triage notifications on the go with GitHub Mobile for iOS\n> \n> or Android\n> \n> .\n>\n", + "created_at": "2021-08-24T22:52:07Z", + "author": "RobertoEnrique" + } + ] + }, + { + "title": "Error while Trying to import frames..", + "body": "Hey guys, first of all thank you for creating simBA which really offers much more possibilities to my project!\r\n\r\nTo be honest ive got the problem, that things sometimes are not working as written in the offical guide..\r\n\r\nMy current situation is, that in the begin of creating a new simba project i recieve an error, when i try to import frames, which ive extractet already before with DLC.\r\n\r\nIve created a new folder on my desktop and stored within 5 videofiles (mp4) + the related csv tracking files + the 5 subfolders that are containing frames, ive already extractet by the DLC process before.\r\n\r\nSo when selecting one of those subfolders with frames and then trying to import i recieve that specific error:\r\n![image](https://user-images.githubusercontent.com/81317771/127379112-2b07fda5-8d5e-46af-8120-29c661b97afe.png)\r\n\r\nis there anything particular i should give attention while storage of the frame files?\r\n\r\nBest\r\nM.", + "user": "Marcelobot", + "reaction_cnt": 0, + "created_at": "2021-07-28T18:54:47Z", + "updated_at": "2021-08-26T20:20:12Z", + "author": "Marcelobot", + "comments": [ + { + "body": "Hello @Marcelobot!\r\n\r\nThere are a few things that could cause this. \r\n\r\nFirst, it looks like you are trying to import a file called `CollectedData_Marcel Moor.csv` ? The file name hints that this is you DeepLabCut annotations, not a DeepLabCut-generated analysis file, and this file you are trying to import is not what you want to import into SimBA. See the second paragraph of [THIS FAQ](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#13-when-i-try-to-correct-outliers-i-get-an-error-about-the-wrong-number-of-columns-for-example-value-error-length-mismatch-expected-axis-has-16-elements-new-values-have-24) for some more info.\r\n\r\nFor some reason, you get `NotADirectoryError`. SimBA wants you to select a directory, not a file. I am not how it is possible to select a file in this part of the program, which version of SimBA are you running? \r\n\r\nIn your file path, there are some funky characters (`$`) and full stops (`Marcel.Moor`), could you see if it works if you move the files from the server to the local computer and see if it works then? 
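The `NotADirectoryError` in that screenshot is the key clue: a file path was handed to code that expects a folder of frames. A short sketch (paths hypothetical) of the kind of up-front validation that makes this failure obvious at the point of selection rather than deep inside a listing call:

```python
import os

selected = "/path/to/frames_folder"  # hypothetical user selection

# A frame-import step expects a directory of images; validating early gives
# a clearer message than a NotADirectoryError raised inside os.listdir.
if not os.path.isdir(selected):
    raise NotADirectoryError(f"Expected a folder of frames, got: {selected}")

frames = [f for f in os.listdir(selected) if f.lower().endswith((".png", ".jpg"))]
print(f"Found {len(frames)} frames in {selected}")
```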
\r\n\r\n\r\n\r\n", + "created_at": "2021-07-29T01:42:10Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Instalation error because of conflicting dependencies", + "body": "**Describe the bug**\r\nHi, I tried to install Simba using the command pip install Simba-uw-tf but I got a dependency conflict error between deeplabcut 2.0.9 has requirement scikit-learn ~ = 0.19.2, but you have scikit-learn 0.19.2 and simba-uw-tf 1.0.3 depends on sikit-learn==0.22.2.\r\nIn issue # 63 I read that these warnings would not affect Simba's performance, but when I wanted to check its installation it gave me an error of not being able to find the specified module, which is why I have not been able to install Simba successfully.\r\n\r\nCould you please help me\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. pip install simba-uw-tf\r\n2. See error\r\n\r\n**Expected behavior**\r\n\r\n**Screenshots**\r\nI add two screenshots, the first shows the error when wanting to install Simba and the second screenshot corresponds to the verification of the installation\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python Version 3.6.0\r\n - Are you using anaconda: Yes\r\n![thumbnail_image](https://user-images.githubusercontent.com/56560672/126027335-24e634a7-f300-48b2-8ecf-9c9e09f25d10.png)\r\n![thumbnail_image (1)](https://user-images.githubusercontent.com/56560672/126027340-2cd60518-3320-470d-890c-ae000de8fd74.png)\r\n\r\n \r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "aXcELLcore", + "reaction_cnt": 0, + "created_at": "2021-07-17T05:52:58Z", + "updated_at": "2021-07-21T15:57:50Z", + "author": "aXcELLcore", + "comments": [ + { + "body": "Hello @aXcELLcore ! Check this faq entry: https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#11-when-i-install-or-update-simba-i-see-a-bunch-or-messages-in-the-console-telling-there-me-about-some-dependency-conflicts-the-messages-may-look-a-little-like-this\r\n\r\nThe missing package in your python environment is `Shapely`. I would suggest to first try the second suggestion in the faq link: downgrade pip, then try to install SimBA again. Let me know how it goes.", + "created_at": "2021-07-17T09:22:55Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you very much for answering me, for me your help is very valuable and especially being able to use Simba. I have tried the suggestion to downgrade pip to 20.1.1 but have not been successful, I also tried to install simba via pip install simba-uw-tf --no-dependencies but I get the same error (OSError (WinError 126))\r\nCould you guide me on how I can solve this error please\r\n![111](https://user-images.githubusercontent.com/56560672/126054276-620ecc58-5c6d-42cc-a93a-57f6db68bfaa.png)\r\n![thumbnail_image (2)](https://user-images.githubusercontent.com/56560672/126054290-e3ed6676-1172-4712-b26f-aea0035fe568.png)\r\n", + "created_at": "2021-07-18T03:03:15Z", + "author": "aXcELLcore" + }, + { + "body": "Hello @aXcELLcore - those screenshot are helpful, cheers. So this red `ERROR` messages you see related to dependency incompatibilities are just warning without consequence here and the problem launching SimBA comes from not having having installed the `Shapely` library properly. 
\r\n\r\nThere are a few ways to fix this, I have listed the threads which helped other to solve it here:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#5-simba-wont-launch---theres-an-error-with-some-complaint-about-shapely\r\n\r\nYou are using conda, so also see Steps 7-8 in this tutorial for how to get Shapely working:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/anaconda_installation.md", + "created_at": "2021-07-19T10:01:13Z", + "author": "sronilsson" + }, + { + "body": "Thank you very much for your answer sronilsson has worked perfectly and now it's time to have fun doing my first project in Simba", + "created_at": "2021-07-20T17:29:13Z", + "author": "aXcELLcore" + }, + { + "body": "Thanks! Let us know if you jave any other issues popping up", + "created_at": "2021-07-21T15:57:49Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Error running SimBA for the first time: ModuleNotFoundError: No module named 'deeplabcut'", + "body": "I have just installed the latest version of SimBA following the instructions for installing with anaconda here: https://github.com/sgoldenlab/simba/blob/master/docs/installation.md\r\n\r\nWhen I did this I had a dependency error, which I resolved using the no-dependencies download instructions on question 11 of the FAQ: https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md\r\n\r\nNow that I am trying to run SimBA for the first time I am getting the following error:\r\n```\r\n(simba) C:\\WINDOWS\\system32>simba\r\nTraceback (most recent call last):\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\ckelly\\Anaconda3\\envs\\simba\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"c:\\users\\ckelly\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 8, in \r\n import deeplabcut\r\nModuleNotFoundError: No module named 'deeplabcut'\r\n```\r\n\r\nHow do I fix this?", + "user": "CorinneAKelly", + "reaction_cnt": 0, + "created_at": "2021-07-12T19:28:55Z", + "updated_at": "2021-07-12T19:53:27Z", + "author": "CorinneAKelly", + "comments": [ + { + "body": "Please follow the instruction in the given link to install simba.\r\n\r\nIt seems like you install the version with tensorflow. I recommend you to install the development version of simba (simba-uw-tf-dev) and use deeplabcut separately.\r\n\r\nhttps://simba-docs.readthedocs.io/en/latest/docs/intro/installation.html#install-simba-dev-using-anaconda-does-not-support-dlc", + "created_at": "2021-07-12T19:47:46Z", + "author": "inoejj" + }, + { + "body": "I see, thanks! I am wanting to use deeplabcut separately anyway. Is the development version just as stable? ", + "created_at": "2021-07-12T19:50:03Z", + "author": "CorinneAKelly" + }, + { + "body": "Yes it is stable and up to date.", + "created_at": "2021-07-12T19:51:57Z", + "author": "inoejj" + }, + { + "body": "Excellent. Thank you so much! I did this and it worked perfectly.", + "created_at": "2021-07-12T19:53:27Z", + "author": "CorinneAKelly" + } + ] + }, + { + "title": "Trailing slash removed from path", + "body": "**Describe the bug**\r\nUsing the multicrop tool, the trailing slash in the *Input Folder* path is removed, leading to wrong path for `glob()` to find the input videos.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to 'Tools'\r\n2. 
Click on 'Multi-crop'\r\n3. Click on \"Browse Folder\" and enter absolute path to folder containing the videos `/path/to/videos/folder/`: the *Video Folder* text area then shows `path/to/videos/folder`. \r\n4. Enter `mp4` as *Video type*\r\n5. This results in the search path being: `/path/to/videos/folder./*.mp4`: this results in no video file being found despite the given folder including a `.mp4` video file.\r\n\r\n**Expected behavior**\r\nThe video(s) should be correctly found.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS] openSUSE Leap 15.2\r\n - Python Version [e.g. 3.6.0]: 3.6.10\r\n - Are you using anaconda? yes (miniconda3)\r\n \r\n\r\n**Additional context**\r\n`SimBA` was installed as follows.\r\n\r\n```shell\r\nconda create -n simbaenv -c conda-forge python=3.6.10 wxpython=4.0.4\r\nconda activate simbaenv\r\npip install simba-uw-no-tf\r\npip uninstall shapely\r\nconda install -c conda-forge shapely\r\nconda install -c conda-forge ffmpeg\r\n```\r\n\r\n**Workaround**\r\nManually adding the removed trailing slash in `multicrop.py` works:\r\nCurrent:\r\nhttps://github.com/sgoldenlab/simba/blob/b69668bbe0e11a95f6d69b0efb587290f0753bd3/simba/multiplecrop.py#L10\r\n\r\nReplaced by:\r\n```python\r\n videoFtype = \"/.\" + '/*.' + vtype\r\n```\r\n\r\nI'm not familiar with Tkinter and how paths are processed from the user input (or even if it's Tkinter responsible here), so I wouldn't be surprised if it's related to the fact that I'm running under Linux (which I believe is not supported for SimBA). I'm not sure it qualifies for a fix as a result but it would certainly be appreciated if a simple cross-platform solution exists.\r\n\r\nIn advance, thanks a lot for your time!\r\n", + "user": "florianduclot", + "reaction_cnt": 0, + "created_at": "2021-07-11T20:58:54Z", + "updated_at": "2021-07-13T01:00:41Z", + "author": "florianduclot", + "comments": [ + { + "body": "Thank you so much for the detail report about this bug. \r\n\r\nI have fixed the code and wonder if you can give it a try and let me know if this works on your end. 
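For anyone hitting the same thing before upgrading: the root cause is plain string concatenation of the folder path with the wildcard pattern, so a stripped trailing slash corrupts the search path. Building the pattern with `os.path.join` sidesteps the trailing-slash question entirely. A sketch of that approach — not SimBA's actual patch:

```python
import glob
import os

video_dir = "/path/to/videos/folder"  # works with or without a trailing slash
vtype = "mp4"

# os.path.join inserts the separator only when needed, so the pattern is
# correct whether or not the GUI stripped the trailing slash (Windows too).
pattern = os.path.join(video_dir, "*." + vtype)
videos = glob.glob(pattern)
print(videos)
```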
If not, I will try it again and make sure it works for Linux users too!\r\n\r\nPlease install the development version of SimBA as it is the latest SimBA version that we worked on everyday.\r\n\r\n**Installation**\r\n```\r\nconda create -n simbadev -c conda-forge python=3.6.10 wxpython=4.0.4\r\nconda activate simbadev \r\npip install simba-uw-tf-dev==0.84.8\r\npip uninstall shapely\r\nconda install -c conda-forge shapely\r\nconda install -c conda-forge ffmpeg\r\n```\r\n\r\n", + "created_at": "2021-07-12T19:18:38Z", + "author": "sgoldenlab" + }, + { + "body": "> I have fixed the code and wonder if you can give it a try and let me know if this works on your end.\r\n\r\nI can confirm that it now works using `simba-uw-tf-dev==0.84.8` installed following your instructions.\r\n\r\nThanks a lot for spending the time to fix it!\r\n\r\nI'll close this issue as it is fixed.", + "created_at": "2021-07-13T01:00:41Z", + "author": "florianduclot" + } + ] + }, + { + "title": "features Sum_probabilities, Mean_probabilities, Low_prob_detections_x are all zero (SLEAP import)", + "body": "**Describe the bug**\r\n\r\nThe probabilities are correctly loaded, see `1_head_1_p` column in the `features_extracted/fileneame.csv`:\r\n\r\n| | 1_head_1_x | 1_head_1_y | 1_head_1_p |\r\n| ---- | ---------------- | ---------------- | ----------------- |\r\n| 0 | 584.65966796875 | 148.553298950195 | 0.94010466337204 |\r\n| 1 | 600.471740722656 | 152.566101074219 | 0.900553524494171 |\r\n| 2 | 616.349243164063 | 156.568862915039 | 0.921086132526398 |\r\n| 3 | 624.386596679688 | 160.406555175781 | 0.936960339546204 |\r\n\r\nall features seems calculated except for the probability based:\r\n\r\nSum_All_bp_movements_2_mean_15 | Sum_probabilities | Mean_probabilities | Low_prob_detections_0.1 | Low_prob_detections_0.5 | Low_prob_detections_0.75\r\n-- | -- | -- | -- | -- | --\r\n36.0192956359203 | 0 | 0 | 0 | 0 | 0\r\n0.315870048413295 | 0 | 0 | 0 | 0 | 0\r\n0.278654693316602 | 0 | 0 | 0 | 0 | 0\r\n0.080197638704377 | 0 | 0 | 0 | 0 | 0\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. load [SLP](https://cmp.felk.cvut.cz/~smidm/b05ec545adf99567a39b33948853861d/HH2_crf17_pre.mp4.tracking_2ants.slp) file with 2 ants with 3 bodyparts\r\n2. skip outlier correction\r\n3. compute features\r\n\r\n**Expected behavior**\r\n`Sum_probabilities`, `Mean_probabilities`, `Low_prob_detections_x` features should have correct values\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: linux, Fedora 33\r\n - Python Version 3.6.10\r\n - Are you using anaconda? only environment, SimBA installed using `pip install simba-uw-tf-dev`\r\n\r\n", + "user": "smidm", + "reaction_cnt": 0, + "created_at": "2021-07-11T05:53:41Z", + "updated_at": "2021-07-19T11:07:30Z", + "author": "smidm", + "comments": [ + { + "body": "Thanks for reporting this, very helpful. I found it, a bug in simba.feature_scripts.extract_features_user_defined.py, I think `axis=1` has to be added at line 158-159. I will let you know when fixed.\r\n\r\n\"Untitled_2\"\r\n", + "created_at": "2021-07-13T16:51:03Z", + "author": "sronilsson" + }, + { + "body": "@smidm I have fixed it, please let me know if this fixes your problem.\r\n\r\nPlease install the latest version of development wheel.\r\n\r\n```\r\npip install simba-uw-tf-dev==0.84.9 --upgrade\r\n```", + "created_at": "2021-07-13T19:28:49Z", + "author": "inoejj" + }, + { + "body": "Thanks! 
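The `axis=1` detail is the whole bug: the probability features are per-frame aggregates, so the sum, mean, and low-probability counts must run across the probability columns of each row, not down each column (the pandas default, `axis=0`). A toy illustration, with column names modeled on the table above:

```python
import pandas as pd

# Toy probability columns for two body parts over three frames
df = pd.DataFrame({"1_head_1_p": [0.94, 0.90, 0.92],
                   "1_thorax_1_p": [0.88, 0.41, 0.85]})
p_cols = [c for c in df.columns if c.endswith("_p")]

# axis=1 aggregates across columns, yielding one value per frame;
# the default axis=0 would aggregate down each column instead.
df["Sum_probabilities"] = df[p_cols].sum(axis=1)
df["Mean_probabilities"] = df[p_cols].mean(axis=1)
df["Low_prob_detections_0.5"] = (df[p_cols] < 0.5).sum(axis=1)
print(df)
```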
I have tested `simba-uw-tf-dev-0.85.0` and the features are correct.", + "created_at": "2021-07-19T11:07:30Z", + "author": "smidm" + } + ] + }, + { + "title": "Editing GUI Not Reflected When Running App", + "body": "**Describe the bug**\r\nI'm trying to edit some aspects of the GUI by editing the \"SimBA.py\" file. However, when I try to make a change, it does not reflect when I run the app.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open the \"SimBA.py\" file using Visual Studio code.\r\n2. Edit some lines, for example, \"Welcome fellow scientists :)\" to \"Welcome fellow scientists!\"\r\n\r\n**Expected behavior**\r\nI expected the text to be changed when I ran the GUI. \r\n\r\n**Screenshots**\r\nFor example, trying to change a simple thing:\r\n![image](https://user-images.githubusercontent.com/76238646/121599549-6f3cd300-ca00-11eb-9cce-945cfa5dfa55.png)\r\n\r\nAnd it does not reflect in the GUI:\r\n![image](https://user-images.githubusercontent.com/76238646/121599595-811e7600-ca00-11eb-993c-b87083f11cf6.png)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 10\r\n - Python 3.6.0 64-bit\r\n - Using anaconda\r\n \r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "joseph-yclam", + "reaction_cnt": 1, + "created_at": "2021-06-10T21:29:38Z", + "updated_at": "2021-06-24T18:18:55Z", + "author": "joseph-yclam", + "comments": [ + { + "body": "Maybe you have to change it in on the right script that you are running or make sure that you are saving your script :)", + "created_at": "2021-06-10T21:41:21Z", + "author": "inoejj" + }, + { + "body": "@inoejj Hi! I'm so sorry, but I think this should be the correct script? And I am positive that I have saved it. I run the project using the \"App.py\" file!", + "created_at": "2021-06-10T22:42:19Z", + "author": "joseph-yclam" + }, + { + "body": "Hrm.. it seems like you are using anaconda, did you change the SimBA.py that is located in the envs/yourenvironmentname/Lib/site-packages/simba/SimBA.py?\r\n\r\n", + "created_at": "2021-06-14T20:35:07Z", + "author": "inoejj" + } + ] + }, + { + "title": "pose configuration without bodypart's name and picture on MacOS", + "body": "Hi, \r\nI'have installed simba on my mac (version 10.13.6), following the instructions and using anaconda (https://github.com/sgoldenlab/simba/blob/master/docs/installation.md).\r\nAt the end of the installation process, in the prompt appear these message:\r\n\r\n![Schermata 2021-06-09 alle 17 01 42](https://user-images.githubusercontent.com/85634193/121384190-540e8c80-c948-11eb-9a9f-d714c2c88489.png)\r\n\r\nBut, anyway, simba runs...\r\n\r\nI open the program and try to create a project, but the pose configuration does not work correctly, it lacks of some fields.\r\nI inserted the pose config name, the number of animals and bodyparts and I browse the image path and then clicking on \"confirm\" button nothing occured: the fields about the bodypart's name didn't appear in the window.\r\n\r\nI have also tried to solve the problem with matplotlib (uninstalling and installing it again), but the pose configuration problem is always there.\r\n\r\nProbably the problem is related to the MacOs system... 
\r\nIs there a simba version more suitable for MacOs System?\r\n(I tried the installation process with and withou tensorflow: \"simba-uw-tf==1.3.7\" and \"simba-uw-no-tf\"\r\n\r\nThanks,\r\nCarlo.", + "user": "carlitomu", + "reaction_cnt": 0, + "created_at": "2021-06-09T15:46:11Z", + "updated_at": "2021-06-09T20:26:42Z", + "author": "carlitomu", + "comments": [ + { + "body": "Hey @carlitomu - to run SimBA on a Mac, you will have to use the `simba-uw-tf-dev` version of SimBA. You can install it via `pip install `simba-uw-tf-dev` - can you give it a go and let me know if it boots up and you see all the buttons etc? \r\n\r\n", + "created_at": "2021-06-09T16:44:26Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks @sgoldenlab !\r\nI created a new environment in anaconda and I used simba-uw-tf-dev version of SimBA, but the problems remains the sames.\r\nThe prompt continues to give this message:\r\n\r\n\"Schermata\r\n\r\nThen in the pose configuration menu there'ren't the fields about the bodypart's name:\r\n\r\n\"Schermata\r\n\r\nIn the prompt appears this message\r\n\r\n\"Schermata\r\n\r\nAn in SimBa this one:\r\n\r\n\"Schermata\r\n\r\nThe number \"4\" I think it's related to the number of bodypart I inserted in the pose configuration window.\r\nAn it appears three time because I clicked three time on the \"confirm\" button.\r\n\r\nSo, this version of SimBa has the same problems with my mac.\r\n", + "created_at": "2021-06-09T17:08:12Z", + "author": "carlitomu" + }, + { + "body": "Please update to simba-tf-dev==0.84.2 and the issue should be fixed.\r\n\r\nBest,\r\nJJ", + "created_at": "2021-06-09T19:19:06Z", + "author": "inoejj" + }, + { + "body": "thanks @inoejj and @sgoldenlab.\r\nThe simba-tf-dev==0.84.2 works!\r\nBye,\r\nCarlo.", + "created_at": "2021-06-09T19:50:49Z", + "author": "carlitomu" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 8.2.0", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 8.2.0.\n
Release notes

Sourced from pillow's releases.

8.2.0

https://pillow.readthedocs.io/en/stable/releasenotes/8.2.0.html

Changes / Dependencies / Deprecations: ... (truncated)

Changelog

Sourced from pillow's changelog.

8.2.0 (2021-04-01)

- Added getxmp() method #5144 [UrielMaD, radarhere]
- Add ImageShow support for GraphicsMagick #5349 [latosha-maltba, radarhere]
- Do not load transparent pixels from subsequent GIF frames #5333 [zewt, radarhere]
- Use LZW encoding when saving GIF images #5291 [raygard]
- Set all transparent colors to be equal in quantize() #5282 [radarhere]
- Allow PixelAccess to use Python int when parsing x and y #5206 [radarhere]
- Removed Image._MODEINFO #5316 [radarhere]
- Add preserve_tone option to autocontrast #5350 [elejke, radarhere]
- Fixed linear_gradient and radial_gradient I and F modes #5274 [radarhere]
- Add support for reading TIFFs with PlanarConfiguration=2 #5364 [kkopachev, wiredfool, nulano]
- Deprecated categories #5351 [radarhere]
- Do not premultiply alpha when resizing with Image.NEAREST resampling #5304 [nulano]
- Dynamically link FriBiDi instead of Raqm #5062 [nulano]
- Allow fewer PNG palette entries than the bit depth maximum when saving #5330 [radarhere]
- Use duration from info dictionary when saving WebP #5338 [radarhere]
- Stop flattening EXIF IFD into getexif() #4947 [radarhere, kkopachev]

... (truncated)

Commits

- e0e353c 8.2.0 version bump
- ee635be Merge pull request #5377 from hugovk/security-and-release-notes
- 694c84f Fix typo [ci skip]
- 8febdad Review, typos and lint
- fea4196 Reorder, roughly alphabetic
- 496245a Fix BLP DOS -- CVE-2021-28678
- 22e9bee Fix DOS in PSDImagePlugin -- CVE-2021-28675
- ba65f0b Fix Memory DOS in ImageFont
- bb6c11f Fix FLI DOS -- CVE-2021-28676
- 5a5e6db Fix EPS DOS on _open -- CVE-2021-28677
- Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=8.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-06-08T23:16:57Z", + "updated_at": "2021-09-08T03:05:31Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #131.", + "created_at": "2021-09-08T03:05:30Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 8.2.0 in /simba", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 8.2.0.\n
Release notes

Sourced from pillow's releases.

8.2.0

https://pillow.readthedocs.io/en/stable/releasenotes/8.2.0.html

Changes / Dependencies / Deprecations: ... (truncated)

Changelog

Sourced from pillow's changelog.

8.2.0 (2021-04-01)

- Added getxmp() method #5144 [UrielMaD, radarhere]
- Add ImageShow support for GraphicsMagick #5349 [latosha-maltba, radarhere]
- Do not load transparent pixels from subsequent GIF frames #5333 [zewt, radarhere]
- Use LZW encoding when saving GIF images #5291 [raygard]
- Set all transparent colors to be equal in quantize() #5282 [radarhere]
- Allow PixelAccess to use Python int when parsing x and y #5206 [radarhere]
- Removed Image._MODEINFO #5316 [radarhere]
- Add preserve_tone option to autocontrast #5350 [elejke, radarhere]
- Fixed linear_gradient and radial_gradient I and F modes #5274 [radarhere]
- Add support for reading TIFFs with PlanarConfiguration=2 #5364 [kkopachev, wiredfool, nulano]
- Deprecated categories #5351 [radarhere]
- Do not premultiply alpha when resizing with Image.NEAREST resampling #5304 [nulano]
- Dynamically link FriBiDi instead of Raqm #5062 [nulano]
- Allow fewer PNG palette entries than the bit depth maximum when saving #5330 [radarhere]
- Use duration from info dictionary when saving WebP #5338 [radarhere]
- Stop flattening EXIF IFD into getexif() #4947 [radarhere, kkopachev]

... (truncated)

Commits

- e0e353c 8.2.0 version bump
- ee635be Merge pull request #5377 from hugovk/security-and-release-notes
- 694c84f Fix typo [ci skip]
- 8febdad Review, typos and lint
- fea4196 Reorder, roughly alphabetic
- 496245a Fix BLP DOS -- CVE-2021-28678
- 22e9bee Fix DOS in PSDImagePlugin -- CVE-2021-28675
- ba65f0b Fix Memory DOS in ImageFont
- bb6c11f Fix FLI DOS -- CVE-2021-28676
- 5a5e6db Fix EPS DOS on _open -- CVE-2021-28677
- Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=8.2.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-06-08T20:48:25Z", + "updated_at": "2021-09-08T01:35:49Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #130.", + "created_at": "2021-09-08T01:35:48Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Could it be possible to run SimBA on google Colab?", + "body": "Hi,\r\n\r\nSince I do not have a GPU on my laptop, could it be possible to run Simba on google Colab? Or could I run it on a laptop without GPU?\r\n\r\nI am using macOS Catalina 10.15.7.\r\n\r\nI have tried to install SimBA on both macOS and windows, however, there was error messages regarding the GPU.\r\n\r\nIn macOS, when I run \r\npip install simba-uw-tf\r\n\r\nI got:\r\n Could not find a version that satisfies the requirement tensorflow-gpu==1.14.0 (from simba-uw-tf) (from versions: )\r\nNo matching distribution found for tensorflow-gpu==1.14.0 (from simba-uw-tf)\r\n\r\nHowever, on Windows, I could successfully install Simba in conda env, but could not run it. The error message is:\r\n\r\nImportError: Could not find 'nvcuda.dll'. TensorFlow requires that this DLL be installed in a directory that is named in your %PATH% environment variable. Typically it is installed in 'C:\\Windows\\System32'. If it is not present, ensure that you have a CUDA-capable GPU with the correct driver installed.", + "user": "Virginia9733", + "reaction_cnt": 0, + "created_at": "2021-06-05T11:31:55Z", + "updated_at": "2021-06-24T18:19:14Z", + "author": "Virginia9733", + "comments": [ + { + "body": "Hi @Virginia9733 - if you are running SimBA on a Mac, you will have to use the `simba-uw-tf-dev` version. You can install it via `pip install simba-uw-tf-dev`. \r\n\r\nDespite the `tf` in the package name, `simba-uw-tf-dev` does **not** use TensorFlow, and does not support the [in-built DLC interface](https://github.com/sgoldenlab/simba/blob/master/docs/Multi_animal_pose.md) or the [in-built DeepPoseKit interface](https://github.com/sgoldenlab/simba/blob/master/docs/DeepPoseKit_in_SimBA.md). \r\n\r\nTherefor, you will have to generate your pose-estimation inside the pose-estimation packages and their interfaces (for example DLC on colab) - and then import the analyzed CSV or H5 files into the SimBA project created by the `simba-uw-tf-dev` package. \r\n\r\nLet me know if that makes sense!", + "created_at": "2021-06-09T16:39:55Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for your reply. 
\r\nI tried to install simba-uw-tf-dev in macOS (macOS Catalina 10.15.7, withouth GPU), but still got the error message:\r\n(base) VirginiaLideMacBook-Air:~ virginiali$ conda activate simbaenv\r\n(simbaenv) VirginiaLideMacBook-Air:~ virginiali$ pip install simba-uw-tf-dev\r\nCollecting simba-uw-tf-dev\r\n Using cached https://files.pythonhosted.org/packages/dc/f7/9106e8a4ed067af1cf9e62016aabf236507c17af84ab7010f8072da0c9bd/Simba_UW_tf_dev-0.84.2-py3-none-any.whl\r\nCollecting yellowbrick==0.9.1 (from simba-uw-tf-dev)\r\n Using cached https://files.pythonhosted.org/packages/d8/e8/125204ea84a7424a3237556e8dfaec9fee21f2e3d5b3695eb9ce355bf668/yellowbrick-0.9.1-py2.py3-none-any.whl\r\nCollecting opencv-python==3.4.5.20 (from simba-uw-tf-dev)\r\n Could not find a version that satisfies the requirement opencv-python==3.4.5.20 (from simba-uw-tf-dev) (from versions: 3.4.10.37, 3.4.11.39, 3.4.11.41, 3.4.11.43, 3.4.11.45, 3.4.13.47, 3.4.14.51, 3.4.14.53, 4.3.0.38, 4.4.0.40, 4.4.0.42, 4.4.0.44, 4.4.0.46, 4.5.1.48, 4.5.2.52, 4.5.2.54)\r\nNo matching distribution found for opencv-python==3.4.5.20 (from simba-uw-tf-dev)\r\nYou are using pip version 19.0.1, however version 21.1.2 is available.\r\nYou should consider upgrading via the 'pip install --upgrade pip' command.\r\n\r\n\r\nThen I tried to install simba-uw-tf-dev in windows (Windows 10 64-bit, without GPU.), I suppose I have successfully installed simba, but could not run it.\r\n\r\n(simbaenv) C:\\WINDOWS\\system32>simba\r\nTraceback (most recent call last):\r\n File \"w:\\anaconda\\anaconda3\\envs\\simbaenv\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"w:\\anaconda\\anaconda3\\envs\\simbaenv\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"W:\\Anaconda\\Anaconda3\\envs\\simbaenv\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"w:\\anaconda\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 45, in \r\n from simba.train_model_2 import *\r\n File \"w:\\anaconda\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\train_model_2.py\", line 12, in \r\n from xgboost.sklearn import XGBClassifier\r\n File \"C:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\__init__.py\", line 11, in \r\n from .core import DMatrix, Booster\r\n File \"C:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\core.py\", line 161, in \r\n _LIB = _load_lib()\r\n File \"C:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\core.py\", line 123, in _load_lib\r\n lib_paths = find_lib_path()\r\n File \"C:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\libpath.py\", line 48, in find_lib_path\r\n 'List of candidates:\\n' + ('\\n'.join(dll_path)))\r\nxgboost.libpath.XGBoostLibraryNotFound: Cannot find XGBoost Library in the candidate path, did you install compilers and run build.sh in root path?\r\nList of 
candidates:\r\nC:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\xgboost.dll\r\nC:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\../../lib/xgboost.dll\r\nC:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\./lib/xgboost.dll\r\nw:\\anaconda\\anaconda3\\envs\\simbaenv\\xgboost\\xgboost.dll\r\nC:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\../../windows/x64/Release/xgboost.dll\r\nC:\\Users\\yongz\\AppData\\Roaming\\Python\\Python36\\site-packages\\xgboost\\./windows/x64/Release/xgboost.dll", + "created_at": "2021-06-13T07:49:47Z", + "author": "Virginia9733" + }, + { + "body": "I have solved the error in windows, by copy the xgboost.dll into Python36\\site-packages\\xgboost\\, then it works like a charm, I could open SimBA GUI now, extremely happy.\r\n\r\nHowever, still no clues to deal with the macOS error.\r\nBesides, since it takes ages to run without GPU, is it possible to run SimBA on colab witout GUI?\r\n\r\nAny helo would be much appreciated!", + "created_at": "2021-06-13T08:21:42Z", + "author": "Virginia9733" + }, + { + "body": "Please try to download the latest simba (development version) and I believe the macOS issue should be fixed.\r\n\r\n`pip install simba-uw-tf-dev`\r\n\r\nI believe you can run Deeplabcut using colab. However, SimBA's model does not require tensorflow and will not require a GPU to train the model.", + "created_at": "2021-06-14T20:42:16Z", + "author": "inoejj" + } + ] + }, + { + "title": "error when I started to train the model", + "body": "**Describe the bug**\r\nHello guys, thanks for your creating the software,it 's wonderful.However,I met problems when I used DLC to train the model,it poped up this error.\r\n```\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting >: AssertionError: Bad argument number for Name: 3, expecting 4\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting >: AssertionError: Bad argument number for Name: 3, expecting 4\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting >: AssertionError: Bad argument number for Name: 3, expecting 4\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting >: AssertionError: Bad argument number for Name: 3, expecting 4\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting >: AssertionError: Bad argument number for Name: 3, expecting 4\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. 
When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting >: AssertionError: Bad argument number for Name: 3, expecting 4\r\n[... the AutoGraph warning above repeats many times; duplicates omitted ...]\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting >: AttributeError: module 'gast' has no attribute 'Index'\r\nWARNING:tensorflow:Entity > could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. 
Cause: converting >: AttributeError: module 'gast' has no attribute 'Index'\r\nWARNING:tensorflow:From c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\tensorflow\\python\\ops\\nn_impl.py:180: add_dispatch_support..wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\r\nInstructions for updating:\r\nUse tf.where in 2.0, which has the same broadcast rule as np.where\r\nWARNING:tensorflow:From c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\pose_estimation_tensorflow\\nnet\\losses.py:38: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\r\nInstructions for updating:\r\nUse `tf.cast` instead.\r\nWARNING:tensorflow:From c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\tensorflow\\python\\training\\saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\r\nInstructions for updating:\r\nUse standard file APIs to check for files with this prefix.\r\n2021-06-03 09:42:44.629801: E tensorflow/stream_executor/cuda/cuda_driver.cc:1003] failed to synchronize the stop event: CUDA_ERROR_LAUNCH_FAILED: unspecified launch failure\r\n2021-06-03 09:42:44.630385: E tensorflow/stream_executor/gpu/gpu_timer.cc:55] Internal: error destroying CUDA event in context 0x21604ca9bc0: CUDA_ERROR_LAUNCH_FAILED: unspecified launch failure\r\n2021-06-03 09:42:44.631730: E tensorflow/stream_executor/gpu/gpu_timer.cc:60] Internal: error destroying CUDA event in context 0x21604ca9bc0: CUDA_ERROR_LAUNCH_FAILED: unspecified launch failure\r\n2021-06-03 09:42:44.632350: F tensorflow/stream_executor/cuda/cuda_dnn.cc:189] Check failed: status == CUDNN_STATUS_SUCCESS (7 vs. 0)Failed to set cuDNN stream.\r\n```\r\nMaybe there is something wrong with my CUDA, but I don't know how to solve it.\r\nI'm sure I have installed CUDA as well as cuDNN, and the PATH was set up before too. In cmd, `nvcc --Version` shows its version.\r\nMy GPU is an NVIDIA GTX 850M and works well.\r\nCould you tell me how to solve it? Thanks!\r\n", + "user": "Soengzaak", + "reaction_cnt": 0, + "created_at": "2021-06-03T02:29:31Z", + "updated_at": "2021-06-04T02:13:33Z", + "author": "Soengzaak", + "comments": [ + { + "body": "Hello @Soengzaak - make sure you have the right cudnn/cuda/tensorflow version combination for your operating system. Which versions of those packages do you currently have installed? This table is very helpful to know which versions work together https://stackoverflow.com/questions/50622525/which-tensorflow-and-cuda-version-combinations-are-compatible", + "created_at": "2021-06-03T03:33:43Z", + "author": "sgoldenlab" + }, + { + "body": "> Hello @Soengzaak - make sure you have the right cudnn/cuda/tensorflow version combination for your operating system. Which versions of those packages do you currently have installed? This table is very helpful to know which versions work together https://stackoverflow.com/questions/50622525/which-tensorflow-and-cuda-version-combinations-are-compatible\r\n\r\nHi, my CUDA is 10.0 and cuDNN is 7.4. My GPU is an NVIDIA GTX 850M and the OS is Win10. TensorFlow is 1.14.\r\n\r\nSo, what could I do next to solve this? Thank you.", + "created_at": "2021-06-03T07:41:35Z", + "author": "Soengzaak" + }, + { + "body": "@Soengzaak - just to check, have you been able to load data onto the GPU outside of SimBA, for example in the DLC GUI or other pose package? 
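A quick way to test the cudnn/CUDA/TensorFlow pairing that this thread circles around is to query TensorFlow directly (a minimal sketch, assuming a TensorFlow 1.x install like the `simba` environment above):

```python
# Sanity-check the TensorFlow/GPU stack from inside the SimBA environment.
import tensorflow as tf

print(tf.__version__)                # e.g. 1.14.0
print(tf.test.is_built_with_cuda())  # True if this TF build expects CUDA
print(tf.test.is_gpu_available())    # False typically signals a CUDA/cuDNN mismatch
```

If the last call returns False (or crashes with a CUDA error), the installed CUDA/cuDNN versions likely do not match what the TensorFlow wheel was built against.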
We recommend using their interface if possible. It seems to be a cudnn error; I have never worked with the NVIDIA GTX 850M so I'm sorry I can't be much help ", + "created_at": "2021-06-04T02:13:33Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Error on \"Extract the frames\"", + "body": "**Describe the bug**\r\nHi guys, I started the software normally and used DLC to load the project. But when I tried to extract the frames in it, it popped up an error in the SimBA GUI:\r\n```\r\nC:/Users/tearc/Desktop/Project/f-ff-2021-06-01/config.yaml automatic uniform 1 50 False False\r\nConfig file read successfully.\r\nImageio: 'ffmpeg-win32-v3.2.4.exe' was not found on your computer; downloading it now.\r\nError while fetching file: .\r\nError while fetching file: .\r\nError while fetching file: .\r\nError while fetching file: .\r\nFail to extract frames, please make sure all the information is filled in\r\n```\r\n\r\n**I'm sure I have installed FFmpeg according to the tutorials, step by step. In PowerShell I entered ` ffmpeg --version` and it could show its version.**\r\nSo, is there anything I could do to solve it?\r\nThank you!\r\n\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [win10]\r\n - Python Version [3.6]\r\n - Are you using anaconda? yes\r\n \r\n\r\n\r\n", + "user": "Soengzaak", + "reaction_cnt": 0, + "created_at": "2021-06-01T14:51:56Z", + "updated_at": "2021-06-03T01:44:34Z", + "author": "Soengzaak", + "comments": [ + { + "body": "When you import the videos to the project, did you check the copy videos box or did you leave it unchecked? Do you want to try making a new project and check the box to make sure it copies the videos.\r\n\r\nPlease make sure to add ffmpeg to the Windows environment path as well.\r\n\r\nIf you need me to zoom and walk you through this please let me know.\r\n\r\nBest,\r\nJJ", + "created_at": "2021-06-01T22:09:17Z", + "author": "inoejj" + }, + { + "body": "> When you import the videos to the project, did you check the copy videos box or did you leave it unchecked? 
Do you want to try making a new project and check the box to make sure it copies the videos.\r\n> \r\n> Please make sure to add ffmpeg to the Windows environment path as well.\r\n> \r\n> If you need me to zoom and walk you through this please let me know.\r\n> \r\n> Best,\r\n> JJ\r\n\r\nThanks for your attention. It was indeed a PATH problem; I have solved it. Thank you!", + "created_at": "2021-06-03T01:44:34Z", + "author": "Soengzaak" + } + ] + }, + { + "title": "NameError: name 'deeplabcut' is not defined", + "body": "**Describe the bug**\r\nHi, thanks for your attention. I've just started using SimBA, but I met this problem when I started the DLC software, and it popped up this:\r\n\r\n(simba) C:\\Users\\tearc>simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 1400, in createprojectcommand\r\n config_path = deeplabcut.create_new_project(str(projectname), str(experimentalname), videolist,working_directory=str(self.folderpath1selected.folder_path), copy_videos=copyvid)\r\nNameError: name 'deeplabcut' is not defined\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 1400, in createprojectcommand\r\n config_path = deeplabcut.create_new_project(str(projectname), str(experimentalname), videolist,working_directory=str(self.folderpath1selected.folder_path), copy_videos=copyvid)\r\nNameError: name 'deeplabcut' is not defined\r\nException ignored in: \r\nAttributeError: 'StdRedirector' object has no attribute 'flush'\r\n\r\n**After searching these Issues, I entered 'pip install deeplabcut==2.0.9', and then there was another error and SimBA could not even start!**\r\n\r\n(simba) C:\\Users\\tearc>simba\r\nTraceback (most recent call last):\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\tearc\\anaconda3\\envs\\simba\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 44, in \r\n from simba.train_multiple_models_from_meta import *\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\train_multiple_models_from_meta.py\", line 10, in \r\n from imblearn.combine import SMOTEENN\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\imblearn\\__init__.py\", line 34, in \r\n from . 
import combine\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\imblearn\\combine\\__init__.py\", line 5, in \r\n from ._smote_enn import SMOTEENN\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\imblearn\\combine\\_smote_enn.py\", line 10, in \r\n from ..base import BaseSampler\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\imblearn\\base.py\", line 16, in \r\n from .utils import check_sampling_strategy, check_target_type\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\imblearn\\utils\\__init__.py\", line 7, in \r\n from ._validation import check_neighbors_object\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\imblearn\\utils\\_validation.py\", line 13, in \r\n from sklearn.neighbors._base import KNeighborsMixin\r\nModuleNotFoundError: No module named 'sklearn.neighbors._base'\r\n\r\nDo you have any ideas? Please help me, thanks!\r\n\r\n\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [win10]\r\n - Python Version [3.6.13]\r\n - Are you using anaconda? yes\r\n \r\n\r\n", + "user": "Soengzaak", + "reaction_cnt": 0, + "created_at": "2021-05-28T12:31:58Z", + "updated_at": "2021-06-01T14:45:22Z", + "author": "Soengzaak", + "comments": [ + { + "body": "Hi, thank you for opening up an issue. \r\n\r\nPlease follow the steps below and see if that still gives you an error.\r\n\r\nThis will install SimBA with built-in deeplabcut.\r\n\r\n1. Downgrade pip to version 19.0.1 by `pip install pip==19.0.1`\r\n2. Install the simba tensorflow version, `pip install simba-uw-tf==1.3.12`\r\n3. It will error out because of shapely. Uninstall shapely by `pip uninstall shapely`\r\n4. 
Then reinstall shapely with the conda command: `conda install -c conda-forge shapely`", + "created_at": "2021-05-29T01:01:15Z", + "author": "sgoldenlab" + }, + { + "body": "> Hi, thank you for opening up an issue.\r\n> \r\n> Please follow the steps below and see if that still gives you an error.\r\n> \r\n> This will install SimBA with built-in deeplabcut.\r\n> \r\n> 1. Downgrade pip to version 19.0.1 by `pip install pip==19.0.1`\r\n> 2. Install the simba tensorflow version, `pip install simba-uw-tf==1.3.12`\r\n> 3. It will error out because of shapely. Uninstall shapely by `pip uninstall shapely`\r\n> 4. Then reinstall shapely with the conda command: `conda install -c conda-forge shapely`\r\n\r\nThanks for your reply :) I did as you said, but SimBA was still unable to start and then popped up an error like this:\r\n\r\n```(simba) C:\\Users\\tearc>simba\r\nTraceback (most recent call last):\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\tensorflow\\python\\platform\\self_check.py\", line 75, in preload_check\r\n ctypes.WinDLL(build_info.cudart_dll_name)\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\ctypes\\__init__.py\", line 348, in __init__\r\n self._handle = _dlopen(self._name, mode)\r\nOSError: [WinError 126] The specified module could not be found.\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\tearc\\anaconda3\\envs\\simba\\Scripts\\simba.exe\\__main__.py\", line 5, in \r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 8, in \r\n import deeplabcut\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\__init__.py\", line 29, in \r\n from deeplabcut import generate_training_dataset\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\generate_training_dataset\\__init__.py\", line 19, in \r\n from deeplabcut.generate_training_dataset.labeling_toolbox import *\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\generate_training_dataset\\labeling_toolbox.py\", line 27, in \r\n from deeplabcut.utils import auxiliaryfunctions\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\utils\\__init__.py\", line 1, in \r\n from deeplabcut.utils.make_labeled_video import *\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\utils\\make_labeled_video.py\", line 38, in \r\n from deeplabcut.pose_estimation_tensorflow.config import load_config\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\pose_estimation_tensorflow\\__init__.py\", line 13, in \r\n from deeplabcut.pose_estimation_tensorflow.nnet import *\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\pose_estimation_tensorflow\\nnet\\__init__.py\", line 14, in \r\n from deeplabcut.pose_estimation_tensorflow.nnet.losses import *\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\deeplabcut\\pose_estimation_tensorflow\\nnet\\losses.py\", line 5, in \r\n import tensorflow as tf\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\tensorflow\\__init__.py\", line 28, in \r\n from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\tensorflow\\python\\__init__.py\", line 49, in \r\n from tensorflow.python import pywrap_tensorflow\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\tensorflow\\python\\pywrap_tensorflow.py\", line 30, in \r\n self_check.preload_check()\r\n File \"c:\\users\\tearc\\anaconda3\\envs\\simba\\lib\\site-packages\\tensorflow\\python\\platform\\self_check.py\", line 82, in preload_check\r\n % (build_info.cudart_dll_name, 
build_info.cuda_version_number))\r\nImportError: Could not find 'cudart64_100.dll'. TensorFlow requires that this DLL be installed in a directory that is named in your %PATH% environment variable. Download and install CUDA 10.0 from this URL: https://developer.nvidia.com/cuda-90-download-archive.\r\n```\r\nI have just guessed that maybe I need to install the CUDA Runtime, therefore I downloaded it from the website above and installed it. However, it popped up this error again in Anaconda. Maybe I need to set the PATH? But how to do it? Please help me, thank you!\r\n", + "created_at": "2021-05-29T10:20:22Z", + "author": "Soengzaak" + }, + { + "body": "Hello @Soengzaak! This is cudnn related. We are trying to move away from supporting pose-estimation within SimBA. To fix this, either:\r\n\r\n* Use simba-uw-tf-dev instead of simba-uw-tf and do the pose-estimation through the DLC or other pose-estimation interface, not within the SimBA GUI. \r\n* Make sure you have compatible versions of cudnn and CUDA installed - there are some notes to help get the right versions installed [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#4-simba-wont-start-and-there-is-gpu-related-errors-such-as-importerror-could-not-find-cudart64_100dll) and [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/pose_on_rtx30x.md)\r\n", + "created_at": "2021-05-29T14:31:14Z", + "author": "sgoldenlab" + }, + { + "body": "> Hello @Soengzaak! This is cudnn related. We are trying to move away from supporting pose-estimation within SimBA. To fix this, either:\r\n> \r\n> * Use simba-uw-tf-dev instead of simba-uw-tf and do the pose-estimation through the DLC or other pose-estimation interface, not within the SimBA GUI.\r\n> * Make sure you have compatible versions of cudnn and CUDA installed - there are some notes to help get the right versions installed [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/FAQ.md#4-simba-wont-start-and-there-is-gpu-related-errors-such-as-importerror-could-not-find-cudart64_100dll) and [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/pose_on_rtx30x.md)\r\n\r\nMany thanks to you! I set up the PATH again and finally solved it. Now I can start the software normally, thank you! XD", + "created_at": "2021-06-01T14:45:19Z", + "author": "Soengzaak" + } + ] + }, + { + "title": "Update README.md", + "body": "Add close bracket and fix formatting", + "user": "JustinShenk", + "reaction_cnt": 0, + "created_at": "2021-05-27T20:28:14Z", + "updated_at": "2021-05-27T22:58:19Z", + "author": "JustinShenk", + "comments": [] + }, + { + "title": "Create CITATION", + "body": "Great project!", + "user": "JustinShenk", + "reaction_cnt": 0, + "created_at": "2021-05-27T20:23:53Z", + "updated_at": "2021-05-27T22:57:17Z", + "author": "JustinShenk", + "comments": [] + }, + { + "title": "No module found despite the file being found in the correct directory", + "body": "**Describe the bug**\r\nEncountered an error, ModuleNotFoundError: No module named 'simba.sklearn_plot_scripts.plot_sklearn_results_2_single', despite the \"plot_sklearn_results_2_single\" being located in the \"sklearn_plot_scripts\" folder.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open simba through Visual Studio.\r\n2. Locate the \"SimBA.py\" file\r\n3. 
Run it\r\n\r\n**Screenshots**\r\n![image](https://user-images.githubusercontent.com/76238646/119286826-742d1480-bc02-11eb-8c1d-81751f7a8d64.png)\r\n\r\n![image](https://user-images.githubusercontent.com/76238646/119286865-8909a800-bc02-11eb-8766-8d00c49fb435.png)\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows 10\r\n - Python 3.6.0 64-bit\r\n - No anaconda\r\n \r\n\r\n**Additional context**\r\nFor context, I am looking at editing some of the GUI code.\r\n", + "user": "joseph-yclam", + "reaction_cnt": 0, + "created_at": "2021-05-24T02:10:45Z", + "updated_at": "2021-08-26T20:20:24Z", + "author": "joseph-yclam", + "comments": [ + { + "body": "Hello @joseph-yclam! I think - if you run SimBA through an IDE like VS - the working directory changes, so `plot_sklearn_results_2_single.py` is at `sklearn_plot_scripts.plot_sklearn_results_2_single.py` and **not** `simba.sklearn_plot_scripts.plot_sklearn_results_2_single.py` - could you try and remove the `simba` directory part and see if that fixes it in VS? There will be a fair few `simba` to remove. ", + "created_at": "2021-05-24T13:28:40Z", + "author": "sgoldenlab" + }, + { + "body": "Hello @sgoldenlab ! Thank you, I tried removing the simba directory part and it worked! However, I am still encountering a ModuleNotFoundError:\r\n\r\n![image](https://user-images.githubusercontent.com/76238646/119409274-48626b00-bca4-11eb-87b5-53e182cfa0a6.png)\r\n\r\nEven when I remove the simba portion, it is still unable to find the module \"import_solomon\"?\r\n\r\n![image](https://user-images.githubusercontent.com/76238646/119409377-75af1900-bca4-11eb-96d7-b86754572f64.png)\r\n", + "created_at": "2021-05-24T21:26:54Z", + "author": "joseph-yclam" + }, + { + "body": "@joseph-yclam thanks for reporting - the git repo has been lagging behind the pypi packages and some scripts were missing from the repo. If you clone it again the missing files (solomon import etc) should be there. \r\n\r\nThat said, beware: launching the SimBA GUI from the VS IDE and SimBA.py won't work; if you want to launch the GUI, try running the `App.py` located outside the SimBA folder. If, however, you want to play around with the scripts, then yes, use VS. \r\n\r\nAlso, if you want to make sure you work with the latest version of SimBA, you could download SimBA through `pip install simba-uw-tf-dev` and access the files in the `C:\\Python36\\Lib\\site-packages\\simba` (or wherever your Python installation is). ", + "created_at": "2021-05-25T22:24:20Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Tkinter Issue - Blank SimBA GUI", + "body": "\r\nHello @sgoldenlab ! \r\nI am having trouble getting the SimBA GUI to work after installing it via pip install simba-uw-tf-dev. I created a new project; when I try to load the proj_config.ini the GUI window opens, but all the tabs except the ROI are blank (image attached below). 
Here is the error readout in the Anaconda prompt after loading the proj_config.ini:\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/tkinter/__init__.py\", line 1702, in __call__\r\n return self.func(*args)\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 3356, in \r\n launchloadprojectButton = Button(lpMenu,text='Load Project',command=lambda:self.launch(lpMenu,inputcommand))\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 3367, in launch\r\n command(self.projectconfigini.file_path)\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 3739, in __init__\r\n self.bp1 = DropDownMenu(label_heatmap,'Bodypart',bpoptions,'15')\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 769, in __init__\r\n self.popupMenu = OptionMenu(self,self.dropdownvar,*self.choices,command=com)\r\nTypeError: __init__() missing 1 required positional argument: 'value'\r\n\r\n\r\nI also see that this was an issue #71 and tried to see if 'shap' was installed. And it shows me that the requirement is already satisfied. \r\n\r\nHere is the version of SimBA I am currently using:\r\nName: Simba-UW-tf-dev\r\nVersion: 0.77.1\r\n\r\n - OS: Linux\r\n - Python Version 3.7.0\r\n![Screenshot from 2021-05-10 09-58-14](https://user-images.githubusercontent.com/39599243/117627650-ab1e1780-b178-11eb-9e9b-acfb05ba1c72.png)\r\n\r\n\r\n", + "user": "charithaomprakash", + "reaction_cnt": 0, + "created_at": "2021-05-10T08:15:43Z", + "updated_at": "2021-06-29T21:28:35Z", + "author": "charithaomprakash", + "comments": [ + { + "body": "Hello @charithaomprakash! This was a bug in earlier versions of SimBA - can you try to update SimBA and see if the error disappears? Try either `pip install simba-uw-tf-dev --upgrade` or `pip install simba-uw-tf == 0.83.6` and let me know how it goes. \r\n\r\n\r\n\r\n", + "created_at": "2021-05-10T14:41:14Z", + "author": "sgoldenlab" + }, + { + "body": "Hello @sgoldenlab, thank you for the quick response. I tried updating SimBA and the error still appears. 
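For anyone debugging the recurring `TypeError: __init__() missing 1 required positional argument: 'value'` in these reports: it comes from `tkinter.OptionMenu`, whose signature is `OptionMenu(master, variable, value, *values)`, so unpacking an empty choices list leaves the required `value` argument unfilled. A minimal sketch that reproduces it (hypothetical snippet, not SimBA code):

```python
# tkinter.OptionMenu requires at least one choice; unpacking an empty list
# reproduces the TypeError seen in the SimBA tracebacks above.
import tkinter as tk

root = tk.Tk()
var = tk.StringVar(root)

choices = []                               # e.g. no body parts / targets were found
menu = tk.OptionMenu(root, var, *choices)  # TypeError: missing 1 required positional argument: 'value'
```

So a blank tab usually means SimBA ended up with an empty list (body parts, classifier targets, and so on) while building its dropdowns, which is consistent with the version- and environment-related fixes suggested below.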
I also tried installing pip install simba-uw-tf == 0.83.6; unfortunately, I still get the same error.\r\n\r\nHere is the error message:\r\n(SimBA) compraka@compute01:~$ simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/tkinter/__init__.py\", line 1702, in __call__\r\n return self.func(*args)\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 3435, in \r\n launchloadprojectButton = Button(lpMenu,text='Load Project',command=lambda:self.launch(lpMenu,inputcommand))\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 3446, in launch\r\n command(self.projectconfigini.file_path)\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 3866, in __init__\r\n self.bp1 = DropDownMenu(label_heatmap,'Bodypart',bpoptions,'15')\r\n File \"/home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages/simba/SimBA.py\", line 775, in __init__\r\n self.popupMenu = OptionMenu(self,self.dropdownvar,*self.choices,command=com)\r\nTypeError: __init__() missing 1 required positional argument: 'value'\r\n\r\nAnd here is the version info for SimBA:\r\nName: Simba-UW-tf-dev\r\nVersion: 0.83.6\r\nSummary: Toolkit for computer classification of complex social behaviors in experimental animals\r\nHome-page: https://github.com/sgoldenlab/simba\r\nAuthor: Simon Nilsson, Jia Jie Choong, Nastacia Goodwin, Sophia Hwang, Sam Golden\r\nAuthor-email: goldenneurolab@gmail.com\r\nLicense: GNU Lesser General Public License v3 (LGPLv3)\r\nLocation: /home/compraka/anaconda3/envs/SimBA/lib/python3.7/site-packages\r\n\r\n", + "created_at": "2021-05-11T08:36:19Z", + "author": "charithaomprakash" + }, + { + "body": "Thanks for reporting @charithaomprakash - I will look into it. Can you check one more thing before? This could potentially happen if you have multiple versions of SimBA installed. In your SimBA environment, could you try `pip show simba-uw-tf` and make sure nothing is printed? \r\n\r\n", + "created_at": "2021-05-11T14:27:47Z", + "author": "sgoldenlab" + }, + { + "body": "@charithaomprakash, using python 3.6 will solve your issue.", + "created_at": "2021-06-14T20:43:22Z", + "author": "inoejj" + }, + { + "body": "> @charithaomprakash, using python 3.6 will solve your issue.\r\n\r\nI am experiencing the same issue on python 3.6.13. I have also run the update on simba, still nothing. Are there any other solutions? I have created a new environment several times with a fresh install to no avail. ", + "created_at": "2021-06-28T19:11:05Z", + "author": "cameronotto" + }, + { + "body": "@cameronotto , Try using python 3.6.8 or 3.6.10 and see if it will resolve your issue.\r\n\r\nClick on the link below for a step-by-step guide.\r\n\r\nhttps://simba-docs.readthedocs.io/en/latest/docs/intro/installation.html\r\n\r\nIf you are still having trouble installing it please let me know and I will try to fix it asap.\r\n\r\nCheers,\r\nJJ", + "created_at": "2021-06-28T19:31:34Z", + "author": "inoejj" + }, + { + "body": "@inoejj So this did not work again; however, I noticed that I am missing FFmpeg. 
Would this cause this issue, and if so, how should I install this?\r\n\r\nThanks so much,\r\nCO", + "created_at": "2021-06-28T20:14:56Z", + "author": "cameronotto" + }, + { + "body": "Please send me a screenshot of the error message or provide more details so I can help you out.\r\n\r\nThanks,\r\nJJ", + "created_at": "2021-06-28T20:33:29Z", + "author": "inoejj" + }, + { + "body": "This is the error that I get. I will try on both those versions. \r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\mcnuc\\anaconda3\\envs\\simbaenv\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\mcnuc\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 3435, in \r\n launchloadprojectButton = Button(lpMenu,text='Load Project',command=lambda:self.launch(lpMenu,inputcommand))\r\n File \"c:\\users\\mcnuc\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 3446, in launch\r\n command(self.projectconfigini.file_path)\r\n File \"c:\\users\\mcnuc\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 3763, in __init__\r\n self.severityTarget = DropDownMenu(label_severity,'Target',targetlist,'15')\r\n File \"c:\\users\\mcnuc\\anaconda3\\envs\\simbaenv\\lib\\site-packages\\simba\\SimBA.py\", line 775, in __init__\r\n self.popupMenu = OptionMenu(self,self.dropdownvar,*self.choices,command=com)\r\nTypeError: __init__() missing 1 required positional argument: 'value'\r\n", + "created_at": "2021-06-28T20:38:05Z", + "author": "cameronotto" + }, + { + "body": "Which version of simba did you install?\r\n\r\nWhat did you type into the terminal?\r\n\r\n", + "created_at": "2021-06-28T20:41:34Z", + "author": "inoejj" + }, + { + "body": "@inoejj I followed the exact instructions found in that guide; the version of simba installed is 0.84.3.\r\n\r\nI have tried installing developer and non-dev versions and still no luck.", + "created_at": "2021-06-28T20:45:28Z", + "author": "cameronotto" + }, + { + "body": "@inoejj I should also clarify, I am installing on Windows in Anaconda. ", + "created_at": "2021-06-28T20:51:28Z", + "author": "cameronotto" + }, + { + "body": "1. create a new environment\r\n2. ` conda install python=3.6.10`\r\n3. `pip install simba-uw-tf-dev`\r\n4. `pip uninstall shapely`\r\n5. `conda install -c conda-forge shapely`\r\n\r\nPlease follow the steps above and let me know if you still have trouble installing. Or I can do a quick zoom to install it for you :)", + "created_at": "2021-06-28T20:51:30Z", + "author": "inoejj" + }, + { + "body": "@inoejj Thanks for the help, I have tried these steps several times and no luck. Also on 3.6.8. Could it be that I am missing FFmpeg? How can I install this to my environment or to my device so it works with the environment?\r\n\r\nThanks,\r\nCameron", + "created_at": "2021-06-28T21:22:59Z", + "author": "cameronotto" + }, + { + "body": "FFmpeg does not matter. Please follow the steps below and it should work. In the example below, you can change `simbaenv` to any name that you wish. \r\n\r\nElse, please send an email to jchoong@uw.edu and I will just do a quick zoom with you :)\r\n\r\nhttps://simba-docs.readthedocs.io/en/latest/docs/intro/installation.html#install-simba-dev-using-anaconda-does-not-support-dlc", + "created_at": "2021-06-28T21:27:41Z", + "author": "inoejj" + }, + { + "body": "@inoejj If you'd like we can do that, but I have followed these exact instructions several times and no luck. 
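Since several of these exchanges hinge on exactly which SimBA package and version ended up in the environment, the same check can be done from Python when `pip show` output is ambiguous (a small sketch; `pkg_resources` ships with setuptools):

```python
# Report which SimBA distributions are installed in the active environment.
import pkg_resources

for name in ("simba-uw-tf-dev", "simba-uw-tf"):
    try:
        print(name, pkg_resources.get_distribution(name).version)
    except pkg_resources.DistributionNotFound:
        print(name, "not installed")
```

Having both `simba-uw-tf` and `simba-uw-tf-dev` report a version at the same time is the multiple-install situation warned about earlier in this thread.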
Is it possible I should install an older version of SimBA?", + "created_at": "2021-06-29T14:08:11Z", + "author": "cameronotto" + }, + { + "body": "Do you want to type pip show simba-uw-tf-dev and see the version of simba that is installed in your machine?", + "created_at": "2021-06-29T21:24:48Z", + "author": "inoejj" + }, + { + "body": "@inoejj It installs version 0.84.3 of simba-uw-tf-dev", + "created_at": "2021-06-29T21:27:19Z", + "author": "cameronotto" + }, + { + "body": "Please send an email to jchoong@uw.edu and do a quick zoom now.", + "created_at": "2021-06-29T21:28:35Z", + "author": "inoejj" + } + ] + }, + { + "title": "error when trying to upload the H5 file from my DLC files ", + "body": "**Describe the bug**\r\nerror when trying to upload the H5 file from my DLC files \r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. open simba and create a new project using 1 classifier with multi tracking of 2 animals with 4 body parts each\r\n2. add single video \r\n3. trying to import h5 using multi animal model \r\n\r\n\r\n**Screenshots**\r\n![image](https://user-images.githubusercontent.com/79872083/116130666-e06b3580-a6a1-11eb-9789-7995e97d0470.png)\r\n![image](https://user-images.githubusercontent.com/79872083/116130700-e95c0700-a6a1-11eb-8c65-bdf1fedbc592.png)\r\n\r\n**Desktop (please complete the following information):**\r\nusing anaconda with python 3.6 on windows 10\r\n \r\n", + "user": "sinal23", + "reaction_cnt": 0, + "created_at": "2021-04-26T18:15:38Z", + "updated_at": "2021-05-06T18:19:11Z", + "author": "sinal23", + "comments": [ + { + "body": "Additionally, I had a few questions regarding the software.\r\nHow long will it take to build a new classifier database? \r\nIf I do have a classifier database (or imported the Resident-intruder protocol), how long will it approximately take to analyze a 10 minute video? do I still need to extract frames and label behavior and do all the steps for each video if I am using imported classifiers/ trained model?\r\n\r\n\r\nThank you!", + "created_at": "2021-04-26T18:36:27Z", + "author": "sinal23" + }, + { + "body": "Hi @sinal23 , check if it's not the same issue as there: \r\nhttps://github.com/sgoldenlab/simba/issues/104#issuecomment-825952356\r\n\r\n", + "created_at": "2021-04-26T19:28:03Z", + "author": "DorianBattivelli" + }, + { + "body": "hi @DorianBattivelli , I updated to simba-uw-tf-dev, and unfortunately, it sill does not work..", + "created_at": "2021-04-26T19:50:12Z", + "author": "sinal23" + }, + { + "body": "In my case even after the update, I had to change the appended string manually in my H5 title. Did you try this? ", + "created_at": "2021-04-26T19:51:39Z", + "author": "DorianBattivelli" + }, + { + "body": "Hello @sinal23! this error is produced as SimBA can't find this file in your project folder:\r\n![image](https://user-images.githubusercontent.com/50497030/116143594-2e773d80-a690-11eb-8cec-e018bfebbc13.png)\r\n\r\nCould you have a look manually and check if it exists?", + "created_at": "2021-04-26T20:07:20Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab no, the file does not exist. Could I have missed a step?\r\n\r\nI was also wondering if you know the approximate time it takes to create a new classifier model vs the time it takes to analyze videos using the mouse resident intruder model. and if I would need to label behaviors, extract figures ext each time I want to analyze a new video when I use a trained model. I apologize for the many questions, I am a new user. 
\r\nthank you!", + "created_at": "2021-04-26T20:29:49Z", + "author": "sinal23" + }, + { + "body": "Hello @sinal23 - many are new, no worries, I hope we can get it running!\r\n\r\nYes, this file, and your SimBA project folder structure, is created when you Create a project in SimBA, as described here: https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#step-1-generate-project-config, did you click on the `Generate Project Config` button? \r\n\r\nNo, once you have your classifier working, you will not have to extract any frames or label any behaviors. To generate a classifier, however, you will need to annotate frames (unless you have reliable annotations generated in other tools like described [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/third_party_annot.md) )", + "created_at": "2021-04-26T22:52:18Z", + "author": "sgoldenlab" + }, + { + "body": "hello @sgoldenlab, \r\nyes, I followed the steps in that tutorial and created a project. I got a conformation that the project has been generated as followed:\r\n![image](https://user-images.githubusercontent.com/79872083/116161712-a82c1d00-a6ca-11eb-801d-b3048d52900f.png)\r\n\r\n\r\nSo, if I have a classifier working or if I am using the trained model you have provided, can I skip step c (ran analyze models and data) entirely in the simba GUI workflow, and just upload the new videos and the DLC tracks, and run the classifier to visualize the results? ", + "created_at": "2021-04-26T23:10:29Z", + "author": "sinal23" + }, + { + "body": "@sinal23 - if you go to the directory where the project was created: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/116243532-e271da80-a71b-11eb-99ac-4059c6a15845.png)\r\n\r\nIs it there? Was there any error/warning msgs printed in the terminal when you clicked on `Create Project Config`? \r\n\r\n>So, if I have a classifier working or if I am using the trained model you have provided, can I skip step c (ran analyze models and data) entirely in the simba GUI workflow, and just upload the new videos and the DLC tracks, and run the classifier to visualize the results?\r\n\r\nYou have to 1) correct outliers or click to indicate to skip the outlier correction step, and 2) extract features. Once those two steps are complete you can run the pre-generated classifier. If your setup is not identical to the one that was used to generate the classifier, you are likely to have to tinker a little with the [classification thresholds](https://github.com/sgoldenlab/simba/blob/master/docs/tutorial.md#optional-step-before-running-machine-model-on-new-data) to get it right. ", + "created_at": "2021-04-27T12:51:20Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab is that was there, all the folders have been created when I pressed create project, and no errors were shown. The error only showed when I tired importing my files from the DLC files. \r\n![image](https://user-images.githubusercontent.com/79872083/116270999-fc301380-a755-11eb-8019-16041daa619c.png)\r\nthis pathway does exist, however, the file - project_pb_names.csv does not. ", + "created_at": "2021-04-27T15:43:13Z", + "author": "sinal23" + }, + { + "body": "Thanks, you are using user-defined body-parts? I will try and replicate.", + "created_at": "2021-04-27T19:17:19Z", + "author": "sgoldenlab" + }, + { + "body": "@sinal23 - I've been trying to play around trying to recreate what you're seeing - with a project being created while the `project_bp_names` is **not** created, and I can't manage to do it. 
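In the meantime, a quick way to see which expected project files are missing is to walk the tree directly. A minimal sketch (the project root and the exact sub-paths are assumptions based on the screenshots in this thread, so adjust them to your layout):

```python
from pathlib import Path

project = Path("project_folder")  # hypothetical: path to your SimBA project root

expected = [
    project / "project_config.ini",
    project / "csv" / "input_csv",
    project / "logs" / "measures" / "pose_configs" / "bp_names" / "project_bp_names.csv",
]
for p in expected:
    print(f"{p} -> {'OK' if p.exists() else 'MISSING'}")
```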
Could you try and delete the project and start again and see if that does it? First define your pose estimation configuration. \r\n\r\n\r\n![Untitled1](https://user-images.githubusercontent.com/50497030/116499338-ccb9fd80-a860-11eb-9d21-a99691215201.png)\r\n\r\nThen select it from the drop down:\r\n\r\n![Untitled2](https://user-images.githubusercontent.com/50497030/116499362-db081980-a860-11eb-9b80-2906fed9b49d.png)\r\n\r\nThen click `Generate project Config`", + "created_at": "2021-04-29T03:36:17Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Issue to measure distance", + "body": "Hello, \r\n\r\nI finally finished my analysis of ROIs and distance, but while ROIs times seems to be good (coherent given what I can see watching the video), the distance are clearly wrong and even incoherent given the output SimBA gives for the ROIs times. \r\n\r\nFor ex, in this case, I have mouse 1 (lowshaved) that stays during the whole video almost only in compartment 1, while mouse 2 (uppershaved) is roaming freely into the 4 compartments of the device, and indeed, SimBA \"sees\" this:\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/116057634-47c3be00-a67f-11eb-842d-4adcd77c3e0b.png)\r\n\r\nBut when I look at the distances for each mouse, mouse 1 performed a huge distance compared to mouse 2:\r\n\r\n![Untitled2](https://user-images.githubusercontent.com/66886884/116068637-beb28400-a68a-11eb-9366-b83fd4ca7abc.png)\r\n\r\nSo, it seems that mouse 2 walked only 4 cm while he was continuously walking for 10 min. It should be the opposite, but in any case, both walked far more than 4 cm.\r\nI also looked at the movement within ROIs, and the results are totally differents: \r\n\r\n![Untitled3](https://user-images.githubusercontent.com/66886884/116073355-97f74c00-a690-11eb-9ef4-a129974a5405.png)\r\n\r\nThe cells, from left to right show movement in compartment 1 to 4 for mouse 1, and for mouse 2. \r\nThese data fit more what I observe (overall mouse 2 walked more than mouse 1), but still, it seems that mouse 2 walked only 3 meters overall which is no true, he walked much more for sure (our device is 2.5 meter long and it crossed it several times). \r\n\r\nFinally while mouse 1 never entered compartment 4, SimBA quantified a non-null movments / distance there. I don't understand cause, after having corrected the ID swaps on DLC, there is no more frame with mouse 1 in the compartment 4. 
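For reference, total distance in this kind of analysis is the sum of frame-to-frame Euclidean displacements of one tracked body part, scaled from pixels to real units — which is also why scrambled column headers (x/y pairs attributed to the wrong animal) silently produce nonsense per-animal numbers. A minimal sketch, with hypothetical column names:

```python
import numpy as np
import pandas as pd

def total_distance_cm(df: pd.DataFrame, x_col: str, y_col: str, px_per_mm: float) -> float:
    """Sum per-frame Euclidean displacement of one body part and convert px -> cm."""
    step_px = np.sqrt(df[x_col].diff() ** 2 + df[y_col].diff() ** 2).fillna(0)
    return float(step_px.sum() / px_per_mm / 10)  # px -> mm -> cm

# Hypothetical usage on a tracking CSV:
# df = pd.read_csv("CD1_1-ph2-1-1.csv")
# print(total_distance_cm(df, "Mouse_2_center_x", "Mouse_2_center_y", px_per_mm=1.5))
```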
\r\n\r\nWould you have a suggestion to get a reliable quantification of distance for each mouse?\r\n\r\nThank you, \r\nBest,\r\nDorian ", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2021-04-26T11:22:21Z", + "updated_at": "2021-05-03T19:03:16Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Thanks @DorianBattivelli - this sounds like an issue that was reported and fixed on April 14th on Gitter, it related to the headers being scrambled up - animal 2 did not move 4cm, it's likely to be the animals velocity - headers where in the wrong order and I fixed it but here it seems like it may be introduced again - can you make sure you are running the latest version of simba-uw-tf-dev before I check how the bug could have been reintroduced again?\r\n", + "created_at": "2021-04-26T14:27:49Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks @sgoldenlab , \r\n\r\nSure, how can I check the version that is installed?", + "created_at": "2021-04-26T14:33:46Z", + "author": "DorianBattivelli" + }, + { + "body": "As far as I remember, the last upgrade I installed was 0.83.3\r\n", + "created_at": "2021-04-26T14:35:07Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - if you type `pip show simba-uw-tf-dev` you should see version 0.83.3. \r\n\r\nAlso, if you if you type `pip show simba-uw-tf` there should be nothing printed out (this to make sure you don't have two versions of SimBA installed) ", + "created_at": "2021-04-26T14:52:57Z", + "author": "sgoldenlab" + }, + { + "body": "All seems to be good:\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/116103933-55de0280-a6b0-11eb-93dd-16b2504485c0.png)\r\n", + "created_at": "2021-04-26T14:56:28Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli - sorry there will be some delay in looking into this, I will try to replicate what you are seeing but it will have to be later this week - a lot going on. Am I right in that you have 4 rectangle ROIs, and 2 mice?", + "created_at": "2021-04-27T02:56:34Z", + "author": "sgoldenlab" + }, + { + "body": "Hello @sgoldenlab , no problem, thank you for the support! \r\nYes, 4 rectangle ROIs, and 2 mice are good, \r\n\r\nBest!", + "created_at": "2021-04-27T08:51:11Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli - I am not sure what was going on here with the headers.. might have been some version number clashes. \r\n\r\nCan you update to simba-uw-tf-dev==0.83.4 and see if the numbers make more sense now? \r\n\r\n", + "created_at": "2021-05-02T21:09:05Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab, I think it solved the issue, thank you!\r\n\r\nBest", + "created_at": "2021-05-03T17:52:36Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Issue when defining identities", + "body": "Hello!\r\n\r\nEverything worked well from yesterday, until this new project for which I have a problem when importing the h5 file for one video. 
The previous h5 files for the other videos worked well (I assigned successfully the IDs) but when the program switches to this video, I get a grey screen with error msg in terminal: \r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/115957236-669c4600-a501-11eb-89d0-37f46909d951.png)\r\n\r\nWould you have a suggestion to solve this?\r\nThank you, \r\nBest, \r\nDorian ", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2021-04-24T11:32:15Z", + "updated_at": "2021-04-24T21:37:56Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Thanks @DorianBattivelli! I'm not sure why the body-part tracking would be placed at infinity, so watch out of there is something odd going on at the pose-estimation end. \r\n\r\nI've inserted a fix that replaces the inf's (like the NaN's) with zeros which should prevent SimBA from stopping, but the first frame is likely to look a little odd go pass that when when assigning identities. \r\n\r\nUpdate SimBa to version 0.83.3 and please let me know if it works - `pip install simba-uw-tf-dev --upgrade`\r\n\r\n", + "created_at": "2021-04-24T15:57:40Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you again @sgoldenlab , it works now !", + "created_at": "2021-04-24T19:58:22Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Heat maps for each individual.", + "body": "Hello, it is me again.\r\n\r\nJust one small question: it seems that I cannot generate a heat map for my second mouse since only the body parts belonging to mouse 1 appear in the drop-down menu: \r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/115939676-1f7d6900-a49f-11eb-86a1-6d317808126b.png)\r\n\r\nIs there a way to generate a map for mouse 2?\r\n\r\nThank you, \r\nBest, \r\nDorian", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2021-04-23T23:49:00Z", + "updated_at": "2021-04-24T15:24:06Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - again many thanks for reporting, much appreciated - keep em coming please. Can you update simba - `pip install simba-uw-tf-dev --upgrade` (version 0.83.2) and let me know if it works now?\r\n\r\nI'm very aware that these heatmaps aren't the pretties.. would probably take some post-editing to make them presentable, this function have not been a priority. ", + "created_at": "2021-04-24T03:47:03Z", + "author": "sgoldenlab" + }, + { + "body": "Hello @sgoldenlab , I'm happy to help you in the development of SimBA which is an amazing tool. \r\nI upgraded the version and can now generate heat maps for both animals, thank you !\r\n\r\nBest, \r\nDorian \r\n", + "created_at": "2021-04-24T08:54:21Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Outlier correction - error", + "body": "Hello,\r\n\r\nI run a multi animal (2 mice) project analyzed with deeplabcut 2.1.10.2.\r\nI now intend to proceed to ROI analysis on Simba 1.3.5.\r\nThe following issue appears when I try to run the Outlier correction:\r\n\r\n \r\n![SimbaBug](https://user-images.githubusercontent.com/66886884/115903098-7bc29780-a463-11eb-8cf4-65169807e764.png)\r\n\r\nFor the body parts of my 2 animals, I selected center and tail base, and followed your recommendations with 1.5 for location criterion and 1 for movement criterion. \r\n\r\nNow, in the logs folder, I have only a \"Outliers_movement_20210423183614\" but no Outliers_location\" file. 
Moreover, the Outliers_movement file is empty: \r\n\r\n![SimbaBug2](https://user-images.githubusercontent.com/66886884/115903567-060afb80-a464-11eb-9261-ecadc472d06b.png)\r\n\r\nCould you please tell me how to fix this issue?\r\n\r\nThank you very much, \r\nBest,\r\nDorian\r\n\r\n\r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2021-04-23T16:45:50Z", + "updated_at": "2021-04-23T22:20:56Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "I tried also to use the \"skip outlier correction\". \r\nBut once my ROIs are defined and drawn, if I try to analyze the ROI, here what I have:\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/115909625-0d360780-a46c-11eb-9296-8a61de0b24b1.png)\r\n\r\nDo you have an idea of what is wrong in my project?\r\n\r\nThank you, \r\nBest,\r\nDorian \r\n", + "created_at": "2021-04-23T17:43:14Z", + "author": "DorianBattivelli" + }, + { + "body": "FYI: I see on my dlc csv file that some cells are blank, meaning that signal is lost for some frames. Here an example\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/115913340-c696dc00-a470-11eb-938d-425c81c7d4d8.png)\r\n\r\nOn SimBA I imported h5 files, but I think they also contain these \"missing data\"right? Could that produce the error? \r\n\r\n ", + "created_at": "2021-04-23T18:16:28Z", + "author": "DorianBattivelli" + }, + { + "body": "Hello @DorianBattivelli, a few points:\r\n\r\n1) I suggest running the latest dev version of SimBA, rather than the TF version. So in your environment, run:\r\n\r\n`pip uninstall simba-uw-tf`, followed by \r\n`pip install simba-uw-tf-dev`\r\n\r\n2). Yes, SimBA does not like missing data. When you export your H5's from DLC, make sure you are not filtering the data based on probabilities. Take it all. SimBA should fill in the blanks with zeros - but generally - do not filter/remove any of your DLC output. \r\n\r\n3) What's happening based on this picture: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/115927741-c16a6a80-a439-11eb-8a9f-33ad643d0018.png)\r\n\r\n... suggests that SimBA never imported the files correctly. This suggests that SimBA is looking in your `project_folder/csv/input_csv` directory for files, and finds `0` files. Could you make sure that the files are there? Try with the simba-uw-dev version, let me know if using that version of SimBA does the trick.\r\n\r\n\r\n\r\n", + "created_at": "2021-04-23T20:45:03Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab thank you very much for the help. 
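Since missing or non-finite pose-estimation values come up in several of these threads (blank DLC cells, coordinates at infinity), here is a minimal sketch of the kind of fill-in-with-zeros step described above; it is not SimBA's exact code, just the general idea:

```python
import numpy as np
import pandas as pd

def sanitize_pose(df: pd.DataFrame) -> pd.DataFrame:
    """Replace NaN and +/-inf coordinates with zeros so downstream geometry cannot blow up."""
    return df.replace([np.inf, -np.inf], np.nan).fillna(0)
```

The affected frames may then look odd (body parts pinned at the origin), but the import no longer crashes.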
\r\nI installed the dev version, and for now when I import the h5 files, the csv-input folder is still empty, is this normal?\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/115931593-ade7ef80-a48b-11eb-8191-c8d7ff2a4e31.png)\r\n\r\nJust for confirmation, am I importing the good h5 file?\r\n![Untitled2](https://user-images.githubusercontent.com/66886884/115931721-ed164080-a48b-11eb-81da-1bab73a0d6df.png)\r\n\r\n", + "created_at": "2021-04-23T21:30:50Z", + "author": "DorianBattivelli" + }, + { + "body": "I also noticed this message: Cannot locate video CD1_1-ph2-1-1DLC_resnet_50_Arena-CD1-protocol2-ph2Apr12shuffle1_6000_bx.h5 in mp4 or avi format\r\n\r\nI imported the standard mp4 video (and they are in the videos folder), did I import the wrong videos?", + "created_at": "2021-04-23T21:38:47Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - ah I see, I'm not sure how this happened, but I presume that your videos are called, for example `CD1_1-ph2-1-1.mp4`. SimBA looks for a video file name in your project that matches the H5 file name, in this case you get an error as either because (i) your h5's are named differently from your videos (e.g., CD1_1-ph2-1-1DLC_resnet_50_Arena-CD1-protocol2-ph2Apr12shuffle1_6000_bx.h5 vs CD1_1-ph2-1-1.mp4). Could you try remove the DLC appended string to the filenames (e.g., `DLC_resnet_50_Arena-CD1-protocol2-ph2Apr12shuffle1_6000_bx`) and see if it works? SimBA should do this automatically so not sure why it hasn't at this instance... (ii) or you have not imported any videos. You need to import the h5's after you have imported the videos to your SimBA project. \r\n\r\n", + "created_at": "2021-04-23T21:50:29Z", + "author": "sgoldenlab" + }, + { + "body": "I am still facing the issue..\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/115933708-b4786600-a48f-11eb-8591-27a4a5a17751.png)\r\n\r\n", + "created_at": "2021-04-23T21:58:23Z", + "author": "DorianBattivelli" + }, + { + "body": "sorry, Ignore my last msg then, and rename your files back to what they were. \r\n\r\nCan you send me a screenshot if your `project_folder/videos` directory? \r\n", + "created_at": "2021-04-23T22:03:16Z", + "author": "sgoldenlab" + }, + { + "body": "Here it is: \r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/115934138-a1b26100-a490-11eb-8636-0efd12ddaefa.png)\r\n", + "created_at": "2021-04-23T22:05:08Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks looking into this, will insert a better error msg so I can see what is going on", + "created_at": "2021-04-23T22:15:38Z", + "author": "sgoldenlab" + }, + { + "body": "I think I see what is happening, thanks for reporting this, seems like DLC has changed the appended string from `DLC_resnet50` to `DLC_resnet_50`.... for whatever reason. ", + "created_at": "2021-04-23T22:18:43Z", + "author": "sgoldenlab" + }, + { + "body": "Well done, it solved the problem, thank you very much!", + "created_at": "2021-04-23T22:20:11Z", + "author": "DorianBattivelli" + }, + { + "body": "Cheers, I will update SimBA to handle either form. 
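A minimal sketch of suffix handling that tolerates both forms (a hypothetical helper, not the exact SimBA implementation):

```python
import re
from pathlib import Path

# Matches the DLC-appended model string in either style:
# 'DLC_resnet50...' and 'DLC_resnet_50...'
DLC_SUFFIX = re.compile(r"DLC_resnet_?\d+.*$")

def video_name_from_h5(h5_path: str) -> str:
    """Strip the DLC-appended string so an H5 can be matched to its video file."""
    return DLC_SUFFIX.sub("", Path(h5_path).stem)

print(video_name_from_h5("CD1_1-ph2-1-1DLC_resnet_50_Arena-CD1-protocol2-ph2Apr12shuffle1_6000_bx.h5"))
# -> CD1_1-ph2-1-1
```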
", + "created_at": "2021-04-23T22:20:55Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Is simba compatible with Drosophila?", + "body": "Is this program compatible with studying fruit flies without making any modifications to the program?", + "user": "turtlelover426", + "reaction_cnt": 0, + "created_at": "2021-04-18T19:39:17Z", + "updated_at": "2021-05-07T13:33:46Z", + "author": "turtlelover426", + "comments": [ + { + "body": "Hello @turtlelover426 - it comes down to how many flies you have I think. I have not used it with flies myself - it might be relevant thought: [Matěj Šmíd](https://github.com/smidm) used it with 6 ants, and [Caleb Voight](https://github.com/calebvogt) was using flies - you might want to reach out to them. I don't for-see any issues, but if you have dozens of animals it will be terribly slow.. \r\n", + "created_at": "2021-04-19T13:41:51Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Importing Boris Data", + "body": "Hello,\r\n\r\nWe are testing out whether we can append pre-scored data from Boris within Simba but it appears that when we load it in the targets_inserted CSV only shows zeros for our behaviours. Although it's simple enough to write a script ourselves, I was wondering if maybe we are doing something wrong. Our behaviors have the same name and the video's names match as well. I see the script skips over behavior category, subject, and comment so I'm guessing those are not important. I look forward to hearing from you. \r\n\r\n", + "user": "neurowookie", + "reaction_cnt": 0, + "created_at": "2021-04-13T11:56:33Z", + "updated_at": "2021-04-25T20:29:03Z", + "author": "neurowookie", + "comments": [ + { + "body": "Ok I found the issue. I used a .avi file while the script is setup to remove only .mp4 extensions from the base filename. Just changing the extension in my Boris CSV made it work. Maybe it would be better to use \r\n`os.path.splitext(base)[0]`\r\nI also noticed that it currently seems to skip the first 15 rows to get to the header row, if people annotated their videos using two videos(side and top for example) at the same time, this will add one more row before the header. I have told my colleagues who are using these files to remove one row for the import to work but there should be a more elegant solution. ", + "created_at": "2021-04-13T13:13:45Z", + "author": "neurowookie" + }, + { + "body": "Thanks for letting me know @neurowookie - I admit I have not seen too many Boris-output formats, so it is rather hardcoded to the situations of the people I've talked to, but this gives me an opportunity to make it more flexible. \r\n\r\nWould you mind sharing a single Boris output file? \r\n\r\nHow do you use the behavior category, subject, and comment fields in you boris output? Are they important for distinguishing which behavior the animal is doing? \r\n\r\nI will insert the mp4/avi fix. ", + "created_at": "2021-04-13T20:01:09Z", + "author": "sgoldenlab" + }, + { + "body": "Do you think this attached script would work? It assumes the data starts when the first column reads `Time`. Not sure of there are cases when that is not true. 
\r\n\r\n[append_boris.py.zip](https://github.com/sgoldenlab/simba/files/6308015/append_boris.py.zip)\r\n![Untitled](https://user-images.githubusercontent.com/50497030/114650626-6dbc8c00-9c97-11eb-86c0-a9e03a09485e.png)\r\n", + "created_at": "2021-04-14T03:33:34Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for the quick response.\r\n\r\nFirst to answer your first set of questions we don't generally use behavior category or comment so that isn't something to worry about. We do however use subject sometimes when we have two animals to define which animal is doing what but for what we currently are trying to use SIMBA for this is unimportant and probably complicated to import in a useful way. \r\n\r\nI had a go with your script and it didn't work. But I really appreciate all of your work so I took some time and got it working. I'm not sure how pythonic my solution is though. Essentially once you import the whole CSV as data frame it sets Observation id and the following cell as the headers so when you try to run the skip it doesn't work. So I just reimport after finding the index. Also in order for splitext to work properly you should already get the basename. Here is the entire section that is working now on my pc. \r\n` \r\n[BorisOutput.xlsx](https://github.com/sgoldenlab/simba/files/6316876/BorisOutput.xlsx)\r\n\r\n\r\n currDf = pd.read_csv(file)\r\n index=(currDf[currDf['Observation id']==\"Time\"].index.values)\r\n currDf = pd.read_csv(file,skiprows=range(0, int(index+1))) \r\n currDf = currDf.loc[:, ~currDf.columns.str.contains('^Unnamed')]\r\n currDf.dropna()\r\n currDf.drop(['Behavioral category', 'Comment', 'Subject'], axis=1, inplace=True)\r\n for index, row in currDf.iterrows():\r\n currPath = row['Media file path']\r\n currBase= os.path.basename(currPath)\r\n currDf.at[index, 'Media file path'] = os.path.splitext(currBase)[0]\r\n combinedDf = pd.concat([combinedDf, currDf])\r\n`\r\nI have attatched a Boris output with 2 videos. These are fake observations solely for testing. \r\n\r\n", + "created_at": "2021-04-15T09:46:22Z", + "author": "neurowookie" + }, + { + "body": "@neurowookie No your way is way more pythonic, mine was a crappy for loop lol :) I have updated simba pypi version to have your code, and also appended the script here. Let me know if it works.\r\n\r\n[append_boris.py.zip](https://github.com/sgoldenlab/simba/files/6319356/append_boris.py.zip)\r\n", + "created_at": "2021-04-15T15:59:14Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "optimized feature extraction script for DLStream use", + "body": "This feature extraction is in line with the recursive feature elimination script used to optimize DLStream + SiMBA classifiers.\r\nIt is adapted from the original simba/features_scripts/extract_features_14bp.py and should fit into the SiMBA workflow without issues. Naming is consistent with previous feature_scripts.\r\n\r\nClassifiers trained with this feature selection can be used with the optimized feature extraction script (by Simon) in DLStream that pushes classfication time down to sub pose estimation inference speed. A huge benefit. It was already used for anogenital approach classification. 
See [DLStream Code](https://github.com/SchwarzNeuroconLab/DeepLabStream/blob/dev_classification/experiments/custom/featureextraction.py) and [DLStream Wiki](https://github.com/SchwarzNeuroconLab/DeepLabStream/wiki/Advanced-Behavior-Classification).\r\n\r\nHopefully this will enable the community to directly train \"real-time\" optimized classification in SiMBA and implement them into DLStream. Let me know what you think.", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2021-03-26T14:14:15Z", + "updated_at": "2021-10-01T22:47:21Z", + "author": "JensBlack", + "comments": [] + }, + { + "title": "Bump pyyaml from 5.3.1 to 5.4", + "body": "Bumps [pyyaml](https://github.com/yaml/pyyaml) from 5.3.1 to 5.4.\n
**Changelog** (sourced from pyyaml's changelog): 5.4 (2021-01-19).
**Commits**: 58d0cb7 5.4 release; a60f7a1 Fix compatibility with Jython; ee98abd Run CI on PR base branch changes; ddf2033 constructor.timezone: _copy & deepcopy; fc914d5 Avoid repeatedly appending to yaml_implicit_resolvers; a001f27 Fix for CVE-2020-14343; fe15062 Add 3.9 to appveyor file for completeness sake; 1e1c7fb Add a newline character to end of pyproject.toml; 0b6b7d6 Start sentences and phrases for capital letters; c976915 Shell code improvements; additional commits viewable in compare view.
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself; the standard Dependabot command set (rebase, recreate, merge, squash and merge, cancel merge, reopen, close, ignore ...) applies, and automated security-fix PRs can be disabled from the repository's Security Alerts page.
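As context for the `a001f27 Fix for CVE-2020-14343` commit listed above: that CVE concerns arbitrary code execution when untrusted YAML is parsed with the full loader, so the usual guidance is to parse untrusted input with `yaml.safe_load`. A minimal illustration (not part of this PR, just the general point):

```python
import yaml

doc = "a: 1\nb: [2, 3]"
data = yaml.safe_load(doc)  # safe_load only builds plain Python types
print(data)                 # {'a': 1, 'b': [2, 3]}
```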
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-03-26T00:51:08Z", + "updated_at": "2022-03-12T20:33:43Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-03-12T20:33:35Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pyyaml from 5.3.1 to 5.4 in /simba", + "body": "Bumps [pyyaml](https://github.com/yaml/pyyaml) from 5.3.1 to 5.4.\n
**Changelog** and **Commits** are identical to the pyyaml 5.3.1 → 5.4 bump above (changelog entry 5.4, 2021-01-19; commits 58d0cb7 through c976915, including a001f27 Fix for CVE-2020-14343); the same standard Dependabot conflict-resolution notes and command options apply.
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-03-25T23:18:00Z", + "updated_at": "2022-03-12T20:33:41Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "OK, I won't notify you again about this release, but will get in touch when a new version is available. If you'd rather skip all updates until the next major or minor version, let me know by commenting `@dependabot ignore this major version` or `@dependabot ignore this minor version`.\n\nIf you change your mind, just re-open this PR and I'll resolve any conflicts on it.", + "created_at": "2022-03-12T20:33:34Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 8.1.1", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 8.1.1.\n
**Release notes** (sourced from pillow's releases): 8.1.1 — https://pillow.readthedocs.io/en/stable/releasenotes/8.1.1.html; 8.1.0 — https://pillow.readthedocs.io/en/stable/releasenotes/8.1.0.html (Changes, Dependencies, and Deprecations sections truncated).
**Changelog** (sourced from pillow's changelog):
8.1.1 (2021-03-01): Use more specific regex chars to prevent ReDoS, CVE-2021-25292 [hugovk]; Fix OOB Read in TiffDecode.c and check tile validity before reading, CVE-2021-25291 [wiredfool]; Fix negative size read in TiffDecode.c, CVE-2021-25290 [wiredfool]; Fix OOB read in SgiRleDecode.c, CVE-2021-25293 [wiredfool]; Incorrect error code checking in TiffDecode.c, CVE-2021-25289 [wiredfool]; PyModule_AddObject fix for Python 3.10 #5194 [radarhere].
8.1.0 (2021-01-02): Fix TIFF OOB Write error, CVE-2020-35654 #5175 [wiredfool]; Fix for Read Overflow in PCX Decoding, CVE-2020-35653 #5174 [wiredfool, radarhere]; Fix for SGI Decode buffer overrun, CVE-2020-35655 #5173 [wiredfool, radarhere]; Fix OOB Read when saving GIF of xsize=1 #5149 [wiredfool]; Makefile updates #5159 [wiredfool, radarhere]; Add support for PySide6 #5161 [hugovk]; Use disposal settings from previous frame in APNG #5126 [radarhere]; Added exception explaining that repr_png saves to PNG #5139 [radarhere]; Use previous disposal method in GIF load_end #5125 [radarhere]; ... (truncated).
**Commits**: 741d874 8.1.1 version bump; 179cd1c Added 8.1.1 release notes to index; 7d29665 Update CHANGES.rst [ci skip]; d25036f Credits; 973a4c3 Release notes for 8.1.1; 521dab9 Use more specific regex chars to prevent ReDoS; 8b8076b Fix for CVE-2021-25291; e25be1e Fix negative size read in TiffDecode.c; f891baa Fix OOB read in SgiRleDecode.c; cbfdde7 Incorrect error code checking in TiffDecode.c; additional commits viewable in compare view.
Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself; the standard Dependabot command set (rebase, recreate, merge, squash and merge, cancel merge, reopen, close, ignore ...) applies, and automated security-fix PRs can be disabled from the repository's Security Alerts page.
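The 8.1.x entries above are largely about out-of-bounds reads and writes while decoding malformed images (TIFF, SGI, PCX, GIF). Beyond upgrading to `Pillow>=8.1.1`, a common defensive pattern when handling untrusted images is to run Pillow's structural check before fully decoding; a minimal sketch:

```python
from PIL import Image

def safe_open(path: str) -> Image.Image:
    """Verify an untrusted image's structure before decoding it for real."""
    with Image.open(path) as im:
        im.verify()          # cheap integrity check; raises on many malformed files
    return Image.open(path)  # verify() consumes the file object, so reopen
```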
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-03-20T17:22:28Z", + "updated_at": "2021-06-08T23:17:01Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #118.", + "created_at": "2021-06-08T23:16:59Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "can't run SimBA from repo: missing files", + "body": "**Describe the bug**\r\n\r\nThe repository version is not usable. Files are missing.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. git clone repository\r\n2. install dependencies\r\n3. run `simba`\r\n4. fail with\r\n```\r\nTraceback (most recent call last):\r\n File \".../conda/envs/simba/bin/simba\", line 5, in \r\n from simba.SimBA import main\r\n File \".../conda/envs/simba/lib/python3.6/site-packages/simba/SimBA.py\", line 23, in \r\n from simba.plotly_create_h5 import *\r\nModuleNotFoundError: No module named 'simba.plotly_create_h5'\r\n```\r\n\r\n**Expected behavior**\r\n\r\nSimBA starts\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Fedora Linux 33\r\n - Python Version: 3.6.13\r\n - Are you using anaconda? yes\r\n\r\n", + "user": "smidm", + "reaction_cnt": 0, + "created_at": "2021-03-19T15:57:42Z", + "updated_at": "2021-03-22T18:43:35Z", + "author": "smidm", + "comments": [ + { + "body": "Please uninstall your current simba and try pip install simba-uw-tf-dev==0.80.8 and see if that fixes your error.\r\n\r\nBest,\r\nJJ", + "created_at": "2021-03-20T01:30:40Z", + "author": "inoejj" + }, + { + "body": "I have a working SimBA installation. That's not the problem.\r\n\r\nI want to make some modifications to the development version of SimBA that is in this repository, but it is missing some files: plotly_create_h5.py, setup.py, ... \r\n\r\nThe usual way to contribute to an open source project is to clone a repo, make changes, test them and make a pull request. This is not easy now.", + "created_at": "2021-03-20T10:17:57Z", + "author": "smidm" + }, + { + "body": "Please try it again now :)", + "created_at": "2021-03-20T19:04:08Z", + "author": "inoejj" + }, + { + "body": "Thank for the update (including `setup.py`). There are still some files missing:\r\n```\r\nTraceback (most recent call last):\r\n File \"simba.py\", line 5, in \r\n from simba.SimBA import main\r\n File \".../simba/SimBA.py\", line 23, in \r\n from simba.plotly_create_h5 import *\r\n File \".../simba/plotly_create_h5.py\", line 5, in \r\n from simba.rw_dfs import *\r\nModuleNotFoundError: No module named 'simba.rw_dfs'\r\n\r\n```\r\n", + "created_at": "2021-03-22T15:18:01Z", + "author": "smidm" + }, + { + "body": "You will have to run your script using cmd on the simba-master directory and call `python App.py`\r\n\r\nI think if you do that the error will be gone :)", + "created_at": "2021-03-22T17:38:12Z", + "author": "inoejj" + }, + { + "body": "The results are the same. 
The `simba.py` and `App.py` are doing almost the same.\r\n```\r\npython App.py\r\nTraceback (most recent call last):\r\n File \"App.py\", line 2, in \r\n from simba.SimBA import main as App\r\n File \".../simba/SimBA.py\", line 23, in \r\n from simba.plotly_create_h5 import *\r\n File \".../simba/plotly_create_h5.py\", line 5, in \r\n from simba.rw_dfs import *\r\nModuleNotFoundError: No module named 'simba.rw_dfs'\r\n```\r\nThere is still a lot of differences between the packaged SimBA and the git repo:\r\n\r\n```\r\n$ diff -q simba-git/ simba-pip/ | grep -v differ | grep -v 'Common subdirectories' | sort\r\n\r\nOnly in simba-pip/: append_boris.py\r\nOnly in simba-pip/: extract_frames_fast.py\r\nOnly in simba-pip/: import_solomon.py\r\nOnly in simba-pip/: Kleinberg_burst_analysis.py\r\nOnly in simba-pip/: path_plot_new.py\r\nOnly in simba-pip/: pose_configurations_archive\r\nOnly in simba-pip/: process_movement_new.py\r\nOnly in simba-pip/: process_videos_automation_linux.py\r\nOnly in simba-pip/: process_videos_automation_win.py\r\nOnly in simba-pip/: reverse_tracking_order.py\r\nOnly in simba-pip/: ROI_add_to_features_old2.py\r\nOnly in simba-pip/: ROI_add_to_features_old.py\r\nOnly in simba-pip/: ROI_analysis_old.py\r\nOnly in simba-pip/: ROI_analysis.py\r\nOnly in simba-pip/: ROI_directionality_to_other_animals.py\r\nOnly in simba-pip/: ROI_directionality_to_other_animals_visualize.py\r\nOnly in simba-pip/: ROI_plot_multi_process.py\r\nOnly in simba-pip/: ROI_plot_old_2.py\r\nOnly in simba-pip/: ROI_plot_old.py\r\nOnly in simba-pip/: ROI_process_movement_old.py\r\nOnly in simba-pip/: run_dash_tkinter.py\r\nOnly in simba-pip/: run_RF_model_old.py\r\nOnly in simba-pip/: rw_dfs.py\r\nOnly in simba-pip/: SimBA_dash_app.py\r\n\r\nOnly in simba-git/: classifier_validation.py\r\nOnly in simba-git/: correct_devs_loc.py\r\nOnly in simba-git/: correct_devs_mov.py\r\nOnly in simba-git/: crop_video_tool_user_bounding_box_multiple.py\r\nOnly in simba-git/: extract_features_with_scaling.py\r\nOnly in simba-git/: extract_features_wo_targets.py\r\nOnly in simba-git/: gantt_old.py\r\nOnly in simba-git/: golden_lab.ico\r\nOnly in simba-git/: image.png\r\nOnly in simba-git/: mouse_diagrams\r\nOnly in simba-git/: next_clahe.py\r\nOnly in simba-git/: next_greyscale.py\r\nOnly in simba-git/: next_superimposeframe.py\r\nOnly in simba-git/: plot_heatmap_location.py\r\nOnly in simba-git/: plot_heatmap_old.py\r\nOnly in simba-git/: plot_sklearn_results.py\r\nOnly in simba-git/: process_data_log_old.py\r\nOnly in simba-git/: process_movement_old.py\r\nOnly in simba-git/: tkinter_class_test_efficient_scratch.py\r\nOnly in simba-git/: validate_model_on_single_video_copy.py\r\n```\r\n(listing just files that are unique in one of the sources)", + "created_at": "2021-03-22T18:08:48Z", + "author": "smidm" + }, + { + "body": "The listing above are differences between `simba-uw-tf` and github master.\r\n\r\nHere is `Simba_UW_tf_dev` and github master (including subfolders):\r\n\r\n```\r\n$ diff -qr simba-github-master/simba Simba_UW_tf_dev -x '*pyc' -x __pycache__ | grep Only | sort\r\n\r\nOnly in simba-github-master/simba: classifier_validation.py\r\nOnly in simba-github-master/simba: extract_features_with_scaling.py\r\nOnly in simba-github-master/simba: extract_features_wo_targets.py\r\nOnly in simba-github-master/simba/features_scripts: extract_features_16b-8extra.py\r\nOnly in simba-github-master/simba: golden_lab.ico\r\nOnly in simba-github-master/simba: image.png\r\nOnly in simba-github-master/simba: 
mouse_diagrams\r\nOnly in simba-github-master/simba: next_clahe.py\r\nOnly in simba-github-master/simba: next_greyscale.py\r\nOnly in simba-github-master/simba: next_superimposeframe.py\r\nOnly in simba-github-master/simba: plot_sklearn_results.py\r\nOnly in simba-github-master/simba/sklearn_plot_scripts: plot_sklearn_results_14_16bp.py\r\nOnly in simba-github-master/simba/sklearn_plot_scripts: plot_sklearn_results_2_maDLC.py\r\nOnly in simba-github-master/simba/sklearn_plot_scripts: plot_sklearn_results_4bp.py\r\nOnly in simba-github-master/simba/sklearn_plot_scripts: plot_sklearn_results_7_8bp.py\r\nOnly in simba-github-master/simba/sklearn_plot_scripts: plot_sklearn_results_9bp.py\r\nOnly in simba-github-master/simba/sklearn_plot_scripts: plot_sklearn_results_plot_features.py\r\nOnly in simba-github-master/simba/sklearn_plot_scripts: plot_sklearn_results_user_defined.py\r\nOnly in simba-github-master/simba: tkinter_class_test_efficient_scratch.py\r\nOnly in simba-github-master/simba: validate_model_on_single_video_copy.py\r\n\r\nOnly in Simba_UW_tf_dev: append_boris.py\r\nOnly in Simba_UW_tf_dev/assets: ._dash_simba_base.css\r\nOnly in Simba_UW_tf_dev: combine_to_h5.py\r\nOnly in Simba_UW_tf_dev: dash_simba.css\r\nOnly in Simba_UW_tf_dev: ._.DS_Store\r\nOnly in Simba_UW_tf_dev: .DS_Store\r\nOnly in Simba_UW_tf_dev: extract_frames_fast.py\r\nOnly in Simba_UW_tf_dev: fast_video.py\r\nOnly in Simba_UW_tf_dev/features_scripts: extract_features_16bp_new_2.py\r\nOnly in Simba_UW_tf_dev: FSTCC.py\r\nOnly in Simba_UW_tf_dev: gantt_video.py\r\nOnly in Simba_UW_tf_dev: .idea\r\nOnly in Simba_UW_tf_dev: import_solomon.py\r\nOnly in Simba_UW_tf_dev: Initate_multi_process.py\r\nOnly in Simba_UW_tf_dev: Kleinberg_burst_analysis.py\r\nOnly in Simba_UW_tf_dev/outlier_scripts: .idea\r\nOnly in Simba_UW_tf_dev/outlier_scripts/location: .idea\r\nOnly in Simba_UW_tf_dev/outlier_scripts/movement: correct_devs_mov_15bp.py\r\nOnly in Simba_UW_tf_dev/outlier_scripts/movement: correct_devs_mov_user_defined_new.py\r\nOnly in Simba_UW_tf_dev/outlier_scripts/movement: correct_devs_mov_user_defined_old.py\r\nOnly in Simba_UW_tf_dev/outlier_scripts/movement: .idea\r\nOnly in Simba_UW_tf_dev/outlier_scripts/movement: rw_dfs.py\r\nOnly in Simba_UW_tf_dev/outlier_scripts: rw_dfs.py\r\nOnly in Simba_UW_tf_dev: path_plot_new.py\r\nOnly in Simba_UW_tf_dev: pose_configurations_archive\r\nOnly in Simba_UW_tf_dev/pose_configurations/schematics: Picture13.png\r\nOnly in Simba_UW_tf_dev: process_movement_new.py\r\nOnly in Simba_UW_tf_dev: process_videos_automation_linux.py\r\nOnly in Simba_UW_tf_dev: process_videos_automation_win.py\r\nOnly in Simba_UW_tf_dev: pup_retrieval_1.py\r\nOnly in Simba_UW_tf_dev: reverse_tracking_order.py\r\nOnly in Simba_UW_tf_dev: ROI_add_to_features_old2.py\r\nOnly in Simba_UW_tf_dev: ROI_add_to_features_old.py\r\nOnly in Simba_UW_tf_dev: ROI_analysis_old.py\r\nOnly in Simba_UW_tf_dev: ROI_analysis.py\r\nOnly in Simba_UW_tf_dev: ROI_directionality_to_other_animals.py\r\nOnly in Simba_UW_tf_dev: ROI_directionality_to_other_animals_visualize.py\r\nOnly in Simba_UW_tf_dev: ROI_plot_multi_process.py\r\nOnly in Simba_UW_tf_dev: ROI_plot_new.py\r\nOnly in Simba_UW_tf_dev: ROI_plot_old_2.py\r\nOnly in Simba_UW_tf_dev: ROI_plot_old.py\r\nOnly in Simba_UW_tf_dev: ROI_process_movement_old.py\r\nOnly in Simba_UW_tf_dev: run_dash_tkinter.py\r\nOnly in Simba_UW_tf_dev: run_RF_model_old.py\r\nOnly in Simba_UW_tf_dev: rw_dfs.py\r\nOnly in Simba_UW_tf_dev: SimBA_dash_app.py\r\nOnly in 
Simba_UW_tf_dev/sklearn_plot_scripts: .idea\r\nOnly in Simba_UW_tf_dev: timeBins_classifiers_new.py\r\n\r\n```\r\n\r\n", + "created_at": "2021-03-22T18:43:35Z", + "author": "smidm" + } + ] + }, + { + "title": "provide package build files (setup.py)", + "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nI want to use the SimBA code and possibly contribute to the SimBA.\r\n\r\n**Describe the solution you'd like**\r\n\r\nprovide `setup.py` to be able to install the git version with `pip install -e .` \r\n\r\nThe Python packages are released and uploaded to pypi, but the build files seem to be missing.", + "user": "smidm", + "reaction_cnt": 0, + "created_at": "2021-03-19T15:08:17Z", + "updated_at": "2021-03-23T18:11:46Z", + "author": "smidm", + "comments": [] + }, + { + "title": "Bump pillow from 5.4.1 to 8.1.1 in /simba", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 8.1.1.\n
**Release notes**, **Changelog**, and **Commits** are identical to the pillow 5.4.1 → 8.1.1 bump above (8.1.1 fixing CVE-2021-25289, CVE-2021-25290, CVE-2021-25291, CVE-2021-25292, and CVE-2021-25293; 8.1.0 fixing CVE-2020-35653, CVE-2020-35654, and CVE-2020-35655; commits 741d874 through cbfdde7); the same standard Dependabot conflict-resolution notes and command options apply.
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-03-19T10:02:59Z", + "updated_at": "2021-06-08T20:48:29Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #117.", + "created_at": "2021-06-08T20:48:27Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Create LICENSE", + "body": "", + "user": "sgoldenlab", + "reaction_cnt": 0, + "created_at": "2021-03-09T18:20:55Z", + "updated_at": "2022-03-12T20:34:40Z", + "author": "sgoldenlab", + "comments": [] + }, + { + "title": "behavior annotation with mat file", + "body": "**Most of our previous manual behavior annotation was using matlab. So we got a .mat file and a txt file indicating behavior start and end frame, time and behavior.\r\nIs this possible to use them to predict behavior in simba?\r\nOne sample as follows:\r\nscorevideo LOG\r\nFile: logWT_fmO2_oil_ovx_200814.mat\r\n\r\nVIDEO FILE SET\r\nVideotype: AVI\r\nDirectory: D:\\**\\200814\r\nVideo file set:\r\n WT_fmO2_oil_ovx_200814 1-65693\r\nPlay duration (min): 43.795\r\nFrames/sec of files: 25\r\nNo. of subjects: 2\r\n\r\nCOMMAND SET AND SETTINGS\r\n-------------------------------\r\nstart|stop|subject|description\r\n-------------------------------\r\nc 0 contact female\r\nd 0 feeding\r\ne 0 ejaculation\r\nf 0 female\r\ng 0 genital grooming\r\nh 0 head mount\r\ni 0 intromit\r\nm 0 mount\r\ns 0 sniff female\r\nt 0 be contacted\r\nu 0 be sniffed\r\nv 0 USV\r\nw 0 try to mount\r\nz 0 grooming\r\n-------------------------------\r\nsubject 1: subject1\r\nsubject 2: subject2\r\nsubj#: 0=either 1=subject1 2=subject2 3=both\r\nNo. of simultaneous behaviors: one\r\n\r\nRAW LOG\r\n------------------------------------------\r\nframe|time(min:sec)|command\r\n------------------------------------------\r\n 9727 6:29.08 s\r\n 9772 6:30.88\r\n12591 8:23.64 s\r\n12605 8:24.20\r\n12737 8:29.48 s\r\n12820 8:32.80\r\n13519 9:00.76 s\r\n13542 9:01.68\r\n16240 10:49.60 s\r\n16257 10:50.28\r\n18862 12:34.48 s\r\n18897 12:35.88\r\n18993 12:39.72 s\r\n19034 12:41.36\r\n19097 12:43.88 m\r\n19121 12:44.84\r\n19790 13:11.60 s\r\n19813 13:12.52\r\n20567 13:42.68 s\r\n20587 13:43.48\r\n21055 14:02.20 m\r\n21201 14:08.04 i\r\n21308 14:12.32\r\n21333 14:13.32 s\r\n21536 14:21.44\r\n24421 16:16.84 s\r\n24438 16:17.52\r\n32025 21:21.00 s\r\n32059 21:22.36\r\n32308 21:32.32 s\r\n32344 21:33.76\r\n32750 21:50.00 s\r\n32803 21:52.12\r\n38671 25:46.84 s\r\n38680 25:47.20\r\n41084 27:23.36 s\r\n41169 27:26.76\r\n43743 29:09.72 s\r\n43789 29:11.56\r\n47880 31:55.20 s\r\n47957 31:58.28\r\n48013 32:00.52 m\r\n48029 32:01.16 i\r\n48185 32:07.40 s\r\n48215 32:08.60\r\n49277 32:51.08 s\r\n49299 32:51.96\r\n49372 32:54.88 m\r\n49390 32:55.60 i\r\n49737 33:09.48\r\n------------------------------------------\r\n\r\nFULL LOG\r\n------------------------------------------\r\nframe|time(min:sec)|description|action|subject\r\n------------------------------------------\r\n 9727 6:29.08 sniff female either start\r\n 9772 6:30.88 sniff female either stop\r\n12591 8:23.64 sniff female either start\r\n12605 8:24.20 sniff female either stop\r\n12737 8:29.48 sniff female either start\r\n12820 8:32.80 sniff female either stop\r\n13519 9:00.76 sniff female either start\r\n13542 9:01.68 sniff female either stop\r\n16240 10:49.60 sniff female either start\r\n16257 10:50.28 sniff female either stop\r\n18862 12:34.48 sniff female either start\r\n18897 12:35.88 sniff female either stop\r\n18993 12:39.72 sniff female either start\r\n19034 12:41.36 sniff female either stop\r\n19097 12:43.88 
mount either start\r\n19121 12:44.84 mount either stop\r\n19790 13:11.60 sniff female either start\r\n19813 13:12.52 sniff female either stop\r\n20567 13:42.68 sniff female either start\r\n20587 13:43.48 sniff female either stop\r\n21055 14:02.20 mount either start\r\n21201 14:08.04 mount either stop\r\n21201 14:08.04 intromit either start\r\n21308 14:12.32 intromit either stop\r\n21333 14:13.32 sniff female either start\r\n21536 14:21.44 sniff female either stop\r\n24421 16:16.84 sniff female either start\r\n24438 16:17.52 sniff female either stop\r\n32025 21:21.00 sniff female either start\r\n32059 21:22.36 sniff female either stop\r\n32308 21:32.32 sniff female either start\r\n32344 21:33.76 sniff female either stop\r\n32750 21:50.00 sniff female either start\r\n32803 21:52.12 sniff female either stop\r\n38671 25:46.84 sniff female either start\r\n38680 25:47.20 sniff female either stop\r\n41084 27:23.36 sniff female either start\r\n41169 27:26.76 sniff female either stop\r\n43743 29:09.72 sniff female either start\r\n43789 29:11.56 sniff female either stop\r\n47880 31:55.20 sniff female either start\r\n47957 31:58.28 sniff female either stop\r\n48013 32:00.52 mount either start\r\n48029 32:01.16 mount either stop\r\n48029 32:01.16 intromit either start\r\n48185 32:07.40 intromit either stop\r\n48185 32:07.40 sniff female either start\r\n48215 32:08.60 sniff female either stop\r\n49277 32:51.08 sniff female either start\r\n49299 32:51.96 sniff female either stop\r\n49372 32:54.88 mount either start\r\n49390 32:55.60 mount either stop\r\n49390 32:55.60 intromit either start\r\n49737 33:09.48 intromit either stop\r\n------------------------------------------\r\n\r\nNOTES\r\n------------------------------------------\r\n------------------------------------------\r\n\r\nMARKS\r\n------------------------------------------\r\nframe|time(min:sec)|mark name\r\n------------------------------------------\r\n 1 0:00.04 video start\r\n 8565 5:42.60 assay start\r\n53565 35:42.60 assay end\r\n65693 43:47.72 video end\r\n------------------------------------------", + "user": "Wutaijing9", + "reaction_cnt": 0, + "created_at": "2021-02-26T12:40:27Z", + "updated_at": "2021-03-17T14:57:29Z", + "author": "Wutaijing9", + "comments": [ + { + "body": "Hi @Wutaijing9 - I think so - which software were these created in? Was it your own custom scripts? Reminds me of Piotr's matlab annotation toolbox output files I have seen from caltech.", + "created_at": "2021-02-26T14:24:01Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "ROI data analysis", + "body": "**This was amazing to replace manual behavior annotation. \r\nWhen analyzing ROI data, I got cumulative entry times and cumulative spending time. But this was not enough for further analysis. I need to know the exact time and frame mouse entry and exit from the ROI area, like from frame 675 mouse begin to entry in the ROI area and frame #700 mouse exit from it OR the exact time.\r\nCould you add this feature in ROI analysis? Thanks a lot!", + "user": "Wutaijing9", + "reaction_cnt": 0, + "created_at": "2021-02-26T12:09:04Z", + "updated_at": "2021-03-17T14:57:22Z", + "author": "Wutaijing9", + "comments": [ + { + "body": "Hello @Wutaijing9 ! \r\n\r\nYes, what we can do is generate a sub-folder in the project logs folder, containing one CSV output file per video analyzed, the file lists the animal ID together with the shape and entry and exit frame.. the individual CSV files would look like this. 
Do you think that would work?\r\n\r\nIf the animal does not exit the ROI before the end of the video, the Exit_frame would be read as `-1`. \r\n\r\n![Screen Shot 2021-02-26 at 6 13 27 AM](https://user-images.githubusercontent.com/50497030/109311142-43ebfa80-77fa-11eb-8643-3c3fc17a8c8d.png)\r\n\r\n\r\n\r\n", + "created_at": "2021-02-26T14:18:09Z", + "author": "sgoldenlab" + }, + { + "body": "If you update to the latest simba-uw-tf-dev version, you should have access to it. It is documented at the end of Part II of this tutorial : https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md#part-2-analyzing-roi-data \r\n\r\nLat me know if it works!", + "created_at": "2021-02-27T21:22:18Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": " ROI data analysis ", + "body": "", + "user": "Wutaijing9", + "reaction_cnt": 0, + "created_at": "2021-02-26T12:00:38Z", + "updated_at": "2021-02-26T12:01:32Z", + "author": "Wutaijing9", + "comments": [] + }, + { + "title": "SimBA for SPRGT", + "body": "I apologise if this is the wrong page to post this, i'm fairly new to GitHub.\r\nI was wondering if SimBA could be used to annotate and create behaviour classifiers for the rodent single pellet reaching task?\r\nHere is my setup:\r\nI have top down videos (and corresponding DeepLabCut csv's) of rat forelimbs reaching and grasping a sugar pellet (also annotated) on a spoon and then retrieving it. These videos are only around 28 seconds and contain 700 frames. \r\nI would like to create a classifier that would identify pronation, supination and lingering of the forelimb on the spoon as well as an overall classifier for detecting outcome (successful pellet reach or unsuccessful). \r\nI have already tried using SimBA but fear I may have used behavioural annotation incorrectly, marking frames individually rather than a frame range which would contain temporal data. \r\nMy question is, would this software actually work for this purpose and would it be able to take a chunk of frames marked as \"successful pellet retrieval\" and make an efficient classifier? \r\nThank you in advance!\r\n\r\nhttps://user-images.githubusercontent.com/65357758/108206742-b6841880-711e-11eb-92ad-7b14165dcb1f.mp4\r\n\r\n\r\n", + "user": "Shehrrykid", + "reaction_cnt": 0, + "created_at": "2021-02-17T12:50:35Z", + "updated_at": "2021-03-07T16:49:17Z", + "author": "Shehrrykid", + "comments": [ + { + "body": "Hello @Shehrrykid! The video won't play or download for some reason, if you share a gdrive link I could take a look -goldenneurolab@gmail.com \r\n\r\nSome thoughts, I'm not sure how pronation, supination and lingering looks like but generally, if you can identify a behavior by eye, and the behavior has a meaningful duration (e.g., the animal performed 3 bouts of supination for a total duration of 1.5) then you can use SimBA to get to that. \r\n\r\nI mention \"meaningful duration\" because of your second question - about successful reaching. 
Successful reaching doesn't really fill this criterion - SimBA will give you how many frames/ seconds/bouts successful reaching happened in - it is possible that the \"bouts\" of successful reaching that simba classifies is meaningful for you though.\r\n\r\nEach individual frame, annotated or not, does contain temporal data, up to 500ms - for each frame SimBA calculates these values - https://github.com/sgoldenlab/simba/blob/master/misc/features_user_defined_pose_config.csv \r\n\r\nI mention this as if you annotate a single frame as \"successful reaching\", what is unique about that frame compared to 3 frames downstream which is not \"successful reaching\" - probably nothing or very little - and this will confuse the classifier. \r\nI'd try to annotate a whole segment as successful reaching and see if that works. \r\n\r\nI'd also set some very clear definitions of what successful reaching is before annotating - e.g., the forelimbs have to remain _this_ position after making _this_ motion or something similar - without that it be difficult for the annotator to label frames correctly and for the computer to separate the behaviors from the background. ", + "created_at": "2021-02-18T00:12:00Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for your quick reply! \r\n\r\nI have sent you an email :)\r\n", + "created_at": "2021-02-19T20:22:02Z", + "author": "Shehrrykid" + }, + { + "body": "Hi guys,\r\n\r\nI was just wondering, per your last email:\r\nI as the annotator i can only tell if an animal is successful in reaching when they have retrieved the pellet and it has moved out of the field of view. This is because sometimes they can drop it as they pull their paw out and this is classed as a failure. That being said, does this mean I should label all the frames AFTER the paw is no longer in view as successful of failure? If thats the case the only frames contained in that classifier would be unannotated empty frames showing the spoon. Does SimBA somehow look at those frames in relation the the rest of the video? \r\nOr would it be best to label all frames containing the paw and pellet as success or fail instead? As in if frames 200-400 contain the paw reaching and grasping the pellet successfully then they should all be labelled as successful?\r\nThank you\r\nIlyaas", + "created_at": "2021-03-03T12:49:16Z", + "author": "Shehrrykid" + } + ] + }, + { + "title": "Sklearn visualizations of single videos", + "body": "Dear SimBA developers, \r\nI'd like to generate one single exemplary video with sklearn visualization of model predictions, instead I SimBA generates visualizations of all videos at once, if I am right. \r\nWould it be possible to add such feature, similar to the ROI visualizations?\r\nBest,\r\nIrene \r\n", + "user": "AyusoAa", + "reaction_cnt": 0, + "created_at": "2021-02-05T08:40:40Z", + "updated_at": "2021-03-01T10:08:58Z", + "author": "AyusoAa", + "comments": [ + { + "body": "Hello @AyusoAa ! Yes we are on it and I will let you know when it's done - if you cannot wait: SimBA grabs all of the videos directly in the `project_folder/csv/machine_results` and plots the results when you click the sklearn visualization. If you want to do just a select few, put the videos you do **not** want the visualize in a temporarily folder somewhere else, and then move them back. ", + "created_at": "2021-02-05T16:49:38Z", + "author": "sgoldenlab" + }, + { + "body": "@AyusoAa, I have implemented what you have suggested. 
Please update to simba-uw-tf==1.3.3 and let us know if that works for you.", + "created_at": "2021-02-05T19:29:56Z", + "author": "sgoldenlab" + }, + { + "body": "Note you should see it in the dev version too - \r\n![image](https://user-images.githubusercontent.com/34761092/107108125-94b79700-67ea-11eb-9b25-08fcacff962f.png)\r\n", + "created_at": "2021-02-06T03:44:51Z", + "author": "sronilsson" + }, + { + "body": "@AyusoAa - did this update work as expected? ", + "created_at": "2021-02-15T18:31:17Z", + "author": "sgoldenlab" + }, + { + "body": "It does work! Thank you so much for your attention and work :)", + "created_at": "2021-03-01T10:08:58Z", + "author": "AyusoAa" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 7.1.0", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 7.1.0.\n
Release notes

Sourced from pillow's releases.

7.1.0
https://pillow.readthedocs.io/en/stable/releasenotes/7.1.0.html

7.0.0
https://pillow.readthedocs.io/en/stable/releasenotes/7.0.0.html

6.2.2
https://pillow.readthedocs.io/en/stable/releasenotes/6.2.2.html

6.2.1
https://pillow.readthedocs.io/en/stable/releasenotes/6.2.1.html

6.2.0
https://pillow.readthedocs.io/en/stable/releasenotes/6.2.0.html

6.1.0
https://pillow.readthedocs.io/en/stable/releasenotes/6.1.0.html

6.0.0
No release notes provided.

Changelog

Sourced from pillow's changelog.

7.1.0 (2020-04-01)

  • Fix multiple OOB reads in FLI decoding #4503 [wiredfool]
  • Fix buffer overflow in SGI-RLE decoding #4504 [wiredfool, hugovk]
  • Fix bounds overflow in JPEG 2000 decoding #4505 [wiredfool]
  • Fix bounds overflow in PCX decoding #4506 [wiredfool]
  • Fix 2 buffer overflows in TIFF decoding #4507 [wiredfool]
  • Add APNG support #4243 [pmrowla, radarhere, hugovk] (see the sketch after this list)
  • ImageGrab.grab() for Linux with XCB #4260 [nulano, radarhere]
  • Added three new channel operations #4230 [dwastberg, radarhere]
  • Prevent masking of Image reduce method in Jpeg2KImagePlugin #4474 [radarhere, homm]
  • Added reading of earlier ImageMagick PNG EXIF data #4471 [radarhere]
  • Fixed endian handling for I;16 getextrema #4457 [radarhere]
  • Release buffer if function returns prematurely #4381 [radarhere]
  • Add JPEG comment to info dictionary #4455 [radarhere]
  • Fix size calculation of Image.thumbnail() #4404 [orlnub123]
  • Fixed stroke on FreeType < 2.9 #4401 [radarhere]
  • If present, only use alpha channel for bounding box #4454 [radarhere]
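As a quick illustration of the APNG support added in 7.1.0 (referenced above as #4243): animated PNGs now expose the same frame-stepping interface as GIFs. A minimal sketch, assuming Pillow >= 7.1 and a hypothetical animation.png on disk:

```python
from PIL import Image

# APNG frames are stepped through just like GIF frames.
with Image.open("animation.png") as im:  # "animation.png" is a placeholder
    print(im.is_animated, im.n_frames)
    im.seek(im.n_frames - 1)             # jump to the final frame
    im.save("last_frame.png")
```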
... (truncated)

Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=7.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2021-02-02T20:24:08Z", + "updated_at": "2021-03-20T17:22:50Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #98.", + "created_at": "2021-03-20T17:22:29Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Simple-use case", + "body": "Hello\r\nI've got several hundred hours of 4 or 6-gerbil videos that we are in the process of annotating using SLEAP (DLC did not do as well on them).\r\n\r\nWe are considering investing time in learning how to use simba, but the documentation is a bit overwhelming and it's not clear how to build our use case from the provided steps.\r\n\r\nWe essentially have the .slp files (or .csv files) and would like to know how to load them for a quick test (without adjusting all the parameters).\r\n\r\nWhat do you recommend? Is there a simple-cause that we could use? Do you have a python notebook that goes through a basic .slp or .csv?\r\n\r\nOk, thanks so much,\r\ncatubc", + "user": "catubc", + "reaction_cnt": 0, + "created_at": "2021-02-02T16:15:21Z", + "updated_at": "2021-02-17T15:40:44Z", + "author": "catubc", + "comments": [ + { + "body": "Hello @catubc - I don't know your exact use case (e.g., what behaviours you are classifying) but there is nothing obvious that comes to mind that would prevent you from doing this - a lot comes down to the accuracy of the pose and keeping ID swaps at a minimum - although how much ID swaps matters again comes down to what you are aiming to do. \r\n\r\nFor a trial I'd recommend \r\n\r\n(i) installing the SimBA development wheel - `pip install simba-uw-tf-dev`. \r\n\r\n(ii) take a **short** couple of video scored by SLEAP - maybe no more than a couple thousand frames. Create a SimBA project and import these videos as documented [HERE](https://github.com/sgoldenlab/simba/blob/master/docs/Multi_animal_pose.md). As you create the SimBA project you see a dropdown option for `workflow file type` which can be either `CSV` or `parquet`. With the size of your later data I'd recommend going for the parquet option - with hundreds of of hours of video then CSVs will cost you in terms of space and time. \r\n\r\n(iii) Once your project is created, and SLEAP pose is imported into your project, begin at this step of [Scenario 1](https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#part-2-load-project-1). But again, what specifically happens next depends on your use case - if you are interested in non-classification stuff it's possible you'd be better off looking [here](https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md) or [here](https://github.com/sgoldenlab/simba/blob/master/docs/directionality_between_animals.md). What are you looking to score? \r\n\r\nIf you are looking to generate classifiers, then make sure the short videos you are importing has a good amount of behaviors that you are interested in, so you are not just feeding in no-behavior examples. \r\n\r\nUnfortunately we designed SimBA to be GUI based, not as many objects as there should be - there is no going back right now and no notebooks to offer at the moment 🤷 . We are however happy to respond to any issues and explain/update code if there is something missing as you are troubleshooting and working through it. \r\n\r\n", + "created_at": "2021-02-03T06:43:29Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Data plot and wrong body parts selection", + "body": "Hey, I run into an issue with Data plot visualization. 
When I hit the \"generate data plot\" button in the GUI it gives me in terminal (Anaconda Prompt) a \"KeyError: 'movement_Mouse_2_No_2'\", indeed when I try to select body parts in the GUI they are wrong like truncated. In particular, the \"Select body part\" button shows me the wrong body parts (eg, \"Mouse_2_No\" instead of \"Mouse_2_Nose\" as it is in the project_bp_names.csv file). How can I fix it? Thank you so much again.\r\n", + "user": "filos93", + "reaction_cnt": 0, + "created_at": "2021-01-18T11:23:53Z", + "updated_at": "2021-01-18T23:21:17Z", + "author": "filos93", + "comments": [ + { + "body": "Hey @filos93 - sounds like a bug - which version of SimBA are you using? ", + "created_at": "2021-01-18T13:26:49Z", + "author": "sgoldenlab" + }, + { + "body": "Hey @sgoldenlab thanks for reply, I'm using the dev version ie, pip install simba-uw-tf-dev, should I change it?", + "created_at": "2021-01-18T14:57:39Z", + "author": "filos93" + }, + { + "body": "No, keep that one @filos93 - is it the latest version? I just want to be sure I am searching for the bug in the correct place. ", + "created_at": "2021-01-18T15:38:40Z", + "author": "sgoldenlab" + }, + { + "body": "Hey @sgoldenlab my version is 0.77.6 Should be the latest right?", + "created_at": "2021-01-18T16:15:42Z", + "author": "filos93" + }, + { + "body": "Yep, that's the latest. Thanks, I will get back to you, i'll see if I can recreate", + "created_at": "2021-01-18T16:36:06Z", + "author": "sgoldenlab" + }, + { + "body": "@filos93 - can you update to version 0.77.7 and see if thats fixed? \r\n\r\nHeads up, this part of the code hasn't been updated for a while and can still only handle two animals, if you have more you'll only get the data for animal 1 I believe. ", + "created_at": "2021-01-18T16:54:46Z", + "author": "sgoldenlab" + }, + { + "body": "Hey @sgoldenlab I updated my version, that fixes the bug in the GUI but I still get a KeyError in the terminal (ie, \"KeyError: 'movement_Mouse_1_Ear_left_2'), I'll attach a screenshot maybe that can help. Thanks again for your help and your patience.\r\n![Screenshot (22)](https://user-images.githubusercontent.com/64312024/104953360-8d108980-59c6-11eb-989d-dfe4b6d07f07.png)\r\n", + "created_at": "2021-01-18T18:52:18Z", + "author": "filos93" + }, + { + "body": "Ah thanks for letting me know.. just to confirm, you don't have more than 2 animals? 
And are the same body parts tracked on all the animals?", + "created_at": "2021-01-18T18:54:51Z", + "author": "sronilsson" + }, + { + "body": "yes two animals and tracked bodyparts are the same, here's the list: \"Mouse_1_Ear_left, Mouse_1_Ear_right, Mouse_1_Nose, Mouse_1_Tail_base, Mouse_2_Ear_left, Mouse_2_Ear_right, Mouse_2_Nose, Mouse_2_Tail_base\" from my project_bp_names csv file\r\n\r\n", + "created_at": "2021-01-18T18:57:55Z", + "author": "filos93" + }, + { + "body": "the other visualizations (eg, Gantt plot, path plot) works well, it's just with Data plot visualization the issue", + "created_at": "2021-01-18T19:00:37Z", + "author": "filos93" + }, + { + "body": "@filos93 - let's try one more time, could you update to 0.77.8 and see if that solves it?", + "created_at": "2021-01-18T20:57:25Z", + "author": "sgoldenlab" + }, + { + "body": "Hey @sgoldenlab I found the error in the data_plot.py lines: 61,63,105, you have to change \"movement_\" to \"Movement_\" (eg, if noAnimals == 2: move1ColName = \"Movement_\" + SelectedBp + '_1'\" so simba can find the \"Movement_\" in the features_extracted.csv file, after you do that everything works fine, I was able to generate the data plot! I'll attach a screnshot from the lines I changed in Notepad ++ maybe can help\r\n![Screenshot (25)](https://user-images.githubusercontent.com/64312024/104969805-cefef700-59e9-11eb-9d97-44da47ecb41c.png)\r\n![Screenshot (26)](https://user-images.githubusercontent.com/64312024/104969810-d0302400-59e9-11eb-88c8-7292021bb606.png)\r\n\r\n. Thanks again :)", + "created_at": "2021-01-18T23:04:20Z", + "author": "filos93" + }, + { + "body": "Super helpful, ta!", + "created_at": "2021-01-18T23:21:17Z", + "author": "sronilsson" + } + ] + }, + { + "title": "ROI visualization font size", + "body": "Hey, thanks again for the great software! This is more of a question than an issue. How can I change the font size in the ROI visualization particularly the font of \"timer\" and \"entries\" because right now they are a bit fuzzy and I cannot read them well. Please, let me know.", + "user": "filos93", + "reaction_cnt": 0, + "created_at": "2021-01-18T10:58:23Z", + "updated_at": "2021-01-18T13:19:36Z", + "author": "filos93", + "comments": [ + { + "body": "oh never mind found what I needed in the simba script \"ROI_plot\" (ie, user\\anaconda3\\envs\\simba\\Lib\\site-packages\\simba\\ROI_plot.py) and changed that using Notepad ++, I'll close this \"issue\" then, thanks again", + "created_at": "2021-01-18T12:26:02Z", + "author": "filos93" + }, + { + "body": "For anyone else looking into this, it's this row in ROI_plot.py:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/104920407-952be300-594c-11eb-90b3-d895627fdfc8.png)\r\n\r\nTweak the last value to modify the font scaling, decrease it and the font should get smaller. 
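For anyone scripting this outside the GUI, the mechanism is OpenCV's fontScale argument to cv2.putText; SimBA derives it from the video resolution. A sketch with made-up constants (not SimBA's actual values) mirroring that pattern:

```python
import cv2

base_font_scale = 0.8        # decrease this to shrink the overlay text
reference_resolution = 1500  # hypothetical reference dimension
max_res_dimension = 1280     # larger of the video's width and height
font_scale = base_font_scale / (reference_resolution / max_res_dimension)

frame = cv2.imread("frame0.png")  # placeholder frame image
cv2.putText(frame, "Timer: 12.3 s", (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, font_scale, (0, 255, 0), 2)
cv2.imwrite("frame0_annotated.png", frame)
```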
", + "created_at": "2021-01-18T13:19:36Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "failed to install simba in linux", + "body": "**Describe the bug**\r\nwhen I try to install simba in linux system, incompatible occurs\r\n\r\n**To Reproduce**\r\nAs recommended, \r\nFirstly, Run pip install pip==20.1.1\r\nNext, run pip install simba-uw-tf\r\n![screenshot](https://user-images.githubusercontent.com/32728290/103969143-4047da00-51a0-11eb-832f-932e5ee5fc2b.png)\r\n\r\nThen I tried pip install simba-uw-tf --no-dependencies\r\n![screenshot_1](https://user-images.githubusercontent.com/32728290/103969513-05927180-51a1-11eb-8f9f-74a51b7fd4c8.png)\r\nBut when I try to install dlc, no dlc satified the requirements of simba. \r\n![screenshot_2](https://user-images.githubusercontent.com/32728290/103970141-81d98480-51a2-11eb-8464-f902b6b6b573.png)\r\nAnd THEN I go throuth requiments of dlc and simba\r\ndlc requires pandas >= 1.0.1, while simba requires pandas 0.25.3\r\n![screenshot_3](https://user-images.githubusercontent.com/32728290/103970246-bb11f480-51a2-11eb-8d9c-32d0fa3decc7.png)\r\n\r\n**Desktop (please complete the following information):**\r\n - ubantu20.04\r\n - Python3.6\r\n - anaconda3\r\n\r\nDo you have any other suggestions?\r\n ", + "user": "Wutaijing9", + "reaction_cnt": 0, + "created_at": "2021-01-08T03:17:10Z", + "updated_at": "2021-01-11T18:00:03Z", + "author": "Wutaijing9", + "comments": [ + { + "body": "Hi @Wutaijing9 - first, if you are in Linux - use the simba dev version - `pip install simba-uw-tf-dev`, not the simba-uw-tf or simba-uw-no-tf versions. \r\n\r\nThe clash you are seeing is because simba-uw-tf wraps around deeplabcut==2.0.9, which does not require pandas >=1.0. However, I recommend using the DLC interface to generate your tracking, rather than the simba-dlc interface. More info here: https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_DLC.md", + "created_at": "2021-01-08T03:51:05Z", + "author": "sronilsson" + }, + { + "body": "> Hi @Wutaijing9 - first, if you are in Linux - use the simba dev version - `pip install simba-uw-tf-dev`, not the simba-uw-tf or simba-uw-no-tf versions.\r\n> \r\n> The clash you are seeing is because simba-uw-tf wraps around deeplabcut==2.0.9, which does not require pandas >=1.0. However, I recommend using the DLC interface to generate your tracking, rather than the simba-dlc interface. More info here: https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_DLC.md\r\n\r\nI do try simba-uw-tf-dev. \r\npip install https://files.pythonhosted.org/packages/f7/cb/69a7a1dfc31068d3b96bf69822fda9140267e63e4983544573017b579a67/Simba_UW_tf_dev-0.77.1-py3-none-any.whl\r\n(Since pip install simba-uw-tf-dev reported a lot of bugs, even tried no dependencies)\r\nIt did not work when I launch simba. Do you have any more suggesdtions?\r\n![screenshot_4](https://user-images.githubusercontent.com/32728290/104005785-99cff900-51e0-11eb-9a91-02dd03f4321e.png)\r\n\r\n", + "created_at": "2021-01-08T10:38:23Z", + "author": "Wutaijing9" + }, + { + "body": "@Wutaijing9. Please delete the environment and try again use pip install simba-uw-tf-dev==0.77.1\r\n\r\nOn Linux, somehow the installation of pip install simba-uw-tf-dev is defaulted to version 0.44 which is outdated.\r\n\r\nPlease let me know if that works for you.", + "created_at": "2021-01-08T18:06:21Z", + "author": "sgoldenlab" + }, + { + "body": "> @Wutaijing9. 
Please delete the environment and try again use pip install simba-uw-tf-dev==0.77.1\r\n> \r\n> On Linux, somehow the installation of pip install simba-uw-tf-dev is defaulted to version 0.44 which is outdated.\r\n> \r\n> Please let me know if that works for you.\r\n\r\nThis reduced bugs compared to simba-uw-tf-dev. And bugs it reported were as follows\r\n![screenshot_5](https://user-images.githubusercontent.com/32728290/104147872-08909a80-540b-11eb-8ee0-6d52e716a27b.png)\r\n", + "created_at": "2021-01-11T04:46:38Z", + "author": "Wutaijing9" + }, + { + "body": "@Wutaijing9 - you still get the same TclError preventing boot? \r\n", + "created_at": "2021-01-11T05:58:59Z", + "author": "sronilsson" + }, + { + "body": "> @Wutaijing9 - you still get the same TclError preventing boot?\r\nExcited thouth I don't know why.\r\nNO MORE BUGS REPORTED NOW.\r\nconda create py37 -n python=3.7.3\r\npip install simba-uw-tf-dev==0.77.1\r\n\r\nsimba could launch with no bugs reported\r\n\r\n\r\nbefore \r\nconda create py36 -n python=3.6.0\r\npip install simba-uw-tf-dev==0.77.1\r\nget TclError preventing boot.\r\n\r\n", + "created_at": "2021-01-11T06:43:37Z", + "author": "Wutaijing9" + }, + { + "body": "Thanks for letting us know!", + "created_at": "2021-01-11T18:00:03Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Whenever i extract frame rates from a video dataset folder to a train_1 folder. After extracting frame rates the folder is empty where as pycharm shows it has been done successfully. Any one aware of this issue? My code", + "body": "Whenever i extract frame rates from a video dataset folder to a train_1 folder. After extracting frame rates the folder is empty where as pycharm shows it has been done successfully. Any one aware of this issue? My code\r\nfor i in tqdm(range(train.shape[0])):\r\n count = 0\r\n videoFile = train['video_name'][i]\r\n cap = cv2.VideoCapture('UCF/'+videoFile.split(' ')[0].split('/')[1]) # capturing the video from the given path\r\n frameRate = cap.get(5) #frame rate\r\n x=1\r\n while(cap.isOpened()):\r\n frameId = cap.get(1) #current frame number\r\n ret, frame = cap.read()\r\n if (ret != True):\r\n break\r\n if (frameId % math.floor(frameRate) == 0):\r\n # storing the frames in a new folder named train_1\r\n filename =r\"D:\\New Projects Pycharm\\Videoclassi\\tttt/\" + videoFile.split('/')[1].split(' ')[0] +\"_frame%d.jpg\" % count;count+=1\r\n cv2.imwrite(filename, frame)\r\n cap.release()\r\n\r\n_Originally posted by @Qasimster in https://github.com/sgoldenlab/simba/issues/8#issuecomment-756070871_", + "user": "Qasimster", + "reaction_cnt": 0, + "created_at": "2021-01-07T11:53:55Z", + "updated_at": "2021-01-08T19:06:20Z", + "author": "Qasimster", + "comments": [ + { + "body": "Hi @Qasimster - what comes to mind is the file path that you write the file to (Videoclassi/ttt/) might not exist. If you write to a path that does not exist it can appear like opencv are saving the frames but you won't find them. \r\n\r\nBefore your loop, after grabbing the video:\r\n```\r\nimport os\r\n\r\nmy_path = r\"D:\\New Projects Pycharm\\Videoclassi\\tttt\"\r\nif not os.path.exists(my_path)\r\n os.makedirs(my_path)\r\n```\r\n\r\n\r\nand in your loop:\r\n\r\n```\r\nfilename = 'frame' + str(counter) + '.png'\r\nsave_name.= os.path.join(my_path, filename)\r\ncv2.imwrite(save_name, frame)\r\ncounter+=1\r\n\r\n```\r\n", + "created_at": "2021-01-07T14:54:06Z", + "author": "sronilsson" + }, + { + "body": "Sir tttt exists and i tried your method too still it isnt working. 
Have a look\r\n\r\nimport cv2 # for capturing videos\r\nimport math # for mathematical operations\r\nimport matplotlib.pyplot as plt # for plotting the images\r\n# %matplotlib inline\r\nimport pandas as pd\r\nfrom keras.preprocessing import image # for preprocessing the images\r\nimport numpy as np # for mathematical operations\r\nfrom keras.utils import np_utils\r\nfrom skimage.transform import resize # for resizing images\r\nfrom sklearn.model_selection import train_test_split\r\nimport glob\r\nfrom tqdm import tqdm\r\n\r\n\r\n# open the .txt file which have names of training videos\r\nf = open(\"trainlist01.txt\", \"r\")\r\ntemp = f.read()\r\nvideos = temp.split('\\n')\r\n\r\n# creating a dataframe having video names\r\ntrain = pd.DataFrame()\r\ntrain['video_name'] = videos\r\ntrain = train[:-1]\r\nprint(\"Train \",train.head())\r\n\r\n# open the .txt file which have names of test videos\r\nf = open(\"testlist01.txt\", \"r\")\r\ntemp = f.read()\r\nvideos = temp.split('\\n')\r\n\r\n# creating a dataframe having video names\r\ntest = pd.DataFrame()\r\ntest['video_name'] = videos\r\ntest = test[:-1]\r\nprint(\"Test \",test.head())\r\n\r\nprint(\"-----------------------------------------------------------------------------------------------------------\")\r\n\r\n# creating tags for training videos\r\ntrain_video_tag = []\r\nfor i in range(train.shape[0]):\r\n train_video_tag.append(train['video_name'][i].split('/')[0])\r\n\r\ntrain['tag'] = train_video_tag\r\n\r\n# creating tags for test videos\r\ntest_video_tag = []\r\nfor i in range(test.shape[0]):\r\n test_video_tag.append(test['video_name'][i].split('/')[0])\r\n\r\ntest['tag'] = test_video_tag\r\n\r\nprint(\"Train tags ----------------\")\r\nprint(train['tag'])\r\nprint('---------------------------------------------------------')\r\nprint(\"Test tags ------------------\")\r\nprint(test['tag'])\r\n\r\nprint(\"-----------------------------------------------------------------------------------------------------------\")\r\n\r\n\r\n# storing the frames from training videos\r\nfor i in tqdm(range(train.shape[0])):\r\n count = 0\r\n videoFile = train['video_name'][i]\r\n cap = cv2.VideoCapture('UCF/'+videoFile.split(' ')[0].split('/')[1]) # capturing the video from the given path\r\n frameRate = cap.get(5) #frame rate\r\n x=1\r\n while(cap.isOpened()):\r\n frameId = cap.get(1) #current frame number\r\n ret, frame = cap.read()\r\n if (ret != True):\r\n break\r\n if (frameId % math.floor(frameRate) == 0):\r\n # storing the frames in a new folder named train_1\r\n my_path = r\"D:\\New Projects Pycharm\\Videoclassi\\tttt\"\r\n if not os.path.exists(my_path):\r\n os.makedirs(my_path)\r\n filename = 'frame' + str(counter) + '.png'\r\n save_name= os.path.join(my_path, filename)\r\n cv2.imwrite(save_name, frame)\r\n counter += 1\r\n cap.release()", + "created_at": "2021-01-07T15:02:21Z", + "author": "Qasimster" + }, + { + "body": "Just before saving the frame in the script, do a:\r\n\r\nprint(frame)\r\nprint(save_name) \r\n\r\nIt's either of those two, or both but most likely the path. What's printed? ", + "created_at": "2021-01-07T15:12:53Z", + "author": "sronilsson" + }, + { + "body": "It displays nothing.", + "created_at": "2021-01-07T15:22:16Z", + "author": "Qasimster" + }, + { + "body": "nothing? does it print an empty string? 
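For readers following this thread: the loop under discussion works once the output directory and video path both resolve. A corrected, self-contained sketch (the suggested snippet above is missing a colon after the os.path.exists check, assigns via save_name.=, and increments counter where the loop initializes count; the video path here is a placeholder):

```python
import math
import os

import cv2

video_path = "UCF/v_SomeClip_g01_c01.avi"  # placeholder -- use a real file
out_dir = r"D:\New Projects Pycharm\Videoclassi\tttt"
os.makedirs(out_dir, exist_ok=True)

# cv2.VideoCapture does not raise on a bad path, so fail fast here.
if not os.path.isfile(video_path):
    raise FileNotFoundError(video_path)

cap = cv2.VideoCapture(video_path)
frame_rate = cap.get(cv2.CAP_PROP_FPS)  # equivalent to cap.get(5)
if frame_rate < 1:
    raise ValueError("Could not read a valid FPS from the video")

count = 0
while cap.isOpened():
    frame_id = cap.get(cv2.CAP_PROP_POS_FRAMES)  # equivalent to cap.get(1)
    ret, frame = cap.read()
    if not ret:
        break
    if frame_id % math.floor(frame_rate) == 0:   # keep ~one frame per second
        save_name = os.path.join(out_dir, "frame%d.jpg" % count)
        if not cv2.imwrite(save_name, frame):    # imwrite returns False on failure
            raise IOError("Could not write " + save_name)
        count += 1
cap.release()
```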
", + "created_at": "2021-01-07T15:52:27Z", + "author": "sgoldenlab" + }, + { + "body": "It just displays nothing not even empty string", + "created_at": "2021-01-07T15:55:57Z", + "author": "Qasimster" + }, + { + "body": "Hmm.. that would mean that the code does not reach the print statement, it gets stuck somewhere before\r\n", + "created_at": "2021-01-07T15:58:29Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.4.0 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.4.0.\n
Release notes

Sourced from tensorflow-gpu's releases.

TensorFlow 2.4.0

Release 2.4.0

Major Features and Improvements

  • tf.distribute introduces experimental support for asynchronous training of models via the tf.distribute.experimental.ParameterServerStrategy API. Please see the tutorial to learn more.
  • MultiWorkerMirroredStrategy is now a stable API and is no longer considered experimental. Some of the major improvements involve handling peer failure and many bug fixes. Please check out the detailed tutorial on Multi-worker training with Keras.
  • Introduces experimental support for a new module named tf.experimental.numpy which is a NumPy-compatible API for writing TF programs. See the detailed guide to learn more. Additional details below.
  • Adds Support for TensorFloat-32 on Ampere based GPUs. TensorFloat-32, or TF32 for short, is a math mode for NVIDIA Ampere based GPUs and is enabled by default.
  • A major refactoring of the internals of the Keras Functional API has been completed, that should improve the reliability, stability, and performance of constructing Functional models.
  • Keras mixed precision API tf.keras.mixed_precision is no longer experimental and allows the use of 16-bit floating point formats during training, improving performance by up to 3x on GPUs and 60% on TPUs. Please see below for additional details. (See the sketch after this list.)
  • TensorFlow Profiler now supports profiling MultiWorkerMirroredStrategy and tracing multiple workers using the sampling mode API.
  • TFLite Profiler for Android is available. See the detailed guide to learn more.
  • TensorFlow pip packages are now built with CUDA11 and cuDNN 8.0.2.
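The mixed-precision item above in code form: a minimal sketch assuming TF >= 2.4 (the model layout is illustrative):

```python
import tensorflow as tf

# Compute in float16 while keeping variables in float32.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation="relu", input_shape=(8,)),
    # Keep the output layer in float32 for numerically stable results.
    tf.keras.layers.Dense(1, dtype="float32"),
])
print(tf.keras.mixed_precision.global_policy())  # mixed_float16
```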

Breaking Changes

  • TF Core:
    • Certain float32 ops run in lower precision on Ampere based GPUs, including matmuls and convolutions, due to the use of TensorFloat-32. Specifically, inputs to such ops are rounded from 23 bits of precision to 10 bits of precision. This is unlikely to cause issues in practice for deep learning models. In some cases, TensorFloat-32 is also used for complex64 ops. TensorFloat-32 can be disabled by running tf.config.experimental.enable_tensor_float_32_execution(False). (Spelled out in the sketch after this list.)
    • The byte layout for string tensors across the C-API has been updated to match TF Core/C++; i.e., a contiguous array of tensorflow::tstring/TF_TStrings.
    • C-API functions TF_StringDecode, TF_StringEncode, and TF_StringEncodedSize are no longer relevant and have been removed; see core/platform/ctstring.h for string access/modification in C.
    • tensorflow.python, tensorflow.core and tensorflow.compiler modules are now hidden. These modules are not part of TensorFlow public API.
    • tf.raw_ops.Max and tf.raw_ops.Min no longer accept inputs of type tf.complex64 or tf.complex128, because the behavior of these ops is not well defined for complex types.
    • XLA:CPU and XLA:GPU devices are no longer registered by default. Use TF_XLA_FLAGS=--tf_xla_enable_xla_devices if you really need them, but this flag will eventually be removed in subsequent releases.
  • tf.keras:
    • The steps_per_execution argument in model.compile() is no longer experimental; if you were passing experimental_steps_per_execution, rename it to steps_per_execution in your code. This argument controls the number of batches to run during each tf.function call when calling model.fit(). Running multiple batches inside a single tf.function call can greatly improve performance on TPUs or small models with a large Python overhead.
    • A major refactoring of the internals of the Keras Functional API may affect code that is relying on certain internal details:
      • Code that uses isinstance(x, tf.Tensor) instead of tf.is_tensor when checking Keras symbolic inputs/outputs should switch to using tf.is_tensor.
      • Code that is overly dependent on the exact names attached to symbolic tensors (e.g. assumes there will be ":0" at the end of the inputs, treats names as unique identifiers instead of using tensor.ref(), etc.) may break.
      • Code that uses full path for get_concrete_function to trace Keras symbolic inputs directly should switch to building matching tf.TensorSpecs directly and tracing the TensorSpec objects.
      • Code that relies on the exact number and names of the op layers that TensorFlow operations were converted into may have changed.
      • Code that uses tf.map_fn/tf.cond/tf.while_loop/control flow as op layers and happens to work before TF 2.4. These will explicitly be unsupported now. Converting these ops to Functional API op layers was unreliable before TF 2.4, and prone to erroring incomprehensibly or being silently buggy.
      • Code that directly asserts on a Keras symbolic value in cases where ops like tf.rank used to return a static or symbolic value depending on if the input had a fully static shape or not. Now these ops always return symbolic values.
      • Code already susceptible to leaking tensors outside of graphs becomes slightly more likely to do so now.
      • Code that tries directly getting gradients with respect to symbolic Keras inputs/outputs. Use GradientTape on the actual Tensors passed to the already-constructed model instead.
      • Code that requires very tricky shape manipulation via converted op layers in order to work, where the Keras symbolic shape inference proves insufficient.
      • Code that tries manually walking a tf.keras.Model layer by layer and assumes layers only ever have one positional argument. This assumption doesn't hold true before TF 2.4 either, but is more likely to cause issues now.
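The TF32 opt-out named in the TF Core item above, spelled out (a sketch; it only changes numerics on Ampere-class GPUs under TF >= 2.4):

```python
import tensorflow as tf

# TensorFloat-32 is on by default for float32 matmuls/convolutions on
# Ampere GPUs; this call restores full float32 precision.
tf.config.experimental.enable_tensor_float_32_execution(False)
```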
... (truncated)

Changelog

Sourced from tensorflow-gpu's changelog.

Release 2.4.0

Major Features and Improvements

Breaking Changes

  • TF Core:
    • Certain float32 ops run in lower precision on Ampere based GPUs, including

... (truncated)

Commits
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2020-12-18T17:42:17Z", + "updated_at": "2021-08-25T14:56:17Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #129.", + "created_at": "2021-08-25T14:56:15Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "There are frames are not classified as containing the behavior event when the probability are above threshold", + "body": "When I set the minimum bout to 5ms and threshold to 0.8, I found there're some frames are not annotated as the behavior event, but the probability > threshold[\r\n\"Screen\r\n](url)", + "user": "Yating-L", + "reaction_cnt": 0, + "created_at": "2020-11-25T22:54:42Z", + "updated_at": "2020-12-02T22:13:42Z", + "author": "Yating-L", + "comments": [ + { + "body": "Hi @Yating-L - what is the frame rate of the video in your screenshot? \r\n\r\nAre the frames in the screenshot consecutive, i.e, are they listed in the order they appeared in the video or is column V1 the frame #?\r\n", + "created_at": "2020-11-26T07:21:36Z", + "author": "sronilsson" + }, + { + "body": "The frame rate is 15 FPS. Yes, column V1 is the frame number. ", + "created_at": "2020-11-27T20:16:22Z", + "author": "Yating-L" + }, + { + "body": "@Yating-L - at 15fps, a single frame represents 66.6 (or 67ms), and a min bout of 5ms should therefore score every frame above 0.8 probability as containing the behavior, in other words a min bout of 5ms is equivalent to not having a min bout at all, or 0ms. \r\n\r\nIs it possible that the specific video is registered in the logs/video_info.csv file with a different fps than 15? \r\n\r\nThe min bout entry box in the SimBA GUI accepts the min bout length in milliseconds. Did you put `5` in this box? I'm asking as it could possibly be explained if `500` for example was put in this entry box. \r\n\r\nAre any of the frames in this video scored by the classifier as containing the behavior? ", + "created_at": "2020-11-27T22:36:06Z", + "author": "sronilsson" + }, + { + "body": "I checked the logs/video_info.csv file, fps is 15. I actually changed the min bout in project_config file not SimBA GUI at \r\n[Minimum_bout_lengths]\r\nmin_bout_1 = 5\r\n\r\nThe reason why I set it to 5ms is because I saw a mouse jumped 3 times in one event, when I set min bout to 200 ms. \r\n\r\nYes, there're frames classified as containing the behavior. The total number of behavior events is less than that when setting min bout to 200ms. \r\n\r\n", + "created_at": "2020-11-30T15:43:49Z", + "author": "Yating-L" + }, + { + "body": "And in the \"Video parameters\" tab, it says the fps of all my videos is 14, I have to manually correct it to 15 for all videos inside the GUI", + "created_at": "2020-11-30T15:57:33Z", + "author": "Yating-L" + }, + { + "body": "Thanks @Yating-L - very helpful, if you can help me with two questions so I can pin down the issue.\r\n\r\nWhat version and version number of SimBA are you running? \r\n\r\nDo you see the issue as well if you set `min_bout_1 = 0` ? ", + "created_at": "2020-11-30T17:17:37Z", + "author": "sgoldenlab" + }, + { + "body": "I use SimBA version 1.2\r\n\r\nI have the same issue when min_bout_1 = 0. The number of frames with probability > 0.8 is 79, but the number of jumping events is 51. ", + "created_at": "2020-11-30T18:52:32Z", + "author": "Yating-L" + }, + { + "body": "Thanks @Yating-L - could you update SimBA (version 1.2.9.2) and see if it persists? 
\r\n\r\nThere was a legacy hardcoded line in there, which did not allow positive classification in a single, or two consecutive frames, those classifications were removed by default. Your animal should now be able to rear for a single or two consecutive frames, if your min_bout length is set sufficiently short. Let me know how it goes. ", + "created_at": "2020-12-01T22:31:02Z", + "author": "sgoldenlab" + }, + { + "body": "It works now. Thank you for fixing this!", + "created_at": "2020-12-02T21:51:29Z", + "author": "Yating-L" + } + ] + }, + { + "title": "dependency conflict with scikit-learning", + "body": "**Describe the bug**\r\nimbalanced-learn 0.7.0 requires scikit-learn>=0.23 but simba-uw-no-tf 1.2.33 requires scikit-learn==0.22.2.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nInstall simba-uw-no-tf according to the regular Anaconda instructions\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python Version 3.6\r\n - Are you using anaconda? Yes\r\n \r\nIs this conflict really significant? Simba launches OK, but we haven't put through any data yet. ", + "user": "mauricev", + "reaction_cnt": 0, + "created_at": "2020-11-18T20:38:15Z", + "updated_at": "2020-11-19T07:29:28Z", + "author": "mauricev", + "comments": [ + { + "body": "Hey @mauricev - no need to worry about these warnings at all - they won't disrupt any function in SimBA. \r\n\r\nAs you start putting data through and hit any snags, just open up another issue or let us know on the gitter chat https://gitter.im/SimBA-Resource/community", + "created_at": "2020-11-19T07:29:27Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 2.3.1 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 2.3.1.\n
Release notes

Sourced from tensorflow-gpu's releases.

TensorFlow 2.3.1

Release 2.3.1

Bug Fixes and Other Changes

TensorFlow 2.3.0

Release 2.3.0

Major Features and Improvements

  • tf.data adds two new mechanisms to solve input pipeline bottlenecks and save resources:

    In addition, check out the detailed guide for analyzing input pipeline performance with TF Profiler.

  • tf.distribute.TPUStrategy is now a stable API and no longer considered experimental for TensorFlow. (earlier tf.distribute.experimental.TPUStrategy).
  • TF Profiler introduces two new tools: a memory profiler to visualize your model’s memory usage over time and a python tracer which allows you to trace python function calls in your model. Usability improvements include better diagnostic messages and profile options to customize the host and device trace verbosity level.
  • Introduces experimental support for Keras Preprocessing Layers API (tf.keras.layers.experimental.preprocessing.*) to handle data preprocessing operations, with support for composite tensor inputs. Please see below for additional details on these layers. (See the sketch after this list.)
  • TFLite now properly supports dynamic shapes during conversion and inference. We’ve also added opt-in support on Android and iOS for XNNPACK, a highly optimized set of CPU kernels, as well as opt-in support for executing quantized models on the GPU.
  • Libtensorflow packages are available in GCS starting this release. We have also started to release a nightly version of these packages.
  • The experimental Python API tf.debugging.experimental.enable_dump_debug_info() now allows you to instrument a TensorFlow program and dump debugging information to a directory on the file system. The directory can be read and visualized by a new interactive dashboard in TensorBoard 2.3 called Debugger V2, which reveals the details of the TensorFlow program including graph structures, history of op executions at the Python (eager) and intra-graph levels, the runtime dtype, shape, and numerical composition of tensors, as well as their code locations.
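To make the preprocessing-layers item above concrete, a minimal sketch assuming TF 2.3 (the data is made up):

```python
import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing

# Learn mean/variance from (made-up) data, then normalize model inputs.
norm = preprocessing.Normalization()
norm.adapt(tf.constant([[1.0], [2.0], [3.0]]))

model = tf.keras.Sequential([norm, tf.keras.layers.Dense(1)])
print(model(tf.constant([[2.0]])))  # input is normalized before the Dense layer
```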

Breaking Changes

  • Increases the minimum bazel version required to build TF to 3.1.0.
  • tf.data
    • Makes the following (breaking) changes to the tf.data.
    • C++ API: IteratorBase::RestoreInternal, IteratorBase::SaveInternal, and DatasetBase::CheckExternalState become pure-virtual and subclasses are now expected to provide an implementation.
    • The deprecated DatasetBase::IsStateful method is removed in favor of DatasetBase::CheckExternalState.
    • Deprecated overrides of DatasetBase::MakeIterator and MakeIteratorFromInputElement are removed.

... (truncated)

Changelog

Sourced from tensorflow-gpu's changelog.

Release 2.3.1

Bug Fixes and Other Changes

Release 2.2.1

... (truncated)

Commits

  • fcc4b96 Merge pull request #43446 from tensorflow-jenkins/version-numbers-2.3.1-16251
  • 4cf2230 Update version numbers to 2.3.1
  • eee8224 Merge pull request #43441 from tensorflow-jenkins/relnotes-2.3.1-24672
  • 0d41b1d Update RELEASE.md
  • d99bd63 Insert release notes place-fill
  • d71d3ce Merge pull request #43414 from tensorflow/mihaimaruseac-patch-1-1
  • 9c91596 Fix missing import
  • f9f12f6 Merge pull request #43391 from tensorflow/mihaimaruseac-patch-4
  • 3ed271b Solve leftover from merge conflict
  • 9cf3773 Merge pull request #43358 from tensorflow/mm-patch-r2.3
  • Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=2.3.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/configuring-github-dependabot-security-updates)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2020-11-13T17:43:56Z", + "updated_at": "2020-12-18T17:42:21Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #82.", + "created_at": "2020-12-18T17:42:19Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "SLEAP File Import Error", + "body": "HI again @sgoldenlab !\r\n\r\nI see a new error when attempting to import tracking data from SLEAP, which I will paste below. I am currently running the dev version of SimBA. Any help you can provide is greatly appreciated :)\r\n\r\n(simba_env) C:\\Users\\matth>simba\r\nwarning: Error opening file (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:901)\r\nwarning: C:/Users/matth/Documents/Modeling/SimBA\\test\\project_folder\\videos\\social_.mp4 (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:902)\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\matth\\anaconda3\\envs\\simba_env\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\matth\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 3048, in importh5\r\n importSLEAPbottomUP(self.configinifile,self.h5path.folder_path,idlist)\r\n File \"c:\\users\\matth\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\sleap_bottom_up_convert.py\", line 176, in importSLEAPbottomUP\r\n circleScale, fontScale, spacingScale = int(myRadius / (myResolution / maxResDimension)), float(myFontScale / (myResolution / maxResDimension)), int(mySpaceScale / (myResolution / maxResDimension))\r\nZeroDivisionError: division by zero", + "user": "MattKDawson", + "reaction_cnt": 0, + "created_at": "2020-11-04T05:36:28Z", + "updated_at": "2021-03-23T18:12:13Z", + "author": "MattKDawson", + "comments": [ + { + "body": "@MattKDawson,\r\n\r\nDid you import videos before importing the sleap tracking data? I think the reason of this zerodivision error is because it could not find the videos in the folder. Hence, please try to import the videos before importing the sleap tracking data.\r\n\r\nIf that does not solve your issue, please send one of the .slp file to me so I can test it out.\r\nPlease send it to jchoong@uw.edu.\r\n\r\nThanks,\r\n\r\n-JJ", + "created_at": "2020-11-04T17:41:04Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab !\r\n\r\nI will email you a .slp file for you to use :) . In SimBA, I always import the video files first before the tracking data. I also see the following message in the main SimBA window when attempting to import the .slp file:\r\n\r\n```\r\nConverting .slp into csv dataframes...\r\nProcessing social_.slp...\r\nWarning: The video name could not be found in the .SLP meta-data table\r\nSimBA therefore gives the imported CSV the same name as the SLP file.\r\nTo be sure that SimBAs slp import function works, make sure the .SLP file and the associated video file has the same file name - e.g., \"Video1.slp\" and \"Video1.slp\" borefore importing the videos and SLP files to SimBA.\r\n```\r\nThanks again for all your help with this! :)", + "created_at": "2020-11-04T18:35:11Z", + "author": "MattKDawson" + }, + { + "body": "Hi @sgoldenlab ,\r\n\r\nI'm having the exactly same issue here. My video and .slp file having the same name, and I've imported video first and then .SLP file. 
However, it still give me the warning:\r\nWarning: The video name could not be found in the .SLP meta-data table\r\nSimBA therefore gives the imported CSV the same name as the SLP file.\r\nTo be sure that SimBAs slp import function works, make sure the .SLP file and the associated video file has the same file name - e.g., \"Video1.slp\" and \"Video1.slp\" borefore importing the videos and SLP files to SimBA.\r\n\r\nAs a result, I failed to see .csv file generated in the project folder. The video is imported fine as I can see all frames in project folder. Would be really appreciate with any help with this issue! Thanks!", + "created_at": "2021-03-11T09:39:30Z", + "author": "Sunday77" + }, + { + "body": "@Sunday77 , would you mind sending the video and .slp file to jchoong@uw.edu so I can troubleshoot it? \r\n\r\nThank you for reporting,\r\nJJ", + "created_at": "2021-03-11T17:37:41Z", + "author": "sgoldenlab" + }, + { + "body": "I've had the same issue when the imported video file had a different name than the video filename referenced in the SLP file (and the generated CSV file (disregarding the extension)). I've renamed the imported video file in `/project_folder/videos` to the same name as generated CSV in `/project_folder/csv/input_csv/`. It works now.", + "created_at": "2021-03-12T18:07:41Z", + "author": "smidm" + } + ] + }, + { + "title": "Unable to run RF model. KeyError: Mouse_1_note_to_tail on 'Run RF Model'", + "body": "I'm training the RF model to predict behaviors for a 4bp single animal. I.e. Type of tracking: Classic Tracking. Config: 1 animal, 4bp.\r\n\r\nI'm using SimBA on linux (the Simba-UW-tf-dev-0.73) branch. \r\n\r\nThe pipeline works up until I reach the Run Machine Model section. I can train and validate the model on a single video. When trying to 'Run RF Model', SimBA runs for a brief while then returns the error:\r\n\r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"/home/blansdell/anaconda3/envs/dlc/lib/python3.6/site-packages/pandas/core/indexes/base.py\", line 2897, in get_loc\r\n return self._engine.get_loc(key)\r\n File \"pandas/_libs/index.pyx\", line 107, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/index.pyx\", line 131, in pandas._libs.index.IndexEngine.get_loc\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 1607, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\n File \"pandas/_libs/hashtable_class_helper.pxi\", line 1614, in pandas._libs.hashtable.PyObjectHashTable.get_item\r\nKeyError: 'Mouse_1_nose_to_tail'\r\n```\r\n\r\nI don't find Mouse_1_nose_to_tail in the extracted features. I do find 'Mouse_nose_to_tail', however. So it seems like the feature is not named correctly, but perhaps I'm missing something?", + "user": "benlansdell", + "reaction_cnt": 0, + "created_at": "2020-11-03T13:33:38Z", + "updated_at": "2020-11-04T14:07:46Z", + "author": "benlansdell", + "comments": [ + { + "body": "@benlansdell Thank you for catching my mistake! You are correct, I messed up the naming. I have fixed this and you can update your simba to version 0.74.1 and see if it works on your end.\r\n\r\nThanks,\r\nJJ", + "created_at": "2020-11-03T19:07:06Z", + "author": "sgoldenlab" + }, + { + "body": "Hi JJ, \r\n\r\nYes, that seems to have fixed it. Thank you!", + "created_at": "2020-11-04T14:07:46Z", + "author": "benlansdell" + } + ] + }, + { + "title": "I got a KeyError when run RF model after I updated to 1.2.29.", + "body": "I got a KeyError when run RF model after I updated to 1.2.29. 
\r\n\"Screen\r\n\r\n_Originally posted by @Yating-L in https://github.com/sgoldenlab/simba/issues/75#issuecomment-715322269_", + "user": "Yating-L", + "reaction_cnt": 0, + "created_at": "2020-10-23T16:02:22Z", + "updated_at": "2020-10-26T20:05:15Z", + "author": "Yating-L", + "comments": [ + { + "body": "Hi Yating, what is your pose_estimation_body_parts in your project_config.ini?\r\n\r\nThis can be found under *create ensemble settings* --> pose_estimation_body_parts \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/97033384-1fb0a200-1518-11eb-84ca-b57ce6ae8dce.png)\r\n\r\n", + "created_at": "2020-10-23T17:11:36Z", + "author": "sgoldenlab" + }, + { + "body": "pose_estimation_body_parts = 8", + "created_at": "2020-10-23T17:25:19Z", + "author": "Yating-L" + }, + { + "body": "@Yating-L , please change it to user_defined as shown in the image for now and see if it works. Meanwhile I will try to find out what causes it to change to 8. \r\n\r\nWhen creating the project, did you define your own bodyparts? Or did you use the pre-made schematics? \r\n\r\nBest,\r\nJJ", + "created_at": "2020-10-23T17:38:17Z", + "author": "sgoldenlab" + }, + { + "body": "I created the project using the pre-made schematics, 1 animals 8 bps. I guess it may due to the project was created with the earlier version of SimBA, pose_estimation_body_parts was set to 8 at that time?\r\n\r\nThanks!", + "created_at": "2020-10-23T18:12:53Z", + "author": "Yating-L" + }, + { + "body": "Ahh, I see. Does changing the pose_estimation_body_parts to user_defined in project_config.ini works for you?", + "created_at": "2020-10-23T18:14:31Z", + "author": "sgoldenlab" + }, + { + "body": "I don't have access to the lab computer now. I will try it out on Monday and let you know. Thanks!", + "created_at": "2020-10-23T18:23:06Z", + "author": "Yating-L" + } + ] + }, + { + "title": "Evaluation reports will be overwrite when run train model twice", + "body": "The model evaluation tools generate precision recall curve and learning Curve for each model but the filenames only include the behavior name, for example, for behavior attack, we will get attack_precision_recall. If I re-run training step, attack_precision_recall will be overwrite.\r\n\r\nI think like the other files, we should append the index number to the filename of the precision recall curve and learning Curve.", + "user": "Yating-L", + "reaction_cnt": 0, + "created_at": "2020-10-21T23:16:02Z", + "updated_at": "2020-10-23T12:53:00Z", + "author": "Yating-L", + "comments": [ + { + "body": "@Yating-L , I have added your suggestion to the script. Please upgrade to the latest version 1.2.29 and let me know if it works on your end.\r\n\r\n", + "created_at": "2020-10-22T16:58:33Z", + "author": "sgoldenlab" + }, + { + "body": "It worked! Thank you for fixing this.", + "created_at": "2020-10-22T17:36:32Z", + "author": "Yating-L" + }, + { + "body": "I got a KeyError when run RF model after I updated to 1.2.29. 
\r\n\"Screen\r\n", + "created_at": "2020-10-23T12:53:00Z", + "author": "Yating-L" + } + ] + }, + { + "title": "issue with feature extraction", + "body": "Hey, thanks for the great tool :) when I try to extract features I get this error message: \"ValueError: min_periods (1) must be <= window (0)\" and nothing shows in the features_extracted folder, everything looks fine with the outlier correction I have the CSV files in the right folders, I'm using maDLC on a single animal and tracking is ok, when I created the project on Simba I defined a new configuration with four bodyparts (left_ear, right_ear, snout, tailbase) and chose multi-tracking then imported h5 tracking data file (sk.h5), everything is ok with setting parameters and run outlier correction steps (I just skipped the roi part). Can you help me, please? Thanks in advance!", + "user": "filos93", + "reaction_cnt": 0, + "created_at": "2020-10-21T17:29:14Z", + "updated_at": "2020-10-26T12:56:21Z", + "author": "filos93", + "comments": [ + { + "body": "Hi @filos93! What comes to mind is that this could happen if the fps of your videos are less than 15. SimBA tries to grab features in rolling windows, with the smalest window being the fps / 15. If you have less than 15fps, that number will be a float lower than 1, and you can't grab a rolling window size that length (the rolling window is less than a single frame). Check out out this for more information: https://github.com/sgoldenlab/simba/issues/56\r\n\r\nWhat you want to do (because you have a user defined body-part configuration, rather than 8 as in the linked issue above), is open the feature extraction script for user defined body-parts. In the linked issue, I point to changing a value in the `extract_features_8bp.py` file. You want to do **exactly** the same as in the issue, except you want to make the change to the `extract_features_user_defined.py`file. \r\n\r\nHope that makes sense and let me know how it goes! ", + "created_at": "2020-10-22T03:51:55Z", + "author": "sgoldenlab" + }, + { + "body": "PS: depending on what version of SimBA you are running, the critical line is either on line 30, or possible line 80, and looks like this: \r\n\r\n`roll_windows_values = [2, 5, 6, 7.5, 15]` - if your fps is less than 15, you want the change the last value (i.e., 15) to your fps (so if your fps is 14, change the last value to 14, for example)", + "created_at": "2020-10-22T03:57:32Z", + "author": "sgoldenlab" + }, + { + "body": "Hey, thanks so much for your reply :) I changed code as you specified, unfortunately, I keep getting the same error: \"ValueError: min_periods (1) must be <= window (0)\" and I'm unable to get features extracted. Some other suggestions or things I could do/adjust? Thanks again ", + "created_at": "2020-10-22T07:09:00Z", + "author": "filos93" + }, + { + "body": "@filos93 ah, to understand better, what's the fps of your videos? And what does your roll_windows_values list look like? ", + "created_at": "2020-10-22T14:37:14Z", + "author": "sronilsson" + }, + { + "body": "Hey, thanks again for help, the fps of my video is 14 and these are from my video_info csv file: Video | fps | Resolution_width | Resolution_height | Distance_in_mm | pixels/mm -- | -- | -- | -- | -- | -- Test66 | 14 | 360 | 476 | 140 | 2. Then, this is from extract_features_user_defined.pyfile: roll_windows = [ ] roll_windows_values = [2, 5, 6, 7.5, 14] loopy = 0 (in roll_windows_values 14 was 15). 
Simba gui shows: \r\nPose-estimation body part setting for feature extraction: user_defined\r\nExtracting features from 1 files...\r\nProcessing \"Test66\". Fps: 14.0. mm/ppx: 2.0\r\nCalculating euclidean distances...\r\nCalculating rolling windows data...\r\nBut, then it doesn't go on. Please, let me know how I can fix it. Thank you :)\r\n", + "created_at": "2020-10-23T06:37:19Z", + "author": "filos93" + }, + { + "body": "Thanks @filos93 - can you post a screenshot or the error msg in the main terminal window? ", + "created_at": "2020-10-23T22:49:22Z", + "author": "sronilsson" + }, + { + "body": "Hey @sronilsson thanks for all your help :) the following is the error msg: \r\n\r\nException in thread Thread-3:\r\nTraceback (most recent call last):\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 4476, in extractfeatures\r\n extract_features_wotarget_user_defined(self.projectconfigini)\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\features_scripts\\extract_features_user_defined.py\", line 108, in extract_features_wotarget_use\r\nr_defined\r\n csv_df[colName] = csv_df[selectedCol].rolling(roll_windows[i], min_periods=1).mean()\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\window.py\", line 1877, in mean\r\n return super().mean(*args, **kwargs)\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\window.py\", line 1180, in mean\r\n return self._apply(\"roll_mean\", \"mean\", **kwargs)\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\window.py\", line 965, in _apply\r\n result = calc(values)\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\window.py\", line 958, in calc\r\n x, window, min_periods=self.min_periods, closed=self.closed\r\n File \"c:\\users\\filipppo\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\window.py\", line 939, in func\r\n return cfunc(arg, window, minp, index_as_array, closed, **kwargs)\r\n File \"pandas/_libs/window.pyx\", line 578, in pandas._libs.window.roll_mean\r\n File \"pandas/_libs/window.pyx\", line 349, in pandas._libs.window.get_window_indexer\r\n File \"pandas/_libs/window.pyx\", line 149, in pandas._libs.window.MockFixedWindowIndexer.__init__\r\n File \"pandas/_libs/window.pyx\", line 72, in pandas._libs.window._check_minp\r\nValueError: min_periods (1) must be <= window (0)", + "created_at": "2020-10-24T11:34:00Z", + "author": "filos93" + }, + { + "body": "Hey @filos93 - hmm, strange.. let's figure out that might be going on, that should have fixed it. Inside your feature extraction script (extract_features_user_defined.py), can you insert one line (Line 46 in the image below: print(fps)) and tell me whats printed outs - if your fps is 14 you should see `14` being printed. The most immediate way to explain this is that the fps of the video is either not stored as `14` in your video_config.csv file in the project log folder and the stored value is lower than that, or that the largest roll_window_values value is larger than 14. 
\r\n\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/97096927-43630d80-1628-11eb-9672-9fffa973a59c.png)\r\n", + "created_at": "2020-10-25T01:43:21Z", + "author": "sgoldenlab" + }, + { + "body": "hey @sgoldenlab I changed 14 to 15 fps in my video_info csv file and it worked! don't know why Simba gui was telling me video fps was 14 when it was 15, anyway many thanks for all your help :)", + "created_at": "2020-10-26T12:56:01Z", + "author": "filos93" + } + ] + }, + { + "title": "Fix for \"Scaled_movement_M1_M2\" errors in single animal projects", + "body": "I'm putting this here as an issue rather than a pull request because I know at least one lab member has a part of this fix on the dev wheel. \r\n\r\nThere are three instances I can spot where a single animal project would run into problems due to the use of . I didn't look exhaustively so it might pop up elsewhere.\r\n\r\nThe first and most important is in . Only 16 and 14 bp projects - which both assume two animals - are given size-scaled movement values. A fix at line 130 would be something like:\r\n\r\n```if pose_estimation_body_parts == '8' or pose_estimation_body_parts == '7' or pose_estimation_body_parts == '4':\r\n mousesize = (statistics.mean(outputDf['Mouse_nose_to_tail']))\r\n mouse1Max = mousesize * 8\r\n outputDf['Scaled_movement'] = (outputDf['Total_movement_all_bodyparts'] / (mouse1Max)).round(decimals=2)\r\n```\r\n\r\nThe solution becomes more elegant if you also replace line 129 so outputs from single and multi animal projects are the same and can be more simply called elsewhere:\r\n```outputDf['Scaled_movement'] = outputDf['Scaled_movement_M1_M2'].round(decimals=2)```\r\nso you only have 'Scaled_movement' to work with, but how that column is built in the machine output depends on the BP schema. The downside is you have to chase down all the and change them to .\r\n\r\nMore easily solved are having the processes that use . I spotted it at process_severity (\"Analyze Severity\" in the GUI) and in path_plot. I know the dev wheel has a fix (although it is currently right now, and I've called it simply …\r\n\r\n\r\nThese could be fixed by having be output the same for both multianimal and single animal projects (as above) \r\n\r\nSo, if you want to keep the single and multianimal output separate:\r\n``` if severityBool == 'yes':\r\n csv_df_combined[severityTarget] = csv_df[severityTarget].values\r\n if noAnimals == 2:\r\n csv_df_combined['Scaled_movement_M1_M2'] = csv_df['Scaled_movement_M1_M2'].values\r\n else:\r\n csv_df_combined['Scaled_movement'] = csv_df['Scaled_movement'].values\r\n columnNames = list(csv_df_combined)\r\n```\r\n\r\nIf you want to merge them to make handling these data cleaner, just omit the conditional and use . \r\n\r\n\r\nPlease do check me for mistakes.", + "user": "seanpaulbradley", + "reaction_cnt": 0, + "created_at": "2020-10-16T18:31:48Z", + "updated_at": "2020-10-19T16:09:51Z", + "author": "seanpaulbradley", + "comments": [ + { + "body": "Thanks @NIHRBC - very helpful. I spotted the issue and updated the pypi packages in `path_plot.py` and `rf_model_run.py` files. \r\n\r\nThe issue is that the \"severity\", as we designed it, was a way of checking how much animals are moving during interactions, and relies, as designed, on the user employing one of the built in body-part configurations. 
As you say though, there should nothing stopping users having one animal to employ it though (this is what I have fixed) - although the way it is coded it would be too much work for now to get it going with user defined body part configurations. \r\n\r\nI have to ensure that the user has one of the built in body-part configurations before calculating either the scaled movement for two animals, or one animal, not elegant I admit but my hope is that it works:\r\n\r\n````\r\nif pose_estimation_body_parts == '4' or '7' or '8' or '7':\r\n mouse1size = (statistics.mean(outputDf['Mouse_1_nose_to_tail']))\r\n mouse1Max = mouse1size * 8\r\n outputDf['Scaled_movement_M1'] = (outputDf['Total_movement_all_bodyparts_M1'] / (mouse1Max))\r\nif pose_estimation_body_parts == '16' or pose_estimation_body_parts == '14':\r\n mouse1size = (statistics.mean(outputDf['Mouse_1_nose_to_tail']))\r\n mouse2size = (statistics.mean(outputDf['Mouse_2_nose_to_tail']))\r\n mouse1Max = mouse1size * 8\r\n mouse2Max = mouse2size * 8\r\n outputDf['Scaled_movement_M1'] = (outputDf['Total_movement_all_bodyparts_M1'] / (mouse1Max))\r\n outputDf['Scaled_movement_M2'] = (outputDf['Total_movement_all_bodyparts_M2'] / (mouse2Max))\r\n outputDf['Scaled_movement_M1_M2'] = (outputDf['Scaled_movement_M1'] + outputDf['Scaled_movement_M2']) / 2\r\n .....\r\n\r\n\r\n````\r\n\r\n\r\n\r\n", + "created_at": "2020-10-18T02:10:12Z", + "author": "sgoldenlab" + }, + { + "body": "A few issues (again, not putting up a pull because it is on -dev):\r\n\r\nIn ```run_RF_model.py```, line 136, edit to ```mouse1size = (statistics.mean(outputDf['Mouse_nose_to_tail']))``` to match ```extract_features_*bp.py``` for single-animal projects.\r\n\r\nIn ```path_plot.py```, in the chunk beginning at line 140, \r\n\r\n1) Since you won't have attack values for a single animal project, I'd nest that under the conditional for ```noAnimals = 2```\r\n\r\n2) For the ```noAnimals ==1: condition```, we need to define the parts we need from ```m1tuple```, i.e.\r\n``` \r\n if noAnimals == 1:\r\n midpoints = list(zip(np.linspace(m1tuple[0], 3), np.linspace(m1tuple[1], 3)))\r\n locationEventX, locationEventY = midpoints[1]\r\n```\r\n\r\nJust from what I've seen, I'm fully agreed that trying to get this to work for custom configurations would be a drag.\r\n\r\nWhat I might do for my own purposes is try to plug in a method for plotting multiple classifications directly rather than severity by printing on different color overlays to represent different behaviors. I'm not sure when (or if) I'll have the time to do it, but I'll share it with you when I'm done if you like.\r\n\r\n\r\n\r\n", + "created_at": "2020-10-19T16:09:51Z", + "author": "seanpaulbradley" + } + ] + }, + { + "title": "SLEAP File Import: Issues & Questions", + "body": "Hi @sgoldenlab ! I actually have another issue when trying to import .SLP files into my project. 
When trying to import my .SLP file into my project, I receive the following error readout in the Anaconda prompt:\r\n\r\nTraceback (most recent call last):\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 3048, in importh5\r\n importSLEAPbottomUP(self.configinifile,self.h5path.folder_path,idlist)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\sleap_bottom_up_convert.py\", line 138, in importSLEAPbottomUP\r\n dataDf.loc[len(dataDf)] = outRow\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\pandas\\core\\indexing.py\", line 205, in __setitem__\r\n self._setitem_with_indexer(indexer, value)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\pandas\\core\\indexing.py\", line 406, in _setitem_with_indexer\r\n return self._setitem_with_indexer_missing(indexer, value)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\pandas\\core\\indexing.py\", line 647, in _setitem_with_indexer_missing\r\n raise ValueError(\"cannot set a row with mismatched columns\")\r\nValueError: cannot set a row with mismatched columns\r\n\r\nI also see the following message in the main SimBA window at the same time:\r\n\r\nWarning: The video name could not be found in the .SLP meta-data table\r\nSimBA therefore gives the imported CSV the same name as the SLP file.\r\nTo be sure that SimBAs slp import function works, make sure the .SLP file and the associated video file has the same file name - e.g., \"Video1.slp\" and \"Video1.slp\" before importing the videos and SLP files to SimBA.\r\n\r\nIs this an issue with how my SLEAP model was created, or is this something to do with SimBA? My SLEAP model was mostly just a test and was created with different tracking points than what SimBA expects. I also trained trained my SLEAP model using the top-down training model and not the bottom-up model. Could these be creating this issue?\r\n\r\nOn a similar note, do you by chance know if there is an optimal resolution for capturing test footage? My experiments have been captured in 720x480 up until now but do you know if SimBA would benefit from a higher resolution?\r\n\r\nThanks! :)\r\n", + "user": "MattKDawson", + "reaction_cnt": 0, + "created_at": "2020-10-14T16:20:35Z", + "updated_at": "2020-10-20T18:57:34Z", + "author": "MattKDawson", + "comments": [ + { + "body": "Hi @MattKDawson - this warning happens when SimBA looks in the SLP h5 file, which should store the name of the video you are processing, but SimBA can't find this entry. The `ValueError` you are seeing may be related to this, because SimBA also can't find the number of columns to match the number of body-parts you have tracked. \r\n\r\nTo me this suggests that something may have changed in SLEAP since I wrote this import code, and conversations with other sleap users I have had, suggests that SLEAP now may generate multiple SLP files. Do you see any other SLP file generated when you ran inference in SLEAP, and if you do, can you try to import this file into SimBA? \r\n\r\nFor the optimal resolution: it's the smallest possible resolution where you can still reliably see the behaviors your are interested in. 
This will make any pose-estimation and visual renderings in SimBA the fastest it can be without loosing reliable pose, which is most important for accurate classification. ", + "created_at": "2020-10-15T13:31:20Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab ! After running an inference in SLEAP, I only see the SLEAP project file. SLEAP does have the option to export your inference as an .h5 file but that is not something automatically done by SLEAP.\r\n\r\nThank you for your input on the optimal resolution! 480p seems to work for SLEAP so far and my fingers are crossed that SimBA is also able to use that resolution for the behaviours I would like to capture.", + "created_at": "2020-10-18T23:19:46Z", + "author": "MattKDawson" + } + ] + }, + { + "title": "Tkinter Issue - Blank SimBA GUI", + "body": "Hello again @sgoldenlab ! Sorry to be posting another issue for you to deal with but I am having some trouble getting the SimBA GUI to work after installing via `pip install simba-uw-tf`. After creating a new project, when I load the project_config I see the GUI window but all the tabs except ROI are blank (see image below). I also get the following error readout in the Anaconda prompt after loading the project_config:\r\n\r\n(simba_env) C:\\Users\\carol>simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 3256, in \r\n launchloadprojectButton = Button(lpMenu,text='Load Project',command=lambda:self.launch(lpMenu,inputcommand))\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 3267, in launch\r\n command(self.projectconfigini.file_path)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 3601, in __init__\r\n self.bp1 = DropDownMenu(label_heatmap,'Bodypart',bpoptions,'15')\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 777, in __init__\r\n self.popupMenu = OptionMenu(self,self.dropdownvar,*self.choices,command=com)\r\nTypeError: __init__() missing 1 required positional argument: 'value'\r\n\r\n\r\nAny help and advice you can give me is greatly appreciated! Thank you for your time!\r\n\r\n\r\n![simba_blank_gui](https://user-images.githubusercontent.com/71152165/95510570-64163c80-0973-11eb-8793-66b5446f9423.png)\r\n", + "user": "MattKDawson", + "reaction_cnt": 0, + "created_at": "2020-10-08T22:23:41Z", + "updated_at": "2020-10-09T16:12:31Z", + "author": "MattKDawson", + "comments": [ + { + "body": "@MattKDawson , can you let us know which version of simba you are currently using? Go to cmd and type ` pip show simba-uw-tf `\r\n\r\nPlease make sure that you are using the latest version, which is version 1.2.23.\r\n\r\nPlease let us know if this happens to you on the latest version.\r\n\r\nThanks,\r\nJJ", + "created_at": "2020-10-09T00:01:07Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab ! 
I am currently using the latest version, 1.2.23, based on the info returned by the `pip show simba-uw-tf` command :)", + "created_at": "2020-10-09T00:11:54Z", + "author": "MattKDawson" + }, + { + "body": "@MattKDawson - I thought I had sorted this potential issue in an earlier version so surprised to see it return.\r\n\r\nCan you install the development version of simba and let me know if the error persists? \r\n\r\nSo first `pip uninstall simba-uw-tf`\r\n\r\nThen `pip install simba-uw-tf-dev'\r\n\r\nthen Launch SimBA with `simba` - and see if you encounter the same error loading your project?\r\n\r\n", + "created_at": "2020-10-09T02:31:40Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab ! I uninstalled `simba-uw-tf` and installed the development version of SimBA, and now see this error readout when attempting to launch SimBA with `simba`:\r\n\r\n(simba_env) C:\\Users\\carol>simba\r\nTraceback (most recent call last):\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\carol\\anaconda3\\envs\\simba_env\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 44, in \r\n from simba.train_multiple_models_from_meta import *\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\train_multiple_models_from_meta.py\", line 26, in \r\n import shap\r\nModuleNotFoundError: No module named 'shap'", + "created_at": "2020-10-09T04:02:11Z", + "author": "MattKDawson" + }, + { + "body": "I have to package it with it, I forgot. Try running pip install shap, then start simba. ", + "created_at": "2020-10-09T04:09:05Z", + "author": "sgoldenlab" + }, + { + "body": "I think that did it! I can now see the GUI. Thank you again for your help!", + "created_at": "2020-10-09T04:22:04Z", + "author": "MattKDawson" + }, + { + "body": "@MattKDawson if you run into any further issues don't hesitate to open a new issue thread ", + "created_at": "2020-10-09T15:13:48Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab ! I actually have another issue when trying to import .SLP files into my project related to Tkinter. 
When trying to import my .SLP file into my project, I receive the following error readout in the Anaconda prompt:\r\n\r\nTraceback (most recent call last):\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 3048, in importh5\r\n importSLEAPbottomUP(self.configinifile,self.h5path.folder_path,idlist)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\sleap_bottom_up_convert.py\", line 138, in importSLEAPbottomUP\r\n dataDf.loc[len(dataDf)] = outRow\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\pandas\\core\\indexing.py\", line 205, in __setitem__\r\n self._setitem_with_indexer(indexer, value)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\pandas\\core\\indexing.py\", line 406, in _setitem_with_indexer\r\n return self._setitem_with_indexer_missing(indexer, value)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\pandas\\core\\indexing.py\", line 647, in _setitem_with_indexer_missing\r\n raise ValueError(\"cannot set a row with mismatched columns\")\r\nValueError: cannot set a row with mismatched columns\r\n\r\nI also see the following message in the main SimBA window at the same time:\r\n\r\nWarning: The video name could not be found in the .SLP meta-data table\r\nSimBA therefore gives the imported CSV the same name as the SLP file.\r\nTo be sure that SimBAs slp import function works, make sure the .SLP file and the associated video file has the same file name - e.g., \"Video1.slp\" and \"Video1.slp\" borefore importing the videos and SLP files to SimBA.\r\n\r\nIs this an issue with how my SLEAP model was created, or is this something to do with SimBA? My SLEAP model was mostly just a test and was created with different tracking points than what SimBA expects and was also trained using the top-down training model and not the bottom-up model. Could these be creating this issue?\r\n\r\nThanks! :)", + "created_at": "2020-10-09T16:12:30Z", + "author": "MattKDawson" + } + ] + }, + { + "title": "DLL Error When Attempting to Run SimBA", + "body": "When I attempt to run SimBAxTf in a virtual environment using the Anaconda command prompt, I receive a WinError 126 (see below for the full error readout). I did have some issues with getting SimBA to install and I think that I installed CUDA and cuDNN properly, but either of those may be causing the issues related to my error. 
Any help or advice you could give me would be extremely appreciated, as my coding experience is limited to basics and I am at a loss as to how to troubleshoot my error.\r\n\r\nThank you!\r\n\r\nERROR READOUT:\r\n(simba_env) C:\\Users\\carol>simba\r\nTraceback (most recent call last):\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\carol\\anaconda3\\envs\\simba_env\\Scripts\\simba.exe\\__main__.py\", line 4, in \r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\SimBA.py\", line 48, in \r\n from simba.ROI_freehand_draw_3 import roiFreehand\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\simba\\ROI_freehand_draw_3.py\", line 6, in \r\n from shapely.geometry import Polygon\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\shapely\\geometry\\__init__.py\", line 4, in \r\n from .base import CAP_STYLE, JOIN_STYLE\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\shapely\\geometry\\base.py\", line 18, in \r\n from shapely.coords import CoordinateSequence\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\shapely\\coords.py\", line 8, in \r\n from shapely.geos import lgeos\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\site-packages\\shapely\\geos.py\", line 145, in \r\n _lgeos = CDLL(os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll'))\r\n File \"c:\\users\\carol\\anaconda3\\envs\\simba_env\\lib\\ctypes\\__init__.py\", line 348, in __init__\r\n self._handle = _dlopen(self._name, mode)\r\nOSError: [WinError 126] The specified module could not be found", + "user": "MattKDawson", + "reaction_cnt": 0, + "created_at": "2020-10-03T04:41:42Z", + "updated_at": "2020-10-04T01:56:57Z", + "author": "MattKDawson", + "comments": [ + { + "body": "Hi @MattKDawson - yes this is a common issue, and there should be an easy fix.\r\n\r\n1. After activating your `simba_env`, type `pip uninstall shapely`. \r\n\r\n2. Next, type `pip install shapely`. \r\n\r\n3. Then, try booting simba again with `simba`. If that does not work, try `conda install shapely` in step 3 above and see if simba boots by typing `simba`. If that also does not work:\r\n\r\n1. Go to this address https://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely and download Shapely‑1.7.1‑cp36‑cp36m‑win_amd64.whl\r\n\r\n2. Next, with your `simba_env` activated, navigate to your downloads folder (I presume you should type `cd c:\\users\\carol\\Downloads` \r\n\r\n3. Next, type pip install Shapely‑1.7.1‑cp36‑cp36m‑win_amd64.whl\r\n\r\n4. Check if SimBA starts by typing `simba`\r\n\r\nLet me know if that solves it for you!\r\n\r\n", + "created_at": "2020-10-04T00:14:55Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab !\r\n\r\nUsing `conda install shapely` worked! 
Thank you so much for your help, SimBA will be such a valuable tool for me and my lab.\r\n\r\nTake care!", + "created_at": "2020-10-04T01:56:57Z", + "author": "MattKDawson" + } + ] + }, + { + "title": "Question on behaviour analysis", + "body": "Hi,\r\n\r\nis SimBA capable of detecting behaviours such as locomotion, rearing, sniffing on a single mouse?\r\n\r\nThank you in advance!", + "user": "shinhs0506", + "reaction_cnt": 0, + "created_at": "2020-09-28T17:18:10Z", + "updated_at": "2020-09-28T20:40:54Z", + "author": "shinhs0506", + "comments": [ + { + "body": "Yes, SimBA can be used to generate classifiers for all of these behaviors once they have been pose-estimated by DLC, SLEAP, DPK, etc. Locomotion can be determined using descriptive statistics, with no need for SimBA, but SimBA can still be used to generate and visualize these descriptive features. \r\n\r\nYou can download classifiers for sniffing from the SimBA OSF repository, and iterate the model to work under your conditions. The instructions to do this are available in the SimBA documentation (https://github.com/sgoldenlab/simba#pipeline-). \r\n\r\nOthers have created rearing classifiers that you could try to start with (https://github.com/saviochan/SimBA-OpenFieldArena).", + "created_at": "2020-09-28T20:40:54Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 1.15.4 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 1.15.4.\n
Release notes

Sourced from tensorflow-gpu's releases.


TensorFlow 1.15.4


Release 1.15.4


Bug Fixes and Other Changes


TensorFlow 1.15.3


Bug Fixes and Other Changes


TensorFlow 1.15.2


Release 1.15.2


Note that this release no longer has a single pip package for GPU and CPU. Please see #36347 for history and details.


Bug Fixes and Other Changes


TensorFlow 1.15.0


Release 1.15.0


This is the last 1.x release for TensorFlow. We do not expect to update the 1.x branch with features, although we will issue patch releases to fix vulnerabilities for at least one year.


Major Features and Improvements

  • As announced, tensorflow pip package will by default include GPU support (same as tensorflow-gpu now) for the platforms we currently have GPU support (Linux and Windows). It will work on machines with and without Nvidia GPUs. tensorflow-gpu will still be available, and CPU-only packages can be downloaded at tensorflow-cpu for users who are concerned about package size.
  • TensorFlow 1.15 contains a complete implementation of the 2.0 API in its compat.v2 module. It contains a copy of the 1.15 main module (without contrib) in the compat.v1 module. TensorFlow 1.15 is able to emulate 2.0 behavior using the enable_v2_behavior() function. This enables writing forward compatible code: by explicitly importing either tensorflow.compat.v1 or tensorflow.compat.v2, you can ensure that your code works without modifications against an installation of 1.15 or 2.0 (see the sketch after this list).
  • EagerTensor now supports numpy buffer interface for tensors.
  • Add toggles tf.enable_control_flow_v2() and tf.disable_control_flow_v2() for enabling/disabling v2 control flow.
  • Enable v2 control flow as part of tf.enable_v2_behavior() and TF2_BEHAVIOR=1.
  • AutoGraph translates Python control flow into TensorFlow expressions, allowing users to write regular Python inside tf.function-decorated functions. AutoGraph is also applied in functions used with tf.data, tf.distribute and tf.keras APIs.
  • Adds enable_tensor_equality(), which switches the behavior such that:
      • Tensors are no longer hashable.

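The sketch referred to in the list above: a minimal example of the compat imports and of AutoGraph, assuming a TF 1.15 or 2.x installation; the toy function is illustrative:

```python
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf

tf1.enable_v2_behavior()  # emulate 2.0 behavior on a 1.15 install

# AutoGraph translates plain Python control flow inside a tf.function
# into TensorFlow graph expressions (here, the `if` becomes a tf.cond).
@tf.function
def keep_if_positive_sum(x):
    if tf.reduce_sum(x) > 0:
        return x
    return tf.zeros_like(x)

print(keep_if_positive_sum(tf.constant([1.0, -2.0, 3.0])))
```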
... (truncated)

Changelog

Sourced from tensorflow-gpu's changelog.


Release 1.15.4


Bug Fixes and Other Changes


Release 2.3.0


Major Features and Improvements

  • tf.data adds two new mechanisms to solve input pipeline bottlenecks and save resources:

... (truncated)

Commits
  • df8c55c Merge pull request #43442 from tensorflow-jenkins/version-numbers-1.15.4-31571
  • 0e8cbcb Update version numbers to 1.15.4
  • 5b65bf2 Merge pull request #43437 from tensorflow-jenkins/relnotes-1.15.4-10691
  • 814e8d8 Update RELEASE.md
  • 757085e Insert release notes place-fill
  • e99e53d Merge pull request #43410 from tensorflow/mm-fix-1.15
  • bad36df Add missing import
  • f3f1835 No disable_tfrt present on this branch
  • 7ef5c62 Merge pull request #43406 from tensorflow/mihaimaruseac-patch-1
  • abbf34a Remove import that is not needed
  • Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=1.15.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/configuring-github-dependabot-security-updates)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2020-09-25T22:33:42Z", + "updated_at": "2020-11-13T17:44:00Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #79.", + "created_at": "2020-11-13T17:43:58Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Outlier Correction length mismatch", + "body": "I am using the development version of SIMBA (0.61) primarily with the GUI on Windows 10 with Python 3.7 through Anaconda.\r\n\r\nMy pose_config is very similar to that in #66 such that I have 2 animals with the same body parts and 1 animal with different body parts.\r\n\r\nI have successfully imported a multi-animal DLC H5 file and have verified the presence of the .csv file located in the \"input_csv\" directory. Using the most recent version of DLC I have already corrected for outliers and wish to skip this step in SIMBA. After clicking \"Skip outlier correction\" I receive the following error:\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\brand\\anaconda3\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\brand\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 3513, in \r\n button_skipOC = Button(label_outliercorrection,text='Skip outlier correction (CAUTION)',fg='red', command=lambda:skip_outlier_c(self.projectconfigini))\r\n File \"c:\\users\\brand\\anaconda3\\envs\\simba\\lib\\site-packages\\simba\\outlier_scripts\\skip_outlierCorrection.py\", line 52, in skip_outlier_c\r\n csv_df.columns = newHeaders\r\n File \"c:\\users\\brand\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\generic.py\", line 5192, in __setattr__\r\n return object.__setattr__(self, name, value)\r\n File \"pandas/_libs/properties.pyx\", line 67, in pandas._libs.properties.AxisProperty.__set__\r\n File \"c:\\users\\brand\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\generic.py\", line 690, in _set_axis\r\n self._data.set_axis(axis, labels)\r\n File \"c:\\users\\brand\\anaconda3\\envs\\simba\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 183, in set_axis\r\n \"values have {new} elements\".format(old=old_len, new=new_len)\r\nValueError: Length mismatch: Expected axis has 57 elements, new values have 69 elements\r\n\r\nAny insight would be appreciated, thanks.", + "user": "blogeman", + "reaction_cnt": 0, + "created_at": "2020-09-16T15:57:41Z", + "updated_at": "2021-03-15T19:46:56Z", + "author": "blogeman", + "comments": [ + { + "body": "Hi @blogeman - thanks for reporting - and sorry - I don't have your kind of datasets to troubleshoot with and can't replicate it so will have to ask ask questions to see if we can figure it out! \r\n\r\nWhat did you name your individual body-parts in the SimBA user-defined pose configuration menu? SimBA needs to know which body-part belongs to which animal, and, currently, the process of getting that done is to add a number at the end of each body-part in the user-defined pose-configuration box:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/93389940-c4f19f80-f821-11ea-98e9-976dcd140edd.png)\r\n\r\nAny body part ending with `_1` belongs to the first animal, any body part ending with `_2` belongs to the second animal, etc..\r\n\r\nThe error that you are seeing could be produced by a typo here, which could cause SimBA to expects more (or less) columns than actually exists. 
\r\n\r\n\r\n", + "created_at": "2020-09-16T20:42:18Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab thanks very much for your assistance, much appreciated. After some investigation, I think I am onto something. Per your recommendation, the individual body parts seem to have the correct spelling and possess the \"_X\" to indicate which animal the point belongs to in the pose-configuration menu. For what its worth, the body parts for animal 3 are exactly the same as for animal 2 but with the \"_3\" instead of the \"_2\".\r\n![pose_config](https://user-images.githubusercontent.com/62612214/93482385-a812a600-f8cd-11ea-8087-c42434d65aee.PNG)\r\n\r\n...but when I load the project the opening GUI only displays the body parts for animals 1 and 2 while omitting the bodyparts from animal 3.\r\n![opening_GUI](https://user-images.githubusercontent.com/62612214/93482642-ec9e4180-f8cd-11ea-8ca2-7f4c738a1ec2.PNG)\r\n\r\nThe mismatch error I'm getting is expecting 57 columns while the file has 69, a difference of 12 columns. Knowing that this data comes from DLC, which for each body part uses 3 columns (X, Y, and p-value), the error would be consistent with SIMBA not expecting animal_3 which has 4 body parts (a total of 12 columns). This corresponds to the opening GUI not displaying animal_3.......but I don't understand why its missing animal_3.", + "created_at": "2020-09-17T14:17:52Z", + "author": "blogeman" + }, + { + "body": "Thanks @blogeman - to figure out where it goes wrong:\r\n\r\n1. Could you open the `project_folder\\logs\\measures\\pose_configs\\bp_names` csv file and paste a screenshot of the content? I think there might be something there, where the bodyparts of the last animals isn't saved properly at import. \r\n\r\n2. Could you open the `project_folder/project_config.ini`, and tell me what it reads on line 73, under [create ensemble settings], after `pose_estimation_body_parts = `. It should read user_defined. \r\n\r\n3. Also in the `project_folder/project_config.ini`, could you tell me what it reads after `animal_no` on line 8 under [General settings]? \r\n\r\nLastly, are you working with DeepLabCut data, or is it from SLEAP? \r\n\r\nEDIT: Sorry, scrolled up as saw that it was DLC!\r\n", + "created_at": "2020-09-17T18:29:14Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for troubleshooting this with me @sgoldenlab. Below are my responses:\r\n\r\n1. Here is the screenshot of the \"bp_names.csv\" file. All looks as it should to my eyes.\r\n![bp_names](https://user-images.githubusercontent.com/62612214/93538750-e4212780-f91c-11ea-8b00-4706bcdff960.PNG)\r\n\r\n2. Line 73 of the project_config.ini file reads \"pose_estimation_body_parts = user_defined\"\r\n\r\n3. Line 8 of the project_config.ini file reads \"animal_no = 3\"\r\n\r\nWhen I start to play around with the \"animal_no\" variable in the config file things behave strangely. For starters, even though it says 3 the GUI is only displaying lists for 2 animals (see screenshot from last post). If I change the animal_no to 1, it display all body parts inside the first list and the second list reads \"No body parts\". ![animal=1](https://user-images.githubusercontent.com/62612214/93539476-ee442580-f91e-11ea-976a-0402bb3c6f1f.PNG)\r\n\r\nIt seems as though the program is stuck creating 2 lists / animals. Does this make sense?\r\n\r\n", + "created_at": "2020-09-17T23:50:43Z", + "author": "blogeman" + }, + { + "body": "Thanks @blogeman - ah I think we are dealing with 2 separate issues, I'm onto it. 
\r\n\r\nOne more question.\r\n\r\nCan you tell me what you can see on Line 122 in the project config? A screenshot would be best, like this:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/93541399-33f6f300-f90b-11ea-9159-f1fdfe71d83d.png)\r\n\r\n\r\n ", + "created_at": "2020-09-18T00:28:35Z", + "author": "sgoldenlab" + }, + { + "body": "I think I got it, but will come back to this a little later on today. ", + "created_at": "2020-09-18T00:43:19Z", + "author": "sgoldenlab" + }, + { + "body": "Hmm, it looks like my id_list is empty...\r\n![list](https://user-images.githubusercontent.com/62612214/93543671-2ac94e80-f92a-11ea-93f4-492d4515e559.PNG)\r\n", + "created_at": "2020-09-18T01:10:43Z", + "author": "blogeman" + }, + { + "body": "Ah, that could be part of it. Let me make a small update first. Hang on. ", + "created_at": "2020-09-18T01:17:32Z", + "author": "sgoldenlab" + }, + { + "body": "@blogeman - something has gone wrong at the import but I think we can fix this error manually. \r\n\r\nFirst, update to Simba-dev==0.63. \r\n\r\nNext, in your `id_list` manually insert names for your three animals. E.g., \r\n\r\n`id_list = Animal1,Animal2,Animal3`\r\n\r\nNOTE: do not use any spaces between your animal names.\r\n\r\nLet me know if that helps or not. \r\n\r\n", + "created_at": "2020-09-18T01:22:21Z", + "author": "sgoldenlab" + }, + { + "body": "Updated to dev==0.63 and manually edited the config file id_list to contain the names of my 3 animals. After running the \"Skip outlier correction\" step I get the following error:\r\n![test](https://user-images.githubusercontent.com/62612214/93604025-d1e1d080-f992-11ea-89c3-1c89fc44dc80.PNG)\r\n\r\nThe original problem was that at this step SIMBA was only expected 57 columns (corresponding to 2 animals) but my dataset had 69 columns (3 animals). Now SIMBA is expecting 69 columns.....but the dataset contains 115. I can't figure out where the number 115 comes from.....\r\n\r\n115 is divisible by 23 (the number of bodyparts in the dataset), but I can't think of what would be represented in the 5 columns per bodypart.\r\n", + "created_at": "2020-09-18T13:44:22Z", + "author": "blogeman" + }, + { + "body": "I see, that did not go as I have planned. I will look into this later today. \r\n\r\nJust to confirm, your animals, they do have their respective body parts ending with `_1` and `_2` and `_3` ? \r\n\r\nIt is the other way around - so your data frame contains 69 columns, and that's how many new column headers are expected. For some reason SimBA generates a list of 115 column names now in your scenario, and tries to use those for you 69 columns. ", + "created_at": "2020-09-18T14:01:11Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks @sgoldenlab. To confirm, all body parts have the \"_X\" identifier.\r\n![1](https://user-images.githubusercontent.com/62612214/93607099-e1fbaf00-f996-11ea-86be-8351bf898c8d.PNG)\r\n", + "created_at": "2020-09-18T14:08:45Z", + "author": "blogeman" + }, + { + "body": "Thanks @blogeman: one final question for now. I'm trying to reproduce your project and error but I am not getting it. Any chance you could send me the input H5 or CSV from DLC? It would be extremely helpful. I am only really interested in the header structure so I am not missing anything. If you don't want to share the entire file just the headers lines and a few rows will do \r\n\r\n", + "created_at": "2020-09-18T15:24:33Z", + "author": "sgoldenlab" + }, + { + "body": "Please forgive for my ignorance, but how can I send the file to you? 
I've tried to drag and drop it into the comment box but it won't allow an H5 file type. I've tried to google for the answer, but haven't gotten anywhere. I'm also new to Github and not sure what the proper etiquette for this kind of thing is - hope I'm not bothering you.", + "created_at": "2020-09-18T15:43:29Z", + "author": "blogeman" + }, + { + "body": "No no @blogeman, not at all! People's tracking parameters are becoming more complex, and I need a stable and flexible solution for file import. What limits me is that I don't have tracking files for your situation, so I have been somewhat shooting in the dark with my own mock-up hand-generated csv/h5 files - this is helpful. \r\n\r\nI'd say you can either paste a gdrive link to the file here, or share it with sgoldenlab@gmail.com \r\n\r\n ", + "created_at": "2020-09-18T16:08:30Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab, I e-mailed the CSV and the H5 files to the e-mail address you indicated. If there is anything else you need from me I will happily provide it.", + "created_at": "2020-09-21T20:15:36Z", + "author": "blogeman" + }, + { + "body": "Hi @blogeman - sorry about this - can you try again sending it to goldenneurolab@gmail.com ? \r\n\r\nI will try to look at it before the end of the week. \r\n\r\n", + "created_at": "2020-09-21T22:37:01Z", + "author": "sgoldenlab" + }, + { + "body": "Hello, I am having this same problem, except my error message states \"expected axis has 16 elements, new values have 24 elements.\" Did you all find a fix to this problem? ", + "created_at": "2021-03-12T17:52:35Z", + "author": "adamz10" + }, + { + "body": "Hi @adamz10. Unfortunately I never figured out how to solve this problem.", + "created_at": "2021-03-15T19:29:52Z", + "author": "blogeman" + }, + { + "body": "Sorry to hear that @blogeman. I appreciate the update though! ", + "created_at": "2021-03-15T19:46:56Z", + "author": "adamz10" + } + ] + }, + { + "title": "Multi-Animal DLC Import Error", + "body": "Hi again! Unfortunately, I seem to have run into a few more errors (that I suspect are closely related). The goal of the SimBA project is to create and track classifiers for multi-animal DLC data which includes 2 pups and 1 adult mouse. Our DLC H5 file contains both labeled multi-animal (for the 2 pups) body parts and unique body parts (for the adult). In order to troubleshoot this error, we have tried to split our H5 file into three separate tests: adult only, infants only, and the original merged dataset. The adult only data works perfectly with SimBA, but I'm finding unique issues with both the infants and the merged data.\r\n\r\nInfants Only:\r\nAlthough I am able to successfully import the video and tracking data for the 2 infants, the load project GUI is completely blank (i.e., it only has the tabs on the top, but no buttons within). The error on the terminal is attached below:\r\n![CaptureInfants](https://user-images.githubusercontent.com/43099772/92410759-21aed500-f113-11ea-80f9-cc18c0490dcb.PNG)\r\n\r\nMerged Data:\r\nWhen we include both the multi-animal infants and the adult in one file, SimBA does not allow the import of the H5 file, with a length mismatch error (shown below). 
On the other hand, when we convert the H5 file to a CSV, it is successfully imported through the initial create project step, but later fails when we try to extract features.\r\n![LengthMismatchError](https://user-images.githubusercontent.com/43099772/92410820-60448f80-f113-11ea-8725-0535bad15ef7.PNG)\r\n\r\nAny thoughts would be much appreciated, and sorry for the long post!\r\n\r\n", + "user": "ahabchopra", + "reaction_cnt": 0, + "created_at": "2020-09-07T18:07:28Z", + "updated_at": "2021-11-19T19:15:58Z", + "author": "ahabchopra", + "comments": [ + { + "body": "Hi @ahabchopra!\r\n\r\nThe short answer: could you try using the development version of SimBA and see if it works?\r\n\r\n1. Uninstall the version of SimBA you have - `pip uninstall simba-uw-tf` or `pip uninstall simba-uw-no-tf` \r\n\r\n2. Install the development version `pip install simba-uw-tf-dev` and create or load your project. I inserted a fix for this in this simba version the other day. Let me know how it goes! (this is soon going to become the master version). \r\n\r\n\r\nLonger answer: I'm working with a few labs with tracking scenarios just like yours. Originally I did not anticipate that different animals may have different numbers of tracked body-parts, and therefore the `simba-uw-tf` and `simba-uw-no-tf` versions give you the error you are seeing. Different numbers of body-parts, and differently named body-parts for different animals, require more dynamic menus than earlier versions, and I've tried to fix this in the development version. \r\n\r\nThat said, the labs I'm working with that have several pups and a dam have yet to perfect their tracking models, so the updates I've made with dynamic menus etc. in SimBA are in anticipation of them getting accurate DLC/SLEAP pose - so I haven't been able to verify it's working well throughout. So, if you report any errors you encounter with the development version here, it would be extremely helpful for me so I can tidy up any functions if needed!\r\n\r\n", + "created_at": "2020-09-08T03:15:42Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab! Thanks for this thorough explanation! I have just a few follow up questions:\r\n1. When installing the \"development version,\" is there a \"without integrated TensorFlow\" version, i.e. can I perhaps run `pip install simba-uw-no-tf-dev`?\r\n\r\n2. The longer explanation definitely makes sense for the \"merged\" dataset I described above, but do you know why we would be thrown an error for even the data file with just infants? In that case, the two animals have the same number of labeled body parts (4). \r\n\r\nP.S. Will be sure to update you with any errors I encounter in the future, for any small function adjustments 👍 ", + "created_at": "2020-09-08T03:48:05Z", + "author": "ahabchopra" + }, + { + "body": "Hi @ahabchopra!\r\n\r\n1. I only keep one development version, but(!), even though there is a `tf` in the version name, it does not require tensorflow. If you install version `0.57` with `pip install simba-uw-tf-dev==0.57` it should work. \r\n\r\n2. I tried to recreate the first error you reported using the development version by opening a range of different projects with different numbers of animals and body-parts, but I couldn't. I remember having seen this error before, too, but I seem to have fixed it at some point. Of course let me know if you see it on your end and I will look over it again. 
\r\n\r\nThanks ", + "created_at": "2020-09-08T13:49:19Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab - successfully installed the dev version 0.57, and happy to report that it seems to be handling the multi-animal 2 infant project well. However, I'm still getting a length mismatch error when I try to introduce the adult to the project (although it seems to be a different # mismatch than the previous error I showed above). Any advice on how to approach this situation - I've attached an image of the terminal below:\r\n![CaptureDev](https://user-images.githubusercontent.com/43099772/92496776-94c55380-f1c6-11ea-9659-0efac5872ee8.PNG)\r\n", + "created_at": "2020-09-08T15:30:21Z", + "author": "ahabchopra" + }, + { + "body": "Hi @ahabchopra - I know this is slightly petty and I am not sure what the documentation says (I may have to update it), but can you stick and underscore between the number and the name of the animal and let me know if it works. I.e., `Single_1` and `Infant_2` and `Infant_3`? \r\n\r\nEDIT: I think it might be the `single` that also need a digit. ", + "created_at": "2020-09-08T15:37:29Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab - definitely not a petty suggestion, but unfortunately it seems to throw the same error about the Length mismatch when I try `single_1 Infant_1 Infant_2.` Not quite sure I understand your edit, but do you mean to say that for the pose_config, I should also put an `_1` after all the single bodyparts, `_2 `and` _3` for the infants?", + "created_at": "2020-09-08T15:51:15Z", + "author": "ahabchopra" + }, + { + "body": "Yes, we need a way of tying each bodypart to each individual, I try to be clearer and will get you a screenshot when I get to a computer. Try this:\n\nYou should be able to name you animals whatever you wish (single, infant1, infant2 should work, I was wrong above).\n\nNext, for the body parts, add `_1` to the end of each bodypart that belongs to single, add `_2` to the end of each bodypart that belongs to infant1, and `_3` to each bodypart that belongs to infant2. Does that work?\n\nI am aware it's a better way to to this with DeepLabCut data as the individual name is in the multiindex header, but haven't found time to implement it yet.\n\n________________________________\nFrom: ahabchopra \nSent: Tuesday, September 8, 2020, 8:51 AM\nTo: sgoldenlab/simba\nCc: Simon Nilsson; Manual\nSubject: Re: [sgoldenlab/simba] Multi-Animal DLC Import Error (#66)\n\n\nHi @sgoldenlab - definitely not a petty suggestion, but unfortunately it seems to throw the same error about the Length mismatch when I try single_1 Infant_1 Infant_2. Not quite sure I understand your edit, but do you mean to say that for the pose_config, I should also put an _1 after all the single bodyparts, _2 and _3 for the infants?\n\n—\nYou are receiving this because you are subscribed to this thread.\nReply to this email directly, view it on GitHub, or unsubscribe.\n\n", + "created_at": "2020-09-08T16:01:44Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson - thanks for your suggestion - with this new implementation I was able to successfully extract and label features!", + "created_at": "2020-09-08T18:47:41Z", + "author": "ahabchopra" + }, + { + "body": "Great @ahabchopra - let me know if anything else comes up", + "created_at": "2020-09-08T20:10:51Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab - sorry to have another issue so soon, but when I move on to the next step (i.e. 
label behavior), no images appear in the GUI to be labeled, although frames were successfully extracted to the \"frames\" folder in the project. Moreover, after clicking \"Open current video\" a seemingly infinite number of mini videos appear on my screen. Not sure if the two errors are related, but just thought I would let you know about the second in case it becomes relevant. Any ideas how to go about this? ", + "created_at": "2020-09-10T02:53:24Z", + "author": "ahabchopra" + }, + { + "body": "Hi @ahabchopra - thanks for reporting this. Let me see if I can replicate it on my end. ", + "created_at": "2020-09-10T02:58:35Z", + "author": "sgoldenlab" + }, + { + "body": "@ahabchopra - does it come with an error msg in the terminal?", + "created_at": "2020-09-10T03:23:14Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab - it does! Sorry I didn't include that previously. Here it is below: the first error is shown in the first two lines (both begin with \"warning\") and actually occurs when I open the video through the \"video parameters\" tab. No error, however, is shown in the main SimBA GUI. \r\n\r\nThe second error (starting with \"Traceback\") occurs when I try to \"show current frame\" in the infinite videos that open after I \"open current video.\" However, I don't see an error when I open the label behavior tab with the folder of frames or try to save the Annotations CSV.\r\n\r\n![MergeError](https://user-images.githubusercontent.com/43099772/92680178-39937e00-f2f8-11ea-8dbe-ecc12863cdab.PNG)\r\n", + "created_at": "2020-09-10T03:59:08Z", + "author": "ahabchopra" + }, + { + "body": "Thanks @ahabchopra - very helpful. I've tried to recreate the error; I got a similar error to what you were seeing (with frames not displaying in some projects), but unfortunately I couldn't replicate that specific error msg. \r\n\r\nShort explanation: could you update SimBA and try again? `pip install simba-uw-tf==0.58`. SimBA now tries to put a text label on each animal in multi-animal projects (https://github.com/sgoldenlab/simba/blob/master/images/Capturesss.JPG) and these labels could become messed up in the annotator in some tracking scenarios. I've inserted a fix to remove the labels if the error pops up. \r\n\r\nThe error you are seeing, though, suggests the program can't find the specific frame. In your `project_folder/frames/input` folder, there should be a directory that has the same name as your video. If you are trying to label a video called `Video1.mp4`, you should have a folder in the `project_folder\\frames\\input\\` directory called `Video1` which contains all the frames in your video, starting with `0.png`. For now, try to avoid spaces in the video and folder names. \r\n\r\nAlso, when you try to open the annotator again in the new SimBA version, could you delete the `project_folder\\labelling_info.txt` file in your project before giving it a go?\r\n\r\n", + "created_at": "2020-09-10T04:36:36Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab - will try this fix and let you know how it works! A quick clarification first - do you mean to say I should still install the dev version, i.e., `pip install simba-uw-tf-dev==0.58`, or rather just `pip install simba-uw-tf==0.58`? 
Thanks!", + "created_at": "2020-09-10T13:24:21Z", + "author": "ahabchopra" + }, + { + "body": "Hi @ahabchopra - sorry it's the dev upgrade, `pip install simba-uw-tf-dev --upgrade`", + "created_at": "2020-09-10T14:19:00Z", + "author": "sgoldenlab" + }, + { + "body": "Hey @sgoldenlab - I appreciate you helping me work through these small bugs - I think we're close! Currently, the GUI successfully displays the image (with the 0.58 update). However, it seems to think that both infants are \"Infant 1\" and the adult is both \"single\" and \"Infant 2\" simultaneously. I tried to play with the pose_config, but to no avail. Any way to tell SimBA that the two infants are distinct besides the _2 and _3 labeling in the pose_config? Or will this even be a problem in post processing analysis?\r\n\r\n![CaptureLabels](https://user-images.githubusercontent.com/43099772/92820465-fa1c6e80-f397-11ea-8eb9-0494789240b6.PNG)\r\n", + "created_at": "2020-09-10T23:01:25Z", + "author": "ahabchopra" + }, + { + "body": "Almost there @ahabchopra, and thanks for helping me troubleshoot this.\r\n\r\nI noticed another potential bug if you use unequal number of body-parts per animal, and I have fixed it. I don't know if that was causing the issue you are seeing, but could you upgrade to 0.59 and see if it works? \r\n\r\nI don't have projects with your tracking data so it's difficult to troubleshoot. If it does not work, could you send me your video + the associated features_extracted file and I will take a stab at it?\r\n\r\nAnd, no, its just a visualization issue, so it does not matter, but it will confuse users so I better handle it. You annotations are still assigned to the specific frames and that's the only real critical part. \r\n\r\nI will try to check in here once daily, and might not be as quick to respond in the upcoming weeks. Thanks gain for your help. ", + "created_at": "2020-09-11T02:44:09Z", + "author": "sgoldenlab" + }, + { + "body": "Almost there indeed @sgoldenlab! Version 0.59 seems to be working much better. Although I'm getting the same visual error in the GUI I described above, I am annotation as if the labels were correct. While the model seems to run well, I seem to be finding an error during visualization. Specifically, the first generate video \"Sklearn visualization\" step. I tried to install cmaps and it seems to be from a matlab package, but that fix didn't quite work - any thoughts for this one?\r\n\r\n![CaptureCmap](https://user-images.githubusercontent.com/43099772/93145478-665de180-f6ba-11ea-9654-f6a659995a3a.PNG)\r\n", + "created_at": "2020-09-14T22:45:12Z", + "author": "ahabchopra" + }, + { + "body": "Hi @ahabchopra - my bad, I had missed the definitions of the color maps in the plot script. Can you try again, I am up to version 0.61 now, and let me know if it runs?\r\n\r\n**Beware**, if you have high resolution videos, visualizations in SimBA can be slow at the moment, but I will look to sort this when time allows. ", + "created_at": "2020-09-14T23:54:10Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab! Glad to report that the SimBA program is working well so far! Just having a slight issue with the visualization step, specifically on the \"Sklearn visualization.\" I'm getting an \"list index out of range error\" for the hullColor array, and my small debugging attempts don't seem to have solved it - any thoughts on this one? 
Terminal error code shown below:\r\n![sklearn](https://user-images.githubusercontent.com/43099772/94591630-4dc2ff00-0256-11eb-90a6-75d4b9468d87.PNG)\r\n", + "created_at": "2020-09-29T17:19:00Z", + "author": "ahabchopra" + }, + { + "body": "Hi @ahabchopra - first, thanks for reporting this. It looks like there could have been a bug limiting the number of colors generated in situations like yours. \r\n\r\nCould you upgrade SimBA (now version 0.68 - `pip install simba-uw-tf-dev --upgrade`) and report back if it is fixed or not? ", + "created_at": "2020-09-30T03:17:13Z", + "author": "sgoldenlab" + }, + { + "body": "Fix works great @sgoldenlab! Appreciate all of your help!", + "created_at": "2020-09-30T19:05:10Z", + "author": "ahabchopra" + }, + { + "body": "Hi @sgoldenlab, I am also having a similar issue where, when I try to import the h5 files using H5 (multianimalDLC):\r\n\r\n\"Importing 0 multi-animal DLC h5 files to the current project\r\nAll multi-animal DLC .h5 tracking files ordered and imported into SimBA project\r\nin the chosen workflow file format\"\r\n\r\nIt works when I select CSV (DLC/DeepPoseKit), but using that option leads to issues with Extracting Features. \r\n\r\nI currently have simba-uw-tf-dev, and I uninstalled it and tried both versions simba-uw-tf-dev==0.57 and simba-uw-tf-dev==0.58 and did not have any luck. \r\n\r\nThank you for the help!", + "created_at": "2021-11-19T19:15:58Z", + "author": "yana-yuhai" + } + ] + }, + { + "title": "Outlier Correction to ROI error", + "body": "When I run outlier correction, the SimBA window shows that the outlier correction has been completed, but when I step over to the ROI window, I receive a message that outlier correction has not been completed. I assume this is a type of formatting issue, but I am unsure - any suggestions? Below is a snip of the SimBA window.\r\n\r\nUsing Anaconda on Windows 10.\r\n![Simba1](https://user-images.githubusercontent.com/70701840/92114816-75da5200-edbf-11ea-9e12-21ab016be9c5.JPG)\r\n\r\n\r\n \r\n", + "user": "JIUthor", + "reaction_cnt": 0, + "created_at": "2020-09-03T12:28:40Z", + "updated_at": "2020-10-06T15:29:44Z", + "author": "JIUthor", + "comments": [ + { + "body": "Hi @JIUthor - it seems like SimBA can't locate either your pose-estimation CSVs or your outlier corrected pose estimation CSVs. These should be in your project_folder/csv/input_csv and project_folder/csv/outlier_corrected_movement_location folders, respectively. If you go to these folders in your project directory, can you see CSV files with the same file names as the videos you are trying to analyse? ", + "created_at": "2020-09-03T13:10:33Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab When I go to said locations, along with any folders within the csv folder, they all show as empty.", + "created_at": "2020-09-11T17:26:45Z", + "author": "JIUthor" + }, + { + "body": "@sgoldenlab I was curious if you knew what I should do next to try to address this issue?\r\n", + "created_at": "2020-09-17T18:28:01Z", + "author": "JIUthor" + }, + { + "body": "Hey @JIUthor - First, sorry I had missed this. Thanks for the bump. \r\n\r\nIf I understand correctly, your `project_folder/csv/input_csv` folder is empty. This means something went astray when you imported your tracking files into your SimBA project. \r\n\r\nWhen you imported your tracking files - and used this menu - did you see any error msgs being printed, either in the main SimBA terminal window, or the windows terminal? 
\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/93537251-e3c66380-f8ff-11ea-8c30-6961278f1926.png)\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2020-09-17T23:08:13Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Error with Generate / Save csv on Label Behavior Page", + "body": "Hi - having an issue with the Label Behavior portion of the SimBA process. Although I am able to successfully extract frames and label behaviors, I seem to get a repeated error when it comes time to Generate/Save CSV. Screenshot of the terminal attached below. Let me know if there are any suggestions I can try! \r\n![Capture](https://user-images.githubusercontent.com/43099772/91525569-cb9a9000-e8cf-11ea-83a6-48bd45878034.PNG)\r\n\r\n", + "user": "ahabchopra", + "reaction_cnt": 0, + "created_at": "2020-08-28T05:48:48Z", + "updated_at": "2020-08-29T14:21:42Z", + "author": "ahabchopra", + "comments": [ + { + "body": "Hello @ahabchopra - thanks for the screenshot, very helpful. Check out this suggested solution to a user who reported similar issues recently: https://github.com/sgoldenlab/simba/issues/55#issuecomment-665910732.\r\n\r\nWhen you have completed your labels for `video_40` using the labelling GUI and press `Generate/Save CSV`, SimBA tries to append the labels to the CSV file containing the extracted features from `video_40`, which will be located in the `project_folder/csv/features_extracted` directory. If SimBA can't find the `project_folder/csv/features_extracted/video_40.csv` file, then it can't append your annotations, and you will see the error message you are seeing. Before starting to label behaviours, make sure to extract the features by clicking on this button:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/91566644-d3225f00-e8f8-11ea-9629-8fc9beb0d428.png)\r\n\r\nLet me know if this helps you pass the error or not, thanks! \r\n\r\n", + "created_at": "2020-08-28T13:38:33Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab! Thanks for the quick response 👍. I actually saw the earlier thread, but it didn't resolve the error. When I try to \"extract features,\" the SimBA GUI displays something along the lines of \"0 features extracted.\" If it would help to have a screenshot of this I can rerun and paste it here - let me know. ", + "created_at": "2020-08-28T16:54:27Z", + "author": "ahabchopra" + }, + { + "body": "@ahabchopra I see, then it makes sense :) if you look in your `project_folder/csv/features_extracted` directory it is probably empty. \r\n\r\nTo extract features, we first have to correct any gross pose-estimation tracking outliers (or tell the code to skip the outlier correction step). The feature extraction step looks in the `project_folder/csv/outlier_corrected_movement_location` folder, extracts features from any files that have had tracking inaccuracies corrected first, and then creates new CSVs and places them in the `project_folder/csv/features_extracted` folder. Did you go through the step of clicking any of these buttons?\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/91600384-b6981e00-e91c-11ea-8820-59292e5e4af5.png)\r\n\r\n", + "created_at": "2020-08-28T17:53:42Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab - sorry, I should have clarified my earlier steps. After creating a project and extracting frames, I went through the video parameters step (and saved), clicked skip outlier correction, and then went to extract features. 
However, even after skipping outlier corrections, I don't see any files in the \"csv\" folder. Hope that is more helpful in diagnosing the error. \r\n\r\nIf it's relevant, I'm inputting an H5 file from a multi-animal DLC project. ", + "created_at": "2020-08-28T17:57:09Z", + "author": "ahabchopra" + }, + { + "body": "Cheers @ahabchopra - do you see any files in your `project_folder/csv/input_csv` folder? ", + "created_at": "2020-08-28T18:14:57Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab not seeing any files in that folder - is that where the h5 should have been copied?", + "created_at": "2020-08-28T18:19:28Z", + "author": "ahabchopra" + }, + { + "body": "Yeah, when you go through this step: https://github.com/sgoldenlab/simba/blob/master/docs/Multi_animal_pose.md\r\n\r\nAt the end of the process, the CSVs, one for each of the h5 files in your project, should be located in that folder. If you go through the steps in the tutorial link again, could you paste me a screenshot of the terminal window at the end of the process? That would help me diagnose the problem.", + "created_at": "2020-08-28T18:29:56Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab thanks for all your help! I think it was a combination of going through the tutorial again and reading some more threads that resolved all of the errors. ", + "created_at": "2020-08-29T07:45:14Z", + "author": "ahabchopra" + }, + { + "body": "@ahabchopra fantastic, let me know if anything else comes up though. ", + "created_at": "2020-08-29T14:21:41Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Conflict dependencies", + "body": "**Describe the bug**\r\nI pip installed simba-uw-tf in a fresh conda environment but cannot run it. I tried to uninstall and reinstall shapely from conda-forge as suggested by earlier posts, but that did not work.\r\n\r\nHowever, I noticed conflicting dependencies when I tried to pip check simba-uw-tf.\r\n\r\nI didn't test all of them, but here are some example messages I got. \r\n\r\n1. pip check simba-uw-tf\r\n**tensorflow-gpu 1.14.0 has requirement protobuf>=3.6.1, but you have protobuf 3.6.0.**\r\nimbalanced-learn 0.7.0 has requirement scikit-learn>=0.23, but you have scikit-learn 0.22.2.\r\ndeeplabcut 2.0.9 has requirement numpy~=1.14.5, but you have numpy 1.18.1.\r\ndeeplabcut 2.0.9 has requirement scikit-learn~=0.19.2, but you have scikit-learn 0.22.2\r\n\r\n2. After I installed protobuf 3.6.0 using pip install protobuf==3.6.0\r\n**tensorflow-gpu 1.14.0 requires protobuf>=3.6.1, but you'll have protobuf 3.6.0 which is incompatible.**\r\n...\r\n\r\n3. Then I downgraded tensorflow from 1.14.0 to 1.13.2\r\n**simba-uw-tf 1.2.20 has requirement protobuf==3.6.0, but you have protobuf 3.13.0.\r\nsimba-uw-tf 1.2.20 has requirement tensorflow-gpu==1.14.0, but you have tensorflow-gpu 1.13.2.**\r\n\r\nI am totally confused. \r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. pip install simba-uw-tf \r\n2. pip check simba-uw-tf \r\n3. try to adjust the module versions as expected\r\n\r\n**Expected behavior**\r\n\r\n**Screenshots**\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 18.04 LTS\r\n - Python Version [e.g. 3.6.0] 3.6.0\r\n - Are you using anaconda? 
Yes\r\n\r\n\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "flyegg", + "reaction_cnt": 0, + "created_at": "2020-08-21T08:49:37Z", + "updated_at": "2020-09-04T02:37:49Z", + "author": "flyegg", + "comments": [ + { + "body": "Hi @flyegg, \r\n\r\nThe \"error\" msgs you are seeing after your first points are more \"warnings\" that shouldn't affect the boot-up and performance of SimBA. However, in my experience, you do need protobuf==3.6.0, but do not downgrade tensorflow-gpu - stick with 1.14.0. \r\n\r\nCan you try the following: if you are running it on Linux (as indicated in your msg), type:\r\n\r\n`pip uninstall simba-uw-tf`, then:\r\n\r\n`pip install simba-uw-tf-dev`\r\n\r\nignore any of the warning msgs you see, then type:\r\n\r\n`simba`\r\n\r\nIf you see any error msgs, please post me a screenshot. \r\n\r\nThanks\r\n\r\n", + "created_at": "2020-08-22T03:18:04Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks, it works like a charm. ", + "created_at": "2020-09-04T02:37:48Z", + "author": "flyegg" + } + ] + }, + { + "title": "Successfully installed Simba but cannot run Simba", + "body": "Hi,\r\n\r\nI created a conda environment for Simba only and installed Simba under this environment. The installation was smooth, but simba keeps delivering an error message. \r\n\r\nI have tried to run DLC in a different environment and everything worked perfectly. I guess the hardware should not be a problem here. \r\n\r\nHere are the scripts and error messages I got. Basically I did three things: uninstall simba, reinstall simba, and run simba. \r\n\r\nIs there any clue?\r\n\r\nBest regards\r\n\r\n-Yi\r\n\r\n(Simba-GPU) yi@Precision-5820-Tower:~$ pip uninstall simba-uw-tf \r\nFound existing installation: Simba-UW-tf 1.2.19\r\nUninstalling Simba-UW-tf-1.2.19:\r\n Would remove:\r\n /home/yi/anaconda3/envs/Simba-GPU/bin/simba\r\n /home/yi/anaconda3/envs/Simba-GPU/lib/python3.6/site-packages/Simba_UW_tf-1.2.19.dist-info/*\r\n /home/yi/anaconda3/envs/Simba-GPU/lib/python3.6/site-packages/simba/*\r\nProceed (y/n)? 
y\r\n Successfully uninstalled Simba-UW-tf-1.2.19\r\n(Simba-GPU) yi@Precision-5820-Tower:~$ pip uninstall simba-uw-tf\r\nWARNING: Skipping simba-uw-tf as it is not installed.\r\n(Simba-GPU) yi@Precision-5820-Tower:~$ pip install simba-uw-tf\r\nCollecting simba-uw-tf\r\n Using cached Simba_UW_tf-1.2.19-py3-none-any.whl (3.3 MB)\r\nRequirement already satisfied: tqdm==4.30.0 in ./anaconda3/envs/Simba-GPU/lib/python3.6/site-packages (from simba-uw-tf) (4.30.0)\r\n[... remaining \"Requirement already satisfied\" lines trimmed - every dependency was already satisfied ...]\r\nInstalling collected packages: simba-uw-tf\r\nSuccessfully installed simba-uw-tf-1.2.19\r\n(Simba-GPU) yi@Precision-5820-Tower:~$ simba\r\nTraceback (most recent call last):\r\n File \"/home/yi/anaconda3/envs/Simba-GPU/bin/simba\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/yi/anaconda3/envs/Simba-GPU/lib/python3.6/site-packages/simba/SimBA.py\", line 5217, in main\r\n app = SplashScreen(root)\r\n File \"/home/yi/anaconda3/envs/Simba-GPU/lib/python3.6/site-packages/simba/SimBA.py\", line 5197, in __init__\r\n self.Splash()\r\n File \"/home/yi/anaconda3/envs/Simba-GPU/lib/python3.6/site-packages/simba/SimBA.py\", line 5202, in Splash\r\n self.image = Image.open(os.path.join(scriptdir,\"TheGoldenLab.png\"))\r\n File \"/home/yi/anaconda3/envs/Simba-GPU/lib/python3.6/site-packages/PIL/Image.py\", line 2634, in open\r\n fp = builtins.open(filename, \"rb\")\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/yi/anaconda3/envs/Simba-GPU/lib/python3.6/site-packages/simba/TheGoldenLab.png'\r\n", + "user": "flyegg", + "reaction_cnt": 0, + "created_at": "2020-08-21T00:15:14Z", + "updated_at": "2020-11-17T16:13:04Z", + "author": "flyegg", + "comments": [ + { + "body": "Hi @flyegg, check the filename of the PNG; in my case there was an error.", + "created_at": "2020-11-17T16:13:03Z", + "author": "benjaws" + } + ] + }, + { + "title": "Questions on behavioral analysis", + "body": "Hello! I'd like to start by saying that this program looks very exciting. \r\n\r\nI'm currently working on a project wherein we're trying to determine naturalistic behavior of an absence seizure rat model, and I was wondering if this program would be compatible for our purposes. I have a series of questions.\r\n\r\n1: Are there constraints in terms of camera angle? I'm noticing that the examples provided show a top-down view; however, our video perspective is side-view.\r\n2: Can this program determine naturalistic behaviors that are not predetermined?\r\n3: Does the program use dimensionality reduction such as t-SNE or UMAP?\r\n4: Are there any other limitations in the types of behaviors that can be detected? 
If we were to employ a reaching pellet retrieval task, for example, would this program be able to determine differences there?\r\n\r\nThank you!\r\nJuan Enrique Villacres\r\n\r\n", + "user": "jevillacres", + "reaction_cnt": 0, + "created_at": "2020-08-19T23:49:19Z", + "updated_at": "2020-08-23T01:56:08Z", + "author": "jevillacres", + "comments": [ + { + "body": "Hi @jevillacres!\r\n\r\n1.\tYes, SimBA relies on accurate pose-estimation from either DeepLabCut, SLEAP, or DeepPoseKit, and SimBA does not take depth into account when generating features. Side-views can disrupt pose-estimation (protocols involving one or several freely moving animals allow scenarios where the bodyparts are completely hidden if cameras are side-view). Movements further away from the camera also do not cover the same real-life distances as movements close to the camera, and SimBA does not take these differences into account. This is not so relevant for head-fixed reaching tasks, but it is in freely moving protocols (e.g., I don't know how your set-up looks, but it may be difficult to cluster / classify different forms of seizures when all you and the computer see is the animal's rear). \r\n2.\tNo, SimBA is about supervised machine learning for now and does not do clustering/dimensionality reduction.\r\n3.\tNo, SimBA only does supervised learning. I'd check out BSOID https://github.com/YttriLab/B-SOID, or VAME https://github.com/LINCellularNeuroscience/VAME, or VAE-SNE https://github.com/jgraving/vaesne, and perhaps the UMAP-learn https://umap-learn.readthedocs.io/en/latest/ and HDBSCAN https://github.com/scikit-learn-contrib/hdbscan packages for feature calculations and clustering your pose-estimation data. \r\n4.\tSimBA can detect any behavior that you can reliably observe – the rule of thumb is that if you can see it, then you can train the classifier to see it. So if you are after new motifs, then the package links above would be the place to start. 
\r\n", + "created_at": "2020-08-20T17:23:52Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the insightful responses, I really appreciate it.", + "created_at": "2020-08-20T17:50:12Z", + "author": "jevillacres" + } + ] + }, + { + "title": "Extract Features", + "body": "Hi, \r\nAfter I created a new project, and attempted to \"Extract Features\", however, instead I received this message on anaconda shell.\r\n\r\nException in thread Thread-5:\r\nTraceback (most recent call last):\r\n File \"c:\\users\\jean\\anaconda3\\envs\\simba2\\lib\\threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"c:\\users\\jean\\anaconda3\\envs\\simba2\\lib\\threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"c:\\users\\jean\\anaconda3\\envs\\simba2\\lib\\site-packages\\simba\\SimBA.py\", line 4470, in extractfeatures\r\n extract_features_wotarget_8(self.projectconfigini)\r\n File \"c:\\users\\jean\\anaconda3\\envs\\simba2\\lib\\site-packages\\simba\\features_scripts\\extract_features_8bp.py\", line 60, in extract_features_wotarget_8\r\n fps = float(currVideoSettings['fps'])\r\n File \"c:\\users\\jean\\anaconda3\\envs\\simba2\\lib\\site-packages\\pandas\\core\\series.py\", line 131, in wrapper\r\n raise TypeError(\"cannot convert the series to \" \"{0}\".format(str(converter)))\r\nTypeError: cannot convert the series to \r\n\r\n\r\nI also got an error on SimBA GUI, so I checked video info on the file, but all seems correct.\r\n![image](https://user-images.githubusercontent.com/62602896/90171133-0993a200-dd6f-11ea-8a22-403b82d97984.png)\r\n\r\n\r\nOne thing I was concerning was that my laptop does not meet the minimum requirement you suggested. (spec is Windows10, 8GB RAM, & i5core). Does this error come due to the insufficient environment?\r\nIn addition, I ran this program without GPU, and I have downloaded simba-uw-no-tf.\r\n\r\nBest, \r\nDoyeon Jang", + "user": "DJang11", + "reaction_cnt": 0, + "created_at": "2020-08-13T18:16:48Z", + "updated_at": "2020-10-06T15:30:35Z", + "author": "DJang11", + "comments": [ + { + "body": "Hi @DJang11! \r\n\r\nCheck out this FAQ issue: https://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md#when-i-try-to-execute-some-process-in-simba-eg-feature-extraction-or-generate-frames-or-videos-etc-i-get-a-typeerror-that-may-look-somthing-like-this\r\n\r\nTo understand if what might be happening, could you send me two screenshots, (1) of the content of your `project_filder/csv/outlier_corrected_movement_location` folder, and (2) the content of your `project_folder/logs/video_info.csv` file. That will help me a lot for understanding the issue. \r\n\r\nYour computer specs should not be an issue! (Might be a little slow at times, but that's it)\r\n\r\nThanks!\r\n\r\n\r\n", + "created_at": "2020-08-14T01:27:23Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "ROI that changes with every frame", + "body": "Thanks for the great tool, excited to start using SIMBA. It appears that currently only static ROI's are supported, am I correct? I'm interested in nest building and have been using a MASK-RCNN based approach to identify the changing shape of the nest in every frame of a video. The output is a Shapely polygon which I use to create a dataframe with the columns containing polygon x,y and each frame is a row (just like the output from DLC). 
Is there a way that I could import this continually evolving polygon feature as an ROI?\r\n\r\nThanks!", + "user": "blogeman", + "reaction_cnt": 0, + "created_at": "2020-08-10T16:29:59Z", + "updated_at": "2020-08-11T13:54:05Z", + "author": "blogeman", + "comments": [ + { + "body": "Hello @blogeman! - that's cool! And sure, it can be imported as a feature (but not through the GUI... )\r\n\r\nYou are correct, only static ROIs work. \r\n\r\nIn SimBA, once you have labelled your images for the behaviors of interest, there will be a CSV file for every annotated video in the `project_folder/csv/target_inserted` directory. SimBA will use all the columns (minus the annotations and pose-estimation body-part locations) as features. If you can append your mask information as one or several additional columns within these files, then those columns will also be used, automatically, in SimBA, when creating the classifiers.\r\n\r\nI think the mask vertices can be represented as lists of various lengths, and it wouldn't work if you create, for example, two new columns in the `project_folder/csv/target_inserted` files that each contain a list for every frame (the x, y cords of all the vertices). I don't know if it would be relevant, but if you instead had single values that somehow represent your nest mask features, it would work nicely for behavioral predictions (i.e., nest_area mm2 or pixels). \r\n\r\nI don't know what you are looking to score, but if you are only interested in whether the animal body-part(s) are inside your mask rcnn region, then SimBA can't help at the moment - the code doesn't update the ROI coordinates for new frames, it assumes it is static. If that's what you are interested in, you can check lines 131-136 in ROI_analysis_2.py in SimBA to see how I did it - shapely has a function to check if a coordinate is in a polygon, but my code only reads in the vertices once. \r\n\r\n```\r\npointList = []\r\nfor point in vertices:\r\n    pointList.append(geometry.Point(point))\r\npolyGon = geometry.Polygon([[p.x, p.y] for p in pointList])\r\nCurrPoint = Point(int(currentPoints[bodyparts]), int(currentPoints[bodyparts+arrayIndex]))\r\npolyGonStatus = (polyGon.contains(CurrPoint))\r\n```\r\n", + "created_at": "2020-08-10T22:54:04Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for the help @sgoldenlab. I'll try using the total area of the polygon as an appended feature like you suggest. The behavior of nest building should have a high permutation importance to the total nesting material area, as the animal collects the material and localizes it into a single nest (aka total area decreases as the nest is built).", + "created_at": "2020-08-11T13:54:05Z", + "author": "blogeman" + } + ] + },
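*Illustrative aside (not part of the archived thread):* a minimal sketch of the approach agreed on above - collapsing a per-frame Shapely polygon into scalar columns (area, point-in-polygon flag) that could be appended to the per-video files in `project_folder/csv/target_inserted`. The polygons, centroid values, and column names here are mock illustrations, not SimBA's actual API:

```python
import pandas as pd
from shapely.geometry import Point, Polygon

# One nest polygon per frame (two mock frames); in practice these would
# come from the Mask-RCNN output described in the question above.
nest_polygons = [
    Polygon([(10, 10), (60, 12), (55, 70), (12, 65)]),
    Polygon([(12, 11), (58, 14), (52, 66), (14, 60)]),
]

# Animal centroid per frame from pose estimation (mock values).
df = pd.DataFrame({"Center_x": [30.0, 90.0], "Center_y": [40.0, 90.0]})

# Collapse each polygon into scalar features a classifier can consume.
df["nest_area_px"] = [poly.area for poly in nest_polygons]
df["in_nest"] = [
    int(poly.contains(Point(x, y)))
    for poly, x, y in zip(nest_polygons, df["Center_x"], df["Center_y"])
]
print(df)  # two rows, with nest area and an inside/outside flag per frame
```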
 + { + "title": "extract_features error: input is less than 2-dimensional since it has the same x coordinate\\n\\n while executing", + "body": "Hi! Great tool!\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nWhen I click on 'extract_features' after the outlier correction, an error is thrown.\r\n\r\n**Describe the bug**\r\ninput is less than 2-dimensional since it has the same x coordinate\\n\\n while executing\r\n\r\n**Screenshots**\r\n results[i] = self.f(v)\r\n File \"c:\\programdata\\anaconda3\\envs\\simba-tf\\lib\\site-packages\\simba\\features_scripts\\extract_features_16bp.py\", line 97, in \r\n [x['Center_1_x'], x[\"Center_1_y\"]]])).area, axis=1)\r\n File \"qhull.pyx\", line 2335, in scipy.spatial.qhull.ConvexHull.__init__\r\n File \"qhull.pyx\", line 354, in scipy.spatial.qhull._Qhull.__init__\r\nscipy.spatial.qhull.QhullError: ('QH6013 qhull input error: input is less than 2-dimensional since it has the same x coordinate\\n\\nWhile executing: | qhull i Qt\\nOptions selected for Qhull 2015.2.r 2016/01/18:\\n run-id 117636664 incidence Qtriangulate _pre-merge _zero-centrum\\n _max-width 0 Error-roundoff 0 _one-merge 0 _near-inside 0\\n Visible-distance 0 U-coplanar-distance 0 Width-outside 0 _wide-facet 0\\n', 'occurred at index 0')\r\n\r\n**Desktop (please complete the following information):**\r\n - Windows\r\n - Python Version, 3.6.0\r\n - Are you using anaconda? yes\r\n\r\nDo you have any idea what causes the problem? Maybe I need to fix the coordinates? \r\n\r\nThanks a lot!\r\n", + "user": "neugun", + "reaction_cnt": 0, + "created_at": "2020-08-07T08:39:41Z", + "updated_at": "2020-08-08T01:38:24Z", + "author": "neugun", + "comments": [ + { + "body": "Hi @neugun!\r\n\r\nYes - check out this prior error report: https://github.com/sgoldenlab/simba/issues/42#issuecomment-653085571\r\n\r\nAnd the 2nd FAQ entry here: https://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md\r\n\r\nIn short, this can happen when there are no animals, or a single animal, in the image, and all the body-parts are placed on a single x,y coordinate during pose-estimation. When SimBA then tries to calculate the hull of the animal to estimate its size, it fails because the animal is not represented in 2D. If you look into either the outlier correction and/or video pre-processing, you should be able to fix it. \r\n\r\n", + "created_at": "2020-08-07T14:28:32Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the reply!", + "created_at": "2020-08-08T01:38:24Z", + "author": "neugun" + } + ] + },
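*Illustrative aside (not part of the archived thread):* the QH6013 failure explained above is easy to reproduce - scipy's `ConvexHull` needs points that span two dimensions, so frames where every body part shares one coordinate (or one vertical line) raise `QhullError`. A minimal sketch of a guard; the zero-area fallback is the editor's choice for illustration, not what SimBA does:

```python
import numpy as np
from scipy.spatial import ConvexHull, QhullError

def safe_hull_area(points):
    """Return the convex-hull area of (N, 2) points, or 0.0 when the
    points are degenerate (identical or collinear) - the situation that
    arises when pose estimation stacks all body parts on one spot."""
    try:
        # For 2-D input, ConvexHull.volume is the enclosed area
        # (ConvexHull.area is the perimeter).
        return ConvexHull(points).volume
    except QhullError:
        return 0.0

good = np.array([[0, 0], [4, 0], [4, 3], [0, 3]], dtype=float)
bad = np.array([[2, 0], [2, 1], [2, 5]], dtype=float)  # same x coordinate

print(safe_hull_area(good))  # 12.0
print(safe_hull_area(bad))   # 0.0 instead of a QH6013 qhull input error
```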
Do you think increasing the shaving or adding extra physical distinctions can help?\r\n\r\nThank you,\r\nBest\r\nDorian", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-08-06T10:57:25Z", + "updated_at": "2020-08-09T15:57:53Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi Dorian - I know these issues rather well, but I say try the DLC gitter or dlc issue tracker for this problem - they will have a far better answer than me for their software. \r\n\r\nGenerally, ID switches are still an issue for me when I use maDLC and have videos with a lot of occlusion (e.g. a lot of heavy fighting between animals). When animals are more friendly with each other, the idea is that you should be able to solve the switches with a combination of (i) a lot of accurate annotations, (ii) a well connected skeleton (connect nearly all bodyparts), (iii) correcting any identity switches using their in-built interactive tracklets tool and feeding it back into the model. ", + "created_at": "2020-08-06T15:08:04Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab,\r\n\r\nThank you for your answer and your suggestions. I also wrote a post on the dlc GitHub. \r\nI'm working on territoriality, so fights are an important part of the behaviors I would like to score. I hope that increasing the shaving will improve the reliability of the tracking. \r\n\r\nBest, \r\nDorian ", + "created_at": "2020-08-07T07:53:30Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli - yes we are facing similar issues so far with maDLC, and I've spent a fair bit of time on it (and we've tried shavings, but not extensively) - and we've yet to get it to work satisfactorily when animals are fighting a lot. The solution, I think, is the tracklets interface (but it does not work well in Windows, at least in the 2.2b5/ 2.2b6 versions).", + "created_at": "2020-08-07T15:20:44Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Error in Extract Features Rolling Filter", + "body": "**Describe the bug**\r\nI'm doing a test run using an hour-long mp4 file and DLC labels for 1 animal, 8 body parts. Extract Features appears to be working until it hits \"Calculating rolling windows: medians, medians, and sums...\" at which I get the following error: \r\n\r\n[\r\n![Simba error](https://user-images.githubusercontent.com/43044612/88926680-f1366a00-d23b-11ea-94cc-71ca0008c952.jpg)\r\n](url)\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python Version: 3.6.2\r\n - Are you using anaconda? Yes\r\n", + "user": "lucytian828", + "reaction_cnt": 0, + "created_at": "2020-07-30T13:10:01Z", + "updated_at": "2020-07-31T19:25:51Z", + "author": "lucytian828", + "comments": [ + { + "body": "Hi @lucytian828 - the first thing that comes to mind is the fps of your video. Is it less than 15 fps? SimBA tries to divide the frames into rolling bins, with the smallest being 66ms (fps / 15); if you can't do this (for example, a single frame is longer than 66ms) this would happen. If that's the case we can modify the feature extraction script. ", + "created_at": "2020-07-30T14:32:42Z", + "author": "sgoldenlab" + }, + { + "body": "That would be the issue. The fps is slightly less than 15.", + "created_at": "2020-07-30T14:35:14Z", + "author": "lucytian828" + }, + { + "body": "@lucytian828 - there's currently no way of modifying the rolling window sizes directly in the `project_config.ini` but I will include it in the future. 
\r\n\r\nTo remove, add, or change rolling window sizes, navigate to SimBA's feature extraction scripts. On my computer they are in this path but it will be slightly different for you as you are using Anaconda: `C:\Python36\Lib\site-packages\simba\features_scripts`.\r\n\r\nNext, open the feature extraction script you are using. One animal and 8bps would be the `extract_features_8bp.py` script. The critical line you need to edit is line 34, and specifically the last value in this list:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88941929-73c02900-d23e-11ea-9d87-497e0d104752.png)\r\n\r\nCurrently it says 15, which means it tries to divide your fps by 15 - and it returns a value less than 1. Try changing it to 14 and let me know how it goes. \r\n\r\n\r\n\r\n", + "created_at": "2020-07-30T15:28:25Z", + "author": "sgoldenlab" + }, + { + "body": "That did the trick, thanks!", + "created_at": "2020-07-31T19:25:51Z", + "author": "lucytian828" + } + ] + }, + { + "title": "Simba 1.2 Error annotating/labeling behaviors in larval zebrafish", + "body": "SIMBA v1.2 error while trying to label behaviors in larval zebrafish (TL= 100px = 5mm) in a 30x30mm arena (1024x1024px videos). Seven body parts labeled (two eyes, swim bladder, four along tail)\r\n\r\nSuccessful steps:\r\nProject ini file created and loaded, \r\n20 videos loaded to the top video folder, \r\nDLC csv files imported to csv/input_csv/original filename folder\r\nVideo parameters adjusted\r\nOutlier correction settings of 0.7 (movement) and 1.5 (location) attempted but main screen window/log says:\r\nProcessing 0 files for location outliers...\r\nLog for corrected \"location outliers\" saved in project_folder/logs\r\nOutlier correction complete.\r\nROIs drawn\r\nFeatures (6000 png frames) extracted to 20 folders (one per video) \r\n\r\nError occurs when Labeling behaviors: \r\nvideo frames loaded and displayed in GUI window, \r\nrheotaxis behaviors check box selected, \r\nframe navigation works, \r\nframe range selected (e.g. 3280-5005) and click save and advance to next frame:\r\nAnnotated behavior: Rheotaxis. Start frame: 3280. 
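Circling back to the rolling-window fix above, a small sketch of the arithmetic involved. The divisor list is an assumption modelled on the screenshot (only its last value, 15, is confirmed by the thread); each divisor converts the video fps into a window length in frames, and divisors above the fps yield sub-frame windows, which is what broke at under 15 fps:

```python
fps = 14.9  # the video in the thread is slightly under 15 fps
window_divisors = [2, 5, 6, 7.5, 15]  # assumed list; only the 15 is confirmed

# Keep only windows that are at least one frame long.
roll_windows = [int(fps / d) for d in window_divisors if fps / d >= 1]
print(roll_windows)  # [7, 2, 2, 1] -- the fps/15 window is dropped
```

Filtering out the sub-frame windows generalizes the "change 15 to 14" workaround to any frame rate.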
End frame: 5005 \r\n**Click “generate / save csv” button gives the following file not found error:**\r\n\r\n(SIMBA) C:\\Users\\Fish_Behavior>simba\r\nwarning: Error opening file (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:901)\r\nwarning: C:/Users/Fish_Behavior/Desktop/SIMBA/1LZF_model/1LZF_model_07_28/project_folder\\videos\\0 (/build/opencv/modules/videoio/src/cap_ffmpeg_impl.hpp:902)\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 3389, in \r\n button_setscale = Button(label_setscale,text='Set video parameters',command=lambda:video_info_table(self.projectconfigini))\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 1050, in __init__\r\n self.getdata()\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 1100, in getdata\r\n self.button = Button_getcoord(self.xscrollbar, self.data_lists[1], self.data_lists[5],self.pixel_list)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\SimBA.py\", line 877, in __init__\r\n self.ppmvar[i].set(ppmlist[i])\r\nIndexError: list index out of range\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\labelling_aggression.py\", line 135, in \r\n self.generate = Button(self.window, text=\"Generate / Save csv\", command=lambda: save_video(self.window))\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\labelling_aggression.py\", line 474, in save_video\r\n data = pd.read_csv(input_file)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 457, in _read\r\n parser = TextFileReader(fp_or_buf, **kwds)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 895, in __init__\r\n self._make_engine(self.engine)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1135, in _make_engine\r\n self._engine = CParserWrapper(self.f, **self.options)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1917, in __init__\r\n self._reader = parsers.TextReader(src, **kwds)\r\n File \"pandas/_libs/parsers.pyx\", line 382, in pandas._libs.parsers.TextReader.__cinit__\r\n File \"pandas/_libs/parsers.pyx\", line 689, in pandas._libs.parsers.TextReader._setup_parser_source\r\nFileNotFoundError: [Errno 2] File b'C:\\\\Users\\\\Fish_Behavior\\\\Desktop\\\\SIMBA\\\\1LZF_model\\\\1LZF_model_07_28\\\\project_folder\\\\csv\\\\features_extracted\\\\\\\\20200318_AB_7dpf_ctl.csv' does not exist: b'C:\\\\Users\\\\Fish_Behavior\\\\Desktop\\\\SIMBA\\\\1LZF_model\\\\1LZF_model_07_28\\\\project_folder\\\\csv\\\\features_extracted\\\\\\\\20200318_AB_7dpf_ctl.csv'\r\n\r\nA clear and concise description of what the bug is.\r\n\r\n**To 
Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Python Version [e.g. 3.6.0]\r\n - Are you using anaconda?\r\n \r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "kylecnewton", + "reaction_cnt": 0, + "created_at": "2020-07-29T18:36:20Z", + "updated_at": "2020-08-28T22:20:53Z", + "author": "kylecnewton", + "comments": [ + { + "body": "Hi @kylecnewton ! Sorry about this but I think we can figure this out, the last lines give a hint and I was able to replicate your error:\r\n\r\n**b'C:\\Users\\Fish_Behavior\\Desktop\\SIMBA\\1LZF_model\\1LZF_model_07_28\\project_folder\\csv\\features_extracted\\\\20200318_AB_7dpf_ctl.csv' does not exist:** \r\n\r\nSimBA is looking for a CSV file in your `project_folder/csv/features_extracted` directory with the same name as your directory containing the frames you are labelling. So if you are labelling frames in a folder called `20200318_AB_7dpf_ctl`, then there should be a CSV file with features in this path : `project_folder/csv/features_extracted/20200318_AB_7dpf_ctl.csv`. \r\n\r\nYou mention that you have extracted the features, but is it possible that the names don't match up? (e.g., the video frame folder has been renamed somewhere along the process?) \r\n\r\nI will make sure SimBA prints a statements that is more informative. ", + "created_at": "2020-07-29T20:31:39Z", + "author": "sgoldenlab" + }, + { + "body": "Hi Sam,\r\n\r\nI have my extracted frames in C:\\Users\\Fish_Behavior\\Desktop\\SIMBA\\1LZF_model\\1LZF_model_07_28\\project_folder\\frames\\20200318_AB_7dpf_ctl\r\nbut there is no corresponding CSV file in the csv\\features_extracted folder. This is the case for all 20 videos: frames extracted but no csv file. hmm\r\n\r\nI did notice one thing: I analyzed these videos using an old DLC model that had 5 fish body part labels instead of the 7 I defined when I created the Simba project config file. Furthermore, the labels for the 5 body parts in that DLC csv file are not exactly the same as those in the Simba config file. I assume that the number and names of the labeled body parts in DLC model must match those in Simba, correct?\r\n\r\nthanks for your help. BTW I love the detail of the step by step tutorials on GitHub - infinitely better than DLC ;)\r\n", + "created_at": "2020-07-30T19:20:43Z", + "author": "kylecnewton" + }, + { + "body": "Hey @kylecnewton - to be honest, I have not explored how tracking one set of body-parts and defining another in SimBA affects the workflow, but I'd go back, create a new project, and specifying the correct number of body-parts and order at project creation. \r\n\r\nWhat happens is when you create a project and specify the number of body-parts you have in SimBA, is that you also tell SimBA about how many columns to expect when reading in your pose-estimation files at all different steps. If SimBA founds more, or less columns in your files you are likely to hit errors. The number of body-parts that your project have also dictates how many and which features should be created. \r\n\r\nTo save your annotated images you first have to generate the features. 
After correcting the outliers (or pressing skip outlier correction in your case), you should click this button and it should take care of it. Let me know if it works!\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88988345-801fa280-d28d-11ea-8d5d-d316d0b76afc.png)\r\n\r\nAnd thanks! That warms!! :) \r\n\r\n\r\n", + "created_at": "2020-07-31T00:55:31Z", + "author": "sgoldenlab" + }, + { + "body": "Hey @sgoldenlab , it worked!!! Here is the breakdown of what I have discovered so far:\r\n1- I had to start over a few times to make sure I did everything exactly as in the tutorial, this included running a new DLCma 2.2b6 model on 1 fish\r\n2- created a new Simba project config with user defined pose config (same 7 exact body parts and labels as DLCma model) and selected multi-animal tracking (very important).\r\n3 - imported videos and DLCma h5 (obviously csv doesn't work)\r\n4 - set video parameters, corrected outliers, set ROI, extracted features and currently labeling behaviors.\r\n\r\nI have two questions about outlier correction: First, I chose the left eye and swim bladder for the body parts as they are the most obvious and reliable because the four tail labels are often off. My concern is that these structures are close together (~20px or 1mm apart) and I was wondering if I should choose the eye and tip of the tail (~120px or 5mm apart) even though the tail is tougher to track? \r\n\r\nSecond, I chose movement and location corrections of 0.7 and 1.5, respectively (it was in the bioRx pre print). Given the small size of the animals (5mm larvae in a 30x30mm arena, filmed at 1024x1024 resolution) does this seem reasonable?\r\n\r\nA suggestion about the behavior labeling GUI: the navigation hotkeys are very counter-intuitive (o,e,w,t,s,x). Is there any way to use the actual arrow keys for single frame advance, alt+arrow for 10 frames, tab+arrow for 1 sec? This physical arrangement would make it easier to navigate video without having to constantly refer to the legend for hotkeys spread out across the keyboard. Or you could group all the reverse keys together and the forward keys together but mirroring each other in space and function: w = -10 fr, e = -2 fr , r = -1sec, then u = +1sec, i =+2fr, o = +10fr. I'm not sure if this is feasible but something like this might speed up the labeling process.\r\n\r\nthanks again for all the help. I will keep you posted on the progress and let me know if you would like any files to add to the OSF database.\r\n", + "created_at": "2020-07-31T21:13:37Z", + "author": "kylecnewton" + }, + { + "body": "Hi @kylecnewton - how does your tracking look pre-outlier correction? Currently, the outlier correction tools struggle when users have user-defined body-parts + multi animal tracking. I have written a fix but the main scripts are not yet calling this new outlier correction function. In your case you only have one animal as I understand, so it will be fine, but as you introduce more animals you might hit an error. I hope to get this in towards end of August..\r\n\r\nFor the body-parts I don't have a good answer, I think you need to try both ways. The tail end seems more intuitive but it will be no use if it's frequently and massively off. \r\n\r\nI updated Simba with your suggested keyboard shortcuts (w,e,r and (u,i,o) - you should see it if you upgrade Simba to 1.2.11. 
The modifier keys can be problematic in OpenCV which it runs on, otherwise I would have gone for that suggestion.\r\n\r\nThanks!\r\n\r\n", + "created_at": "2020-08-01T01:28:25Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab, I seem to recall the tracking looking good out of DLCma. I used the original bx.h5 files instead of a bx.filtered.h5 files because I wanted to see how the Simba models performed. I suppose I could use the filtered data to see if the tail markers are more stable and reliable. Unfortunately, it is not always clear (to me) how the various GUI options in DLC can truly optimize tracking results. However, the models generated by the new DLCma software for a single fish are FAR SUPERIOR to the tracking generated by previous software versions. Once I optimize the tracking and behavioral classification results for one fish, I plan to add in videos with 2+ fish to see how well everything holds up.\r\n\r\nSpeaking of behaviors, my first SiMBA model only had one behavior (positive rheotaxis = orienting and swimming into water flow). Should I have created two or more behaviors to give Simba a \"choice\" for the trees or is one behavior classification enough to begin learning the software? I assumed one behavior could be a yes/no sort of decision tree.\r\n\r\nWow, thanks for incorporating my suggestions. I am excited to see where all of this ML stuff goes and how I can implement it into ethological studies with wildlife in more natural settings. I like your idea of making all the classifiers open access so that we can compare apples to apples across studies. This is part of my motivation to create a \"universal\" larval zebrafish rheotaxis assay with a fundamental appeal to biomed and wildlife/eco researchers.\r\n", + "created_at": "2020-08-01T19:20:14Z", + "author": "kylecnewton" + }, + { + "body": "Hi @kylecnewton - sounds good - I think one of the main path to improve multi-animal tracking in the maDLC interface is the 'tracklets' interface - https://www.youtube.com/watch?v=bEuBKB7eqmk - it comes with some caveats at the moment - the hot-keys doesn't work in Windows environment (it is related to why I didn't implement the modifying keys you suggested: there are some Windows/Linux clashes in how keystrokes are interpreted in OpenCV) so I haven't been able to explore it much, but I think they will sort it soon and multi-animal tracking will become easier. If you have Linux machine you should be good to go though. \r\n\r\nOne classifier is good enough - it is boolean yes/no - but, SimBA will give you a probability value that the behavior is occurring in each frame, and you titrate the `discrimination threshold` to get the yes/no decision division exactly right: https://github.com/sgoldenlab/simba/blob/master/docs/Scenario1.md#critical-validation-step-before-running-machine-model-on-new-data. When sharing your classifiers with other using slightly different recording set-ups / video formats / camera angles, shifting this discrimination threshold can often be enough to get it working and validated in the new setting. ", + "created_at": "2020-08-02T01:32:48Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab ok now it makes sense why the DLC tracklet GUIs are so glitchy to navigate on Windows.\r\n\r\nre: Run Machine model/validation - this is my initial attempt to validate the model using a feature extracted video not used in training. As you can see, there are not any nice peaks in the data. 
I am only looking at positive rheotaxis, or swimming into oncoming flow. The water is flowing top to bottom, so I am just looking for the fish to swim up in the frames. IN the screenshots, I show frames where the fish is performing rheotaxis and where it is not but the models classified it incorrectly (e.g frame 1900). Any thoughts about how I can improve this? More annotated videos? Use filtered DLCma data? Use bodyparts spaced further apart for outlier correction?\r\n\r\nI also included a screen shot of the DLC output video showing the 7 landmarks in the raw h5 data files. On further examination, the second landmark from the swimbladder toward the end of the tail seems stable so I will try to use that for outlier correction.\r\n\r\nThanks so much for all the help!\r\n\r\n\"Validation_plot\"\r\n\r\n\"Not_rheotaxis\"\r\n\r\n\"Rheotaxis\"\r\n\r\n\"Raw_track_jittery\"\r\n", + "created_at": "2020-08-03T22:04:54Z", + "author": "kylecnewton" + }, + { + "body": "Hi @kylecnewton - adding annotations is usually a very good approach but perhaps it's possible to be a bit more surgical than that... \r\n\r\nThe random forest seems to pick up a near on/off and something happens in this frame - is it possible to look at the video and see what is happening? \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/89241564-1e747680-d5b4-11ea-8729-17019468d256.png)\r\n\r\nMy first thought is that you have appended the ROIs as features (I think you mentioned you had earlier?), and the classifier has picked up a relationship between one or more of the the ROI's and your annotations (e.g., most annotations for the animal swimming upwards happened to be when the animal is a certain distance away from, or more likely inside, a certain ROI. If you did not append ROI data I have to think a little more and it would be helpful to know what happens at about 3200 frames - removing the ROI features could help. \r\n\r\nMy fish experience is VERY(!) limited and I don't know positive rheotaxis looks like :) will the flow always come from the top or can it sometimes come from other angles? If it is the case that it's always from, we can insert a couple of lines in the feature extraction script: check the movements of all body-parts along Y normalized to body-part movements along X, this would come close to the answer I think? We do a version of it to get to tail rattling in the mouse (tail-end movement normalized to centroid and tail-base movement) and may work here too but I don't know if your behavior is as simple as that. \r\n\r\n\r\n\r\n", + "created_at": "2020-08-04T01:24:45Z", + "author": "sgoldenlab" + }, + { + "body": "HI @sgoldenlab, Yes I did append the ROI settings. I guess I assumed it was part of the linear workflow going from tab to tab in the GUI. How do I remove the connection? Do I hit the reset button on the ROI table or remove the ROI entries in the project_config.ini?\r\n\r\nPositive rheotaxis is basically swimming upstream. The fish orients toward the source of the stimulus, which in this assay always moves from top to bottom. Negative rheotaxis would be swimming with the flow of water, or downstream.\r\n\r\nI am in the middle of retraining the model on the filtered H5 files and with 100K rf N estimators (I wasn't sure if 100K was better than 2K, so I am experimenting). Once this is done, should I remove the ROIs, re-extract features, and retrain the model with 2K estimators? I can also add more annotated videos. 
I assume this will be an iterative process :) ", + "created_at": "2020-08-04T18:04:14Z", + "author": "kylecnewton" + }, + { + "body": "Hi @kylecnewton - first I'm sorry that I hadn't made this clear enough in the documentation / GUI - I will insert a sentence in the GUI next to the `Append ROI data to features` to emphasize caution, that this is optional, and in fact, should be avoided in most cases. It's only relevant when the behavior you are classifying has a strong relevant spatial component. \r\n\r\nFor removing the ROI features from your dataset, I don't have a function at the moment - and it will have to be done by hand - how many videos did you annotate? I hope not too many yet.. \r\n\r\nIn your `project_folder/csv/targets_inserted` directory, you will see the csv files that are used to build your model. Each of these files contain your annotations, all the pose estimation data, and your annotations, and ROI features. The ROI features are right towards the end, and are named according to what you named them, for example: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/89364674-ce66e400-d687-11ea-92ed-27a7e7ea6566.png)\r\n\r\nGo ahead and delete these columns in all the files in the `project_folder/csv/targets_inserted` directory, and then re-generate your classifiers using the `Train Machine Model` tab. This way SimBA will focus on swim speed and body-part movements, rtather than pick up some spurious relationships between where in the arena your animal is and the annotations. \r\n\r\nIt is an \"iterative process\" forever :) but I think that you will be able to get a classifier that performs well for your behavior relatively quickly. PS. The number of estimators, in my experience, rather quickly reaches ceiling and 2k can often be overkill in our settings.\r\n\r\n", + "created_at": "2020-08-05T02:29:25Z", + "author": "sgoldenlab" + }, + { + "body": "HI @sgoldenlab, I looked at the CSV files you mentioned and it appears that I did not append the ROI data to the features. Out of curiousity, I looked at the project_config.ini and noticed this: \r\n\r\n[ROI settings]\r\nanimal_1_bp = Fish1_SwimBladder\r\nanimal_2_bp = No body parts\r\ndirectionality_data = \r\nvisualize_feature_data = \r\nno_of_animals = 1\r\n\r\nShould I alter this or leave it alone? Other than this, it seems that I need to add more annotate more videos? I have used 20 so far: 19 for training and 1 for validation. I can add another 20 easily.\r\n\r\n ", + "created_at": "2020-08-05T18:50:57Z", + "author": "kylecnewton" + }, + { + "body": "@sgoldenlab - one thing I forgot to ask: my videos are huge (30sec x 200fps=6000 frames = 1.6GB each for AVI) and the extracted frames take up ~100GB or so. Is there any point when I can remove those frames and free up some space? My original files are MOV and much smaller but the conversion to AVI or MP4 doubled the SDD space needed. Any plans to use the MOV wrapper?\r\n\r\nin the future I will be shooting at 60fps based on your advice from the workshop - i.e. the behavior is discernible to the naked eye so no need to over sample. ", + "created_at": "2020-08-05T19:39:39Z", + "author": "kylecnewton" + }, + { + "body": "Ouch 200 is a lot! I What is your resolution like? \r\n\r\nI don't know how far down you can go with fps, but yes, the frames take up a lot of space. 
The only thing they are really useful for nowadays in SimBA is the human annotation labeling - and I will look to get rid of it when time allows and pull sraight from the videos, together with getting all functions working with MOV. With the mice, we work with 30fps and there is a tool to downsample fps in SimBA and I'd recommend using it prior to pose-estimation: \r\n![image](https://user-images.githubusercontent.com/50497030/89458240-901b0480-d71b-11ea-9113-031b855fdb1a.png)\r\n\r\n\r\n", + "created_at": "2020-08-05T20:02:46Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab the resolution is 1024x1024. I am converting new videos to avi and downsampling to 60fps as we speak. Then I will re-run them thru DLCma and add them to the SImba model. Hopefully this will be the solution.\r\n\r\nSo if I understand you correctly, I can move the frames for the previous 19 videos that I used for annotation and training the model to a storage HDD? ", + "created_at": "2020-08-05T21:01:42Z", + "author": "kylecnewton" + }, + { + "body": "@sgoldenlab \r\n\r\nFYI: when I try to downsample a batch of MOV videos in a folder, I get an error but if I do them individually it works. \r\n\r\nffmpeg version git-2020-06-26-7447045 Copyright (c) 2000-2020 the FFmpeg developers\r\n built with gcc 9.3.1 (GCC) 20200621\r\n configuration: --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libdav1d --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libsrt --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvmaf --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libgsm --disable-w32threads --enable-libmfx --enable-ffnvcodec --enable-cuda-llvm --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt --enable-amf\r\n libavutil 56. 55.100 / 56. 55.100\r\n libavcodec 58. 93.100 / 58. 93.100\r\n libavformat 58. 47.100 / 58. 47.100\r\n libavdevice 58. 11.100 / 58. 11.100\r\n libavfilter 7. 86.100 / 7. 86.100\r\n libswscale 5. 8.100 / 5. 8.100\r\n libswresample 3. 8.100 / 3. 8.100\r\n libpostproc 55. 8.100 / 55. 8.100\r\nC:/Users/Fish_Behavior/Desktop/SIMBA/1LZF_DLCma_model/project_folder/videos/New: No such file or directory", + "created_at": "2020-08-05T21:20:22Z", + "author": "kylecnewton" + }, + { + "body": "@sgoldenlab, oops never mind - I had spaces in the folder name ", + "created_at": "2020-08-05T21:28:33Z", + "author": "kylecnewton" + }, + { + "body": "Hi @sgoldenlab, okay I have another error :/ I feel like I owe you authorship at this point ;)\r\n\r\nThis time I was able to import 19 more videos and h5 files for additional annotation then run the outlier correction. Simba generated the new movement and location csv files but gave me this error:\r\n\r\nError: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file \r\n\r\nI checked the file and the 19 new videos were not listed. I appended the new video file names to the video_info.csv and was able to load a new video to annotate the rheotaxis behaviors. 
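An aside on the ffmpeg failure quoted above (`videos/New: No such file or directory`): an alternative to renaming the folder is to invoke ffmpeg with an argument list, which survives spaces in paths because no shell quoting is involved. The paths and the 60 fps target are assumptions; `-r 60` sets the output frame rate:

```python
import subprocess
from pathlib import Path

in_dir = Path(r"C:\Users\Fish_Behavior\videos\New Folder")  # hypothetical path
for mov in in_dir.glob("*.mov"):
    out = mov.with_name(f"fps_60_{mov.stem}.mp4")
    # Argument list, not a shell string, so spaces in paths need no quoting.
    subprocess.run(["ffmpeg", "-i", str(mov), "-r", "60", str(out)], check=True)
```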
Then I got the following error because the 19 new videos did not have the extracted features csv files generated. The old files are present in project_folder/csv/features_extracted. \r\n\r\nThanks again!\r\n\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\labelling_aggression.py\", line 135, in \r\n self.generate = Button(self.window, text=\"Generate / Save csv\", command=lambda: save_video(self.window))\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\simba\\labelling_aggression.py\", line 474, in save_video\r\n data = pd.read_csv(input_file)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 685, in parser_f\r\n return _read(filepath_or_buffer, kwds)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 457, in _read\r\n parser = TextFileReader(fp_or_buf, **kwds)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 895, in __init__\r\n self._make_engine(self.engine)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1135, in _make_engine\r\n self._engine = CParserWrapper(self.f, **self.options)\r\n File \"c:\\users\\fish_behavior\\.conda\\envs\\simba\\lib\\site-packages\\pandas\\io\\parsers.py\", line 1917, in __init__\r\n self._reader = parsers.TextReader(src, **kwds)\r\n File \"pandas/_libs/parsers.pyx\", line 382, in pandas._libs.parsers.TextReader.__cinit__\r\n File \"pandas/_libs/parsers.pyx\", line 689, in pandas._libs.parsers.TextReader._setup_parser_source\r\nFileNotFoundError: [Errno 2] File b'C:\\\\Users\\\\Fish_Behavior\\\\Desktop\\\\SIMBA\\\\1LZF_DLCma_model\\\\project_folder\\\\csv\\\\features_extracted\\\\\\\\fps_30_20200317_AB_6dpf_ctl.csv' does not exist: b'C:\\\\Users\\\\Fish_Behavior\\\\Desktop\\\\SIMBA\\\\1LZF_DLCma_model\\\\project_folder\\\\csv\\\\features_extracted\\\\\\\\fps_30_20200317_AB_6dpf_ctl.csv'", + "created_at": "2020-08-06T22:36:31Z", + "author": "kylecnewton" + }, + { + "body": "Hi @kylecnewton - no problem at all! This should be an easy one - however, SimBA-related questions are coming thicker and faster and I may not be able to answer very fast, especially if it's more complex. \r\n\r\nI good indication of whats going on is in this tutorial: https://github.com/sgoldenlab/simba/blob/master/docs/Scenario4.md\r\n\r\nWhat has happened is that you have added more videos, you need to extract the features for these files too before you start annotating these new videos. When you press `Extract features`, SimBA looks in your `project_folder/csv/outlier_corrected_movement_location` and process each csv in this folder, and saves the file in the `project_folder/csv/features_extracted` directory. \r\n\r\nTo extract the features for your new files, and no other files, I would look in your `project_folder/csv/outlier_corrected_movement_location` folder, and remove the files you have already processed features for (perhaps create a new folder called `temp` and move your already processed files in there: that way, SimBA can no longer see them and won't process them again). Then open your project in SimBA, load your project, and press `Extract features`. 
Your new features files should appear in your `features_extracted` folder. Let me know how that goes!\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2020-08-07T02:22:31Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab, your solution is working perfectly - thanks for the help.\r\n\r\nSome GUI observations that might help others: \r\n\r\n1-After loading the project and clicking the \"further imports\" tab, the physical placement of \"import further tracking data\" above import further videos\" implies that tracking data should be imported first, then videos. I suggest swapping their placement to ensure people have a logical top to bottom workflow. This would also mimic the placement within the GUI of importing videos then data when you originally created the project file.\r\n\r\n2- When labeling behaviors, I like using the arrow buttons and jump advance below the frame display. Clicking on a single frame advance is nice when precisely labeling, but the immediate proximity of the single frame advance to the first/last frame buttons makes it easy to accidentally go the the beginning/end of the video. Then I have to hunt through 6000 frames to find where I was unless I remember the frame number. My suggestion is to make the buttons a little bigger and separate them a bit so that folks don't get themselves lost. Maybe the buttons are small because I am using 4K monitors? \r\n\r\nJust my $0.02.\r\n\r\n", + "created_at": "2020-08-07T17:36:25Z", + "author": "kylecnewton" + }, + { + "body": "Hi @sgoldenlab, is there any way to use the DLC skeleton data and angle of orientation of the body segments (say from the swim bladder to the eyes) to filter out false positives? The model has changed due to adding new videos but it is still misidentifying negative rheotaxis (swimming with the flow) as positive rheotaxis (swimming against the flow). \r\n\r\nRecall that the videos are shot from above and the flow goes from top to bottom, so positive rheotaxis is swimming with a \"northward\", \"upward\", or 12 o'clock orientation. DLC defines a northern oriented body segment as both 0 and 360 degrees (a full circle), then the acceptable range of rheotactic orientation angles would be from 0-45 degrees and 325-360 degrees. \r\n\r\nDoes that make sense?\r\n", + "created_at": "2020-08-07T22:26:20Z", + "author": "kylecnewton" + }, + { + "body": "Thanks @kylecnewton - put in your suggested updates, 5 pixels between the buttons and reversed the order of the menues. If you update SimBA you should see it. \r\n\r\nYes, I think you are right - your behavior is very \"spatial\" - but in a way that it is not handled well either by the default feature scripts, or by the inclusion static ROIs. The feature you mention, and the features I mentioned earlier, should be enough to get to the classifications you need. 
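For the orientation feature discussed just above, a minimal sketch (not SimBA's feature code) of a swim-bladder-to-eye-midpoint heading angle, with 0°/360° defined as "northward" into the flow. The thread quotes 0-45° and 325-360° as the rheotaxis band; a symmetric ±45° band is used here for simplicity, and the input arrays are assumptions:

```python
import numpy as np

def heading_deg(bladder_xy: np.ndarray, eyes_mid_xy: np.ndarray) -> np.ndarray:
    """Per-frame heading in degrees: 0 = straight up in the image, clockwise."""
    dx = eyes_mid_xy[:, 0] - bladder_xy[:, 0]
    dy = bladder_xy[:, 1] - eyes_mid_xy[:, 1]  # flip sign: image y grows downward
    return np.degrees(np.arctan2(dx, dy)) % 360.0

def positive_rheotaxis(angles: np.ndarray, tol: float = 45.0) -> np.ndarray:
    """Boolean per frame: heading within +/- tol degrees of straight upstream."""
    return (angles <= tol) | (angles >= 360.0 - tol)

bladder = np.array([[100.0, 120.0]])
eyes_mid = np.array([[100.0, 80.0]])  # eyes above the bladder -> heading 0
print(positive_rheotaxis(heading_deg(bladder, eyes_mid)))  # [ True]
```

Appended per frame, a column like this captures the "swimming upward" component directly instead of leaving the classifier to infer it from raw coordinates.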
\r\n\r\nI think the only way to handle it is to use this function in the development wheel of SimBA (`pip install simba-uw-tf-dev`, where you apply your own feature extraction script that calculates the metrics you need):\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/89716147-736f0e80-d95f-11ea-8c1c-c78b51d1516e.png)\r\n\r\nFor more information, check this doc think this scenario is partly what it is meant for: https://github.com/sgoldenlab/simba/blob/master/docs/extractFeatures.md\r\n\r\nI could help you write the first feature extraction script, it shouldn't take me very long (maybe next weekend) - this function needs to be piloted more anyways and this seems like a good opportunity, but you'll need to share the project with me - perhaps zipped through a gdrive if very large - snilsson@uw.edu \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n ", + "created_at": "2020-08-08T17:21:23Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab, thanks so much - you're help writing scripts would be essential. I am a noob to coding and command line programs so it would take me forever.", + "created_at": "2020-08-10T19:44:28Z", + "author": "kylecnewton" + } + ] + }, + { + "title": "Issue importing .slp files. ", + "body": "Hello,\r\n\r\n@sgoldenlab sorry for this new request, but I'm actually unable to import slp files:\r\n\r\n\"Schermata\r\n\r\nWould you have some suggestion to fix that? \r\nThank you, \r\nBest, \r\nDorian \r\n\r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-07-27T15:32:54Z", + "updated_at": "2020-07-30T19:07:40Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli! Let me see if I can recreate it - are you using the SLEAP bottom up or the SLEAP top down architecture?", + "created_at": "2020-07-27T17:55:22Z", + "author": "sgoldenlab" + }, + { + "body": "The bottom-up one. I had just one doubt about the SLEAP file I must upload on SimBa. \nIn this trial I took the \"NAMEOFVIDEO.predictions.slp\" that has been generated in my video folder when the inference is finished. \nI also read that it is possible to extract a data file for analysis, but it is a h5 file, so I deduced it was not the good one. \nCan you confirm me that the\"predictions.slp\" is the good file to upload on Simba?", + "created_at": "2020-07-27T17:57:04Z", + "author": "DorianBattivelli" + }, + { + "body": "Is there any body-parts in you SLP file, that you tracked, but did not specify as you were generating the project? For example in this menu:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88575711-27d07280-cff9-11ea-8a65-b60966c77e9a.png)\r\n\r\nOr did you use one of the built in body-part configurations? ", + "created_at": "2020-07-27T18:06:34Z", + "author": "sgoldenlab" + }, + { + "body": "Actually, I never dealt with the pose config window in simba. At which point should it pop out? \nI defined the bp and the skeleton on SLEAP, and all have been specified. \nI used the 8 bp, as suggested for dlc.", + "created_at": "2020-07-27T18:16:35Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks - another issue opened that bump into the same error - I will check it out with the files @L-Jacinto shared with me. ", + "created_at": "2020-07-27T18:34:26Z", + "author": "sgoldenlab" + }, + { + "body": "Ok, thank you!", + "created_at": "2020-07-27T18:36:36Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - check this response. Let me know if it makes sense and if it imports for you or not. 
\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/53#issuecomment-664597813", + "created_at": "2020-07-27T19:40:17Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the feedback. I installed the new version, but it still fails to upload my .slp file. \r\nDo you confirm that the one to use is the VIDEONAME.predictions.slp generated in my video folder after the inference is done? \r\n\r\nHere is the error: \r\n\"Schermata\r\n", + "created_at": "2020-07-27T20:50:56Z", + "author": "DorianBattivelli" + }, + { + "body": "Still happens hmm.. can you just double check which SimBA version you are running - is it 1.2.10?", + "created_at": "2020-07-27T21:03:19Z", + "author": "sgoldenlab" + }, + { + "body": "Theoretically yes, I just created a new environment and installed the last version inside. Can you please tell me the command to check the version I'm actually using?", + "created_at": "2020-07-27T21:09:39Z", + "author": "DorianBattivelli" + }, + { + "body": "When I go into my site-packages folder, I see a simba-uw-no-tf 1.2.10.dist.info, so I think yes, I'm using the last version.", + "created_at": "2020-07-27T21:15:28Z", + "author": "DorianBattivelli" + }, + { + "body": "I'd do `pip uninstall simba-uw-tf`\r\n\r\nThen I'd do `pip install simba-uw-tf==1.2.10` for example, or `pip install simba-uw-no-tf==1.2.10`, depending on which package you are using\r\n\r\nThe errors for the other user seem to be persisting in a different form - I'm not sure why but will look at it later https://github.com/sgoldenlab/simba/issues/53#issuecomment-664624443", + "created_at": "2020-07-27T21:44:20Z", + "author": "sgoldenlab" + }, + { + "body": "Well, I ran the command, but the error still occurs. ", + "created_at": "2020-07-27T21:57:02Z", + "author": "DorianBattivelli" + }, + { + "body": "Alright - sorry to have to do this again! But would you mind sharing an slp file and the associated video, easier to troubleshoot. I think it's somehow related to the naming of the tracks that comes out of SLP. ", + "created_at": "2020-07-28T01:45:50Z", + "author": "sgoldenlab" + }, + { + "body": "No problem, I added a subfolder called \"slp_issue\" in the SimBA_CSV we already share. \r\nps: I tried both with .avi and .AVI in the title of the slp file, it does not change anything. \r\n\r\nThank you again for the help. ", + "created_at": "2020-07-28T08:33:04Z", + "author": "DorianBattivelli" + }, + { + "body": "Hey @DorianBattivelli - yes it is not an issue with the AVI's, the error occurs prior to this and at the moment, I cannot find an easy solution. This is the error: \r\n\r\nIn your SLP file all the animal detections get assigned a track. For example, in your first frame, SLP finds two tracks named `1857` and `1858`, which is all good. These tracks persist for a few frames but then SLEAP finds new tracks, with new identities, and SimBA struggles to tell which tracks are relevant and which aren't, and therefore fails to create the correct number of headings in the output data frame. 
For example, in frame 1-2 it looks good, but then already at frame 3 we have an issue, with a third track available: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88697092-f028ff80-d0b8-11ea-987c-bc2120132021.png)\r\n\r\nSimBA could handle this by dropping the track with the lowest probability, but then we encounter another issue: the top 2 tracks in many images keep changing names, we don't know who is who any more: for example in the second to last frame the top two tracks are called `1912` and `1922` - and I can't tell, for example, if the original `1858` is track `1922` or `1912`, or neither of them There may be a fix for this (beyond better tracking but I don't know how to handle it in SimBA at the moment... 🤔)\r\n\r\n", + "created_at": "2020-07-28T17:06:19Z", + "author": "sgoldenlab" + }, + { + "body": "Ok, so it's definitely a problem with the quality of my videos. I'll try to generate models with higher quality, thank you anyway,\nBest\nDorian", + "created_at": "2020-07-28T18:21:19Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "error importing SLEAP .slp file", + "body": "When trying to import a .slp file into simba I get the following error in \"project configuration / import tracking data\":\r\n\r\n![simba_error](https://user-images.githubusercontent.com/68620126/88539499-45a5d380-d009-11ea-97ed-caa7a80f190d.png)\r\n\r\nI have imported the corresponding video in \"project configuration / import videos into project folder\".\r\n\r\nWhat is the cause of this issue? Any suggestions on how to solve it?\r\n", + "user": "LJ-lab", + "reaction_cnt": 0, + "created_at": "2020-07-27T12:02:56Z", + "updated_at": "2021-03-12T17:08:25Z", + "author": "LJ-lab", + "comments": [ + { + "body": "Hi @L-Jacinto! First I don't have an intimidate idea for a fix and will need to know more. \r\n\r\nThe cause is SimBA looking for the associated video file name which is stored inside of the SLP file (it is typically stored in a dictionary called `provenance` within an entry called `video.path`), and SimBA can't find this entry, and it breaks. \r\n\r\nThe reason could be that SLEAP has changed their SLP structure slightly (for naming and structure), or the `video.path` entry is only generated when SLP files are created through some architectures, not others, for example. \r\n\r\nIt's difficult for me to troubleshoot from scratch, as I'd have to try and recreate your tracking model. Would you mind sharing your SLP file you are trying to import to a gdrive link? It would be much quicker for me to find out the cause that way. Thanks! \r\n\r\n", + "created_at": "2020-07-27T14:32:20Z", + "author": "sgoldenlab" + }, + { + "body": "Hi. Thank you for the quick reply.\r\n\r\nI'd be happy to share the .slp and video files if that helps solving the issue. Is there an email address I can send the link to?\r\n\r\nThanks again.", + "created_at": "2020-07-27T14:52:29Z", + "author": "LJ-lab" + }, + { + "body": "Additionally, I see in the error, that it's looking for a SLEAPbottom-up file; but the tracking file we've generated in SLEAP was from a top-down model. Can this be the issue? Does SIMBA support tracking generated by both approaches (bottom-up and top-down)?", + "created_at": "2020-07-27T14:56:23Z", + "author": "LJ-lab" + }, + { + "body": "I was thinking that could also be the case, as I only worked with bottom up when I created the pipeline in SimBA and assumed the output file structure would be the same though both methods. 
If you share your file, I could make sure it works both ways. \r\n\r\nCould you share it with snilsson@uw.edu?\r\n\r\n\r\n\r\n", + "created_at": "2020-07-27T15:04:00Z", + "author": "sgoldenlab" + }, + { + "body": "Sent. Thank you.", + "created_at": "2020-07-27T15:37:15Z", + "author": "LJ-lab" + }, + { + "body": "Cheers, got it thanks! I took a peek and the meta-data in the SLP does not contain a video name, it's an empty list - I am not sure why this would be - it could be a bottom up vs top down thing. Thus, it will be difficult for SimBA right now to know what video-file the SLP belongs to, especially when the videos and associated file names are different like this:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88569349-2dc15600-cfef-11ea-9226-7798860e1051.png)\r\n\r\nI am inserting a fix now for you to try. If SimBA finds an empty video name like this, it will re-name the imported CSV to the same filename as the original slp file. The following msg will also be printed:\r\n\r\n'Warning: The video name could not be found in the .SLP meta-data table. SimBA therefore gives the imported CSV the same name as the SLP file. To be sure that SimBAs SLEAP import function works, make sure the imported SLP file(s) and the associated video file(s) has the same file name - e.g., \"Video1.slp\" and \"Video1.slp\" etc, before importing the videos and SLP files to SimBA.')\r\n\r\nI'll let you know when the fix is up, let me know if that makes sense. \r\n", + "created_at": "2020-07-27T17:42:03Z", + "author": "sgoldenlab" + }, + { + "body": "It's up - can you give it a go after upgrading SimBA to version 1.2.9?", + "created_at": "2020-07-27T17:51:41Z", + "author": "sgoldenlab" + }, + { + "body": "Great. Thanks for the update, it seems it solves the import .slp file issue. However, we now have a different error (the same reported on issue #54)\r\n\r\n![simba_error_2](https://user-images.githubusercontent.com/68620126/88577932-aa7c2080-d03f-11ea-8ca9-c5713c05dbda.png)\r\n\r\nFor the bodyparts we use the simba model for 2 animals / 8 bdp.\r\n ", + "created_at": "2020-07-27T18:32:18Z", + "author": "LJ-lab" + }, + { + "body": "I'm onto it! Thanks for reporting this @L-Jacinto ", + "created_at": "2020-07-27T18:39:31Z", + "author": "sgoldenlab" + }, + { + "body": "@L-Jacinto - can you try again (SimBA version 1.2.10) and let me know how it goes. \r\n\r\nBasically, what was happening for some reason - is that your tracks were called \"track 14\" and \"track 15\", while SimBA expected the first two tracks to be called \"track 0\" and \"track 1\". SimBA filled in all the data for \"track 14\" and \"track 15\", and then generated two further empty tracks (track 0-1) because it assumed those animals were in your dataset and they were occluded. \r\n\r\nSo you ended up with 4 tracks, but only a dataframe big enough for 2 tracks, so it failed and threw you that error msg. \r\n\r\nPS. I visualized an image or two of your tracking, it was not amazing but I don't think it is a SimBA error right now - it may be worth going back to SLEAP later on and improving it. \r\n\r\nLet me know how it goes!\r\n ", + "created_at": "2020-07-27T19:39:06Z", + "author": "sgoldenlab" + }, + { + "body": "Amazing - it worked. 
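An illustrative sketch (not SimBA's actual 1.2.10 code) of the track-naming problem just described: map whatever track names SLEAP assigned onto a fixed set of animal slots, instead of assuming the tracks are called "track 0" and "track 1":

```python
def rebase_tracks(track_names, n_animals=2):
    """Map arbitrary SLEAP track names onto Animal_1..Animal_n slots,
    in order of first appearance."""
    ordered = list(dict.fromkeys(track_names))[:n_animals]
    return {name: f"Animal_{i + 1}" for i, name in enumerate(ordered)}

print(rebase_tracks(["track 14", "track 15", "track 14"]))
# {'track 14': 'Animal_1', 'track 15': 'Animal_2'}
```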
Thank you so much for your help.\r\n\r\nHowever, the bodypart labels seem off in the frame to identify the animals when compared with the original video (we browsed several frames, and in most of them the labels seemed off) (even though, you are right, the tracking is not so good on that video; it was just a quick inference video from a poorly trained network to test the full SLEAP-to-SIMBA pipeline).\r\n\r\nTo ensure it was not a video problem, we tested with another video with better tracking, and we got a new error this time:\r\n\r\n![error3](https://user-images.githubusercontent.com/68620126/88588044-d0f58800-d04e-11ea-841d-ad4a1c3d7513.png)\r\n\r\nIn case you want to check it I'm sending you the video and .slp file via email.\r\n\r\nps. Indeed, SLEAP can call tracks any number/name, it does not necessarily label two animals with track0 and track1 (especially if you have multiple tracks from the inference step).\r\n", + "created_at": "2020-07-27T20:20:14Z", + "author": "LJ-lab" + }, + { + "body": "Apparently there was something wrong with that .slp file. We generated a new one and it worked. \r\n\r\nHowever, the labels are still off when compared with the SLEAP-labeled video; and perhaps that may be the reason why the tracking was apparently not so great in the frames you saw. \r\n\r\nI will mail you the original video, sleap-labeled video and .slp file, so you can have a look.\r\n\r\nthank you again for your great help.", + "created_at": "2020-07-27T20:35:24Z", + "author": "LJ-lab" + }, + { + "body": "Thanks - I will take a look a little later today! Sounds like there may be an alignment issue with the tracking and the frames when the tracks are labelled differently than beginning with zero. ", + "created_at": "2020-07-27T21:07:06Z", + "author": "sgoldenlab" + }, + { + "body": "Also, since the updates we have been unable to properly open the \"load project menu\" (either with DLC or SLEAP-based projects):\r\n\r\n![empty_menu](https://user-images.githubusercontent.com/68620126/88603477-f09ba900-d06c-11ea-9309-c13088b5722a.png)\r\n\r\nWe get the following error on the console:\r\n\r\n![error4](https://user-images.githubusercontent.com/68620126/88603488-f98c7a80-d06c-11ea-98b2-ccf4de50a86a.png)\r\n\r\n\r\nThis was working fine for us before (at least with DLC files).", + "created_at": "2020-07-27T23:57:24Z", + "author": "LJ-lab" + }, + { + "body": "Ah sorry I had this conversation with someone the other day, it is a problem with the names of the body-parts. I'm going to paste it here to see if it helps you: \r\n\r\nsgoldenlab @sgoldenlab Jul 23 21:57\r\nI'm however surprised that you weren't able to boot up your project. Could you send me a screengrab of the content of your project_folder\logs\measures\pose_configs\bp_names\project_bp_names.csv file to help me understand it?\r\n\r\nCarmenWinters @CarmenWinters Jul 23 21:59\r\n![image](https://user-images.githubusercontent.com/50497030/88604961-77309600-d02d-11ea-82e9-b62f683277e6.png)\r\n\r\nsgoldenlab @sgoldenlab Jul 23 22:04\r\nThank you, is this the order they appear in the CSV tracking file?\r\nLike, the order you labelled them in DLC?\r\n\r\nCarmenWinters @CarmenWinters Jul 23 22:04\r\nYep!\r\n\r\nsgoldenlab @sgoldenlab Jul 23 22:17\r\nAh thanks, SimBA needs a way of telling which animal is which, and currently it does so by looking for numbers in the animal bodypart names. If you try to rename the animals \"Mother_1\", \"Pup_2\", and \"Mother_3\", I think it might work. 
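A sketch of the idea behind the "Mother_1 / Pup_2 / Mother_3" fix pasted above: a trailing digit in each name gives the program something to group animals by. This regex-based grouping is an assumption about the principle, not SimBA's implementation:

```python
import re
from collections import defaultdict

def group_by_animal(bp_names):
    """Group body-part names by their trailing animal number, if any."""
    groups = defaultdict(list)
    for name in bp_names:
        match = re.search(r"(\d+)$", name)
        groups[match.group(1) if match else "?"].append(name)
    return dict(groups)

print(group_by_animal(["Nose_1", "Tail_1", "Nose_2", "Tail_2"]))
# {'1': ['Nose_1', 'Tail_1'], '2': ['Nose_2', 'Tail_2']}
```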
It should help you start the project.\r\nI'll fix the code to look for the names of the animals instead\r\n\r\nCarmenWinters @CarmenWinters Jul 23 22:27\r\nIT WORKED!!!! Thank you! You are a magician!\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2020-07-28T00:21:40Z", + "author": "sgoldenlab" + }, + { + "body": "Is it right that you can no longer open projects that you could previously open though?", + "created_at": "2020-07-28T00:22:06Z", + "author": "sgoldenlab" + }, + { + "body": "Looking at the sleap import error now - thanks for the labeled video also, very helpful. ", + "created_at": "2020-07-28T01:47:54Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @L-Jacinto - I've been trying to make sense of it but cannot get my head around it at the moment. Looking at your SLEAP dataframe, there is an initial track for animal 14 in frame 0. One of the body-parts in this track is located at x=419, y = 152. But this is not at all near where the animals are in frame 0, and do not match with the labeled video you sent, but it is the coordinate SimBA picks up. Just wanted to check: have you rotated, clipped, cropped or downsampled the video in any way after performing the tracking? \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88615002-d4d0dc80-d045-11ea-91ba-1846902b4c1e.png)\r\n", + "created_at": "2020-07-28T03:17:40Z", + "author": "sgoldenlab" + }, + { + "body": "Regarding the project loading: we are not naming any bodyparts, we are using the predefined simba \"multi tracking / multianimals, 8 bps\" model. And correct, we can no longer load projects created after the update. With the exact same project configuration and video files we were able to create and load before the update.", + "created_at": "2020-07-28T08:28:10Z", + "author": "LJ-lab" + }, + { + "body": "Here is the screengrab of the bodyparts file:\r\n\r\n![bdps_scp](https://user-images.githubusercontent.com/68620126/88649250-9e37a800-d0bf-11ea-89b2-8484dbc868c8.png)\r\n", + "created_at": "2020-07-28T09:46:47Z", + "author": "LJ-lab" + }, + { + "body": "Regarding the tracking issue. That is really odd. We did not crop, rotate or change the video in any way after tracking. The video I sent you is the same video we used to train the network, infer the tracks and then export the .slp file we are trying to import into SIMBA.", + "created_at": "2020-07-28T09:50:14Z", + "author": "LJ-lab" + }, + { + "body": "Regarding the project loading: I reverted to version 1.2.7 and the projects load just fine (ie. all options on all tabs in \"load project\" show up, and operations - video parameters, label behavior, etc - are functional).\r\n\r\nFrom version 1.2.9, the loading stops working (for the files/projects that previously worked).", + "created_at": "2020-07-28T10:35:54Z", + "author": "LJ-lab" + }, + { + "body": "Thanks again @L-Jacinto for reporting this. I will dig into it more - I should have another tracking data file from the same tracking set up from the user user experiencing similar issues as you and maybe this will help me make sense of it. ", + "created_at": "2020-07-28T14:00:08Z", + "author": "sgoldenlab" + }, + { + "body": "We reverted to version 1.2.1 and were able to go through the whole process (ie. create project, import .slp, load project and label videos) with a .slp file without any errors. With versions 1.2.5 onward we get all kind of errors, including the ones reported here. 
(did not test between versions 1.2.1 and 1.2.5).\r\n\r\n(however, for the \"extract features\" step to work, in this version - the only in which we were able to import the .slp file and complete the whole process, but we assume it's the same for all versions - the .slp file must contain instances (i.e. skeletons) IDs for all animal on all frames (this has to be solved in SLEAP first). Additionally, even if you have all instances on all frames, if some bodyparts are missing on some frames on the .slp file (for example, they were occluded on those frames) extract features will fail. One way to solve this is to run the \"outlier correction\" step with higher criteria values. (this may be related with issue #42 )", + "created_at": "2020-07-30T09:25:44Z", + "author": "LJ-lab" + }, + { + "body": "We have also narrowed down one of the issues: the mispositioned labels. The .slp file that needs to be imported into SIMBA is NOT the \"main\" .slp generated in \"file/save\" in SLEAP; but the one found in the folder \"\\predictions\". With this file, all labels appear in the correct position and the video.path error does not occur (regardless of the file names).", + "created_at": "2020-07-30T10:26:32Z", + "author": "LJ-lab" + }, + { + "body": "Thanks @L-Jacinto - that's great to know", + "created_at": "2020-07-30T15:20:12Z", + "author": "sgoldenlab" + }, + { + "body": "Just an update, regarding the .slp to be imported and the mispositioned labels. Using the .slp file found on the \"\\predictions\" folder in SLEAP works well to solve the mispositioned labels and video.path errors in SIMBA but has a major caveat: corrections to the inference in SLEAP (which are much needed to refine the tracking/labeling and avoid the extract features errors in SIMBA) will not be saved into the predictions .slp. \r\n\r\nWould there be a way for SIMBA users to import the main .slp or .h5 from SLEAP (which allows saving changes/updates to labels/tracking) without the mispositioned labels? Or is there another solution to this (perhapas in SLEAP)?", + "created_at": "2020-07-31T09:26:53Z", + "author": "LJ-lab" + }, + { + "body": "Hi @L-Jacinto - that does sound like a major caveat. So if I understand correctly - after running inference on the video, SLEAP saves two separate h5 SLP files - one with additional track corrections and one without track corrections, and SimBA only works on the one without? The file we worked on earlier, was that the SLP file with corrections? I'm trying to get my head around the idea why SLEAP would save a SLP with erroneous tracks. \r\n\r\n\r\n\r\nUnfortunately I have other pressing things happening at the lab at the moment, so I won't be able to dig deep into this right now but will help as much I can. I think it is important to get SimBA working properly on the correct output file from SLEAP though, so we will have to figure out what the differences are between the two containers.\r\n\r\n", + "created_at": "2020-07-31T14:34:27Z", + "author": "sgoldenlab" + }, + { + "body": "Whenever you save on \"File/Save\" on SLEAP's menu a .slp file - that is created the first time you click save - is updated. I can only assume that this would be the .slp file that contains the most current labels and tracks. However, that was the file I had sent you previously and that created the mispositioned labels in SIMBA (and originated the video.path error before v1.2.9 update).\r\n\r\nAdditionally, whenever you run an inference on SLEAP, a prediction .slp is also generated in a \"/predictions\" folder. 
That file cannot be updated by subsequent changes to the inference result. However, when loading this file into SIMBA we get the labels in the correct position (and the video.path error does not occur, regardless of the file names). (but, as reported, it has the caveat of not allowing prediction refinement, which is crucial)\r\n\r\nOn top of that, it is also possible to export a .h5 file from \"File/Export Analysis HDF5\". But SIMBA cannot load this file.", + "created_at": "2020-07-31T15:17:35Z", + "author": "LJ-lab" + }, + { + "body": "so, no solution for this?", + "created_at": "2020-10-20T20:33:00Z", + "author": "LJ-lab" + } + ] + }, + { + "title": "Issue during features extraction.", + "body": "@sgoldenlab , Hello, I'm unfortunately dealing with a new error message.\r\nI have been able to analyze the ROI and to obtain: number of entries, cumulative time, distance and velocity data.\r\nNow I would like to draw heat maps. But I should first extract features, right? \r\n\r\nWhen I click Extract Features, it seems that the process stops, and here is what happens: \r\n\r\n\"Schermata\r\n\r\nI then click on append ROI data to features, and here is what appears:\r\n\r\n\"Schermata\r\n\r\nDo you have any idea why it's not working? \r\n\r\nAs a reminder, I'm working on a multi-animal project, on windows 7, avi videos, SimBA = 1.2.7. Concerning the outlier correction, I skipped it. \r\nThank you, \r\nBest, \r\nDorian ", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-07-24T13:41:08Z", + "updated_at": "2020-07-27T13:37:13Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Ok, I found the topic in your FAQ. I'll follow the Batch pre-process videos in SimBA tutorial. ", + "created_at": "2020-07-24T14:18:20Z", + "author": "DorianBattivelli" + }, + { + "body": "Hi @DorianBattivelli - yes it's to do with all body-parts falling on the same X/Y co-ordinates. Happy you found it. \r\n\r\nThere are two types of heatmaps - one that requires features and one that doesn't. \r\n\r\nThe one that does require features plots heatmaps representing the time/location of animals engaging in classified behaviours and is accessed in the visualization tab. \r\n\r\nThe one that doesn't require features plots heatmaps representing the time spent in locations and is accessed in the ROI tab:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88402953-acfd2280-cd80-11ea-9497-83acf636441c.png)\r\n \r\n\r\n\r\n\r\n", + "created_at": "2020-07-24T14:39:00Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab Thank you for the answer. Indeed I tried this function, but it does not work either. \r\nHere is what appears in my terminal: \r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T14:56:19Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah, could be another .avi / .mp4 issue. Let me check.\r\n\r\nNope, it works fine regardless of mp4 / avi. \r\n\r\nI think the program has trouble finding the resolution of your video in your `logs/video_info.csv` file. Does your video_info.csv contain the resolution width and height for all videos like this?
\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88408694-c4400e00-cd88-11ea-82e4-ca3d22d953cd.png)\r\n\r\n\r\n\r\n", + "created_at": "2020-07-24T15:22:30Z", + "author": "sgoldenlab" + }, + { + "body": "Yes, here is one file for one set of videos: \r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T15:47:06Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah thanks - I'm not sure why - but the heading seems to have moved one row? It now starts at row two, whereas SimBA expects the heading in row 1. Does it work if you move it up?\r\n\r\n", + "created_at": "2020-07-24T15:55:34Z", + "author": "sgoldenlab" + }, + { + "body": "I think it is because I converted it to an xlsx file to make it easier to read in the screenshot, but the original csv starts from row 1:\r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T15:58:45Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks - let me check this again when I get to a computer. Might be that I can replicate it by using the auto argument or playing more with the bin size. ", + "created_at": "2020-07-24T16:20:03Z", + "author": "sgoldenlab" + }, + { + "body": "@DorianBattivelli - would you happen to have any files in your outlier_corrected_movement_location folder which **are not** represented in your video_info.csv file?\r\n", + "created_at": "2020-07-24T16:53:44Z", + "author": "sgoldenlab" + }, + { + "body": "Not sure I understand what you mean :/ \r\n\r\nThe files in my outlier_corrected_movement_location folder are the same as the ones in my video_info.csv file:\r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T17:00:24Z", + "author": "DorianBattivelli" + }, + { + "body": "That's what I meant, it looks like it should! ", + "created_at": "2020-07-24T17:02:46Z", + "author": "sgoldenlab" + }, + { + "body": "As I can't replicate it here, I wonder if you can help me by inserting a line in your code and telling me what's printed out?\r\n\r\nIn this file:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88416622-5c43f480-cd95-11ea-856f-ad7a4cbe3c0d.png)\r\n\r\n\r\nCould you insert a new line on Line 41 which would read like this? \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88416706-84335800-cd95-11ea-87e7-fe4de6a40552.png)\r\n\r\nAt least one of these values is for some reason going to be zero in your setup, and it would help knowing which one it is \r\n\r\n", + "created_at": "2020-07-24T17:08:31Z", + "author": "sgoldenlab" + }, + { + "body": "Don't worry about the try and except you see at the lower part, you don't have it, I just inserted it now to catch the error you are seeing\r\n", + "created_at": "2020-07-24T17:09:24Z", + "author": "sgoldenlab" + }, + { + "body": "I'll try that as soon as I'm back at mine, and I'll let you know, thanks.", + "created_at": "2020-07-24T17:14:09Z", + "author": "DorianBattivelli" + }, + { + "body": "@sgoldenlab, I added the line you told me (print(...)), but the line starting with binWidth, etc.
was not at line 40 in my document, but at line 54, so I added the line at line 55.\r\nIn yellow you can see line 41, while in blue next to the cursor is the line I inserted (at line 55) (I don't know how to make the row number visible in wordpad, so I counted it manually).\r\n\r\n\"Schermata\r\n\r\nNow that I saved the file like that, I can't launch SimBA anymore, here is what appears: \r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T20:27:37Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah, it's a common one, it needs to be uniform 4 spaces or uniform tabs, I will send you the file ", + "created_at": "2020-07-24T20:29:07Z", + "author": "sgoldenlab" + }, + { + "body": "[plot_heatmap_location_new.zip](https://github.com/sgoldenlab/simba/files/4974117/plot_heatmap_location_new.zip)\r\n\r\njust replace the one you have with this\r\n", + "created_at": "2020-07-24T20:29:42Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you, I managed to open SimBA with your file.\r\nHere is the error when I try to generate the heat map:\r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T20:36:57Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah, I think I understand. The bin size becomes rounded off to less than a pixel due to a rather low resolution that I did not anticipate, sorry. Try this - in the script - change `int` to `float`. \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88433831-b0f66800-cdb3-11ea-9bcf-683cf065b9f3.png)\r\n ", + "created_at": "2020-07-24T20:43:51Z", + "author": "sgoldenlab" + }, + { + "body": "I did the change:\r\n\r\n\"Schermata\r\n\r\nBut the error still occurs, even after I restarted SimBA from scratch:\r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T20:50:21Z", + "author": "DorianBattivelli" + }, + { + "body": "ah\r\n", + "created_at": "2020-07-24T20:55:25Z", + "author": "sgoldenlab" + }, + { + "body": "Sorry, one more float!\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88434620-6bd33580-cdb5-11ea-9f5f-8eda00e7d129.png)\r\n", + "created_at": "2020-07-24T20:56:08Z", + "author": "sgoldenlab" + }, + { + "body": "I did the change: \r\n\r\n\"Schermata\r\n\r\nBut now I cannot fill the bin window anymore, I can't write anything:\r\n\r\n\"Schermata\r\n", + "created_at": "2020-07-24T21:06:55Z", + "author": "DorianBattivelli" + }, + { + "body": "That seems very strange, does it come with an error msg in the terminal? Does it persist after restarting SimBA?", + "created_at": "2020-07-24T21:41:59Z", + "author": "sgoldenlab" + }, + { + "body": "Actually, I don't know what happened, but now it is working, the analysis is ongoing, thank you!", + "created_at": "2020-07-24T21:49:08Z", + "author": "DorianBattivelli" + }, + { + "body": "good news again!", + "created_at": "2020-07-24T21:54:18Z", + "author": "sgoldenlab" + }, + { + "body": "I actually have one more issue with heat map generation. I'm able to generate the videos, but when I ask to save the last image only, it just analyses the first video, and stops. It does not analyse the next videos:\r\n\"Schermata\r\n\r\nShould I modify something in the code? ", + "created_at": "2020-07-25T00:03:06Z", + "author": "DorianBattivelli" + }, + { + "body": "Hmm, I don't think so - are the videos all the same resolution?
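To make the int-to-float change above concrete, a toy sketch (the variable names only loosely mirror the heatmap script) of why casting the per-bin pixel width to int breaks on low-resolution videos:

```python
# Hypothetical numbers for a heavily cropped, low-resolution video.
pixels_per_mm = 0.04   # scale read from video_info.csv
bin_size_mm = 20       # user-requested heatmap bin size

bin_width_int = int(bin_size_mm * pixels_per_mm)      # int(0.8) -> 0,
# so any later division by the bin width raises ZeroDivisionError
bin_width_float = float(bin_size_mm * pixels_per_mm)  # 0.8 survives

print(bin_width_int, bin_width_float)
```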
\r\n\r\nI may have to troubleshoot this tomorrow.", + "created_at": "2020-07-25T00:08:51Z", + "author": "sgoldenlab" + }, + { + "body": "Yes, they have the same resolution.\r\nOk, so let's see that later, thank you again, \r\nBest, \r\nDorian ", + "created_at": "2020-07-25T00:09:41Z", + "author": "DorianBattivelli" + }, + { + "body": "@sgoldenlab, to give you further input about my issue.\r\n\r\nThis morning I tried to generate a heat map (only the last picture) with another set of videos and I got a strange result. Since I have 2 animals, I first ran it for \"center1\", and it generated one picture for all my videos, so I thought the problem was solved. But then, when I ran it for \"center2\", one picture was missing for my last video. \r\n\r\nFYI:\r\nThe resolution of the videos for which I failed to obtain a heat map picture yesterday (the program stopped after having generated the HM of the first video) is:\r\n\"Schermata\r\n\r\nThe resolution of the videos I worked with this morning:\r\n\"Schermata\r\n\r\nI have the feeling that the resolution of my videos is very low, and that can be very problematic. Actually it's because I cropped a subfield of my raw videos, so the remaining portion on which I'm focusing ends up with a pretty bad resolution. \r\nBtw, turning back to my original problem, which concerned the issue I have extracting the features, I don't think that the pre-process tutorial can help me. I already did what is explained there for all my videos: cropping and splitting my files before working on them. So I actually don't know how I will be able to build classifiers from these files. \r\nLet me know if you have some ideas, \r\nThanks! ", + "created_at": "2020-07-25T07:51:27Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": " name 'deeplabcut' is not defined", + "body": "**Describe the bug** When I run simba, an error about dlc appears.\r\n(deeplabcut-py36) D:\\Users\\Administrator\\Documents>simba\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\programdata\\anaconda3\\envs\\deeplabcut-py36\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\programdata\\anaconda3\\envs\\deeplabcut-py36\\lib\\site-packages\\simba\\SimBA.py\", line 1904, in \r\n button_generatetempyaml_single = Button(label_tempyamlsingle,text='Add single video',command=lambda:generatetempyaml(self.label_set_configpath.file_path,self.label_genyamlsinglevideo.file_path))\r\n File \"c:\\programdata\\anaconda3\\envs\\deeplabcut-py36\\lib\\site-packages\\simba\\dlc_change_yamlfile.py\", line 15, in generatetempyaml\r\n deeplabcut.add_new_videos(tempyaml,[videolist],copy_videos=True)\r\nNameError: name 'deeplabcut' is not defined\r\n\r\n**Desktop (please complete the following information):**\r\n - windows 10\r\n - Python Version [ 3.6.0]\r\n - Are you using anaconda? yes\r\n installed with SimBAxTF.\r\n\r\nDo you have any ideas? Thanks!\r\n", + "user": "neugun", + "reaction_cnt": 0, + "created_at": "2020-07-24T09:12:59Z", + "updated_at": "2020-07-26T09:34:16Z", + "author": "neugun", + "comments": [ + { + "body": "Hi @neugun - it seems DeepLabCut may not be installed in the environment from which you are launching SimBAxTF. Within the SimBA environment, could you try doing a `pip install deeplabcut==2.09` followed by typing `simba` and let me know if it launches?\r\n\r\n", + "created_at": "2020-07-24T14:41:50Z", + "author": "sgoldenlab" + }, + { + "body": "Hi! @sgoldenlab I reran conda install shapely, and it finally works!
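On the resolution question in this thread, a small hedged sketch (the folder path is a placeholder) that prints each video's width and height with OpenCV, to confirm whether all videos really share one resolution:

```python
import glob
import cv2

# Hypothetical videos folder inside a SimBA project.
for video_path in glob.glob("project_folder/videos/*.avi"):
    cap = cv2.VideoCapture(video_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    cap.release()
    print(f"{video_path}: {width}x{height}")
```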
Thanks for your reply!", + "created_at": "2020-07-26T09:34:16Z", + "author": "neugun" + } + ] + }, + { + "title": "Issue for ROI analyze. ", + "body": "Hello, \r\n\r\nI'm using SimBA with data obtained from multi-animal pose estimation in DLC. \r\nI have been able to extract my frames, to scale the video and to draw the ROIs. But when I want to run the ROI analysis (2 animals, one body part), there is an error. Here is a screenshot of the prompt: \r\n\r\n\"Schermata\r\n\r\nI'm analyzing 4 videos, and used the \"apply to all\" function after drawing the ROI of the first video. The first video seems ok, but the issue occurs at the second video: \r\n\r\n\"Schermata\r\n\r\nWould someone have a solution to help me fix this issue? \r\n\r\nFYI: I'm using SimBA 1.2.5 on windows 7. My videos are \".avi\".\r\nI already successfully ran ROI analysis with the same program configuration (but never using the \"apply to all\" function) with other videos. So I don't know why this time it is not working. \r\n\r\nThank you, \r\nBest, \r\nDorian ", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-07-23T16:11:05Z", + "updated_at": "2020-07-24T00:54:04Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - I tried to re-create it here (with and w/o \"apply to all\") but it ran through, so at this moment I don't think it's a bug. The issue is SimBA trying to grab the current X,Y coordinate of the body-part of the animal. It then, just to make sure, tries to convert it to an integer value (in case it turns out to be a float value), but it turns out to be a NaN, which it can't work with, and it breaks. \r\n\r\nIf you go to the `project_folder\\csv\\outlier_corrected_movement_location\\cEFp1_30.csv` and open it up, can you see anything different compared to the other files - is there any reason this file would have empty entries / cells? Was this file processed the same way as the others through pose-estimation and outlier correction?\r\n\r\nIf you analyze this file only, not using the \"apply to all\" function, does it work?\r\n\r\n", + "created_at": "2020-07-23T16:38:02Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab Thank you for the answer. \r\n\r\nI checked the project_folder\\csv\\outlier_corrected_movement_location\\cEFp1_30.csv file, and I did not notice any difference except its size: 4.5 MB while the 3 others are around 7.5 MB. \r\n\r\n", + "created_at": "2020-07-23T16:47:43Z", + "author": "DorianBattivelli" + }, + { + "body": "I will try later to draw each video without the \"apply to all\" function, and I'll let you know, \r\n\r\nThank you, \r\nBest, \r\nDorian ", + "created_at": "2020-07-23T16:53:10Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli alright, let me know! If the video is the same length as the others it should be the same file size as the rest, and there should be a single header row in all files. I'll insert something that catches NaN errors in the next update", + "created_at": "2020-07-23T16:56:16Z", + "author": "sgoldenlab" + }, + { + "body": "They are all 15 min videos, so with the same length. Strange, then, that their file sizes are different. \r\nI failed to manually draw the other ROIs, since when I click \"reset\" next to the _30, _45 and _60 videos, I get this message:\r\n\r\n\"Schermata\r\n\r\nIt seems that it considers that no ROI is saved for these videos. But when I click draw next to each of them, the ROIs drawn on the first video appear. Is that normal?
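The ROI error above comes from int() being fed a NaN coordinate; a hedged sketch (not SimBA's actual code) of the kind of guard that avoids the crash:

```python
import math

def safe_int(value, fallback=None):
    """Convert a pose-estimation coordinate to int, tolerating NaN."""
    if value is None or (isinstance(value, float) and math.isnan(value)):
        return fallback  # the caller can then skip or interpolate the frame
    return int(value)

print(safe_int(152.7))          # -> 152
print(safe_int(float("nan")))   # -> None instead of a ValueError
```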
\r\n\r\nBtw, I confirm that I have a single header row in all files.\r\n", + "created_at": "2020-07-23T17:03:44Z", + "author": "DorianBattivelli" + }, + { + "body": "Actually, taking a closer look, I see that the cEFp1_30 is full of empty cells. I don't know why. \r\n\r\nHere is the situation: \r\n\"Schermata\r\n\r\nThe other excel files are full. \r\nDo you have any idea why it is like that? ", + "created_at": "2020-07-23T17:27:29Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks @DorianBattivelli - I will have to come back to this in the afternoon, but no - that is not normal and should not happen, and I have a feeling I have another .avi / .mp4 bug on my hands which is separate from your original error. You could check this by converting your avi's to mp4's in your videos folder and run it again in the meantime, but I will let you know. ", + "created_at": "2020-07-23T17:27:32Z", + "author": "sgoldenlab" + }, + { + "body": "If you look in your original DLC input file in your `project_folder/csv/input_csv`, is that file also missing values?\r\n", + "created_at": "2020-07-23T17:29:44Z", + "author": "sgoldenlab" + }, + { + "body": "Yes, this one also has missing values, while the files related to the other videos do not. ", + "created_at": "2020-07-23T17:31:45Z", + "author": "DorianBattivelli" + }, + { + "body": "Ok, I will try the mp4 solution later and let you know, thank you\r\n", + "created_at": "2020-07-23T17:33:34Z", + "author": "DorianBattivelli" + }, + { + "body": "Can you go back and check the very original file, in the DLC project, I think something could have gone wrong when you ran it through DLC potentially ", + "created_at": "2020-07-23T17:34:09Z", + "author": "sgoldenlab" + }, + { + "body": "Do you mean the bx.h5 file? ", + "created_at": "2020-07-23T17:37:16Z", + "author": "DorianBattivelli" + }, + { + "body": "Ah yes, forgot it was multi-animal - yes, something has gone astray with this file - either it has not been converted properly to csv or something went wrong when it was generated in DLC. \r\n", + "created_at": "2020-07-23T17:49:38Z", + "author": "sgoldenlab" + }, + { + "body": "Ok, I'll re-run DLC to generate it again, and will let you know if it fixes the problem. ", + "created_at": "2020-07-23T17:51:21Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - I've pushed an update that should fix the reset issue you were seeing with your .avi files - that should be taken care of if you upgrade SimBA to version 1.2.7 ", + "created_at": "2020-07-23T19:15:37Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the release. \r\nI regenerated an h5 file from dlc, and tried to analyze ROI in SimBA: it works perfectly. Thank you for the help, \r\nBest, \r\nDorian ", + "created_at": "2020-07-24T00:00:35Z", + "author": "DorianBattivelli" + }, + { + "body": "I am happy to hear that! ", + "created_at": "2020-07-24T00:54:03Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Add Simba to Open Neuroscience", + "body": "\r\nHello!\r\n\r\nWe are reaching out because we would love to have your project listed on [Open Neuroscience](), and also to share information about this project:\r\n\r\nOpen Neuroscience is a community-run project, where we are curating and highlighting open source projects related to neurosciences!\r\n\r\nBriefly, we have a website where short descriptions about projects are listed, with links to the projects themselves, their authors, together with images and other links.
\r\n\r\nOnce a new entry is made, we make a quick check for spam, and publish it. \r\n\r\nOnce published, we make people aware of the new entry via [Twitter](https://twitter.com/openneurosci) and a [Facebook](https://www.facebook.com/OpenNeuroscience) group. \r\n\r\n**To add information about their project, developers only need to fill out this [form](https://forms.gle/ByM8thAhZJkHBMQN8)**\r\n\r\n\r\nIn the form, people can add subfields and tags to their entries, so that projects are filterable and searchable on the website!\r\n\r\nThe reason why we have the form system is that it makes it open for everyone to contribute to the website and allows developers themselves to describe their projects! \r\n\r\nAlso, there are so many amazing projects coming out in Neurosciences that it would be impossible for us to keep track and log them all!\r\n\r\n\r\n\r\nPlease [get in touch](mailto:openeuroscience@gmail.com) if you have any questions or would like to collaborate!\r\n", + "user": "amchagas", + "reaction_cnt": 0, + "created_at": "2020-07-23T11:48:06Z", + "updated_at": "2020-07-23T15:38:44Z", + "author": "amchagas", + "comments": [ + { + "body": "Hi @amchagas - fantastic! I have filled out the form - thank you! 😄 ", + "created_at": "2020-07-23T15:38:43Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Issue with graphviz not being recognized", + "body": "Hi again, @sgoldenlab, I had hoped it would be longer before I pestered you again!\r\n\r\nThe \"Train Machine Model\" step ran well for a few minutes before running into the below error:\r\n\"Screen\r\n\r\nThoughts on how to circumvent this? I did a bit of looking around other forums (not related to SimBA) and found a suggestion that I add Graphviz to the Windows path. Thanks in advance for your help!", + "user": "emmonseb", + "reaction_cnt": 0, + "created_at": "2020-07-22T22:20:17Z", + "updated_at": "2020-07-24T02:11:05Z", + "author": "emmonseb", + "comments": [ + { + "body": "Haha @emmonseb - keep 'em coming! :)\r\n\r\nTo install graphviz, there is a guide that I found useful that you can access through this menu in SimBA: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88235909-b0819400-cc30-11ea-9c73-6a850eaf2c55.png)\r\n\r\nTo make sure it's working, you need to add it to the environmental paths in Windows, in the same way as (I presume) you have already added FFmpeg to the path. It's not directly part of python, so you can't do a `pip` install. \r\n\r\nTo circumvent this issue, you can also untick this box in SimBA to not generate a decision tree visualization using graphviz:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88236229-5f25d480-cc31-11ea-883f-7f31c4a90cf9.png)\r\n", + "created_at": "2020-07-22T22:38:45Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks @sgoldenlab! I'm trying it without the graphviz requirement for now, and am now getting this exception:\r\n\"Screen\r\n\r\nWhen this does eventually create a model, where will the .sav file be stored? I looked over the tutorial pretty thoroughly and wasn't able to find the location.
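Since the graphviz failure above is usually a PATH problem rather than a Python one, a quick hedged check that can be run inside the SimBA environment (this only verifies that Graphviz's `dot` executable is reachable):

```python
import shutil

dot_path = shutil.which("dot")  # Graphviz's layout-engine executable
if dot_path is None:
    print("Graphviz not found on PATH - add its bin folder to the Windows "
          "environment variables and restart the terminal.")
else:
    print(f"Graphviz found at: {dot_path}")
```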
Thanks!", + "created_at": "2020-07-22T23:11:19Z", + "author": "emmonseb" + }, + { + "body": "@emmonseb - this error is produced by your settings in this entry box:\r\n![image](https://user-images.githubusercontent.com/50497030/88238819-9303f880-cc37-11ea-9cee-7ada706b8ae1.png)\r\n\r\nIf you have ticked this box, the entry box should become un-greyed out and allow you to insert an integer number (\"How many features do you want to display in your feature importance graph?\"). It seems like SimBA can't find the integer. Did you enter a value in this box after ticking the tick-box?\r\n", + "created_at": "2020-07-22T23:24:55Z", + "author": "sgoldenlab" + }, + { + "body": "Wow, talk about a dumb mistake, sorry about that! When I had previously done the settings I had put a number in but overlooked it this time. I'm re-running it now.\r\n\r\nThanks for your patience!", + "created_at": "2020-07-22T23:54:15Z", + "author": "emmonseb" + }, + { + "body": "It's an easy miss to make, I should insert a error message that catches it. ", + "created_at": "2020-07-23T00:00:39Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Pointer to folder with dlc files", + "body": "Hi, I'm using SimBA on some DLC output files, which I had to move to a different drive post-labelling. Now I'm not able to train the network.\r\n\r\nIs there a config file with the project pointing to the DLC files I can access somewhere to edit to the new directory?\r\n", + "user": "akaye", + "reaction_cnt": 0, + "created_at": "2020-07-21T20:44:35Z", + "updated_at": "2020-07-23T16:46:00Z", + "author": "akaye", + "comments": [ + { + "body": "Hi @akaye - just to be able to troubleshoot this, are you using DLC within the SimBA GUI (https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_DLC.md) or are you using DLC separately from SimBA? It will help me understand what's going on, thanks!", + "created_at": "2020-07-22T00:16:55Z", + "author": "sgoldenlab" + }, + { + "body": "We used DLC separately", + "created_at": "2020-07-22T00:22:25Z", + "author": "akaye" + }, + { + "body": "One more question! When you say 'post-labelling' - do you mean after labelling the frames in SimBA (labelling behaviors) or do you mean body-part labelling in DLC? ", + "created_at": "2020-07-22T00:37:53Z", + "author": "sgoldenlab" + }, + { + "body": "Post-labelling behaviors (frames in SimBA) we had to move some of the source files because of hard drive space issues, I'm wondering if I can redirect the source file pointer to the new location", + "created_at": "2020-07-22T14:28:46Z", + "author": "akaye" + }, + { + "body": "@akaye thanks I think I get it now. The project paths are saved inside your project `project_config.ini` file. What I would normally do is open the file in notepad++, and do a control+F search and replace, replacing the old path substrings in the entire file with the new path substring. \r\n\r\nYou'll see a [General settings] heading with a `project_path` entry at the top, this is the most important. But to be sure all functions in SimBA works I'd make sure I'd replace any old paths in this file. \r\n\r\nSo e.g.,\r\n**Search for**\r\nC:\\myoldprojectpath\\\r\n\r\n**Replace with**\r\nZ:\\mynewprojectpath\\\r\n\r\nLet me know it that works\r\n\r\n\r\n\r\n", + "created_at": "2020-07-22T15:10:08Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab,\r\n\r\nI'm the collaborator of @akaye who ran into the project path issue yesterday--thank you very much for resolving that! 
\r\n\r\nUnfortunately it seems like the issue we're having is related to something else entirely. Suggestions for how to address the below error?\r\n\r\n\"Screen\r\n\r\n", + "created_at": "2020-07-22T20:33:22Z", + "author": "emmonseb" + }, + { + "body": "Hi @emmonseb! There seems to have been a mis-reading in SimBA of which body-parts your project contains; it could have happened when you created the project. To figure out what's going on, could you open this file in your project `project_folder\\logs\\measures\\pose_configs\\bp_names` - and send a screenshot to me? This file should contain a list of the body parts that you are using in your project and might look like this: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88230216-da35bd80-cc26-11ea-86a1-ed41ce9e747f.png)\r\n\r\nDo the listed body-parts in your file seem familiar? Otherwise, I'd suggest either updating the body-parts in this file to the correct ones, or trying to create a new project, and let me know if the error persists. \r\n\r\n\r\n", + "created_at": "2020-07-22T21:24:40Z", + "author": "sgoldenlab" + }, + { + "body": "Here's what I see in `project_folder\\logs\\measures\\pose_configs\\bp_names`\r\n\r\n\"Screen\r\n\r\nThese names correspond to the body parts that I used for my DLC tracking, though I worded them differently. I tried changing the Excel file to the abbreviations that I used, reran, and had the same issue.\r\n\r\nIf I create a new project, will I be able to reference and use the outlier corrections, extracted features, and labeled frames from my past project?\r\n\r\nThanks for all of your help!", + "created_at": "2020-07-22T21:38:28Z", + "author": "emmonseb" + }, + { + "body": "I see, thanks @emmonseb - to help me understand how to fix this, did you use a user-defined pose-configuration or did you set it to an in-built one? The body-parts I am seeing correspond with this built-in one. \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88232005-f6872980-cc29-11ea-9643-e85d2e2948b6.png)\r\n\r\nTo give you an idea why this is happening - SimBA tries to build a model by grabbing your CSV files in your `project_folder/csv/targets_inserted` directory and that works fine. However, SimBA then tries to remove your pose-estimation columns and just keep the features. SimBA begins, for example, to look for a column called 'Ear_left_x' to discard it, and it can't find it, hence the error. If you open your csv file in excel (if it is not too large) you can take a peek at what your body-parts are in fact called, which may help you troubleshoot what has gone wrong, i.e.: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/88232426-af4d6880-cc2a-11ea-84f2-6cce34053a30.png)\r\n", + "created_at": "2020-07-22T21:52:00Z", + "author": "sgoldenlab" + }, + { + "body": "Such helpful and clear guidance, thank you. I made the relevant changes and things are humming along nicely.\r\n\r\nIn case it's helpful for someone else, it seems that the default names given by SimBA for this layout are:\r\n\"Screen\r\n\r\nThanks again, I really appreciate your thorough and responsive help!", + "created_at": "2020-07-22T22:05:17Z", + "author": "emmonseb" + }, + { + "body": "👍 thanks for letting me know, and please let me know if you bump into any other issues", + "created_at": "2020-07-22T22:20:02Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Impossible to set video parameters.", + "body": "Hello,\r\n\r\nI'm using multi-animal pose-estimation data in SimBA.
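A hedged sketch of the body-part cross-check described above: compare the names listed in the project's bp_names file against the columns actually present in a targets_inserted CSV (both file paths are placeholders, and the bp_names file is assumed to be a single header-less column):

```python
import pandas as pd

BP_NAMES = r"project_folder\logs\measures\pose_configs\bp_names\project_bp_names.csv"
DATA_CSV = r"project_folder\csv\targets_inserted\my_video.csv"

body_parts = pd.read_csv(BP_NAMES, header=None)[0].tolist()
columns = set(pd.read_csv(DATA_CSV, nrows=0).columns)  # header only

# Each body part should contribute _x, _y and _p columns to the data.
for bp in body_parts:
    missing = [c for c in (f"{bp}_x", f"{bp}_y", f"{bp}_p") if c not in columns]
    if missing:
        print(f"{bp}: missing columns {missing}")
```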
I already extracted the frames, and should now set the video parameters. But here is the error message my prompt returns:\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/87948623-3ffa3c00-caa5-11ea-9043-0d4793a01abf.png)\r\n\r\nConsequently, no video rows appear, and I cannot define the scales on my videos.\r\n\r\nWould someone please have a suggestion to fix this problem?\r\n\r\nFYI: I'm using the 1.2.5 simba version and get the same error on windows 7 and 10. My videos are .avi. \r\n\r\nThank you, \r\nBest, \r\nDorian \r\n", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-07-20T14:24:14Z", + "updated_at": "2020-07-20T23:01:12Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "I fixed the bug. \r\nThe extensions of the video files were in capital letters, while they should not be.\r\n\r\nI also report here another bug: my files in the csv input folder had been generated with an avi extension, and so I had to modify them manually to \"csv\" to make it work.\r\n\r\nThank you, \r\nBest, \r\nDorian ", + "created_at": "2020-07-20T15:10:06Z", + "author": "DorianBattivelli" + }, + { + "body": "Thanks @DorianBattivelli - that's exactly what I was going to ask you (about the capital letters) - I was looking through the code and saw that this could happen, I will push a fix for this now. \r\n\r\nI will also fix the issue in the import of the CSV, it's likely to be due to the same capitalization reason.\r\n\r\nThanks again!", + "created_at": "2020-07-20T15:21:44Z", + "author": "sgoldenlab" + }, + { + "body": "@DorianBattivelli - I've uploaded a fix (version 1.2.6 now) - the fix is to rename all file extensions to lowercase (e.g., \".AVI\" or \".MP4\" becomes \".avi\" or \".mp4\") when imported into SimBA. Let me know if you bump into any more issues like this. Thanks!", + "created_at": "2020-07-20T16:25:04Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab thank you so much,\r\nBest, \r\nDorian ", + "created_at": "2020-07-20T18:24:12Z", + "author": "DorianBattivelli" + } + ] + }, + { + "title": "Issue when uploading bx.h5 files. ", + "body": "Hello, \r\n\r\nI am working on a maDLC project, and I have an error message when I try to import my h5 files into Simba. \r\n\r\nHere is the screenshot:\r\n![Untitled](https://user-images.githubusercontent.com/66886884/87858369-baa04b80-c92d-11ea-816f-f6fc917ff169.png)\r\n\r\nWould someone have a suggestion to fix this problem?\r\n\r\nFYI: I labelled the body parts through DLC 2.b.5 on windows 7, then ran the training and analysis on colab, modifying the first cell code to use DLC 2.b.7, and now I have tried to switch to SimBA on two different Windows computers, one with Windows 7, the other with Windows 10. Nothing works, always the same error.\r\n\r\nThank you for the help, \r\nBest, \r\nDorian ", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-07-18T17:38:27Z", + "updated_at": "2020-07-19T15:05:07Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - sorry about this and thanks for reporting it. The issue most likely comes from SimBA trying to read the resolution of your video, to make sure the circles are visualized at an appropriate size (not too big / not too small), and it can't find the resolution, or possibly the video. \r\n\r\nTo get this working, do you happen to see any error msg in the main SimBA terminal printed at the same time as this error msg? \r\n\r\nAre your videos in .mp4 and/or .avi format?
\r\n\r\n", + "created_at": "2020-07-18T19:31:37Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the help,\r\n\r\nNo, I did not see any error message in the main Simba terminal. I just have the \"Importing 8 multi-animal DLC h5 files to teh current project\".\r\n I am working on avi files,\r\n\r\nThank you", + "created_at": "2020-07-19T00:37:33Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli Thanks, let me see if I can fix it - I think it could be an Avi issue with the code. ", + "created_at": "2020-07-19T01:01:58Z", + "author": "sgoldenlab" + }, + { + "body": "@DorianBattivelli - sorry there was a line in there that hardcoded it to look for an mp4, which likely caused the issue - either (1) download the latest SimBA `pip install simba-uw-tf==1.2.5` or `pip install simba-uw-no-tf==1.2.5`, and I should have fixed it, or (2) Convert your videos to mp4 (https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_tools.md#change-video-format) - let me know if that fixes it or not", + "created_at": "2020-07-19T01:24:20Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab Both solutions perfectly fixed the problem. Everything is working now, thank you so much! ", + "created_at": "2020-07-19T11:17:55Z", + "author": "DorianBattivelli" + }, + { + "body": "Great and thanks for letting me know !", + "created_at": "2020-07-19T15:05:07Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Issue when loading .h5 files. Fuzzy instructions. ", + "body": "Hello, \r\n\r\nI'm using SimBA for the first time, and I have an issue just after clicking \"import h5\". The instructions are fuzzy and I cannot read what is asked. Here a picture:\r\n\r\n![Untitled](https://user-images.githubusercontent.com/66886884/87134203-a3f85580-c298-11ea-88db-c119d156bada.png)\r\n\r\nI tried to recreate a new project several times, but I obtain systematically the same issue. would you have some suggestion to fix that bug?\r\n\r\nFYI: I am using windows 10, and my training has been done on maDeeplabcut 2.2.b6 with 8bp. I worked on mov format for dlc analysis, and then I converted them in mp4 to switch on SimBA.\r\n\r\nThank you for the help, \r\nBest, \r\nDorian ", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-07-10T08:36:12Z", + "updated_at": "2020-07-10T18:53:10Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli - many thanks for reporting this, it's very helpful and I appreciate it. It's my mistake / bug, I'm printing the text with too big line width, which depends on the resolution of your videos, so if you work on smaller resolution videos, then it can start to look fuzzy like this and difficult to read, and I will fix it. But, meanwhile, if you wanted to fix it yourself! (It's a small change):\r\n\r\n1). Go to your this file in your where SimBA is installed, likely something similar to this: `C:\\Python36\\Lib\\site-packages\\simba\\read_DLCmulti_h5_function.py`. Open the file and change these values to either 1 or 2 rather than the original 3 and it should look much better. \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/87162194-73a1be80-c27a-11ea-9e99-e7d039320106.png)\r\n\r\n2). Save your updated file and restart SimBA!\r\n\r\nI will however fix it in the next update with some other fixes - hopefully I have it done by end of weekend. 
\r\n", + "created_at": "2020-07-10T14:01:20Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab , it works now, thank you very much!\r\n", + "created_at": "2020-07-10T16:09:22Z", + "author": "DorianBattivelli" + }, + { + "body": "Great! 👍 ", + "created_at": "2020-07-10T18:53:09Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Issue when importing DLC .H5 file", + "body": "Hello,\r\n\r\nI'm attempting to create a new project using multi-animal pose estimation data from DLC. However, I am unable to import the tracking data in .H5 file format from DeepLabCut.\r\n\r\nFirst I generated the Project Config, defined the tracking as multi-animal and imported the raw videos used to train the network on DLC. Yet, when i try to import the tracking data in .H5 file format from DeepLabCut from the DLC output folder \"\\training-datasets\\iteration-0\\UnaugmentedDataSet\", with the .H5 file, I receive the following message on SimBA: \r\n\"Importing 0 multi-animal DLC h5 files to teh current project. All multi-animal DLC .h5 tracking files ordered and imported into SimBA project in CSV file format\".\r\n\r\nHow can i solve these problems?\r\n\r\nThank you.", + "user": "franciscamachado", + "reaction_cnt": 0, + "created_at": "2020-07-07T15:05:22Z", + "updated_at": "2020-07-24T14:54:27Z", + "author": "franciscamachado", + "comments": [ + { + "body": "Hi @franciscamachado - first, the file you have in your `\\training-datasets\\iteration-0\\UnaugmentedDataSet` folder is likely **not** the DLC output file we need to import into SimBA. The `\\training-datasets\\iteration-0\\UnaugmentedDataSet` folder typically does not store pose estimation tracking data (unless you moved a video in there and then analyzed it, which is a little unusual), but rather it stores **your annotations** which is used to train DLC models. In order words, the file you may be trying to import into SimBA may look a little like this (if opening the .csv version of the file), and contain the coordinates of your own mouse clicks during annotations in DLC (you are likely to have less empty cells than this, but I just want to make the point):\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/86857701-ee4cbd00-c073-11ea-8345-7103209007ec.png)\r\n\r\nWhat we need to import into SimBA from maDLC it the H5 file generated when analyzing a video in DLC. This file will be located in the same folder as the video file you analyzed. For example, in this screenshot below, I have analyzed a video in maDLC called \"Together1.mp4\" using the \"skeleton\" option in DLC:\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/86858036-b1cd9100-c074-11ea-8b61-a82f38225aa5.png)\r\n\r\nIn the SimBA interface you tell which option you used to analyze the file in DLC (skeleton or box). If you used \"skeleton\" in DLC, then you h5 files will be tagged with a file ending \"_sk.h5\", as in my example screenshoot above. If you used box, then the files will be tagged with a file ending \"_bx.h5\". If you tell SimBA you used a skeleton setting, then SimBA is looking for files in your selected folder ending with \"_sk.h5\". I think that part of the issue you are having is that you are pointing SimBA to a folder that does not contain any \"_sk.h5\" files. Let me know if this makes sense! Thanks\r\n\r\n\r\n", + "created_at": "2020-07-08T00:16:27Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the reply and help. I was using maDLC and I had to go through the refine tracklets stage to get the h5. 
As you described, after that stage, the h5 does appear in the same folder as the original video.\r\n\r\nRelated to this issue, when I import the h5 into SIMBA I get an error if I used an .avi video file to create the h5 file, as SIMBA always ends up looking for a .mp4 file. If I convert the video to .mp4 it works fine. How can I solve this problem?\r\n\r\nThank you.", + "created_at": "2020-07-22T11:44:27Z", + "author": "franciscamachado" + }, + { + "body": "Hi @franciscamachado - yes I noticed this the other day - it was reported by another user https://github.com/sgoldenlab/simba/issues/46#issuecomment-661153273\r\n\r\nCan you update SimBA to the latest version via pip - `pip install simba-uw-no-tf==1.2.6` or `pip install simba-uw-tf==1.2.6` - and let me know if that fixes the issue?", + "created_at": "2020-07-22T15:00:50Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Issue when extracting features", + "body": "Hello everybody!\r\n\r\nI am attempting to extract the features, but I run into this error message \r\n![image](https://user-images.githubusercontent.com/57539512/86372170-1efeb200-bc82-11ea-9083-6e2bfb52e698.png)\r\n\r\nI created an environment containing only python 3.6.0, then (inside the environment) I pip install simba-uw-no-tf, then conda install shapely, and conda install h5py. SimBA launches, and all the steps went fine until the extract features bit, where I get that error message. Any idea how to solve this? I am using the 1 animal 8bp config\r\n \r\n", + "user": "2909ft", + "reaction_cnt": 0, + "created_at": "2020-07-02T14:41:51Z", + "updated_at": "2020-07-02T16:20:14Z", + "author": "2909ft", + "comments": [ + { + "body": "Hi @2909ft ! \r\n\r\nTypically this error happens when your pose-estimation tool, like Deeplabcut, has placed all the body-parts in the image on a single x-y coordinate. When SimBA then tries to calculate features that require 2D (like the millimeter area of the animal), this error happens, as we can't calculate the area when the coordinates fall on one dimension. \r\n\r\nI wrote a FAQ that mentions this error but I forgot to link to it on the main page:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/SimBA_no_TF/docs/FAQ.md\r\n\r\nThere may also be some relevant info on how to solve it on the gitter page if you search for 'qhull'. \r\n\r\nThe typical reason is that your first image in your video does not contain an animal, or the animal is covered by the experimenter's hand or similar, and the pose tool has placed all the body-parts, for example, in the top left corner of the image. You can solve this by cutting those early parts of the video out before analysis. If that's not the case, it may be that something has gone wrong in the outlier correction step, and you may want to make your outlier criteria less stringent (increase the criteria values).\r\n\r\nLet me know if that fixes it or not!\r\n\r\n\r\n", + "created_at": "2020-07-02T15:50:11Z", + "author": "sronilsson" + }, + { + "body": "Indeed, I was less stringent with the outlier correction step and now it seems to be fine :) thank you for the reply! ", + "created_at": "2020-07-02T16:20:14Z", + "author": "2909ft" + } + ] + }, + { + "title": "Unable to open simba gui", + "body": "I have installed Simba using pip install simba-uw-no-tf, but when I tried launching Simba, I got the traceback below. I know it is successfully installed because I opened the directory in my terminal (also shown below). I am using a Linux virtual machine, if that changes anything.
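A hedged sketch of how one could flag the frames behind the qhull error described above, where every body part collapses onto a single coordinate so the convex hull has no area (the path and column naming are assumptions):

```python
import pandas as pd

CSV_PATH = "project_folder/csv/outlier_corrected_movement_location/my_video.csv"

df = pd.read_csv(CSV_PATH)
x_cols = [c for c in df.columns if c.endswith("_x")]
y_cols = [c for c in df.columns if c.endswith("_y")]

# A frame is degenerate when all body parts share one x and one y value;
# such points span zero area, which is what trips the qhull computation.
degenerate = (df[x_cols].nunique(axis=1) == 1) & (df[y_cols].nunique(axis=1) == 1)
print(f"{int(degenerate.sum())} degenerate frame(s)")
print(df.index[degenerate].tolist()[:20])  # first few offending frames
```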
\r\n![image](https://user-images.githubusercontent.com/66270143/86048001-caf39380-ba1d-11ea-826e-4b219ee102a9.png)\r\n![image](https://user-images.githubusercontent.com/66270143/86048138-fe362280-ba1d-11ea-9611-2cd95debeac1.png)\r\n\r\n", + "user": "jacobk14", + "reaction_cnt": 0, + "created_at": "2020-06-29T19:34:26Z", + "updated_at": "2020-07-23T19:41:21Z", + "author": "jacobk14", + "comments": [ + { + "body": "Hi @jacobk14 - first, thanks for trying SimBA! However, for the moment, we only support Windows use. This is a known issue with the encoded windows path preventing SimBA from booting on Linux / MacOS. Even if you do get past this specific issue, there are many path readings in SimBA that are designed for Windows, making it difficult to work with in Linux at the moment. \r\n\r\nPlease see these closed issues for some related info:\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/37\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/39\r\n\r\nFor now, I recommend getting hold of a Windows machine and running SimBA on it. You mention you are running Linux on a virtual machine. If this happens to be on native Windows, I'd suggest exiting the virtual machine and installing/running SimBA in native Windows. \r\n\r\n\r\n ", + "created_at": "2020-06-30T04:20:58Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @jacobk14 - I've uploaded a Linux-compatible version here - it's very much a work in development, but most of the functions (excluding some of the video pre-processing functions) should work in Linux. If you do decide to try it out and encounter some bugs, it would be great if you report them so I can fix them asap\r\n\r\n`pip install simba-uw-tf-dev`", + "created_at": "2020-07-20T01:33:41Z", + "author": "sgoldenlab" + }, + { + "body": "Hi! First, thanks so much @sgoldenlab for the work-in-progress Linux version! I'm trying to use it now and running into a few kinks regarding install on an Ubuntu virtual machine (through MATE) when I try to run `simba`. The first two were straightforward enough: it didn't find a module named `numba`, so I ran `pip install numba`; then it couldn't find the file `TheGoldenLab.png`, so I manually found the file and changed the name from `TheGoldenLab.PNG` to lowercase. \r\nNow I'm getting another error after the GoldenLab image pops up:\r\n`_tkinter.TclError: couldn't recognize data in image file \"/sfs/qumulo/qhome/akh8zm/.conda/envs/simba-linux/lib/python3.6/site-packages/simba/golden.png\" `\r\nI've verified that the file does exist and the properties look ok to me:\r\n\r\n\r\nNot sure about next steps. Some issues I've found online say that tkinter can only support GIFs, but that would require changing the filename.", + "created_at": "2020-07-23T19:38:51Z", + "author": "akh8zm" + } + ] + }, + { + "title": "Cannot extract frames", + "body": "Hi, First of all, thank you for creating SIMBA!
This is my first approach to this kind of tracking program and I'm just learning to use it.\r\nI'm running SIMBA w/TF in Windows 10 with:\r\n\r\n- Intel i7 (8th gen)\r\n- NVIDIA GeForce MX250 GPU\r\n- 16GB RAM\r\n\r\nI created a DLC project with one video, and when I want to extract the frames in this video (w/automatic form and uniform algorithm) I get the following in my anaconda prompt:\r\n\r\n\r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"c:\\users\\agost\\anaconda3\\envs\\dlc-gpu\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"c:\\users\\agost\\anaconda3\\envs\\dlc-gpu\\lib\\site-packages\\simba\\SimBA.py\", line 2148, in dlc_extractframes_command\r\n select_numfram2pick(config_path,self.label_numframes2pick.entry_get)\r\nNameError: name 'select_numfram2pick' is not defined\r\n\r\n```\r\n", + "user": "asacson", + "reaction_cnt": 0, + "created_at": "2020-06-29T18:28:01Z", + "updated_at": "2020-07-01T04:14:52Z", + "author": "asacson", + "comments": [ + { + "body": "Hi @asacson - thanks for trying SimBA and thanks for reporting this, it's very helpful. Can you download the latest version of SimBA using `pip install simba-uw-tf --upgrade` (version 1.2.3.11), load your project, and try to extract frames, and let me know if it works now? Thanks again!", + "created_at": "2020-06-30T04:10:16Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you very much for the quick response, it's working now.", + "created_at": "2020-07-01T04:14:52Z", + "author": "asacson" + } + ] + }, + { + "title": "Path not working on MacOS", + "body": "**Describe the bug**\r\nSome of your scripts use backslashes for generating a path, which does not work on MacOS.\r\nFor example, in create_project_init.py, \r\n```project_folder = str(directory +'\\\\' + project_name + '\\\\project_folder')```\r\nIt would be better to use ```os.path.join()```\r\n\r\nThe backslashes resulted in folders like this: \r\n![image](https://user-images.githubusercontent.com/8534966/85917709-7d630500-b82a-11ea-9911-b20b2f3437b7.png)\r\n", + "user": "EvenGu", + "reaction_cnt": 0, + "created_at": "2020-06-27T07:59:22Z", + "updated_at": "2020-06-27T17:57:34Z", + "author": "EvenGu", + "comments": [ + { + "body": "Hi @EvenGu - currently we only support Windows. I.e., check this recent issue for suggested workarounds (e.g., a virtual Win environment for now):\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/37#issuecomment-643699314\r\n\r\nThere have been a lot of Mac/Linux users reaching out about issues like this, and I know some may be working on updating the SimBA code to ensure Mac/Linux compatibility, and when this is available I will let you know. \r\n\r\n ", + "created_at": "2020-06-27T17:57:34Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Conflicting Dependencies + Failure to Run", + "body": "Hello! 
This is my first time writing an issue, so feel free to request lots of clarification.\r\n\r\nI'm running the most recent version of Anaconda and attempting to install Simba. I've followed the instructions [here](https://github.com/sgoldenlab/simba/blob/master/docs/installation.md#install-simba-standalone-package-without-tensorflow-or-integrated-deeplabcutdeepposekit-support) for installing the standalone package w/o tf. I've been trying to resolve this for the past few hours to no avail.\r\n\r\n**Describe the bug**\r\nSimba will not run and has trouble installing.\r\n\r\n**To Reproduce**\r\n1. Create environment with python 3.6: `conda create --name new_env python==3.6`\r\n2. Activate environment: `conda activate new_env`\r\n3. Within `new_env`, run `pip install simba-uw-no-tf`. This is using pip version 20.1.1. Everything appears to build correctly except with the following warning:\r\n** `ERROR: imbalanced-learn 0.7.0 has requirement scikit-learn>=0.23, but you'll have scikit-learn 0.22.2 which is incompatible.`\r\n4. Ignoring that, run `simba` in `new_env`. This fails and produces a long traceback. \r\n**`Traceback (most recent call last):`\r\n`File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)`\r\n ` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)`\r\n` File \"C:\\ProgramData\\Anaconda3\\envs\\new_env\\Scripts\\simba.exe\\__main__.py\", line 4, in `\r\n ` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\simba\\SimBA.py\", line 47, in \r\n from simba.ROI_freehand_draw_3 import roiFreehand`\r\n ` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\simba\\ROI_freehand_draw_3.py\", line 6, in \r\n from shapely.geometry import Polygon`\r\n ` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\shapely\\geometry\\__init__.py\", line 4, in \r\n from .base import CAP_STYLE, JOIN_STYLE`\r\n ` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\shapely\\geometry\\base.py\", line 18, in \r\n from shapely.coords import CoordinateSequence`\r\n ` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\shapely\\coords.py\", line 8, in \r\n from shapely.geos import lgeos`\r\n` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\shapely\\geos.py\", line 145, in \r\n _lgeos = CDLL(os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll'))`\r\n` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\ctypes\\__init__.py\", line 348, in __init__\r\n self._handle = _dlopen(self._name, mode)`\r\n`OSError: [WinError 126] The specified module could not be found`\r\n\r\n**Expected behavior**\r\nExpect no conflicting dependencies and simba to start when `simba` is run in `new_env`\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Python Version: 3.6.0 in `new_env`, 3.8 in base environ\r\n - Are you using anaconda? Yes\r\n \r\n**Things I have tried**\r\n- making sure shapely is installed in `new_env`\r\n- same thing with Python 3.7 and 3.8 (has even more issues)\r\n- Interestingly, imbalanced-learn 0.7 has listed on their [pypi site](https://pypi.org/project/imbalanced-learn/) that it is compatible with scikit-learn versions >= 0.22...I tried installing scikit-learn version 0.23 anyways, which conflicts with simba but satisfies imbalanced-learn. Produces same error when trying to run `simba`. 
\r\n- same thing with imbalanced-learn 0.62 in the hopes it doesn't need scikit-learn 0.23... same error when trying to run `simba`\r\n\r\nAny help/thoughts much appreciated!\r\n", + "user": "akh8zm", + "reaction_cnt": 0, + "created_at": "2020-06-26T00:31:08Z", + "updated_at": "2020-06-26T18:48:03Z", + "author": "akh8zm", + "comments": [ + { + "body": "Hi @akh8zm - this is most likely a Shapely issue - the same issue and error msg has been reported and solved a few times in the past and is unrelated to imblearn / scikit.\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/12#issue-583261742\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/11#issuecomment-596805732\r\n\r\nhttps://github.com/sgoldenlab/simba/issues/33#issue-617328532\r\n\r\nI think it relates to some modules of the Shapely package not meant to be run in python. \r\n\r\nI'd try some of the following to see if you can get it started: Remove shapely if installed: `conda remove shapely` and install it again `conda install shapely` and see if that works. \r\n\r\nOtherwise, the fix that has been working for me is the second link above - download the wheel yourself and install it.\r\n\r\nLet me know if that helps!\r\n\r\n\r\n\r\n", + "created_at": "2020-06-26T06:35:44Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for the prompt reply! I removed and reinstalled shapely as you said using conda; it didn't work and produced a new error message. I'm gonna post the error message just for completeness.\r\n\r\n`Traceback (most recent call last):`\r\n` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\runpy.py\", line 193, in _run_module_as_main \"__main__\", mod_spec)`\r\n` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)`\r\n` File \"C:\\ProgramData\\Anaconda3\\envs\\new_env\\Scripts\\simba.exe\\__main__.py\", line 4, in `\r\n` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\simba\\SimBA.py\", line 90, in \r\n from simba.sleap_bottom_up_convert import importSLEAPbottomUP`\r\n` File \"c:\\programdata\\anaconda3\\envs\\new_env\\lib\\site-packages\\simba\\sleap_bottom_up_convert.py\", line 1, in \r\n import h5py`\r\n`ModuleNotFoundError: No module named 'h5py'`\r\n\r\nI next tried to download the wheel directly from the link you said, installed it using pip and reinstalled simba. Running `simba` results in the same error message as above. \r\n\r\nIn the install documentation for simba-no-tf it says \"This does not require a GPU, or local installations of DeepLabCut, DeepPoseKit, or SLEAP.\", but since I'm getting an error about a missing module, is there something else I need to install? I'm gonna keep trying to troubleshoot with my limited skills 😄 \r\n- removing and reinstalling shapely with conda-forge doesn't fix it..\r\n- created a fresh environment and, as per [this comment](https://github.com/sgoldenlab/simba/issues/11#issuecomment-597118320), installed shapely with conda *first*, then installed simba using pip. Getting the same h5py error when trying to run `simba`\r\n- tried to use method 2, installing by cloning from github. Seemed to work with a few warning messages about incompatible packages, but running `python simBA.py` in the simba folder produces the following error message traceback:\r\n```\r\nC:\\ProgramData\\Anaconda3\\envs\\test\\lib\\site-packages\\sklearn\\ensemble\\weight_boosting.py:29: DeprecationWarning: numpy.core.umath_tests is an internal NumPy module and should not be imported.
It will be removed in a future NumPy release.\r\n from numpy.core.umath_tests import inner1d\r\nTraceback (most recent call last):\r\n File \"simBA.py\", line 23, in \r\n from sklearn_DLC_RF_train_model import RF_trainmodel\r\n File \"C:\\ProgramData\\Anaconda3\\simba\\simba\\sklearn_DLC_RF_train_model.py\", line 19, in \r\n from imblearn.combine import SMOTEENN\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\test\\lib\\site-packages\\imblearn\\__init__.py\", line 37, in \r\n from . import combine\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\test\\lib\\site-packages\\imblearn\\combine\\__init__.py\", line 5, in \r\n from ._smote_enn import SMOTEENN\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\test\\lib\\site-packages\\imblearn\\combine\\_smote_enn.py\", line 10, in \r\n from ..base import BaseSampler\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\test\\lib\\site-packages\\imblearn\\base.py\", line 15, in \r\n from .utils import check_sampling_strategy, check_target_type\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\test\\lib\\site-packages\\imblearn\\utils\\__init__.py\", line 7, in \r\n from ._validation import check_neighbors_object\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\test\\lib\\site-packages\\imblearn\\utils\\_validation.py\", line 15, in \r\n from sklearn.neighbors._base import KNeighborsMixin\r\nModuleNotFoundError: No module named 'sklearn.neighbors._base'\r\n```", + "created_at": "2020-06-26T16:29:51Z", + "author": "akh8zm" + }, + { + "body": "@akh8zm You are almost there! For some reason h5py hasn't installed in your environment either. \r\n\r\nYou fixed the shapely error. Once that is done, run:\r\n\r\n`pip install h5py`\r\n\r\nand try and launch SimBA again and let me know if that fixes it!", + "created_at": "2020-06-26T17:24:10Z", + "author": "sgoldenlab" + }, + { + "body": "oh my gosh it worked! Thank you so much. \r\nFor posterity, I created a new environment and did:\r\n```\r\nconda install shapely\r\npip install simba-uw-no-tf\r\npip install h5py\r\nsimba\r\n```", + "created_at": "2020-06-26T17:36:29Z", + "author": "akh8zm" + }, + { + "body": "Great - let me know if you get into any more issues @akh8zm ", + "created_at": "2020-06-26T18:48:03Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Impossible to run simba on Mac os", + "body": "Hello, \r\n\r\nThank you for the new release of Simba. \r\nI tried to install it on my mac using \"pip install simba-uw-no-tf\" command. \r\n\r\nDuring the installation I noticed a bug: whereas this command should specify for intallation without TensorFlow, an error message appeared:\r\n\r\n\"Could not find a version that satisfies the requirement tensorflow_gpu==1.14.0...\"\r\n\r\nI added '#' before the tensorflow line in requirments, and that fixed the issue. \r\n\r\nNevertheless, I can not launch the program from my terminal. \r\nOnce cd in my simba folder, I tried both: \"simba.py\" and \"pythonw simba.py\" commands. \r\n\r\nHere the error message returned: \r\n\r\n![b95a3806-9308-4a56-b941-532f8d33f5c8](https://user-images.githubusercontent.com/66886884/84579069-19bcee80-adcb-11ea-99e8-ccc61b800eef.jpg)\r\n\r\nFYI: I'm using python 3.7.6, with anaconda. I'm also using DeepLabCut 2.2.b5. Finally one friend tried also to install Simba on her Macos, and she had the same error. \r\n\r\nHave you some suggestion to fix the bug? 
\r\n\r\nThank you very much!\r\nBest,\r\nDorian ", + "user": "DorianBattivelli", + "reaction_cnt": 0, + "created_at": "2020-06-13T21:20:30Z", + "updated_at": "2020-06-17T02:26:51Z", + "author": "DorianBattivelli", + "comments": [ + { + "body": "Hi @DorianBattivelli – thanks for looking into SimBA! At the moment we only support Windows use and I’m aware of Mac/Linux issues in relation to the paths, which appears to be what’s preventing the boot on your machine.\r\n\r\nEnsuring Mac/Linux compatibility is not a priority right now. Is there any chance you could find a windows machine, or would running SimBA through a virtual windows environment e.g., virtualbox or vmware, be an alternative for you? That would prevent these errors you are seeing from showing up.\r\n", + "created_at": "2020-06-14T00:24:28Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab, \r\n\r\nThank you for your answer. I will try to install it on a windows computer, \r\n\r\nBest, \r\nDorian ", + "created_at": "2020-06-16T14:51:18Z", + "author": "DorianBattivelli" + }, + { + "body": "@DorianBattivelli - let me know how it goes!", + "created_at": "2020-06-17T02:26:51Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 6.2.2", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 6.2.2.\n
Release notes

Sourced from pillow's releases.

6.2.2
https://pillow.readthedocs.io/en/stable/releasenotes/6.2.2.html

6.2.1
https://pillow.readthedocs.io/en/stable/releasenotes/6.2.1.html

6.2.0
https://pillow.readthedocs.io/en/stable/releasenotes/6.2.0.html

6.1.0
https://pillow.readthedocs.io/en/stable/releasenotes/6.1.0.html

6.0.0
No release notes provided.

Changelog

Sourced from pillow's changelog.

6.2.2 (2020-01-02)

- This is the last Pillow release to support Python 2.7 #3642
- Overflow checks for realloc for tiff decoding. CVE-2020-5310 [wiredfool, radarhere]
- Catch SGI buffer overrun. CVE-2020-5311 [radarhere]
- Catch PCX P mode buffer overrun. CVE-2020-5312 [radarhere]
- Catch FLI buffer overrun. CVE-2020-5313 [radarhere]
- Raise an error for an invalid number of bands in FPX image. CVE-2019-19911 [wiredfool, radarhere]

6.2.1 (2019-10-21)

- Add support for Python 3.8 #4141 [hugovk]

6.2.0 (2019-10-01)

- Catch buffer overruns #4104 [radarhere]
- Initialize rows_per_strip when RowsPerStrip tag is missing #4034 [cgohlke, radarhere]
- Raise error if TIFF dimension is a string #4103 [radarhere]
- Added decompression bomb checks #4102 [radarhere]
- Fix ImageGrab.grab DPI scaling on Windows 10 version 1607+ #4000 [nulano, radarhere]
- Corrected negative seeks #4101 [radarhere]
- Added argument to capture all screens on Windows #3950 [nulano, radarhere]

... (truncated)

Commits

- a45c858 Release notes for 6.2.2
- 83efad4 6.2.2 version bump
- 4820f79 Added release notes [ci skip]
- 4e2def2 Overflow checks for realloc for tiff decoding
- a79b65c Catch SGI buffer overruns
- 93b22b8 Catch PCX P mode buffer overrun
- a09acd0 Catch FLI buffer overrun
- 774e53b Raise an error for an invalid number of bands in FPX image
- 8892aec Added security notes [ci skip]
- 46c35f0 Updated copyright year
- Additional commits viewable in compare view
\n\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=6.2.2)](https://help.github.com/articles/configuring-automated-security-fixes)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2020-06-12T20:19:18Z", + "updated_at": "2021-02-02T20:24:11Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #88.", + "created_at": "2021-02-02T20:24:09Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Simba Create Project", + "body": "Hi\r\n\r\nI installed the Simba (pip install simba-uw-no-tf) in my local. then i launched \"simba\". I am trying to create a project, after filling all fields the following error came out:\r\n\r\nTraceback (most recent call last):\r\nFile \"C:\\users\\...\\appdata\\local\\programspython\\python37\\lib\\thinter\\__init__.py\", line 1705, in __call__\r\nreturn self.func(*args)\r\nFile \"C:\\users\\...\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\simba\\simba.py\", line 3056, in make_projectini\r\nwith open(no animalsPath, \"r\", encoding='utf8') as f:\r\nFileNotFoundError: [errno 2] No such file or directory: 'C:\\\\users\\\\...\\\\pose_configurations\\\\no_animals\\\\no_animals.csv\r\n\r\nIn fact the pose configurations folder is empty. \r\nWhat should I do?\r\n", + "user": "Pinh13", + "reaction_cnt": 0, + "created_at": "2020-06-09T15:58:22Z", + "updated_at": "2020-06-11T17:30:14Z", + "author": "Pinh13", + "comments": [ + { + "body": "@Pinh13, thank you for reporting this bug. I fixed the bug and you can now download it using `pip install simba-uw-no-tf==1.1.7\r\n\r\nPlease do not hesitate to let me know if you have anymore issue.\r\n\r\nThank you!", + "created_at": "2020-06-10T05:47:46Z", + "author": "inoejj" + }, + { + "body": "Hi @Pinh13 - yes echo @inoejj thanks for reporting this. Can you let us know if the fix by inoejj resolves it or not on your end? \r\n\r\n", + "created_at": "2020-06-10T14:19:17Z", + "author": "sronilsson" + }, + { + "body": "Thanks for your reply.\r\nI did what you recomend, a folder with the name of the project is created. Then I load the project and it works.\r\n\r\nHowever if I create a pose config (3 animas 12 bodyparts) did not works.\r\n", + "created_at": "2020-06-10T14:48:31Z", + "author": "Pinh13" + }, + { + "body": "@Pinh13 just to confirm does your videos contain 3 animals? 
Also when you say it doesn't work is it the same error message as earlier or a different one?", + "created_at": "2020-06-10T15:12:36Z", + "author": "sronilsson" + }, + { + "body": "@sronilsson Thanks for your help.\r\nwhen I created the pose config file (3 animals 12 bodyparts as my videos), -simba created the project but when i load the project_config.yaml file the \"further imports\", \"video parameters\" and \"outliers\" are empty sections.\r\n\r\nThe cmd presented the following error:\r\n\r\nTraceback (most recent call last):\r\nFile \"C:\\users...\\appdata\\local\\programs\\python\\python37\\lib\\thinter_init_.py\", line 1705, in call\r\nreturn self.func(*args)\r\nFile \"C:\\users...\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\simba\\simba.py\", line 3088, in lambda\r\nlaunchladprojectbutton=button(1pMenu, text='Load Project', command=lambda:self.launch(1pMenu, input comand))\r\n\r\nFile \"C:\\users...\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\simba\\simba.py\", line 3099, in launch\r\ncommand(self.projectconfigini.file_path)\r\nFile \"C:\\users...\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\simba\\simba.py\", line 3403, in __init__\r\nself.bp1= DropDownMenu(label_heatmap, 'bodypart', bpoptions, '15')\r\nFile \"C:\\users...\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\simba\\simba.py\", line 782, in __init__\r\nself.popupMenu=OptionMenu(self,sel.dropdownvar,*self.choices, comand=com)\r\n\r\ntypeError: __init__() mssing 1 required positional argument: 'value'", + "created_at": "2020-06-10T15:30:11Z", + "author": "Pinh13" + }, + { + "body": "Hi @Pinh13 - yes the current version of SimBA is only designed / optimised for 1 or 2 animals - when you have a third animal some of the menus won't like it and will throw errors. It's something we may look into add support for in future but for now you can only have videos with 1 or 2 animals. ", + "created_at": "2020-06-10T16:01:46Z", + "author": "sronilsson" + }, + { + "body": "thanks a lot, i hope that it will be possible in the future.\r\nI will try to do combination of 2 out of the 3, maybe in this way works.\r\n\r\nThanks again.\r\nBest\r\nJ\r\n\r\n", + "created_at": "2020-06-10T16:54:52Z", + "author": "Pinh13" + } + ] + }, + { + "title": "Running Simba as singularity container on HPC cluster", + "body": "Hello, we are trying to run Simba as a singularity container on our cluster, the pip install fails (wxpython 4.0.1 fails to build because of older version of libgtk), we don't have root privileges on the machine so we decided to use singularity container instead. 
\r\n\r\nThe official 'pip3 install simba-uw-tf' ran fine and the binary was built but when we try to launch Simba get get the error(s):\r\n\r\n> ` ImportError: Cannot load backend 'TkAgg' which requires the 'tk' interactive framework, as 'headless' is currently running\r\n> \r\n> Singularity simba.sif:~/scratch> simba\r\n> Traceback (most recent call last):\r\n> File \"/usr/local/bin/simba\", line 7, in \r\n> from simba.SimBA import main\r\n> File \"/usr/local/lib/python3.6/dist-packages/simba/SimBA.py\", line 94, in \r\n> from simba.dpk_script.train_model import trainDPKmodel\r\n> File \"/usr/local/lib/python3.6/dist-packages/simba/dpk_script/train_model.py\", line 6, in \r\n> np.random.bit_generator = np.random._bit_generator\r\n> AttributeError: module 'numpy.random' has no attribute '_bit_generator'\r\n\r\nHere is the singularity recipe file we used:\r\n\r\n> Bootstrap: docker\r\n> From: ubuntu:18.04\r\n> \r\n> %post\r\n> \tapt-get -y update && DEBIAN_FRONTEND=noninteractive\r\n> \tapt-get install python3-pip -y\r\n> \tapt-get install git -y\r\n> \tDEBIAN_FRONTEND=\"noninteractive\" apt-get install tzdata python3-tk -y\r\n> \tapt-get install make gcc libgtk-3-dev libwebkitgtk-dev libwebkitgtk-3.0-dev libgstreamer-gl1.0-0 freeglut3 freeglut3-dev python-gst-1.0 python3-gst-1.0 libglib2.0-dev libgstreamer-plugins-base1.0-dev -y\r\n> \tpip3 install simba-uw-tf\r\n> \tpip3 install tk\r\n\r\nWould it be possible to build a docker container for Simba, or any tips?", + "user": "singhsaluja", + "reaction_cnt": 0, + "created_at": "2020-05-25T19:19:04Z", + "updated_at": "2020-06-15T07:44:47Z", + "author": "singhsaluja", + "comments": [ + { + "body": "Hi @singhsaluja - don't have a solution to this situation, but, some of the error messages arefamiliar. \r\n\r\nFirst, we have written SimBA for Win only at this point and I do not know how it behaves in Linux. \r\n\r\nThe numpy AttributeError: make sure you are running numpy==1.18.1 and tensorflow-gpu==1.14.1 - if you already are - then this has helped me in different situations (non-SimBA related), for the same error msg, in the past: https://github.com/aleju/imgaug/issues/537#issuecomment-569880719, I'd stick the `np.random.bit_generator = np.random._bit_generator` after all imports in SimBA.py\r\n\r\nFor the tk error, I have not encountered this, it seems to be related to import of matplotlib: https://stackoverflow.com/questions/55811545/importerror-cannot-load-backend-tkagg-which-requires-the-tk-interactive-fra \r\n\r\nMatplotlib is used in the following script and imported on the following lines:\r\ndpk_script\\train_model.py: 7\r\ngantt.py: 2\r\nlabelling_aggression.py: 8\r\nline_plot.py: 4\r\nplot_heatmap.py: 3\r\nprob_graph.py: 4\r\nsklearn_DLC_RF_train_model.py: 17\r\ntrain_model\\train_model_user_defined.py: 24\r\ntrain_model_2.py: 23\r\ntrain_multiple_models_from_meta.py: 15\r\nvalidate_model_on_single_video.py: 13\r\n\r\nI would try to adopt the matplotlib import as discussed in the thread and see if that starts it.\r\n\r\nAlso, SimBA does not require wxpython, but DeepLabCut and perhaps also DeepPoseKit does. You can try and skip wxpython installation. Try to install SimBA without tensorflow and in-build DeepLabCut/DeepPoseKit menus - `pip install simba-uw-no-tf` \r\n\r\nThanks", + "created_at": "2020-05-26T00:29:19Z", + "author": "sgoldenlab" + }, + { + "body": "To run deeplabcut on docker/singularity, i.e. headless, you must `export DLClight=True` before import. 
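For readers hitting the same headless errors: a minimal sketch, assuming DeepLabCut's documented `DLClight` flag and matplotlib's standard backend selection (none of this is SimBA's actual code; the Docker4DeepLabCut2.0 link that follows is the reference for the flag). Both settings must be applied before the affected libraries are first imported.

```python
# Hedged sketch, not SimBA code: configure the environment before any imports
# that depend on it.
import os
os.environ["DLClight"] = "True"   # documented DeepLabCut flag for headless use

import matplotlib
matplotlib.use("Agg")             # non-interactive backend, avoids the TkAgg error

import matplotlib.pyplot as plt   # now safe to import in a headless container
import deeplabcut                 # assumes deeplabcut is installed in the container
```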
https://github.com/DeepLabCut/Docker4DeepLabCut2.0", + "created_at": "2020-06-05T19:07:51Z", + "author": "MMathisLab" + } + ] + }, + { + "title": "Simba installation issue on Win10", + "body": "You might know about this bug, and I know I'm not installing simba as per the instructions (using a conda environment etc.), but it seems like it should be an acceptable way to install it.\r\n\r\n**Describe the bug**\r\nInstalling simba on Windows 10 in a conda environment fails due to geos DLL import error.\r\n\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```\r\nconda create --name simba_bug_test python=3.6\r\nconda activate simba_bug_test\r\ngit clone -b master https://github.com/sgoldenlab/simba.git\r\npip install -r simba\\simba\\requirements.txt\r\nconda install cudatoolkit=10.0 cudnn\r\ncd simba\\simba\r\npython SimBA.py\r\n```\r\ngives:\r\n```\r\n File \"C:\\Users\\adam.\\.conda\\envs\\simba_bug_test\\lib\\site-packages\\shapely\\coords.py\", line 8, in \r\n from shapely.geos import lgeos\r\n File \"C:\\Users\\adam\\.conda\\envs\\simba_bug_test\\lib\\site-packages\\shapely\\geos.py\", line 145, in \r\n _lgeos = CDLL(os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll'))\r\n File \"C:\\Users\\adam\\.conda\\envs\\simba_bug_test\\lib\\ctypes\\__init__.py\", line 348, in __init__\r\n self._handle = _dlopen(self._name, mode)\r\nOSError: [WinError 126] The specified module could not be found\r\n```\r\nIf I try:\r\n```\r\npip uninstall shapely\r\npip install shapely\r\n```\r\nThen the error persists. However, if I install shapely from conda forge `conda install -c conda-forge shapely`, then simba loads.\r\n\r\n_N.B. it's also a bit annoying that simba has to be run from `simba\\simba`_ \r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Python Version **3.6.10**\r\n - Are you using anaconda? yep (well miniconda)\r\n", + "user": "adamltyson", + "reaction_cnt": 0, + "created_at": "2020-05-13T10:22:10Z", + "updated_at": "2020-06-15T07:44:58Z", + "author": "adamltyson", + "comments": [ + { + "body": "Hi @adamltyson - yes, shapely can cause issues at times - I've had a few of these raised and closed, could you try the fix in this link, and let me know if it works? \r\n\r\nhttps://github.com/sgoldenlab/simba/issues/11#issuecomment-596805732\r\n\r\n*N.B. I will look into the simba/simba annoyance, see what others think - perhaps best to get a pip package* ", + "created_at": "2020-05-13T13:34:44Z", + "author": "sgoldenlab" + }, + { + "body": "Sorry I should have checked the closed issues. It's working for me by installing from conda, so from my POV all is fine. I can invisage other potential users giving up at this point though.\r\n\r\nJust my 2c - I find distributing via pypi really convenient, and great to keep track of versions. If you ask me (which you didn't), I would:\r\n* Set simba\\simba\\SimBA.py as a console entry point. \r\n* Add the images etc. to the manifest so you can reference them by location in the package. This might fix a lot of the mac/linux incompatibilities too\r\n* Distribute via PyPI\r\n* profit\r\n\r\nHappy to submit a PR sometime if you like.\r\n\r\nP.S. Really impressed with SimBA. I'm trying it out to present at lab meeting, and so far everything is super easy to use.", + "created_at": "2020-05-13T13:43:50Z", + "author": "adamltyson" + }, + { + "body": "Thanks @adamltyson - and the first three of the 2c's have been on my mind for some time. It has just been the question of time an getting it done. 
With your nudge I'll see what we can get done this week. Thanks again ", + "created_at": "2020-05-13T14:34:06Z", + "author": "sgoldenlab" + }, + { + "body": "Hey @adamltyson - we've got a 'pip install' option up:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/installation.md\r\n\r\nWould you mind trying it and see if it works OK on your end?\r\n\r\nThanks! ", + "created_at": "2020-05-15T20:48:20Z", + "author": "sgoldenlab" + }, + { + "body": "Hi,\r\n\r\nIf (after cuda & cudnn installation), I try `pip install simba-uw-tf`, I get this error on Win 10 (similar on Ubuntu 18.04):\r\n\r\n```\r\nERROR: tensorflow-gpu 1.14.0 has requirement protobuf>=3.6.1, but you'll have protobuf 3.6.0 which is incompatible.\r\nERROR: deeplabcut 2.0.9 has requirement imageio~=2.3.0, but you'll have imageio 2.8.0 which is incompatible.\r\nERROR: deeplabcut 2.0.9 has requirement numpy~=1.14.5, but you'll have numpy 1.18.1 which is incompatible.\r\nERROR: deeplabcut 2.0.9 has requirement python-dateutil~=2.7.3, but you'll have python-dateutil 2.8.1 which is incompatible.\r\nERROR: deeplabcut 2.0.9 has requirement scikit-learn~=0.19.2, but you'll have scikit-learn 0.22.2 which is incompatible.\r\nERROR: deeplabcut 2.0.9 has requirement six~=1.11.0, but you'll have six 1.14.0 which is incompatible.\r\nERROR: deeplabcut 2.0.9 has requirement wheel~=0.31.1, but you'll have wheel 0.34.2 which is incompatible.\r\n```\r\n\r\nThe same fix as above (install shapely from conda-forge) then works, and simba runs.\r\n\r\nIf I try `pip install simba-uw-no-tf`, then it installs, but running `simba` gives the same error as above:\r\n```\r\n from shapely.geos import lgeos\r\n File \"c:\\users\\adam.garfield-win\\.conda\\envs\\simbapiptest\\lib\\site-packages\\shapely\\geos.py\", line 145, in \r\n _lgeos = CDLL(os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll'))\r\n File \"c:\\users\\adam.garfield-win\\.conda\\envs\\simbapiptest\\lib\\ctypes\\__init__.py\", line 348, in __init__\r\n self._handle = _dlopen(self._name, mode)\r\nOSError: [WinError 126] The specified module could not be found\r\n```\r\n\r\n", + "created_at": "2020-05-18T09:51:00Z", + "author": "adamltyson" + }, + { + "body": "I have no problem installing and running simba-uw-no-tf. Were you installing both simba-uw-no-tf and simba-uw-tf on the same virtual environment?", + "created_at": "2020-05-18T21:50:38Z", + "author": "inoejj" + }, + { + "body": "I was, but the same thing happens in a fresh conda env, and is only fixed by reinstalling shapely from conda-forge. ", + "created_at": "2020-05-19T05:08:02Z", + "author": "adamltyson" + }, + { + "body": "Is there a reason you pin `deeplabcut==2.0.9`? I would strongly suggest an update to at least 2.1.8.2; many important updates, bug fixes, etc in the past year. ", + "created_at": "2020-06-05T19:11:05Z", + "author": "MMathisLab" + }, + { + "body": "Hi @MMathisLab - I will look over this and make it compatible asap. 
But I think it is primarily the pandas >= 1.0.0 dependency that made it more than a quick fix for me ", + "created_at": "2020-06-05T21:22:30Z", + "author": "sgoldenlab" + }, + { + "body": "@MMathisLab I should add: DLC 2.0.9 in SimBA is only used by SimBAs DLC GUI: https://github.com/sgoldenlab/simba/blob/master/docs/Tutorial_DLC.md - this is largely legacy code now, as most use newer versions of DLC and DLC GUI to generate the pose that is exported to SimBA", + "created_at": "2020-06-05T22:20:27Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Request for adding heat map plotter for descriptive statistics", + "body": "", + "user": "amirhosseinazami", + "reaction_cnt": 0, + "created_at": "2020-05-11T20:23:48Z", + "updated_at": "2020-06-24T00:15:27Z", + "author": "amirhosseinazami", + "comments": [ + { + "body": "Added, see docs for more info:\r\n\r\nhttps://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md#part-5-miscellaneous-roi-tools", + "created_at": "2020-05-20T02:14:45Z", + "author": "sronilsson" + } + ] + }, + { + "title": "Remove restriction on osf.io repository", + "body": "Might be the wrong place, but it was the place I thought easiest to contact you and its semi a part of the software.\r\n\r\nI suggest removing the current restriction on your osf.io repositories. They are current closed and you have to request access. It puts a unnecessary(?) delay on working with the software.", + "user": "Ejdrup", + "reaction_cnt": 0, + "created_at": "2020-05-01T13:12:04Z", + "updated_at": "2020-05-01T14:37:32Z", + "author": "Ejdrup", + "comments": [ + { + "body": "You're right - it should be open now, no request access required. ", + "created_at": "2020-05-01T14:21:56Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Automatically track frames annotated for each labeling session", + "body": "Rather than having the user supply the exact frame range labeled in the session to avoid erasing all previous labeling when saving (ouch I lost a lot of hours), I suggest tracking the frames actively labeled for each session, and then overwriting only those.", + "user": "Ejdrup", + "reaction_cnt": 0, + "created_at": "2020-05-01T13:05:30Z", + "updated_at": "2020-05-01T19:48:45Z", + "author": "Ejdrup", + "comments": [ + { + "body": "Hej @Ejdrup - sorry about the lost hours!! There are two buttons under the label behavior tab. When using the first, SimBA assumes you are starting afresh, and a new file will be generated (if any previous annotations are there, they will be overwritten). The second assumes you are continuing a previously saved annotation session, and previous annotations are added to, rather than overwritten. \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/80812714-54512b80-8b7d-11ea-91aa-9639262789af.png)\r\n", + "created_at": "2020-05-01T14:28:43Z", + "author": "sgoldenlab" + }, + { + "body": "I am already using that function. However, saving after loading a save session overwrites the old session with only the newly annotated frames, unless the specific range to save is specified. I suggest automatically embedding that function, so pressing save does not overwrite the entire file, if no range has been supplied under frame range.", + "created_at": "2020-05-01T15:33:09Z", + "author": "Ejdrup" + }, + { + "body": "Thanks - I'll look over this today. ", + "created_at": "2020-05-01T16:08:43Z", + "author": "sgoldenlab" + }, + { + "body": "@Ejdrup The bug had been reintroduced by mistake and it is fixed now. 
Please download the latest version of SimBA and test it out. Please test out the labelling and load labelling for a minute and see if it works for you. \r\n\r\nPlease let me know if you have anymore questions, I would be more than happy to assist you.\r\n\r\nSincerely,\r\nJJ", + "created_at": "2020-05-01T19:48:41Z", + "author": "inoejj" + } + ] + }, + { + "title": "Labeling session reloading at wrong frame", + "body": "After saving a labeling section and reopening at a later timepoint, SimBA keeps reopening at an older save. The file last_framelog remains unchanged as the following when saving at different timepoints:\r\n\r\n`[Last saved frames]'\r\n'tracking_video2020-02-26t10_19_20 = 3736`\r\n\r\nAdditionally, I'm not seeing any labels in the csv under targets_inserted, however, that might be because they are stored somewhere else?\r\n", + "user": "Ejdrup", + "reaction_cnt": 0, + "created_at": "2020-05-01T12:57:56Z", + "updated_at": "2020-05-01T19:49:08Z", + "author": "Ejdrup", + "comments": [ + { + "body": "I realize now saving overwrites previous efforts unless the newly annotated frames are saved with the specific range annotated in the frame range selector above the save button. I'll open a feature suggesting for saving only the newly annotated frames to the csv.", + "created_at": "2020-05-01T13:03:43Z", + "author": "Ejdrup" + } + ] + }, + { + "title": "Error during Outlier correction", + "body": "Setting an outlier correct C similar to described in the documentation (C = 1.5), or any other value causes the SimBA main window to be stuck at:\r\n`Pose-estimation body part setting for outlier correction: 7`\r\n`Processing 1 files for movement outliers...`\r\nWhile cmd prompts the following:\r\n```\r\nException in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Ejdrup\\AppData\\Local\\Programs\\Python\\Python36\\lib\\tkinter\\__init__.py\", line 1699, in __call__\r\n return self.func(*args)\r\n File \"SimBA.py\", line 3981, in correct_outlier\r\n dev_move_user_defined(configini)\r\n File \"C:\\ProgramData\\simba\\simba\\outlier_scripts\\movement\\correct_devs_mov_user_defined.py\", line 115, in dev_move_user_defined\r\n df_p_cols = pd.DataFrame([csv_df.pop(x) for x in p_cols]).T\r\n File \"C:\\Users\\Ejdrup\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\pandas\\core\\frame.py\", line 450, in __init__\r\n arrays, columns = to_arrays(data, columns, dtype=dtype)\r\n File \"C:\\Users\\Ejdrup\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\pandas\\core\\internals\\construction.py\", line 471, in to_arrays\r\n data, columns, coerce_float=coerce_float, dtype=dtype\r\n File \"C:\\Users\\Ejdrup\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\pandas\\core\\internals\\construction.py\", line 522, in _list_of_series_to_arrays\r\n indexer = indexer_cache[id(index)] = index.get_indexer(columns)\r\n File \"C:\\Users\\Ejdrup\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\pandas\\core\\indexes\\base.py\", line 2985, in get_indexer\r\n \"Reindexing only valid with uniquely\" \" valued Index objects\"\r\npandas.core.indexes.base.InvalidIndexError: Reindexing only valid with uniquely valued Index objects\r\n```\r\n\r\n", + "user": "Ejdrup", + "reaction_cnt": 0, + "created_at": "2020-04-30T20:59:32Z", + "updated_at": "2020-04-30T21:13:57Z", + "author": "Ejdrup", + "comments": [ + { + "body": "Hi @Ejdrup - it appears that multiple columns in the csv has the same name. 
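As an aside on the duplicate-column diagnosis in this thread, a hypothetical check, not SimBA code (the file path is an assumption and a single-row csv header is assumed), that lists any column name appearing more than once:

```python
# Hypothetical diagnostic for the "Reindexing only valid with uniquely valued
# Index objects" error above: report duplicated column names in the header.
from collections import Counter

with open(r"project_folder\csv\input_csv\video1.csv") as f:  # path assumed
    header = f.readline().strip().split(",")

duplicates = [name for name, n in Counter(header).items() if n > 1]
print(duplicates or "no duplicated column names")
```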
Let me try and re-create this.", + "created_at": "2020-04-30T21:06:07Z", + "author": "sgoldenlab" + }, + { + "body": "Simply deleting the project and recreating made it work. I cannot recreate the situation. You can delete the issue or just close to leave as a possible quick fix for other with similar issues.", + "created_at": "2020-04-30T21:13:00Z", + "author": "Ejdrup" + } + ] + }, + { + "title": "Error when extecuting SimBA.py", + "body": "When executing SimBA.py I get the following error:\r\n\r\nMy input:\r\n`C:\\ProgramData\\simba\\simba>python SimBA.py`\r\nError output:\r\n```\r\nC:\\ProgramData\\simba\\simba>python SimBA.py\r\nModuleNotFoundError: No module named 'numpy.core._multiarray_umath'\r\nImportError: numpy.core.multiarray failed to import\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"\", line 960, in _find_and_load\r\nSystemError: returned a result with an error set\r\nImportError: numpy.core._multiarray_umath failed to import\r\nImportError: numpy.core.umath failed to import\r\n2020-04-27 14:43:13.667999: F tensorflow/python/lib/core/bfloat16.cc:675] Check failed: PyBfloat16_Type.tp_base != nullptr\r\n```\r\n\r\nPassing:\r\n```\r\npython\r\nimport numpy as np\r\nnp.__version__\r\n```\r\nreturns 1.14.5, which is what is specified in the requirements.txt.", + "user": "Ejdrup", + "reaction_cnt": 0, + "created_at": "2020-04-27T12:44:50Z", + "updated_at": "2020-04-27T21:09:43Z", + "author": "Ejdrup", + "comments": [ + { + "body": "After chewing myself through every requirement that wasn't the correct version I finally got it to work. In short, the supplied requirements.txt did not contain a setup of versions that worked for me.", + "created_at": "2020-04-27T13:30:53Z", + "author": "Ejdrup" + }, + { + "body": "Hi @Ejdrup thanks for reporting this - yes SimBA requires numpy 1.18.1, which seems to be the numpy version specified in the requirements.txt. \r\n\r\nFor anyone else seeing this error at startup, run:\r\n`pip install numpy==1.18.1`\r\n\r\nIf the numpy install failed, there is risk the installation of correct versions of pandas/pytables also may have failed which could cause errors further down the line. For the correct versions of pandas/pytables, run:\r\n\r\n`pip install pandas==0.25.3`\r\n`pip install tables==3.5.1`\r\n\r\nThanks again \r\n", + "created_at": "2020-04-27T15:05:26Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Incompatabilities when installing SimBA with TF in fresh conda env.", + "body": "When installing SimBA with TF in a fresh conda env. 
on windows 10 I get the following error message:\r\n```\r\nERROR: imgaug 0.4.0 has requirement numpy>=1.15, but you'll have numpy 1.14.5 which is incompatible.\r\nERROR: imbalanced-learn 0.6.2 has requirement scikit-learn>=0.22, but you'll have scikit-learn 0.19.2 which is incompatible.\r\nERROR: tensorflow-gpu 1.14.0 has requirement protobuf>=3.6.1, but you'll have protobuf 3.6.0 which is incompatible.\r\nERROR: yellowbrick 0.9.1 has requirement scikit-learn>=0.20, but you'll have scikit-learn 0.19.2 which is incompatible.\r\n```\r\n\r\nI input the following commands:\r\n```\r\nconda create --name SimBA python=3.6\r\nconda activate SimBA\r\ngit clone -b master https://github.com/sgoldenlab/simba.git\r\npip install -r simba/simba/requirements.txt\r\n```\r\nIt installs several of the packages before I encounter the error.\r\n ", + "user": "Ejdrup", + "reaction_cnt": 0, + "created_at": "2020-04-27T10:31:11Z", + "updated_at": "2020-04-27T10:57:16Z", + "author": "Ejdrup", + "comments": [ + { + "body": "I've given up on trying to make it run with conda.", + "created_at": "2020-04-27T10:57:16Z", + "author": "Ejdrup" + } + ] + }, + { + "title": "Problem with Analyse ROI Data", + "body": "Hello everybody!\r\n\r\nFirst of all, thank you so much for implementing the new features on simba! I can also see that you have added a lot of new (very clear!) documentation, and I really appreciate your effort, as I am sure it took a lot of hours. Thank you so much!\r\n\r\nThat being said, I am having some issues running the analysis of the ROI Data. All of the steps ran smoothly until then, but when I click the Analyse ROI Data button, the cmd terminal displays this:\r\n\"Screen \r\nThe steps before this ran correctly, I believe, as an ROI_definitions h5 file was created \r\n\"Screen\r\n\r\nAny idea how to solve this? I am using Python 3.6.4\r\n\r\n", + "user": "2909ft", + "reaction_cnt": 0, + "created_at": "2020-04-25T21:18:05Z", + "updated_at": "2020-05-03T03:35:12Z", + "author": "2909ft", + "comments": [ + { + "body": "Hi @2909ft ! This TypeError happens when SimBA can't find the frame rate of the video it is trying to analyze. The frame-rate is saved in the video_info.csv in the project_folder/logs directory. \r\n\r\nI'd suggest open this csv file and make sure that your videos are represented as single row with a frame rate number in the frame rate column. Also make sure that you don't have two rows with the same video name: \"the series to class int\" suggests that SimBA finds multiple rows in the video_info.csv file with the same video name, and can't convert the multiple instances to a single fps integer number. \r\n\r\nIf the error still persists, then also make sure that your video file names does not contain any spaces.\r\n\r\nLet me know id that fixes the issue!", + "created_at": "2020-04-26T01:07:17Z", + "author": "sgoldenlab" + }, + { + "body": "Hello!\r\n\r\nThank you so much for the reply! The names are different for each video, and there are no spaces on them. Any other suggestion?\r\n\r\n\"Screen\r\n", + "created_at": "2020-04-26T01:29:09Z", + "author": "2909ft" + }, + { + "body": "Hmm.. would you have a screenshot of the content of your project_folder/csv/outlier_correction_movement_location folder? Could it be that you have any files in there, that are not represented in the video_info.csv file? 
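Following on from the checklist above, a hypothetical sanity check (the paths and the `Video` column name are assumptions, not taken from SimBA's code) that flags videos missing from video_info.csv or listed more than once, the two situations behind the fps TypeError:

```python
# Hypothetical helper: every tracking csv should match exactly one row in
# project_folder/logs/video_info.csv, with a numeric fps value in that row.
import os
import pandas as pd

video_info = pd.read_csv(r"project_folder\logs\video_info.csv")
data_dir = r"project_folder\csv\outlier_correction_movement_location"

for file_name in os.listdir(data_dir):
    video_name = os.path.splitext(file_name)[0]
    matches = video_info[video_info["Video"] == video_name]
    if len(matches) != 1:
        print(f"{video_name}: {len(matches)} rows in video_info.csv (expected 1)")
```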
", + "created_at": "2020-04-26T01:41:10Z", + "author": "sgoldenlab" + }, + { + "body": "It seems like everything matches \r\n\"Screen\r\nDo I need to remove the \"DLC_resnet50_DeepLabCut_Labels_ACC_PAGJan21shuffle1_24500\" bit?\r\n", + "created_at": "2020-04-26T02:06:23Z", + "author": "2909ft" + }, + { + "body": "Ah nevermind, just realised I placed them there manually. I have created a new simba project, this time using the import tracking data feature of the GUI, and the names match. Thank you for your patience and apologies for the inconvenience :) ", + "created_at": "2020-04-26T02:14:37Z", + "author": "2909ft" + }, + { + "body": "No trouble at allow, just ask if anything else pops up @2909ft SimBA removes the long dlc model iteration reference from the filename when it's imported. It makes the file names and paths more straightforward to handle in python, and easier to look at ", + "created_at": "2020-04-26T02:41:49Z", + "author": "sgoldenlab" + }, + { + "body": "Hello again!\r\n\r\nThis time I was trying to generate the data plots, and this error occured \r\n\"Screen\r\nIt seems like simba is looking for the column \"Movement_mouse_1_centroid\", right? However, on the machine results folder, the files have a column called \"Movement_mouse_centroid\". Could this be the reason why I got this error? (I'm only using 1 mouse)\r\n", + "created_at": "2020-04-27T20:32:41Z", + "author": "2909ft" + }, + { + "body": "Yes, that's definitely the reason and this shouldn't happen - I will look over the code. \r\n\r\nFirst can you tell me what body part pose configuration you are using, and how many animals you are tracking? \r\n\r\nAlso, when was the last time you downloaded SimBA? A similar bug, that could cause this error, was fixed last week. \r\n\r\n\r\nEDIT: Just saw that it was one animal. But are you using on of the built in body-pose configs, or is it a \"user defined\" one. \r\n", + "created_at": "2020-04-27T21:04:19Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for the reply! I am using the built in body-pose configs (the 8bp one, for 1 animal). I downloaded simba last Friday again. ", + "created_at": "2020-04-27T21:14:16Z", + "author": "2909ft" + }, + { + "body": "Thanks, I can spot the bug in the plot script :) let me just confirm that the script is working before I upload it. I'll let you know when done. ", + "created_at": "2020-04-27T21:22:16Z", + "author": "sgoldenlab" + }, + { + "body": "@2909ft - I've made an update , can you download SimBA again, load your project, and let me know if it works? Thanks for reporting this.", + "created_at": "2020-04-27T21:37:02Z", + "author": "sgoldenlab" + }, + { + "body": "Yes! It works! Thanks a bunch!", + "created_at": "2020-04-27T22:19:26Z", + "author": "2909ft" + }, + { + "body": "Another issue has popped up, it seems. I was trying to merge the Sklearn results with the Path and the live data and everything works fine until the second-to-last frame of the first video being analysed, when I get the following error message:\r\n\"Screen\r\nIf I check the results on the output/merged folder, there is one video which seems to have something merged, but it is very blurry (see screenshot). Both the Sklearn results, the path, and the live data seem to have been generated correctly, so I don't really know what the issue is here. 
Any suggestions?", + "created_at": "2020-04-28T00:50:12Z", + "author": "2909ft" + }, + { + "body": "Hey @2909ft - the error, when trying to merge the sklearn, path and live plots, would happen if there is an unequal number of frames in the three folders. Let's say there are 14108 frames of sklearn, but 14107 'live data' plots, then this error would pop up on the last frame. Could you open the project_folder/frames/output/sklearn and so on, and for each of your, and check that the folders named after your videos contain any equal number of frames? \r\n\r\nIt is very grainy indeed - I have not seen that before I will have to have a think. The error might come from the video not being generated and closed appropriately because of the last frame missing. ", + "created_at": "2020-04-28T01:09:41Z", + "author": "sgoldenlab" + }, + { + "body": "The folders all have 14106 frames, including the input one. I just checked and this is also the number of frames on the DLC csv file for that video. Weird ", + "created_at": "2020-04-28T01:27:13Z", + "author": "2909ft" + }, + { + "body": "Hmm, could it be that some frames in the middle of the video has been removed/renamed for some reason? E.g., does it say 14106 at the bottom of the window for each video frame folder like this below? It's likely you do but I can see no other reason for this to happen. \r\n\r\nIs it possible for you to merge other combinations of frames into a video?\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/80437607-de1ba300-88b6-11ea-8e52-2ac206960c3e.png)\r\n\r\n\r\n", + "created_at": "2020-04-28T01:43:58Z", + "author": "sgoldenlab" + }, + { + "body": "You don't happen to have any other files, or folders, hidden or otherwise, in the first videos Sklearn output frames folder do you? \r\n\r\n", + "created_at": "2020-04-28T01:52:52Z", + "author": "sgoldenlab" + }, + { + "body": "The number of files is indeed 14108 for all of the folders (14107 png, if we count the 0 frame, and one system Thumbs.db file). Could this db file be the source of the issue?", + "created_at": "2020-04-28T02:08:34Z", + "author": "2909ft" + }, + { + "body": "Ah I don't know what \"Thumbs.db\" is, or where it comes from, it should not be there - it is not part of SimBA. Could you delete it or move it and try gain? ", + "created_at": "2020-04-28T02:14:24Z", + "author": "sgoldenlab" + }, + { + "body": "Thumbs.db seems to be a system file that Windows uses to store tumbnails for all the files on a given folder, it was automatically generated, I did not put it there. I have now deleted all of them and the merge seems to work correctly until the bit where it actually merges. This is the video result, with the error message on the side. The error appears once all the image frames for the first video have been processed.\r\n\"Screen\r\n", + "created_at": "2020-04-28T12:58:44Z", + "author": "2909ft" + }, + { + "body": "Interesting - I won't be able to re-create this error but will make a few updates to the code to count / merge only image files and disregard anything else - let's see if that fixes it.", + "created_at": "2020-04-28T14:38:01Z", + "author": "sgoldenlab" + }, + { + "body": "Brilliant thanks!", + "created_at": "2020-04-28T16:09:33Z", + "author": "2909ft" + }, + { + "body": "Hi @2909ft - I've introduced some fixes, that should avoid the issues you are having with the .db files. And it now catches the errors if the number of files don't add up. \r\n\r\nWould you mind downloading SimBA again, load your project, and see if it works? 
I am not sure what's going on with the grainy messed up frames. Let's see if this fixes it first. ", + "created_at": "2020-04-28T19:36:55Z", + "author": "sgoldenlab" + }, + { + "body": "Yes it works!! The merged video is now fine, no grainy textures :)) However (this is something minor, but I think you ought to know), the sklearn results and the path plots are on a different orientation- when the mouse is going south on the video, the path shows a dot going north etc.\r\n\r\nThank you so much!! I have done a correlation analysis between what I behaviours I had scored on a given video and what SimBA scored, and I got a 0.998 correlation value! Truly impressive!! ", + "created_at": "2020-04-29T12:16:39Z", + "author": "2909ft" + }, + { + "body": "Nice on! Thanks for letting me know - makes me happy to hear that it is working. For the path plot, I will insert a fix - I suspect it is line 128 in merge_frames_movie.py, that should be 270 rather than 90, but ill look it over and make sure it is robust\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/80599630-136fe000-89e0-11ea-9a37-dc697912c481.png)\r\n\r\n\r\nEDIT: It was actually the rotation in the path_plot.py script. I've fixed it - but you'd have to download SimBA again, and generate your path plots again, to get it fixed. Cheers!\r\n", + "created_at": "2020-04-29T13:10:26Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Report: Upper Limit in Memory with pandas [MemoryError]", + "body": "**Disclaimer**\r\nThis is not a SimBa code bug. But probably a pandas or system specific behavior. This is why I called this \"Report\". Please feel free to close the issue if you see fit.\r\n\r\n**Describe the bug**\r\nAs with all machine learning problems, I thought the best way to solve accuracy problems is to throw data at it:\r\n\r\nTrying out 10 csv files from my experiments (about 600k frames in total) resulted in this error popping up. \r\nI included my system info to give you a better overview. 
As mentioned below this might happen from overcommitment protection within the OS.\r\n\r\n```\r\n[Parallel(n_jobs=-1)]: Done 26 tasks | elapsed: 46.8s\r\nException in thread Thread-20:\r\nTraceback (most recent call last):\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 926, in _bootstrap_inner\r\n self.run()\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"simba.py\", line 3764, in trainmultimodel\r\n train_multimodel(self.projectconfigini)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\train_multiple_models_from_meta.py\", line 164, in train_multimodel\r\n features = baseFeatureFrame.copy()\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\pandas\\core\\generic.py\", line 5996, in copy\r\n data = self._data.copy(deep=deep)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 788, in copy\r\n return self.apply(\"copy\", axes=new_axes, deep=deep, do_integrity_check=False)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\pandas\\core\\internals\\managers.py\", line 438, in apply\r\n applied = getattr(b, f)(**kwargs)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\pandas\\core\\internals\\blocks.py\", line 771, in copy\r\n values = values.copy()\r\nMemoryError: Unable to allocate 3.22 GiB for an array with shape (787, 549864) and data type float64\r\n```\r\n**Simba console output**\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78978150-9278a380-7b19-11ea-876e-4033929234c3.png)\r\n\r\n**What google says:**\r\nA quick google hints at a OS issue/safety mechanism for memory allocation (although the thread is originally for Linux). [Stackoverflow](https://stackoverflow.com/questions/57507832/unable-to-allocate-array-with-shape-and-data-type). There are a few provided solutions (including for windows) but I have not tried them yet.\r\n\r\n**Desktop (please complete the following information):**\r\n\r\nSimba-GPU installation with Anaconda\r\nOperating System: Windows 10 Home 64-bit (10.0, Build 18362) (18362.19h1_release.190318-1202)\r\nSystem Manufacturer: LENOVO\r\nSystem Model: 81UH\r\nProcessor: Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz (12 CPUs), ~2.6GHz\r\nMemory: 16384MB RAM\r\n \r\n\r\n**Additional context**\r\nAs I was writing this issue, I restarted the process and I was able to reproduce the same result. But to my confusion the training continued despite of the memory issue.\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78977589-a40d7b80-7b18-11ea-9946-2734d5d1be75.png)\r\n\r\n\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-10T08:59:34Z", + "updated_at": "2020-04-23T19:44:16Z", + "author": "JensBlack", + "comments": [ + { + "body": "**Follow up**\r\nSame training (several moments later).\r\nI was training with the \"multi model setting\" and encountered another issue after a while.\r\nIt completed the first model without any further error, although the console tells me that it created several other 2000 task jobs over and over. The SimBa console tells me that it just started the second model (which seems to have crashed now completely, although i will leave it running for a while to see whether anything changes, cpu is down to 5% again and the console crashed. not sure if it was me or the error). 
Interestingly the 2nd model has less positive training data then the first and this is a similiar but different error.\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78987348-0ae95f80-7b2e-11ea-80c7-4e3dc04c560d.png)\r\n\r\n**SimBa console ouput**\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78987393-26ed0100-7b2e-11ea-81ca-bb398b8d7d64.png)\r\n \r\n\r\n", + "created_at": "2020-04-10T11:26:58Z", + "author": "JensBlack" + }, + { + "body": "Thanks @JensBlack - I think it's in part relate to the large number of features created by SimBA when training on user-defined body-parts, combined with the number of targets, and the 16GB RAM - I will wok to reduce the sizes of the arrays and dataframe. ", + "created_at": "2020-04-10T15:30:29Z", + "author": "sgoldenlab" + }, + { + "body": "@JensBlack , what version of python is in your machine? Python 3.6 32 bit or Python 3.6 64 bit?", + "created_at": "2020-04-10T19:04:59Z", + "author": "inoejj" + }, + { + "body": "I will post `conda info` just to give you the full overview, but i am pretty sure that I have 64bit.\r\n\r\n```\r\n active environment : simba-gpu\r\n active env location : D:\\Anaconda\\envs\\simba-gpu\r\n shell level : 2\r\n user config file : C:\\Users\\schwa\\.condarc\r\n populated config files : C:\\Users\\schwa\\.condarc\r\n conda version : 4.8.2\r\n conda-build version : 3.18.11\r\n python version : 3.7.6.final.0\r\n virtual packages : __cuda=10.1\r\n base environment : D:\\Anaconda (writable)\r\n channel URLs : https://repo.anaconda.com/pkgs/main/win-64\r\n https://repo.anaconda.com/pkgs/main/noarch\r\n https://repo.anaconda.com/pkgs/r/win-64\r\n https://repo.anaconda.com/pkgs/r/noarch\r\n https://repo.anaconda.com/pkgs/msys2/win-64\r\n https://repo.anaconda.com/pkgs/msys2/noarch\r\n package cache : D:\\Anaconda\\pkgs\r\n C:\\Users\\schwa\\.conda\\pkgs\r\n C:\\Users\\schwa\\AppData\\Local\\conda\\conda\\pkgs\r\n envs directories : D:\\Anaconda\\envs\r\n C:\\Users\\schwa\\.conda\\envs\r\n C:\\Users\\schwa\\AppData\\Local\\conda\\conda\\envs\r\n platform : win-64\r\n user-agent : conda/4.8.2 requests/2.22.0 CPython/3.7.6 Windows/10 Windows/10.0.18362\r\n administrator : False\r\n netrc file : None\r\n offline mode : False\r\n```", + "created_at": "2020-04-14T07:03:08Z", + "author": "JensBlack" + }, + { + "body": "@JensBlack , let's try to change your python to 3.6 in conda so I can narrow down the root cause of this issue. \r\n\r\nThanks,\r\nJJ", + "created_at": "2020-04-16T00:59:07Z", + "author": "inoejj" + } + ] + }, + { + "title": "Error in Train model", + "body": "**Describe the bug**\r\nAfter getting the error in issue #22 , I toggled both boxes (see below) to circumvent the error #22 .\r\nThe following error occured after initializing training (or rather before it). I checked the code and it's the exception for an error in training (fitting), so could be anything? I checked both csv files if they contained the array and if it was valid (see further below).\r\n\r\n_simba/train_model_2.py:237_\r\n```\r\n try:\r\n clf.fit(data_train, target_train)\r\n except ValueError:\r\n print('ERROR: The model contains a faulty array. This may happen when trying to train a model with 0 examples of the behavior of interest')\r\n```\r\n\r\nI tried both train methods using global environment for 1 model and specific for 3 seperate models (as visible in the SimBa log). 
The console output is the same, so i didn't post the second.\r\nRemoving both boxes (Generate Features Importance Log and Graph) does not change the error (tested in the global settings method).\r\n\r\nSettings:\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78661892-ccef0000-78cf-11ea-9cac-83b152c86228.png)\r\n\r\n\r\nSimBa - Log:\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78661547-41756f00-78cf-11ea-8867-d6c758622a07.png)\r\n\r\nConsole output:\r\n\r\n```\r\n(simba-gpu) C:\\Users\\schwa\\PycharmProjects\\simba\\simba>python simba.py\r\nException in thread Thread-1:\r\nTraceback (most recent call last):\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 926, in _bootstrap_inner\r\n self.run()\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"simba.py\", line 3734, in trainmultimodel\r\n train_multimodel(self.projectconfigini)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\train_multiple_models_from_meta.py\", line 263, in train_multimodel\r\n generateClassificationReport(clf, class_names, classifierName, saveFileNo)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\train_multiple_models_from_meta.py\", line 51, in generateClassificationReport\r\n visualizer.score(data_test, target_test)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\yellowbrick\\classifier\\classification_report.py\", line 130, in score\r\n y_pred = self.predict(X)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\sklearn\\ensemble\\_forest.py\", line 612, in predict\r\n proba = self.predict_proba(X)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\sklearn\\ensemble\\_forest.py\", line 654, in predict_proba\r\n check_is_fitted(self)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\sklearn\\utils\\validation.py\", line 967, in check_is_fitted\r\n raise NotFittedError(msg % {'name': type(estimator).__name__})\r\nsklearn.exceptions.NotFittedError: This RandomForestClassifier instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator.\r\n```\r\n\r\npd.Dataframe.describe() and np.sum() for both target_inserted csv files over all labelled frames (model names: 'active', 'resting' and 'inner_circle')\r\n\r\n**Dataframe 1**\r\n```\r\nDataframe.describe()\r\n Active Resting Inner_circle\r\ncount 54892.000000 54892.000000 54892.000000\r\nmean 0.750000 0.250000 0.394065\r\nstd 0.433017 0.433017 0.488653\r\nmin 0.000000 0.000000 0.000000\r\n25% 0.750000 0.000000 0.000000\r\n50% 1.000000 0.000000 0.000000\r\n75% 1.000000 0.250000 1.000000\r\nmax 1.000000 1.000000 1.000000\r\n\r\nnp.sum()\r\nActive 41169.0\r\nResting 13723.0\r\nInner_circle 21631.0\r\n```\r\n**Dataframe 2**\r\n\r\n```\r\nDataframe.describe()\r\n Active Resting Inner_circle\r\ncount 54892.000000 54892.000000 54892.000000\r\nmean 0.750000 0.250000 0.394065\r\nstd 0.433017 0.433017 0.488653\r\nmin 0.000000 0.000000 0.000000\r\n25% 0.750000 0.000000 0.000000\r\n50% 1.000000 0.000000 0.000000\r\n75% 1.000000 0.250000 1.000000\r\nmax 1.000000 1.000000 1.000000\r\n\r\nnp.sum()\r\nActive 41169.0\r\nResting 13723.0\r\nInner_circle 21631.0\r\ndtype: float64\r\n```\r\nAs you can see both Dataframes (csv files) have labelled frames (with both True/1 and False/0 values). As I did the exact same thing, but with 1 csv file less, just yesterday. 
I am confused.\r\n\r\n**Additional context**\r\nThe additional csv file was added using the provided functions. It's noteworthy that I did not label the csv file manually but added columns to the outlier corrected version via some automatic labelling method. This method worked before.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - SimBa-GPU install (newest version 07.04.2020) with Anaconda ", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-07T11:23:09Z", + "updated_at": "2020-11-17T16:09:33Z", + "author": "JensBlack", + "comments": [ + { + "body": "This is a print out from the function simba/train_model_2.py just before the try (mentioned above).\r\n\r\n```\r\nprint(data_train)\r\n Deviation_from_median_Movement_difference_movement_center_movement_left_hip ... movement_tail_tip\r\n89150 0.548807 ... 4.247683\r\n104100 -0.112851 ... 0.638911\r\n56938 0.541995 ... 1.495396\r\n74278 0.249147 ... 0.541398\r\n69342 0.345994 ... 0.886768\r\n... ... ... ...\r\n48366 -1.046650 ... 2.202447\r\n27243 0.826896 ... 1.022565\r\n79840 0.674166 ... 1.100300\r\n35520 0.391709 ... 2.451280\r\n83918 0.488544 ... 1.342929\r\n\r\n[91256 rows x 763 columns] \r\n\r\nprint(target_train)\r\n[1. 1. 1. ... 1. 1. 1.]\r\n\r\nprint(len(target_train))\r\n91256 \r\n```", + "created_at": "2020-04-07T11:44:04Z", + "author": "JensBlack" + }, + { + "body": "Update:\r\nRemoving the additional csv file (by deleting it in the target_inserted folder) and keeping the original csv file did not raise the error.\r\nRemoving the original csv file (but keeping the additional) raised the error.\r\n\r\nTherefore I will add information about my procedure of adding the additional csv file:\r\n\r\n1. Import csv file using the \"import single csv file\" in the \"further imports\" tab.\r\n2. Copying video and frames (manually into the corresponding folders)\r\n3. setting video parameters\r\n4. outlier correction (parameter set to 1000; above resolution): No Correction\r\n5. feature extraction\r\n6. Addition of label columns for 3 models in both csv files via custom script (result see above)\r\n7. Set Settings and train model\r\n\r\n", + "created_at": "2020-04-07T12:00:53Z", + "author": "JensBlack" + }, + { + "body": "i solved the issue.... it was an error in my labelling method. I will close the issue.\r\n\r\nFor future reference:\r\nThe number of rows in the labelled columns did not match the number of frames in the csv file.\r\ninterestingly: \r\nthis is not represented in data_train and and target_train length, which both have the same length.", + "created_at": "2020-04-07T12:14:14Z", + "author": "JensBlack" + }, + { + "body": "Hi @JensBlack, I may be having the same issue as you. 
What was the error you were making in your labeling method?\r\n\r\nThanks.", + "created_at": "2020-10-26T23:29:38Z", + "author": "benlansdell" + }, + { + "body": "Hi @benlansdell, \r\nI have the same problem, did you solve it?\r\n\r\nMany thanks", + "created_at": "2020-11-17T16:09:33Z", + "author": "benjaws" + } + ] + }, + { + "title": "Error in generateFeatureImportanceBarGraph", + "body": "**Describe the bug**\r\nAfter starting the training with the following settings, the error occurred.\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78660379-420d0600-78cd-11ea-8aef-796e77f7505d.png)\r\n\r\n```\r\n[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 12 concurrent workers.\r\n[Parallel(n_jobs=-1)]: Done 26 tasks | elapsed: 8.9s\r\n[Parallel(n_jobs=-1)]: Done 176 tasks | elapsed: 51.3s\r\n[Parallel(n_jobs=-1)]: Done 426 tasks | elapsed: 2.1min\r\n[Parallel(n_jobs=-1)]: Done 776 tasks | elapsed: 3.7min\r\n[Parallel(n_jobs=-1)]: Done 1226 tasks | elapsed: 5.8min\r\n[Parallel(n_jobs=-1)]: Done 1776 tasks | elapsed: 8.4min\r\n[Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 9.4min finished\r\n[Parallel(n_jobs=12)]: Using backend ThreadingBackend with 12 concurrent workers.\r\n[Parallel(n_jobs=12)]: Done 26 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=12)]: Done 176 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=12)]: Done 426 tasks | elapsed: 0.5s\r\n[Parallel(n_jobs=12)]: Done 776 tasks | elapsed: 1.0s\r\n[Parallel(n_jobs=12)]: Done 1226 tasks | elapsed: 1.6s\r\n[Parallel(n_jobs=12)]: Done 1776 tasks | elapsed: 2.4s\r\n[Parallel(n_jobs=12)]: Done 2000 out of 2000 | elapsed: 2.7s finished\r\n[Parallel(n_jobs=12)]: Using backend ThreadingBackend with 12 concurrent workers.\r\n[Parallel(n_jobs=12)]: Done 26 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=12)]: Done 176 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=12)]: Done 426 tasks | elapsed: 0.5s\r\n[Parallel(n_jobs=12)]: Done 776 tasks | elapsed: 1.0s\r\n[Parallel(n_jobs=12)]: Done 1226 tasks | elapsed: 1.6s\r\n[Parallel(n_jobs=12)]: Done 1776 tasks | elapsed: 2.4s\r\n[Parallel(n_jobs=12)]: Done 2000 out of 2000 | elapsed: 2.7s finished\r\n[Parallel(n_jobs=12)]: Using backend ThreadingBackend with 12 concurrent workers.\r\n[Parallel(n_jobs=12)]: Done 26 tasks | elapsed: 0.0s\r\n[Parallel(n_jobs=12)]: Done 176 tasks | elapsed: 0.2s\r\n[Parallel(n_jobs=12)]: Done 426 tasks | elapsed: 0.5s\r\n[Parallel(n_jobs=12)]: Done 776 tasks | elapsed: 1.0s\r\n[Parallel(n_jobs=12)]: Done 1226 tasks | elapsed: 1.6s\r\n[Parallel(n_jobs=12)]: Done 1776 tasks | elapsed: 2.3s\r\n[Parallel(n_jobs=12)]: Done 2000 out of 2000 | elapsed: 2.6s finished\r\nException in thread Thread-7:\r\nTraceback (most recent call last):\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 926, in _bootstrap_inner\r\n self.run()\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"simba.py\", line 3737, in trainsinglemodel\r\n trainmodel2(self.projectconfigini)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\train_model_2.py\", line 302, in trainmodel2\r\n generateFeatureImportanceBarGraph(log_df, N_feature_importance_bars)\r\nUnboundLocalError: local variable 'log_df' referenced before assignment\r\n```\r\n\r\n\r\n**Additional context**\r\nI guess this is an error based on the missing selection/toggle of \"Generate Features Importance Log\". 
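For reference, the traceback looks like the classic pattern where a variable is only assigned inside one branch; a guard along these lines would avoid the crash (a sketch of the pattern only - the flag and helper names are made up, not SimBA's actual code):\r\n\r\n```\r\ngenerate_importance_log = False   # state of the 'Generate Features Importance Log' tick box\r\ngenerate_importance_graph = True  # state of the 'Bar Graph' tick box\r\n\r\nlog_df = None\r\nif generate_importance_log:\r\n    log_df = build_feature_importance_log()  # placeholder for whatever fills the log\r\nif generate_importance_graph:\r\n    if log_df is None:\r\n        print('Tick Generate Features Importance Log first - the bar graph needs it.')\r\n    else:\r\n        generateFeatureImportanceBarGraph(log_df, n_bars)\r\n```\r\n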
Do you need to select both?\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-07T10:46:50Z", + "updated_at": "2020-04-08T07:59:47Z", + "author": "JensBlack", + "comments": [ + { + "body": "I am adding the relevant part of the tutorial for future reference and readability.\r\nQuote from the Readme (Generic SimBA Tutorial, 07.04.2020):\r\n\r\n> Generate Features Importance Log: Creates a .csv file that lists the importance's (gini importances) of all features for the classifier.\r\n> \r\n> Generate Features Importance Bar Graph: Creates a bar chart of the top N features based on gini importances. Specify N in the N feature importance bars entry box below.\r\n> \r\n> N feature importance bars: Integer defining the number of top features to be included in the bar graph (e.g., 15).", + "created_at": "2020-04-07T10:49:36Z", + "author": "JensBlack" + }, + { + "body": "Thanks @JensBlack - as I think you gathered, the graph can't be created without the `generate feature importance log`. I will look to add a feature in the GUI that greys the `Generate Feature importance bar graph` option out unless the `generate feature importance log` is ticked. ", + "created_at": "2020-04-07T18:53:37Z", + "author": "sgoldenlab" + }, + { + "body": "Okay, with both options enabled and a valid value entered, the bars plotted just fine. I will close this issue then.", + "created_at": "2020-04-08T07:59:47Z", + "author": "JensBlack" + } + ] + }, + { + "title": "Feature: Validate Settings Input before training start", + "body": "**Is your feature request related to a problem? Please describe.**\r\nThe settings for training a machine model in SimBa have multiple options that present a predefined value (e.g. NaN) when they are toggled. If you accept invalid settings (such as NaN), there is no feedback for the user, but training the model will result in an error (often at the end of training). 
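A simple type check when the settings are saved would already catch this; a rough sketch (the option names here are just examples and not necessarily SimBA's real keys, apart from the one taken from the traceback below):\r\n\r\n```\r\nfrom configparser import ConfigParser\r\n\r\nINT_OPTIONS = ['RF_n_estimators', 'LearningCurve_shuffle_k_splits']  # example keys\r\n\r\ndef validate_int_settings(config_path):\r\n    config = ConfigParser()\r\n    config.read(config_path)\r\n    bad = []\r\n    for option in INT_OPTIONS:\r\n        value = config.get('create ensemble settings', option, fallback='')\r\n        try:\r\n            int(value)\r\n        except ValueError:\r\n            bad.append(option + ' = ' + value)\r\n    if bad:\r\n        raise ValueError('Invalid (non-integer) settings: ' + ', '.join(bad))\r\n```\r\n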
\r\nSee below for an example:\r\n\r\n\r\n```\r\n[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 12 concurrent workers.\r\n[Parallel(n_jobs=-1)]: Done 26 tasks | elapsed: 4.9s\r\n[Parallel(n_jobs=-1)]: Done 176 tasks | elapsed: 26.0s\r\n[Parallel(n_jobs=-1)]: Done 426 tasks | elapsed: 1.0min\r\n[Parallel(n_jobs=-1)]: Done 776 tasks | elapsed: 1.9min\r\n[Parallel(n_jobs=-1)]: Done 1226 tasks | elapsed: 3.0min\r\n[Parallel(n_jobs=-1)]: Done 1776 tasks | elapsed: 4.4min\r\n[Parallel(n_jobs=-1)]: Done 2000 out of 2000 | elapsed: 5.0min finished\r\nException in thread Thread-1:\r\nTraceback (most recent call last):\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 926, in _bootstrap_inner\r\n self.run()\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"simba.py\", line 3737, in trainsinglemodel\r\n trainmodel2(self.projectconfigini)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\train_model_2.py\", line 259, in trainmodel2\r\n shuffle_splits = config.getint('create ensemble settings', 'LearningCurve_shuffle_k_splits')\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\configparser.py\", line 818, in getint\r\n fallback=fallback, **kwargs)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\configparser.py\", line 808, in _get_conv\r\n **kwargs)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\configparser.py\", line 802, in _get\r\n return conv(self.get(section, option, **kwargs))\r\nValueError: invalid literal for int() with base 10: 'NaN'\r\n```\r\n\r\nI assumed that there was a default value that would be used if I left the textbox at 'NaN'. \r\n\r\n**Describe the solution you'd like**\r\nThis is a user-created issue (and I won't do it again), but it might be worthwhile to validate the input when saving the settings (e.g., checking whether it is an 'int', 'float', etc.). Then, if it is invalid, either set the value to a default (while telling the user) or make the user choose again. This would save some users (like me) from their own ignorance, and others from wasting time training a machine model that will ultimately not be saved, because the validation process causes an error and everything is gone.\r\n\r\n**Describe alternatives you've considered**\r\nAn alternative would be to validate the settings before starting the training, simply throwing an error (e.g. \"ValueError: Some of your parameters are invalid. Consider reading the readme files again.\").\r\n\r\nAnother alternative would be to check user input when pressing the save button, similar to a contact form on a website. I guess this would take much more time (as it's related to GUI programming) though.\r\n\r\n**Additional context**\r\nI would not document this kind of request if your overall goal wasn't to create a tool that can be used without any (or without a lot of) programming experience. SimBa is already simplifying a complicated process so much and it is doing great work! \r\n\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-07T07:06:45Z", + "updated_at": "2020-06-15T07:45:21Z", + "author": "JensBlack", + "comments": [] + }, + { + "title": "RuntimeError: main thread is not in main loop", + "body": "**Describe the bug**\r\nWhen training a model using the app function \"train single model from global environment\", the following error popped up. It seems to be connected to yellowbrick (so reporting?) but messes with the functionality. 
The model will not be saved (even though it was successfully trained). \r\n\r\n**This is the SimBA report window**\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78555751-f7788480-780d-11ea-9341-05866862122c.png)\r\n\r\n**This is the console until the error**\r\n\r\n![grafik](https://user-images.githubusercontent.com/44863941/78555611-b54f4300-780d-11ea-8d2a-26bd05479a10.png)\r\n\r\n**This is the error message**\r\n```\r\nException in thread Thread-182:\r\nTraceback (most recent call last):\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 926, in _bootstrap_inner\r\n self.run()\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"simba.py\", line 3726, in trainsinglemodel\r\n trainmodel2(self.projectconfigini)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\train_model_2.py\", line 290, in trainmodel2\r\n generateClassificationReport(clf, class_names)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\train_model_2.py\", line 68, in generateClassificationReport\r\n g = visualizer.poof(outpath=visualizerPath, clear_figure=True)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\yellowbrick\\base.py\", line 215, in poof\r\n plt.savefig(outpath, **kwargs)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\matplotlib\\pyplot.py\", line 690, in savefig\r\n fig.canvas.draw_idle() # need this if 'transparent=True' to reset colors\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\site-packages\\matplotlib\\backends\\_backend_tk.py\", line 350, in draw_idle\r\n self._idle_callback = self._tkcanvas.after_idle(idle_draw)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\tkinter\\__init__.py\", line 764, in after_idle\r\n return self.after('idle', func, *args)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\tkinter\\__init__.py\", line 756, in after\r\n name = self._register(callit)\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\tkinter\\__init__.py\", line 1372, in _register\r\n self.tk.createcommand(name, f)\r\nRuntimeError: main thread is not in main loop\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\nSimBA GPU installation (Anaconda) on Windows 10\r\n \r\n**Additional context**\r\n**A restart of the whole procedure went completely smoothly, without any errors.** No restart of SimBA was necessary or anything - I just clicked the button again.\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-06T11:55:58Z", + "updated_at": "2020-05-11T19:25:35Z", + "author": "JensBlack", + "comments": [ + { + "body": "@JensBlack, I think this is a threading issue. Let me know if this happens again and I will redesign the code.", + "created_at": "2020-04-06T19:26:00Z", + "author": "inoejj" + } + ] + }, + { + "title": "Feature: Additional models after Project Creation", + "body": "Disclaimer: This is a feature that is mainly aimed at ease of use and is therefore only meant as a suggestion.\r\n\r\nI am currently trying to merge different automatic labelling approaches into one project and noticed that there is no option to add additional models after the initial project creation. 
This is probably connected to the consistency of the feature extraction tool, but could be easily implemented in a workaround solution (like the one I use).\r\n**Describe the solution you'd like**\r\nSo far I found the following changes necessary to add a model to an existing project:\r\nIn the `project_config` file, add a new line in the following sections and increase the number of targets:\r\n\r\n[SML settings]\r\nmodel_dir = D:/SimBa\\MovementDetector\\models\r\nmodel_path_1 = D:/SimBa\\ProjectName\\models\\Existing_model1.sav\r\nmodel_path_2 = D:/SimBa\\ProjectName\\models\\Existing_model2.sav\r\n**model_path_3 = D:/SimBa\\ProjectName\\models\\New_model.sav**\r\n**no_targets = 3**\r\ntarget_name_1 = Existing_model1\r\ntarget_name_2 = Existing_model2\r\n**target_name_3 = New_model**\r\n\r\n[threshold_settings]\r\nthreshold_1 = \r\nthreshold_2 = \r\n**threshold_3 =** \r\n\r\n[Minimum_bout_lengths]\r\nmin_bout_1 = \r\nmin_bout_2 = \r\n**min_bout_3 =** \r\n\r\n\r\nThis addition to the `project_config` file could be easily done via the app (I think) and would allow non-advanced users to change the number of existing models without the need to create a new project.\r\n\r\nWith these changes, and an external addition of extracted features to the necessary csv file, I was able to create a new model and train it within the SimBA app.\r\n\r\n**Additional context**\r\nI have not tested whether feature extraction works with all 3 models (Old_model1, old_model2, New_Model) when feature extractions already exist for the old models. You can probably clarify whether you overwrite the features-extracted csv file or append columns to it.\r\n\r\nEdit:\r\nspelling\r\n\r\n\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-06T11:33:20Z", + "updated_at": "2020-05-11T19:25:26Z", + "author": "JensBlack", + "comments": [ + { + "body": "Hi @JensBlack - thanks for pointing this out: we have added a function in the [Further imports (data/video/frames)] tab in the Load project menu which you can use to add further classifiers, or remove existing ones, from the project: \r\n\r\n![image](https://user-images.githubusercontent.com/50497030/79537237-b286f600-8036-11ea-8732-11cfeebcc558.png)\r\n", + "created_at": "2020-04-17T06:05:13Z", + "author": "sgoldenlab" + }, + { + "body": "PS. The min_bout, threshold, and model path in the project_config.ini will be updated when you set the variables for the model in the [Run machine model] tab. ", + "created_at": "2020-04-17T06:07:08Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Feature: Skip outlier correction", + "body": "**Is your feature request related to a problem? Please describe.**\r\nThis feature request is in direct connection to this [issue](https://github.com/sgoldenlab/simba/issues/17#issue-593348435).\r\n\r\n**Describe the solution you'd like**\r\nAs this is mainly a copy/paste of data, plus some minor adjustments to the additional logs you create, my suggestion would be a button in the outlier correction tab that creates a dummy outlier correction (without the calculations) file and handles the data movement/creation just as the normal function would. 
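In workaround form, the dummy step would essentially boil down to this (a sketch assuming the project layout above; the source folder name is an assumption, and any header conversion is left out):\r\n\r\n```\r\nimport os\r\nimport shutil\r\n\r\nsrc_dir = os.path.join('project_folder', 'csv', 'input_csv')  # assumed location of the raw tracking csv files\r\ndst_dir = os.path.join('project_folder', 'csv', 'outlier_corrected_movement_location')\r\n\r\nos.makedirs(dst_dir, exist_ok=True)\r\nfor file_name in os.listdir(src_dir):\r\n    if file_name.endswith('.csv'):\r\n        shutil.copy(os.path.join(src_dir, file_name), os.path.join(dst_dir, file_name))\r\n```\r\n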
\r\nAnother alternative would be to allow the import of filtered/corrected files that would then be transformed into your style of logging/handling these kinds of files. \r\nIt probably depends on whether there is any use for a truly raw tracking file in SimBa's workflow if the advanced user already has the ability to filter/correct them outside of SimBa.\r\n\r\n**Describe alternatives you've considered**\r\nAn easy alternative is to set the parameters given to the correction higher than the max resolution of the video, so that no bodypart can ever reach that distance.\r\n\r\n**Additional context**\r\nI understand that this is a feature that helps refine raw tracking data further and is therefore intentionally not skippable. This is a minor request and the community can probably live without it. But, as many groups work with tracking data such as DLC's, and some have adapted their own tricks to handle outliers, it might be a useful feature for advanced users.\r\n\r\nThank you for your hard work so far.\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-03T13:12:08Z", + "updated_at": "2020-04-09T12:36:57Z", + "author": "JensBlack", + "comments": [ + { + "body": "Hi @JensBlack - thanks for this! We have created a button for completely skipping outlier corrections in the `outlier correction tab`:\r\n\r\n![Skip](https://user-images.githubusercontent.com/50497030/78675276-a910c780-7899-11ea-960b-c7107c244cdb.JPG)\r\n\r\nClicking this button corrects the header rows of the input CSVs (from DLC multi-index-style to single-level style) and copies the CSVs to the `outlier_corrected_movement_location` folder in the project, without checking/correcting outliers in the tracking, and you can proceed extracting features / analyzing ROIs. Let me know if it works! Thanks!\r\n", + "created_at": "2020-04-07T13:37:25Z", + "author": "sgoldenlab" + }, + { + "body": " I tried the new function and it did work out of the box! Just as a note: It only created the outlier csv in the folder 'outlier_corrected_movement_location' (not in 'outlier_corrected_movement'). I processed the files using the extract feature function, so I guess everything is working!\r\n\r\n", + "created_at": "2020-04-09T12:35:15Z", + "author": "JensBlack" + } + ] + }, + { + "title": "Error in Outlier correction", + "body": "**Describe the bug**\r\nAfter starting the outlier correction with both parameters set to 0 (the plan was to skip the correction with this), I got the following error message:\r\n\r\n`Exception in Tkinter callback\r\nTraceback (most recent call last):\r\n File \"D:\\Anaconda\\envs\\simba-gpu\\lib\\tkinter\\__init__.py\", line 1705, in __call__\r\n return self.func(*args)\r\n File \"simba.py\", line 3818, in correct_outlier\r\n dev_loc_user_defined(configini)\r\n File \"C:\\Users\\schwa\\PycharmProjects\\simba\\simba\\outlier_scripts\\location\\correct_devs_loc_user_defined.py\", line 100, in dev_loc_user_defined\r\n currentArray[currentPosition][0] = reliableCoordinates[currentPosition][0]\r\nIndexError: index 7 is out of bounds for axis 0 with size 7`\r\n\r\nThe csv file was transferred/created in the correct folder, I assume, \"project_folder\\csv\\outlier_corrected_movement\", but there is no file in \"outlier_corrected_movement_location\".\r\nI checked your code and I guess it did not go through, as the error occurs in one of the essential scripts.\r\n\r\n**To Reproduce**\r\nMy user-defined settings are: 9 points, 1 animal (directly from DLC analysis)\r\nThe csv file is ca. 
55k rows long\r\n\r\nI am using a Windows 10 computer and your newest SimBA version (updated at 10 am local time)\r\n \r\n\r\n**Additional context**\r\nHowever, I cannot continue with feature extraction.\r\nIt results in a message within your app:\r\n`Pose-estimation body part setting for feature extraction: user_defined\r\nExtracting features from 0 files...\r\nAll feature extraction complete.`\r\nwhich is probably due to a non-working file import in your code:\r\n\r\n```\r\ndef extract_features_wotarget(inifile):\r\n    ....\r\n    csv_dir_in = os.path.join(csv_dir, 'outlier_corrected_movement_location')\r\n    ....\r\n```\r\n\r\n```\r\n    ########### FIND CSV FILES ###########\r\n    for i in os.listdir(csv_dir_in):\r\n        if i.__contains__(\".csv\"):\r\n            fname = os.path.join(csv_dir_in, i)\r\n            filesFound.append(fname)\r\n    print('Extracting features from ' + str(len(filesFound)) + ' files...')\r\n```\r\n\r\nI can however continue if I copy+paste the same csv file (from outlier_corrected_movement) into the other folder. I haven't read through all of your code, but I assume that you keep the general structure of the dataframe that comes from DLC, so this can work as a workaround for now (especially since I did not want to do an outlier correction anyway).\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-04-03T12:30:09Z", + "updated_at": "2020-04-06T11:56:37Z", + "author": "JensBlack", + "comments": [ + { + "body": "I just noticed that it wasn't very smart to set the boundary for the correction to 0 if I wanted to skip everything. \r\nIt actually corrected 100% of the data, which is definitely as intended by you... \r\nSetting the boundary higher than the resolution fixed that minor hiccup and resolved the issue. \r\n\r\nI will leave this open for you to check out, but I guess the whole issue was an unlikely user interaction you didn't account for when writing your code. You can close it anytime.", + "created_at": "2020-04-03T12:39:20Z", + "author": "JensBlack" + } + ] + }, + { + "title": "Pose Configuration: Window sizing bug", + "body": "**Describe the bug**\r\nWhen importing a small image (96x161) to create a pose configuration, the autosizing of the window is buggy (see image below).\r\n\r\n**Screenshots**\r\n![grafik](https://user-images.githubusercontent.com/44863941/77520404-74f0dc00-6e81-11ea-8646-e4bea9eb1f60.png)\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n- Anaconda\r\n- simba_no_tf\r\n\r\n**Additional context**\r\nI guess you will run into this issue with apps if you autosize and go to the limits of tkinter and such. So this is more of a note than a real big issue. But as the original video resolution was 848x480 (quite common for tracking), the cut-off might be something that happens more than once :)\r\n", + "user": "JensBlack", + "reaction_cnt": 0, + "created_at": "2020-03-25T09:19:15Z", + "updated_at": "2020-03-26T10:20:30Z", + "author": "JensBlack", + "comments": [ + { + "body": "additional note: maximizing the window does not solve the issue, but expands the window with additional grey areas.", + "created_at": "2020-03-25T09:20:38Z", + "author": "JensBlack" + }, + { + "body": "Hi @JensBlack - many thanks for reporting this! I've inserted a fix - could you download it again and check if it's working on your end? In the fix, small images (less than 300px width) are resized to 800px wide. 
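The resize is just a proportional upscale; in OpenCV terms it is roughly (a sketch of the idea, not the exact code):\r\n\r\n```\r\nimport cv2\r\n\r\nimg = cv2.imread('frame.png')\r\nheight, width = img.shape[:2]\r\nif width < 300:\r\n    new_height = int(height * (800 / width))\r\n    img = cv2.resize(img, (800, new_height), interpolation=cv2.INTER_LINEAR)\r\n```\r\n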
So you should be able to place out your dots appropriately but - beware, it will look rather grainy! \r\n\r\n ", + "created_at": "2020-03-25T15:51:04Z", + "author": "sgoldenlab" + }, + { + "body": "Thanks for the fast reply! The hotfix works.\r\nI just tested it. The image is displayed in maximized window mode and is quite stretched (as you mentioned), but the window can be resized, so the mouse is mouse-shaped again! See screenshots below:\r\n\r\nmaximized window:\r\n![grafik](https://user-images.githubusercontent.com/44863941/77635854-7b4f8880-6f53-11ea-8a66-3112346a2209.png)\r\nwindowed window:\r\n![grafik](https://user-images.githubusercontent.com/44863941/77635906-95896680-6f53-11ea-81af-177d133c3975.png)\r\n\r\nI will close the \"issue\". Thank you again for the hotfix! ", + "created_at": "2020-03-26T10:20:30Z", + "author": "JensBlack" + } + ] + }, + { + "title": "Movie filename mismatch in csv files: a bug and a fix", + "body": "Hi again,\r\n\r\nI am using the new version 1.1 of the SimBA environment, and came across a small glitch, I think related to my particular way of using it. \r\n\r\nSo, the bug:\r\n\r\nAfter the \"Outlier Correction\" step I proceeded directly to the \"Extract Features\" step, but got an error message in the main SimBA GUI window: \"Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file\". The step thus failed.\r\n \r\n\r\nPossible reason and working empiric fix:\r\n\r\nThe postures were analysed separately using DeepLabCut (not via the SimBA pipeline), and the resulting .csv files with coordinates from DeepLabCut were then loaded into the project. Thus, the names of those files (something like \"Dec0419_Trial1DLC_resnet50_resident_intruder_optogeneticsFeb14shuffle1_600000.csv\") were not the same as the names of the movies loaded into the project (\"Dec0419_Trial1.mp4\"). The former names were listed in the log files project_folder\\log\\Outliers_location_nnnnnnn.csv and project_folder\\log\\Outliers_movement_nnnnnnnnnn.csv, but the latter (simpler) movie names were listed in the file project_folder\\log\\video_info.csv.\r\nThe issue was resolved by pasting the values of the movie names from the Outliers_*.csv log files into the video_info.csv log file.\r\n\r\nSincerely,\r\nOlexiy.", + "user": "bmi-lsym", + "reaction_cnt": 2, + "created_at": "2020-03-19T23:23:08Z", + "updated_at": "2020-03-22T22:47:27Z", + "author": "bmi-lsym", + "comments": [ + { + "body": "Hi @bmi-lsym. Thanks Olexiy! This may also be related to an error that was highlighted and resolved this morning - related to DeepLabCut changing their CSV output file names in their newer versions: https://github.com/sgoldenlab/simba/issues/13. If you download SimBA again this error has been fixed. \r\n\r\nAs you say, in the video_info.csv - make sure all the videos' metadata and correct video file names are stored before proceeding through the outlier correction and feature extraction steps. These are set in the [Video parameters] tab in SimBA. The video_config.csv may look like this, where the video column names correspond to the names of your tracking/video files. 
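If you have many files to align, stripping the DeepLabCut suffix from the tracking file names programmatically might save time - a sketch (assumes the standard \"DLC_...\"/\"DeepCut_...\" suffix pattern; adjust the folder to wherever your tracking csv files live):\r\n\r\n```\r\nimport os\r\nimport re\r\n\r\ncsv_dir = 'project_folder\\\\csv\\\\input_csv'  # assumed location of the tracking csv files\r\nfor file_name in os.listdir(csv_dir):\r\n    new_name = re.sub(r'(DLC|DeepCut)_\\w+\\.csv$', '.csv', file_name)\r\n    if new_name != file_name:\r\n        os.rename(os.path.join(csv_dir, file_name), os.path.join(csv_dir, new_name))\r\n```\r\n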
\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/77126960-071f5d00-6a08-11ea-917e-c2c67851127c.png)\r\n\r\nThanks again \r\n\r\n\r\n\r\n\r\n\r\n", + "created_at": "2020-03-20T00:40:09Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "'Append ROI data to features' is not functioning", + "body": "**Describe the bug**\r\n'Append ROI data to features' is not functioning as expected.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nAfter successful execution of the 'Analyze ROI' and 'Visualize ROI' steps:\r\n1. Load the project\r\n2. Go to 'Extract features' tab, click on 'Extract features', wait until 'All feature extraction complete.' message\r\n3. Click 'Append ROI data to features'\r\n4. See error\r\n\r\n**Screenshots**\r\n![Bug](https://user-images.githubusercontent.com/31575364/77113850-ccc7b700-6a2b-11ea-8ec8-b6fdc33ac06c.PNG)\r\n![Bug2](https://user-images.githubusercontent.com/31575364/77113855-ce917a80-6a2b-11ea-9539-0206ebcf1f40.PNG)\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Browser: Chrome\r\n", + "user": "onurserce", + "reaction_cnt": 0, + "created_at": "2020-03-19T20:54:03Z", + "updated_at": "2020-03-20T17:17:46Z", + "author": "onurserce", + "comments": [ + { + "body": "Thank you for the in-depth report. In your Anaconda prompt, can you type in `import pandas as pd`, then `pd.__version__`, and let me know what version of pandas you have installed?\r\n\r\nIf the version is not 0.25.3, please uninstall your pandas and reinstall pandas version 0.25.3.\r\n\r\n", + "created_at": "2020-03-19T21:31:25Z", + "author": "inoejj" + }, + { + "body": "Hi @inoejj,\r\n\r\nMy `pandas.__version__` seems to be 0.25.3 as required. The problem is arising from another source. \r\n\r\nCould that be because of numpy or something else?\r\n\r\nAttached you can find all packages and their versions in my environment, obtained with the command `conda list`\r\n\r\n[packages.txt.txt](https://github.com/sgoldenlab/simba/files/4359937/packages.txt.txt)\r\n", + "created_at": "2020-03-20T14:02:39Z", + "author": "onurserce" + }, + { + "body": "@inoejj @sronilsson \r\n\r\nThe problem is caused by different column names in the following two files:\r\n...\\project_folder\\logs\\measures\\pose_configs\\bp_names**\\project_body_names.csv** and\r\n...\\project_folder\\csv\\features_extracted\\extracted_features.csv\r\n\r\nproject_body_names.csv by default names body parts without any numbers (except Center_2; I selected 1 animal with 8bps throughout the whole project). However, SimBA generates the extracted features csv file such that it appends numbers to each body part (either 1 or 2), probably owing to the fact that SimBA was initially written for tracking 2 animals and features were calculated based on these. \r\n\r\nThe problem is solved by editing the project_body_names.csv file and appending `_1` to all body part names (except Center, which was changed from Center_2 to Center_1).", + "created_at": "2020-03-20T15:06:18Z", + "author": "onurserce" + }, + { + "body": "@onurserce - thanks for finding this one - I have updated the code to fix this issue ", + "created_at": "2020-03-20T16:30:28Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the update. Let me know if I can help in any way!", + "created_at": "2020-03-20T17:17:45Z", + "author": "onurserce" + } + ] + }, + { + "title": "ROI module not working", + "body": "**Describe the bug**\r\nThe ROI module is not working. 
After selecting the ROIs, the output CSV file appears to be empty.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nAfter loading the project\r\n1. Go to 'ROI' tab\r\n2. Define ROIs and click 'Show Shape Definitions table'\r\n3. Enter shape names, leave dimensions 0, click on Set Shape Definitions\r\n4. There's only one video, click on draw and draw shapes according to instructions\r\n5. Click on 'Analyse ROI Data'\r\n\r\n**Expected behavior**\r\nA CSV file with analyzed ROIs\r\n\r\n**Screenshots**\r\n![bug1](https://user-images.githubusercontent.com/31575364/77079048-b7389a00-69f7-11ea-9b53-2787a463fd8d.PNG)\r\n![bug2](https://user-images.githubusercontent.com/31575364/77079052-b869c700-69f7-11ea-827d-7a2136e6e60a.PNG)\r\n![bug3](https://user-images.githubusercontent.com/31575364/77079054-b9025d80-69f7-11ea-94eb-69df0206bb55.PNG)\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Browser: Chrome\r\n\r\n**Additional context**\r\n1. The outlier correction step was done successfully, and non-empty .csv files are available at project_folder\\csv\\outlier_corrected_movement_location and project_folder\\csv\\outlier_corrected_movement\r\n2. The video name in the file project_folder\\logs\\video_info.csv was changed due to an incompatibility of SimBA with DeepLabCut while importing videos into SimBA (#8)\r\n3. All .csv files highlighted in the screenshot are empty.\r\n", + "user": "onurserce", + "reaction_cnt": 0, + "created_at": "2020-03-19T14:41:34Z", + "updated_at": "2020-03-19T19:36:43Z", + "author": "onurserce", + "comments": [ + { + "body": "Thanks @onurserce - you did catch a bug with the file import: some CSV tracking file names were not changed appropriately when imported into SimBA - primarily as DeepLabCut has changed its output file names to contain \"DLC\" rather than \"DeepCut\" - I will have an updated version online shortly and get back to you. \r\n\r\nIn the meantime, is it right that you have a space in your file name (\"Test 2DLC...\")? SimBA and Python are not very good at handling spaces. Try removing it before importing (e.g., import a video named Test.mp4, and a tracking file named Test.csv) and let me know how it goes. ", + "created_at": "2020-03-19T16:54:29Z", + "author": "sgoldenlab" + }, + { + "body": "I have fixed the bug - could you download SimBA again, start a new project, and give it a go without spaces in the filenames, and let me know how it works? ", + "created_at": "2020-03-19T17:07:44Z", + "author": "sgoldenlab" + }, + { + "body": "Confirmed fixed by another user. Thanks!", + "created_at": "2020-03-19T18:03:49Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab Indeed I had a lot of trouble due to that space in the filename, and I had to change project_folder\\videos\\Test 2 to project_folder\\videos\\Test; that fixed the problem encountered during the frame extraction step.\r\n\r\nHowever, even though I reinstalled SimBA, the ROI module still does not seem to be working for me; I just have two more empty CSV files under the logs folder. Should I try something else?\r\n\r\nEdit: After starting with a new project, the issue now seems to be resolved. 
Thank you @sgoldenlab ", + "created_at": "2020-03-19T18:23:15Z", + "author": "onurserce" + }, + { + "body": "Great, many thanks for letting me know about the issue - and please let me know if any more issues pop up", + "created_at": "2020-03-19T19:36:43Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Installation of the version 1.1", + "body": "Hello, \r\n\r\nI wanted to upgrade to SimBA 1.1 (installation of the previous 1.0 version went smoothly).\r\nUsing the Anaconda3 command prompt.\r\n\r\nFollowed the instructions on the GitHub page:\r\n\r\n> git clone -b master https://github.com/sgoldenlab/simba.git\r\n> pip install -r simba/simba/requirements.txt\r\n\r\nFFmpeg was already installed.\r\n\r\nThen, in Anaconda3 run\r\n>python SimBA.py\r\n \r\nThe SimBA GUI did not start, and instead resulted in the error message listed below.\r\n\r\nTried to work around it by cloning the Tensorflow-free installation, repeating all the steps above and the following command: \r\n>git clone -b SimBA_no_TF https://github.com/sgoldenlab/simba.git\r\n \r\nAs a result, got the same error message.\r\n\r\nTried uninstalling Anaconda, then reinstalled Anaconda & DeepLabCut and repeated the SimBA installation steps... unfortunately, the same outcome.\r\n\r\nCould you please help?\r\n\r\nThank you very much in advance.\r\n\r\nSincerely,\r\nOlexiy. \r\n\r\n\r\n**Error messages in Anaconda prompt**\r\n\r\n(base) c:\\Alex\\simba\\simba>python SimBA.py\r\nTraceback (most recent call last):\r\n File \"SimBA.py\", line 51, in \r\n from ROI_freehand_draw_3 import roiFreehand\r\n File \"c:\\Alex\\simba\\simba\\ROI_freehand_draw_3.py\", line 6, in \r\n from shapely.geometry import Polygon\r\n File \"C:\\anaconda3\\lib\\site-packages\\shapely\\geometry\\__init__.py\", line 4, in\r\n \r\n from .base import CAP_STYLE, JOIN_STYLE\r\n File \"C:\\anaconda3\\lib\\site-packages\\shapely\\geometry\\base.py\", line 18, in \r\n from shapely.coords import CoordinateSequence\r\n File \"C:\\anaconda3\\lib\\site-packages\\shapely\\coords.py\", line 8, in \r\n from shapely.geos import lgeos\r\n File \"C:\\anaconda3\\lib\\site-packages\\shapely\\geos.py\", line 145, in \r\n _lgeos = CDLL(os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll'))\r\n File \"C:\\anaconda3\\lib\\ctypes\\__init__.py\", line 364, in __init__\r\n self._handle = _dlopen(self._name, mode)\r\nOSError: [WinError 126] The specified module could not be found\r\n\r\n(base) c:\\Alex\\simba\\simba>\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows \r\n - Browser: Firefox\r\n", + "user": "bmi-lsym", + "reaction_cnt": 0, + "created_at": "2020-03-17T19:56:03Z", + "updated_at": "2020-03-25T09:09:16Z", + "author": "bmi-lsym", + "comments": [ + { + "body": "Ok, after trying a bit more myself and following the error message, I could fix the issue by manually copying the missing file \"geos_c.dll\" from its source location \"...\\anaconda3\\Lib\\site-packages\\shapely\\DLLs\\\" to the required location \"...\\anaconda3\\Library\\bin\\\".\r\n", + "created_at": "2020-03-17T20:38:24Z", + "author": "bmi-lsym" + }, + { + "body": "Thank you for posting the error and the fix! We have added this to our notes to look into further. 
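For anyone else who hits this before we have looked into it, the fix above in script form (a sketch; it assumes the default Anaconda layout, with sys.prefix pointing at the environment - the same prefix used in the failing CDLL call):\r\n\r\n```\r\nimport os\r\nimport shutil\r\nimport sys\r\n\r\nsrc = os.path.join(sys.prefix, 'Lib', 'site-packages', 'shapely', 'DLLs', 'geos_c.dll')\r\ndst = os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll')\r\nif os.path.isfile(src) and not os.path.isfile(dst):\r\n    shutil.copy(src, dst)\r\n```\r\n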
Please let us know if anything else pops up", + "created_at": "2020-03-18T17:38:08Z", + "author": "sgoldenlab" + }, + { + "body": "I encountered the same problem but was able to solve it using\r\n`conda install shapely`\r\nA quick Google search suggests that using pip does not install the correct versions for Windows users.\r\nNote: I just installed shapely on top of your requirements.txt, and it worked.", + "created_at": "2020-03-25T09:09:16Z", + "author": "JensBlack" + } + ] + }, + { + "title": "Install Issue Shapely", + "body": "After installing from the requirements.txt, getting the following Traceback:\r\n\r\nTraceback (most recent call last):\r\n File \"SimBA.py\", line 51, in \r\n from ROI_freehand_draw_3 import roiFreehand\r\n File \"C:\\Users\\dlhag\\simba\\simba\\ROI_freehand_draw_3.py\", line 6, in \r\n from shapely.geometry import Polygon\r\n File \"C:\\Users\\dlhag\\Anaconda3\\envs\\simba\\lib\\site-packages\\shapely\\geometry\\__init__.py\", line 4, in \r\n from .base import CAP_STYLE, JOIN_STYLE\r\n File \"C:\\Users\\dlhag\\Anaconda3\\envs\\simba\\lib\\site-packages\\shapely\\geometry\\base.py\", line 18, in \r\n from shapely.coords import CoordinateSequence\r\n File \"C:\\Users\\dlhag\\Anaconda3\\envs\\simba\\lib\\site-packages\\shapely\\coords.py\", line 8, in \r\n from shapely.geos import lgeos\r\n File \"C:\\Users\\dlhag\\Anaconda3\\envs\\simba\\lib\\site-packages\\shapely\\geos.py\", line 145, in \r\n _lgeos = CDLL(os.path.join(sys.prefix, 'Library', 'bin', 'geos_c.dll'))\r\n File \"C:\\Users\\dlhag\\Anaconda3\\envs\\simba\\lib\\ctypes\\__init__.py\", line 348, in __init__\r\n self._handle = _dlopen(self._name, mode)\r\nOSError: [WinError 126] The specified module could not be found\r\n\r\nTried a few shapely versions, couldn't get anything other than this. \r\n\r\nAny thoughts?", + "user": "dlhagger", + "reaction_cnt": 0, + "created_at": "2020-03-09T20:22:37Z", + "updated_at": "2020-05-15T20:27:50Z", + "author": "dlhagger", + "comments": [ + { + "body": "Hi @dlhagger - yes, try this one:\r\n\r\nGo to the following address and download Shapely 1.7.0: https://www.lfd.uci.edu/~gohlke/pythonlibs/#shapely\r\n\r\nIf you are using Python 3.6.0 and Windows (as I am), click on: Shapely‑1.7.0‑cp36‑cp36m‑win_amd64.whl (or here is a direct link to the file: https://download.lfd.uci.edu/pythonlibs/s2jqpv5t/Shapely-1.7.0-cp36-cp36m-win_amd64.whl)\r\n\r\nNext, navigate to the download folder, open a command prompt and run: pip install Shapely-1.7.0-cp36-cp36m-win_amd64.whl \r\n\r\nLet me know if that works.\r\n\r\nNB: `conda install shapely` works as well.\r\n", + "created_at": "2020-03-09T22:16:34Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab - tried to download directly and still got the same issue. \r\n\r\nI did get it to work by building a Python 3.6.10 env and using conda to install shapely, then pip installing the rest of the packages. ", + "created_at": "2020-03-10T14:32:23Z", + "author": "dlhagger" + }
 ] + }, + { + "title": "Bump tensorflow-gpu from 1.14.0 to 1.15.2 in /simba", + "body": "Bumps [tensorflow-gpu](https://github.com/tensorflow/tensorflow) from 1.14.0 to 1.15.2.\n
\nRelease notes\n\n*Sourced from [tensorflow-gpu's releases](https://github.com/tensorflow/tensorflow/releases).*\n\n> ## TensorFlow 1.15.2\n> # Release 1.15.2\r\n> \r\n> Note that this release no longer has a single pip package for GPU and CPU. Please see [#36347](https://github-redirect.dependabot.com/tensorflow/tensorflow/issues/36347) for history and details\r\n> \r\n> ## Bug Fixes and Other Changes\r\n> * Fixes a security vulnerability where converting a Python string to a `tf.float16` value produces a segmentation fault ([CVE-2020-5215](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-5215))\r\n> * Updates `curl` to `7.66.0` to handle [CVE-2019-5482](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5482) and [CVE-2019-5481](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5481)\r\n> * Updates `sqlite3` to `3.30.01` to handle [CVE-2019-19646](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-19646), [CVE-2019-19645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-19645) and [CVE-2019-16168](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-16168)\n> \n> ## TensorFlow 1.15.0\n> # Release 1.15.0\r\n> This is the last 1.x release for TensorFlow. We do not expect to update the 1.x branch with features, although we will issue patch releases to fix vulnerabilities for at least one year.\r\n> \r\n> ## Major Features and Improvements\r\n> * As [announced](https://groups.google.com/a/tensorflow.org/forum/#!topic/developers/iRCt5m4qUz0), `tensorflow` pip package will by default include GPU support (same as `tensorflow-gpu` now) for the platforms we currently have GPU support (Linux and Windows). It will work on machines with and without Nvidia GPUs. `tensorflow-gpu` will still be available, and CPU-only packages can be downloaded at `tensorflow-cpu` for users who are concerned about package size.\r\n> * TensorFlow 1.15 contains a complete implementation of the 2.0 API in its `compat.v2` module. It contains a copy of the 1.15 main module (without `contrib`) in the `compat.v1` module. TensorFlow 1.15 is able to emulate 2.0 behavior using the `enable_v2_behavior()` function.\r\n> This enables writing forward compatible code: by explicitly importing either `tensorflow.compat.v1` or `tensorflow.compat.v2`, you can ensure that your code works without modifications against an installation of 1.15 or 2.0.\r\n> * `EagerTensor` now supports numpy buffer interface for tensors.\r\n> * Add toggles `tf.enable_control_flow_v2()` and `tf.disable_control_flow_v2()` for enabling/disabling v2 control flow.\r\n> * Enable v2 control flow as part of `tf.enable_v2_behavior()` and `TF2_BEHAVIOR=1`.\r\n> * AutoGraph translates Python control flow into TensorFlow expressions, allowing users to write regular Python inside `tf.function`-decorated functions. AutoGraph is also applied in functions used with `tf.data`, `tf.distribute` and `tf.keras` APIS.\r\n> * Adds `enable_tensor_equality()`, which switches the behavior such that: \r\n> * Tensors are no longer hashable.\r\n> * Tensors can be compared with `==` and `!=`, yielding a Boolean Tensor with element-wise comparison results. This will be the default behavior in 2.0.\r\n> * Auto Mixed-Precision graph optimizer simplifies converting models to `float16` for acceleration on Volta and Turing Tensor Cores. This feature can be enabled by wrapping an optimizer class with `tf.train.experimental.enable_mixed_precision_graph_rewrite()`.\r\n> * Add environment variable `TF_CUDNN_DETERMINISTIC`. 
Setting to \"true\" or \"1\" forces the selection of deterministic cuDNN convolution and max-pooling algorithms. When this is enabled, the algorithm selection procedure itself is also deterministic.\r\n> * TensorRT\r\n> * Migrate TensorRT conversion sources from contrib to compiler directory in preparation for TF 2.0.\r\n> * Add additional, user friendly `TrtGraphConverter` API for TensorRT conversion.\r\n> * Expand support for TensorFlow operators in TensorRT conversion (e.g.\r\n> `Gather`, `Slice`, `Pack`, `Unpack`, `ArgMin`, `ArgMax`,`DepthSpaceShuffle`). \r\n> * Support TensorFlow operator `CombinedNonMaxSuppression` in TensorRT conversion which \r\n> significantly accelerates object detection models.\r\n> \r\n> ## Breaking Changes\r\n> * Tensorflow code now produces 2 different pip packages: `tensorflow_core` containing all the code (in the future it will contain only the private implementation) and `tensorflow` which is a virtual pip package doing forwarding to `tensorflow_core` (and in the future will contain only the public API of tensorflow). We don't expect this to be breaking, unless you were importing directly from the implementation.\r\n> * TensorFlow 1.15 is built using devtoolset7 (GCC7) on Ubuntu 16. This may lead to ABI incompatibilities with extensions built against earlier versions of TensorFlow.\r\n> * Deprecated the use of `constraint=` and `.constraint` with ResourceVariable.\r\n> * `tf.keras`:\r\n> * `OMP_NUM_THREADS` is no longer used by the default Keras config. To configure the number of threads, use `tf.config.threading` APIs.\r\n> * `tf.keras.model.save_model` and `model.save` now defaults to saving a TensorFlow SavedModel.\r\n> * `keras.backend.resize_images` (and consequently, `keras.layers.Upsampling2D`) behavior has changed, a bug in the resizing implementation was fixed.\r\n> * Layers now default to `float32`, and automatically cast their inputs to the layer's dtype. If you had a model that used `float64`, it will probably silently use `float32` in TensorFlow2, and a warning will be issued that starts with Layer \"layer-name\" is casting an input tensor from dtype float64 to the layer's dtype of float32. To fix, either set the default dtype to float64 with `tf.keras.backend.set_floatx('float64')`, or pass `dtype='float64'` to each of the Layer constructors. See `tf.keras.layers.Layer` for more information.\r\n> * Some `tf.assert_*` methods now raise assertions at operation creation time (i.e. when this Python line executes) if the input tensors' values are known at that time, not during the session.run(). When this happens, a noop is returned and the input tensors are marked non-feedable. In other words, if they are used as keys in `feed_dict` argument to `session.run()`, an error will be raised. Also, because some assert ops don't make it into the graph, the graph structure changes. A different graph can result in different per-op random seeds when they are not given explicitly (most often).\r\n> \r\n> ## Bug Fixes and Other Changes\r\n> * `tf.estimator`:\r\n> * `tf.keras.estimator.model_to_estimator` now supports exporting to `tf.train.Checkpoint` format, which allows the saved checkpoints to be compatible with `model.load_weights`.\r\n> * Fix tests in canned estimators.\r\n> ... (truncated)\n
\n
\nChangelog\n\n*Sourced from [tensorflow-gpu's changelog](https://github.com/tensorflow/tensorflow/blob/master/RELEASE.md).*\n\n> # Release 1.15.2\n> \n> ## Bug Fixes and Other Changes\n> * Fixes a security vulnerability where converting a Python string to a `tf.float16` value produces a segmentation fault ([CVE-2020-5215](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-5215))\n> * Updates `curl` to `7.66.0` to handle [CVE-2019-5482](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5482) and [CVE-2019-5481](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-5481)\n> * Updates `sqlite3` to `3.30.01` to handle [CVE-2019-19646](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-19646), [CVE-2019-19645](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-19645) and [CVE-2019-16168](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-16168)\n> \n> \n> # Release 2.1.0\n> \n> TensorFlow 2.1 will be the last TF release supporting Python 2. Python 2 support [officially ends an January 1, 2020](https://www.python.org/dev/peps/pep-0373/#update). [As announced earlier](https://groups.google.com/a/tensorflow.org/d/msg/announce/gVwS5RC8mds/dCt1ka2XAAAJ), TensorFlow will also stop supporting Python 2 starting January 1, 2020, and no more releases are expected in 2019.\n> \n> ## Major Features and Improvements\n> * The `tensorflow` pip package now includes GPU support by default (same as `tensorflow-gpu`) for both Linux and Windows. This runs on machines with and without NVIDIA GPUs. `tensorflow-gpu` is still available, and CPU-only packages can be downloaded at `tensorflow-cpu` for users who are concerned about package size.\n> * **Windows users:** Officially-released `tensorflow` Pip packages are now built with Visual Studio 2019 version 16.4 in order to take advantage of the new `/d2ReducedOptimizeHugeFunctions` compiler flag. To use these new packages, you must install \"Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019\", available from Microsoft's website [here](https://support.microsoft.com/help/2977003/the-latest-supported-visual-c-downloads).\n> * This does not change the minimum required version for building TensorFlow from source on Windows, but builds enabling `EIGEN_STRONG_INLINE` can take over 48 hours to compile without this flag. Refer to `configure.py` for more information about `EIGEN_STRONG_INLINE` and `/d2ReducedOptimizeHugeFunctions`.\n> * If either of the required DLLs, `msvcp140.dll` (old) or `msvcp140_1.dll` (new), are missing on your machine, `import tensorflow` will print a warning message.\n> * The `tensorflow` pip package is built with CUDA 10.1 and cuDNN 7.6.\n> * `tf.keras`\n> * Experimental support for mixed precision is available on GPUs and Cloud TPUs. See [usage guide](https://www.tensorflow.org/guide/keras/mixed_precision).\n> * Introduced the `TextVectorization` layer, which takes as input raw strings and takes care of text standardization, tokenization, n-gram generation, and vocabulary indexing. 
See this [end-to-end text classification example](https://colab.research.google.com/drive/1RvCnR7h0_l4Ekn5vINWToI9TNJdpUZB3).\n> * Keras `.compile` `.fit` `.evaluate` and `.predict` are allowed to be outside of the DistributionStrategy scope, as long as the model was constructed inside of a scope.\n> * Experimental support for Keras `.compile`, `.fit`, `.evaluate`, and `.predict` is available for Cloud TPUs, Cloud TPU, for all types of Keras models (sequential, functional and subclassing models).\n> * Automatic outside compilation is now enabled for Cloud TPUs. This allows `tf.summary` to be used more conveniently with Cloud TPUs.\n> * Dynamic batch sizes with DistributionStrategy and Keras are supported on Cloud TPUs.\n> * Support for `.fit`, `.evaluate`, `.predict` on TPU using numpy data, in addition to `tf.data.Dataset`.\n> * Keras reference implementations for many popular models are available in the TensorFlow [Model Garden](https://github.com/tensorflow/models/tree/master/official).\n> * `tf.data`\n> * Changes rebatching for `tf.data datasets` + DistributionStrategy for better performance. Note that the dataset also behaves slightly differently, in that the rebatched dataset cardinality will always be a multiple of the number of replicas.\n> * `tf.data.Dataset` now supports automatic data distribution and sharding in distributed environments, including on TPU pods.\n> * Distribution policies for `tf.data.Dataset` can now be tuned with 1. `tf.data.experimental.AutoShardPolicy(OFF, AUTO, FILE, DATA)` 2. `tf.data.experimental.ExternalStatePolicy(WARN, IGNORE, FAIL)`\n> * `tf.debugging`\n> * Add `tf.debugging.enable_check_numerics()` and `tf.debugging.disable_check_numerics()` to help debugging the root causes of issues involving infinities and `NaN`s.\n> * `tf.distribute`\n> * Custom training loop support on TPUs and TPU pods is avaiable through `strategy.experimental_distribute_dataset`, `strategy.experimental_distribute_datasets_from_function`, `strategy.experimental_run_v2`, `strategy.reduce`.\n> * Support for a global distribution strategy through `tf.distribute.experimental_set_strategy(),` in addition to `strategy.scope()`.\n> * `TensorRT`\n> * [TensorRT 6.0](https://developer.nvidia.com/tensorrt#tensorrt-whats-new) is now supported and enabled by default. This adds support for more TensorFlow ops including Conv3D, Conv3DBackpropInputV2, AvgPool3D, MaxPool3D, ResizeBilinear, and ResizeNearestNeighbor. In addition, the TensorFlow-TensorRT python conversion API is exported as `tf.experimental.tensorrt.Converter`.\n> * Environment variable `TF_DETERMINISTIC_OPS` has been added. When set to \"true\" or \"1\", this environment variable makes `tf.nn.bias_add` operate deterministically (i.e. reproducibly), but currently only when XLA JIT compilation is *not* enabled. Setting `TF_DETERMINISTIC_OPS` to \"true\" or \"1\" also makes cuDNN convolution and max-pooling operate deterministically. This makes Keras Conv\\*D and MaxPool\\*D layers operate deterministically in both the forward and backward directions when running on a CUDA-enabled GPU.\n> \n> ## Breaking Changes\n> * Deletes `Operation.traceback_with_start_lines` for which we know of no usages.\n> * Removed `id` from `tf.Tensor.__repr__()` as `id` is not useful other than internal debugging.\n> * Some `tf.assert_*` methods now raise assertions at operation creation time if the input tensors' values are known at that time, not during the `session.run()`. 
This only changes behavior when the graph execution would have resulted in an error. When this happens, a noop is returned and the input tensors are marked non-feedable. In other words, if they are used as keys in `feed_dict` argument to `session.run()`, an error will be raised. Also, because some assert ops don't make it into the graph, the graph structure changes. A different graph can result in different per-op random seeds when they are not given explicitly (most often).\n> * The following APIs are not longer experimental: `tf.config.list_logical_devices`, `tf.config.list_physical_devices`, `tf.config.get_visible_devices`, `tf.config.set_visible_devices`, `tf.config.get_logical_device_configuration`, `tf.config.set_logical_device_configuration`.\n> * `tf.config.experimentalVirtualDeviceConfiguration` has been renamed to `tf.config.LogicalDeviceConfiguration`.\n> * `tf.config.experimental_list_devices` has been removed, please use\n> `tf.config.list_logical_devices`.\n> \n> ## Bug Fixes and Other Changes\n> ... (truncated)\n
\n
\nCommits\n\n- [`5d80e1e`](https://github.com/tensorflow/tensorflow/commit/5d80e1e8e6ee999be7db39461e0e79c90403a2e4) Merge pull request [#36215](https://github-redirect.dependabot.com/tensorflow/tensorflow/issues/36215) from tensorflow-jenkins/version-numbers-1.15.2-8214\n- [`71e9d8f`](https://github.com/tensorflow/tensorflow/commit/71e9d8f8eddfe283943d62554d4c676bdaf79372) Update version numbers to 1.15.2\n- [`e50120e`](https://github.com/tensorflow/tensorflow/commit/e50120ee34e1e29252f4cbc8ac4cd328e9a9840c) Merge pull request [#36214](https://github-redirect.dependabot.com/tensorflow/tensorflow/issues/36214) from tensorflow-jenkins/relnotes-1.15.2-2203\n- [`1a7e9fb`](https://github.com/tensorflow/tensorflow/commit/1a7e9fbf670ef9d03b2f8fdf1ae2276b2d100fab) Releasing 1.15.2 instead of 1.15.1\n- [`85f7aab`](https://github.com/tensorflow/tensorflow/commit/85f7aab93b65ed1fcc589f54d40793b1afb65bf4) Insert release notes place-fill\n- [`e75a6d6`](https://github.com/tensorflow/tensorflow/commit/e75a6d6e6e20df83f19e72e04c7984587d768bd3) Merge pull request [#36190](https://github-redirect.dependabot.com/tensorflow/tensorflow/issues/36190) from tensorflow/mm-r1.15-fix-v2-build\n- [`a6d8973`](https://github.com/tensorflow/tensorflow/commit/a6d897351e483dfd0418e5cad2900ad9ef24188c) Use `config=v1` as this is `r1.15` branch.\n- [`fdb8589`](https://github.com/tensorflow/tensorflow/commit/fdb85890df5df1e6b3867c842aabb44f561b446d) Merge pull request [#35912](https://github-redirect.dependabot.com/tensorflow/tensorflow/issues/35912) from tensorflow-jenkins/relnotes-1.15.1-31298\n- [`a6051e8`](https://github.com/tensorflow/tensorflow/commit/a6051e8094c5e7d26ec9573a740246c92e4057a2) Add CVE number for main patch\n- [`360b2e3`](https://github.com/tensorflow/tensorflow/commit/360b2e318af2db59152e35be31c8aab1fb164088) Merge pull request [#34532](https://github-redirect.dependabot.com/tensorflow/tensorflow/issues/34532) from ROCmSoftwarePlatform/r1.15-rccl-upstream-patch\n- Additional commits viewable in [compare view](https://github.com/tensorflow/tensorflow/compare/v1.14.0...v1.15.2)\n
\n
\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=tensorflow-gpu&package-manager=pip&previous-version=1.14.0&new-version=1.15.2)](https://help.github.com/articles/configuring-automated-security-fixes)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually\n- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2020-03-05T01:24:23Z", + "updated_at": "2020-09-25T22:33:46Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #68.", + "created_at": "2020-09-25T22:33:44Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Installation help", + "body": "**Describe the bug**\r\nAfter entering the command\r\n\r\ngit clone -b SimBA_no_DLC https://github.com/sgoldenlab/simba.git\r\n\r\nfiles are updated correctly, then after the command\r\n\r\npip install -r simba/SimBA/requirements.txt\r\n\r\nI receive the error\r\n\r\nERROR: Cannot uninstall 'PyYAML'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.\r\n\r\nUnder the correct directory, then running the command\r\n\r\npython SimBA.py \r\n\r\nproduces the following\r\n\r\nTraceback (most recent call last):\r\n File \"SimBA.py\", line 8, in \r\n from tkinter_functions import *\r\nModuleNotFoundError: No module named 'tkinter_functions'\r\n\r\nAny advice would be appreciated.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. iOS] Windows 10\r\n - Browser [e.g. chrome, safari] Chrome\r\n - Version [e.g. 22]\r\n\r\n**Smartphone (please complete the following information):**\r\n - Device: [e.g. iPhone6]\r\n - OS: [e.g. iOS8.1]\r\n - Browser [e.g. stock browser, safari]\r\n - Version [e.g. 22]\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n", + "user": "DanePatey", + "reaction_cnt": 0, + "created_at": "2020-02-26T17:28:29Z", + "updated_at": "2020-03-18T17:38:28Z", + "author": "DanePatey", + "comments": [ + { + "body": "Hi @DanePatey - to solve this, there seems to be two option: https://stackoverflow.com/questions/49911550/how-to-upgrade-disutils-package-pyyaml\r\n\r\nI suggest running: \r\npip install --upgrade pip==9.0.1\r\n\r\nand then try again with: pip install -r simba/SimBA/requirements.txt\r\n\r\nLet me know how it goes!", + "created_at": "2020-02-26T19:33:36Z", + "author": "sgoldenlab" + }, + { + "body": "Hi @sgoldenlab \r\nRunning \r\n\r\npip install --upgrade pip==9.0.1\r\n\r\ngives the following\r\n\r\nRequirement already up-to-date: pip==9.0.1 in c:\\users\\lilab.uw-mxl8522rkv\\anaconda3\\lib\\site-packages\r\n\r\nSo the package is already up to date. \r\nNow when running \r\npython SimBA.py i get the following as an output\r\n\r\nTraceback (most recent call last):\r\n File \"SimBA.py\", line 8, in \r\n from tkinter_functions import *\r\n File \"C:\\Users\\LiLab.UW-MXL8522RKV\\Anaconda3\\simba\\simba\\tkinter_functions.py\", line 4, in \r\n import cv2\r\n File \"C:\\Users\\LiLab.UW-MXL8522RKV\\Anaconda3\\lib\\site-packages\\cv2\\__init__.py\", line 3, in \r\n from .cv2 import *\r\nImportError: DLL load failed: The specified module could not be found.", + "created_at": "2020-02-28T17:47:58Z", + "author": "DanePatey" + }, + { + "body": "@DanePatey - you seem to be missing OpenCV . 
Run pip install opencv-python.\r\n\r\nIf the error persist, check this which the discuss the error in FAQ: https://pypi.org/project/opencv-python/\r\n\r\n![image](https://user-images.githubusercontent.com/50497030/75697724-59b7f580-5c62-11ea-99dc-2dcdcd72a168.png)\r\n\r\n", + "created_at": "2020-03-02T16:46:22Z", + "author": "sgoldenlab" + }, + { + "body": "@sgoldenlab \r\n\r\nSorry for the late reply back.\r\nafter running\r\npip install opencv-python\r\nI get a return of the requirement is already satisfied.\r\nThen running \r\npython simba.py\r\nstill gives the same issue.\r\nAm I missing something where I need to have done all of this in a particular conda environment or something?", + "created_at": "2020-03-06T21:51:36Z", + "author": "DanePatey" + }, + { + "body": "Did you pip install in your conda environment, or did you do in the windows terminal outside of the conda environment to install opencv? Be sure you are in your conda environment. \r\n\r\nAlso, in your conda environment, you can run \"conda list\" - make sure that you can see opencv in the printed list. ", + "created_at": "2020-03-07T00:16:10Z", + "author": "sgoldenlab" + }, + { + "body": "Closing issue.", + "created_at": "2020-03-18T17:38:28Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Frames input folder files empty", + "body": "Hello! :)\r\nI am having issues with labelling the behaviours. Whenever I try to do it, it seems like the GUI does not load the extracted frames correctly, leaving me with a blank space where the video frames should be presented. I have tried extracting the frames again, but it tells me the frames have already been extracted, despite the fact that the folders inside input folder are empty. Any ideas on how to solve this? I am using the no DLC version, and I obtained the csvs from colab. \r\n\r\nThank you!\r\nFilipa\r\n", + "user": "2909ft", + "reaction_cnt": 0, + "created_at": "2020-01-28T10:46:57Z", + "updated_at": "2021-01-07T11:53:10Z", + "author": "2909ft", + "comments": [ + { + "body": "How did you extract your frames? Please follow the steps below and see if it works.\r\n\r\n1. Please make sure that the `project_folder/videos/` folder contains videos.\r\n\r\n2. Click on File --> Load project --> Load Project .ini and load the `project_config.ini`.\r\n\r\n3. Under \"Extract further frames into project folder\", click `Extract frames`.\r\n\r\nIf you do not have any videos in the `project_folder/videos` folder, you can import the videos in the second tab of \"Load project\".", + "created_at": "2020-01-28T18:31:09Z", + "author": "inoejj" + }, + { + "body": "Hello inoejj! Thanks for the reply!\r\n\r\nYes, I followed exactly those steps when I first tried to extract the frames. The videos folder has all the videos inside. In fact, I could even watch the videos when I attempted to label the frames, by clicking on the \"open video\" button on the gui.\r\n\r\nBest regards,\r\nFillipa", + "created_at": "2020-01-28T19:47:53Z", + "author": "2909ft" + }, + { + "body": "Hi @2909ft! I can't recreate the issue at the moment but we should be able to solve it.\r\n\r\n1) Do you have ffmpeg installed? (install instructions here: https://m.wikihow.com/Install-FFmpeg-on-Windows)\r\n\r\n2) Delete the empty folder that should contain the frames (e.g., project_folder\\frames\\input\\VideoName), and click on extract frames again. What is printed out in the SimBA main console window? \r\n\r\n3) To extract frames for single videos, you can use the tools menu. 
You can try this, and if this works we do not have an ffmpeg issue. Click on Tools--> Extract frames--> Extract frames. Select the path to your video and click on *extract all frames*. Does this work? \r\n\r\nThanks!\r\n\r\n", + "created_at": "2020-01-28T20:06:08Z", + "author": "sgoldenlab" + }, + { + "body": "Ah yes, that might be the issue! I switched computers yesterday and forgot to install ffmpeg. That should fix the issue, I will try it tomorrow.", + "created_at": "2020-01-28T20:33:27Z", + "author": "2909ft" + }, + { + "body": "@2909ft Are you able to resolve the issue?\r\n\r\n", + "created_at": "2020-01-29T20:28:00Z", + "author": "sgoldenlab" + }, + { + "body": "Yes I was, thank you! \r\nHowever, now I have another issue: whenever I try to train the model, I get the following error message \"Error: the dataframe does not contain any target annotations. Please check the csv files in the project_folder/csv/target_inserted folder\". There are in fact no csv files inside that folder. Do I have to label all the frames for the generation of the csv files to work? I labelled around 200.", + "created_at": "2020-01-29T21:22:49Z", + "author": "2909ft" + }, + { + "body": "Hi @2909ft - you do not have to label all of the frames. I highly recommend it, but it will depend on your specific case. If you do not look at all of the frames using the behavior labelling tool, there may be some frames that contain the behavior of interest, that will automatically labelled as not containing the behavior of interest, and this will disrupt the classifier. \r\n\r\nWhen you finished labelling the 200 frames, there should be a new csv generated in the target inserted folder. This is generated when you click on the \"Save and quit\" button at the bottom of the label frames interface to save the data: https://github.com/sgoldenlab/simba/blob/master/docs/labelling_aggression_tutorial.md. Make sure you click this button. \r\n\r\nAlso, can you see your .csv files in the festures_extracted folder? ", + "created_at": "2020-01-30T02:07:37Z", + "author": "sronilsson" + }, + { + "body": "Thank you for the reply! My features_extracted folder is empty indeed, but I am unsure why since I clicked on \"extract features\". I also do not have a \"save and quit\" button, I have a \"generate/save csv\", but I guess that doesn't make much of a difference.", + "created_at": "2020-01-30T09:34:53Z", + "author": "2909ft" + }, + { + "body": "You're correct - \"Generate/save csv\" is what the button should read, \r\n\r\nAn empty feature_extracted folder suggests an earlier step didn't go as planned.\r\n\r\nIn your csv folder, which subfolders do contain csv files - can you see your csv files in your \"input\" subfolder and your two different outlier correction subfolders?", + "created_at": "2020-01-30T16:14:33Z", + "author": "sronilsson" + }, + { + "body": "Only the input subfolder contains csv files, both of the outlier correction subfolders are empty.", + "created_at": "2020-01-30T16:46:12Z", + "author": "2909ft" + }, + { + "body": "That would be the issue - did you do the outlier correction step? This is required for the rest of the steps to work: https://github.com/sgoldenlab/simba/blob/master/docs/tutorial.md#step-4-outlier-correction\r\n\r\nIf you did, and it didn't work as planned, what was printed out in the main SimBA terminal window when you clicked on 'Correct outlier'? 
\r\n", + "created_at": "2020-01-30T19:56:35Z", + "author": "sronilsson" + }, + { + "body": "I had only selected movement criteria for correction, not location, I thought it would work with just one of them. I have now selected for both, and there are new csvs on the subfolders, apologies for the misconception :) \r\nHowever, the extraction still does not output anything. On the terminal it is simply displayed \"Pose-estimation body part setting for feature extraction: 8\" and nothing else.", + "created_at": "2020-01-30T21:58:11Z", + "author": "2909ft" + }, + { + "body": "Thanks!\r\n\r\n1) To confirm, when you created the project, did you set the 'Animal Settings\" to \"1 animal, 8bp\" like this?\r\n\r\n![image](https://user-images.githubusercontent.com/34761092/73498191-6dc1bc00-4371-11ea-840c-9727bce2cc5c.png)\r\n\r\n2) In the main terminal window (the terminal window you used to start SimBA and were you wrote \"python Simba.py\") do you see any messages being printed after clicking on \"Extract Features\"? \r\n", + "created_at": "2020-01-30T23:03:51Z", + "author": "sronilsson" + }, + { + "body": "Thank you!\r\nYes, I set the animal settings to 1 animal 8 points. \r\n\r\nYes, there's the following error message. However, the file exists in that directory (as you can see from the screenshot).\r\n\"Screen\r\n\r\n", + "created_at": "2020-01-30T23:28:13Z", + "author": "2909ft" + }, + { + "body": "Thanks - the non-16 body part feature extraction scripts contained typos - I'm sorry about that. To get passed this error:\r\n\r\ni) In he features_scripts folder in Simba and, open \"extract_features_8bp.py\". On line 18, you should see: vidInfPath = os.path.join(vidInfPath, 'project_folder', 'logs')\r\n\r\nChange this line to: vidInfPath = os.path.join(vidInfPath,'logs'), i.e, remove 'project_folder' from this line. \r\n\r\nLet me know how it goes! ", + "created_at": "2020-01-31T00:36:26Z", + "author": "sronilsson" + }, + { + "body": "I have now deleted the 'project_folder' from the script, and now I get a different error message ahaha \"Error: make sure all the videos that are going to be analyzed are represented in the project_folder/logs/video_info.csv file\", but they are (see screenshot) \r\nOn the python terminal it says \"TypeError: cannot convert the series to .\r\nI have noticed that the column corresponding to the distance is blank, but the pixels/mm was calculated correctly. Could this be the issue?\r\n![image](https://user-images.githubusercontent.com/57539512/73530603-1b3bda80-4419-11ea-895a-37743878cbc2.png)\r\n", + "created_at": "2020-01-31T10:02:08Z", + "author": "2909ft" + }, + { + "body": "Hi @2909ft - this may be another bug in SimBA, in handling .avi's. To check if this is the case there are two options. I can't see the entire column A in your screenshot of the video_config, but if you expand it, do you see the entire filename including the file endings? \r\n\r\nIf you see the file endings, can you remove the \".avi\" from each row and try again?\r\n\r\nAlternatively, could you try and replace the .avi's with mp4 versions of the videos and try to extract features again? There is a tool in SimBA in the tools menu to convert video file format. \r\n\r\nThanks! ", + "created_at": "2020-01-31T17:37:39Z", + "author": "sronilsson" + }, + { + "body": "Hey!\r\n\r\nI do not see the avi ending at the end, it is simply the name of the file. 
I have tried with the mp4 format and it did not work either", + "created_at": "2020-01-31T18:11:52Z", + "author": "2909ft" + }, + { + "body": "Thanks for testing this out - the \"TypeError: cannot convert the series to error comes from the code finding multiple rows in the video_config file, when it should only find one row. The code looks at each of your file names in your outlier_corrected_movevement_location folder, and finds the matching row in the video_config file, using the Video column to get a match. Things could go wrong if the filenames aren't matching. To check whats happening, could you open \"extract_features_8bp.py\", and after this line (line 55): \r\n\r\ncurrVideoSettings = vidinfDf.loc[vidinfDf['Video'] == currVidName] \r\n\r\ninsert a new line:\r\n\r\nprint(currVideoSettings)\r\n\r\ntry again, and then tell me whats printed out in the main SimBA console. \r\n\r\n\r\n", + "created_at": "2020-02-01T14:21:24Z", + "author": "sronilsson" + }, + { + "body": "Hey! Thanks for all the help! Once I did that, simba console output this. I believe the file names are not matching (as you can see from the screenshot) could that be the issue?\r\n![image](https://user-images.githubusercontent.com/57539512/73653716-221d5400-468a-11ea-8f44-ced299d071cb.png)\r\n", + "created_at": "2020-02-03T12:37:27Z", + "author": "2909ft" + }, + { + "body": "That's it! SimBA should fix the DLC csv filenames (i.e. remove the long part of the filename that specify the model and the iterations the file was generated by) when imported through the GUI. Try and rename the filenames to match the video_info file and re-run feature extraction and let me know if it works", + "created_at": "2020-02-03T16:27:57Z", + "author": "sronilsson" + }, + { + "body": "Following up, has this resolved the issue?", + "created_at": "2020-02-06T21:02:51Z", + "author": "sgoldenlab" + }, + { + "body": "Yes it did, thank you! I am now trying to get my head around the hyperparameters. Thanks for all the help! ", + "created_at": "2020-02-06T21:14:46Z", + "author": "2909ft" + }, + { + "body": "@2909ft - a heads up, the code isn't optimized for single animals and 8 body-parts yet, it's something we're working on - to have SimBA accept any body part configuration. So if you happen to bump into any issues let us know and we'll work though them. ", + "created_at": "2020-02-06T21:31:24Z", + "author": "sronilsson" + }, + { + "body": "Awesome thank you!", + "created_at": "2020-02-06T21:35:41Z", + "author": "2909ft" + }, + { + "body": "Whenever i extract frame rates from a video dataset folder to a train_1 folder. After extracting frame rates the folder is empty where as pycharm shows it has been done successfully. Any one aware of this issue? 
My code\r\nfor i in tqdm(range(train.shape[0])):\r\n count = 0\r\n videoFile = train['video_name'][i]\r\n cap = cv2.VideoCapture('UCF/'+videoFile.split(' ')[0].split('/')[1]) # capturing the video from the given path\r\n frameRate = cap.get(5) #frame rate\r\n x=1\r\n while(cap.isOpened()):\r\n frameId = cap.get(1) #current frame number\r\n ret, frame = cap.read()\r\n if (ret != True):\r\n break\r\n if (frameId % math.floor(frameRate) == 0):\r\n # storing the frames in a new folder named train_1\r\n filename =r\"D:\\New Projects Pycharm\\Videoclassi\\tttt/\" + videoFile.split('/')[1].split(' ')[0] +\"_frame%d.jpg\" % count;count+=1\r\n cv2.imwrite(filename, frame)\r\n cap.release()", + "created_at": "2021-01-07T11:53:10Z", + "author": "Qasimster" + } + ] + }, + { + "title": "install issue/question", + "body": "First of all, thank you for developing SimBA!\r\n\r\nI'm having an install issue that I cant seem to get around with dlc-windowsGPU (windows environment)\r\n\r\n(dlc-windowsGPU) C:\\Users\\xxxxx\\simba\\simba>simba.py\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\maplight\\simba\\simba\\SimBA.py\", line 5, in \r\n import deeplabcut\r\nModuleNotFoundError: No module named 'deeplabcut'\r\n\r\nI have all the proper dependencies installed, I'm assuming it's a path I'm missing somewhere. using Anaconda. I have DeepLab installed:\r\n\r\n(dlc-windowsGPU) C:\\Users\\xxxxx\\DeepLabCut\\\r\n\r\nThank you in advance for any guidance. ", + "user": "eventhorizonzero", + "reaction_cnt": 0, + "created_at": "2020-01-24T12:31:06Z", + "updated_at": "2020-01-26T18:59:34Z", + "author": "eventhorizonzero", + "comments": [ + { + "body": "Hi @eventhorizonzero! I'm working on finding answer to this. Meanwhile, can you run `conda list` in your dlc-windowsGPU environment, paste the output here, and make sure DeepLabCut is visible in list. ", + "created_at": "2020-01-24T19:01:50Z", + "author": "sgoldenlab" + }, + { + "body": "@eventhorizonzero There are multiple ways to solve this issue.\r\n\r\n1. You can install deeplabcut using pip install deeplabcut in the terminal.\r\n\r\n2. This is probably the better way. Use the terminal in Anaconda(dlc-windowsGPU) and try to run SimBA via the terminal ( python SimBA.py).\r\n\r\nPlease let me know if this resolve your issue.", + "created_at": "2020-01-24T20:34:38Z", + "author": "inoejj" + }, + { + "body": "It indeed was missing from the \"conda list\" output. after running the pip install it was listed and I was able to run. thank you to you both for the assistance! issue resolved!", + "created_at": "2020-01-25T13:25:56Z", + "author": "eventhorizonzero" + }, + { + "body": "Great! Issue closed.", + "created_at": "2020-01-26T18:59:34Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "ROI feature?", + "body": "Hello everybody!\r\n\r\nFirst of all, thank you for developing SimBA :) I was wondering if in the future it would be possible to add a feature that would allow users to define a ROI and use that feature as part of the classifier training. \r\n\r\nThanks!\r\nFilipa", + "user": "2909ft", + "reaction_cnt": 0, + "created_at": "2020-01-22T17:32:29Z", + "updated_at": "2020-03-05T19:28:16Z", + "author": "2909ft", + "comments": [ + { + "body": ":) that and more in regard to ROIs coming in the next release. ROIs in general have been a heavily requested feature and we are fully incorporating them into the SimBA pipeline, including analysis and visualization.\r\n\r\nThat said, please provide feedback on how you would like ROIs to be incorporated. 
Any thoughts welcomed.", + "created_at": "2020-01-22T18:07:33Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the reply!\r\n\r\nMy idea was to use the ROI to help me define what I consider as \"investigation\" on my analysis; ie: whenever the mouse is within a certain distance of the ROI, I would label those frames as \"investigation\"", + "created_at": "2020-01-22T21:27:08Z", + "author": "2909ft" + }, + { + "body": "Thank for the explanation. Yes, that can be easily done with the new ROI function, but depending on how you want to incorporate ROIs into your analysis could also be done without ML, posthoc, on a ML classifier dataset. Lots of ways to accomplish that goal, looking forward to seeing how you decide to approach it.\r\n\r\nI am going to leave this Issue open until we release the new ROI module so that others can comment as well if they like.", + "created_at": "2020-01-23T17:58:11Z", + "author": "sgoldenlab" + }, + { + "body": "Thank you for the reply :) Is there any documentation somewhere that explains how to do that?\r\n\r\nBest regards,\r\nFilipa", + "created_at": "2020-01-24T09:05:34Z", + "author": "2909ft" + }, + { + "body": "There will be detailed documentation once we release it, not sure on exact time frame, but we will send an email to everyone on the listserve with the information when the release happens.\r\n\r\nHere is a teaser! \r\n![ROI](https://user-images.githubusercontent.com/50497030/73094925-72c8cc00-3e96-11ea-9375-99df478d952a.gif)\r\n", + "created_at": "2020-01-24T18:44:27Z", + "author": "sgoldenlab" + }, + { + "body": "Awesome! Thanks a lot!", + "created_at": "2020-01-28T10:37:29Z", + "author": "2909ft" + }, + { + "body": "Version 1.1 now includes full ROI support. Please see documentation here: https://github.com/sgoldenlab/simba/blob/master/docs/ROI_tutorial.md", + "created_at": "2020-03-05T19:28:16Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "portability", + "body": "I try to run SimBA.py on OpenSUSE 15.1 x64 and ran into various portability errors. Mostly it's case-sensitivity (ie TheGoldenLab.PNG -> TheGoldenLab.png) and various instances of '\\\\' in the code. \r\nAs both linux, mac and windows works fine with '/' as directory delimiter, I think it would be nice to change the \\\\'s to /\r\n\r\n\r\n", + "user": "rkoot", + "reaction_cnt": 0, + "created_at": "2020-01-20T12:02:29Z", + "updated_at": "2020-01-21T20:50:53Z", + "author": "rkoot", + "comments": [ + { + "body": "Thanks for pointing this out @rkoot - very helpful - it has been added to the list ", + "created_at": "2020-01-21T03:46:41Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "3d-coordinates", + "body": "Is it possible to use 3d coordinates with this software? theoretically it should make it more accurate, especially for videos shot at non-90* angles.", + "user": "PolarBean", + "reaction_cnt": 0, + "created_at": "2020-01-12T02:16:08Z", + "updated_at": "2020-01-16T00:42:57Z", + "author": "PolarBean", + "comments": [ + { + "body": "This is definitely something we have thought about, and it is on the list of potential future additions, but at this time no. 
SimBA gets good accuracy with a single camera at 90 degrees, and for initial implementation we are keeping it simple.\r\n\r\nThere are a number of other 3D approaches (Anderson Lab approach in PNAS 2015, http://www.3dtracker.org/, https://livemousetracker.org/) that use a 3D approach well, although they tend to use more specialized hardware (depth sensing, RFID) that we are hoping to avoid.\r\n\r\nWe are keeping track of requests though and have added this to the list. Thanks!\r\n\r\nAlthough if people want to help add this feature and have datasets already available, we are down to look into it more seriously!", + "created_at": "2020-01-13T18:08:15Z", + "author": "sgoldenlab" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 6.2.0 in /simba", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 6.2.0.\n
\nRelease notes\n\n*Sourced from [pillow's releases](https://github.com/python-pillow/Pillow/releases).*\n\n> ## 6.2.0\n> https://pillow.readthedocs.io/en/stable/releasenotes/6.2.0.html\n> \n> ## 6.1.0\n> https://pillow.readthedocs.io/en/stable/releasenotes/6.1.0.html\n> \n> ## 6.0.0\n> No release notes provided.\n
\n
\nChangelog\n\n*Sourced from [pillow's changelog](https://github.com/python-pillow/Pillow/blob/master/CHANGES.rst).*\n\n> 6.2.0 (2019-10-01)\n> ------------------\n> \n> - Catch buffer overruns [#4104](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4104)\n> [radarhere]\n> \n> - Initialize rows_per_strip when RowsPerStrip tag is missing [#4034](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4034)\n> [cgohlke, radarhere]\n> \n> - Raise error if TIFF dimension is a string [#4103](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4103)\n> [radarhere]\n> \n> - Added decompression bomb checks [#4102](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4102)\n> [radarhere]\n> \n> - Fix ImageGrab.grab DPI scaling on Windows 10 version 1607+ [#4000](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4000)\n> [nulano, radarhere]\n> \n> - Corrected negative seeks [#4101](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4101)\n> [radarhere]\n> \n> - Added argument to capture all screens on Windows [#3950](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/3950)\n> [nulano, radarhere]\n> \n> - Updated warning to specify when Image.frombuffer defaults will change [#4086](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4086)\n> [radarhere]\n> \n> - Changed WindowsViewer format to PNG [#4080](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4080)\n> [radarhere]\n> \n> - Use TIFF orientation [#4063](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4063)\n> [radarhere]\n> \n> - Raise the same error if a truncated image is loaded a second time [#3965](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/3965)\n> [radarhere]\n> \n> - Lazily use ImageFileDirectory_v1 values from Exif [#4031](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4031)\n> [radarhere]\n> \n> - Improved HSV conversion [#4004](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4004)\n> [radarhere]\n> \n> - Added text stroking [#3978](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/3978)\n> [radarhere, hugovk]\n> \n> - No more deprecated bdist_wininst .exe installers [#4029](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4029)\n> [hugovk]\n> \n> - Do not allow floodfill to extend into negative coordinates [#4017](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4017)\n> [radarhere]\n> ... (truncated)\n
\n
\nCommits\n\n- [`8a30d13`](https://github.com/python-pillow/Pillow/commit/8a30d135378dc6a1c3c08fa4bb9fbc15370feedf) Updated CHANGES.rst [ci skip]\n- [`75602d1`](https://github.com/python-pillow/Pillow/commit/75602d12e1b6f2152ab5bd1acfb62a9c8a4a0432) 6.2.0 version bump\n- [`4756af9`](https://github.com/python-pillow/Pillow/commit/4756af9c1027ae620eaa9538d6b0dd9b0e844fca) Updated CHANGES.rst [ci skip]\n- [`cc16025`](https://github.com/python-pillow/Pillow/commit/cc16025e234b7a7a4dd3a86d2fdc0980698db9cc) Merge pull request [#4104](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4104) from radarhere/overrun\n- [`fb84701`](https://github.com/python-pillow/Pillow/commit/fb8470187a45043c33b1c75e7dca48b38d5db7a6) Merge pull request [#4034](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4034) from cgohlke/patch-1\n- [`b9693a5`](https://github.com/python-pillow/Pillow/commit/b9693a51c99c260bd66d1affeeab4a226cf7e5a5) Merge pull request [#4103](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4103) from radarhere/dimension\n- [`f228d0c`](https://github.com/python-pillow/Pillow/commit/f228d0ccbf6bf9392d7fcd51356ef2cfda80c75a) Merge pull request [#4102](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4102) from radarhere/decompression\n- [`aaf2c42`](https://github.com/python-pillow/Pillow/commit/aaf2c421564fcf96bd030487f09b648f7feb7b67) Merge pull request [#4000](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4000) from nulano/dpi_fix\n- [`b36c1bc`](https://github.com/python-pillow/Pillow/commit/b36c1bc943d554ba223086c7efb502d080f73905) Merge pull request [#4101](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4101) from radarhere/negative_seek\n- [`9a977b9`](https://github.com/python-pillow/Pillow/commit/9a977b975cd871ef9a9128b72414c0de3a292591) Raise error if dimension is a string\n- Additional commits viewable in [compare view](https://github.com/python-pillow/Pillow/compare/5.4.1...6.2.0)\n
\n
\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=6.2.0)](https://help.github.com/articles/configuring-automated-security-fixes)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot ignore this [patch|minor|major] version` will close this PR and stop Dependabot creating any more for this minor/major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2020-01-10T18:01:56Z", + "updated_at": "2020-01-10T19:05:18Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Looks like pillow is up-to-date now, so this is no longer needed.", + "created_at": "2020-01-10T19:05:15Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Bump pillow from 5.4.1 to 6.2.0", + "body": "Bumps [pillow](https://github.com/python-pillow/Pillow) from 5.4.1 to 6.2.0.\n
\nRelease notes\n\n*Sourced from [pillow's releases](https://github.com/python-pillow/Pillow/releases).*\n\n> ## 6.2.0\n> https://pillow.readthedocs.io/en/stable/releasenotes/6.2.0.html\n> \n> ## 6.1.0\n> https://pillow.readthedocs.io/en/stable/releasenotes/6.1.0.html\n> \n> ## 6.0.0\n> No release notes provided.\n
\n
\nChangelog\n\n*Sourced from [pillow's changelog](https://github.com/python-pillow/Pillow/blob/master/CHANGES.rst).*\n\n> 6.2.0 (2019-10-01)\n> ------------------\n> \n> - Catch buffer overruns [#4104](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4104)\n> [radarhere]\n> \n> - Initialize rows_per_strip when RowsPerStrip tag is missing [#4034](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4034)\n> [cgohlke, radarhere]\n> \n> - Raise error if TIFF dimension is a string [#4103](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4103)\n> [radarhere]\n> \n> - Added decompression bomb checks [#4102](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4102)\n> [radarhere]\n> \n> - Fix ImageGrab.grab DPI scaling on Windows 10 version 1607+ [#4000](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4000)\n> [nulano, radarhere]\n> \n> - Corrected negative seeks [#4101](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4101)\n> [radarhere]\n> \n> - Added argument to capture all screens on Windows [#3950](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/3950)\n> [nulano, radarhere]\n> \n> - Updated warning to specify when Image.frombuffer defaults will change [#4086](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4086)\n> [radarhere]\n> \n> - Changed WindowsViewer format to PNG [#4080](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4080)\n> [radarhere]\n> \n> - Use TIFF orientation [#4063](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4063)\n> [radarhere]\n> \n> - Raise the same error if a truncated image is loaded a second time [#3965](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/3965)\n> [radarhere]\n> \n> - Lazily use ImageFileDirectory_v1 values from Exif [#4031](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4031)\n> [radarhere]\n> \n> - Improved HSV conversion [#4004](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4004)\n> [radarhere]\n> \n> - Added text stroking [#3978](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/3978)\n> [radarhere, hugovk]\n> \n> - No more deprecated bdist_wininst .exe installers [#4029](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4029)\n> [hugovk]\n> \n> - Do not allow floodfill to extend into negative coordinates [#4017](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4017)\n> [radarhere]\n> ... (truncated)\n
\n
\nCommits\n\n- [`8a30d13`](https://github.com/python-pillow/Pillow/commit/8a30d135378dc6a1c3c08fa4bb9fbc15370feedf) Updated CHANGES.rst [ci skip]\n- [`75602d1`](https://github.com/python-pillow/Pillow/commit/75602d12e1b6f2152ab5bd1acfb62a9c8a4a0432) 6.2.0 version bump\n- [`4756af9`](https://github.com/python-pillow/Pillow/commit/4756af9c1027ae620eaa9538d6b0dd9b0e844fca) Updated CHANGES.rst [ci skip]\n- [`cc16025`](https://github.com/python-pillow/Pillow/commit/cc16025e234b7a7a4dd3a86d2fdc0980698db9cc) Merge pull request [#4104](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4104) from radarhere/overrun\n- [`fb84701`](https://github.com/python-pillow/Pillow/commit/fb8470187a45043c33b1c75e7dca48b38d5db7a6) Merge pull request [#4034](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4034) from cgohlke/patch-1\n- [`b9693a5`](https://github.com/python-pillow/Pillow/commit/b9693a51c99c260bd66d1affeeab4a226cf7e5a5) Merge pull request [#4103](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4103) from radarhere/dimension\n- [`f228d0c`](https://github.com/python-pillow/Pillow/commit/f228d0ccbf6bf9392d7fcd51356ef2cfda80c75a) Merge pull request [#4102](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4102) from radarhere/decompression\n- [`aaf2c42`](https://github.com/python-pillow/Pillow/commit/aaf2c421564fcf96bd030487f09b648f7feb7b67) Merge pull request [#4000](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4000) from nulano/dpi_fix\n- [`b36c1bc`](https://github.com/python-pillow/Pillow/commit/b36c1bc943d554ba223086c7efb502d080f73905) Merge pull request [#4101](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4101) from radarhere/negative_seek\n- [`9a977b9`](https://github.com/python-pillow/Pillow/commit/9a977b975cd871ef9a9128b72414c0de3a292591) Raise error if dimension is a string\n- Additional commits viewable in [compare view](https://github.com/python-pillow/Pillow/compare/5.4.1...6.2.0)\n
\n
\n\n[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pillow&package-manager=pip&previous-version=5.4.1&new-version=6.2.0)](https://help.github.com/articles/configuring-automated-security-fixes)\n\nDependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.\n\n[//]: # (dependabot-automerge-start)\n[//]: # (dependabot-automerge-end)\n\n---\n\n
\nDependabot commands and options\n
\n\nYou can trigger Dependabot actions by commenting on this PR:\n- `@dependabot rebase` will rebase this PR\n- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it\n- `@dependabot merge` will merge this PR after your CI passes on it\n- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it\n- `@dependabot cancel merge` will cancel a previously requested merge and block automerging\n- `@dependabot reopen` will reopen this PR if it is closed\n- `@dependabot ignore this [patch|minor|major] version` will close this PR and stop Dependabot creating any more for this minor/major version (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)\n- `@dependabot use these labels` will set the current labels as the default for future PRs for this repo and language\n- `@dependabot use these reviewers` will set the current reviewers as the default for future PRs for this repo and language\n- `@dependabot use these assignees` will set the current assignees as the default for future PRs for this repo and language\n- `@dependabot use this milestone` will set the current milestone as the default for future PRs for this repo and language\n\nYou can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sgoldenlab/simba/network/alerts).\n\n
", + "user": "dependabot[bot]", + "reaction_cnt": 0, + "created_at": "2020-01-10T18:01:54Z", + "updated_at": "2020-06-12T20:19:22Z", + "author": "dependabot[bot]", + "comments": [ + { + "body": "Superseded by #36.", + "created_at": "2020-06-12T20:19:20Z", + "author": "dependabot[bot]" + } + ] + }, + { + "title": "Create LICENSE", + "body": "", + "user": "sgoldenlab", + "reaction_cnt": 0, + "created_at": "2019-12-04T22:17:22Z", + "updated_at": "2019-12-04T22:17:53Z", + "author": "sgoldenlab", + "comments": [] + } +] \ No newline at end of file diff --git a/simba/sandbox/github_issues_download.py b/simba/sandbox/github_issues_download.py new file mode 100644 index 000000000..4a21929e1 --- /dev/null +++ b/simba/sandbox/github_issues_download.py @@ -0,0 +1,102 @@ +import requests +import json + +# Replace with your GitHub username, repository name, and personal access token +GITHUB_USERNAME = 'sgoldenlab' +GITHUB_REPOSITORY = 'simba' +GITHUB_TOKEN = '' + +# GitHub API endpoint for issues +api_url = f'https://api.github.com/repos/{GITHUB_USERNAME}/{GITHUB_REPOSITORY}/issues' + +# Headers for authentication +headers = { + 'Authorization': f'token {GITHUB_TOKEN}', + 'Accept': 'application/vnd.github.v3+json' +} + +def get_all_issues(api_url, headers): + issues = [] + page = 1 + while True: + response = requests.get(api_url, headers=headers, params={'state': 'all', 'page': page, 'per_page': 100}) + if response.status_code != 200: + break + page_issues = response.json() + if not page_issues: + break + issues.extend(page_issues) + page += 1 + return issues + +# Get all issues +all_issues = get_all_issues(api_url, headers) + +def get_comments(issue_number, headers): + comments_url = f"https://api.github.com/repos/{GITHUB_USERNAME}/{GITHUB_REPOSITORY}/issues/{issue_number}/comments" + response = requests.get(comments_url, headers=headers) + if response.status_code == 200: + return response.json() + return [] + +def add_comments_to_issues(issues, headers): + for issue in issues: + issue_number = issue['number'] + comments = get_comments(issue_number, headers) + issue['comments_data'] = comments + return issues + +# Add comments to issues +all_issues_with_comments = add_comments_to_issues(all_issues, headers) + + +def preprocess_issues_with_metadata(issues): + processed_issues = [] + for issue in issues: + print() + processed_issue = { + 'title': issue.get('title', ''), + 'body': issue.get('body', ''), + 'user': issue['user'].get('login', ''), + 'reaction_cnt': issue['reactions'].get('total_count', ''), + 'created_at': issue.get('created_at', ''), + 'updated_at': issue.get('updated_at', ''), + 'author': issue.get('user', {}).get('login', ''), + 'comments': [{'body': comment['body'], 'created_at': comment['created_at'], 'author': comment['user']['login']} for comment in issue.get('comments_data', [])] + } + processed_issues.append(processed_issue) + return processed_issues + +# Preprocess issues with metadata +processed_issues_with_metadata = preprocess_issues_with_metadata(all_issues_with_comments) + + +def convert_issues_with_metadata_to_text(issues): + text_data = '' + for issue in issues: + text_data += f"Title: {issue['title']}\n" + text_data += f"Body: {issue['body']}\n" + text_data += f"Created at: {issue['created_at']}\n" + text_data += f"Updated at: {issue['updated_at']}\n" + text_data += f"Author: {issue['author']}\n" + text_data += f"Reactions: {issue['reaction_cnt']}\n" + for comment in issue['comments']: + text_data += f"Comment: {comment['body']}\n" + text_data += f"Comment created at: 
{comment['created_at']}\n" + text_data += f"Comment author: {comment['author']}\n" + text_data += '\n' + '='*50 + '\n' + return text_data + +# Convert issues with metadata to text +issues_text_with_metadata = convert_issues_with_metadata_to_text(processed_issues_with_metadata) + + +with open('/Users/simon/Desktop/envs/simba/simba/simba/sandbox/github_issues.json', 'w', encoding='utf-8') as f: + json.dump(issues_text_with_metadata, f, ensure_ascii=False, indent=4) + + +# Load data +with open('/Users/simon/Desktop/envs/simba/simba/simba/sandbox/github_issues.json', 'r', encoding='utf-8') as f: + data = json.load(f) + +tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') diff --git a/simba/sandbox/grangercausalitytests.py b/simba/sandbox/grangercausalitytests.py new file mode 100644 index 000000000..4b3232528 --- /dev/null +++ b/simba/sandbox/grangercausalitytests.py @@ -0,0 +1,70 @@ +import pandas as pd +from statsmodels.tsa.stattools import grangercausalitytests +import numpy as np +import itertools +from typing import List +from simba.utils.checks import check_int, check_instance, check_that_column_exist, check_str, check_valid_lst, check_int +try: + from typing import Literal +except: + from typing_extensions import Literal + + +def granger_tests(data: pd.DataFrame, + variables: List[str], + lag: int, + test: Literal['ssr_ftest', 'ssr_chi2test', 'lrtest', 'params_ftest'] = 'ssr_chi2test') -> pd.DataFrame: + """ + Perform Granger causality tests between pairs of variables in a DataFrame. + + This function computes Granger causality tests between pairs of variables in a DataFrame + using the statsmodels library. The Granger causality test assesses whether one time series + variable (predictor) can predict another time series variable (outcome). This test can help + determine the presence of causal relationships between variables. + + .. note:: + Modified from `Selva Prabhakaran `_. 
diff --git a/simba/sandbox/grangercausalitytests.py b/simba/sandbox/grangercausalitytests.py
new file mode 100644
index 000000000..4b3232528
--- /dev/null
+++ b/simba/sandbox/grangercausalitytests.py
@@ -0,0 +1,70 @@
+import pandas as pd
+from statsmodels.tsa.stattools import grangercausalitytests
+import numpy as np
+import itertools
+from typing import List
+from simba.utils.checks import check_int, check_instance, check_that_column_exist, check_str, check_valid_lst
+try:
+    from typing import Literal
+except ImportError:
+    from typing_extensions import Literal
+
+
+def granger_tests(data: pd.DataFrame,
+                  variables: List[str],
+                  lag: int,
+                  test: Literal['ssr_ftest', 'ssr_chi2test', 'lrtest', 'params_ftest'] = 'ssr_chi2test') -> pd.DataFrame:
+    """
+    Perform Granger causality tests between pairs of variables in a DataFrame.
+
+    This function computes Granger causality tests between all pairs of the named variables
+    using the statsmodels library. The Granger causality test assesses whether one time-series
+    variable (the predictor) helps predict another time-series variable (the outcome), which
+    can indicate a directed predictive relationship between the two.
+
+    .. note::
+       Modified from `Selva Prabhakaran `_.
+
+    :example:
+    >>> x = np.random.randint(0, 50, (100, 2))
+    >>> data = pd.DataFrame(x, columns=['r', 'k'])
+    >>> granger_tests(data=data, variables=['r', 'k'], lag=4, test='ssr_chi2test')
+    """
+
+    check_instance(source=granger_tests.__name__, instance=data, accepted_types=(pd.DataFrame,))
+    check_valid_lst(data=variables, source=granger_tests.__name__, valid_dtypes=(str,), min_len=2)
+    check_that_column_exist(df=data, column_name=variables, file_name='')
+    check_str(name=granger_tests.__name__, value=test, options=('ssr_ftest', 'ssr_chi2test', 'lrtest', 'params_ftest'))
+    check_int(name=granger_tests.__name__, value=lag, min_value=1)
+    df = pd.DataFrame(np.zeros((len(variables), len(variables))), columns=variables, index=variables)
+    for c, r in itertools.product(df.columns, df.index):
+        # grangercausalitytests(data[[r, c]]) tests whether column c Granger-causes column r;
+        # store the rounded p-value of the chosen test at the requested lag.
+        result = grangercausalitytests(data[[r, c]], maxlag=[lag], verbose=False)
+        p_val = round(result[lag][0][test][1], 4)
+        df.loc[r, c] = p_val
+    return df
+
+
+x = np.random.randint(0, 50, (100, 2))
+data = pd.DataFrame(x, columns=['r', 'k'])
+granger_tests(data=data, variables=['r', 'k'], lag=4, test='ssr_chi2test')
+
+
+#
+# for c in df.columns:
+#     for r in df.index:
+#         test_result = grangercausalitytests(data[[r, c]], maxlag=maxlag, verbose=False)
+#         p_values = [round(test_result[i+1][0][test][1], 4) for i in range(maxlag)]
+#         if verbose: print(f'Y = {r}, X = {c}, P Values = {p_values}')
+#         min_p_value = np.min(p_values)
+#         df.loc[r, c] = min_p_value
+# df.columns = [var + '_x' for var in variables]
+# df.index = [var + '_y' for var in variables]
+
+#results = grangercausalitytests(x=x, maxlag=[4], verbose=False)
\ No newline at end of file
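`granger_tests` returns a square DataFrame of p-values in which, under the convention used above, `df.loc[r, c]` holds the p-value for the hypothesis that column `c` Granger-causes column `r`. A short sketch of pulling out the significant directed pairs; the 0.05 threshold is an editorial assumption:

```python
import itertools

import numpy as np
import pandas as pd

# Toy data, mirroring the module-level example above.
data = pd.DataFrame(np.random.randint(0, 50, (100, 2)), columns=['r', 'k'])
p_matrix = granger_tests(data=data, variables=['r', 'k'], lag=4, test='ssr_chi2test')

alpha = 0.05  # assumed significance threshold
for cause, effect in itertools.permutations(p_matrix.columns, 2):
    p = p_matrix.loc[effect, cause]  # p-value for "`cause` Granger-causes `effect`"
    if p < alpha:
        print(f'{cause} Granger-causes {effect} (p={p:.4f})')
```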
diff --git a/simba/sandbox/graph.html b/simba/sandbox/graph.html
new file mode 100644
index 000000000..2360ac921
--- /dev/null
+++ b/simba/sandbox/graph.html
@@ -0,0 +1,164 @@
+<!-- [164-line interactive network-graph page: the markup did not survive text extraction and is omitted here] -->
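Judging by their names and identical 164-line structure, `graph.html` and the three `graph_*.html` files that follow look like pyvis/vis-network exports of a graph scored by flow, Katz centrality, and PageRank. Purely as an illustration of how such pages are typically produced — not a reconstruction of the originals — here is a sketch assuming the `networkx` and `pyvis` packages:

```python
import networkx as nx
from pyvis.network import Network

g = nx.les_miserables_graph()   # stand-in for a real interaction graph
scores = nx.pagerank(g)         # or nx.katz_centrality(g), etc.

net = Network(height='750px', width='100%')
net.from_nx(g)
for node in net.nodes:
    node['size'] = 10 + 50 * scores[node['id']]  # scale node size by its score
net.save_graph('graph_pagerank.html')            # writes a vis-network HTML page
```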
diff --git a/simba/sandbox/graph_flow.html b/simba/sandbox/graph_flow.html
new file mode 100644
index 000000000..e9ae4be05
--- /dev/null
+++ b/simba/sandbox/graph_flow.html
@@ -0,0 +1,164 @@
+<!-- [164-line interactive network-graph page: the markup did not survive text extraction and is omitted here] -->
diff --git a/simba/sandbox/graph_katz.html b/simba/sandbox/graph_katz.html
new file mode 100644
index 000000000..0acd1db31
--- /dev/null
+++ b/simba/sandbox/graph_katz.html
@@ -0,0 +1,164 @@
+<!-- [164-line interactive network-graph page: the markup did not survive text extraction and is omitted here] -->
diff --git a/simba/sandbox/graph_pagerank.html b/simba/sandbox/graph_pagerank.html
new file mode 100644
index 000000000..403ddb7f4
--- /dev/null
+++ b/simba/sandbox/graph_pagerank.html
@@ -0,0 +1,164 @@
+<!-- [164-line interactive network-graph page: the markup did not survive text extraction and is omitted here] -->
diff --git a/simba/sandbox/grid_transition_probabilities.py b/simba/sandbox/grid_transition_probabilities.py
new file mode 100644
index 000000000..eaf935fc3
--- /dev/null
+++ b/simba/sandbox/grid_transition_probabilities.py
@@ -0,0 +1,104 @@
+import numpy as np
+from shapely.geometry import Polygon, Point
+from typing import Dict, Tuple, Optional
+import functools
+import multiprocessing
+
+from simba.utils.read_write import get_video_meta_data, read_df, find_core_cnt
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.utils.printing import SimbaTimer
+from simba.utils.checks import check_valid_array, check_valid_dict, check_int
+from simba.utils.enums import Formats, Defaults
+
+def _compute_framewise_geometry_idx(data: np.ndarray,
+                                    grid: Dict[Tuple[int, int], Polygon],
+                                    verbose: bool):
+    # For every frame, find the grid cell whose polygon contains (or touches) the body-part point;
+    # rows stay at -1 when the point falls in no cell.
+    frm_idxs, cords = data[:, 0], data[:, 1:]
+    results = np.full(shape=(data.shape[0], 3), dtype=np.int32, fill_value=-1)
+    for frm_idx in range(frm_idxs.shape[0]):
+        frm_id, frm_point = frm_idxs[frm_idx], Point(cords[frm_idx])
+        if verbose:
+            print(f'Processing frame {frm_id}...')
+        for grid_idx, grid_geometry in grid.items():
+            if grid_geometry.contains(frm_point) or grid_geometry.touches(frm_point):
+                results[frm_idx] = np.array([frm_id, grid_idx[0], grid_idx[1]])
+
+    return results
+
+
+def geometry_transition_probabilities(data: np.ndarray,
+                                      grid: Dict[Tuple[int, int], Polygon],
+                                      core_cnt: Optional[int] = -1,
+                                      verbose: Optional[bool] = False) -> (Dict[Tuple[int, int], float], Dict[Tuple[int, int], int]):
+    """
+    Calculate geometry transition probabilities based on spatial transitions between grid cells.
+
+    Computes transition probabilities between pairs of spatial grid cells, represented as polygons. For each cell, it calculates the likelihood of transitioning to every other cell.
+
+    :param np.ndarray data: A 2D array where each row represents a point in space with two coordinates [x, y].
+    :param Dict[Tuple[int, int], Polygon] grid: A dictionary mapping grid cell identifiers (tuple of int, int) to their corresponding polygon objects.
+        Each grid cell is keyed by a tuple (e.g., (row, col)) with its spatial boundaries as a `Polygon`. E.g., created by :func:`simba.mixins.geometry_mixin.GeometryMixin.bucket_img_into_grid_square` or :func:`simba.mixins.geometry_mixin.GeometryMixin.bucket_img_into_grid_hexagon`.
+    :param Optional[int] core_cnt: The number of cores to use for parallel processing. Default is -1, which uses the maximum available cores.
+    :param Optional[bool] verbose: If True, the function will print additional information, including the elapsed time for processing.
+    :return: A tuple containing two dictionaries:
+        - A dictionary of transition probabilities between grid cells, where each key is a grid cell tuple (row, col),
+          and each value is another dictionary representing the transition probabilities to other cells.
+        - A dictionary of transition counts between grid cells, where each key is a grid cell tuple (row, col),
+          and each value is another dictionary representing the transition counts to other cells.
+    :rtype: Tuple[Dict[Tuple[int, int], Dict[Tuple[int, int], float]], Dict[Tuple[int, int], Dict[Tuple[int, int], int]]]
+
+    :example:
+    >>> video_meta_data = get_video_meta_data(video_path=r"C:\troubleshooting\mitra\project_folder\videos\708_MA149_Gq_CNO_0515.mp4")
+    >>> w, h = video_meta_data['width'], video_meta_data['height']
+    >>> grid = GeometryMixin().bucket_img_into_grid_square(bucket_grid_size=(5, 5), bucket_grid_size_mm=None, img_size=(h, w), verbose=False)[0]
+    >>> data = read_df(file_path=r'C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\708_MA149_Gq_CNO_0515.csv', file_type='csv')[['Nose_x', 'Nose_y']].values
+    >>> transition_probabilities, _ = geometry_transition_probabilities(data=data, grid=grid)
+    """
+
+    timer = SimbaTimer(start=True)
+    check_valid_array(data=data, source=geometry_transition_probabilities.__name__, accepted_ndims=(2,), accepted_axis_1_shape=[2,], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_valid_dict(x=grid, valid_key_dtypes=(tuple,), valid_values_dtypes=(Polygon,))
+    check_int(name="core_cnt", value=core_cnt, min_value=-1, unaccepted_vals=[0])
+    if core_cnt == -1 or core_cnt > find_core_cnt()[0]: core_cnt = find_core_cnt()[0]
+    # Prepend a frame-index column, then split the data across cores.
+    frm_id = np.arange(0, data.shape[0]).reshape(-1, 1)
+    data = np.hstack((frm_id, data)).reshape(-1, 3).astype(np.int32)
+    data, results = np.array_split(data, core_cnt), []
+    with multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
+        constants = functools.partial(_compute_framewise_geometry_idx, grid=grid, verbose=verbose)
+        for cnt, result in enumerate(pool.imap(constants, data, chunksize=1)):
+            results.append(result)
+        pool.terminate(); pool.join()
+    del data
+
+    results = np.vstack(results)[:, 1:].astype(np.int32)
+    out_transition_probabilities, out_transition_cnts = {}, {}
+    unique_grids = np.unique(results, axis=0)
+    for unique_grid in unique_grids:
+        # Frames spent in this cell, split into consecutive visits; the frame after each visit marks a transition.
+        in_grid_idx = np.where(np.all(results == unique_grid, axis=1))[0]
+        in_grid_idx = np.split(in_grid_idx, np.where(np.diff(in_grid_idx) > 1)[0] + 1)
+        transition_idx = [np.max(x)+1 for x in in_grid_idx if np.max(x)+1 < results.shape[0]]
+        transition_geometries = results[transition_idx, :]
+        unique_rows, counts = np.unique(transition_geometries, axis=0, return_counts=True)
+        grid_dict = {tuple(row): count for row, count in zip(unique_rows, counts)}
+        # Cells never transitioned into from this cell get an explicit zero count.
+        non_transition_grids = [tuple(x) for x in unique_grids if tuple(x) not in grid_dict.keys()]
+        non_transition_grids = {k: 0 for k in non_transition_grids}
+        grid_dict.update(non_transition_grids)
+        transition_cnt = sum(grid_dict.values())
+        out_transition_probabilities[tuple(unique_grid)] = {k: v/transition_cnt for k, v in grid_dict.items()}
+        out_transition_cnts[tuple(unique_grid)] = grid_dict
+    timer.stop_timer()
+    if verbose:
+        print(f'Geometry transition probabilities complete. Elapsed time: {timer.elapsed_time_str}')
+    return (out_transition_probabilities, out_transition_cnts)
+
+
+# if __name__=="__main__":
+#     video_meta_data = get_video_meta_data(video_path=r"C:\troubleshooting\mitra\project_folder\videos\708_MA149_Gq_CNO_0515.mp4")
+#     w, h = video_meta_data['width'], video_meta_data['height']
+#     grid = GeometryMixin().bucket_img_into_grid_square(bucket_grid_size=(5, 5), bucket_grid_size_mm=None, img_size=(h, w), verbose=False)[0]
+#     data = read_df(file_path=r'C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\708_MA149_Gq_CNO_0515.csv', file_type='csv')[['Nose_x', 'Nose_y']].values
+#     transition_probabilities, _ = geometry_transition_probabilities(data=data, grid=grid)
+#     print(transition_probabilities)
+
diff --git a/simba/sandbox/grubbs_test.py b/simba/sandbox/grubbs_test.py
new file mode 100644
index 000000000..ead33aa0e
--- /dev/null
+++ b/simba/sandbox/grubbs_test.py
@@ -0,0 +1,28 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+from typing import Optional
+
+def grubbs_test(x: np.ndarray, left_tail: Optional[bool] = False) -> float:
+    """
+    Perform Grubbs' test to detect whether the minimum or maximum value in a feature series is an outlier.
+
+    :param np.ndarray x: 1D array representing numeric data.
+    :param Optional[bool] left_tail: If True, the test calculates the Grubbs' test statistic for the left tail (minimum value). If False (default), it calculates the statistic for the right tail (maximum value).
+    :return float: The computed Grubbs' test statistic.
+
+    :example:
+    >>> x = np.random.random((100,))
+    >>> grubbs_test(x=x)
+    """
+    check_valid_array(data=x, source=grubbs_test.__name__, accepted_ndims=(1,), accepted_dtypes=(np.float32, np.float64, np.int32, np.int64))
+    x = np.sort(x)
+    if left_tail:
+        return (np.mean(x) - np.min(x)) / np.std(x)
+    else:
+        return (np.max(x) - np.mean(x)) / np.std(x)
+
+
+
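`grubbs_test` returns only the G statistic; deciding whether the extreme value is actually an outlier requires comparing G against a critical value derived from the t-distribution. A sketch of the standard one-sided critical value, assuming `scipy` is available; the 0.05 alpha is an editorial choice:

```python
import numpy as np
from scipy import stats

def grubbs_critical(n: int, alpha: float = 0.05) -> float:
    """One-sided Grubbs critical value for a sample of size n."""
    t = stats.t.ppf(1 - alpha / n, n - 2)
    return ((n - 1) / np.sqrt(n)) * np.sqrt(t ** 2 / (n - 2 + t ** 2))

x = np.random.random((100,))
g = grubbs_test(x=x)
if g > grubbs_critical(n=x.shape[0]):
    print('maximum value flagged as an outlier')
```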
diff --git a/simba/sandbox/hartley_fmax.py b/simba/sandbox/hartley_fmax.py
new file mode 100644
index 000000000..20fe56d22
--- /dev/null
+++ b/simba/sandbox/hartley_fmax.py
@@ -0,0 +1,22 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+
+def hartley_fmax(x: np.ndarray, y: np.ndarray):
+    """
+    Compute Hartley's Fmax statistic to test for equality of variances between two features or groups.
+
+    Values close to one represent closer to equal variance.
+
+    :param np.ndarray x: 1D array representing numeric data of the first group/feature.
+    :param np.ndarray y: 1D array representing numeric data of the second group/feature.
+
+    :example:
+    >>> x = np.random.random((100,))
+    >>> y = np.random.random((100,))
+    >>> hartley_fmax(x=x, y=y)
+    """
+    check_valid_array(data=x, source=hartley_fmax.__name__, accepted_ndims=(1,), accepted_dtypes=(np.float32, np.float64, np.int32, np.int64))
+    check_valid_array(data=y, source=hartley_fmax.__name__, accepted_ndims=(1,), accepted_dtypes=(np.float32, np.float64, np.int32, np.int64))
+    max_var = np.max((np.var(x), np.var(y)))
+    min_var = np.min((np.var(x), np.var(y)))
+    return max_var / min_var
\ No newline at end of file
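For the two-group case, Hartley's Fmax is simply the larger-to-smaller variance ratio, so an approximate p-value can be read off the F distribution. A sketch assuming `scipy`; note that Hartley's test expects (roughly) equal group sizes, and the two-sided correction here is an approximation:

```python
import numpy as np
from scipy import stats

x, y = np.random.random((100,)), np.random.random((100,))
fmax = hartley_fmax(x=x, y=y)

dfn = dfd = x.shape[0] - 1                            # degrees of freedom per group
p = min(1.0, 2 * (1 - stats.f.cdf(fmax, dfn, dfd)))   # two-sided approximation
print(f'Fmax={fmax:.3f}, approximate p={p:.3f}')
```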
+
+    :example:
+    >>> df = read_df(file_path='/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/csv/outlier_corrected_movement_location/SI_DAY3_308_CD1_PRESENT.csv', file_type='csv')
+    >>> cols = [x for x in df.columns if not x.endswith('_p')]
+    >>> data = df[cols].values.reshape(len(df), -1 , 2).astype(np.int64)
+    >>> geometries = GeometryMixin().multiframe_bodyparts_to_polygon(data=data, pixels_per_mm=1, parallel_offset=1, verbose=False, core_cnt=-1)
+    >>> hausdorff_distances = multiframe_hausdorff_distance(geometries=geometries)
+    """
+    check_valid_lst(source=multiframe_hausdorff_distance.__name__, data=geometries, valid_dtypes=(Polygon, LineString,), min_len=1)
+    check_int(name=f"{multiframe_hausdorff_distance.__name__} CORE COUNT", value=core_cnt, min_value=-1, max_value=find_core_cnt()[0], raise_error=True)
+    check_int(name=f"{multiframe_hausdorff_distance.__name__} LAG", value=lag, min_value=1, max_value=len(geometries)-1, raise_error=True)
+    if core_cnt == -1: core_cnt = find_core_cnt()[0]
+    reshaped_geometries = []
+    for i in range(lag): reshaped_geometries.append([[geometries[i], geometries[i]]])
+    for i in range(lag, len(geometries)): reshaped_geometries.append([[geometries[i-lag], geometries[i]]])
+    results = []
+    with multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
+        for cnt, mp_return in enumerate(pool.imap(hausdorff_distance, reshaped_geometries, chunksize=1)):
+            results.append(mp_return[0])
+    return results
+
+
+
+# hausdorff_distance(geometries=data)
+
+
+
diff --git a/simba/sandbox/heading.py b/simba/sandbox/heading.py
new file mode 100644
index 000000000..7c419e791
--- /dev/null
+++ b/simba/sandbox/heading.py
@@ -0,0 +1,34 @@
+import numpy as np
+from numba import njit
+
+@njit('(int32[:,:], float64, float64)')
+def sliding_bearing(x: np.ndarray, lag: float, fps: float) -> np.ndarray:
+    """
+    Calculates the sliding bearing (direction) of movement in degrees for a sequence of 2D points representing a single body-part.
+
+    .. note::
+       To calculate frame-by-frame bearing, pass fps == 1 and lag == 1.
+
+    .. image:: _static/img/sliding_bearing.png
+       :width: 600
+       :align: center
+
+    :param np.ndarray x: An array of shape (n, 2) representing the time-series sequence of 2D points.
+    :param float lag: The lag time (in seconds) used for calculating the sliding bearing. E.g., if 1, then bearing will be calculated using coordinates in the current frame vs the frame 1s previously.
+    :param float fps: The sample rate (frames per second) of the sequence.
+    :return np.ndarray: An array containing the sliding bearings (in degrees) for each point in the sequence.
+
+    :example:
+    >>> x = np.array([[10, 10], [20, 10]])
+    >>> sliding_bearing(x=x, lag=1, fps=1)
+    >>> [-1. 90.]
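+
+    .. note::
+       The bearing is computed as ``90 - degrees(arctan2(y1 - y2, x2 - x1))`` and wrapped into the [0, 360) range; frames earlier than ``lag`` are assigned -1.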
+ """ + + results = np.full((x.shape[0]), -1.0) + lag = int(lag * fps) + for i in range(lag, x.shape[0]): + x1, y1 = x[i-lag, 0], x[i-lag, 1] + x2, y2 = x[i, 1], x[i, 1] + degree = 90 - np.degrees(np.arctan2(y1 - y2, x2 - x1)) + results[i] = degree + 360 if degree < 0 else degree + return results \ No newline at end of file diff --git a/simba/sandbox/horizontal_videos_concat.py b/simba/sandbox/horizontal_videos_concat.py new file mode 100644 index 000000000..2612fd114 --- /dev/null +++ b/simba/sandbox/horizontal_videos_concat.py @@ -0,0 +1,354 @@ +import os +from typing import List, Union, Optional +import subprocess +from datetime import datetime +import shutil + +from simba.utils.read_write import get_video_meta_data +from simba.utils.checks import check_valid_lst, check_if_dir_exists, check_int, check_ffmpeg_available, check_nvidea_gpu_available +from simba.utils.errors import InvalidInputError, FFMPEGCodecGPUError +from simba.utils.printing import SimbaTimer +from simba.video_processors.video_processing import create_blank_video + +def horizontal_video_concatenator(video_paths: List[Union[str, os.PathLike]], + save_path: Union[str, os.PathLike], + height_px: Optional[Union[int, str]] = None, + height_idx: Optional[Union[int, str]] = None, + gpu: Optional[bool] = False, + verbose: Optional[bool] = True) -> None: + + """ + Concatenates multiple videos horizontally. + + :param List[Union[str, os.PathLike]] video_paths: List of input video file paths. + :param Union[str, os.PathLike] save_path: File path to save the concatenated video. + :param Optional[int] height_px: Height of the output video in pixels. + :param Optional[int] height_idx: Index of the video to use for determining Height. + :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False). + :param Optional[bool] verbose:Whether to print progress messages (default: True). 
+
+    :example:
+    >>> video_paths = ['video1.mp4', 'video2.mp4']
+    >>> x = horizontal_video_concatenator(video_paths=video_paths, height_px=50, save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/08102021_DOT_Rat7_8(2)_.mp4', gpu=False)
+    """
+    check_ffmpeg_available()
+    if gpu and not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available (as evaluated by nvidia-smi returning None)", source=horizontal_video_concatenator.__name__)
+    timer = SimbaTimer(start=True)
+    check_valid_lst(data=video_paths, source=horizontal_video_concatenator.__name__, min_len=2)
+    check_if_dir_exists(in_dir=os.path.dirname(save_path), source=horizontal_video_concatenator.__name__)
+    video_meta_data = [get_video_meta_data(video_path=video_path) for video_path in video_paths]
+    if ((height_px is None) and (height_idx is None)) or ((height_px is not None) and (height_idx is not None)):
+        raise InvalidInputError(msg='Provide a height_px OR height_idx', source=horizontal_video_concatenator.__name__)
+    if height_idx is not None:
+        check_int(name=f'{horizontal_video_concatenator.__name__} height', value=height_idx, min_value=0, max_value=len(video_paths)-1)
+        height = int(video_meta_data[height_idx]['height'])
+    else:
+        check_int(name=f'{horizontal_video_concatenator.__name__} height', value=height_px, min_value=1)
+        height = int(height_px)
+    video_path_str = " ".join([f'-i "{path}"' for path in video_paths])
+    codec = 'h264_nvenc' if gpu else 'libvpx-vp9'
+    filter_complex = ";".join([f"[{idx}:v]scale=-1:{height}[v{idx}]" for idx in range(len(video_paths))])
+    filter_complex += f";{''.join([f'[v{idx}]' for idx in range(len(video_paths))])}hstack=inputs={len(video_paths)}[v]"
+    if verbose:
+        print(f'Concatenating {len(video_paths)} videos horizontally with a {height} pixel height...')
+    cmd = f'ffmpeg {video_path_str} -filter_complex "{filter_complex}" -map "[v]" -c:v {codec} -loglevel error -stats "{save_path}" -y'
+    subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    if verbose:
+        print(f'Horizontal concatenation complete, saved at {save_path} (elapsed time: {timer.elapsed_time_str}s.)')
+
+
+def vertical_video_concatenator(video_paths: List[Union[str, os.PathLike]],
+                                save_path: Union[str, os.PathLike],
+                                width_px: Optional[int] = None,
+                                width_idx: Optional[int] = None,
+                                gpu: Optional[bool] = False,
+                                verbose: Optional[bool] = True) -> None:
+    """
+    Concatenates multiple videos vertically.
+
+    :param List[Union[str, os.PathLike]] video_paths: List of input video file paths.
+    :param Union[str, os.PathLike] save_path: File path to save the concatenated video.
+    :param Optional[int] width_px: Width of the output video in pixels.
+    :param Optional[int] width_idx: Index of the video to use for determining width.
+    :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False).
+    :param Optional[bool] verbose: Whether to print progress messages (default: True).
+    :raises FFMPEGCodecGPUError: If GPU is requested but not available.
+    :raises InvalidInputError: If both or neither width_px and width_idx are provided.
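+
+    .. note::
+       Output is encoded with ``h264_nvenc`` when ``gpu=True`` and ``libvpx-vp9`` otherwise.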
+
+    :example:
+    >>> video_paths = ['/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat7_8(2).mp4',
+    >>>                '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4',
+    >>>                '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12_1.mp4']
+    >>> _ = vertical_video_concatenator(video_paths=video_paths, width_idx=1, save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/08102021_DOT_Rat7_8(2)_.mp4', gpu=False)
+    """
+
+    check_ffmpeg_available()
+    if gpu and not check_nvidea_gpu_available(): raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available", source=vertical_video_concatenator.__name__)
+    video_meta_data = [get_video_meta_data(video_path=video_path) for video_path in video_paths]
+    timer = SimbaTimer(start=True)
+    check_valid_lst(data=video_paths, source=vertical_video_concatenator.__name__, min_len=2)
+    check_if_dir_exists(in_dir=os.path.dirname(save_path), source=vertical_video_concatenator.__name__)
+    if ((width_px is None) and (width_idx is None)) or ((width_px is not None) and (width_idx is not None)):
+        raise InvalidInputError(msg='Provide a width_px OR width_idx', source=vertical_video_concatenator.__name__)
+    if width_idx is not None:
+        check_int(name=f'{vertical_video_concatenator.__name__} width index', value=width_idx, min_value=0, max_value=len(video_paths) - 1)
+        width = int(video_meta_data[width_idx]['width'])
+    else:
+        check_int(name=f'{vertical_video_concatenator.__name__} width', value=width_px, min_value=1)
+        width = int(width_px)
+    video_path_str = " ".join([f'-i "{path}"' for path in video_paths])
+    codec = 'h264_nvenc' if gpu else 'libvpx-vp9'
+    filter_complex = ";".join([f"[{idx}:v]scale={width}:-1[v{idx}]" for idx in range(len(video_paths))])
+    filter_complex += f";{''.join([f'[v{idx}]' for idx in range(len(video_paths))])}"
+    filter_complex += f"vstack=inputs={len(video_paths)}[v]"
+    if verbose:
+        print(f'Concatenating {len(video_paths)} videos vertically with a {width} pixel width...')
+    cmd = f'ffmpeg {video_path_str} -filter_complex "{filter_complex}" -map "[v]" -c:v {codec} -loglevel error -stats "{save_path}" -y'
+    subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    if verbose:
+        print(f'Vertical concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)')

+def mosaic_concatenator(video_paths: List[Union[str, os.PathLike]],
+                        save_path: Union[str, os.PathLike],
+                        width_idx: Optional[Union[int, str]] = None,
+                        width_px: Optional[Union[int, str]] = None,
+                        height_idx: Optional[Union[int, str]] = None,
+                        height_px: Optional[Union[int, str]] = None,
+                        gpu: Optional[bool] = False,
+                        verbose: Optional[bool] = True,
+                        uneven_fill_color: Optional[str] = 'black') -> None:
+    """
+    Concatenates multiple videos into a mosaic layout.
+
+    .. note::
+       If an uneven number of videos is passed, the last panel will be filled by ``uneven_fill_color``.
+
+    :param List[Union[str, os.PathLike]] video_paths: List of input video file paths.
+    :param Union[str, os.PathLike] save_path: File path to save the concatenated video.
+    :param Optional[int] width_px: Width of the output video panels in pixels.
+    :param Optional[int] width_idx: Index of the video to use for determining the panel width.
+    :param Optional[int] height_px: Height of the output video panels in pixels.
+    :param Optional[int] height_idx: Index of the video to use for determining the panel height.
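+    :param Optional[str] uneven_fill_color: Color of the blank filler panel used when an uneven number of videos is passed. Default: 'black'.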
+    :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False).
+    :param Optional[bool] verbose: Whether to print progress messages (default: True).
+
+    :example:
+    >>> video_paths = ['/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat7_8(2).mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/2022-06-21_NOB_IOT_23.mp4']
+    >>> save_path = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/blank_test.mp4'
+    >>> mosaic_concatenator(video_paths=video_paths, save_path=save_path, width_idx=1, height_idx=1, gpu=False)
+    """
+
+    check_ffmpeg_available()
+    if gpu and not check_nvidea_gpu_available(): raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available", source=mosaic_concatenator.__name__)
+    timer = SimbaTimer(start=True)
+    dt = datetime.now().strftime("%Y%m%d%H%M%S")
+    check_valid_lst(data=video_paths, source=f'{mosaic_concatenator.__name__} video_paths', min_len=3)
+    video_meta_data = [get_video_meta_data(video_path=video_path) for video_path in video_paths]
+    max_video_length = max([x['video_length_s'] for x in video_meta_data])
+    if ((width_px is None) and (width_idx is None)) or ((width_px is not None) and (width_idx is not None)):
+        raise InvalidInputError(msg='Provide a width_px OR width_idx', source=mosaic_concatenator.__name__)
+    if ((height_px is None) and (height_idx is None)) or ((height_px is not None) and (height_idx is not None)):
+        raise InvalidInputError(msg='Provide a height_px OR height_idx', source=mosaic_concatenator.__name__)
+    if width_idx is not None:
+        check_int(name=f'{mosaic_concatenator.__name__} width index', value=width_idx, min_value=0, max_value=len(video_paths) - 1)
+        width = int(video_meta_data[width_idx]['width'])
+    else:
+        width = width_px
+    if height_idx is not None:
+        check_int(name=f'{mosaic_concatenator.__name__} height index', value=height_idx, min_value=0, max_value=len(video_paths) - 1)
+        height = int(video_meta_data[height_idx]['height'])
+    else:
+        height = height_px
+    if verbose:
+        print('Creating mosaic video ...')
+    temp_dir = os.path.join(os.path.dirname(video_paths[0]), f'temp_{dt}')
+    os.makedirs(temp_dir)
+    if not (len(video_paths) % 2) == 0:
+        blank_path = os.path.join(temp_dir, f'{dt}.mp4')
+        create_blank_video(path=blank_path, length=max_video_length, width=width, height=height, gpu=gpu, verbose=verbose, color=uneven_fill_color)
+        video_paths.append(blank_path)
+    upper_videos, lower_videos = video_paths[:len(video_paths)//2], video_paths[len(video_paths)//2:]
+    if verbose: print('Creating upper mosaic... (Step 1/3)')
+    if len(upper_videos) > 1:
+        upper_path = os.path.join(temp_dir, 'upper.mp4')
+        horizontal_video_concatenator(video_paths=upper_videos, save_path=upper_path, gpu=gpu, height_px=height, verbose=verbose)
+    else:
+        upper_path = upper_videos[0]
+    if verbose: print('Creating lower mosaic... (Step 2/3)')
+    if len(lower_videos) > 1:
+        lower_path = os.path.join(temp_dir, 'lower.mp4')
+        horizontal_video_concatenator(video_paths=lower_videos, save_path=lower_path, gpu=gpu, height_px=height, verbose=verbose)
+    else:
+        lower_path = lower_videos[0]
+    panels_meta = [get_video_meta_data(video_path=video_path) for video_path in [lower_path, upper_path]]
+    if verbose: print('Joining upper and lower mosaic...
(Step 3/3)')
+    vertical_video_concatenator(video_paths=[upper_path, lower_path], save_path=save_path, verbose=verbose, gpu=gpu, width_px=max([x['width'] for x in panels_meta]))
+    timer.stop_timer()
+    shutil.rmtree(temp_dir)
+    if verbose:
+        print(f'Mosaic concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)')

+def mixed_mosaic_concatenator(video_paths: List[Union[str, os.PathLike]],
+                              save_path: Union[str, os.PathLike],
+                              gpu: Optional[bool] = False,
+                              verbose: Optional[bool] = True,
+                              uneven_fill_color: Optional[str] = 'black') -> None:
+    """
+    Create a mixed mosaic video by concatenating multiple input videos in a mosaic layout of various sizes.
+
+    .. note::
+       The resolution of the output video is determined by the resolution of the video path at the first index.
+
+       If there is an uneven number of right-panel videos (i.e., ((len(video_paths) - 1) % 2) != 0), the last panel will be filled by ``uneven_fill_color``.
+
+    :param List[Union[str, os.PathLike]] video_paths: List of input video file paths.
+    :param Union[str, os.PathLike] save_path: File path to save the concatenated video.
+    :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False).
+    :param Optional[bool] verbose: Whether to print progress messages (default: True).
+
+    :example:
+    >>> video_paths = ['video1.mp4', 'video2.mp4', 'video3.mp4']
+    >>> save_path = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/blank_test.mp4'
+    >>> mixed_mosaic_concatenator(video_paths=video_paths, save_path=save_path, gpu=False, verbose=True)
+    """
+
+    check_ffmpeg_available()
+    if gpu and not check_nvidea_gpu_available(): raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available", source=mixed_mosaic_concatenator.__name__)
+    timer = SimbaTimer(start=True)
+    check_valid_lst(data=video_paths, source=mixed_mosaic_concatenator.__name__, min_len=2)
+    dt = datetime.now().strftime("%Y%m%d%H%M%S")
+    video_meta_data = [get_video_meta_data(video_path=video_path) for video_path in video_paths]
+    max_video_length = max([x['video_length_s'] for x in video_meta_data])
+    check_if_dir_exists(in_dir=os.path.dirname(save_path), source=mixed_mosaic_concatenator.__name__)
+    large_mosaic_path, video_paths = video_paths[0], video_paths[1:]
+    mosaic_height = int(video_meta_data[0]['height'] / 2)
+    if verbose: print('Creating mixed mosaic video... ')
+    temp_dir = os.path.join(os.path.dirname(video_paths[0]), f'temp_{dt}')
+    os.makedirs(temp_dir)
+    if not (len(video_paths) % 2) == 0:
+        blank_path = os.path.join(temp_dir, f'{dt}.mp4')
+        create_blank_video(path=blank_path, length=max_video_length, width=video_meta_data[-1]['width'], height=mosaic_height, gpu=gpu, verbose=True, color=uneven_fill_color)
+        video_paths.append(blank_path)
+    upper_videos, lower_videos = video_paths[:len(video_paths) // 2], video_paths[len(video_paths) // 2:]
+    if verbose: print('Creating upper right mosaic ... (Step 1/4)')
+    if len(upper_videos) > 1:
+        upper_path = os.path.join(temp_dir, 'upper.mp4')
+        horizontal_video_concatenator(video_paths=upper_videos, save_path=upper_path, gpu=gpu, height_px=mosaic_height, verbose=verbose)
+    else:
+        upper_path = upper_videos[0]
+    if verbose: print('Creating lower right mosaic ...
(Step 2/4)') + if len(lower_videos) > 1: + lower_path = os.path.join(temp_dir, 'lower.mp4') + horizontal_video_concatenator(video_paths=lower_videos, save_path=lower_path, gpu=gpu, verbose=verbose) + else: + lower_path = lower_videos[0] + panels_meta = [get_video_meta_data(video_path=video_path) for video_path in [lower_path, upper_path]] + mosaic_path = os.path.join(temp_dir, 'mosaic.mp4') + if verbose: print('Joining upper and lower right mosaic... (Step 3/4)') + vertical_video_concatenator(video_paths=[upper_path, lower_path], width_px=min([x['width'] for x in panels_meta]), save_path=mosaic_path, gpu=gpu, verbose=verbose) + if verbose: print('Joining left and right mosaic... (Step 4/4)') + horizontal_video_concatenator(video_paths=[large_mosaic_path, mosaic_path], height_idx=0, save_path=save_path, gpu=gpu) + timer.stop_timer() + shutil.rmtree(temp_dir) + if verbose: + print(f'Mixed mosaic concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)') + +video_paths = [ + '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat7_8(2).mp4', + '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4', + '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/2022-06-21_NOB_IOT_23.mp4'] +save_path = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/blank_test.mp4' +mixed_mosaic_concatenator(video_paths=video_paths, save_path=save_path, gpu=False, verbose=True) + + + + # + # mosaic_right_path = os.path.join(save_dir, 'mosaic.mp4') + # + # + # shutil.rmtree(save_dir) + # timer.stop_timer() + # if verbose: + # print(f'Mixed mosaic video complete. Elapsed time: {timer.elapsed_time_str}s.') + + + + + # + # filter_complex = "" + # for i in range(len(video_paths)): + # filter_complex += f"[{i}:v]scale={width}:{height}[scaled_{i}];" + # filter_complex += f"tile={col_cnt}x2:padding=10:width=2*in_w:height=2*in_h" + # ffmpeg_command = ( + # f"ffmpeg " + # + " ".join([f'-i "{input_file}"' for input_file in video_paths]) + # + f" -filter_complex \"{filter_complex}\" " + # + f"-c:v libx264 -crf 18 -preset veryfast {save_path}" + # ) + # subprocess.run(ffmpeg_command, shell=True) + +# video_paths = ['/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat7_8(2).mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/2022-06-21_NOB_IOT_23.mp4'] +# save_path = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/blank_test.mp4' +# mosaic_concatenator(video_paths=video_paths, save_path=save_path, width_idx=1, height_idx=1, gpu=False) +# # # # +# import subprocess +# +# def create_mosaic(input_files, output_file, panel_width, panel_height, num_cols): +# num_videos = len(input_files) +# num_rows = (num_videos + num_cols - 1) // num_cols +# +# # Generate filter_complex string with scaling and tile +# filter_complex = "" +# for i in range(num_videos): +# filter_complex += f"[{i}:v]scale={panel_width}:{panel_height}[scaled_{i}];" +# filter_complex += "".join([f"[scaled_{i}]" for i in range(num_videos)]) +# filter_complex += f"tile={num_cols}x{num_rows}:padding=10" +# +# # Generate FFmpeg command +# ffmpeg_command = ( +# f"ffmpeg " +# + " ".join([f'-i "{input_file}"' for input_file in 
input_files]) +# + f" -filter_complex \"{filter_complex}\" " +# + f"-c:v libx264 -crf 18 -preset veryfast {output_file}" +# ) +# +# # Run FFmpeg command +# subprocess.run(ffmpeg_command, shell=True) +# +# +# # Example usage: +# input_files = ['/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat7_8(2).mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/2022-06-21_NOB_IOT_23.mp4'] +# output_file = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/blank_test.mp4' +# panel_width = 100 # Example width for each panel +# panel_height = 100 # Example height for each panel +# num_cols = 2 # Example number of columns +# create_mosaic(input_files, output_file, panel_width, panel_height, num_cols) + + + # + # + # + # video_paths = resize_videos_by_width_and_height(video_paths=video_paths, height=height, width=width, overwrite=False, save_dir=save_dir) + # + # video_lengths = [] + # for i in video_paths: video_lengths.append(get_video_meta_data(video_path=i)['video_length_s']) + # + # + # if len(upper_videos) > 1: + # upper_path = os.path.join(save_dir, 'upper.mp4') + # horizontal_video_concatenator(video_paths=upper_videos, save_path=upper_path, gpu=gpu) + # else: + # upper_path = upper_videos[0] + # if len(lower_videos) > 1: + # lower_path = os.path.join(save_dir, 'lower.mp4') + # horizontal_video_concatenator(video_paths=lower_videos, save_path=lower_path, gpu=gpu) + # else: + # lower_path = lower_videos[0] + # vertical_video_concatenator(video_paths=[upper_path, lower_path], save_path=save_path, verbose=verbose, gpu=gpu) + # shutil.rmtree(save_dir) + # timer.stop_timer() + # if verbose: + # print(f'Mosaic video complete. 
Elapsed time: {timer.elapsed_time_str}s.')
diff --git a/simba/sandbox/img_conv.py b/simba/sandbox/img_conv.py
new file mode 100644
index 000000000..ab88924ce
--- /dev/null
+++ b/simba/sandbox/img_conv.py
@@ -0,0 +1,48 @@
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+from typing import Union, Optional
+import os
+from tkinter import *
+from datetime import datetime
+from PIL import Image
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, DropDownMenu, FolderSelect
+import cv2
+
+from simba.utils.checks import check_if_dir_exists, check_str, check_int
+from simba.utils.enums import Options, Keys, Links
+from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext, str_2_bool
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.video_processors.video_processing import convert_to_tiff
+
+
+class Convert2TIFFPopUp(PopUpMixin):
+    def __init__(self):
+        super().__init__(title="CONVERT IMAGE DIRECTORY TO TIFF")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_frame_dir = FolderSelect(settings_frm, "IMAGE DIRECTORY PATH:", title="Select an image directory", lblwidth=25)
+        self.compression_dropdown = DropDownMenu(settings_frm, "COMPRESSION:", ['raw', 'tiff_deflate', 'tiff_lzw'], labelwidth=25)
+        self.compression_dropdown.setChoices('raw')
+        self.stack_dropdown = DropDownMenu(settings_frm, "STACK:", ['FALSE', 'TRUE'], labelwidth=25)
+        self.stack_dropdown.setChoices('FALSE')
+        self.create_run_frm(run_function=self.run, title='RUN TIFF CONVERSION')
+
+        settings_frm.grid(row=0, column=0, sticky="NW")
+        self.selected_frame_dir.grid(row=0, column=0, sticky="NW")
+        self.compression_dropdown.grid(row=1, column=0, sticky="NW")
+        self.stack_dropdown.grid(row=2, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self):
+        folder_path = self.selected_frame_dir.folder_path
+        check_if_dir_exists(in_dir=folder_path)
+        stack = str_2_bool(self.stack_dropdown.getChoices())
+        convert_to_tiff(directory=folder_path, compression=self.compression_dropdown.getChoices(), verbose=True, stack=stack)
+
+Convert2TIFFPopUp()
+
+# convert_to_webp('/Users/simon/Desktop/imgs', quality=80)
+
diff --git a/simba/sandbox/img_kmeans.py b/simba/sandbox/img_kmeans.py
new file mode 100644
index 000000000..88d4a7767
--- /dev/null
+++ b/simba/sandbox/img_kmeans.py
@@ -0,0 +1,66 @@
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.read_write import read_df, read_frm_of_video
+from simba.utils.data import savgol_smoother
+from simba.mixins.image_mixin import ImageMixin
+import cv2
+from simba.utils.read_write import get_video_meta_data
+import numpy as np
+from sklearn.cluster import KMeans
+from simba.utils.checks import check_valid_array
+
+
+
+
+def img_kmeans(data: np.ndarray,
+               greyscale: bool = True,
+               k: int = 3,
+               n_init: int = 5) -> np.ndarray:
+    # Sketch: cluster the image stack with k-means, treating each image as one
+    # observation (per the commented-out hints in the original draft).
+    check_valid_array(data=data, accepted_ndims=(4, 3,), accepted_dtypes=(np.uint8,))
+    if greyscale and data.ndim == 4:
+        # Collapse the color channels to a single plane by averaging.
+        data = data.mean(axis=3).astype(np.uint8)
+    # Flatten each image into a 1D feature vector of pixel intensities.
+    features = data.reshape(data.shape[0], -1).astype(np.float32)
+    kmeans = KMeans(n_clusters=k, n_init=n_init).fit(features)
+    # Return one cluster label per image in the stack.
+    return kmeans.labels_
+
+
+CONFIG_PATH = r"C:\troubleshooting\RAT_NOR\project_folder\project_config.ini"
+VIDEO_PATH = r"C:\troubleshooting\RAT_NOR\project_folder\videos\2022-06-20_NOB_DOT_4_clipped.mp4"
+
+video_meta = get_video_meta_data(video_path=VIDEO_PATH)
+
+config
= ConfigReader(config_path=CONFIG_PATH, read_video_info=False)
+config.read_roi_data()
+shapes, _ = GeometryMixin.simba_roi_to_geometries(rectangles_df=config.rectangles_df, circles_df=config.circles_df, polygons_df=config.polygon_df, color=True)
+shape = shapes['2022-06-20_NOB_DOT_4']['Rectangle']
+shapes = []
+for i in range(video_meta['frame_count']):
+    shapes.append(shape)
+
+imgs = ImageMixin().slice_shapes_in_imgs(imgs=VIDEO_PATH, shapes=shapes)
+imgs = ImageMixin.pad_img_stack(image_dict=imgs, pad_value=0)
+imgs = np.stack(list(imgs.values()), axis=0)
+
+img_kmeans(data=imgs)
+
+
+# cv2.imshow('asdasdasd', imgs[60])
+# cv2.waitKey(0)
+
diff --git a/simba/sandbox/img_stack_brightness.py b/simba/sandbox/img_stack_brightness.py
new file mode 100644
index 000000000..8922d8863
--- /dev/null
+++ b/simba/sandbox/img_stack_brightness.py
@@ -0,0 +1,95 @@
+import time
+from copy import deepcopy
+from typing import Optional, Union
+
+import numpy as np
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+from numba import cuda
+
+from simba.utils.checks import check_if_valid_img, check_instance
+from simba.utils.read_write import read_img_batch_from_video_gpu
+
+PHOTOMETRIC = 'photometric'
+DIGITAL = 'digital'
+
+@cuda.jit()
+def _photometric(data, results):
+    y, x, i = cuda.grid(3)
+    if i < 0 or x < 0 or y < 0:
+        return
+    if i > results.shape[0] - 1 or x > results.shape[1] - 1 or y > results.shape[2] - 1:
+        return
+    else:
+        r, g, b = data[i][x][y][0], data[i][x][y][1], data[i][x][y][2]
+        results[i][x][y] = (0.2126 * r) + (0.7152 * g) + (0.0722 * b)
+
+@cuda.jit()
+def _digital(data, results):
+    y, x, i = cuda.grid(3)
+    if i < 0 or x < 0 or y < 0:
+        return
+    if i > results.shape[0] - 1 or x > results.shape[1] - 1 or y > results.shape[2] - 1:
+        return
+    else:
+        r, g, b = data[i][x][y][0], data[i][x][y][1], data[i][x][y][2]
+        results[i][x][y] = (0.299 * r) + (0.587 * g) + (0.114 * b)
+
+def img_stack_brightness(x: np.ndarray,
+                         method: Optional[Literal['photometric', 'digital']] = 'digital',
+                         ignore_black: Optional[bool] = True) -> np.ndarray:
+    r"""
+    Calculate the average brightness of a stack of images using a specified method.
+
+    - **Photometric Method**: The brightness is calculated using the formula:
+
+      .. math::
+         \text{brightness} = 0.2126 \cdot R + 0.7152 \cdot G + 0.0722 \cdot B
+
+    - **Digital Method**: The brightness is calculated using the formula:
+
+      .. math::
+         \text{brightness} = 0.299 \cdot R + 0.587 \cdot G + 0.114 \cdot B
+
+    :param np.ndarray x: A 4D array of images with dimensions (N, H, W, C), where N is the number of images, H and W are the height and width, and C is the number of channels (RGB).
+    :param Optional[Literal['photometric', 'digital']] method: The method to use for calculating brightness. It can be 'photometric' for the standard luminance calculation or 'digital' for an alternative set of coefficients. Default is 'digital'.
+    :param Optional[bool] ignore_black: If True, black pixels (i.e., pixels with brightness value 0) will be ignored in the calculation of the average brightness. Default is True.
+    :return np.ndarray: A 1D array of average brightness values for each image in the stack. If `ignore_black` is True, black pixels are ignored in the averaging process.
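+
+    .. note::
+       One CUDA thread is launched per pixel in 16x16 thread blocks; the input stack is cast to ``uint8`` and copied to the device before the kernel runs.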
+
+    :example:
+    >>> imgs = read_img_batch_from_video_gpu(video_path=r"/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_DOT_4_downsampled.mp4", start_frm=0, end_frm=5000)
+    >>> imgs = np.stack(list(imgs.values()), axis=0)
+    >>> x = img_stack_brightness(x=imgs)
+    """
+
+    check_instance(source=img_stack_brightness.__name__, instance=x, accepted_types=(np.ndarray,))
+    check_if_valid_img(data=x[0], source=img_stack_brightness.__name__)
+    x = np.ascontiguousarray(x).astype(np.uint8)
+    if x.ndim == 4:
+        grid_x = (x.shape[1] + 16 - 1) // 16
+        grid_y = (x.shape[2] + 16 - 1) // 16
+        grid_z = x.shape[0]
+        threads_per_block = (16, 16, 1)
+        blocks_per_grid = (grid_y, grid_x, grid_z)
+        x_dev = cuda.to_device(x)
+        results = cuda.device_array((x.shape[0], x.shape[1], x.shape[2]), dtype=np.uint8)
+        if method == PHOTOMETRIC:
+            _photometric[blocks_per_grid, threads_per_block](x_dev, results)
+        else:
+            _digital[blocks_per_grid, threads_per_block](x_dev, results)
+        results = results.copy_to_host()
+        if ignore_black:
+            masked_array = np.ma.masked_equal(results, 0)
+            results = np.mean(masked_array, axis=(1, 2)).filled(0)
+        else:
+            results = np.mean(results, axis=(1, 2))
+    else:
+        results = deepcopy(x)
+        results = np.mean(results, axis=(1, 2))
+
+    return results
\ No newline at end of file
diff --git a/simba/sandbox/img_stack_mse.py b/simba/sandbox/img_stack_mse.py
new file mode 100644
index 000000000..78b18b56d
--- /dev/null
+++ b/simba/sandbox/img_stack_mse.py
@@ -0,0 +1,106 @@
+from typing import Optional
+
+import numpy as np
+from numba import cuda
+
+from simba.utils.checks import (check_if_valid_img, check_instance,
+                                check_valid_array)
+
+
+@cuda.jit()
+def _grey_mse(data, ref_img, stride, batch_cnt, mse_arr):
+    y, x, i = cuda.grid(3)
+    stride = stride[0]
+    batch_cnt = batch_cnt[0]
+    if batch_cnt == 0:
+        if (i - stride) < 0 or x < 0 or y < 0:
+            return
+    else:
+        if i < 0 or x < 0 or y < 0:
+            return
+    if i > mse_arr.shape[0] - 1 or x > mse_arr.shape[1] - 1 or y > mse_arr.shape[2] - 1:
+        return
+    else:
+        img_val = data[i][x][y]
+        if i == 0:
+            prev_val = ref_img[x][y]
+        else:
+            prev_val = data[i - stride][x][y]
+        mse_arr[i][x][y] = (img_val - prev_val) ** 2
+
+
+@cuda.jit()
+def _rgb_mse(data, ref_img, stride, batch_cnt, mse_arr):
+    y, x, i = cuda.grid(3)
+    stride = stride[0]
+    batch_cnt = batch_cnt[0]
+    if batch_cnt == 0:
+        if (i - stride) < 0 or x < 0 or y < 0:
+            return
+    else:
+        if i < 0 or x < 0 or y < 0:
+            return
+    if i > mse_arr.shape[0] - 1 or x > mse_arr.shape[1] - 1 or y > mse_arr.shape[2] - 1:
+        return
+    else:
+        img_val = data[i][x][y]
+        if i != 0:
+            prev_val = data[i - stride][x][y]
+        else:
+            prev_val = ref_img[x][y]
+        r_diff = (img_val[0] - prev_val[0]) ** 2
+        g_diff = (img_val[1] - prev_val[1]) ** 2
+        b_diff = (img_val[2] - prev_val[2]) ** 2
+        mse_arr[i][x][y] = r_diff + g_diff + b_diff
+
+def stack_sliding_mse(x: np.ndarray,
+                      stride: Optional[int] = 1,
+                      batch_size: Optional[int] = 1000) -> np.ndarray:
+    """
+    Computes the Mean Squared Error (MSE) between each image in a stack and a reference image,
+    where the reference image is determined by a sliding window approach with a specified stride.
+    The function is optimized for large image stacks by processing them in batches.
+
+    :param np.ndarray x: Input array of images, where the first dimension corresponds to the stack of images. The array should be either 3D (height, width, channels) or 4D (batch, height, width, channels).
+    :param Optional[int] stride: The stride or step size for the sliding window that determines the reference image.
Defaults to 1, meaning the previous image in the stack is used as the reference.
+    :param Optional[int] batch_size: The number of images to process in a single batch. Larger batch sizes may improve performance but require more GPU memory. Defaults to 1000.
+    :return: A 1D NumPy array containing the MSE for each image in the stack compared to its corresponding reference image. The length of the array is equal to the number of images in the input stack.
+    :rtype: np.ndarray
+
+    """
+
+    check_instance(source=stack_sliding_mse.__name__, instance=x, accepted_types=(np.ndarray,))
+    check_if_valid_img(data=x[0], source=stack_sliding_mse.__name__)
+    check_valid_array(data=x, source=stack_sliding_mse.__name__, accepted_ndims=[3, 4])
+    stride = np.array([stride], dtype=np.int32)
+    stride_dev = cuda.to_device(stride)
+    out = np.full((x.shape[0]), fill_value=0.0, dtype=np.float32)
+    for batch_cnt, l in enumerate(range(0, x.shape[0], batch_size)):
+        r = l + batch_size
+        batch_x = x[l:r]
+        if batch_cnt != 0:
+            if x.ndim == 3:
+                ref_img = x[l-stride].astype(np.uint8).reshape(x.shape[1], x.shape[2])
+            else:
+                ref_img = x[l-stride].astype(np.uint8).reshape(x.shape[1], x.shape[2], 3)
+        else:
+            ref_img = np.full_like(x[l], dtype=np.uint8, fill_value=0)
+        ref_img = ref_img.astype(np.uint8)
+        grid_x = (batch_x.shape[1] + 16 - 1) // 16
+        grid_y = (batch_x.shape[2] + 16 - 1) // 16
+        grid_z = batch_x.shape[0]
+        threads_per_block = (16, 16, 1)
+        blocks_per_grid = (grid_y, grid_x, grid_z)
+        ref_img_dev = cuda.to_device(ref_img)
+        x_dev = cuda.to_device(batch_x)
+        results = cuda.device_array((batch_x.shape[0], batch_x.shape[1], batch_x.shape[2]), dtype=np.uint8)
+        batch_cnt_dev = np.array([batch_cnt], dtype=np.int32)
+        if x.ndim == 3:
+            _grey_mse[blocks_per_grid, threads_per_block](x_dev, ref_img_dev, stride_dev, batch_cnt_dev, results)
+        else:
+            _rgb_mse[blocks_per_grid, threads_per_block](x_dev, ref_img_dev, stride_dev, batch_cnt_dev, results)
+        results = results.copy_to_host()
+        results = np.mean(results, axis=(1, 2))
+        out[l:r] = results
+    return out
\ No newline at end of file
diff --git a/simba/sandbox/img_stack_to_bw.py b/simba/sandbox/img_stack_to_bw.py
new file mode 100644
index 000000000..1eba7c0d1
--- /dev/null
+++ b/simba/sandbox/img_stack_to_bw.py
@@ -0,0 +1,62 @@
+import time
+
+import numpy as np
+import cv2
+from simba.mixins.image_mixin import ImageMixin
+from numba import jit
+
+
+@jit(nopython=True)
+def img_stack_to_bw(imgs: np.ndarray,
+                    lower_thresh: int,
+                    upper_thresh: int,
+                    invert: bool):
+    """
+    Convert a stack of color images into black and white format.
+
+    .. note::
+       If converting a single image, consider ``simba.mixins.image_mixin.ImageMixin.img_to_bw()``
+
+    :param np.ndarray imgs: 4-dimensional array (N, H, W, C) of color images.
+    :param int lower_thresh: Lower threshold value for binary conversion. Pixel means below this value become black (0).
+    :param int upper_thresh: Upper threshold value for binary conversion. Pixel means above this value become white (1).
+    :param bool invert: Flag indicating whether to invert the binary image (black becomes white and vice versa).
+    :return np.ndarray: 3-dimensional array (N, H, W) of black and white images.
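+
+    .. note::
+       Pixel intensities are first averaged across the color channels; channel means below ``lower_thresh`` map to 0 and means above ``upper_thresh`` map to 1, while intermediate values keep their channel mean.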
+
+    :example:
+    >>> imgs = ImageMixin.read_img_batch_from_video(video_path='/Users/simon/Downloads/3A_Mouse_5-choice_MouseTouchBasic_a1.mp4', start_frm=0, end_frm=100)
+    >>> imgs = np.stack(imgs.values(), axis=0)
+    >>> bw_imgs = img_stack_to_bw(imgs=imgs, upper_thresh=255, lower_thresh=20, invert=False)
+    """
+
+    results = np.full((imgs.shape[:3]), np.nan)
+    for cnt in range(imgs.shape[0]):
+        arr = imgs[cnt]
+        m, n, _ = arr.shape
+        img_mean = np.zeros((m, n))
+        for i in range(m):
+            for j in range(n):
+                total = 0.0
+                for k in range(arr.shape[2]):
+                    total += arr[i, j, k]
+                img_mean[i, j] = total / arr.shape[2]
+        img = np.where(img_mean < lower_thresh, 0, img_mean)
+        img = np.where(img > upper_thresh, 1, img)
+        if invert:
+            img = 1 - img
+        results[cnt] = img
+    return results
+
+# imgs = ImageMixin.read_img_batch_from_video(video_path='/Users/simon/Downloads/3A_Mouse_5-choice_MouseTouchBasic_a1.mp4', start_frm=0, end_frm=100)
+# imgs = np.stack(imgs.values(), axis=0)
+# bw_imgs = img_stack_to_bw(imgs=imgs, upper_thresh=255, lower_thresh=20, invert=False)
+
+# imgs[0]
+
+# img = cv2.imread('/Users/simon/Desktop/test.png')
+
+# cv2.imshow('sdsdf', bw_imgs[0])
+# cv2.waitKey(5000)
\ No newline at end of file
diff --git a/simba/sandbox/imgs_to_grayscale_cupy.py b/simba/sandbox/imgs_to_grayscale_cupy.py
new file mode 100644
index 000000000..5815174b6
--- /dev/null
+++ b/simba/sandbox/imgs_to_grayscale_cupy.py
@@ -0,0 +1,44 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+from typing import Optional
+
+import cupy as cp
+import numpy as np
+
+from simba.utils.checks import check_if_valid_img, check_instance
+from simba.utils.read_write import read_img_batch_from_video_gpu
+
+
+def img_stack_to_grayscale_cupy(imgs: np.ndarray,
+                                batch_size: Optional[int] = 250) -> np.ndarray:
+    """
+    Converts a stack of color images to grayscale using GPU acceleration with CuPy.
+
+    :param np.ndarray imgs: A 4D NumPy array representing a stack of images with shape (num_images, height, width, channels). The images are expected to have 3 channels (RGB).
+    :param Optional[int] batch_size: The number of images to process in each batch. Defaults to 250. Adjust this parameter to fit your GPU's memory capacity.
+    :return np.ndarray: A 3D NumPy array of shape (num_images, height, width) containing the grayscale images. If the input array is not 4D, the function returns the input as is.
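+
+    .. note::
+       The stack is processed in ``batch_size`` chunks to bound GPU memory use; grayscale values are a weighted channel sum (0.21, 0.72, 0.07), matching the CUDA kernel in ``imgs_to_greyscale_cuda.py``.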
+ + :example: + >>> imgs = read_img_batch_from_video_gpu(video_path=r"/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_IOT_1_cropped.mp4", verbose=False, start_frm=0, end_frm=i) + >>> imgs = np.stack(list(imgs.values()), axis=0).astype(np.uint8) + >>> gray_imgs = img_stack_to_grayscale_cupy(imgs=imgs) + """ + + + check_instance(source=img_stack_to_grayscale_cupy.__name__, instance=imgs, accepted_types=(np.ndarray,)) + check_if_valid_img(data=imgs[0], source=img_stack_to_grayscale_cupy.__name__) + if imgs.ndim != 4: + return imgs + results = cp.zeros((imgs.shape[0], imgs.shape[1], imgs.shape[2]), dtype=np.uint8) + n = int(np.ceil((imgs.shape[0] / batch_size))) + imgs = np.array_split(imgs, n) + start = 0 + for i in range(len(imgs)): + img_batch = cp.array(imgs[i]) + batch_cnt = img_batch.shape[0] + end = start + batch_cnt + vals = (0.07 * img_batch[:, :, :, 2] + 0.72 * img_batch[:, :, :, 1] + 0.21 * img_batch[:, :, :, 0]) + results[start:end] = vals.astype(cp.uint8) + start = end + return results.get() \ No newline at end of file diff --git a/simba/sandbox/imgs_to_greyscale_cuda.py b/simba/sandbox/imgs_to_greyscale_cuda.py new file mode 100644 index 000000000..fea95a7bd --- /dev/null +++ b/simba/sandbox/imgs_to_greyscale_cuda.py @@ -0,0 +1,53 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +import numpy as np +from numba import cuda + +from simba.utils.checks import check_if_valid_img, check_instance +from simba.utils.read_write import read_img_batch_from_video_gpu + + +@cuda.jit() +def _img_stack_to_grayscale(data, results): + y, x, i = cuda.grid(3) + if i < 0 or x < 0 or y < 0: + return + if i > results.shape[0] - 1 or x > results.shape[1] - 1 or y > results.shape[2] - 1: + return + else: + b = 0.07 * data[i][x][y][2] + g = 0.72 * data[i][x][y][1] + r = 0.21 * data[i][x][y][0] + val = b + g + r + results[i][x][y] = val + +def img_stack_to_grayscale_cuda(x: np.ndarray) -> np.ndarray: + """ + Convert image stack to grayscale using CUDA. + + :param np.ndarray x: 4d array of color images in numpy format. + :return np.ndarray: 3D array of greyscaled images. 
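+
+    .. note::
+       One CUDA thread is launched per pixel in 16x16 thread blocks; the channel weights (0.21, 0.72, 0.07) match ``img_stack_to_grayscale_cupy``.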
+
+    :example:
+    >>> imgs = read_img_batch_from_video_gpu(video_path=r"/mnt/c/troubleshooting/mitra/project_folder/videos/temp_2/592_MA147_Gq_Saline_0516_downsampled.mp4", verbose=False, start_frm=0, end_frm=i)
+    >>> imgs = np.stack(list(imgs.values()), axis=0).astype(np.uint8)
+    >>> grey_images = img_stack_to_grayscale_cuda(x=imgs)
+    """
+    check_instance(source=img_stack_to_grayscale_cuda.__name__, instance=x, accepted_types=(np.ndarray,))
+    check_if_valid_img(data=x[0], source=img_stack_to_grayscale_cuda.__name__)
+    if x.ndim != 4:
+        return x
+    x = np.ascontiguousarray(x).astype(np.uint8)
+    x_dev = cuda.to_device(x)
+    results = cuda.device_array((x.shape[0], x.shape[1], x.shape[2]), dtype=np.uint8)
+    grid_x = (x.shape[1] + 16 - 1) // 16
+    grid_y = (x.shape[2] + 16 - 1) // 16
+    grid_z = x.shape[0]
+    threads_per_block = (16, 16, 1)
+    blocks_per_grid = (grid_y, grid_x, grid_z)
+    _img_stack_to_grayscale[blocks_per_grid, threads_per_block](x_dev, results)
+    results = results.copy_to_host()
+    return results
+
+
diff --git a/simba/sandbox/increase_fps.py b/simba/sandbox/increase_fps.py
new file mode 100644
index 000000000..21fe899e2
--- /dev/null
+++ b/simba/sandbox/increase_fps.py
@@ -0,0 +1,227 @@
+import os
+from typing import Union, Optional
+import subprocess
+from simba.utils.read_write import get_fn_ext, get_video_meta_data
+
+import threading
+import functools
+import glob
+import multiprocessing
+import os
+import platform
+import shutil
+import subprocess
+import time
+from copy import deepcopy
+from datetime import datetime
+from tkinter import *
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import cv2
+import numpy as np
+from PIL import Image, ImageTk
+from shapely.geometry import Polygon
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.image_mixin import ImageMixin
+from simba.utils.checks import (check_ffmpeg_available,
+                                check_file_exist_and_readable, check_float,
+                                check_if_dir_exists,
+                                check_if_filepath_list_is_empty,
+                                check_if_string_value_is_valid_video_timestamp,
+                                check_instance, check_int,
+                                check_nvidea_gpu_available, check_str,
+                                check_that_hhmmss_start_is_before_end,
+                                check_valid_lst, check_valid_tuple)
+from simba.utils.data import find_frame_numbers_from_time_stamp
+from simba.utils.enums import OS, ConfigKey, Formats, Options, Paths
+from simba.utils.errors import (CountError, DirectoryExistError,
+                                FFMPEGCodecGPUError, FFMPEGNotFoundError,
+                                FileExistError, FrameRangeError,
+                                InvalidFileTypeError, InvalidInputError,
+                                InvalidVideoFileError, NoDataError,
+                                NoFilesFoundError, NotDirectoryError)
+from simba.utils.lookups import (get_ffmpeg_crossfade_methods, get_fonts,
+                                 percent_to_crf_lookup, percent_to_qv_lk)
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import (
+    check_if_hhmmss_timestamp_is_valid_part_of_video,
+    concatenate_videos_in_folder, find_all_videos_in_directory, find_core_cnt,
+    find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data,
+    read_config_entry, read_config_file, read_frm_of_video)
+from simba.utils.warnings import (FileExistWarning, InValidUserInputWarning,
+                                  SameInputAndOutputWarning, FrameRangeWarning)
+from simba.video_processors.extract_frames import video_to_frames
+from simba.video_processors.roi_selector import ROISelector
+from simba.video_processors.roi_selector_circle import ROISelectorCircle
+from simba.video_processors.roi_selector_polygon
import ROISelectorPolygon + +from tkinter import * +from typing import Optional, Union + +import numpy as np +from PIL import Image, ImageTk + +import simba +from simba.labelling.extract_labelled_frames import AnnotationFrameExtractor +from simba.mixins.config_reader import ConfigReader +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.plotting.frame_mergerer_ffmpeg import FrameMergererFFmpeg +from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, + CreateToolTip, DropDownMenu, Entry_Box, + FileSelect, FolderSelect) +from simba.utils.checks import (check_ffmpeg_available, + check_file_exist_and_readable, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_int, check_nvidea_gpu_available, + check_str, + check_that_hhmmss_start_is_before_end) +from simba.utils.data import convert_roi_definitions +from simba.utils.enums import Dtypes, Formats, Keys, Links, Options, Paths +from simba.utils.errors import (CountError, DuplicationError, FrameRangeError, + InvalidInputError, MixedMosaicError, + NoChoosenClassifierError, NoFilesFoundError, + NotDirectoryError) +from simba.utils.lookups import get_color_dict, get_fonts +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + seconds_to_timestamp, str_2_bool) +from simba.video_processors.brightness_contrast_ui import \ + brightness_contrast_ui +from simba.video_processors.clahe_ui import interactive_clahe_ui +from simba.video_processors.extract_seqframes import extract_seq_frames +from simba.video_processors.multi_cropper import MultiCropper +from simba.video_processors.px_to_mm import get_coordinates_nilsson +from simba.video_processors.video_processing import ( + VideoRotator, batch_convert_video_format, batch_create_frames, + batch_video_to_greyscale, change_fps_of_multiple_videos, change_img_format, + change_single_video_fps, clahe_enhance_video, clip_video_in_range, + clip_videos_by_frame_ids, convert_to_avi, convert_to_bmp, convert_to_jpeg, + convert_to_mov, convert_to_mp4, convert_to_png, convert_to_tiff, + convert_to_webm, convert_to_webp, + convert_video_powerpoint_compatible_format, copy_img_folder, + crop_multiple_videos, crop_multiple_videos_circles, + crop_multiple_videos_polygons, crop_single_video, crop_single_video_circle, + crop_single_video_polygon, downsample_video, extract_frame_range, + extract_frames_single_video, frames_to_movie, gif_creator, + multi_split_video, remove_beginning_of_video, resize_videos_by_height, + resize_videos_by_width, roi_blurbox, superimpose_elapsed_time, + superimpose_frame_count, superimpose_freetext, superimpose_overlay_video, + superimpose_video_names, superimpose_video_progressbar, + video_bg_subtraction_mp, video_bg_subtraction, video_concatenator, + video_to_greyscale, watermark_video, rotate_video, flip_videos) + + +def upsample_fps(video_path: Union[str, os.PathLike], + fps: int, + quality: Optional[int] = 60, + save_dir: Optional[Union[str, os.PathLike]] = None) -> None: + """ + Upsample the frame rate of a video or all videos in a directory to a specified fps with a given quality. + + .. note:: + Uses ``ffmpeg minterpolate`` + + .. warning:: + Long run-times for higher resolution videos. 
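+
+    .. note::
+       Under the hood, the command is of the form ``ffmpeg -i <in> -filter:v "minterpolate='fps=<fps>:mi_mode=mci:mc_mode=aobmc:me_mode=bidir'" -c:v libx264 -crf <crf> <out>``, where the CRF value is mapped from ``quality``.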
+
+    :param Union[str, os.PathLike] video_path: The path to the input video file or directory containing video files.
+    :param int fps: The target frame rate to upsample the video(s) to.
+    :param Optional[int] quality: The quality level of the output video(s), represented as a percentage (1-100). Lower values indicate higher quality. Default is 60.
+    :param Optional[Union[str, os.PathLike]] save_dir: The directory to save the upsampled video(s). If None, the videos will be saved in the same directory as the input video(s).
+    :return: None. The function saves the upsampled video(s) to the specified directory.
+
+    :example:
+    >>> upsample_fps(video_path='/Users/simon/Desktop/Box2_IF19_7_20211109T173625_4_851_873_1_cropped.mp4', fps=100, quality=100)
+    """
+
+    timer = SimbaTimer(start=True)
+    check_ffmpeg_available(raise_error=True)
+    check_int(name=f'{upsample_fps.__name__} quality', value=quality, min_value=1, max_value=100)
+    check_float(name=f'{upsample_fps.__name__} fps', value=fps, min_value=10e-16)
+    crf_lk = percent_to_crf_lookup()
+    crf = crf_lk[str(quality)]
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=upsample_fps.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, ext = get_fn_ext(video_path)
+        input_video_meta = get_video_meta_data(video_path=video_path)
+        if input_video_meta['fps'] >= fps:
+            FrameRangeWarning(msg=f"The FPS of the input video named {video_name} ({input_video_meta['fps']}) is the same or larger than the upsampled target FPS ({fps})", source=upsample_fps.__name__)
+        print(f'Up-sampling video {video_name} from {input_video_meta["fps"]} to {fps} FPS (Video {file_cnt + 1}/{len(video_paths)})...')
+        save_path = os.path.join(save_dir, f'{video_name}_upsampled{ext}')
+        cmd = f"""ffmpeg -i "{video_path}" -filter:v "minterpolate='fps={fps}:scd=none:mi_mode=mci:mc_mode=aobmc:me_mode=bidir:me=epzs:search_param=16:vsbmc=1'" -c:v libx264 -crf {crf} "{save_path}" -loglevel error -stats -y"""
+        subprocess.call(cmd, shell=True)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(video_paths)} video(s) upsampled to {fps} and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=upsample_fps.__name__, )
+
+
+class UpsampleVideospopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="UPSAMPLE VIDEOS USING INTERPOLATION (WARNING: LONG RUN-TIMES)")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.fps_dropdown = DropDownMenu(settings_frm, "NEW FRAME-RATE (FPS):", list(range(1, 500)), labelwidth=25)
+        self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY (%):", list(range(10, 110, 10)), labelwidth=25)
+
+        self.fps_dropdown.setChoices(60)
+        self.quality_dropdown.setChoices(60)
+        settings_frm.grid(row=0, column=0, sticky="NW")
+        self.fps_dropdown.grid(row=0, column=0, sticky="NW")
+        self.quality_dropdown.grid(row=1, column=0, sticky="NW")
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - UP-SAMPLE", icon_name=Keys.DOCUMENTATION.value,
icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - UP-SAMPLE", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        target_fps = int(self.fps_dropdown.getChoices())
+        target_quality = int(self.quality_dropdown.getChoices())
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+
+        threading.Thread(target=upsample_fps, kwargs={'video_path': data_path,
+                                                      'fps': target_fps,
+                                                      'quality': target_quality}).start()
+
+
+# get_video_meta_data(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/interpolation_test/results.mp4')
+# get_video_meta_data('/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/interpolation_test/Together_1.mp4')
+#
+#
+UpsampleVideospopUp()
+#upsample_fps(video_path='/Users/simon/Desktop/Box2_IF19_7_20211109T173625_4_851_873_1_cropped.mp4', fps=100, quality=100)
diff --git a/simba/sandbox/inset_overlay_video.py b/simba/sandbox/inset_overlay_video.py
new file mode 100644
index 000000000..82e562dab
--- /dev/null
+++ b/simba/sandbox/inset_overlay_video.py
@@ -0,0 +1,79 @@
+from typing import Union, Optional
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+import os
+import subprocess
+from simba.utils.read_write import get_fn_ext, find_all_videos_in_directory, get_video_meta_data
+from simba.utils.checks import check_float, check_str, check_if_dir_exists, check_file_exist_and_readable
+from simba.utils.errors import InvalidInputError
+from simba.utils.printing import SimbaTimer, stdout_success
+
+def inset_overlay_video(video_path: Union[str, os.PathLike],
+                        overlay_video_path: Union[str, os.PathLike],
+                        position: Optional[Literal['top_left', 'bottom_right', 'top_right', 'bottom_left', 'center']] = 'top_left',
+                        opacity: Optional[float] = 0.5,
+                        scale: Optional[float] = 0.05,
+                        save_dir: Optional[Union[str, os.PathLike]] = None) -> None:
+    """
+    Inset a video overlay on a second video with specified size, opacity, and location.
+
+    .. video:: _static/img/inset_overlay_video.webm
+       :loop:
+
+    :param Union[str, os.PathLike] video_path: The path to the video file.
+    :param Union[str, os.PathLike] overlay_video_path: The path to the video to be inserted into the ``video_path`` video.
+    :param Optional[str] position: The position of the inset overlay video.
Options: 'top_left', 'bottom_right', 'top_right', 'bottom_left', 'center'.
+    :param Optional[float] opacity: The opacity of the inset overlay video as a value between 0-1.0, with 1.0 meaning fully opaque. Default: 0.5.
+    :param Optional[float] scale: The scale of the inset overlay video as a ratio of the image size. Default: 0.05.
+    :param Optional[Union[str, os.PathLike]] save_dir: The location where to save the output video. If None, then saves the video in the same directory as the first video.
+    :return: None
+
+    :example:
+    >>> inset_overlay_video(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/multi_animal_dlc_two_c57/project_folder/videos/watermark/Together_1_powerpointready.mp4', overlay_video_path='/Users/simon/Desktop/splash.png', position='top_left', opacity=1.0, scale=0.2)
+    """
+
+    timer = SimbaTimer(start=True)
+    POSITIONS = ['top_left', 'bottom_right', 'top_right', 'bottom_left', 'center']
+    check_float(name=f'{inset_overlay_video.__name__} opacity', value=opacity, min_value=0.001, max_value=1.0)
+    check_float(name=f'{inset_overlay_video.__name__} scale', value=scale, min_value=0.001, max_value=0.999)
+    check_str(name=f'{inset_overlay_video.__name__} position', value=position, options=POSITIONS)
+    check_file_exist_and_readable(file_path=overlay_video_path)
+
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=inset_overlay_video.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, video_ext = get_fn_ext(video_path)
+        _ = get_video_meta_data(video_path=video_path)
+        print(f'Inserting overlay onto {video_name} (Video {file_cnt+1}/{len(video_paths)})...')
+        out_path = os.path.join(save_dir, f'{video_name}_inset_overlay{video_ext}')
+        if position == POSITIONS[0]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{overlay_video_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[inset];[0:v][inset]overlay=0:0" "{out_path}" -y'
+        elif position == POSITIONS[1]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{overlay_video_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[inset];[0:v][inset]overlay=W-w:H-h" "{out_path}" -y'
+        elif position == POSITIONS[2]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{overlay_video_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[inset];[0:v][inset]overlay=W-w:0" "{out_path}" -y'
+        elif position == POSITIONS[3]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{overlay_video_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[inset];[0:v][inset]overlay=0:H-h" "{out_path}" -y'
+        else:
+            cmd = f'ffmpeg -i "{video_path}" -i "{overlay_video_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[inset];[0:v][inset]overlay=(W-w)/2:(H-h)/2" "{out_path}" -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f'{len(video_paths)} overlay video(s) saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
+
+
+#
+# inset_overlay_video(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/2022-06-20_NOB_DOT_4.mp4',
+#                     overlay_video_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/frames/output/heatmaps_locations/2022-06-20_NOB_DOT_4.mp4',
+#                     position='top_left', opacity=0.5, scale=0.3)
diff --git a/simba/sandbox/interpolate.py b/simba/sandbox/interpolate.py
new file mode 100644
index 000000000..9e2e72070
--- /dev/null
+++ b/simba/sandbox/interpolate.py
@@ -0,0 +1,111 @@
+import pandas as pd
+pd.options.mode.chained_assignment = None
+import os
+from typing import Optional, Union, List
+from copy import deepcopy
+
+try:
+    from typing import Literal
+except ImportError:
+    from typing_extensions import Literal
+
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.checks import (check_str, check_valid_lst, check_file_exist_and_readable)
+from simba.utils.enums import TagNames
+from simba.utils.errors import DataHeaderError, InvalidInputError
+from simba.utils.printing import SimbaTimer, log_event, stdout_success
+from simba.utils.read_write import (find_files_of_filetypes_in_directory, get_fn_ext, read_df, write_df, copy_files_to_directory)
+from simba.utils.data import animal_interpolator, body_part_interpolator
+
+
+class Interpolate(ConfigReader):
+    """
+    Interpolate missing body-parts in pose-estimation data. "Missing" is defined as either (i) when a single body-part is None, or
+    (ii) when all body-parts belonging to an animal are identical (i.e., the same 2D coordinate or all None).
+
+    .. image:: _static/img/interpolation_comparison.png
+       :width: 500
+       :align: center
+
+    .. note::
+       `Interpolation tutorial `__.
+
+    .. important::
+       The interpolated data overwrites the original data on disk. If the original data is required, pass ``copy_originals = True`` to save a copy of the original data.
+
+    :param Union[str, os.PathLike] config_path: Path to SimBA project config file in Configparser format.
+    :param Union[str, os.PathLike] data_path: Path to a directory, path to a file, or a list of file paths to files with pose-estimation data in CSV or parquet format.
+    :param Optional[Literal['body-parts', 'animals']] type: If 'animals', then interpolation is performed when all body-parts belonging to an animal are identical (i.e., the same 2D coordinate or all None). If 'body-parts', then all body-parts that are None will be interpolated. Default: body-parts.
+    :param Optional[Literal['nearest', 'linear', 'quadratic']] method: The interpolation method used to fill missing values: 'nearest', 'linear', or 'quadratic'. Default: nearest.
+    :param Optional[bool] multi_index_df_headers: If truth-like, then the input data is anticipated to have multiple header columns, and output columns will have multiple header columns. Default: False.
+    :param Optional[bool] copy_originals: If truth-like, then the pre-interpolated, original data will be stored in a subdirectory of the original data. The subdirectory is named according to the type of interpolation and datetime of the operation.
+ + :example: + >>> interpolator = Interpolate(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', data_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/csv/input_csv/test', type='body-parts', multi_index_df_headers=True, copy_originals=True) + >>> interpolator.run() + + """ + def __init__(self, + config_path: Union[str, os.PathLike], + data_path: Union[str, os.PathLike, List[Union[str, os.PathLike]]], + type: Optional[Literal['body-parts', 'animals']] = 'body-parts', + method: Optional[Literal['nearest', 'linear', 'quadratic']] = 'nearest', + multi_index_df_headers: Optional[bool] = False, + copy_originals: Optional[bool] = False) -> None: + + log_event(logger_name=str(self.__class__.__name__), log_type=TagNames.CLASS_INIT.value, msg=self.create_log_msg_from_init_args(locals=locals())) + ConfigReader.__init__(self, config_path=config_path, read_video_info=False) + check_str(name=f'{self.__class__.__name__} type', value=type.lower(), options=('body-parts', 'animals')) + check_str(name=f'{self.__class__.__name__} method', value=method.lower(), options=('nearest', 'linear', 'quadratic')) + if isinstance(data_path, list): + check_valid_lst(data=data_path, source=self.__class__.__name__, valid_dtypes=(str,)) + for i in data_path: check_file_exist_and_readable(file_path=i) + self.file_paths = deepcopy(data_path) + elif os.path.isdir(data_path): + self.file_paths = find_files_of_filetypes_in_directory(directory=data_path, extensions=[f'.{self.file_type}'], raise_error=True) + elif os.path.isfile(data_path): + check_file_exist_and_readable(file_path=data_path) + self.file_paths = [data_path] + else: + raise InvalidInputError(msg=f'{data_path} is not a valid data directory, or a valid file path, or a valid list of file paths', source=self.__class__.__name__) + if copy_originals: + self.originals_dir = os.path.join(os.path.dirname(self.file_paths[0]), f"Pre_{method}_{type}_interpolation_{self.datetime}") + os.makedirs(self.originals_dir) + self.type, self.method, self.multi_index_df_headers, self.copy_originals = type.lower(), method.lower(), multi_index_df_headers, copy_originals + + def __insert_multiindex_header(self, df: pd.DataFrame): + multi_idx_header = [] + for i in range(len(df.columns)): + multi_idx_header.append(("IMPORTED_POSE", "IMPORTED_POSE", list(df.columns)[i])) + df.columns = pd.MultiIndex.from_tuples(multi_idx_header) + return df + + def run(self): + for file_cnt, file_path in enumerate(self.file_paths): + video_timer = SimbaTimer(start=True) + _, self.video_name, _ = get_fn_ext(filepath=file_path) + df = read_df(file_path=file_path, file_type=self.file_type, check_multiindex=True) + if len(df.columns) != len(self.bp_headers): + raise DataHeaderError( msg=f"The file {file_path} contains {len(df.columns)} columns, but your SimBA project expects {len(self.bp_headers)} columns representing {int(len(self.bp_headers) / 3)} body-parts (x, y, p). 
Check that the {self.body_parts_path} lists the correct body-parts associated with the project", source=self.__class__.__name__)
+            df.columns = self.bp_headers
+            df = df.apply(pd.to_numeric, errors="coerce").fillna(0)
+            df[df < 0] = 0
+            if self.type == 'animals':
+                df = animal_interpolator(df=df, animal_bp_dict=self.animal_bp_dict, source=file_path, method=self.method)
+            else:
+                df = body_part_interpolator(df=df, animal_bp_dict=self.animal_bp_dict, source=file_path, method=self.method)
+            if self.multi_index_df_headers:
+                df = self.__insert_multiindex_header(df=df)
+            if self.copy_originals:
+                copy_files_to_directory(file_paths=[file_path], dir=self.originals_dir)
+            write_df(df=df, file_type=self.file_type, save_path=file_path, multi_idx_header=self.multi_index_df_headers)
+            video_timer.stop_timer()
+            print(f"Video {self.video_name} interpolated (elapsed time {video_timer.elapsed_time_str}) ...")
+        self.timer.stop_timer()
+        if self.copy_originals:
+            msg = f"{len(self.file_paths)} data file(s) interpolated using {self.type} {self.method} methods. Originals saved in {self.originals_dir} directory."
+        else:
+            msg = f"{len(self.file_paths)} data file(s) interpolated using {self.type} {self.method} methods."
+        stdout_success(msg=msg, elapsed_time=self.timer.elapsed_time_str, source=self.__class__.__name__)
+
diff --git a/simba/sandbox/is_inside_circle.py b/simba/sandbox/is_inside_circle.py
new file mode 100644
index 000000000..6d18445c1
--- /dev/null
+++ b/simba/sandbox/is_inside_circle.py
@@ -0,0 +1,39 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import math
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit
+def _cuda_is_inside_circle(x, y, r, results):
+    i = cuda.grid(1)
+    # Guard with >=: a thread index equal to results.shape[0] would write out of bounds.
+    if i >= results.shape[0]:
+        return
+    else:
+        # Euclidean distance from point i to the circle center y[0].
+        p = (math.sqrt((x[i][0] - y[0][0]) ** 2 + (x[i][1] - y[0][1]) ** 2))
+        if p <= r[0]:
+            results[i] = 1
+
+def is_inside_circle(x: np.ndarray, y: np.ndarray, r: float) -> np.ndarray:
+    """
+    Determines whether points in array `x` are inside the circle with center `y` and radius `r`.
+
+    :param np.ndarray x: 2d numeric np.ndarray size (N, 2).
+    :param np.ndarray y: 2d numeric np.ndarray size (1, 2) holding the circle center [x, y].
+    :param float r: The circle radius.
+    :return np.ndarray: 1d numeric array of size (N,) with 1s representing the point being inside the circle and 0 if the point is outside the circle.
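+
+    :example:
+    >>> x = np.random.randint(0, 100, (100, 2))   # hypothetical points to test
+    >>> y = np.array([[50, 50]])                  # circle center
+    >>> results = is_inside_circle(x=x, y=y, r=25.0)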
+    """
+
+    x = np.ascontiguousarray(x).astype(np.int32)
+    y = np.ascontiguousarray(y).astype(np.int32)
+    x_dev = cuda.to_device(x)
+    y_dev = cuda.to_device(y)
+    r = np.array([r]).astype(np.float32)
+    r_dev = cuda.to_device(r)
+    # Transfer a zero-initialized results array: cuda.device_array() returns uninitialized
+    # memory, and the kernel only ever writes 1s.
+    results = cuda.to_device(np.zeros(x.shape[0], dtype=np.int8))
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    del x, y
+    _cuda_is_inside_circle[bpg, THREADS_PER_BLOCK](x_dev, y_dev, r_dev, results)
+    results = results.copy_to_host()
+    return results
diff --git a/simba/sandbox/is_inside_polygon.py b/simba/sandbox/is_inside_polygon.py
new file mode 100644
index 000000000..b106a05fa
--- /dev/null
+++ b/simba/sandbox/is_inside_polygon.py
@@ -0,0 +1,65 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit
+def _cuda_is_inside_polygon(x, p, r):
+    i = cuda.grid(1)
+    if i >= r.shape[0]:
+        return
+    else:
+        # Even-odd (ray-casting) point-in-polygon test for point (pt_x, pt_y) against
+        # polygon p. The scalars get their own names: re-binding the array argument x
+        # to a scalar inside the kernel would retype the variable.
+        pt_x, pt_y, n = x[i][0], x[i][1], len(p)
+        p2x, p2y, xints, inside = 0.0, 0.0, 0.0, False
+        p1x, p1y = p[0]
+        for j in range(n + 1):
+            p2x, p2y = p[j % n]
+            if (
+                (pt_y > min(p1y, p2y))
+                and (pt_y <= max(p1y, p2y))
+                and (pt_x <= max(p1x, p2x))
+            ):
+                if p1y != p2y:
+                    xints = (pt_y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
+                if p1x == p2x or pt_x <= xints:
+                    inside = not inside
+            p1x, p1y = p2x, p2y
+        if inside:
+            r[i] = 1
+
+
+def is_inside_polygon(x: np.ndarray, y: np.ndarray) -> np.ndarray:
+    """
+    Determines whether points in array `x` are inside the polygon defined by the vertices in array `y`.
+
+    This function uses GPU acceleration to perform the point-in-polygon test. The points in `x` are tested against
+    the polygon defined by the vertices in `y`. The result is an array where each element indicates whether
+    the corresponding point is inside the polygon.
+
+    .. image:: _static/img/is_inside_polygon_cuda.webp
+       :width: 500
+       :align: center
+
+    :param np.ndarray x: An array of shape (N, 2) where each row represents a point in 2D space. The points are checked against the polygon.
+    :param np.ndarray y: An array of shape (M, 2) where each row represents a vertex of the polygon in 2D space.
+    :return np.ndarray: An array of shape (N,) where each element is 1 if the corresponding point in `x` is inside the polygon defined by `y`, and 0 otherwise.
+
+    :example:
+    >>> x = np.random.randint(0, 200, (10, 2)).astype(np.int32)
+    >>> y = np.random.randint(0, 200, (4, 2)).astype(np.int32)
+    >>> results = is_inside_polygon(x=x, y=y)
+    >>> print(results)
+    >>> [1 0 1 0 1 1 0 0 1 0]
+    """
+    x = np.ascontiguousarray(x).astype(np.int32)
+    y = np.ascontiguousarray(y).astype(np.int32)
+    x_dev = cuda.to_device(x)
+    y_dev = cuda.to_device(y)
+    # Zero-initialize the results array (cuda.device_array() memory is uninitialized).
+    results = cuda.to_device(np.zeros(x.shape[0], dtype=np.int8))
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_is_inside_polygon[bpg, THREADS_PER_BLOCK](x_dev, y_dev, results)
+    results = results.copy_to_host()
+    return results
\ No newline at end of file
diff --git a/simba/sandbox/is_inside_rectangle.py b/simba/sandbox/is_inside_rectangle.py
new file mode 100644
index 000000000..3920380a8
--- /dev/null
+++ b/simba/sandbox/is_inside_rectangle.py
@@ -0,0 +1,45 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit
+def _cuda_is_inside_rectangle(x, y, r):
+    i = cuda.grid(1)
+    if i >= r.shape[0]:
+        return
+    else:
+        if (x[i][0] >= y[0][0]) and (x[i][0] <= y[1][0]):
+            if (x[i][1] >= y[0][1]) and (x[i][1] <= y[1][1]):
+                r[i] = 1
+
+def is_inside_rectangle(x: np.ndarray, y: np.ndarray) -> np.ndarray:
+    """
+    Determines whether points in array `x` are inside the rectangle defined by the top left and bottom right vertices in array `y`.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/is_inside_rectangle.csv
+       :widths: 10, 45, 45
+       :align: center
+       :class: simba-table
+       :header-rows: 1
+
+    :param np.ndarray x: 2d numeric np.ndarray size (N, 2).
+    :param np.ndarray y: 2d numeric np.ndarray size (2, 2) (top left[x, y], bottom right[x, y])
+    :return np.ndarray: 1d numeric array of size (N,) with 1s representing the point being inside the rectangle and 0 if the point is outside the rectangle.
+    """
+
+    x = np.ascontiguousarray(x).astype(np.int32)
+    y = np.ascontiguousarray(y).astype(np.int32)
+    x_dev = cuda.to_device(x)
+    y_dev = cuda.to_device(y)
+    # Zero-initialize the results array (cuda.device_array() memory is uninitialized).
+    results = cuda.to_device(np.zeros(x.shape[0], dtype=np.int8))
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_is_inside_rectangle[bpg, THREADS_PER_BLOCK](x_dev, y_dev, results)
+    results = results.copy_to_host()
+    return results
\ No newline at end of file
diff --git a/simba/sandbox/isolation_forest.py b/simba/sandbox/isolation_forest.py
new file mode 100644
index 000000000..9cf30b6e1
--- /dev/null
+++ b/simba/sandbox/isolation_forest.py
@@ -0,0 +1,82 @@
+from sklearn.ensemble import IsolationForest
+import numpy as np
+from typing import Union, Optional
+from sklearn.datasets import make_blobs
+import pandas as pd
+
+from simba.utils.checks import check_valid_array, check_float, check_int
+from simba.mixins.plotting_mixin import PlottingMixin
+
+
+def isolation_forest(x: np.ndarray,
+                     estimators: Union[int, float] = 0.2,
+                     groupby_idx: Optional[int] = None,
+                     normalize: Optional[bool] = False):
+
+    """
+    An implementation of the Isolation Forest algorithm for outlier detection.
+
+    .. image:: _static/img/isolation_forest.png
+       :width: 700
+       :align: center
+
+    .. note::
+       The isolation forest scores are negated. Thus, higher values indicate more atypical (outlier) data points.
+
+    :param np.ndarray x: 2-D array with feature values.
+    :param Union[int, float] estimators: Number of trees in the forest. If the value is a float, then interpreted as a ratio of the number of observations in x.
+    :param Optional[int] groupby_idx: If int, then the index 1 of ``data`` for which to group the data and compute LOF on each segment. E.g., can be a field holding a cluster identifier.
+    :param Optional[bool] normalize: Whether to normalize the outlier score between 0 and 1. Defaults to False.
+    :return: 1D array of outlier scores.
+
+    :example:
+    >>> x, lbls = make_blobs(n_samples=10000, n_features=2, centers=10, random_state=42)
+    >>> x = np.hstack((x, lbls.reshape(-1, 1)))
+    >>> scores = isolation_forest(x=x, estimators=10, normalize=True)
+    >>> results = np.hstack((x[:, 0:2], scores.reshape(scores.shape[0], 1)))
+    >>> results = pd.DataFrame(results, columns=['X', 'Y', 'ISOLATION SCORE'])
+    >>> PlottingMixin.continuous_scatter(data=results, palette='seismic', bg_clr='lightgrey', columns=['X', 'Y', 'ISOLATION SCORE'], size=30)
+    """
+
+    def get_if_scores(x: np.ndarray, estimators: Union[int, float]):
+        if isinstance(estimators, float):
+            check_float(name=f'{isolation_forest.__name__} estimators', value=estimators, min_value=10e-6, max_value=1.0)
+            # A float is interpreted as a ratio of the observation count; n_estimators must be an int >= 1.
+            estimators = max(1, int(x.shape[0] * estimators))
+        else:
+            check_int(name=f'{isolation_forest.__name__} estimators', value=estimators, min_value=1)
+        mdl = IsolationForest(n_estimators=estimators, n_jobs=-1, contamination='auto')
+        r = abs(mdl.fit(x).score_samples(x))
+        if normalize:
+            r = (r - np.min(r)) / (np.max(r) - np.min(r))
+        return r
+
+    if groupby_idx is None:
+        check_valid_array(data=x, source=isolation_forest.__name__, accepted_ndims=(2,), min_axis_1=2, accepted_dtypes=(np.int64, np.int32, np.int8, np.float32, np.float64, int, float))
+        return get_if_scores(x=x, estimators=estimators)
+
+    else:
+        check_valid_array(data=x, source=isolation_forest.__name__, accepted_ndims=(2,), min_axis_1=3, accepted_dtypes=(np.int64, np.int32, np.int8, np.float32, np.float64, int, float))
+        results = []
+        data_w_idx = np.hstack((np.arange(0, x.shape[0]).reshape(-1, 1), x))
+        unique_c = np.unique(x[:, groupby_idx]).astype(np.float32)
+        if -1.0 in unique_c:
+            # Rows labelled -1 (e.g., unclustered observations) are scored with the maximum outlier score below.
+            unique_c = unique_c[np.where(unique_c != -1)]
+            unclustered_idx = np.argwhere(x[:, groupby_idx] == -1.0).flatten()
+            unclustered = data_w_idx[unclustered_idx]
+            data_w_idx = np.delete(data_w_idx, unclustered_idx, axis=0)
+        else:
+            unclustered = None
+        for i in unique_c:
+            s_data = data_w_idx[np.argwhere(data_w_idx[:, groupby_idx+1] == i)].reshape(-1, data_w_idx.shape[1])
+            idx = s_data[:, 0].reshape(s_data.shape[0], 1)
+            s_data = np.delete(s_data, [0, groupby_idx+1], 1)
+            i_f = get_if_scores(s_data, estimators).reshape(s_data.shape[0], 1)
+            results.append(np.hstack((idx, i_f)))
+        x = np.concatenate(results, axis=0)
+        if unclustered is not None:
+            max_if = np.full((unclustered.shape[0], 1), np.max(x[:, -1]))
+            unclustered = np.hstack((unclustered, max_if))[:, [0, -1]]
+            x = np.vstack((x, unclustered))
+        return x[np.argsort(x[:, 0])][:, -1]
\ No newline at end of file
diff --git a/simba/sandbox/jaccard_distance.py b/simba/sandbox/jaccard_distance.py
new file mode 100644
index 000000000..37699b657
--- /dev/null
+++ b/simba/sandbox/jaccard_distance.py
@@ -0,0 +1,33 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+
+def jaccard_distance(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Calculate the Jaccard distance between two 1D NumPy arrays.
+
+    The Jaccard distance is a measure of dissimilarity between two sets: one minus the Jaccard
+    similarity, i.e., one minus the size of the intersection of the sets divided by the size of their union.
+
+    :param np.ndarray x: The first 1D NumPy array.
+ :param np.ndarray y: The second 1D NumPy array. + :return float: The Jaccard distance between arrays x and y. + + :example: + >>> x = np.random.randint(0, 5, (100)) + >>> y = np.random.randint(0, 7, (100)) + >>> jaccard_distance(x=x, y=y) + >>> 0.71428573 + """ + check_valid_array(data=x, source=f'{jaccard_distance.__name__} x', accepted_ndims=(1,)) + check_valid_array(data=y, source=f'{jaccard_distance.__name__} y', accepted_ndims=(1,), accepted_dtypes=[x.dtype.type]) + u_x, u_y = np.unique(x), np.unique(y) + return np.float32(1 -(len(np.intersect1d(u_x, u_y)) / len(np.unique(np.hstack((u_x, u_y)))))) + + + + #union = np.unique(np.hstack((x, y))) + + +x = np.random.randint(0, 5, (100)) +y = np.random.randint(0, 7, (100)) +jaccard_distance(x=x, y=y) \ No newline at end of file diff --git a/simba/sandbox/joint_plotter.py b/simba/sandbox/joint_plotter.py new file mode 100644 index 000000000..af58fb066 --- /dev/null +++ b/simba/sandbox/joint_plotter.py @@ -0,0 +1,159 @@ +import os +from typing import Union, Optional, List +import numpy as np +import pandas as pd +import seaborn as sns +import matplotlib.pyplot as plt +from simba.utils.checks import (check_valid_array, check_instance, check_str, check_that_column_exist, check_if_dir_exists) +from simba.utils.lookups import get_categorical_palettes +from simba.utils.errors import InvalidInputError +import warnings +warnings.filterwarnings("ignore") + + +from simba.utils.read_write import read_pickle + + +def joint_plot(data: Union[np.ndarray, pd.DataFrame], + columns: Optional[List[str]] = ('X', 'Y', 'Cluster'), + palette: Optional[str] = 'Set1', + kind: Optional[str] = 'scatter', + size: Optional[int] = 10, + title: Optional[str] = None, + save_path: Optional[Union[str, os.PathLike]] = None): + + """ + :example: + >>> x = np.hstack([np.random.normal(loc=10, scale=4, size=(100, 2)), np.random.randint(0, 1, size=(100, 1))]) + >>> y = np.hstack([np.random.normal(loc=25, scale=4, size=(100, 2)), np.random.randint(1, 2, size=(100, 1))]) + >>> plot = joint_plot(data=np.vstack([x, y]), columns=['X', 'Y', 'Cluster'], title='The plot') + + >>> data = read_pickle(data_path='/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/clusters/ghostly_banzai.pickle') + >>> embedding_data = data['DR_MODEL']['MODEL'].embedding_ + >>> labels = data['CLUSTER_MODEL']['MODEL'].labels_ + >>> data = np.hstack([embedding_data, labels.reshape(-1, 1)]) + >>> plot = joint_plot(data=data) + """ + + cmaps = get_categorical_palettes() + if palette not in cmaps: raise InvalidInputError(msg=f'{palette} is not a valid palette. 
Accepted options: {cmaps}', source=joint_plot.__name__) + check_instance(source=f'{joint_plot.__name__} data' , instance=data, accepted_types=(np.ndarray, pd.DataFrame)) + check_str(name=f'{joint_plot.__name__} kind', value=kind, options=('kde', 'reg', 'hist', 'scatter')) + if isinstance(data, pd.DataFrame): + check_that_column_exist(df=data, column_name=columns, file_name=joint_plot.__name__) + data = data[list(columns)] + else: + check_valid_array(data=data, source=joint_plot.__name__, accepted_ndims=(2,), max_axis_1=len(columns), min_axis_1=len(columns)) + data = pd.DataFrame(data, columns=list(columns)) + + sns.set_palette(palette) + pct_x = np.percentile(data[columns[0]].values, 10) + pct_y = np.percentile(data[columns[1]].values, 10) + plot = sns.jointplot(data=data, + x=columns[0], + y=columns[1], + hue=columns[2], + xlim=(data[columns[0]].min() - pct_x, data[columns[0]].max() + pct_x), + ylim=(data[columns[1]].min() - pct_y, data[columns[1]].max() + pct_y), + palette=sns.color_palette(palette, len(data[columns[2]].unique())), + kind=kind, + marginal_ticks=False, + s=size) + + if title is not None: + plot.fig.suptitle(title, va='baseline', ha='center', fontsize=15, bbox={"facecolor": "orange", "alpha": 0.5, "pad": 0}) + if save_path is not None: + check_if_dir_exists(in_dir=os.path.dirname(save_path)) + plot.savefig(save_path) + plt.close("all") + else: + return plot + +def categorical_scatter(data: Union[np.ndarray, pd.DataFrame], + columns: Optional[List[str]] = ('X', 'Y', 'Cluster'), + palette: Optional[str] = 'Set1', + size: Optional[int] = 10, + title: Optional[str] = None, + save_path: Optional[Union[str, os.PathLike]] = None): + + cmaps = get_categorical_palettes() + if palette not in cmaps: raise InvalidInputError(msg=f'{palette} is not a valid palette. 
Accepted options: {cmaps}.', source=joint_plot.__name__) + check_instance(source=f'{joint_plot.__name__} data' , instance=data, accepted_types=(np.ndarray, pd.DataFrame)) + if isinstance(data, pd.DataFrame): + check_that_column_exist(df=data, column_name=columns, file_name=joint_plot.__name__) + data = data[list(columns)] + else: + check_valid_array(data=data, source=joint_plot.__name__, accepted_ndims=(2,), max_axis_1=len(columns), min_axis_1=len(columns)) + data = pd.DataFrame(data, columns=list(columns)) + + pct_x = np.percentile(data[columns[0]].values, 25) + pct_y = np.percentile(data[columns[1]].values, 25) + plt.xlim(data[columns[0]].min() - pct_x, data[columns[0]].max() + pct_x) + plt.ylim(data[columns[1]].min() - pct_y, data[columns[1]].max() + pct_y) + + plot = sns.scatterplot(data=data, + x=columns[0], + y=columns[1], + hue=columns[2], + palette=sns.color_palette(palette, len(data[columns[2]].unique())), + s=size) + if title is not None: + plt.title(title, ha="center", fontsize=15, bbox={"facecolor": "orange", "alpha": 0.5, "pad": 0}) + if save_path is not None: + check_if_dir_exists(in_dir=os.path.dirname(save_path)) + plt.savefig(save_path) + plt.close("all") + else: + return plot + +def continuous_scatter(data: Union[np.ndarray, pd.DataFrame], + columns: Optional[List[str]] = ('X', 'Y', 'Cluster'), + palette: Optional[str] = 'Set1', + size: Optional[int] = 10, + title: Optional[str] = None, + save_path: Optional[Union[str, os.PathLike]] = None): + + check_instance(source=f'{joint_plot.__name__} data', instance=data, accepted_types=(np.ndarray, pd.DataFrame)) + if isinstance(data, pd.DataFrame): + check_that_column_exist(df=data, column_name=columns, file_name=joint_plot.__name__) + data = data[list(columns)] + else: + check_valid_array(data=data, source=joint_plot.__name__, accepted_ndims=(2,), max_axis_1=len(columns), min_axis_1=len(columns)) + data = pd.DataFrame(data, columns=list(columns)) + + fig, ax = plt.subplots() + plt.xlabel(columns[0]) + plt.ylabel(columns[1]) + plot = ax.scatter(data[columns[0]], data[columns[1]], c=data[columns[2]], s=size, cmap=palette) + cbar = fig.colorbar(plot) + cbar.set_label(columns[2], loc="center") + if title is not None: + plt.title(title, ha="center", fontsize=15, bbox={"facecolor": "orange", "alpha": 0.5, "pad": 0}) + if save_path is not None: + check_if_dir_exists(in_dir=os.path.dirname(save_path)) + fig.savefig(save_path) + plt.close("all") + else: + return plot + +data = read_pickle(data_path='/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/clusters/beautiful_beaver.pickle') +embedding_data = data['DR_MODEL']['MODEL'].embedding_ +#labels = data['DATA']['BOUTS_TARGETS']['CLASSIFIER'].values + +labels = data['CLUSTER_MODEL']['MODEL'].labels_ +data = pd.DataFrame(np.hstack([embedding_data, labels.reshape(-1, 1)]), columns=['X', 'Y', 'Cluster']) +#joint_plot(data=data, save_path='/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/cluster_vis/beautiful_beaver.png', palette='tab20', size=20, title='beautiful_beaver', kind='scatter') + +#categorical_scatter(data=data, save_path='/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/cluster_vis/beautiful_beaver.png', palette='tab20', size=20, title='beautiful_beaver') + + +continuous_scatter(data=data, save_path='/Users/simon/Desktop/envs/NG_Unsupervised/project_folder/cluster_vis/beautiful_beaver.png', palette='magma', size=20, title='beautiful_beaver') + + +# x = np.hstack([np.random.normal(loc=10, scale=4, size=(100, 2)), np.random.randint(0, 3, size=(100, 1))]) +# y = 
np.hstack([np.random.normal(loc=25, scale=4, size=(100, 2)), np.random.randint(3, 9, size=(100, 1))])
+# data = pd.DataFrame(np.vstack([x, y]), columns=['X', 'Y', 'Cluster'])
+
+
+#plot = continuous_scatter(data=np.vstack([x, y]), columns=['X', 'Y', 'Cluster'], title='The plot')
diff --git a/simba/sandbox/kalman.py b/simba/sandbox/kalman.py
new file mode 100644
index 000000000..2302bf6a9
--- /dev/null
+++ b/simba/sandbox/kalman.py
@@ -0,0 +1,48 @@
+import numpy as np
+import pandas as pd
+
+
+class KalmanFilter(object):
+    def __init__(self, H: np.ndarray, fps: float):
+        # Constant-acceleration state [position, velocity, acceleration] sampled at 1/fps seconds.
+        dt = 1.0 / fps
+        self.F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])
+        self.H = H
+        self.B = 0
+        self.n = self.F.shape[1]
+        self.m = self.H.shape[1]
+        self.Q = np.array([[0.05, 0.05, 0.0], [0.05, 0.05, 0.0], [0.0, 0.0, 0.0]])
+        self.R = np.array([0.5]).reshape(1, 1)
+        self.P = np.eye(self.n)
+        self.x = np.zeros((self.n, 1))
+
+    def predict(self, u=0):
+        self.x = np.dot(self.F, self.x) + np.dot(self.B, u)
+        self.P = np.dot(np.dot(self.F, self.P), self.F.T) + self.Q
+        return self.x
+
+    def update(self, z):
+        y = z - np.dot(self.H, self.x)
+        S = self.R + np.dot(self.H, np.dot(self.P, self.H.T))
+        K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(S))
+        self.x = self.x + np.dot(K, y)
+        I = np.eye(self.n)
+        self.P = np.dot(np.dot(I - np.dot(K, self.H), self.P), (I - np.dot(K, self.H)).T) + np.dot(np.dot(K, self.R), K.T)
+
+
+H = np.array([1, 0, 0]).reshape(1, 3)
+kf = KalmanFilter(fps=60, H=H)
+
+df = pd.read_csv('/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/csv/outlier_corrected_movement_location/Together_1.csv')
+for i in df.columns:
+    x = df[i].values
+    predictions = np.full((x.shape[0]), np.nan)
+    for c, z in enumerate(x):
+        # Call predict() exactly once per observation: each call advances the filter state.
+        predictions[c] = np.dot(H, kf.predict())[0]
+        kf.update(z)
\ No newline at end of file
diff --git a/simba/sandbox/keyPoi.py b/simba/sandbox/keyPoi.py
new file mode 100644
index 000000000..33f27ff49
--- /dev/null
+++ b/simba/sandbox/keyPoi.py
@@ -0,0 +1,23 @@
+import cv2
+import json
+
+
+DATA_PATH = r"C:\troubleshooting\coco_data\annotations.json"
+IMAGE_PATH = r"C:\troubleshooting\coco_data\img\FRR_gq_Saline_0624_0.png"
+
+with open(DATA_PATH) as json_data:
+    data = json.load(json_data)
+
+
+annot = data['annotations'][0]
+img = cv2.imread(IMAGE_PATH)
+
+# Draw the COCO bounding box (bbox = [x, y, width, height]) on the image.
+top_left = (annot['bbox'][0], annot['bbox'][1])
+bottom_right = (int(annot['bbox'][0] + annot['bbox'][2]), int(annot['bbox'][1] + annot['bbox'][3]))
+
+img = cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 3)
+cv2.imshow('annotation', img)
+cv2.waitKey(5000)
\ No newline at end of file
diff --git a/simba/sandbox/kinetic_energy.py b/simba/sandbox/kinetic_energy.py
new file mode 100644
index 000000000..237868f5a
--- /dev/null
+++ b/simba/sandbox/kinetic_energy.py
@@ -0,0 +1,39 @@
+import numpy as np
+
+
+def avg_kinetic_energy(x: np.ndarray, mass: float, sample_rate: float) -> float:
+    """
+    Calculate the average kinetic energy of an object based on its velocity.
+
+    :param np.ndarray x: A 2D NumPy array of shape (n, 2), where each row contains the x and y position coordinates of the object at each time step.
+    :param float mass: The mass of the object.
+    :param float sample_rate: The sampling rate (Hz), i.e., the number of data points per second.
+    :return: The mean kinetic energy calculated from the velocity data.
+    :rtype: float
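+
+    :example:
+    >>> x = np.random.randint(0, 500, (100, 2)).astype(np.float32)  # hypothetical 2D trajectory
+    >>> avg_kinetic_energy(x=x, mass=1.0, sample_rate=30.0)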
+    """
+    delta_t = np.round(1 / sample_rate, 2)
+    # Velocity components from the position gradient; speed is their Euclidean norm.
+    vx, vy = np.gradient(x[:, 0], delta_t), np.gradient(x[:, 1], delta_t)
+    speed = np.sqrt(vx ** 2 + vy ** 2)
+    kinetic_energy = 0.5 * mass * speed ** 2
+
+    return np.mean(kinetic_energy).astype(np.float32)
+
+
+import pandas as pd
+
+x = pd.read_csv(r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement\501_MA142_Gi_CNO_0514.csv", usecols=['Nose_x', 'Nose_y']).values.astype(np.int32)
+
+#x = np.random.randint(0, 500, (10, 2))
+mass = 1
+avg_kinetic_energy(x=x, mass=mass, sample_rate=30)
diff --git a/simba/sandbox/knn.py b/simba/sandbox/knn.py
new file mode 100644
index 000000000..6fbc62b50
--- /dev/null
+++ b/simba/sandbox/knn.py
@@ -0,0 +1,86 @@
+import numpy as np
+
+def knn(data: np.ndarray,
+        k: int,
+        target: np.ndarray):
+
+    # Min-max normalize each feature column.
+    norm = np.full(data.shape, np.nan)
+    for i in range(data.shape[1]):
+        norm[:, i] = (data[:, i] - np.min(data[:, i])) / (np.max(data[:, i]) - np.min(data[:, i]))
+
+    # For every observation, store the L1 (Manhattan) distance to each observation with a
+    # different target label. Loop variables are named so they do not shadow the ``target``
+    # and ``k`` parameters, and the passed-in ``target`` is used rather than a global.
+    results = {}
+    for lbl in np.unique(target):
+        congruent_idx = np.argwhere(target == lbl).flatten()
+        incongruent_idx = np.argwhere(target != lbl).flatten()
+        for i in congruent_idx:
+            results[i] = {}
+            for j in incongruent_idx:
+                n_dist = 0
+                for f in range(norm[i, :].shape[0]):
+                    n_dist += np.abs(norm[i, f] - norm[j, f])
+                results[i][j] = n_dist
+
+    # for i in range(norm.shape[0]):
+    #     sum[i] = np.sum(norm[i])
+    # for i in np.unique(bool_target):
+    #     idx = np.argwhere(bool_target == i).flatten()
+    #     n_idx = np.argwhere(bool_target != i).flatten()
+    #     for j in idx:
+    #         results[j] = {}
+    #         for k in n_idx:
+    #             results[j][k] = np.abs(sum[j] - sum[k])[0]
+    # for k, v in results.items():
+    #     keys = list(v.keys())
+    #     values = list(v.values())
+    #     sorted_value_index = np.argsort(values)
+    #     sorted_dict = [keys[i] for i in sorted_value_index[:k]]
+    #     print(sorted_dict)
+
+    #keys = list(dict.keys())
+    #values = list()
+
+    # for i in range(sum.shape[0]):
+    #     for j in range(i, sum.shape[0]):
+    #         dist = np.abs(sum[i] - sum[j])
+    #         dist_matrix[i, j] = dist
+    #         dist_matrix[j, i] = dist
+    # for i in range(dist_matrix.)
+
+    #x = np.linalg.norm()
+    # print(x)
+
+
+data = np.array([[1, 5],
+                 [2, 4],
+                 [3, 3],
+                 [4, 2],
+                 [0, 0]])
+
+bool_target = np.array([0, 0, 0, 1, 1])
+
+knn(data=data, k=4, target=bool_target)
diff --git a/simba/sandbox/kumar.py b/simba/sandbox/kumar.py
new file mode 100644
index 000000000..a9cc1b19b
--- /dev/null
+++ b/simba/sandbox/kumar.py
@@ -0,0 +1,137 @@
+from numba import njit, jit
+import numpy as np
+from simba.utils.enums import Formats
+from simba.utils.checks import check_valid_array
+
+def kumar_hassebrook_similarity(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Kumar-Hassebrook similarity is a measure used to quantify the similarity between two vectors.
+
+    .. note::
+       A Kumar-Hassebrook similarity score of 1 indicates identical vectors, and 0 indicates no similarity.
+
+    :param np.ndarray x: 1D array representing the first feature values.
+    :param np.ndarray y: 1D array representing the second feature values.
+    :return: Kumar-Hassebrook similarity between vectors x and y.
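+
+    The similarity is computed as (this mirrors the return expression below):
+
+    .. math::
+       KH(x, y) = \frac{x \cdot y}{\|x\|^{2} + \|y\|^{2} - x \cdot y}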
+ + :example: + >>> x, y = np.random.randint(0, 500, (1000,)), np.random.randint(0, 500, (1000,)) + >>> kumar_hassebrook_similarity(x=x, y=y) + """ + check_valid_array(data=x, source=f'{kumar_hassebrook_similarity.__name__} x', accepted_ndims=(1,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_valid_array(data=y, source=f'{kumar_hassebrook_similarity.__name__} y', accepted_ndims=(1,), accepted_shapes=(x.shape,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + dot_product = np.dot(x, y) + norm_x = np.linalg.norm(x) + norm_y = np.linalg.norm(y) + return dot_product / (norm_x**2 + norm_y**2 - dot_product) + + +def wave_hedges_distance(x: np.ndarray, y: np.ndarray) -> float: + """ + + Computes the Wave-Hedges distance between two 1-dimensional arrays `x` and `y`. The Wave-Hedges distance is a measure of dissimilarity between arrays. + + .. note:: + Wave-Hedges distance score of 0 indicate identical arrays. There is no upper bound. + + + :example: + >>> x = np.random.randint(0, 500, (1000,)) + >>> y = np.random.randint(0, 500, (1000,)) + >>> wave_hedges_distance(x=x, y=y) + """ + + check_valid_array(data=x, source=f'{kumar_hassebrook_similarity.__name__} x', accepted_ndims=(1,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_valid_array(data=y, source=f'{kumar_hassebrook_similarity.__name__} y', accepted_ndims=(1,), accepted_shapes=(x.shape,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + x_y = abs(x - y) + xy_max = np.maximum(x, y) + return np.sum(np.where(((x_y != 0) & (xy_max != 0)), x_y / xy_max, 0)) + + +def gower_distance(x: np.ndarray, y: np.ndarray) -> np.ndarray: + """ + Compute Gower-like distance vector between corresponding rows of two numerical matrices. + Gower distance is a measure of dissimilarity between two vectors (or rows in this case). + + .. note:: + This function assumes x and y have the same shape and only considers numerical attributes. + Each observation in x is compared to the corresponding observation in y based on normalized + absolute differences across numerical columns. + + :param np.ndarray x: First numerical matrix with shape (m, n). + :param np.ndarray y: Second numerical matrix with shape (m, n). + :return np.ndarray: Gower-like distance vector with shape (m,). + + :example: + >>> x, y = np.random.randint(0, 500, (1000, 6000)), np.random.randint(0, 500, (1000, 6000)) + >>> gower_distance(x=x, y=y) + + """ + check_valid_array(data=x, source=f'{gower_distance.__name__} x', accepted_ndims=(1, 2), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_valid_array(data=y, source=f'{gower_distance.__name__} y', accepted_ndims=(x.ndim,), accepted_shapes=(x.shape,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + field_ranges = np.max(x, axis=0) - np.min(x, axis=0) + results = np.full((x.shape[0]), np.nan) + for i in range(x.shape[0]): + u, v = x[i], y[i] + dist = 0.0 + for j in range(u.shape[0]): + if field_ranges[j] != 0: + dist += np.abs(u[j] - v[j]) / field_ranges[j] + results[i] = dist / u.shape[0] + return results + + +def normalized_google_distance(x: np.ndarray, y: np.ndarray) -> float: + """ + Compute Normalized Google Distance (NGD) between two vectors or matrices. + + .. note:: + This function assumes x and y have the same shape. It computes NGD based on the sum of elements and the minimum values between corresponding elements of x and y. + + :param np.ndarray x: First numerical matrix with shape (m, n). + :param np.ndarray y: Second array or matrix with shape (m, n). + :return float: Normalized Google Distance between x and y. 
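+
+    With :math:`\Sigma x` and :math:`\Sigma y` denoting the element sums, the implementation below computes:
+
+    .. math::
+       NGD(x, y) = \frac{\max(\Sigma x, \Sigma y) - \sum_{i} \min(x_{i}, y_{i})}{(\Sigma x + \Sigma y) - \min(\Sigma x, \Sigma y)}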
+ + :example: + >>> x, y = np.random.randint(0, 500, (1000,200)), np.random.randint(0, 500, (1000,200)) + >>> normalized_google_distance(x=y, y=x) + """ + check_valid_array(data=x, source=f'{normalized_google_distance.__name__} x', accepted_ndims=(1, 2), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_valid_array(data=y, source=f'{normalized_google_distance.__name__} y', accepted_ndims=(x.ndim,), accepted_shapes=(x.shape,), accepted_dtypes=Formats.NUMERIC_DTYPES.value) + + sum_x, sum_y = np.sum(x), np.sum(y) + sum_min = np.sum(np.minimum(x, y)) + D = (sum_x + sum_y) - np.min([sum_x, sum_y]) + N = np.max([sum_x, sum_y]) - sum_min + if D == 0: + return -1.0 + else: + return N / D + + + + + + + + + + + + + +#kumar_hassebrook_similarity(x=x, y=y) + +# + + + + +# def gower_distance(x: np.ndarray, y: np.ndarray) -> float: +# check_ +# +# +# +# +# np.sum(np.abs(x - y)) / x.size +# diff --git a/simba/sandbox/labelme_to_dlc.py b/simba/sandbox/labelme_to_dlc.py new file mode 100644 index 000000000..1a82b2c4a --- /dev/null +++ b/simba/sandbox/labelme_to_dlc.py @@ -0,0 +1,88 @@ +import os +import json +import itertools + +import cv2 +import pandas as pd +import numpy as np +from typing import Union, Optional +from simba.utils.checks import check_if_dir_exists, check_if_keys_exist_in_dict +from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext, copy_files_to_directory +from simba.utils.errors import NoFilesFoundError +import PIL +import base64 +import io + + +def _b64_to_arr(img_b64): + """ + Helper to convert byte string (e.g., from labelme, to image in numpy format + """ + f = io.BytesIO() + f.write(base64.b64decode(img_b64)) + img_arr = np.array(PIL.Image.open(f)) + return img_arr + + +def labelme_to_dlc(labelme_dir: Union[str, os.PathLike], + scorer: Optional[str] = 'SN', + save_dir: Optional[Union[str, os.PathLike]] = None) -> None: + + """ + :param Union[str, os.PathLike] labelme_dir: Directory with labelme json files. + :param Optional[str] scorer: Name of the scorer (anticipated by DLC as header) + :param Optional[Union[str, os.PathLike]] save_dir: Directory where to save the DLC annotations. If None, then same directory as labelme_dir with `_dlc_annotations` suffix. 
+ :return: None + + :example: + >>> labelme_dir = r'D:\ts_annotations' + >>> labelme_to_dlc(labelme_dir=labelme_dir) + """ + + check_if_dir_exists(in_dir=labelme_dir) + annotation_paths = find_files_of_filetypes_in_directory(directory=labelme_dir, extensions=['.json'], raise_error=True) + results_dict = {} + images = {} + for annot_path in annotation_paths: + with open(annot_path) as f: annot_data = json.load(f) + check_if_keys_exist_in_dict(data=annot_data, key=['shapes', 'imageData', 'imagePath'], name=annot_path) + img_name = os.path.basename(annot_data['imagePath']) + images[img_name] = _b64_to_arr(annot_data['imageData']) + for bp_data in annot_data['shapes']: + check_if_keys_exist_in_dict(data=bp_data, key=['label', 'points'], name=annot_path) + point_x, point_y = bp_data['points'][0][0], bp_data['points'][0][1] + lbl = bp_data['label'] + id = os.path.join('labeled-data', os.path.basename(labelme_dir), img_name) + if id not in results_dict.keys(): + results_dict[id] = {f'{lbl}': {'x': point_x, 'y': point_y}} + else: + results_dict[id].update({f'{lbl}': {'x': point_x, 'y': point_y}}) + + if save_dir is None: + save_dir = os.path.join(os.path.dirname(labelme_dir), os.path.basename(labelme_dir) + '_dlc_annotations') + if not os.path.isdir(save_dir): os.makedirs(save_dir) + + bp_names = set() + for img, bp in results_dict.items(): bp_names.update(set(bp.keys())) + col_names = list(itertools.product(*[[scorer], bp_names, ['x', 'y']])) + columns = pd.MultiIndex.from_tuples(col_names) + results = pd.DataFrame(columns=columns) + results.columns.names = ['scorer', 'bodyparts', 'coords'] + for img, bp_data in results_dict.items(): + for bp_name, bp_cords in bp_data.items(): + results.at[img, (scorer, bp_name, 'x')] = bp_cords['x'] + results.at[img, (scorer, bp_name, 'y')] = bp_cords['y'] + + for img_name, img in images.items(): + img_save_path = os.path.join(save_dir, img_name) + cv2.imwrite(img_save_path, img) + save_path = os.path.join(save_dir, f'CollectedData_{scorer}.csv') + results.to_csv(save_path) + + +labelme_dir = r'D:\ts_annotations' +labelme_to_dlc(labelme_dir=labelme_dir) + + + + diff --git a/simba/sandbox/lbp.py b/simba/sandbox/lbp.py new file mode 100644 index 000000000..a21152304 --- /dev/null +++ b/simba/sandbox/lbp.py @@ -0,0 +1,20 @@ +import numpy as np +from skimage import feature +import cv2 + +def local_binary_patterns(img: np.ndarray, P=1, R=3): + lbp = feature.local_binary_pattern(img, P=P, R=R, method="default") + hist, _ = np.histogram(lbp.ravel(), bins=np.arange(0, P+3), range=(0, P + 2)) + print(hist) + hist = hist.astype("float") + hist /= (hist.sum() + 0.001) + print(hist) + + pass + + + + +img = cv2.imread('/Users/simon/Desktop/gresyscale.png', cv2.IMREAD_GRAYSCALE) +local_binary_patterns(img=img, P=4) + diff --git a/simba/sandbox/lcs.py b/simba/sandbox/lcs.py new file mode 100644 index 000000000..f57075ae8 --- /dev/null +++ b/simba/sandbox/lcs.py @@ -0,0 +1,5 @@ +import numpy as np + +def lcs(x: np.ndarray, y: np.ndarray): + pass + diff --git a/simba/sandbox/levenshtein.py b/simba/sandbox/levenshtein.py new file mode 100644 index 000000000..536fe3699 --- /dev/null +++ b/simba/sandbox/levenshtein.py @@ -0,0 +1,39 @@ +import numpy as np +from numba import jit + +#@jit(nopython=True) +def _levenshtein(x, y): + D = np.zeros((len(x) + 1, len(y) + 1), dtype=int) + D[0, 1:] = range(1, len(y) + 1) + D[1:, 0] = range(1, len(x) + 1) + + for i in range(1, len(x) + 1): + for j in range(1, len(y) + 1): + delta = 2 if x[i - 1] != y[j - 1] else 0 + D[i, j] = min(D[i - 1, j - 
1] + delta, D[i - 1, j] + 1, D[i, j - 1] + 1)
+    return D[-1, -1], D
+
+def levenshtein(x, y):
+    """ Levenshtein distance for iterable sequences (substitutions cost 2, insertions/deletions cost 1). """
+    # Coerce the inputs to arrays of single characters (for strings) or pass arrays/lists
+    # through unchanged. Note the deprecated np.str/np.int dtype aliases are avoided.
+    if isinstance(x, str) and isinstance(y, str):
+        _x = np.array(list(x), dtype=str)
+        _y = np.array(list(y), dtype=str)
+    elif isinstance(x, (list, np.ndarray)) and isinstance(y, (list, np.ndarray)):
+        _x = np.asarray(x)
+        _y = np.asarray(y)
+    else:
+        raise TypeError
+    d, D = _levenshtein(_x, _y)
+    return d, D
+
+x = np.array(['kitten'])
+y = np.array(['kitten'])
+# x = "kitten"
+# y = "sitting"
+r = levenshtein(x, y)
\ No newline at end of file
diff --git a/simba/sandbox/line_locate_point.py b/simba/sandbox/line_locate_point.py
new file mode 100644
index 000000000..21fa1cd4d
--- /dev/null
+++ b/simba/sandbox/line_locate_point.py
@@ -0,0 +1,71 @@
+import numpy as np
+from shapely.geometry import LineString, Polygon, Point
+from typing import Union, Optional
+from simba.utils.checks import check_instance, check_valid_array, check_float, check_int
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.utils.read_write import find_core_cnt
+
+def locate_line_point(path: Union[LineString, np.ndarray],
+                      geometry: Union[LineString, Polygon, Point],
+                      px_per_mm: Optional[float] = 1,
+                      fps: Optional[float] = 1,
+                      core_cnt: Optional[int] = -1,
+                      distance_min: Optional[bool] = True,
+                      time_prior: Optional[bool] = True):
+
+    """
+    Compute the time and distance travelled along a path to reach the most proximal point in reference to a second geometry.
+
+    .. note::
+       (i) To compute the time and distance travelled along a path to reach the most distal point to a second geometry, pass ``distance_min = False``.
+
+       (ii) To compute the time and distance travelled along a path **after** reaching the most distal or proximal point to a second geometry, pass ``time_prior = False``.
+
+    .. image:: _static/img/locate_line_point.png
+       :width: 600
+       :align: center
+
+    :example:
+    >>> line = LineString([[10, 10], [7.5, 7.5], [15, 15], [7.5, 7.5]])
+    >>> polygon = Polygon([[0, 5], [0, 0], [5, 0], [5, 5]])
+    >>> locate_line_point(path=line, geometry=polygon)
+    >>> {'distance_value': 3.5355339059327378, 'distance_travelled': 3.5355339059327378, 'time_travelled': 1.0, 'distance_index': 1}
+    """
+
+    check_instance(source=locate_line_point.__name__, instance=path, accepted_types=(LineString, np.ndarray))
+    check_instance(source=locate_line_point.__name__, instance=geometry, accepted_types=(LineString, Polygon, Point))
+    check_int(name="CORE COUNT", value=core_cnt, min_value=-1, max_value=find_core_cnt()[0], raise_error=True)
+    check_float(name="PIXELS PER MM", value=px_per_mm, min_value=0.1, raise_error=True)
+    check_float(name="FPS", value=fps, min_value=1, raise_error=True)
+    if core_cnt == -1: core_cnt = find_core_cnt()[0]
+
+    if isinstance(path, np.ndarray):
+        check_valid_array(data=path, accepted_axis_1_shape=(2,), accepted_dtypes=(np.float32, np.float64, np.int64, np.int32))
+        path = LineString(path)
+    if isinstance(geometry, Point):
+        geometry = np.array(geometry.coords)
+        distances = FeatureExtractionMixin.framewise_euclidean_distance_roi(location_1=np.array(path.coords), location_2=geometry, px_per_mm=px_per_mm)
+    else:
+        points = [Point(x) for x in np.array(path.coords)]
+        geometry = [geometry for x in range(len(points))]
+        distances = GeometryMixin().multiframe_shape_distance(shape_1=points, shape_2=geometry, pixels_per_mm=px_per_mm, core_cnt=core_cnt)
+
+    if distance_min:
+        distance_idx = np.argmin(distances)
+    else:
+        distance_idx = np.argmax(distances)
+    if time_prior:
+        dist_travelled = np.sum(np.abs(np.diff(distances[:distance_idx + 1]))) / px_per_mm
+        time_travelled = distance_idx / fps
+    else:
+        dist_travelled = np.sum(np.abs(np.diff(distances[distance_idx:]))) / px_per_mm
+        # Frames elapsed after the most proximal/distal point, converted to seconds.
+        time_travelled = (len(distances) - distance_idx) / fps
+    dist_val = distances[distance_idx] / px_per_mm
+
+    return {'distance_value': dist_val,
+            'distance_travelled': dist_travelled,
+            'time_travelled': time_travelled,
+            'distance_index': distance_idx}
\ No newline at end of file
diff --git a/simba/sandbox/line_plot.py b/simba/sandbox/line_plot.py
new file mode 100644
index 000000000..05f842325
--- /dev/null
+++ b/simba/sandbox/line_plot.py
@@ -0,0 +1,43 @@
+import os
+from typing import Optional, Union
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+from simba.utils.checks import check_instance, check_str, check_if_dir_exists
+
+
+def line_plot(df: pd.DataFrame,
+              x: str,
+              y: str,
+              x_label: Optional[str] = None,
+              y_label: Optional[str] = None,
+              title: Optional[str] = None,
+              save_path: Optional[Union[str, os.PathLike]] = None):
+
+    check_instance(source=f'{line_plot.__name__} df', instance=df, accepted_types=(pd.DataFrame))
+    check_str(name=f'{line_plot.__name__} x', value=x, options=tuple(df.columns))
+    check_str(name=f'{line_plot.__name__} y', value=y, options=tuple(df.columns))
+    sns.set_style("whitegrid", {"grid.linestyle": "--"})
+    plot = sns.lineplot(data=df, x=x, y=y)
+
+    if x_label is not None:
+        check_str(name=f'{line_plot.__name__} x_label', value=x_label)
+        plt.xlabel(x_label)
+    if y_label is not None:
+        check_str(name=f'{line_plot.__name__} y_label', value=y_label)
+        plt.ylabel(y_label)
+    if title is not None:
+        check_str(name=f'{line_plot.__name__} title', value=title)
+        plt.title(title, ha="center", fontsize=15)
+    if save_path is not None:
+        check_str(name=f'{line_plot.__name__} save_path', value=save_path)
+        check_if_dir_exists(in_dir=os.path.dirname(save_path))
+        plot.figure.savefig(save_path)
+        plt.close("all")
+    else:
+        return plot
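+
+# A minimal usage sketch with hypothetical column names:
+# import numpy as np
+# df = pd.DataFrame({'time (s)': np.arange(100), 'velocity': np.random.rand(100)})
+# line_plot(df=df, x='time (s)', y='velocity', title='Velocity over time')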
+
+
diff --git a/simba/sandbox/line_plot_plotly.py b/simba/sandbox/line_plot_plotly.py
new file mode 100644
index 000000000..cad7f4c32
--- /dev/null
+++ b/simba/sandbox/line_plot_plotly.py
@@ -0,0 +1,140 @@
+from typing import List, Optional, Union
+import os
+
+import cv2
+import numpy as np
+import plotly.graph_objs as go
+import plotly.io as pio
+from PIL import Image
+import io
+
+from simba.utils.lookups import get_color_dict
+from simba.utils.printing import stdout_success
+
+
+def make_line_plot_plotly(data: List[np.ndarray],
+                          colors: List[str],
+                          show_box: Optional[bool] = True,
+                          show_grid: Optional[bool] = True,
+                          width: Optional[int] = 640,
+                          height: Optional[int] = 480,
+                          line_width: Optional[int] = 6,
+                          font_size: Optional[int] = 8,
+                          bg_clr: Optional[str] = 'white',
+                          x_lbl_divisor: Optional[float] = None,
+                          title: Optional[str] = None,
+                          y_lbl: Optional[str] = None,
+                          x_lbl: Optional[str] = None,
+                          y_max: Optional[int] = -1,
+                          line_opacity: Optional[int] = 0.5,
+                          save_path: Optional[Union[str, os.PathLike]] = None):
+
+    """
+    Create a line plot using Plotly.
+
+    .. note::
+       Plotly can be more reliable than matplotlib on some systems when accessed through multiprocessing calls.
+
+       If **not** called through multiprocessing, consider using ``simba.mixins.plotting_mixin.PlottingMixin.make_line_plot()``.
+
+       Uses ``kaleido`` to transform the figure to a numpy array or save it to disk.
+
+    :param List[np.ndarray] data: List of 1D numpy arrays representing lines.
+    :param List[str] colors: List of named colors of size len(data).
+    :param bool show_box: Whether to show the plot box (axes, title, etc.).
+    :param bool show_grid: Whether to show gridlines on the plot.
+    :param int width: Width of the plot in pixels.
+    :param int height: Height of the plot in pixels.
+    :param int line_width: Width of the lines in the plot.
+    :param int font_size: Font size for axis labels and tick labels.
+    :param str bg_clr: Background color of the plot.
+    :param float x_lbl_divisor: Divisor for adjusting the tick spacing on the x-axis.
+    :param str title: Title of the plot.
+    :param str y_lbl: Label for the y-axis.
+    :param str x_lbl: Label for the x-axis.
+    :param int y_max: Maximum value for the y-axis.
+    :param float line_opacity: Opacity of the lines in the plot.
+    :param Union[str, os.PathLike] save_path: Path to save the plot image. If None, returns a numpy array of the plot.
+    :return: If save_path is None, returns a numpy array representing the plot image.
+ + :example: + >>> x = np.random.randint(0, 50, (100,)) + >>> y = np.random.randint(0, 50, (200,)) + >>> data = [x, y] + >>> img = make_line_plot_plotly(data=data, show_box=False, font_size=20, bg_clr='white', show_grid=False, x_lbl_divisor=30, colors=['Red', 'Green'], save_path='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/line_plot/Trial 3_final_img.png') + + + """ + + def tick_formatter(x): + if x_lbl_divisor is not None: + return x / x_lbl_divisor + else: + return x + + fig = go.Figure() + clr_dict = get_color_dict() + if y_max == -1: y_max = max([np.max(i) for i in data]) + for i in range(len(data)): + line_clr = clr_dict[colors[i]][::-1] + line_clr = f'rgba({line_clr[0]}, {line_clr[1]}, {line_clr[2]}, {line_opacity})' + fig.add_trace(go.Scatter(y=data[i], mode='lines', line=dict(color=line_clr, width=line_width))) + + if not show_box: + fig.update_layout(width=width, height=height, title=title, xaxis_visible=False, yaxis_visible=False, showlegend=False) + else: + if fig['layout']['xaxis']['tickvals'] is None: + tickvals = [i for i in range(len(data))] + else: + tickvals = fig['layout']['xaxis']['tickvals'] + ticktext = [tick_formatter(x) for x in tickvals] + fig.update_layout( + width=width, + height=height, + title=title, + xaxis=dict( + title=x_lbl, + tickmode='linear' if x_lbl_divisor is None else 'auto', + tickvals=tickvals, + ticktext=ticktext, + tick0=0, + dtick=10, + tickfont=dict(size=font_size), + showgrid=show_grid, + ), + yaxis=dict( + title=y_lbl, + tickfont=dict(size=font_size), + range=[0, y_max], + showgrid=show_grid, + ), + showlegend=False + ) + + if bg_clr is not None: + fig.update_layout(plot_bgcolor=bg_clr) + if save_path is not None: + pio.write_image(fig, save_path) + stdout_success(msg=f'Line plot saved at {save_path}') + else: + img_bytes = fig.to_image(format="png") + img = Image.open(io.BytesIO(img_bytes)) + fig.purge() + return np.array(img).astype(np.uint8) + + + +x = np.random.randint(0, 50, (100,)) +y = np.random.randint(0, 50, (200,)) +data = [x, y] +img = make_line_plot_plotly(data=data, show_box=True, font_size=20, bg_clr='white', show_grid=False, x_lbl_divisor=30.2, colors=['Red', 'Green'], save_path='/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/line_plot/Trial 3_final_img.png') + +# +# img = make_line_plot(data=data, show_box=False, font_size=20, bg_clr='white', show_grid=False, x_lbl_divisor=30, colors=['Red', 'Green'], save_path=None) +# +# import cv2 +# cv2.imshow('img', img) +# cv2.waitKey(5000) + + + diff --git a/simba/sandbox/linear_fretchet.py b/simba/sandbox/linear_fretchet.py new file mode 100644 index 000000000..a7e1c7788 --- /dev/null +++ b/simba/sandbox/linear_fretchet.py @@ -0,0 +1,69 @@ +import time + +import numpy as np +from numba import njit, jit, prange + +@njit('(float32[:,:], float32[:,:], int64)') +def linear_frechet_distance(x: np.ndarray, y: np.ndarray, sample: int = 100) -> float: + """ + Compute the Linear Fréchet Distance between two trajectories. + + The Fréchet Distance measures the dissimilarity between two continuous + curves or trajectories represented as sequences of points in a 2-dimensional + space. + + :param ndarray data: First 2D array of size len(frames) representing body-part coordinates x and y. + :param ndarray data: Second 2D array of size len(frames) representing body-part coordinates x and y. 
+    :param int sample: The downsampling factor for the trajectories (default is 100). If sample > 1, the trajectories are downsampled by selecting every sample-th point.
+
+    .. note::
+       Slightly modified from `João Paulo Figueira `_
+
+    :example:
+    >>> x = np.random.randint(0, 100, (10000, 2)).astype(np.float32)
+    >>> y = np.random.randint(0, 100, (10000, 2)).astype(np.float32)
+    >>> distance = linear_frechet_distance(x=x, y=y, sample=100)
+
+    """
+    if sample > 1: x, y = x[::sample], y[::sample]
+    n_p, n_q = x.shape[0], y.shape[0]
+    ca = np.full((n_p, n_q), 0.0)
+    for i in prange(n_p):
+        for j in range(n_q):
+            d = x[i] - y[j]
+            d = np.sqrt(np.dot(d, d))
+            if i > 0 and j > 0:
+                ca[i, j] = max(min(ca[i - 1, j], ca[i - 1, j - 1], ca[i, j - 1]), d)
+            elif i > 0 and j == 0:
+                ca[i, j] = max(ca[i - 1, 0], d)
+            elif i == 0 and j > 0:
+                ca[i, j] = max(ca[0, j - 1], d)
+            else:
+                ca[i, j] = d
+    return ca[n_p - 1, n_q - 1]
+
+
+# x = np.random.randint(0, 100, (10000, 2)).astype(np.float32)
+# y = np.random.randint(0, 100, (10000, 2)).astype(np.float32)
+# distance = linear_frechet_distance(x=x, y=y, sample=100)
+#
+# start = time.time()
+# results = linear_frechet_distance(x=x, y=y, sample=100)
+# print(time.time() - start)
+
+
+# Two parallel vertical lines, 4 px apart: the Fréchet distance between them is 4.
+x1 = np.full((1000, 1), 1)
+y1 = np.full((1000,), 2)
+for i in range(y1.shape[0]):
+    y1[i] = i
+line_1 = np.hstack((x1, y1.reshape(-1, 1))).astype(np.float32)
+
+x1 = np.full((1000, 1), 5)
+y1 = np.full((1000,), 2)
+for i in range(y1.shape[0]):
+    y1[i] = i
+line_2 = np.hstack((x1, y1.reshape(-1, 1))).astype(np.float32)
+
+linear_frechet_distance(x=line_1, y=line_2, sample=1)
diff --git a/simba/sandbox/linearity_index.py b/simba/sandbox/linearity_index.py
new file mode 100644
index 000000000..8f4295eeb
--- /dev/null
+++ b/simba/sandbox/linearity_index.py
@@ -0,0 +1,62 @@
+import numpy as np
+
+from simba.utils.checks import check_valid_array, check_float
+from simba.utils.enums import Formats
+
+def linearity_index(x: np.ndarray) -> float:
+
+    """
+    Calculates the straightness (linearity) index of a path.
+
+    :param np.ndarray x: An (N, M) array representing the path, where N is the number of points and M is the number of spatial dimensions (e.g., 2 for 2D or 3 for 3D). Each row represents the coordinates of a point along the path.
+    :return: The straightness index of the path, a value between 0 and 1, where 1 indicates a perfectly straight path.
+    :rtype: float
+
+    :example:
+    >>> x = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
+    >>> linearity_index(x=x)
+    >>> x = np.random.randint(0, 100, (100, 2))
+    >>> linearity_index(x=x)
+    """
+
+    check_valid_array(data=x, source=f'{linearity_index.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    straight_line_distance = np.linalg.norm(x[0] - x[-1])
+    path_length = np.sum(np.linalg.norm(np.diff(x, axis=0), axis=1))
+    if path_length == 0:
+        return 0.0
+    else:
+        return straight_line_distance / path_length
+
+
+def sliding_linearity_index(x: np.ndarray,
+                            window_size: float,
+                            sample_rate: float) -> np.ndarray:
+
+    """
+    Calculates the Linearity Index (Path Straightness) over a sliding window for a path represented by an array of points.
+
+    The Linearity Index measures how straight a path is by comparing the straight-line distance between the start and end points of each window to the total distance traveled along the path.
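+
+    For each window spanning frames :math:`l` to :math:`r`, the value matches the implementation below:
+
+    .. math::
+       LI = \frac{\| x_{l} - x_{r} \|}{\sum_{i=l}^{r-1} \| x_{i+1} - x_{i} \|}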
+
+    :param np.ndarray x: An (N, M) array representing the path, where N is the number of points and M is the number of spatial dimensions (e.g., 2 for 2D or 3 for 3D). Each row represents the coordinates of a point along the path.
+    :param float window_size: The size of the sliding window in seconds. This defines the time window over which the linearity index is calculated.
+    :param float sample_rate: The sample rate in Hz (samples per second), which is used to convert the window size from seconds to frames.
+    :return: A 1D array of length N, where each element represents the linearity index of the path within a sliding window. The value is a ratio between the straight-line distance and the actual path length for each window. Values range from 0 to 1, with 1 indicating a perfectly straight path.
+    :rtype: np.ndarray
+    """
+
+    frame_step = int(max(1.0, window_size * sample_rate))
+    results = np.full(x.shape[0], fill_value=0.0, dtype=np.float32)
+    for r in range(frame_step, x.shape[0]):
+        l = r - frame_step
+        sample_x = x[l:r, :]
+        straight_line_distance = np.linalg.norm(sample_x[0] - sample_x[-1])
+        path_length = np.sum(np.linalg.norm(np.diff(sample_x, axis=0), axis=1))
+        if path_length == 0:
+            results[r] = 0.0
+        else:
+            results[r] = straight_line_distance / path_length
+    return results
+
+
+x = np.random.randint(0, 100, (100, 2))
+sliding_linearity_index(x=x, window_size=1, sample_rate=10)
\ No newline at end of file
diff --git a/simba/sandbox/linestring_path.py b/simba/sandbox/linestring_path.py
new file mode 100644
index 000000000..12f4a2964
--- /dev/null
+++ b/simba/sandbox/linestring_path.py
@@ -0,0 +1,63 @@
+import os.path
+import cv2
+
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.mixins.config_reader import ConfigReader
+
+from simba.utils.read_write import read_df, find_video_of_file, read_frm_of_video
+
+"""
+Here we create simple images representing the animals
+"""
+
+### WE DEFINE THE PATH TO THE SIMBA PROJECT CONFIG, THE BODY-PART WE WANT TO USE TO INFER THE PATH, AND THE VIDEO
+### THAT WE WANT TO CREATE THE PATH FOR
+CONFIG_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/project_config.ini'
+BP_NAME = 'Nose'
+VIDEO_NAME = '2022-06-20_NOB_DOT_4'
+
+### WE READ IN THE PROJECT CONFIG, AND THE POSE-ESTIMATION DATA FOR THE BODY-PART THAT WE WANT TO VISUALIZE THE PATH FOR
+config = ConfigReader(config_path=CONFIG_PATH)
+data = read_df(os.path.join(config.outlier_corrected_dir, f'{VIDEO_NAME}.csv'), file_type='csv', usecols=[f'{BP_NAME}_x', f'{BP_NAME}_y']).values.astype(int)
+
+
+### WE CREATE A LINESTRING OBJECT REPRESENTING THE ANIMAL PATH, AND VISUALIZE IT ON A SIMPLE WHITE BACKGROUND.
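+### NOTE: THE POSE DATA WAS CAST TO AN (N, 2) INTEGER ARRAY ABOVE (ONE X, Y PAIR PER FRAME), WHICH IS THE SHAPE CONSUMED BY to_linestring BELOW.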
+linestring = GeometryMixin.to_linestring(data=data)
+img = GeometryMixin.view_shapes(shapes=[linestring], bg_img=None, bg_clr=None, size=None, color_palette=None)
+
+### ALTERNATIVELY, WE MAY WANT TO CHANGE THE BACKGROUND COLOR OF THE PATH PLOT TO MAKE IT MORE SALIENT
+### HERE WE SET THE BACKGROUND COLOR TO LIGHT BLUE (BGR: (173, 216, 230))
+img = GeometryMixin.view_shapes(shapes=[linestring], bg_img=None, bg_clr=(173, 216, 230), size=None, color_palette=None)
+
+cv2.imshow('asasd', img)
+cv2.waitKey(3000)
+
+### ALTERNATIVELY, WE MAY WANT THE ACTUAL VIDEO AS A BACKGROUND (WITH SOME OPACITY)
+### HERE, WE READ IN A FRAME FROM THE ASSOCIATED VIDEO, SET THE OPACITY OF THAT FRAME TO 50%, AND USE THAT AS A
+### BACKGROUND FOR THE PATH PLOT
+video_path = find_video_of_file(video_dir=config.video_dir, filename=VIDEO_NAME)
+video_frm = read_frm_of_video(video_path=video_path, frame_index=300, opacity=50)
+img = GeometryMixin.view_shapes(shapes=[linestring], bg_img=video_frm, bg_clr=None, size=None, color_palette=None)
+
+
+### ALTERNATIVELY, WE MAY WANT TO COLOR THE PATH WITH THE COLOR REPRESENTING THE TIME OF THE SESSION.
+### HERE WE USE THE MAGMA COLOR PALETTE TO COLOR THE PATH FROM EARLY -> LATE IN THE SESSION.
+img = GeometryMixin.view_shapes(shapes=[linestring], bg_img=video_frm, bg_clr=None, size=None, color_palette='magma')
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/simba/sandbox/madmedianrule.py b/simba/sandbox/madmedianrule.py
new file mode 100644
index 000000000..f3265562d
--- /dev/null
+++ b/simba/sandbox/madmedianrule.py
@@ -0,0 +1,59 @@
+import time
+
+import numpy as np
+from numba import njit
+
+@njit('(float32[:], float64,)')
+def mad_median_rule(data: np.ndarray, k: float) -> np.ndarray:
+    """
+    Detect outliers using the MAD-Median Rule. Returns a 1D array of size data.shape[0] with 1 representing outlier and 0 representing inlier.
+
+    :param np.ndarray data: 1D numerical array representing feature values.
+    :param float k: The outlier threshold, defined as k * median absolute deviation around the median.
+
+    :example:
+    >>> data = np.random.randint(0, 600, (9000000,)).astype(np.float32)
+    >>> mad_median_rule(data=data, k=1.0)
+    """
+
+    median = np.median(data)
+    mad = np.median(np.abs(data - median))
+    threshold = k * mad
+    outliers = np.abs(data - median) > threshold
+    return outliers * 1
+
+@njit('(float32[:], float64, float64[:], float64)')
+def sliding_mad_median_rule(data: np.ndarray, k: float, time_windows: np.ndarray, fps: float) -> np.ndarray:
+    """
+    Count the number of outliers in a sliding time-window using the MAD-Median Rule.
+
+    :param np.ndarray data: 1D numerical array representing feature.
+    :param float k: The outlier threshold defined as k * median absolute deviation in each time window.
+    :param np.ndarray time_windows: 1D array of time window sizes in seconds.
+    :param float fps: The frequency of the signal.
+    :return np.ndarray: Array of size (data.shape[0], time_windows.shape[0]) with counts of outliers detected.
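+
+    .. note::
+       Within each window, the threshold is ``k * MAD`` around the window median; as in
+       :func:`mad_median_rule` above, no normal-consistency constant (e.g., 1.4826) is applied.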
+
+    :example:
+    >>> data = np.random.randint(0, 50, (50000,)).astype(np.float32)
+    >>> sliding_mad_median_rule(data=data, k=2.0, time_windows=np.array([20.0]), fps=1.0)
+    """
+    results = np.full((data.shape[0], time_windows.shape[0]), -1)
+    for cnt in range(time_windows.shape[0]):
+        w = int(fps * time_windows[cnt])
+        for i in range(w, data.shape[0]+1, 1):
+            w_data = data[i-w:i]
+            median = np.median(w_data)
+            mad = np.median(np.abs(w_data - median))
+            threshold = k * mad
+            outliers = np.abs(w_data - median) > threshold
+            results[i-1, cnt] = np.sum(outliers * 1)
+    return results
+
+
+
+
+
+# data = np.random.randint(0, 50, (50000,)).astype(np.float32)
+# start = time.time()
+# sliding_mad_median_rule(data=data, k=2, time_windows=np.array([20.0]), fps=1.0)
+# print(time.time() - start)
+#
+# mad_median_rule(data=data, k=1.0)
+# print(time.time() - start)
diff --git a/simba/sandbox/mahalanobis.py b/simba/sandbox/mahalanobis.py
new file mode 100644
index 000000000..253d53466
--- /dev/null
+++ b/simba/sandbox/mahalanobis.py
@@ -0,0 +1,64 @@
+import time
+
+import numpy as np
+from simba.mixins.statistics_mixin import Statistics
+from numba import jit, njit, prange
+from scipy.spatial.distance import cdist
+
+@jit('(float32[:,:],)')
+def mahalanobis_distance_cdist(data: np.ndarray) -> np.ndarray:
+    """
+    Compute the Mahalanobis distance between every pair of observations in a 2D array using Numba.
+
+    The Mahalanobis distance is a measure of the distance between a point and a distribution. It accounts for correlations between variables and the scales of the variables, making it suitable for datasets where features are not independent and have different variances.
+
+    However, Mahalanobis distance may not be suitable in certain scenarios, such as:
+    - When the dataset is small and the covariance matrix is not accurately estimated.
+    - When the dataset contains outliers that significantly affect the estimation of the covariance matrix.
+    - When the assumptions of multivariate normality are violated.
+
+    :param np.ndarray data: 2D array with feature observations. Frames on axis 0 and feature values on axis 1.
+    :return np.ndarray: Pairwise Mahalanobis distance matrix where element (i, j) represents the Mahalanobis distance between observations i and j.
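+
+    The pairwise distance computed below is
+
+    .. math::
+       D(x_i, x_j) = \sqrt{(x_i - x_j)^{T} S^{-1} (x_i - x_j)}
+
+    where :math:`S` is the covariance matrix estimated from ``data``.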
+
+    :example:
+    >>> data = np.random.randint(0, 50, (1000, 200)).astype(np.float32)
+    >>> x = mahalanobis_distance_cdist(data=data)
+    """
+
+    covariance_matrix = np.cov(data, rowvar=False)
+    inv_covariance_matrix = np.linalg.inv(covariance_matrix).astype(np.float32)
+    n = data.shape[0]
+    distances = np.zeros((n, n))
+    for i in prange(n):
+        for j in range(n):
+            diff = data[i] - data[j]
+            diff = diff.astype(np.float32)
+            distances[i, j] = np.sqrt(np.dot(np.dot(diff, inv_covariance_matrix), diff.T))
+    return distances
+
+
+data = np.random.randint(0, 50, (1000, 200)).astype(np.float32)
+#data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float64)
+start = time.time()
+x = mahalanobis_distance_cdist(data=data)
+print(time.time() - start)
+
+start = time.time()
+covariance_matrix = np.cov(data, rowvar=False)
+inv_covariance_matrix = np.linalg.inv(covariance_matrix)
+distances = cdist(data, data, 'mahalanobis', VI=inv_covariance_matrix)
+print(time.time() - start)
+
diff --git a/simba/sandbox/make_path_plot.py b/simba/sandbox/make_path_plot.py
new file mode 100644
index 000000000..b404e7cde
--- /dev/null
+++ b/simba/sandbox/make_path_plot.py
@@ -0,0 +1,106 @@
+from typing import List, Optional, Tuple, Union, Dict, Any
+from simba.utils.printing import stdout_success, SimbaTimer
+import os
+import cv2
+import numpy as np
+from simba.utils.checks import check_int, check_instance, check_valid_lst, check_if_valid_rgb_tuple, check_if_dir_exists, check_valid_array, check_if_keys_exist_in_dict, check_float
+
+def make_path_plot(data: List[np.ndarray],
+                   colors: List[Tuple[int, int, int]],
+                   width: Optional[int] = 640,
+                   height: Optional[int] = 480,
+                   max_lines: Optional[int] = None,
+                   bg_clr: Optional[Union[Tuple[int, int, int], np.ndarray]] = (255, 255, 255),
+                   circle_size: Optional[Union[int, None]] = 3,
+                   font_size: Optional[float] = 2.0,
+                   font_thickness: Optional[int] = 2,
+                   line_width: Optional[int] = 2,
+                   animal_names: Optional[List[str]] = None,
+                   clf_attr: Optional[Dict[str, Any]] = None,
+                   save_path: Optional[Union[str, os.PathLike]] = None) -> Union[None, np.ndarray]:
+
+    """
+    Creates a path plot visualization from the given data.
+
+    .. image:: _static/img/make_path_plot.png
+       :width: 500
+       :align: center
+
+    :param List[np.ndarray] data: List of numpy arrays containing path data.
+    :param List[Tuple[int, int, int]] colors: List of RGB tuples representing colors for each path.
+    :param width: Width of the output image (default is 640 pixels).
+    :param height: Height of the output image (default is 480 pixels).
+    :param max_lines: Maximum number of lines to plot from each path data.
+    :param bg_clr: Background color of the plot (default is white).
+    :param circle_size: Size of the circle marker at the end of each path (default is 3).
+    :param font_size: Font size for displaying animal names (default is 2.0).
+    :param font_thickness: Thickness of the font for displaying animal names (default is 2).
+    :param line_width: Width of the lines representing paths (default is 2).
+    :param animal_names: List of names for the animals corresponding to each path.
+    :param clf_attr: Dictionary containing attributes for classification markers.
+    :param save_path: Path to save the generated plot image.
+    :return: If save_path is None, returns the generated image as a numpy array; otherwise, returns None.
+
+
+    :example:
+    >>> x = np.random.randint(0, 500, (100, 2))
+    >>> y = np.random.randint(0, 500, (100, 2))
+    >>> position_data = np.random.randint(0, 500, (100, 2))
+    >>> clf_data_1 = np.random.randint(0, 2, (100,))
+    >>> clf_data_2 = np.random.randint(0, 2, (100,))
+    >>> clf_data = {'Attack': {'color': (155, 1, 10), 'size': 30, 'positions': position_data, 'clfs': clf_data_1}, 'Sniffing': {'color': (155, 90, 10), 'size': 30, 'positions': position_data, 'clfs': clf_data_2}}
+    >>> make_path_plot(data=[x, y], colors=[(0, 255, 0), (255, 0, 0)], clf_attr=clf_data)
+    """
+
+    check_valid_lst(data=data, source=make_path_plot.__name__, valid_dtypes=(np.ndarray,), min_len=1)
+    for i in data: check_valid_array(data=i, source=make_path_plot.__name__, accepted_ndims=(2,), accepted_axis_1_shape=(2,), accepted_dtypes=(int, float, np.int32, np.int64, np.float32, np.float64))
+    check_valid_lst(data=colors, source=make_path_plot.__name__, valid_dtypes=(tuple,), exact_len=len(data))
+    for i in colors: check_if_valid_rgb_tuple(data=i)
+    check_instance(source='bg_clr', instance=bg_clr, accepted_types=(np.ndarray, tuple))
+    if isinstance(bg_clr, tuple):
+        check_if_valid_rgb_tuple(data=bg_clr)
+    check_int(name=f'{make_path_plot.__name__} height', value=height, min_value=1)
+    check_int(name=f'{make_path_plot.__name__} width', value=width, min_value=1)
+    check_float(name=f'{make_path_plot.__name__} font_size', value=font_size)
+    check_int(name=f'{make_path_plot.__name__} font_thickness', value=font_thickness)
+    check_int(name=f'{make_path_plot.__name__} line_width', value=line_width)
+    timer = SimbaTimer(start=True)
+    img = np.zeros((height, width, 3))
+    img[:] = bg_clr
+    for line_cnt in range(len(data)):
+        clr = colors[line_cnt]
+        line_data = data[line_cnt]
+        if max_lines is not None:
+            check_int(name=f'{make_path_plot.__name__} max_lines', value=max_lines, min_value=1)
+            line_data = line_data[-max_lines:]
+        for i in range(1, line_data.shape[0]):
+            cv2.line(img, tuple(line_data[i]), tuple(line_data[i-1]), clr, line_width)
+        if circle_size is not None:
+            cv2.circle(img, tuple(line_data[-1]), 0, clr, circle_size)
+        if animal_names is not None:
+            cv2.putText(img, animal_names[line_cnt], tuple(line_data[-1]), cv2.FONT_HERSHEY_COMPLEX, font_size, clr, font_thickness)
+    if clf_attr is not None:
+        check_instance(source=make_path_plot.__name__, instance=clf_attr, accepted_types=(dict,))
+        for k, v in clf_attr.items():
+            check_if_keys_exist_in_dict(data=v, key=['color', 'size', 'positions', 'clfs'], name='clf_attr')
+        for clf_name, clf_data in clf_attr.items():
+            clf_positions = clf_data['positions'][np.argwhere(clf_data['clfs'] == 1).flatten()]
+            for i in clf_positions:
+                cv2.circle(img, tuple(i), 0, clf_data['color'], clf_data['size'])
+    img = cv2.resize(img, (width, height)).astype(np.uint8)
+    if save_path is not None:
+        check_if_dir_exists(in_dir=os.path.dirname(save_path))
+        timer.stop_timer()
+        cv2.imwrite(save_path, img)
+        stdout_success(msg=f"Path plot saved at {save_path}", elapsed_time=timer.elapsed_time_str, source=make_path_plot.__name__,)
+    else:
+        return img
+
+# x = np.random.randint(0, 500, (100, 2))
+# y = np.random.randint(0, 500, (100, 2))
+# position_data = np.random.randint(0, 500, (100, 2))
+# clf_data_1 = np.random.randint(0, 2, (100,))
+# clf_data_2 = np.random.randint(0, 2, (100,))
+# clf_data = {'Attack': {'color': (155, 1, 10), 'size': 30, 'positions': position_data, 'clfs': clf_data_1}, 'Sniffing': {'color': (155, 90, 10), 'size': 30, 'positions': position_data, 'clfs': clf_data_2}}
+# plot = make_path_plot(data=[x, y], colors=[(0, 255, 0), (255, 0, 0)], clf_attr=clf_data)
+#
diff --git a/simba/sandbox/manhattan_distance.py b/simba/sandbox/manhattan_distance.py
new file mode 100644
index 000000000..577e8d749
--- /dev/null
+++ b/simba/sandbox/manhattan_distance.py
@@ -0,0 +1,36 @@
+import time
+
+import numpy as np
+from simba.utils.checks import check_valid_array
+
+
+def manhattan_distance_cdist(data: np.ndarray) -> np.ndarray:
+    """
+    Compute the pairwise Manhattan distance matrix between points in a 2D array.
+
+    Can be preferred over Euclidean distance in scenarios where the movement is restricted
+    to grid-based paths and/or the data is high dimensional.
+
+    .. math::
+       D_{\text{Manhattan}} = |x_2 - x_1| + |y_2 - y_1|
+
+    :param data: 2D array where each row represents a featurized observation (e.g., frame)
+    :return np.ndarray: Pairwise Manhattan distance matrix where element (i, j) represents the distance between points i and j.
+
+    :example:
+    >>> data = np.random.randint(0, 50, (10000, 2))
+    >>> manhattan_distance_cdist(data=data)
+    """
+    check_valid_array(data=data, source=f'{manhattan_distance_cdist.__name__} data', accepted_ndims=(2,), accepted_dtypes=(np.float32, np.float64, np.int64, np.int32, int, float, np.float16, np.int8, np.int16))
+    differences = np.abs(data[:, np.newaxis, :] - data)
+    results = np.sum(differences, axis=-1)
+    return results
+
+data = np.random.randint(0, 50, (10000, 2))
+y = manhattan_distance_cdist(data=data)
+start = time.time()
+x = manhattan_distance_cdist(data=data)
+print(time.time() - start)
+start = time.time()
+y = manhattan_distance_cdist(data=data)
+print(time.time() - start)
diff --git a/simba/sandbox/margalef_diversification_index.py b/simba/sandbox/margalef_diversification_index.py
new file mode 100644
index 000000000..87ca59eec
--- /dev/null
+++ b/simba/sandbox/margalef_diversification_index.py
@@ -0,0 +1,23 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+
+
+def margalef_diversification_index(x: np.array) -> float:
+    """
+    Calculate the Margalef Diversification Index for a given array of values.
+
+    The Margalef Diversification Index is a measure of category diversity. It quantifies the richness of a community
+    relative to the number of individuals.
+
+    :example:
+    >>> x = np.random.randint(0, 100, (100,))
+    >>> margalef_diversification_index(x=x)
+    """
+    check_valid_array(source=f'{margalef_diversification_index.__name__} x', accepted_ndims=(1,), data=x, accepted_dtypes=(np.float32, np.float64, np.int32, np.int64, np.int8), min_axis_0=2)
+    n_unique = np.unique(x).shape[0]
+    return (n_unique-1) / np.log(x.shape[0])
+
+
+
+x = np.random.randint(0, 100, (100,))
+margalef_diversification_index(x=x)
\ No newline at end of file
diff --git a/simba/sandbox/mcnamar.py b/simba/sandbox/mcnamar.py
new file mode 100644
index 000000000..072bde273
--- /dev/null
+++ b/simba/sandbox/mcnamar.py
@@ -0,0 +1,58 @@
+from typing import Optional, Tuple
+import numpy as np
+from scipy.stats.distributions import chi2
+from simba.utils.checks import check_valid_array
+from simba.utils.errors import CountError, InvalidInputError
+
+
+def mcnemar(x: np.ndarray, y: np.ndarray, ground_truth: np.ndarray, continuity_corrected: Optional[bool] = True) -> Tuple[float, float]:
+    """
+    McNemar's Test to compare the difference in predictive accuracy of two models.
+
+    E.g., can be used to compute if the accuracies of two classifiers are significantly different when transforming the same data.
+
+    .. note::
+       `mlextend `__.
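+
+    With ``b`` and ``c`` the two discordant-prediction counts tallied below, the continuity-corrected
+    statistic is
+
+    .. math::
+       \chi^2 = \frac{(|b - c| - 1)^2}{b + c}
+
+    and the p-value is evaluated against a chi-squared distribution with one degree of freedom.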
+
+
+    :param np.ndarray x: 1-dimensional Boolean array with predictions of the first model.
+    :param np.ndarray y: 1-dimensional Boolean array with predictions of the second model.
+    :param np.ndarray ground_truth: 1-dimensional Boolean array with ground truth labels.
+    :param Optional[bool] continuity_corrected: Whether to apply continuity correction. Default is True.
+
+    :example:
+    >>> x = np.random.randint(0, 2, (100000, ))
+    >>> y = np.random.randint(0, 2, (100000, ))
+    >>> ground_truth = np.random.randint(0, 2, (100000, ))
+    >>> mcnemar(x=x, y=y, ground_truth=ground_truth)
+    """
+
+    check_valid_array(data=x, source=mcnemar.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, np.int8))
+    check_valid_array(data=y, source=mcnemar.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, np.int8))
+    check_valid_array(data=ground_truth, source=mcnemar.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, np.int8))
+    if len(list({x.shape[0], y.shape[0], ground_truth.shape[0]})) != 1:
+        raise CountError(msg=f'The three arrays have to be of equal length but got: {x.shape[0], y.shape[0], ground_truth.shape[0]}', source=mcnemar.__name__)
+    for i in [x, y, ground_truth]:
+        additional = list(set(list(np.sort(np.unique(i)))) - {0, 1})
+        if len(additional) > 0: raise InvalidInputError(msg=f'Mcnemar requires binary input data but found {additional}', source=mcnemar.__name__)
+    data = np.hstack((x.reshape(-1, 1), y.reshape(-1, 1), ground_truth.reshape(-1, 1)))
+    b = np.where((data == (0, 1, 0)).all(axis=1))[0].shape[0] + np.where((data == (1, 0, 1)).all(axis=1))[0].shape[0]
+    c = np.where((data == (1, 0, 0)).all(axis=1))[0].shape[0] + np.where((data == (0, 1, 1)).all(axis=1))[0].shape[0]
+    if not continuity_corrected:
+        x = (np.square(b-c)) / (b+c)
+    else:
+        x = (np.square(np.abs(b-c)-1)) / (b+c)
+    p = chi2.sf(x, 1)
+    return x, p
+
+
+
+
+x = np.random.randint(0, 2, (100000, ))
+y = np.random.randint(0, 2, (100000, ))
+ground_truth = np.random.randint(0, 2, (100000, ))
+mcnemar(x=x, y=y, ground_truth=ground_truth)
\ No newline at end of file
diff --git a/simba/sandbox/mean_squared_jerk.py b/simba/sandbox/mean_squared_jerk.py
new file mode 100644
index 000000000..00ac7f3dc
--- /dev/null
+++ b/simba/sandbox/mean_squared_jerk.py
@@ -0,0 +1,107 @@
+import numpy as np
+
+from simba.utils.checks import check_valid_array, check_float
+from simba.utils.enums import Formats
+
+def mean_squared_jerk(x: np.ndarray,
+                      time_step: float,
+                      sample_rate: float) -> float:
+
+    """
+    Calculate the Mean Squared Jerk (MSJ) for a given set of 2D positions over time.
+
+    The Mean Squared Jerk is a measure of the smoothness of movement, calculated as the mean of
+    squared third derivatives of the position with respect to time. It provides an indication of
+    how abrupt or smooth a trajectory is, with higher values indicating more erratic movements.
+
+    :param np.ndarray x: A 2D array where each row represents the [x, y] position at a time step.
+    :param float time_step: The time difference between successive positions in seconds.
+    :param float sample_rate: The rate at which the positions are sampled (samples per second).
+    :return: The computed Mean Squared Jerk for the input trajectory data.
+ :rtype: float + + :example I: + >>> x = np.random.randint(0, 500, (100, 2)) + >>> mean_squared_jerk(x=x, time_step=1.0, sample_rate=30) + """ + + check_float(name=f'{mean_squared_jerk.__name__} time_step', min_value=10e-6, value=time_step) + check_float(name=f'{mean_squared_jerk.__name__} sample_rate', min_value=10e-6, value=sample_rate) + check_valid_array(data=x, source=f'{mean_squared_jerk.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=[2,], accepted_dtypes=Formats.NUMERIC_DTYPES.value) + + frame_step = int(max(1.0, time_step * sample_rate)) + V = np.diff(x, axis=0) / frame_step + A = np.diff(V, axis=0) / frame_step + jerks = np.diff(A, axis=0) / frame_step + squared_jerks = np.sum(jerks ** 2, axis=1) + return np.mean(squared_jerks) + + +def sliding_mean_squared_jerk(x: np.ndarray, + window_size: float, + sample_rate: float) -> np.ndarray: + """ + Calculates the mean squared jerk (rate of change of acceleration) for a position path in a sliding window. + + Jerk is the derivative of acceleration, and this function computes the mean squared jerk over sliding windows + across the entire path. High jerk values indicate abrupt changes in acceleration, while low values indicate + smoother motion. + + :param np.ndarray x: An (N, M) array representing the path of an object, where N is the number of samples (time steps) and M is the number of spatial dimensions (e.g., 2 for 2D motion). Each row represents the position at a time step. + :param float window_size: The size of each sliding window in seconds. This defines the interval over which the mean squared jerk is calculated. + :param float sample_rate: The sampling rate in Hz (samples per second), which is used to convert the window size from seconds to frames. + :return: A 1D array of length N, containing the mean squared jerk for each sliding window that ends at each time step. The first `frame_step` values will be NaN, as they do not have enough preceding data points to compute jerk over the full window. 
+    :rtype: np.ndarray
+
+    :example:
+    >>> x = np.random.randint(0, 500, (12, 2))
+    >>> sliding_mean_squared_jerk(x=x, window_size=1.0, sample_rate=2)
+
+    :example II:
+    >>> jerky_path = np.zeros((100, 2))
+    >>> jerky_path[::10] = np.random.randint(0, 500, (10, 2))
+    >>> non_jerky_path = np.linspace(0, 500, 100).reshape(-1, 1)
+    >>> non_jerky_path = np.hstack((non_jerky_path, non_jerky_path))
+    >>> jerky_jerk_result = sliding_mean_squared_jerk(jerky_path, 1.0, 10)
+    >>> non_jerky_jerk_result = sliding_mean_squared_jerk(non_jerky_path, 1.0, 10)
+    """
+
+    V = np.diff(x, axis=0)
+    A = np.diff(V, axis=0)
+    frame_step = int(max(1.0, window_size * sample_rate))
+    results = np.full(x.shape[0], fill_value=np.nan, dtype=np.float32)
+    for r in range(frame_step, x.shape[0]):
+        l = r - frame_step
+        V_a = A[l:r, :]
+        jerks = np.diff(V_a, axis=0)
+        if jerks.shape[0] == 0:
+            results[r] = 0.0
+        else:
+            results[r] = np.sum(jerks ** 2) / jerks.shape[0]
+
+    return results
+
+x = np.random.randint(0, 500, (12, 2))
+sliding_mean_squared_jerk(x=x, window_size=1.0, sample_rate=2)
+
+
+jerky_path = np.zeros((100, 2))
+jerky_path[::10] = np.random.randint(0, 500, (10, 2))
+non_jerky_path = np.linspace(0, 500, 100).reshape(-1, 1)
+non_jerky_path = np.hstack((non_jerky_path, non_jerky_path))
+jerky_jerk_result = sliding_mean_squared_jerk(jerky_path, 1.0, 10)
+non_jerky_jerk_result = sliding_mean_squared_jerk(non_jerky_path, 1.0, 10)
+
+
+# Parameters
+window_size = 1.0  # seconds
+sample_rate = 10  # samples per second
+
+# Apply the function to both paths
+
+
+# Print results
+print("Jerky Path Mean Squared Jerk:", np.nanmean(jerky_jerk_result))
+print("Non-Jerky Path Mean Squared Jerk:", np.nanmean(non_jerky_jerk_result))
+
+#sliding_mean_squared_jerk()
\ No newline at end of file
diff --git a/simba/sandbox/menhinicks_index.py b/simba/sandbox/menhinicks_index.py
new file mode 100644
index 000000000..62a8557d5
--- /dev/null
+++ b/simba/sandbox/menhinicks_index.py
@@ -0,0 +1,17 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+
+
+def menhinicks_index(x: np.array) -> float:
+    """
+    Calculate the Menhinick's Index for a given array of values.
+
+    Menhinick's Index is a measure of category richness.
+    It quantifies the number of categories relative to the square root of the total number of observations.
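+
+    .. math::
+       D = \frac{S}{\sqrt{N}}
+
+    where :math:`S` is the number of unique categories in ``x`` and :math:`N` is the total number of observations.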
+ + :example: + >>> x = np.random.randint(0, 5, (1000,)) + >>> menhinicks_index(x=x) + """ + check_valid_array(source=f'{menhinicks_index.__name__} x', accepted_ndims=(1,), data=x, accepted_dtypes=(np.float32, np.float64, np.int32, np.int64, np.int8), min_axis_0=2) + return np.unique(x).shape[0] / np.sqrt(x.shape[0]) \ No newline at end of file diff --git a/simba/sandbox/mitra_appand_additional.py b/simba/sandbox/mitra_appand_additional.py new file mode 100644 index 000000000..4921646a5 --- /dev/null +++ b/simba/sandbox/mitra_appand_additional.py @@ -0,0 +1,29 @@ +import os.path + +import pandas as pd +from simba.utils.read_write import find_files_of_filetypes_in_directory, get_fn_ext + + + +ADDITIONAL_FEATURES_LIST_PATH = r"C:\troubleshooting\mitra\additional_features_TAIL.csv" +FEATURES_DIR = r'C:\troubleshooting\mitra\project_folder\csv\features_extracted' +NEW_FEATURES_DIR = r'C:\troubleshooting\mitra\project_folder\videos\additional\bg_removed\rotated\tail_features_additional' +SAVE_DIR = r"C:\troubleshooting\mitra\project_folder\videos\additional\bg_removed\rotated\tail_features_additional\APPENDED" +#CLF_NAMES = ['rearing', 'grooming', 'immobility', 'lay-on-belly', 'straub_tail', 'circling', 'shaking'] + +additional_feature_names = list(pd.read_csv(ADDITIONAL_FEATURES_LIST_PATH, index_col=None)['ADDITIONAL_FEATURES']) +new_features_files = find_files_of_filetypes_in_directory(directory=NEW_FEATURES_DIR, extensions=['.csv']) + +for file_path in new_features_files: + print(file_path) + df = pd.read_csv(file_path, index_col=0) + #df_clf = df[CLF_NAMES] + #df = df.drop(CLF_NAMES, axis=1) + video_name = get_fn_ext(filepath=file_path)[1] + features_path = os.path.join(FEATURES_DIR, video_name + '.csv') + features_df = pd.read_csv(features_path, index_col=0)[additional_feature_names] + df = pd.concat([df, features_df], axis=1) + save_path = os.path.join(SAVE_DIR, video_name + '.csv') + df.to_csv(save_path) + + #additional_features = df[additional_feature_names] \ No newline at end of file diff --git a/simba/sandbox/mitra_bar_graph.py b/simba/sandbox/mitra_bar_graph.py new file mode 100644 index 000000000..39dc49b5c --- /dev/null +++ b/simba/sandbox/mitra_bar_graph.py @@ -0,0 +1,41 @@ +import pandas as pd +import numpy as np +from simba.mixins.plotting_mixin import PlottingMixin + +GI_PATH = r"C:\troubleshooting\mitra\project_folder\logs\straub_tail\straub_tail_aggregates\straub_tail_aggregates_gi.csv" +GQ_PATH = r"D:\troubleshooting\mitra\project_folder\logs\straub_tail_data\aggregate_straub_tail\straub_tail_aggregates.csv" + +gi_df = pd.read_csv(GI_PATH) +gq_df = pd.read_csv(GQ_PATH) + +gi_df.columns = [x.lower() for x in gi_df.columns] +gq_df.columns = [x.lower() for x in gq_df.columns] + + + +gi_df['video'] = gi_df['video'].str.lower() +gq_df['video'] = gq_df['video'].str.lower() + +conditions_1 = [ + gi_df['video'].str.contains('_gi_cno_'), + gi_df['video'].str.contains('_gi_saline_'), + gi_df['video'].str.contains('_gq_cno_'), + gi_df['video'].str.contains('_gq_saline_'), +] + +conditions_2 = [ + gq_df['video'].str.contains('_cno'), + gq_df['video'].str.contains('_saline'), +] + + +choices_1 = ['Gi CNO', 'Gi Saline', 'Gq CNO', 'Gq Saline'] +choices_2 = ['Gq CNO', 'Gq Saline'] + +gi_df['group'] = np.select(conditions_1, choices_1, default='Unknown') +gq_df['group'] = np.select(conditions_2, choices_2, default='Unknown') +gq_df['experiment'] = 2 +gi_df['experiment'] = 1 + +PlottingMixin.plot_bar_chart(df=gi_df, x='group', y='straub_tail - total event duration (s)') + diff --git 
a/simba/sandbox/mitra_bg_remover.py b/simba/sandbox/mitra_bg_remover.py new file mode 100644 index 000000000..c284ad046 --- /dev/null +++ b/simba/sandbox/mitra_bg_remover.py @@ -0,0 +1,22 @@ +import os +import glob +from simba.utils.read_write import get_fn_ext + +from simba.video_processors.video_processing import video_bg_subtraction_mp + +video_paths = glob.glob(r'C:\troubleshooting\mitra\project_folder\videos\additional' + '/*.mp4') +save_dir = r"C:\troubleshooting\mitra\project_folder\videos\additional\bg_removed" + +video_paths = [r"C:\troubleshooting\mitra\project_folder\videos\additional\501_MA142_Gi_Saline_0517.mp4"] + +for file_cnt, file_path in enumerate(video_paths): + _, video_name, _ = get_fn_ext(filepath=file_path) + save_path = os.path.join(save_dir, f'{video_name}.mp4') + if not os.path.isfile(save_path): + video_bg_subtraction_mp(video_path=file_path, save_path=save_path, verbose=True, bg_color=(255, 255, 255), gpu=False) + + + + + + diff --git a/simba/sandbox/mitra_circling_detector.py b/simba/sandbox/mitra_circling_detector.py new file mode 100644 index 000000000..97de2220a --- /dev/null +++ b/simba/sandbox/mitra_circling_detector.py @@ -0,0 +1,92 @@ +import os +import numpy as np +import pandas as pd +from numba import typed +from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, get_fn_ext, read_video_info +from simba.mixins.circular_statistics import CircularStatisticsMixin +from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin +from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin +from simba.mixins.config_reader import ConfigReader +from simba.utils.enums import Formats +from typing import Union, Optional +from simba.utils.checks import check_if_dir_exists, check_str, check_valid_dataframe, check_int, check_all_file_names_are_represented_in_video_log +from simba.utils.data import detect_bouts, plug_holes_shortest_bout +from simba.utils.printing import stdout_success + +CIRCLING = 'CIRCLING' + +class MitraCirclingDetector(ConfigReader): + + def __init__(self, + data_dir: Union[str, os.PathLike], + config_path: Union[str, os.PathLike], + nose_name: Optional[str] = 'nose', + left_ear_name: Optional[str] = 'left_ear', + right_ear_name: Optional[str] = 'right_ear', + tail_base_name: Optional[str] = 'tail_base', + time_threshold: Optional[int] = 10, + circular_range_threshold: Optional[int] = 320, + movement_threshold: Optional[int] = 60, + center_name: Optional[str] = 'tail_base', + save_dir: Optional[Union[str, os.PathLike]] = None): + + check_if_dir_exists(in_dir=data_dir) + for bp_name in [nose_name, left_ear_name, right_ear_name, tail_base_name]: check_str(name='body part name', value=bp_name, allow_blank=False) + self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv']) + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + self.nose_heads = [f'{nose_name}_x'.lower(), f'{nose_name}_y'.lower()] + self.left_ear_heads = [f'{left_ear_name}_x'.lower(), f'{left_ear_name}_y'.lower()] + self.right_ear_heads = [f'{right_ear_name}_x'.lower(), f'{right_ear_name}_y'.lower()] + self.center_heads = [f'{center_name}_x'.lower(), f'{center_name}_y'.lower()] + self.required_field = self.nose_heads + self.left_ear_heads + self.right_ear_heads + self.save_dir = save_dir + if self.save_dir is None: + self.save_dir = os.path.join(self.logs_path, f'circling_data_{self.datetime}') + os.makedirs(self.save_dir) + else: + 
check_if_dir_exists(in_dir=self.save_dir)
+        self.time_threshold, self.circular_range_threshold, self.movement_threshold = time_threshold, circular_range_threshold, movement_threshold
+        self.run()
+
+    def run(self):
+        agg_results = pd.DataFrame(columns=['VIDEO', 'CIRCLING FRAMES', 'CIRCLING TIME (S)', 'CIRCLING BOUT COUNTS', 'CIRCLING PCT OF SESSION', 'VIDEO TOTAL FRAMES', 'VIDEO TOTAL TIME (S)'])
+        agg_results_path = os.path.join(self.save_dir, 'aggregate_circling_results.csv')
+        check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths)
+        for file_cnt, file_path in enumerate(self.data_paths):
+            video_name = get_fn_ext(filepath=file_path)[1]
+            print(f'Analyzing {video_name} ({file_cnt+1}/{len(self.data_paths)})...')
+            save_file_path = os.path.join(self.save_dir, f'{video_name}.csv')
+            df = read_df(file_path=file_path, file_type='csv').reset_index(drop=True)
+            _, px_per_mm, fps = read_video_info(vid_info_df=self.video_info_df, video_name=video_name)
+            df.columns = [str(x).lower() for x in df.columns]
+            check_valid_dataframe(df=df, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.required_field)
+
+            nose_arr = df[self.nose_heads].values.astype(np.float32)
+            left_ear_arr = df[self.left_ear_heads].values.astype(np.float32)
+            right_ear_arr = df[self.right_ear_heads].values.astype(np.float32)
+
+            center_shifted = FeatureExtractionMixin.create_shifted_df(df[self.center_heads])
+            center_1, center_2 = center_shifted.iloc[:, 0:2].values, center_shifted.iloc[:, 2:4].values
+
+            angle_degrees = CircularStatisticsMixin().direction_three_bps(nose_loc=nose_arr, left_ear_loc=left_ear_arr, right_ear_loc=right_ear_arr).astype(np.float32)
+            sliding_circular_range = CircularStatisticsMixin().sliding_circular_range(data=angle_degrees, time_windows=np.array([self.time_threshold], dtype=np.float64), fps=int(fps)).flatten()
+            movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=center_1[:, 0].flatten(), bp_2_x=center_2[:, 0].flatten(), bp_1_y=center_1[:, 1].flatten(), bp_2_y=center_2[:, 1].flatten(), px_per_mm=px_per_mm)
+            movement_sum = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=movement.astype(np.float32), window_sizes=np.array([self.time_threshold], dtype=np.float64), sample_rate=fps, statistics=typed.List(["sum"])).astype(np.int32)[0].flatten()
+
+            circling_idx = np.argwhere(sliding_circular_range >= self.circular_range_threshold).astype(np.int32).flatten()
+            movement_idx = np.argwhere(movement_sum >= self.movement_threshold).astype(np.int32).flatten()
+            circling_idx = np.intersect1d(circling_idx, movement_idx)
+            df[CIRCLING] = 0
+            df.loc[circling_idx, CIRCLING] = 1
+            bouts = detect_bouts(data_df=df, target_lst=[CIRCLING], fps=fps)
+            df = plug_holes_shortest_bout(data_df=df, clf_name=CIRCLING, fps=fps, shortest_bout=100)
+            df.to_csv(save_file_path)
+            agg_results.loc[len(agg_results)] = [video_name, len(circling_idx), round(len(circling_idx) / fps, 4), len(bouts), round((len(circling_idx) / len(df)) * 100, 4), len(df), round(len(df)/fps, 2)]
+
+        agg_results.to_csv(agg_results_path)
+        stdout_success(msg=f'Results saved in {self.save_dir} directory.')
+
+
+
+#MitraCirclingDetector(data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location', config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini")
+
diff --git a/simba/sandbox/mitra_correlation_checks.py b/simba/sandbox/mitra_correlation_checks.py
new file mode 100644
index 000000000..8bbcd6af4
--- /dev/null
+++ 
b/simba/sandbox/mitra_correlation_checks.py @@ -0,0 +1,31 @@ +import os +from typing import Union +import numpy as np +import pandas as pd +from copy import deepcopy +from simba.mixins.config_reader import ConfigReader +from simba.utils.checks import check_file_exist_and_readable +from simba.utils.read_write import read_df, write_df +from simba.utils.printing import SimbaTimer + + +class MitraCorrelationChecks(ConfigReader): + + def __init__(self, + config_path: Union[str, os.PathLike]): + + ConfigReader.__init__(self, config_path=config_path) + + def run(self): + results = [] + for file_path in self.target_file_paths: + df = read_df(file_path=file_path, file_type=self.file_type).astype(np.float32) + results.append(df) + results = pd.concat(results, axis=0).reset_index(drop=True) + for clf in self.clf_names: + df = results.corrwith(results[clf]).sort_values(ascending=False) + df.to_csv(f'/Users/simon/Desktop/envs/simba/troubleshooting/mitra/correlations/{clf}.csv') + print(f'Saved {clf}') + +x = MitraCorrelationChecks('/Users/simon/Desktop/envs/simba/troubleshooting/mitra/project_folder/project_config.ini') +x.run() \ No newline at end of file diff --git a/simba/sandbox/mitra_downsample_data.py b/simba/sandbox/mitra_downsample_data.py new file mode 100644 index 000000000..c8caf4bed --- /dev/null +++ b/simba/sandbox/mitra_downsample_data.py @@ -0,0 +1,48 @@ +import os +from typing import Union +import pandas as pd +import numpy as np +from copy import deepcopy +from simba.mixins.config_reader import ConfigReader +from simba.utils.checks import check_file_exist_and_readable +from simba.utils.read_write import read_df, write_df, find_files_of_filetypes_in_directory, get_fn_ext +from simba.utils.printing import SimbaTimer + +class MitraDownSampler(ConfigReader): + + def __init__(self, + config_path: os.PathLike, + data_path: os.PathLike): + + ConfigReader.__init__(self, config_path=config_path) + self.data_path = data_path + self.data_paths = find_files_of_filetypes_in_directory(directory=self.data_path, extensions=['.' + self.file_type]) + + def run(self): + for file_path in self.data_paths: + df = read_df(file_path=file_path, file_type=self.file_type) + _, video_name, _ = get_fn_ext(filepath=file_path) + for clf in self.clf_names: + save_path = os.path.join(self.targets_folder, clf, video_name + '.' 
+ self.file_type)
+                if not os.path.isdir(os.path.dirname(save_path)):
+                    os.makedirs(os.path.dirname(save_path))
+                annot = df[df[clf] == 1]
+                n_samples = int(len(annot) * 10)
+                not_annot = df[df[clf] == 0]
+                if n_samples < 5000:
+                    n_samples = 5000
+                if n_samples > len(not_annot):
+                    n_samples = len(not_annot)
+                not_annot = not_annot.sample(n=n_samples)
+                idx = list(not_annot.index) + list(annot.index)
+                out = df.loc[idx, :].sort_index()
+                print(len(out), len(annot), n_samples, len(not_annot), clf, video_name)
+                write_df(df=out.astype(np.float32), file_type=self.file_type, save_path=save_path)
+
+
+
+
+
+x = MitraDownSampler(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/mitra/project_folder/project_config.ini',
+                     data_path='/Users/simon/Desktop/envs/simba/troubleshooting/mitra/project_folder/csv/targets_inserted/originals')
+x.run()
\ No newline at end of file
diff --git a/simba/sandbox/mitra_freezing_detector.py b/simba/sandbox/mitra_freezing_detector.py
new file mode 100644
index 000000000..ff908fe8a
--- /dev/null
+++ b/simba/sandbox/mitra_freezing_detector.py
@@ -0,0 +1,219 @@
+import os
+from typing import Union, Optional
+import numpy as np
+import pandas as pd
+from numba import typed
+from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, get_fn_ext, read_video_info
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.checks import check_if_dir_exists, check_str, check_valid_dataframe, check_int, check_all_file_names_are_represented_in_video_log
+from simba.utils.enums import Formats
+from simba.utils.data import detect_bouts, plug_holes_shortest_bout
+from simba.utils.printing import stdout_success
+
+
+NAPE_X, NAPE_Y = 'nape_x', 'nape_y'
+FREEZING = 'FREEZING'
+
+class MitraFreezingDetector(ConfigReader):
+
+    """
+    Detects freezing behavior in rodent movement data as outlined in `Sabnis et al.` (2024).
+
+    This implementation follows the methodology for freezing detection using supervised machine learning,
+    specifically applied to mouse behavioral analysis. The freezing detection algorithm is based on movement
+    thresholds calculated from tracked body parts (nose, ears, and tail base) over time.
+
+    The detector identifies freezing episodes by analyzing reduced movement across the defined body parts,
+    applying bout detection, and classifying freezing bouts based on predefined criteria, such as a
+    minimum movement threshold over a specified bout duration.
+
+    :example:
+    >>> MitraFreezingDetector(data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location', config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini")
+
+    References
+    ----------
+    .. [1] Sabnis et al., Visual detection of seizures in mice using supervised machine learning, `biorxiv`, doi: https://doi.org/10.1101/2024.05.29.596520.
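+
+    :param Union[str, os.PathLike] data_dir: Directory holding outlier-corrected pose-estimation CSV files.
+    :param Union[str, os.PathLike] config_path: Path to the SimBA project config file.
+    :param Optional[str] nose_name: Name of the tracked nose body-part. Default 'nose'.
+    :param Optional[str] left_ear_name: Name of the tracked left ear body-part. Default 'Left_ear'.
+    :param Optional[str] right_ear_name: Name of the tracked right ear body-part. Default 'right_ear'.
+    :param Optional[str] tail_base_name: Name of the tracked tail-base body-part. Default 'tail_base'.
+    :param Optional[int] time_window: Minimum duration (s) of a low-movement bout for it to be scored as freezing. Default 3.
+    :param Optional[int] movement_threshold: Movement level (summed over a sliding 1s window) at or below which a frame is a candidate freezing frame. Default 5.
+    :param Optional[Union[str, os.PathLike]] save_dir: Optional directory for per-video and aggregate results. If None, a time-stamped directory is created in the project logs folder.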
+ + """ + + def __init__(self, + data_dir: Union[str, os.PathLike], + config_path: Union[str, os.PathLike], + nose_name: Optional[str] = 'nose', + left_ear_name: Optional[str] = 'Left_ear', + right_ear_name: Optional[str] = 'right_ear', + tail_base_name: Optional[str] = 'tail_base', + time_window: Optional[int] = 3, + movement_threshold: Optional[int] = 5, + save_dir: Optional[Union[str, os.PathLike]] = None): + + check_if_dir_exists(in_dir=data_dir) + for bp_name in [nose_name, left_ear_name, right_ear_name, tail_base_name]: check_str(name='body part name', value=bp_name, allow_blank=False) + self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv']) + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + self.nose_heads = [f'{nose_name}_x'.lower(), f'{nose_name}_y'.lower()] + self.left_ear_heads = [f'{left_ear_name}_x'.lower(), f'{left_ear_name}_y'.lower()] + self.right_ear_heads = [f'{right_ear_name}_x'.lower(), f'{right_ear_name}_y'.lower()] + self.tail_base_heads = [f'{tail_base_name}_x'.lower(), f'{tail_base_name}_y'.lower()] + self.required_field = self.nose_heads + self.left_ear_heads + self.right_ear_heads + self.tail_base_heads + check_int(name='time_window', value=time_window, min_value=1) + check_int(name='movement_threshold', value=movement_threshold, min_value=1) + self.save_dir = save_dir + if self.save_dir is None: + self.save_dir = os.path.join(self.logs_path, f'freezing_data_time_{time_window}s_{self.datetime}') + os.makedirs(self.save_dir) + else: + check_if_dir_exists(in_dir=self.save_dir) + self.time_window, self.movement_threshold = time_window, movement_threshold + self.movement_threshold = movement_threshold + self.run() + + def run(self): + agg_results = pd.DataFrame(columns=['VIDEO', 'FREEZING FRAMES', 'FREEZING TIME (S)', 'FREEZING BOUT COUNTS', 'FREEZING PCT OF SESSION', 'VIDEO TOTAL FRAMES', 'VIDEO TOTAL TIME (S)']) + agg_results_path = os.path.join(self.save_dir, 'aggregate_freezing_results.csv') + check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths) + for file_cnt, file_path in enumerate(self.data_paths): + video_name = get_fn_ext(filepath=file_path)[1] + print(f'Analyzing {video_name}...') + save_file_path = os.path.join(self.save_dir, f'{video_name}.csv') + df = read_df(file_path=file_path, file_type='csv').reset_index(drop=True) + _, px_per_mm, fps = read_video_info(vid_info_df=self.video_info_df, video_name=video_name) + df.columns = [str(x).lower() for x in df.columns] + check_valid_dataframe(df=df, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.required_field) + nose_shifted = FeatureExtractionMixin.create_shifted_df(df[self.nose_heads]) + nose_1, nose_2 = nose_shifted.iloc[:, 0:2].values, nose_shifted.iloc[:, 2:4].values + nose_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=nose_1[:, 0].flatten(), bp_2_x=nose_2[:, 0].flatten(), bp_1_y=nose_1[:, 1].flatten(), bp_2_y=nose_2[:, 1].flatten(), px_per_mm=px_per_mm) + tail_base_shifted = FeatureExtractionMixin.create_shifted_df(df[self.tail_base_heads]) + tail_base_shifted_1, tail_base_shifted_2 = tail_base_shifted.iloc[:, 0:2].values, tail_base_shifted.iloc[:, 2:4].values + tail_base_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=tail_base_shifted_1[:, 0].flatten(), bp_2_x=tail_base_shifted_2[:, 0].flatten(), bp_1_y=tail_base_shifted_1[:, 1].flatten(), bp_2_y=tail_base_shifted_2[:, 1].flatten(), px_per_mm=px_per_mm) + 
left_ear_arr = df[self.left_ear_heads].values.astype(np.int64) + right_ear_arr = df[self.right_ear_heads].values.astype(np.int64) + nape_arr = pd.DataFrame(FeatureExtractionMixin.find_midpoints(bp_1=left_ear_arr, bp_2=right_ear_arr, percentile=np.float64(0.5)), columns=[NAPE_X, NAPE_Y]) + nape_shifted = FeatureExtractionMixin.create_shifted_df(nape_arr[[NAPE_X, NAPE_Y]]) + nape_shifted_1, nape_shifted_2 = nape_shifted.iloc[:, 0:2].values, nape_shifted.iloc[:, 2:4].values + nape_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=nape_shifted_1[:, 0].flatten(), bp_2_x=nape_shifted_2[:, 0].flatten(), bp_1_y=nape_shifted_1[:, 1].flatten(), bp_2_y=nape_shifted_2[:, 1].flatten(), px_per_mm=px_per_mm) + movement = np.hstack([nose_movement.reshape(-1, 1), nape_movement.reshape(-1, 1), tail_base_movement.reshape(-1, 1)]) + mean_movement = np.mean(movement, axis=1) + mm_s = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=mean_movement.astype(np.float32), window_sizes=np.array([1], dtype=np.float64), sample_rate=int(fps), statistics=typed.List(["sum"]))[0].flatten() + freezing_idx = np.argwhere(mm_s <= self.movement_threshold).astype(np.int32).flatten() + df[FREEZING] = 0 + df.loc[freezing_idx, FREEZING] = 1 + df = plug_holes_shortest_bout(data_df=df, clf_name=FREEZING, fps=fps, shortest_bout=100) + bouts = detect_bouts(data_df=df, target_lst=[FREEZING], fps=fps) + bouts = bouts[bouts['Bout_time'] >= self.time_window] + if len(bouts) > 0: + freezing_idx = list(bouts.apply(lambda x: list(range(int(x["Start_frame"]), int(x["End_frame"]) + 1)), 1)) + freezing_idx = [x for xs in freezing_idx for x in xs] + df.loc[freezing_idx, FREEZING] = 1 + else: + freezing_idx = [] + + df.to_csv(save_file_path) + agg_results.loc[len(agg_results)] = [video_name, len(freezing_idx), round(len(freezing_idx) / fps, 4), len(bouts), round((len(freezing_idx) / len(df)) * 100, 4), len(df), round(len(df)/fps, 2) ] + agg_results.to_csv(agg_results_path) + stdout_success(msg=f'Results saved in {self.save_dir} directory.') + + + + + + + + # + # freezing_idx = np.argwhere(movement_mean <= movement_threshold) + # + + # freezing_df = video_df[video_df['Movement_2s'] <= 66] + + +# +# +# +# +# print(file_cnt) +# video_name = get_fn_ext(filepath=file_path)[1].lower() +# if '_cno_' in video_name: +# drug = 'cno' +# elif '_saline_' in video_name: +# drug = 'saline' +# else: +# drug = 'dzo' +# if '_gi_' in video_name: +# group = 'gi' +# elif '_gq_' in video_name: +# group = 'gq' +# else: +# group = 'dzo' +# df = read_df(file_path=file_path, file_type='csv') +# nose_shifted = FeatureExtractionMixin.create_shifted_df(df[['Nose_x', 'Nose_y']]) +# nose_1 = nose_shifted.iloc[:, 0:2].values +# nose_2 = nose_shifted.iloc[:, 2:4].values +# nose_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=nose_1[:, 0].flatten(), +# bp_2_x=nose_2[:, 0].flatten(), +# bp_1_y=nose_1[:, 1].flatten(), +# bp_2_y=nose_2[:, 1].flatten(), px_per_mm=2.15) +# +# +# tail_base_shifted = FeatureExtractionMixin.create_shifted_df(df[['Tail_base_x', 'Tail_base_y']]) +# tail_base_shifted_1 = tail_base_shifted.iloc[:, 0:2].values +# tail_base_shifted_2 = tail_base_shifted.iloc[:, 2:4].values +# tail_base_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=tail_base_shifted_1[:, 0].flatten(), +# bp_2_x=tail_base_shifted_2[:, 0].flatten(), +# bp_1_y=tail_base_shifted_1[:, 1].flatten(), +# bp_2_y=tail_base_shifted_2[:, 1].flatten(), px_per_mm=2.15) +# +# +# left_ear_arr = df[['Left_ear_x', 'Left_ear_y']].values.astype(np.int64) +# right_ear_arr 
= df[['Right_ear_x', 'Right_ear_y']].values.astype(np.int64) +# nape_arr = pd.DataFrame(FeatureExtractionMixin.find_midpoints(bp_1=left_ear_arr, bp_2=right_ear_arr, percentile=np.float64(0.5)), columns=['Nape_x', 'Nape_y']) +# nape_shifted = FeatureExtractionMixin.create_shifted_df(nape_arr[['Nape_x', 'Nape_y']]) +# +# nape_shifted_1 = nape_shifted.iloc[:, 0:2].values +# nape_shifted_2 = nape_shifted.iloc[:, 2:4].values +# nape_movement = FeatureExtractionMixin.euclidean_distance(bp_1_x=nape_shifted_1[:, 0].flatten(), +# bp_2_x=nape_shifted_2[:, 0].flatten(), +# bp_1_y=nape_shifted_1[:, 1].flatten(), +# bp_2_y=nape_shifted_2[:, 1].flatten(), px_per_mm=2.15) +# +# movement = np.hstack([nose_movement.reshape(-1, 1), nape_movement.reshape(-1, 1), tail_base_movement.reshape(-1, 1)]) +# mean_movement = np.mean(movement, axis=1) +# +# movement_mean = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=nose_movement.astype(np.float32), window_sizes=np.array([2, 3, 4, 6], dtype=np.float64), sample_rate=30, statistics=typed.List(["sum"]))[0] +# movement_df = pd.DataFrame(movement_mean, columns=['Movement_2s', 'Movement_3s', 'Movement_4s', 'Movement_6s']) +# out = pd.concat([movement_df], axis=1) +# out['video'] = get_fn_ext(filepath=file_path)[1] +# out['drug'] = drug +# out['group'] = group +# out['condition'] = f'{group}_{drug}'.upper() +# results.append(out) +# +# out = pd.concat(results, axis=0) +# final_results = pd.DataFrame(columns=['DRUG', 'GROUP', 'CONDITION', '% SESSION']) +# for video_cnt, video in enumerate(out['video'].unique()): +# print(video_cnt) +# video_df = out[out['video'] == video].reset_index(drop=True) +# drug, group, condition = video_df['drug'].iloc[0], video_df['group'].iloc[0], video_df['condition'].iloc[0] +# if condition == 'GQ_CNO' or condition == 'GQ_SALINE': +# freezing_df = video_df[video_df['Movement_2s'] <= 66] +# if len(freezing_df) == 0: +# time = 0 +# else: +# time = (len(freezing_df) / len(video_df)) +# final_results.loc[len(final_results)] = [drug, group, condition, time] +# +# +# +# cno = final_results[final_results['CONDITION'] == 'GQ_CNO']['% SESSION'].values +# saline = final_results[final_results['CONDITION'] == 'GQ_SALINE']['% SESSION'].values +# +# ttest_ind(cno, saline, equal_var=True) +# +# +# plot = sns.stripplot(data=final_results, x='CONDITION', y='% SESSION', linewidth=2) +# plot = sns.barplot(data=final_results, x='CONDITION', y='% SESSION') + +MitraFreezingDetector(data_dir=r'D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location', + config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini") \ No newline at end of file diff --git a/simba/sandbox/mitra_frequency_grapher.py b/simba/sandbox/mitra_frequency_grapher.py new file mode 100644 index 000000000..f86a6ba76 --- /dev/null +++ b/simba/sandbox/mitra_frequency_grapher.py @@ -0,0 +1,97 @@ +import os +from typing import Union, Optional +import matplotlib +import numpy as np +matplotlib.use('Agg') # For non-GUI environments +import matplotlib.pyplot as plt +import seaborn as sns +import pandas as pd +from simba.utils.checks import check_if_dir_exists, check_all_file_names_are_represented_in_video_log, check_float +from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, read_video_info_csv, get_fn_ext, read_video_info +from simba.utils.data import detect_bouts + +GROUP_COLORS = {'CNO': 'red', 'SALINE': 'blue'} + +def frequency_grapher(data_dir: Union[str, os.PathLike], + video_info_path: Union[str, os.PathLike], + 
start_times_path: Union[str, os.PathLike],
+                       save_dir: Union[str, os.PathLike],
+                       min_bout: float,
+                       clf: str,
+                       bin_size: Optional[int] = 55) -> None:
+
+    """
+    Plot the onset times of classified bouts, aligned to CNO onset, for CNO vs. SALINE videos.
+
+    :param Union[str, os.PathLike] data_dir: Path to directory holding machine learning results.
+    :param Union[str, os.PathLike] video_info_path: Path to CSV holding video sample rate (fps).
+    :param Union[str, os.PathLike] start_times_path: Path to CSV holding the CNO onset times.
+    :param Union[str, os.PathLike] save_dir: Directory where the output image and CSV are saved.
+    :param float min_bout: The minimum bout to plot in seconds.
+    :param str clf: The name of the classifier.
+    :param Optional[int] bin_size: The width of each plotted bar along the time axis (in seconds).
+
+    """
+
+    plt.close('all')
+    data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv'])
+    video_info_df = read_video_info_csv(file_path=video_info_path)
+    check_all_file_names_are_represented_in_video_log(video_info_df=video_info_df, data_paths=data_paths)
+    start_times = pd.read_csv(start_times_path, index_col=0)
+    check_float(name='min_bout', value=min_bout, min_value=10e-7)
+    check_if_dir_exists(in_dir=os.path.dirname(save_dir))
+    df_save_path = os.path.join(save_dir, f'{clf}.csv')
+    img_save_path = os.path.join(save_dir, f'{clf}.png')
+    results, fps_dict = [], {}
+    for file_cnt, file_path in enumerate(data_paths):
+        video_name = get_fn_ext(filepath=file_path)[1]
+        print(f'Analyzing {video_name}...')
+        group = 'SALINE'
+        if 'CNO' in video_name:
+            group = 'CNO'
+        df = read_df(file_path=file_path, file_type='csv', usecols=[clf])
+        _, _, fps = read_video_info(video_name=video_name, video_info_df=video_info_df)
+        fps_dict[video_name] = fps
+        start_frm_number = start_times[start_times['VIDEO'] == video_name]['CNO onset (frame)'].values[0]
+        start_frm = max(0, start_frm_number - int(fps * 120))
+        end_frm = start_frm_number + int((fps * 60) * 10)
+        df = df.loc[start_frm:end_frm, :].reset_index(drop=True)
+        bouts = detect_bouts(data_df=df, target_lst=[clf], fps=fps)
+        bouts = bouts[bouts['Bout_time'] >= min_bout]
+        bouts = list(bouts['Start_frame'])
+        video_results = pd.DataFrame()
+        video_results['start_frame'] = bouts
+        video_results['start_time'] = video_results['start_frame'] / fps
+        video_results['start_time'] = video_results['start_time'] - 120
+        video_results['duration'] = bin_size
+        video_results['group'] = group
+        video_results['event'] = clf
+        video_results['length'] = len(df) / fps
+        video_results['video_name'] = video_name
+        results.append(video_results)
+
+    results = pd.concat(results, axis=0).sort_values(by=['start_time']).reset_index(drop=True)
+    results['length'] = round(results['length'], 2)
+    results['start_time'] = round(results['start_time'], 2)
+    sns.set_style("white")
+    tick_positions = np.arange(-120, results['length'].max(), 60)
+    tick_labels = [str(int(i)) for i in tick_positions]
+    plt.xticks(ticks=tick_positions, labels=tick_labels)
+    plt.xlim(-120, 601)
+
+    for idx, row in results.iterrows():
+        plt.barh(y=row['event'], width=row['duration'], left=row['start_time'] + 0.1 * idx, color=GROUP_COLORS[row['group']], edgecolor=None, height=0.8, alpha=0.4)
+
+    plt.xlabel('time (s)')
+    plt.legend(handles=[plt.Rectangle((0, 0), 1, 1, color=GROUP_COLORS['CNO']), plt.Rectangle((0, 0), 1, 1, color=GROUP_COLORS['SALINE'])], labels=['CNO', 'SALINE'], title='Groups')
+    plt.savefig(img_save_path, format='png', dpi=1200, bbox_inches='tight')
+    results.to_csv(df_save_path)
+
+
+
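+# Example invocation: the start-times CSV is expected to contain 'VIDEO' and 'CNO onset (frame)'
+# columns (used above to window each video around CNO onset).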
+frequency_grapher(data_dir=r"D:\troubleshooting\mitra\project_folder\logs\straub_tail_data", + clf='straub_tail', + video_info_path=r"D:\troubleshooting\mitra\project_folder\logs\video_info.csv", + start_times_path=r"D:\troubleshooting\mitra\Start_annotations_Simon_Hallie.csv", + min_bout=2.5, + bin_size=2, + save_dir=r'C:\Users\sroni\OneDrive\Desktop\mitra') \ No newline at end of file diff --git a/simba/sandbox/mitra_frequency_grapher.zip b/simba/sandbox/mitra_frequency_grapher.zip new file mode 100644 index 0000000000000000000000000000000000000000..9faca64320759251c1fa1e6a8c8534c5e2a01801 GIT binary patch literal 1838 zcmbW2X*3&%7RO^>TB?N6P)m#@rj|i77(>#rR5Pv6Ml012vDOkoTCGwPV=Ed%#J)r+ zq6}$8>^dpN-Xhl8*HTnPEcI&6d*{8+@Bcq{JNMjkKmL#iK7JrT_?sc&J`R9?t6mZS z00EQ$zFt8Dtdl#zEr96ehkM}ULBRTZx)HSfAE0iC0fZFV^gT`joTcwU3_bcvDJ-~c?uBdxCvhKjHH2eL$oN8@+2Wn+HwR5g06Qg_c$~rYe zQIWV5?Djymq4%@%<&IO^Sx|E?;`Z+1`I);~iS`0Fdp8Co$(&$tH_5I(y_EnRu~Z%{ zyH@F{DIlp=LLavW2Fkq5PJv=BaA(VXE2o=P!3aC7WSDwIjqO!eJ2gs*iEYI+6dR)+ zLyozUK*{FRNV)4DkW=4$cn?zm;QBkS|93q;j}?Q8G>8_M1^U*-*~e zErOT5qGsSFH)97j(VVbB`YmF_?{{qS8AxD7buK|jjl{rU8z6goC({P_SY*VGV`cEBPW*Ed70S$GS{-^qoq=5-Wl21p zv0zDCstr?wk-*2Ueq79>g_`BP*pH!Kv>kXqf0n-WHW88Ck{wzk_-gTk1{(HNw+uI1 z;Gcq5uK5v8|6O)5zazexTSu#mL;4AJ8vQIsN)6nG|6t1ESK>J9mWt1j&e_WovVy$e zpn%L;XBC9j^@{aKc(1fNNxvus5bI>^F{kdpwsyiT&+a3<+w|cX33db**&7xaY4`RF zPBFQY9&K7iJQ7m03-{jyn%Rbu56u9b^3p0|2A956W#h}Xyf%uD^x4t)!due(5RHux zGP3`r47&JsSh$ch2r2oq*Z_qSrc9xOr6znteP#>VMY`rh7L4Zmv!X367Q6GmbXRE# z)1hHYb7qxIxp{f1^AVSxtJTTd?Nk&Gix-+`Md|yXK}YJZQct_|^#Jz^auzMU zV}H%fk5pPslrI$o8AvAoh*wU`Y^{-BdxP^Vd}5X8`($0&dnA_(DL&s09Gyo4(W=eu zN@out!ywrK^46$I#nq{mQCzlGek&(oYllU^mB>7&Wnds-UT-pObQnjgL8IO`jx!7;v)_MR?G`)NlaCtP(lFcQ62AO>OBH|3*bN=Jm!R72Z;kynu8(uHl-?WX z{=#e*D-W8x{hq0}o;i+L6ue}BN z(xFiuDn*4kNqe0(a+=U|a+RoAbZ$%x&ah~^%A&O@oyIgaG^L>aI4z^6SQE{g;G122 z-=D5&2wkoEE-K(BW?4RW!y?2x&L+JNRqktOltUzhzeb6GdmauPf%Rs|617uaiuk0z z?d6S_dBz{94rg@MUo^?Lr3uD1$5vGRgaCYBTPXN+?+W-)yZ@L4j?J)O;>;DouiFGf zr*s2FqVDyIEq{t|A2btkn!3}(5Ov;thtY6P&CkLY;5XWxWkOQeou04W0@)huJBIQ1 zXnC;g^WxZTKBG`0(5}hCtwl_*)vA>aDxnc1W*clUam76IFFPGI+Gd@-?6a*4toWcf zoh03h_0+tcc#grIm~`5X%`-H4l>M~KJG*w5F35l^4gugADEYq*xjW(Cv zG5i2qOHw6Jm0t!`-{}chs77ux6ravOU`MDYZo%e>XMKhz5enjd;R)s~#;_1P@8>ss%0>+6f=j+OzLooc_h{L2WYqWc#RLcJh}{I-82$y@rgP0u zB!{bo8QGm=DGPS4$2deWAQ?sQxC9kz1iT)ZkzXHi%a-bM=!z!*QP* zM_@E`{Q~14qv?Ra?Can--Q_rpqTM;wKHAkGcaB&KMBT$PNQ98EyLvI{vLFC(LM0@E ohZo5AUt<05?u1zX(W{7)?*Hc(5+U&IBLKkj&7R+C)j!j}0MH;=8~^|S literal 0 HcmV?d00001 diff --git a/simba/sandbox/mitra_laying_down_analyzer.py b/simba/sandbox/mitra_laying_down_analyzer.py new file mode 100644 index 000000000..08b9cc605 --- /dev/null +++ b/simba/sandbox/mitra_laying_down_analyzer.py @@ -0,0 +1,212 @@ +import os +from typing import Union, Optional, Iterable, List + +import pandas as pd +from numba import typed + +try: + from typing import Literal +except: + from typing_extensions import Literal +import numpy as np + +from copy import deepcopy +from simba.mixins.config_reader import ConfigReader +from simba.utils.read_write import find_files_of_filetypes_in_directory, find_video_of_file, get_fn_ext, read_df, \ + read_video_info, read_frm_of_video, find_core_cnt, write_df +from simba.utils.checks import check_all_file_names_are_represented_in_video_log, check_valid_dataframe, check_int, \ + check_float +from simba.utils.enums import Formats, Defaults +from simba.video_processors.video_processing import video_bg_subtraction_mp +from simba.mixins.geometry_mixin import GeometryMixin +from simba.mixins.image_mixin import ImageMixin +from 
simba.plotting.geometry_plotter import GeometryPlotter +from simba.utils.checks import check_valid_array +import multiprocessing +import functools +from simba.utils.printing import SimbaTimer +from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin + +NOSE = 'nose' +LEFT_SIDE = 'left_side' +RIGHT_SIDE = 'right_side' +LEFT_EAR = 'left_ear' +RIGHT_EAR = 'right_ear' +CENTER = 'center' +TAIL_BASE = 'tail_base' +TAIL_CENTER = 'tail_center' +TAIL_TIP = 'tail_tip' + + +class MitraLayingDownAnalyzer(ConfigReader): + + def __init__(self, + config_path: Union[str, os.PathLike], + anchor_points: Iterable[str], + body_parts: Iterable[str], + save_dir: Union[str, os.PathLike], + data_dir: Optional[Union[str, os.PathLike]] = None, + video_dir: Optional[Union[str, os.PathLike]] = None): + + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + if data_dir is None: + self.data_paths = find_files_of_filetypes_in_directory(directory=self.outlier_corrected_dir, + extensions=['.csv']) + else: + self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv']) + if video_dir is not None: + self.video_dir = video_dir + self.paths = {} + for data_path in self.data_paths: + video = find_video_of_file(video_dir=self.video_dir, filename=get_fn_ext(filepath=data_path)[1]) + self.paths[data_path] = video + check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths) + self.tail_cols, self.bp_cols = [], [] + for bp in anchor_points: + self.tail_cols.append(f'{bp}_x'.lower()) + self.tail_cols.append(f'{bp}_y'.lower()) + for bp in body_parts: + self.bp_cols.append(f'{bp}_x'.lower()) + self.bp_cols.append(f'{bp}_y'.lower()) + self.required_cols = self.tail_cols + self.bp_cols + self.save_dir = save_dir + + def run(self): + for file_cnt, (file_path, video_path) in enumerate(self.paths.items()): + video_timer = SimbaTimer(start=True) + _, video_name, _ = get_fn_ext(filepath=file_path) + _, px_per_mm, fps = read_video_info(vid_info_df=self.video_info_df, video_name=video_name) + print(f'Analyzing {video_name} ({file_cnt + 1}/{len(self.data_paths)})...') + save_path = os.path.join(self.save_dir, f'{video_name}.csv') + print(video_path, save_path) + if not os.path.isfile(save_path) and (video_path is not None) and os.path.isfile(video_path): + df = read_df(file_path=file_path, file_type=self.file_type) + out_df = deepcopy(df) + df.columns = [str(x).lower() for x in df.columns] + check_valid_dataframe(df=df, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.required_cols) + + tail_geometry_df = df[self.tail_cols].values.reshape(len(df), int(len(self.tail_cols) / 2), 2).astype(np.int64) + tail_geometries = GeometryMixin().bodyparts_to_polygon(data=tail_geometry_df, parallel_offset=35, pixels_per_mm=px_per_mm) + + hull_geometry_df = df[self.bp_cols].values.reshape(len(df), int(len(self.bp_cols) / 2), 2).astype(np.int64) + hull_geometries = GeometryMixin().bodyparts_to_polygon(data=hull_geometry_df, parallel_offset=40, pixels_per_mm=px_per_mm) + animal_geometries = GeometryMixin().multiframe_union(shapes=np.array([tail_geometries, hull_geometries]).T) + + out_df['animal_area'] = GeometryMixin().multiframe_area(shapes=animal_geometries, pixels_per_mm=px_per_mm) + animal_area_std = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=out_df['animal_area'].values.astype(np.float32), window_sizes=np.array([0.5, 1.0, 2.0]), sample_rate=fps, statistics=typed.List(['std'])) + 
out_df = pd.concat([out_df, pd.DataFrame(animal_area_std[0], columns=['animal_area_std_05', 'animal_area_std_1', 'animal_area_std_2'])], axis=1) + + animal_area_mean = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=out_df['animal_area'].values.astype(np.float32), window_sizes=np.array([0.5, 1.0, 2.0]), sample_rate=fps, statistics=typed.List(['mean'])) + out_df = pd.concat([out_df, pd.DataFrame(animal_area_mean[0], columns=['animal_area_mean_05', 'animal_area_mean_1', 'animal_area_mean_2'])], axis=1) + + animal_area_mad = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=out_df['animal_area'].values.astype(np.float32), window_sizes=np.array([0.5, 1.0, 2.0]), sample_rate=fps, statistics=typed.List(['mad'])) + out_df = pd.concat([out_df, pd.DataFrame(animal_area_mad[0], columns=['animal_area_mad_05', 'animal_area_mad_1', 'animal_area_mad_2'])], axis=1) + + out_df['animal_hausdorf_05'] = GeometryMixin().multiframe_hausdorff_distance(geometries=animal_geometries, lag=0.5, sample_rate=fps) + out_df['animal_hausdorf_1'] = GeometryMixin().multiframe_hausdorff_distance(geometries=animal_geometries, lag=1, sample_rate=fps) + out_df['animal_hausdorf_2'] = GeometryMixin().multiframe_hausdorff_distance(geometries=animal_geometries, lag=2, sample_rate=fps) + + out_df['hull_hausdorf_05'] = GeometryMixin().multiframe_hausdorff_distance(geometries=hull_geometries, lag=0.5, sample_rate=fps) + out_df['hull_hausdorf_1'] = GeometryMixin().multiframe_hausdorff_distance(geometries=hull_geometries, lag=1, sample_rate=fps) + out_df['hull_hausdorf_2'] = GeometryMixin().multiframe_hausdorff_distance(geometries=hull_geometries, lag=2, sample_rate=fps) + + animal_lower_body_arr = df[[f'{LEFT_SIDE}_x', f'{LEFT_SIDE}_y', f'{RIGHT_SIDE}_x', f'{RIGHT_SIDE}_y', f'{TAIL_BASE}_x', f'{TAIL_BASE}_y']].values.astype(np.float32).reshape(len(df), 3, 2) + lower_body_geometry = GeometryMixin().bodyparts_to_polygon(data=animal_lower_body_arr, parallel_offset=40, pixels_per_mm=px_per_mm) + out_df['lower_body_area'] = GeometryMixin().multiframe_area(shapes=lower_body_geometry, pixels_per_mm=px_per_mm) + + lower_body_area_std = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=out_df['lower_body_area'].values.astype(np.float32), window_sizes=np.array([0.5, 1.0, 2.0]), sample_rate=fps, statistics=typed.List(['std'])) + out_df = pd.concat([out_df, pd.DataFrame(lower_body_area_std[0], columns=['lower_body_area_std_05', 'lower_body_area_std_1', 'lower_body_area_std_2'])], axis=1) + + out_df['lower_body_hausdorf_05'] = GeometryMixin().multiframe_hausdorff_distance(geometries=lower_body_geometry, lag=0.5, sample_rate=fps) + out_df['lower_body_hausdorf_1'] = GeometryMixin().multiframe_hausdorff_distance(geometries=lower_body_geometry, lag=1, sample_rate=fps) + out_df['lower_body_hausdorf_2'] = GeometryMixin().multiframe_hausdorff_distance(geometries=lower_body_geometry, lag=2, sample_rate=fps) + + if video_path is not None: + hull_imgs = ImageMixin().slice_shapes_in_imgs(imgs=video_path, shapes=hull_geometries, bg_color=(255, 255, 255), core_cnt=8, verbose=True) + hull_imgs = ImageMixin.pad_img_stack(image_dict=hull_imgs) + hull_imgs = np.stack(list(hull_imgs.values())) + out_df['hull_mse_05'] = ImageMixin.img_sliding_mse(imgs=hull_imgs, slide_length=0.5, sample_rate=float(fps)) + out_df['hull_mse_1'] = ImageMixin.img_sliding_mse(imgs=hull_imgs, slide_length=1.0, sample_rate=float(fps)) + out_df['hull_mse_2'] = ImageMixin.img_sliding_mse(imgs=hull_imgs, slide_length=2.0, sample_rate=float(fps)) + 
video_timer.stop_timer()
+            write_df(df=out_df, file_type='csv', save_path=save_path)
+            print(video_timer.elapsed_time_str)
+
+
+runner = MitraLayingDownAnalyzer(config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini",
+                                 data_dir=r'D:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated',
+                                 video_dir=r'D:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated',
+                                 save_dir=r"D:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated\laying_down_features",
+                                 anchor_points=('tail_base', 'tail_center', 'tail_tip'),
+                                 body_parts=('nose', 'left_ear', 'right_ear', 'right_side', 'left_side', 'tail_base'))
+runner.run()
+
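+# Illustrative sketch (hypothetical values, not from the original patch) of the
+# trailing-window statistics computed in run() above: for each frame, the area
+# signal is summarized over the preceding 0.5/1/2 s. A pure-numpy equivalent of
+# one 'std' window column:
+#
+# import numpy as np
+# area, fps = np.random.rand(100).astype(np.float32), 30
+# win = int(fps * 0.5)                                  # 0.5 s window in frames
+# std_05 = np.array([area[max(0, i - win + 1):i + 1].std() for i in range(area.size)])
+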
+# runner = MitraLayingDownAnalyzer(config_path=r"C:\troubleshooting\mitra\project_folder\project_config.ini",
+#                                  data_dir=r'C:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated',
+#                                  video_dir=r'C:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated',
+#                                  save_dir=r"C:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated\laying_down_features",
+#                                  anchor_points=('tail_base', 'tail_center', 'tail_tip'),
+#                                  body_parts=('nose', 'left_ear', 'right_ear', 'right_side', 'left_side', 'tail_base'))
+# runner.run()
+
+
+# runner = MitraTailAnalyzer(config_path=r"D:\troubleshooting\mitra\project_folder\project_config.ini",
+#                            data_dir=r'D:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated',
+#                            video_dir=r'D:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated',
+#                            save_dir=r'D:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated\tail_features',
+#                            anchor_points=('tail_base', 'tail_center', 'tail_tip'),
+#                            body_parts=('nose', 'left_ear', 'right_ear', 'right_side', 'left_side', 'tail_base'))
+# runner.run()
\ No newline at end of file
diff --git a/simba/sandbox/mitra_style_annotation_appender.py b/simba/sandbox/mitra_style_annotation_appender.py
new file mode 100644
index 000000000..d3c2b25b3
--- /dev/null
+++ b/simba/sandbox/mitra_style_annotation_appender.py
@@ -0,0 +1,75 @@
+import os
+from typing import Union
+import pandas as pd
+from copy import deepcopy
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.checks import check_file_exist_and_readable, check_if_dir_exists
+from simba.utils.read_write import read_df, write_df
+from simba.utils.printing import SimbaTimer
+
+class MitraStyleAnnotationAppender(ConfigReader):
+
+    def __init__(self,
+                 config_path: Union[str, os.PathLike],
+                 data_path: Union[str, os.PathLike],
+                 features_dir: Union[str, os.PathLike],
+                 save_dir: Union[str, os.PathLike]):
+
+        ConfigReader.__init__(self, config_path=config_path)
+        check_file_exist_and_readable(file_path=data_path)
+        check_if_dir_exists(in_dir=features_dir)
+        check_if_dir_exists(in_dir=save_dir)
+        self.data_path = data_path
+        self.save_dir, self.features_dir = save_dir, features_dir
+
+    def run(self):
+        total_cnt = {}
+        df_dict = pd.read_excel(self.data_path, sheet_name=None)
+        for file_name, file_df in df_dict.items():
+            video_timer = SimbaTimer(start=True)
+            data_path = os.path.join(self.features_dir, file_name + '.csv')
+            if os.path.isfile(data_path):
+                data_df = read_df(file_path=data_path, file_type='csv')
+                out_df = deepcopy(data_df)
+                save_path = os.path.join(self.save_dir, file_name + '.csv')
+                df = pd.DataFrame(file_df.values[1:, :3], columns=['BEHAVIOR', 'START', 'STOP'])
+                df['BEHAVIOR'] = df['BEHAVIOR'].str.lower()
+                for clf in self.clf_names:
+                    if clf not in total_cnt.keys():
+                        total_cnt[clf] = 0
+                    clf_df = df[df['BEHAVIOR'] == clf].sort_values(['START'])
+                    out_df[clf] = 0
+                    if len(clf_df) > 0:
+                        annot_idx = list(clf_df.apply(lambda x: list(range(int(x["START"]), int(x["STOP"]) + 1)), 1))
+                        annot_idx = [x for xs in annot_idx for x in xs]
+                        if len(annot_idx) > 0:
+                            out_df.loc[annot_idx, clf] = 1
+                            total_cnt[clf] += out_df[clf].sum()
+                write_df(df=out_df, file_type=self.file_type, save_path=save_path)
+                video_timer.stop_timer()
+                print(total_cnt)
+                print(f'{file_name} saved (elapsed time: {video_timer.elapsed_time_str}s)...')
+
+        print(total_cnt)
+
+
+data_path = r"C:\troubleshooting\mitra\Start-Stop Annotations.xlsx"
+features_dir = r"C:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated\laying_down_features\APPENDED"
+save_dir = r"C:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated\laying_down_features\APPENDED\targets_inserted"
r"C:\troubleshooting\mitra\project_folder\videos\bg_removed\rotated\laying_down_features\APPENDED\targets_inserted" +config_path = r"C:\troubleshooting\mitra\project_folder\project_config.ini" + +x = MitraStyleAnnotationAppender(data_path=data_path, features_dir=features_dir, save_dir=save_dir, config_path=config_path) +x.run() + + + + + diff --git a/simba/sandbox/mitra_tail_analyzer.py b/simba/sandbox/mitra_tail_analyzer.py new file mode 100644 index 000000000..5c71f803c --- /dev/null +++ b/simba/sandbox/mitra_tail_analyzer.py @@ -0,0 +1,166 @@ +import os +from typing import Union, Optional, Iterable, List + +import pandas as pd +from numba import typed +try: + from typing import Literal +except: + from typing_extensions import Literal +import numpy as np + +from copy import deepcopy +from simba.mixins.config_reader import ConfigReader +from simba.utils.read_write import find_files_of_filetypes_in_directory, find_video_of_file, get_fn_ext, read_df, read_video_info, read_frm_of_video, find_core_cnt, write_df +from simba.utils.checks import check_all_file_names_are_represented_in_video_log, check_valid_dataframe, check_int, check_float +from simba.utils.enums import Formats, Defaults +from simba.video_processors.video_processing import video_bg_subtraction_mp +from simba.mixins.geometry_mixin import GeometryMixin +from simba.mixins.image_mixin import ImageMixin +from simba.plotting.geometry_plotter import GeometryPlotter +from simba.utils.checks import check_valid_array +import multiprocessing +import functools +from simba.utils.printing import SimbaTimer +from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin + + +def _multiframe_img_histocomparison(data: List[int], + imgs: np.ndarray, + lag: int, + method: str): + + results = [] + for current_frm_idx in range(data.shape[0]): + + + + + frm_range = frm_index[frm_range_idx] + print(f"Analyzing frame {frm_range[1]}...") + +def multiframe_img_histocomparison(imgs: np.ndarray, + lag: Optional[Union[float, int]] = 1, + fps: Optional[Union[float, int]] = 1, + core_cnt: Optional[int] = -1, + method: Optional[Literal["simple", "none", "l2", "kcos"]] = "simple", + canny: Optional[bool] = True): + + check_valid_array(data=imgs, source=multiframe_img_histocomparison.__name__) + check_float(name=f'{multiframe_img_histocomparison.__name__} lag', value=lag, min_value=10e-16) + check_float(name=f'{multiframe_img_histocomparison.__name__} fps', value=fps, min_value=10e-16) + check_int(name=f'{multiframe_img_histocomparison.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0]) + if core_cnt <= 0: + core_cnt = find_core_cnt()[0] + + frm_idx = np.arange(0, imgs.shape[0]) + print(frm_idx) + results = [] + with multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool: + constants = functools.partial(_multiframe_img_histocomparison, + data=imgs, + canny=canny, + lag=lag, + method=method) + for cnt, result in enumerate(pool.imap(constants, imgs, chunksize=1)): + results.append(result) + + return [item for sublist in results for item in sublist] + + +class MitraTailAnalyzer(ConfigReader): + + def __init__(self, + config_path: Union[str, os.PathLike], + anchor_points: Iterable[str], + body_parts: Iterable[str], + save_dir: Union[str, os.PathLike], + data_dir: Optional[Union[str, os.PathLike]] = None, + video_dir: Optional[Union[str, os.PathLike]] = None): + + ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False) + if data_dir is None: + 
+class MitraTailAnalyzer(ConfigReader):
+
+    def __init__(self,
+                 config_path: Union[str, os.PathLike],
+                 anchor_points: Iterable[str],
+                 body_parts: Iterable[str],
+                 save_dir: Union[str, os.PathLike],
+                 data_dir: Optional[Union[str, os.PathLike]] = None,
+                 video_dir: Optional[Union[str, os.PathLike]] = None):
+
+        ConfigReader.__init__(self, config_path=config_path, read_video_info=True, create_logger=False)
+        if data_dir is None:
+            self.data_paths = find_files_of_filetypes_in_directory(directory=self.outlier_corrected_dir, extensions=['.csv'])
+        else:
+            self.data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv'])
+        if video_dir is not None:
+            self.video_dir = video_dir
+        self.paths = {}
+        for data_path in self.data_paths:
+            video = find_video_of_file(video_dir=self.video_dir, filename=get_fn_ext(filepath=data_path)[1])
+            self.paths[data_path] = video
+        check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.data_paths)
+        self.tail_cols, self.bp_cols = [], []
+        for bp in anchor_points:
+            self.tail_cols.append(f'{bp}_x'.lower())
+            self.tail_cols.append(f'{bp}_y'.lower())
+        for bp in body_parts:
+            self.bp_cols.append(f'{bp}_x'.lower())
+            self.bp_cols.append(f'{bp}_y'.lower())
+        self.required_cols = self.tail_cols + self.bp_cols
+        self.save_dir = save_dir
+
+    def run(self):
+        for file_cnt, (file_path, video_path) in enumerate(self.paths.items()):
+            video_timer = SimbaTimer(start=True)
+            _, video_name, _ = get_fn_ext(filepath=file_path)
+            _, px_per_mm, fps = read_video_info(vid_info_df=self.video_info_df, video_name=video_name)
+            print(f'Analyzing {video_name} ({file_cnt + 1}/{len(self.data_paths)})...')
+            save_path = os.path.join(self.save_dir, f'{video_name}.csv')
+            print(video_path, save_path)
+            if not os.path.isfile(save_path) and (video_path is not None) and os.path.isfile(video_path):
+                df = read_df(file_path=file_path, file_type=self.file_type)
+                out_df = deepcopy(df)
+                df.columns = [str(x).lower() for x in df.columns]
+                check_valid_dataframe(df=df, valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=self.required_cols)
+
+                tail_geometry_df = df[self.tail_cols].values.reshape(len(df), int(len(self.tail_cols) / 2), 2).astype(np.int64)
+                tail_geometries = GeometryMixin().bodyparts_to_polygon(data=tail_geometry_df, parallel_offset=35, pixels_per_mm=px_per_mm)
+
+                hull_geometry_df = df[self.bp_cols].values.reshape(len(df), int(len(self.bp_cols) / 2), 2).astype(np.int64)
+                hull_geometries = GeometryMixin().bodyparts_to_polygon(data=hull_geometry_df, parallel_offset=40, pixels_per_mm=px_per_mm)
+
+                out_df['tail_area'] = GeometryMixin().multiframe_area(shapes=tail_geometries, pixels_per_mm=px_per_mm)
+
+                tail_area_std = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=out_df['tail_area'].values.astype(np.float32), window_sizes=np.array([0.5, 1.0, 2.0]), sample_rate=fps, statistics=typed.List(['std']))
+                out_df = pd.concat([out_df, pd.DataFrame(tail_area_std[0], columns=['tail_area_std_05', 'tail_area_std_1', 'tail_area_std_2'])], axis=1)
+
+                tail_area_mean = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=out_df['tail_area'].values.astype(np.float32), window_sizes=np.array([0.5, 1.0, 2.0]), sample_rate=fps, statistics=typed.List(['mean']))
+                out_df = pd.concat([out_df, pd.DataFrame(tail_area_mean[0], columns=['tail_area_mean_05', 'tail_area_mean_1', 'tail_area_mean_2'])], axis=1)
+
+                tail_area_mad = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=out_df['tail_area'].values.astype(np.float32), window_sizes=np.array([0.5, 1.0, 2.0]), sample_rate=fps, statistics=typed.List(['mad']))
+                out_df = pd.concat([out_df, pd.DataFrame(tail_area_mad[0], columns=['tail_area_mad_05', 'tail_area_mad_1', 'tail_area_mad_2'])], axis=1)
+
+                out_df['tail_hausdorf_05'] = GeometryMixin().multiframe_hausdorff_distance(geometries=tail_geometries, lag=0.5, sample_rate=fps)
+                out_df['tail_hausdorf_1'] = GeometryMixin().multiframe_hausdorff_distance(geometries=tail_geometries, lag=1, sample_rate=fps)
+                out_df['tail_hausdorf_2'] = GeometryMixin().multiframe_hausdorff_distance(geometries=tail_geometries, lag=2, sample_rate=fps)
+
+                out_df['hull_hausdorf_05'] = GeometryMixin().multiframe_hausdorff_distance(geometries=hull_geometries, lag=0.5, sample_rate=fps)
+                out_df['hull_hausdorf_1'] = GeometryMixin().multiframe_hausdorff_distance(geometries=hull_geometries, lag=1, sample_rate=fps)
+                out_df['hull_hausdorf_2'] = GeometryMixin().multiframe_hausdorff_distance(geometries=hull_geometries, lag=2, sample_rate=fps)
+
+                if video_path is not None:
+                    tail_imgs = ImageMixin().slice_shapes_in_imgs(imgs=video_path, shapes=tail_geometries, bg_color=(255, 255, 255), core_cnt=8, verbose=True)
+                    tail_imgs = ImageMixin.pad_img_stack(image_dict=tail_imgs)
+                    tail_imgs = np.stack(list(tail_imgs.values()))
+                    out_df['tail_mse_05'] = ImageMixin.img_sliding_mse(imgs=tail_imgs, slide_length=0.5, sample_rate=float(fps))
+                    out_df['tail_mse_1'] = ImageMixin.img_sliding_mse(imgs=tail_imgs, slide_length=1.0, sample_rate=float(fps))
+                    out_df['tail_mse_2'] = ImageMixin.img_sliding_mse(imgs=tail_imgs, slide_length=2.0, sample_rate=float(fps))
+            video_timer.stop_timer()
+            write_df(df=out_df, file_type='csv', save_path=save_path)
+            print(video_timer.elapsed_time_str)
+
+runner = MitraTailAnalyzer(config_path=r"C:\troubleshooting\mitra\project_folder\project_config.ini",
+                           data_dir=r'C:\troubleshooting\mitra\project_folder\videos\additional\bg_removed\rotated',
+                           video_dir=r'C:\troubleshooting\mitra\project_folder\videos\additional\bg_removed\rotated',
+                           save_dir=r'C:\troubleshooting\mitra\project_folder\videos\additional\bg_removed\rotated\tail_features_additional',
+                           anchor_points=('tail_base', 'tail_center', 'tail_tip'),
+                           body_parts=('nose', 'left_ear', 'right_ear', 'right_side', 'left_side', 'tail_base'))
+runner.run()
\ No newline at end of file
diff --git a/simba/sandbox/mitra_timebins.py b/simba/sandbox/mitra_timebins.py
new file mode 100644
index 000000000..4d9e4d6fc
--- /dev/null
+++ b/simba/sandbox/mitra_timebins.py
@@ -0,0 +1,63 @@
+import os
+from typing import Union
+import pandas as pd
+import numpy as np
+
+from simba.utils.checks import check_int, check_if_dir_exists, check_file_exist_and_readable, check_str, check_valid_dataframe, check_all_file_names_are_represented_in_video_log
+from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, get_fn_ext, read_video_info_csv, read_video_info
+
+
+def mitra_timebins(data_dir: Union[str, os.PathLike],
+                   frm_path: Union[str, os.PathLike],
+                   clf_name: str,
+                   video_info_path: Union[str, os.PathLike],
+                   save_path: Union[str, os.PathLike],
+                   window_size: int):
+
+    check_file_exist_and_readable(file_path=video_info_path)
+    check_file_exist_and_readable(file_path=frm_path)
+    check_if_dir_exists(in_dir=data_dir)
+    check_int(name='window_size', value=window_size, min_value=1)
+    check_str(name='clf', value=clf_name)
+    data_paths = find_files_of_filetypes_in_directory(directory=data_dir, extensions=['.csv'], raise_error=True)
+    frm_df = pd.read_csv(frm_path)
+    video_info = read_video_info_csv(file_path=video_info_path)
+    check_all_file_names_are_represented_in_video_log(video_info_df=video_info, data_paths=data_paths)
+
+    results = pd.DataFrame(columns=['VIDEO', 'TIME-BIN', f'BEHAVIOR {clf_name} (S)', f'BEHAVIOR {clf_name} (FRAMES)'])
+
+    for file_cnt, file_path in enumerate(data_paths):
+        video_name = get_fn_ext(filepath=file_path)[1]
+        _, _, fps = read_video_info(vid_info_df=video_info, video_name=video_name)
+        two_min_frames = (fps * 60) * 2
+        data_df = read_df(file_path=file_path, file_type='csv', usecols=[clf_name]).reset_index(drop=True)
+        print(video_name)
+        start_frm = frm_df[frm_df['VIDEO'] == video_name].iloc[0]['CNO onset (frame)']
+        end_frm = np.ceil(((fps * 60) * 10)) + start_frm
+        frm_win_size = np.ceil(fps * window_size)
+        pre_bins_cnt = int(120 / window_size)
+        video_bins_pre = np.arange(start_frm, two_min_frames, -frm_win_size)[:pre_bins_cnt]
+        video_bins_pre = np.append(video_bins_pre, two_min_frames).astype(np.int32)
+        video_bins_post = np.arange(start_frm, end_frm + frm_win_size, frm_win_size).astype(np.int32)
+        for epoch_idx in range(video_bins_pre.shape[0] - 1):
+            stop, start = video_bins_pre[epoch_idx], video_bins_pre[epoch_idx + 1]
+            epoch_arr = data_df.loc[start:stop][clf_name].values
+            epoch_s = np.sum(epoch_arr) / fps
+            results.loc[len(results)] = [video_name, -(epoch_idx) - 1, round(epoch_s, 4), np.sum(epoch_arr)]
+        for epoch_idx in range(video_bins_post.shape[0] - 1):
+            start, stop = video_bins_post[epoch_idx], video_bins_post[epoch_idx + 1]
+            epoch_arr = data_df.loc[start:stop][clf_name].values
+            epoch_s = np.sum(epoch_arr) / fps
+            results.loc[len(results)] = [video_name, epoch_idx, round(epoch_s, 4), np.sum(epoch_arr)]
+
+    results.sort_values(['VIDEO', 'TIME-BIN']).to_csv(save_path, index=False)
+
+frm_path = r"D:\troubleshooting\mitra\Start_annotations_Simon_Hallie.csv"
+data_dir = r"D:\troubleshooting\mitra\project_folder\logs\rearing_data"
+window_size = 120
+video_info_path = r"D:\troubleshooting\mitra\project_folder\logs\video_info.csv"
+save_path = r"D:\troubleshooting\mitra\project_folder\logs\rearing_timebins_120s.csv"
+mitra_timebins(data_dir=data_dir, frm_path=frm_path, window_size=window_size, video_info_path=video_info_path, clf_name='rearing', save_path=save_path)
\ No newline at end of file
diff --git a/simba/sandbox/momentum.py b/simba/sandbox/momentum.py
new file mode 100644
index 000000000..98d7e243e
--- /dev/null
+++ b/simba/sandbox/momentum.py
@@ -0,0 +1,46 @@
+import numpy as np
+from simba.utils.checks import check_valid_array, check_float
+from simba.utils.enums import Formats
+
+def momentum_magnitude(x: np.ndarray, mass: float, sample_rate: float) -> float:
+    """
+    Compute the magnitude of momentum given 2D positional data and mass.
+
+    :param np.ndarray x: 2D array of shape (n_samples, 2) representing positions.
+    :param float mass: Mass of the object.
+    :param float sample_rate: Sampling rate in FPS.
+    :returns: Magnitude of the momentum.
+    :rtype: float
+    """
+
+    check_valid_array(data=x, source=f'{momentum_magnitude.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=[2,], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_float(name=f'{momentum_magnitude.__name__} mass', value=mass, min_value=10e-6)
+    check_float(name=f'{momentum_magnitude.__name__} sample_rate', value=sample_rate, min_value=10e-6)
+    dx, dy = np.diff(x[:, 0].flatten()), np.diff(x[:, 1].flatten())
+    speed = np.mean(np.sqrt(dx ** 2 + dy ** 2) / (1 / sample_rate))
+    return mass * speed
+
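+# Illustrative check of momentum_magnitude (hypothetical numbers): ten frames of
+# straight-line motion at 5 px/frame sampled at 30 FPS give a mean speed of
+# 150 px/s, so a mass of 0.04 (arbitrary units) yields |p| = 6.0.
+#
+# x = np.stack([np.arange(0, 50, 5), np.zeros(10)], axis=1)  # (10, 2) positions
+# momentum_magnitude(x=x, mass=0.04, sample_rate=30.0)       # -> 6.0
+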
+
+def sliding_momentum_magnitude(x: np.ndarray, mass: np.ndarray, sample_rate: float, time_window: float) -> np.ndarray:
+    """
+    Compute the sliding window momentum magnitude for 2D positional data.
+
+    :param np.ndarray x: 2D array of shape (n_samples, 2) representing positions.
+    :param np.ndarray mass: Array of mass values for each frame.
+    :param float sample_rate: Sampling rate in FPS.
+    :param float time_window: Time window in seconds for sliding momentum calculation.
+    :returns: Momentum magnitudes computed for each frame, with results from frames that cannot form a complete window filled with -1.0.
+    :rtype: np.ndarray
+    """
+    time_window_frms = int(np.ceil(sample_rate * time_window))
+    results = np.full(shape=(x.shape[0]), fill_value=-1.0, dtype=np.float32)
+    delta_t = 1 / sample_rate
+    for r in range(time_window_frms, x.shape[0] + 1):
+        l = r - time_window_frms
+        keypoint_sample, mass_sample = x[l:r], mass[l:r]
+        mass_sample_mean = np.mean(mass_sample)
+        dx, dy = np.diff(keypoint_sample[:, 0].flatten()), np.diff(keypoint_sample[:, 1].flatten())
+        speed = np.mean(np.sqrt(dx ** 2 + dy ** 2) / delta_t)
+        results[r - 1] = mass_sample_mean * speed
+    return results
diff --git a/simba/sandbox/morans.py b/simba/sandbox/morans.py
new file mode 100644
index 000000000..78f7dcbd3
--- /dev/null
+++ b/simba/sandbox/morans.py
@@ -0,0 +1,106 @@
+import pandas as pd
+import libpysal as lps
+import esda
+import numpy as np
+from typing import Dict, Tuple
+from simba.mixins.geometry_mixin import GeometryMixin
+from shapely.geometry import Polygon
+from simba.utils.read_write import read_df, get_video_meta_data, read_frm_of_video
+from simba.utils.data import create_color_palette, find_ranked_colors
+from simba.utils.enums import Formats
+from simba.utils.checks import check_valid_dict, check_valid_array
+from simba.utils.errors import InvalidInputError
+
+QUAD_MAP = {1: 'HH', 2: 'LH', 3: 'LL', 4: 'HL'}
+MORAN_COLORS = {'HH': (215,25,28), 'LH': (171,217,233), 'LL': (44,123,182), 'HL': (253,174,97)}
+
+
+def morans_local_i(x: np.ndarray,
+                   grid: Dict[Tuple[int, ...], Polygon],
+                   bg_img: np.ndarray) -> Tuple[pd.DataFrame, np.ndarray]:
+
+    check_valid_dict(x=grid, valid_key_dtypes=(tuple,), valid_values_dtypes=(Polygon,), min_len_keys=2)
+    check_valid_array(data=x, accepted_ndims=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    indices = [(i, j) for i in range(x.shape[0]) for j in range(x.shape[1])]
+    if (len([i for i in indices if i not in grid.keys()]) > 0) or (len([i for i in grid.keys() if i not in indices]) > 0):
+        raise InvalidInputError(msg=f'The size of x ({x.shape}) and the number of keys in grid ({len(grid.keys())}) do not match', source=morans_local_i.__name__)
+    df = pd.DataFrame(x.flatten(), index=indices, columns=["value"])
+    df['geometry'] = grid.values()
+    queen_weights = lps.weights.Queen.from_dataframe(df, silence_warnings=True)
+    li = esda.moran.Moran_Local(df['value'], queen_weights)
+    li = [[q, p] for q, p in zip(list(li.q), list(li.p_sim))]
+
+    moran_df = pd.DataFrame.from_records(li, columns=['quadrant_type', 'significance'], index=indices)
+    moran_df[['hotzone_index', 'outlier_index']] = None
+    moran_df['hotzone_index'] = moran_df.apply(lambda r: 1 - r['significance'] if r['quadrant_type'] == 1 else r['hotzone_index'], axis=1)
+    moran_df['hotzone_index'] = moran_df.apply(lambda r: - 1 + r['significance'] if r['quadrant_type'] == 3 else r['hotzone_index'], axis=1)
+    moran_df['outlier_index'] = moran_df.apply(lambda r: - 1 + r['significance'] if r['quadrant_type'] == 2 else r['outlier_index'], axis=1)
+    moran_df['outlier_index'] = moran_df.apply(lambda r: 1 - r['significance'] if r['quadrant_type'] == 4 else r['outlier_index'], axis=1)
+    moran_df.fillna(0, inplace=True)
+
+    moran_df['quadrant_code'] = moran_df['quadrant_type'].map(QUAD_MAP)
+    moran_df['quadrant_clr'] = moran_df['quadrant_code'].map(MORAN_COLORS)
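+    # Quadrant semantics: Local Moran's I assigns each grid cell to HH (high value
+    # among high neighbours, hotspot), LL (low among low, coldspot), or LH/HL
+    # (spatial outliers). The hotzone/outlier indices above fold the permutation
+    # p-value into a signed score in [-1, 1].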
+    moran_df['quadrant'] = list(grid.values())
+    clrs = list(moran_df['quadrant_clr'])
+
+    img = GeometryMixin.view_shapes(shapes=list(grid.values()), color_palette=clrs, fill_shapes=True, pixel_buffer=0, bg_img=bg_img)
+
+    return moran_df, img
+
+
+DATA_PATH = r"D:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\FR_MA152_Saline2_0711.csv"
+data_arr = read_df(file_path=DATA_PATH, file_type='csv', usecols=['nose_x', 'nose_y']).values
+grid = GeometryMixin.bucket_img_into_grid_square(img_size=(668, 540), bucket_grid_size=(5, 5))[0]
+cumsum_time = GeometryMixin().cumsum_coord_geometries(data=data_arr, geometries=grid, fps=30, verbose=False)[-1]
+
+data, img = morans_local_i(x=cumsum_time, grid=grid, bg_img=np.zeros(shape=(668, 540)))
+
+import cv2
+cv2.imshow('morans_local_i', img)
+cv2.waitKey(10000)
\ No newline at end of file
diff --git a/simba/sandbox/mosaic.py b/simba/sandbox/mosaic.py
new file mode 100644
index 000000000..ad3fec4ac
--- /dev/null
+++ b/simba/sandbox/mosaic.py
@@ -0,0 +1,35 @@
+import subprocess
+
+def create_mosaic(input_files, output_file, panel_width, panel_height):
+    # Rebuilt sketch: the original nullsrc/overlay/tile chain did not place the
+    # scaled panels on a grid. Scale each input to the panel size, then arrange
+    # the streams on a num_cols x num_rows grid with the xstack filter (assumes
+    # >= 2 inputs; ':fill' assumes ffmpeg >= 4.1).
+    num_videos = len(input_files)
+    num_rows = 2  # fixed two rows; the last cell stays empty for odd counts
+    num_cols = (num_videos + num_rows - 1) // num_rows  # ceil(n / num_rows)
+
+    # Scale every input stream to the panel size.
+    filter_complex = ""
+    for i in range(num_videos):
+        filter_complex += f"[{i}:v] setpts=PTS-STARTPTS, scale={panel_width}x{panel_height} [vid{i}];"
+    # Absolute x_y offset of each panel on the grid (row-major order).
+    layout = []
+    for i in range(num_videos):
+        row, col = divmod(i, num_cols)
+        layout.append(f"{col * panel_width}_{row * panel_height}")
+    filter_complex += "".join(f"[vid{i}]" for i in range(num_videos))
+    filter_complex += f"xstack=inputs={num_videos}:layout={'|'.join(layout)}:fill=black"
+
+    # Generate FFmpeg command
+    ffmpeg_command = (
+        f"ffmpeg "
+        + " ".join([f'-i "{input_file}"' for input_file in input_files])
+        + f" -filter_complex \"{filter_complex}\" "
+        + f"-c:v libx264 -crf 18 -preset veryfast {output_file} -y"
+    )
+
+    # Run FFmpeg command
+    subprocess.run(ffmpeg_command, shell=True)
+
+# Example usage:
+input_files = ['/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat7_8(2).mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/2022-06-21_NOB_IOT_23.mp4', '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4']
+output_file = '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/blank_test_.mp4'
+panel_width = 100  # Example width for each panel
+panel_height = 100  # Example height for each panel
+create_mosaic(input_files, output_file, panel_width, panel_height)
diff --git a/simba/sandbox/multiframe_is_shape_covered.py b/simba/sandbox/multiframe_is_shape_covered.py
new file mode 100644
index 000000000..1a6905aab
--- /dev/null
+++ b/simba/sandbox/multiframe_is_shape_covered.py
@@ -0,0 +1,50 @@
+from shapely.geometry import Polygon, LineString, MultiPolygon
+from typing import List, Optional
+import numpy as np
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.utils.checks import check_valid_lst, check_int
+from simba.utils.errors import InvalidInputError
+from simba.utils.read_write import find_core_cnt
+import multiprocessing
+from simba.utils.enums import Defaults
+
+
+def multiframe_is_shape_covered(shape_1: List[Polygon],
+                                shape_2: List[Polygon],
+                                core_cnt: Optional[int] = -1) -> List[bool]:
+    """
+    For each shape in a time-series of shapes, check if another shape in the same time-series fully covers the
+    first shape.
+
+    .. image:: _static/img/multiframe_is_shape_covered.png
+       :width: 600
+       :align: center
+
+    :example:
+    >>> shape_1 = GeometryMixin().multiframe_bodyparts_to_polygon(data=np.random.randint(0, 200, (100, 6, 2)))
+    >>> shape_2 = [Polygon([[0, 0], [20, 20], [20, 10], [10, 20]]) for x in range(len(shape_1))]
+    >>> multiframe_is_shape_covered(shape_1=shape_1, shape_2=shape_2, core_cnt=3)
+    """
+    check_valid_lst(data=shape_1, source=multiframe_is_shape_covered.__name__, valid_dtypes=(LineString, Polygon, MultiPolygon,))
+    check_valid_lst(data=shape_2, source=multiframe_is_shape_covered.__name__, valid_dtypes=(LineString, Polygon, MultiPolygon,))
+    if len(shape_1) != len(shape_2):
+        raise InvalidInputError(msg=f'shape_1 ({len(shape_1)}) and shape_2 ({len(shape_2)}) are unequal length', source=multiframe_is_shape_covered.__name__)
+    check_int(name="CORE COUNT", value=core_cnt, min_value=-1, max_value=find_core_cnt()[0], raise_error=True)
+    if core_cnt == -1:
+        core_cnt = find_core_cnt()[0]
+    shapes = [list(x) for x in zip(shape_1, shape_2)]
+    results = []
+    with multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.LARGE_MAX_TASK_PER_CHILD.value) as pool:
+        for cnt, mp_return in enumerate(pool.imap(GeometryMixin.is_shape_covered, shapes, chunksize=1)):
+            results.append(mp_return)
+        pool.terminate()
+        pool.join()
+    return results
+
+
+shape_1 = GeometryMixin().multiframe_bodyparts_to_polygon(data=np.random.randint(0, 200, (100, 6, 2)))
+shape_2 = [Polygon([[0, 0], [20, 20], [20, 10], [10, 20]]) for x in range(len(shape_1))]
+multiframe_is_shape_covered(shape_1=shape_1, shape_2=shape_2, core_cnt=3)
diff --git a/simba/sandbox/network.py b/simba/sandbox/network.py
new file mode 100644
index 000000000..44c9b0db3
--- /dev/null
+++ b/simba/sandbox/network.py
@@ -0,0 +1,83 @@
+import itertools
+import os
+from typing import Dict, List, Optional, Tuple, Union
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+import networkx as nx
+import numpy as np
+from numba import jit
+from pyvis.network import Network
+from simba.mixins.network_mixin import NetworkMixin
+
+from simba.utils.checks import (check_float, check_instance, check_int,
+                                check_iterable_length, check_str,
+                                check_valid_array, check_valid_hex_color,
+                                check_valid_tuple, check_if_dir_exists, check_valid_lst)
+from simba.utils.data import create_color_palette, find_ranked_colors, get_mode
+from simba.utils.errors import CountError, InvalidInputError
+from itertools import combinations
+
+
+def graph_load_centrality(g: nx.Graph):
+    check_instance(source=f'{graph_load_centrality.__name__} g', instance=g, accepted_types=(nx.Graph, ))
+    load_centrality = nx.load_centrality(g, weight='weight')
+    load_centrality = {key: value * 100 for key, value in load_centrality.items()}
+
+    # Additional centralities computed for exploration (currently unused by the caller).
+    weighted_betweenness_centrality = nx.betweenness_centrality(g, weight='weight')
+    harmonic_centrality = nx.harmonic_centrality(g, distance='weight')
+    weighted_closeness_centrality = nx.closeness_centrality(g, distance='weight')
+    weighted_degree_centrality = {n: sum(data['weight'] for _, _, data in g.edges(n, data=True)) for n in g.nodes()}
+    eigenvector_centrality = nx.eigenvector_centrality(g, weight='weight')
+
+    # Return the load centrality that the function name and call-site expect.
+    return load_centrality
+
+
+animals = ['Simon', 'JJ', 'Nastacia', 'Liana', 'Roël']
+animals = list(combinations(animals, 2))
+
+weights = list(range(len(animals) * 10, 0, -5))
+
+# weights = np.random.randint(0, 10, size=(len(animals)))
+graph_input = {}
+for i in range(len(animals)):
+    graph_input[animals[i]] = int(weights[i])
+g = NetworkMixin.create_graph(data=graph_input)
+load = graph_load_centrality(g=g)
+
+page_rank = NetworkMixin.graph_page_rank(graph=g)
+page_rank = {key: value * 100 if value > 0 else 0 for key, value in page_rank.items()}
+katz = NetworkMixin.graph_katz_centrality(graph=g)
+katz = {key: value * 100 if value > 0 else 0 for key, value in katz.items()}
+
+graph_current_flow_closeness_centrality = NetworkMixin.graph_current_flow_closeness_centrality(graph=g)
+graph_current_flow_closeness_centrality = {key: value * 1 if value > 0 else 0 for key, value in graph_current_flow_closeness_centrality.items()}
+
+graph_clrs = find_ranked_colors(data=katz, palette='magma', as_hex=True)
+
+NetworkMixin.visualize(graph=g,
+                       node_size=load,
+                       save_path='/Users/simon/Desktop/envs/simba/simba/simba/sandbox/graph.html',
+                       node_shape='dot',
+                       smooth_type='dynamic', palette=graph_clrs)
diff --git a/simba/sandbox/new_outlier_corrector_location.py b/simba/sandbox/new_outlier_corrector_location.py
new file mode 100644
index 000000000..3606f43dd
--- /dev/null
+++ b/simba/sandbox/new_outlier_corrector_location.py
@@ -0,0 +1,157 @@
+__author__ = "Simon Nilsson"
+
+
+import functools
+import multiprocessing
+import os
+from typing import Dict, Optional, Union
+
+import numpy as np
+import pandas as pd
+
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.utils.checks import check_float, check_if_dir_exists
+from simba.utils.enums import ConfigKey, Dtypes
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import (find_files_of_filetypes_in_directory, get_fn_ext, read_config_entry, read_df, write_df)
+
+class OutlierCorrecterLocation(ConfigReader, FeatureExtractionMixin):
+    """
+    Detect and amend outliers in pose-estimation data based on the location of the body-parts
+    in the current frame relative to the location of the body-parts in the preceding frame, using heuristic rules.
+
+    The heuristic rule criteria are grabbed from the SimBA project project_config.ini under the [Outlier settings] header.
+
+    .. note::
+       `Documentation `_.
+
+    .. image:: _static/img/location_outlier.png
+       :width: 500
+       :align: center
+
+    :param Union[str, os.PathLike] config_path: Path to SimBA project config file in Configparser format.
+    :param Optional[Union[str, os.PathLike]] data_dir: The directory storing the input data. If None, then the ``outlier_corrected_movement`` directory of the SimBA project.
+    :param Optional[Union[str, os.PathLike]] save_dir: The directory to store the results. If None, then the ``outlier_corrected_movement_location`` directory of the SimBA project.
+    :param Optional[Dict[str, Dict[str, str]]] animal_dict: Dictionary holding the animal names, and the two body-parts to use to measure the mean or median size of the animals. If None, grabs the info from the SimBA project config.
+    :param Optional[float] criterion: The criterion multiplier. If None, grabs the info from the SimBA project config.
+
+    :example:
+    >>> _ = OutlierCorrecterLocation(config_path='MyProjectConfig').run()
+    """
+
+    def __init__(self,
+                 config_path: Union[str, os.PathLike],
+                 data_dir: Optional[Union[str, os.PathLike]] = None,
+                 save_dir: Optional[Union[str, os.PathLike]] = None,
+                 animal_dict: Optional[Dict[str, Dict[str, str]]] = None,
+                 criterion: Optional[float] = None):
+
+        ConfigReader.__init__(self, config_path=config_path, create_logger=False, read_video_info=False)
+        FeatureExtractionMixin.__init__(self)
+        if not os.path.exists(self.outlier_corrected_dir):
+            os.makedirs(self.outlier_corrected_dir)
+        if criterion is None:
+            self.criterion = read_config_entry(self.config, ConfigKey.OUTLIER_SETTINGS.value, ConfigKey.LOCATION_CRITERION.value, Dtypes.FLOAT.value)
+        else:
+            check_float(name=f'{criterion} criterion', value=criterion, min_value=10e-10)
+            self.criterion = criterion
+        if data_dir is not None:
+            check_if_dir_exists(in_dir=data_dir, source=self.__class__.__name__)
+            self.data_dir = data_dir
+        else:
+            self.data_dir = self.outlier_corrected_movement_dir
+        if save_dir is not None:
+            check_if_dir_exists(in_dir=save_dir, source=self.__class__.__name__)
+            self.save_dir = save_dir
+        else:
+            self.save_dir = self.outlier_corrected_dir
+
+        self.above_criterion_dict_dict, self.below_criterion_dict_dict = {}, {}
+        if animal_dict is None:
+            self.outlier_bp_dict = {}
+            if self.animal_cnt == 1:
+                self.animal_id = read_config_entry(self.config, ConfigKey.MULTI_ANIMAL_ID_SETTING.value, ConfigKey.MULTI_ANIMAL_IDS.value, Dtypes.STR.value)
+                if self.animal_id != "None":
+                    self.animal_bp_dict[self.animal_id] = self.animal_bp_dict.pop("Animal_1")
+
+            for animal_name in self.animal_bp_dict.keys():
+                self.outlier_bp_dict[animal_name] = {}
+                self.outlier_bp_dict[animal_name]["bp_1"] = read_config_entry(self.config, ConfigKey.OUTLIER_SETTINGS.value, "location_bodypart1_{}".format(animal_name.lower()), "str")
+                self.outlier_bp_dict[animal_name]["bp_2"] = read_config_entry(self.config, ConfigKey.OUTLIER_SETTINGS.value, "location_bodypart2_{}".format(animal_name.lower()), "str")
+        else:
+            self.outlier_bp_dict = animal_dict
+
+    def __find_location_outliers(self, bp_dict: dict, animal_criteria: dict):
+        above_criteria_dict, below_criteria_dict = {}, {}
+        for animal_name, animal_data in bp_dict.items():
+            animal_criterion = animal_criteria[animal_name]
+            above_criteria_dict[animal_name] = {}
+            for first_bp_cnt, (first_body_part_name, first_bp_cords) in enumerate(animal_data.items()):
+                second_bp_names = [x for x in list(animal_data.keys()) if x != first_body_part_name]
+                above_criterion_frms = []
+                for second_bp_cnt, second_bp in enumerate(second_bp_names):
+                    second_bp_cords = animal_data[second_bp]
+                    distances = self.framewise_euclidean_distance(location_1=first_bp_cords, location_2=second_bp_cords, px_per_mm=1.0, centimeter=False)
+                    above_criterion_frms.extend(np.argwhere(distances > animal_criterion).flatten())
+                unique, counts = np.unique(above_criterion_frms, return_counts=True)
+                above_criteria_dict[animal_name][first_body_part_name] = np.sort(unique[counts > 1])
+        return above_criteria_dict
+
+    def __correct_outliers(self, df: pd.DataFrame, above_criteria_dict: dict):
+        for animal_name, animal_data in above_criteria_dict.items():
+            for body_part_name, frm_idx in animal_data.items():
+                col_names = [f'{body_part_name}_x', f'{body_part_name}_y']
+                if len(frm_idx) > 0:
+                    df.loc[frm_idx, col_names] = np.nan
+        # Forward-fill along the frame axis so flagged frames inherit the preceding frame's value.
+        return df.fillna(method='ffill', axis=0).fillna(0)
+
+    def run(self):
+        # Per video: the correction criterion is the mean distance between the two
+        # reference body-parts multiplied by the criterion multiplier. A body-part
+        # is flagged as a location outlier in a frame when its distance to more
+        # than one other body-part exceeds that criterion.
+        self.logs, self.frm_cnts = {}, {}
+        
data_paths = find_files_of_filetypes_in_directory(directory=self.data_dir, extensions=[f'.{self.file_type}'], raise_error=True) + for file_cnt, data_path in enumerate(data_paths): + video_timer = SimbaTimer(start=True) + _, video_name, _ = get_fn_ext(data_path) + print(f"Processing video {video_name}..") + save_path = os.path.join(self.save_dir, f"{video_name}.{self.file_type}") + above_criterion_dict, below_criterion_dict, animal_criteria, bp_dict = {}, {}, {}, {} + df = read_df(data_path, self.file_type) + for animal_name, animal_bps in self.outlier_bp_dict.items(): + animal_bp_distances = np.sqrt((df[animal_bps["bp_1"] + "_x"] - df[animal_bps["bp_2"] + "_x"]) ** 2 + (df[animal_bps["bp_1"] + "_y"] - df[animal_bps["bp_2"] + "_y"]) ** 2) + animal_criteria[animal_name] = (animal_bp_distances.mean() * self.criterion) + for animal_name, animal_bps in self.animal_bp_dict.items(): + bp_col_names = np.array([[i, j] for i, j in zip(animal_bps["X_bps"], animal_bps["Y_bps"])]).ravel() + animal_arr = df[bp_col_names].to_numpy() + bp_dict[animal_name] = {} + for bp_cnt, bp_col_start in enumerate(range(0, animal_arr.shape[1], 2)): + bp_name = animal_bps["X_bps"][bp_cnt][:-2] + bp_dict[animal_name][bp_name] = animal_arr[:, bp_col_start: bp_col_start + 2] + above_criteria_dict = self.__find_location_outliers(bp_dict=bp_dict, animal_criteria=animal_criteria) + + df = self.__correct_outliers(df=df, above_criteria_dict=above_criteria_dict) + write_df(df=df, file_type=self.file_type, save_path=save_path) + self.logs[video_name], self.frm_cnts[video_name] = above_criteria_dict, len(df) + video_timer.stop_timer() + print(f"Corrected location outliers for file {video_name} (elapsed time: {video_timer.elapsed_time_str}s)...") + self.__save_log_file() + + def __save_log_file(self): + out_df = pd.DataFrame(columns=['VIDEO', 'ANIMAL', 'BODY-PART', 'CORRECTION COUNT', 'CORRECTION RATIO']) + for video_name, video_data in self.logs.items(): + for animal_name, animal_data in video_data.items(): + for bp_name, bp_data in animal_data.items(): + correction_ratio = round(len(bp_data) / self.frm_cnts[video_name], 6) + out_df.loc[len(out_df)] = [video_name, animal_name, bp_name, len(bp_data), correction_ratio] + self.logs_path = os.path.join(self.logs_path, f"Outliers_location_{self.datetime}.csv") + out_df.to_csv(self.logs_path) + self.timer.stop_timer() + stdout_success(msg='Log for corrected "location outliers" saved in project_folder/logs', elapsed_time=self.timer.elapsed_time_str) + + + +# test = OutlierCorrecterLocation(config_path=r"C:\troubleshooting\two_black_animals_14bp\project_folder\project_config.ini") +# test.run() + +# test = OutlierCorrecterLocation(config_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini') +# test.correct_location_outliers() diff --git a/simba/sandbox/normalized_cross_correlation.py b/simba/sandbox/normalized_cross_correlation.py new file mode 100644 index 000000000..0e6b04b59 --- /dev/null +++ b/simba/sandbox/normalized_cross_correlation.py @@ -0,0 +1,185 @@ +from skimage.metrics import structural_similarity +import numpy as np +import cv2 +from simba.utils.checks import check_if_valid_img, check_valid_lst, check_int +from typing import List, Optional +from simba.mixins.image_mixin import ImageMixin +from numba import jit, njit, prange + + +@njit(["(uint8[:, :], uint8[:, :])", + "(uint8[:, :, :], uint8[:, :, :])"]) +def cross_correlation_similarity(img_1: np.ndarray, img_2: np.ndarray) -> float: + """ + Computes the Normalized 
Cross-Correlation (NCC) similarity between two images. + + The NCC measures the similarity between two images by calculating the correlation + coefficient of their pixel values. The output value ranges from -1 to 1, where 1 indicates perfect positive correlation, 0 indicates no correlation, and -1 indicates perfect negative correlation. + + :param np.ndarray img_1: The first input image. It can be a 2D grayscale image or a 3D color image. + :param np.ndarray img_2: The second input image. It must have the same dimensions as img_1. + :return float: The NCC value representing the similarity between the two images. Returns 0.0 if the denominator is zero, indicating no similarity. + + :example: + >>> img_1 = cv2.imread('/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/a.png').astype(np.uint8) + >>> img_2 = cv2.imread('/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/f.png').astype(np.uint8) + >>> cross_correlation_similarity(img_1=img_1, img_2=img_2) + """ + + img1_flat = img_1.flatten() + img2_flat = img_2.flatten() + mean_1, mean_2 = np.mean(img1_flat), np.mean(img2_flat) + N = np.sum((img1_flat - mean_1) * (img2_flat - mean_2)) + D = np.sqrt(np.sum((img1_flat - mean_1) ** 2) * np.sum((img2_flat - mean_2) ** 2)) + if D == 0: + return 0.0 + else: + return N / D + +@njit(["(uint8[:, :, :], int64)", + "(uint8[:, :, :, :], int64)"]) +def sliding_cross_correlation_similarity(imgs: np.ndarray, + stride: int) -> np.ndarray: + """ + Computes the Normalized Cross-Correlation (NCC) similarity for a sequence of images using a sliding window approach. + + This function calculates the NCC between each image and the image that is `stride` positions before it in the sequence. The result is an array of NCC values representing + the similarity between successive images. + + .. seealso:: + ``simba.mixins.image_mixin.ImageMixin.cross_correlation_similarity`` + ``simba.mixins.image_mixin.ImageMixin.cross_correlation_matrix`` + + :param np.ndarray imgs: A 3D array (for grayscale images) or a 4D array (for color images) containing the sequence of images. Each image should have the same size. + :param int stride: The stride length for comparing images. Determines how many steps back in the sequence each image is compared to. + :return np.ndarray: A 1D array of NCC values representing the similarity between each image and the image `stride` positions before it. The length of the array is the same as the number of images. + + :example: + >>> imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/08102021_DOT_Rat11_12_frames') + >>> imgs = {k: imgs[k] for k in sorted(imgs, key=lambda x: int(x.split('.')[0]))} + >>> imgs = np.stack(list(imgs.values())) + >>> results = sliding_cross_correlation_similarity(imgs=imgs, stride=1) + """ + results = np.ones((imgs.shape[0]), dtype=np.float32) + for i in prange(stride, imgs.shape[0]): + img1_flat, img2_flat = imgs[i-stride].flatten(), imgs[i].flatten() + mean_1, mean_2 = np.mean(img1_flat), np.mean(img2_flat) + N = np.sum((img1_flat - mean_1) * (img2_flat - mean_2)) + D = np.sqrt(np.sum((img1_flat - mean_1) ** 2) * np.sum((img2_flat - mean_2) ** 2)) + if D == 0: + results[i] = 0.0 + else: + results[i] = N / D + return results + +@njit(["(uint8[:, :, :],)", + "(uint8[:, :, :, :],)"]) +def cross_correlation_matrix(imgs: np.array) -> np.array: + """ + Computes the cross-correlation matrix for a given array of images. 
+
+    This function calculates the cross-correlation coefficient between each pair of images in the input array.
+    The cross-correlation coefficient is a measure of similarity between two images, with values ranging from
+    -1 (completely dissimilar) to 1 (identical).
+
+    The function uses the `numba` library for Just-In-Time (JIT) compilation to optimize performance, and
+    `prange` for parallel execution over the image pairs.
+
+    .. seealso::
+       ``simba.mixins.image_mixin.ImageMixin.cross_correlation_similarity``
+       ``simba.mixins.image_mixin.ImageMixin.sliding_cross_correlation_similarity``
+
+    .. note::
+       Use greyscale images for faster runtime. Ideally, this should be moved to the GPU.
+
+    :param np.array imgs: A 3D (or 4D) numpy array of images where the first dimension indexes the images,
+        and the remaining dimensions are the image dimensions (height, width, [channels]).
+        - For grayscale images: shape should be (n_images, height, width)
+        - For color images: shape should be (n_images, height, width, channels)
+
+    :return np.array: A 2D numpy array representing the cross-correlation matrix, where the element at [i, j]
+        contains the cross-correlation coefficient between the i-th and j-th images.
+
+    :example:
+    >>> imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/test')
+    >>> imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/08102021_DOT_Rat11_12_frames')
+    >>> imgs = {k: imgs[k] for k in sorted(imgs, key=lambda x: int(x.split('.')[0]))}
+    >>> imgs = np.stack(list(imgs.values()))
+    >>> imgs = ImageMixin.img_stack_to_greyscale(imgs=imgs)
+    >>> results = cross_correlation_matrix(imgs=imgs)
+    """
+
+    results = np.ones((imgs.shape[0], imgs.shape[0]), dtype=np.float32)
+    for i in prange(imgs.shape[0]):
+        img1_flat = imgs[i].flatten()
+        mean_1 = np.mean(img1_flat)
+        for j in range(i + 1, imgs.shape[0]):
+            img2_flat = imgs[j].flatten()
+            mean_2 = np.mean(img2_flat)
+            N = np.sum((img1_flat - mean_1) * (img2_flat - mean_2))
+            D = np.sqrt(np.sum((img1_flat - mean_1) ** 2) * np.sum((img2_flat - mean_2) ** 2))
+            if D == 0: val = 0.0
+            else: val = N / D
+            results[i, j] = val
+            results[j, i] = val
+    return results
+
+
+imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/test')
+# imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/08102021_DOT_Rat11_12_frames')
+imgs = {k: imgs[k] for k in sorted(imgs, key=lambda x: int(x.split('.')[0]))}
+# imgs = list(imgs.values())[0:10]
+imgs = np.stack(list(imgs.values()))
+imgs = ImageMixin.img_stack_to_greyscale(imgs=imgs)
+results = cross_correlation_matrix(imgs=imgs)  # first call triggers numba JIT compilation
+
+import time
+start = time.time()
+results = cross_correlation_matrix(imgs=imgs)  # time the second, compiled call
+print(time.time() - start)
+
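+# Illustrative numeric check of cross_correlation_similarity (hypothetical 2x2
+# images): identical images give NCC = 1, an inverted image gives NCC = -1.
+#
+# a = np.array([[0, 255], [255, 0]], dtype=np.uint8)
+# cross_correlation_similarity(img_1=a, img_2=a)        # -> 1.0
+# cross_correlation_similarity(img_1=a, img_2=255 - a)  # -> -1.0
+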
diff --git a/simba/sandbox/opencv_cuda.py b/simba/sandbox/opencv_cuda.py
new file mode 100644
index 000000000..375c54635
--- /dev/null
+++ b/simba/sandbox/opencv_cuda.py
@@ -0,0 +1,20 @@
+from numba import cuda
+import math
+import numpy as np
+import time
+
+@cuda.jit
+def add_array(a, b, c):
+    i = cuda.threadIdx.x + cuda.blockDim.x * cuda.blockIdx.x
+    if i < a.size:
+        c[i] = a[i] + b[i]
+
+N = 20
+a = np.arange(N, dtype=np.float32)
+b = np.arange(N, dtype=np.float32)
+dev_c = cuda.device_array_like(a)
+
+add_array[4, 8](a, b, dev_c)
+
+c = dev_c.copy_to_host()
+print(c)
\ No newline at end of file
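A general note on the hard-coded `[4, 8]` launch configuration above (editor's sketch, assuming the `add_array` kernel from opencv_cuda.py is in scope): the block count is usually derived from the problem size with ceiling division, so the grid covers all `N` elements for any `N`.

import math

N = 20
threads_per_block = 128
blocks_per_grid = math.ceil(N / threads_per_block)  # one block suffices for N = 20
# add_array[blocks_per_grid, threads_per_block](a, b, dev_c)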
diff --git a/simba/sandbox/optimal_font_scale.py b/simba/sandbox/optimal_font_scale.py
new file mode 100644
index 000000000..d87f1f971
--- /dev/null
+++ b/simba/sandbox/optimal_font_scale.py
@@ -0,0 +1,72 @@
+import cv2
+from typing import Optional, Tuple, Union, List
+from simba.utils.checks import check_int, check_valid_tuple
+
+def get_optimal_font_scale(text: Union[str, List[str]],
+                           accepted_px_width: int,
+                           accepted_px_height: int,
+                           text_thickness: Optional[int] = 2,
+                           font: Optional[int] = cv2.FONT_HERSHEY_TRIPLEX) -> Tuple[float, int, int]:
+
+    """
+    Get the optimal font size, column-wise and row-wise text distance of printed text for printing on images.
+
+    :param str text: The text to be printed. Either a string or a list of strings. If a list, then the longest string will be used to evaluate spacings/font.
+    :param int accepted_px_width: The widest allowed string in pixels. E.g., 1/4th of the image width.
+    :param int accepted_px_height: The highest allowed string in pixels. E.g., 1/10th of the image height.
+    :param Optional[int] text_thickness: The thickness of the font. Default: 2.
+    :param Optional[int] font: The font integer representation 0-7. See ``simba.utils.enums.Options.CV2_FONTS``.
+    :returns Tuple[float, int, int]: The font size, the shift on x between successive columns, the shift in y between successive rows.
+
+    :example:
+    >>> img = cv2.imread('/Users/simon/Desktop/Screenshot 2024-07-08 at 4.46.03 PM.png')
+    >>> accepted_px_width = int(img.shape[1] / 4)
+    >>> accepted_px_height = int(img.shape[0] / 10)
+    >>> text = 'HELLO MY FELLOW'
+    >>> get_optimal_font_scale(text=text, accepted_px_width=accepted_px_width, accepted_px_height=accepted_px_height, text_thickness=2)
+    """
+
+    check_int(name='accepted_px_width', value=accepted_px_width, min_value=1)
+    check_int(name='accepted_px_height', value=accepted_px_height, min_value=1)
+    check_int(name='text_thickness', value=text_thickness, min_value=1)
+    check_int(name='font', value=font, min_value=0, max_value=7)
+    if isinstance(text, list):
+        text = max(text, key=len)  # evaluate spacing against the longest string, as per the docstring
+    for scale in reversed(range(0, 100, 1)):
+        text_size = cv2.getTextSize(text, fontFace=font, fontScale=scale/10, thickness=text_thickness)
+        new_width, new_height = text_size[0][0], text_size[0][1]
+        if (new_width <= accepted_px_width) and (new_height <= accepted_px_height):
+            font_scale = scale / 10
+            x_shift = new_width + text_size[1]
+            y_shift = new_height + text_size[1]
+            return (font_scale, x_shift, y_shift)
+    return None, None, None
+
+
+def get_optimal_circle_size(frame_size: Tuple[int, int],
+                            circle_frame_ratio: Optional[int] = 100):
+
+    check_int(name='accepted_circle_size', value=circle_frame_ratio, min_value=1)
+    check_valid_tuple(x=frame_size, source='frame_size', accepted_lengths=(2,), valid_dtypes=(int,))
+    for i in frame_size:
+        check_int(name='frame_size', value=i, min_value=1)
+    return int(max(frame_size[0], frame_size[1]) / circle_frame_ratio)
+
+
+# img = cv2.imread('/Users/simon/Desktop/Screenshot 2024-07-08 at 4.46.03 PM.png')
+# text_width = int(img.shape[1] / 4)
+# text_height = int(img.shape[0] / 10)
+# text = 'HELLO MY FELLOW'
+# get_optimal_font_scale(text=text, accepted_px_width=text_width, accepted_px_height=text_height, text_thickness=2)
+#
+# #img = cv2.putText(img=img, text=text, org=(10, 10), fontScale=fontScale, fontFace=cv2.FONT_HERSHEY_COMPLEX, color=(0, 255, 0), thickness=2)
+# img = cv2.putText(img=img, text=text, org=(10, 10+y_shift), fontScale=fontScale, fontFace=cv2.FONT_HERSHEY_COMPLEX, color=(0, 255, 0), thickness=2)
+# img = cv2.putText(img=img, text=text, org=(10, 10+(y_shift*2)), fontScale=fontScale, fontFace=cv2.FONT_HERSHEY_COMPLEX, color=(0, 255, 0), thickness=2)
+# img = cv2.putText(img=img, text=text, org=(10+x_shift, 10+y_shift), fontScale=fontScale, fontFace=cv2.FONT_HERSHEY_COMPLEX, color=(0, 255, 0), thickness=2)
+#
+#
+# cv2.imshow('sadasd', img)
+# cv2.waitKey(5000)
diff --git a/simba/sandbox/ordinal_clf.py b/simba/sandbox/ordinal_clf.py
new file mode 100644
index 000000000..939291436
--- /dev/null
+++ b/simba/sandbox/ordinal_clf.py
@@ -0,0 +1,108 @@
+import os
+from typing import Union, Optional, Dict
+from sklearn.ensemble import RandomForestClassifier
+import numpy as np
+from joblib import Parallel, delayed
+from sklearn import clone
+from simba.mixins.train_model_mixin import TrainModelMixin
+from simba.utils.errors import SamplingError, InvalidInputError
+from simba.utils.checks import check_valid_array, check_int, check_if_dir_exists
+from simba.utils.enums import Formats
+from simba.utils.read_write import find_core_cnt, write_pickle
+
+ACCEPTED_MODELS = RandomForestClassifier
+
+class OrdinalClassifier():
+
+    """
+    .. note::
+       Modified from `sklego <https://github.com/koaning/scikit-lego/blob/main/sklego/meta/ordinal_classification.py>`__.
+
+    The implementation is based on the paper [A simple approach to ordinal classification](https://www.cs.waikato.ac.nz/~eibe/pubs/ordinal_tech_report.pdf)
+    by Eibe Frank and Mark Hall.
+
+    References
+    ----------
+    .. [1] Frank, Eibe, and Mark Hall. "A Simple Approach to Ordinal Classification." In Machine Learning: ECML 2001, edited by Luc De Raedt and Peter Flach, 2167:145-56. Lecture Notes in Computer Science. Berlin, Heidelberg: Springer Berlin Heidelberg, 2001. https://doi.org/10.1007/3-540-44795-4_13.
+
+    :example:
+    >>> X = np.random.randint(0, 500, (100, 50))
+    >>> y = np.random.randint(1, 6, (100))
+    >>> rf_mdl = TrainModelMixin().clf_define()
+    >>> fitted_mdl = OrdinalClassifier.fit(X, y, rf_mdl, -1)
+    >>> y_hat = OrdinalClassifier.predict_proba(X, fitted_mdl)
+    >>> y = OrdinalClassifier.predict(X, fitted_mdl)
+    >>> OrdinalClassifier.save(mdl=fitted_mdl, save_path=r"C:\Users\sroni\OneDrive\Desktop\mdl.pk")
+    """
+
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def fit(X: np.ndarray, y: np.ndarray, clf: Union[ACCEPTED_MODELS], core_cnt: int = -1):
+
+        def _fit_binary_estimator(clf, X, y, y_label):
+            y_bin = (y <= y_label).astype(int)
+            return clone(clf).fit(X, y_bin)
+
+        classes_ = np.sort(np.unique(y))
+        check_valid_array(data=classes_, source=f'{__class__.__name__} y', accepted_ndims=(1,), accepted_dtypes=(int,))
+        if len(classes_) < 3:
+            raise InvalidInputError(msg=f'Found {len(classes_)} classes in y [{classes_}], requires at least 3', source=f'{OrdinalClassifier.__name__} fit')
+        intervals = [classes_[i] - classes_[i-1] for i in range(1, len(classes_))]
+        if len(set(intervals)) != 1:
+            raise InvalidInputError(msg=f'The values in y ({classes_}) are not of equal interval.', source=f'{OrdinalClassifier.__name__} fit')
+        check_valid_array(data=X, source=f'{__class__.__name__} x', accepted_ndims=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        if not isinstance(clf, (RandomForestClassifier,)) or ('predict_proba' not in dir(clf)):
+            raise InvalidInputError(msg=f'clf is not of valid type: {type(clf)} (accepted: {ACCEPTED_MODELS})', source=f'{OrdinalClassifier.__name__} fit')
+        check_int(name='core_cnt', min_value=-1, unaccepted_vals=[0], value=core_cnt)
+        core_cnt = [find_core_cnt()[0] if core_cnt == -1 or core_cnt > find_core_cnt()[0] else core_cnt][0]
+        return dict(zip(classes_[:-1], Parallel(n_jobs=core_cnt)(delayed(_fit_binary_estimator)(clf, X, y, y_label) for y_label in classes_[:-1])))
+
+    @staticmethod
+    def predict_proba(X: np.ndarray, mdl: Dict[int, Union[ACCEPTED_MODELS]]):
+        OrdinalClassifier._check_valid_mdl_dict(mdls=mdl)
+        check_valid_array(data=X, source=f'{__class__.__name__} x', accepted_ndims=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        if mdl[list(mdl.keys())[0]].n_features_ != X.shape[1]:
+            raise InvalidInputError(msg=f'Model expects {mdl[list(mdl.keys())[0]].n_features_} features, got {X.shape[1]}.', source=f'{OrdinalClassifier.__name__} predict')
+        raw_proba = np.array([estimator.predict_proba(X)[:, 1] for estimator in mdl.values()]).T
+        p_y_le = np.column_stack((np.zeros(X.shape[0]), raw_proba, np.ones(X.shape[0])))
+        return np.diff(p_y_le, n=1, axis=1)
+
+    @staticmethod
+    def predict(X: np.ndarray, mdl: Dict[int, Union[ACCEPTED_MODELS]]):
+        OrdinalClassifier._check_valid_mdl_dict(mdls=mdl)
+        check_valid_array(data=X, source=f'{__class__.__name__} x', accepted_ndims=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        if mdl[list(mdl.keys())[0]].n_features_ != X.shape[1]:
+            raise InvalidInputError(msg=f'Model expects {mdl[list(mdl.keys())[0]].n_features_} features, got {X.shape[1]}.', source=f'{OrdinalClassifier.__name__} predict')
+        # Map the argmax column index back to the original class labels: the dict keys are
+        # classes_[:-1] from fit(), so the full label set is the keys plus one more equal-interval step.
+        class_keys = np.sort(np.array(list(mdl.keys())))
+        interval = class_keys[1] - class_keys[0] if class_keys.shape[0] > 1 else 1
+        classes = np.append(class_keys, class_keys[-1] + interval)
+        return classes[np.argmax(OrdinalClassifier.predict_proba(X, mdl=mdl), axis=1)]
+
+    @staticmethod
+    def save(mdl: Dict[int, Union[ACCEPTED_MODELS]], save_path: Union[str, os.PathLike]):
+        OrdinalClassifier._check_valid_mdl_dict(mdls=mdl)
+        check_if_dir_exists(in_dir=os.path.dirname(save_path), source=f'{OrdinalClassifier.__name__} save')
+        write_pickle(data=mdl, save_path=save_path)
+
+    @staticmethod
+    def _check_valid_mdl_dict(mdls: Dict[int, Union[ACCEPTED_MODELS]]) -> None:
+        features_in_cnt = []
+        for mdl in mdls.values(): features_in_cnt.append(mdl.n_features_)
+        if len(set(features_in_cnt)) != 1:
+            raise InvalidInputError(msg=f'The models have different numbers of features [{features_in_cnt}]')
+
+# X = np.random.randint(0, 500, (100, 50))
+# y = np.random.randint(1, 6, (100))
+#
+# rf_mdl = TrainModelMixin().clf_define()
+#
+# fitted_mdls = OrdinalClassifier.fit(X, y, rf_mdl, -1)
+#
+# y_hat = OrdinalClassifier.predict_proba(X, fitted_mdls)
+# y = OrdinalClassifier.predict(X, fitted_mdls)
+# OrdinalClassifier.save(mdl=fitted_mdls, save_path=r"C:\Users\sroni\OneDrive\Desktop\mdl.pk")

+#predict_proba(X)
+# ordinal_clf.predict(X)
\ No newline at end of file
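The arithmetic behind `predict_proba` above, worked on made-up numbers (editor's sketch): with ordered classes [1, 2, 3, 4], the three binary models estimate P(y <= c); padding with 0 and 1 and differencing recovers a proper distribution over the four classes.

import numpy as np

p_le = np.array([0.1, 0.55, 0.9])               # P(y<=1), P(y<=2), P(y<=3) for one sample
padded = np.concatenate(([0.0], p_le, [1.0]))   # prepend P(y<=0)=0, append P(y<=4)=1
p_class = np.diff(padded)                       # P(y=1..4) = [0.1, 0.45, 0.35, 0.1]
assert np.isclose(p_class.sum(), 1.0)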
diff --git a/simba/sandbox/outliers_tietjen.py b/simba/sandbox/outliers_tietjen.py
new file mode 100644
index 000000000..674407433
--- /dev/null
+++ b/simba/sandbox/outliers_tietjen.py
@@ -0,0 +1,45 @@
+from typing import List, Union
+import numpy as np
+
+def outliers_tietjen(x: Union[List, np.ndarray], k: int = 2, hypo: bool = False, alpha: float = 0.05) -> Union[np.ndarray, bool]:
+    """
+    Tietjen-Moore test for the k most extreme outliers. The test statistic is compared against a
+    critical value simulated from 10,000 standard-normal samples at the given alpha. If ``hypo``
+    is True, returns whether outliers are present; otherwise returns the input with the k
+    outliers removed (or unchanged, if the test is not significant).
+    """
+    arr = np.copy(x)
+    n = arr.size
+
+    def tietjen(x_, k_):
+        x_mean = x_.mean()
+        r = np.abs(x_ - x_mean)
+        z = x_[r.argsort()]
+        E = np.sum((z[:-k_] - z[:-k_].mean()) ** 2) / np.sum((z - x_mean) ** 2)
+        return E
+
+    e_x = tietjen(arr, k)
+    e_norm = np.zeros(10000)
+
+    for i in np.arange(10000):
+        norm = np.random.normal(size=n)
+        e_norm[i] = tietjen(norm, k)
+
+    CV = np.percentile(e_norm, alpha * 100)
+    result = e_x < CV
+
+    if hypo:
+        return result
+    else:
+        if result:
+            ind = np.argpartition(np.abs(arr - arr.mean()), -k)[-k:]
+            return np.delete(arr, ind)
+        else:
+            return arr
+
+
+x = np.random.normal(100, 2, 100)
+outlier = 10 # Value of the outlier
+x = np.append(x, outlier)
+
+
+d = outliers_tietjen(x=x)
+
+print(d.shape)
+
diff --git a/simba/sandbox/path_curvature.py b/simba/sandbox/path_curvature.py
new file mode 100644
index 000000000..1ce183d5b
--- /dev/null
+++ b/simba/sandbox/path_curvature.py
@@ -0,0 +1,94 @@
+import numpy as np
+from simba.utils.checks import check_str, check_valid_array
+from simba.utils.enums import Formats
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+
+def path_curvature(x: np.ndarray, agg_type: Literal['mean', 'median', 'max'] = 'mean') -> float:
+    """
+    Calculate aggregate curvature of a 2D path given an array of points.
+
+    :param np.ndarray x: A 2D numpy array of shape (N, 2), where N is the number of points and each row is (x, y).
+    :param Literal['mean', 'median', 'max'] agg_type: The type of summary statistic to return. Options are 'mean', 'median', or 'max'.
+    :return: A single float value representing the path curvature based on the specified summary type.
+    :rtype: float
+
+    :example:
+    >>> x = np.array([[0, 0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]])
+    >>> low = path_curvature(x)
+    >>> x = np.array([[0, 0], [1, 1], [2, 0], [3, 1], [4, 0]])
+    >>> high = path_curvature(x)
+    """
+    check_valid_array(data=x, source=f'{path_curvature.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_str(name=f'{path_curvature.__name__} agg_type', value=agg_type, options=('mean', 'median', 'max'))
+    dx, dy = np.diff(x[:, 0]), np.diff(x[:, 1])
+    x_prime, y_prime = dx[:-1], dy[:-1]
+    x_double_prime, y_double_prime = dx[1:] - dx[:-1], dy[1:] - dy[:-1]
+    curvature = np.abs(x_prime * y_double_prime - y_prime * x_double_prime) / (x_prime ** 2 + y_prime ** 2) ** (3 / 2)
+    if agg_type == 'mean':
+        return np.float32(np.nanmean(curvature))
+    elif agg_type == 'median':
+        return np.float32(np.nanmedian(curvature))
+    else:
+        return np.float32(np.nanmax(curvature))
+
+
+def sliding_path_curvature(x: np.ndarray,
+                           agg_type: Literal['mean', 'median', 'max'],
+                           window_size: float,
+                           sample_rate: float) -> np.ndarray:
+    """
+    Computes the curvature of a path over sliding windows along the path points, providing a measure of the path's bending
+    or turning within each window.
+
+    This function calculates curvature for each window segment by evaluating directional changes. It provides the option to
+    aggregate curvature values within each window using the mean, median, or maximum, depending on the desired level of
+    sensitivity to bends and turns. A higher curvature value indicates a sharper or more frequent directional change within
+    the window, while a lower curvature suggests a straighter or smoother path.
+
+    :param np.ndarray x: A 2D array of shape (N, 2) representing the path, where N is the number of points, and each point has two spatial coordinates (e.g., x and y for 2D space).
+    :param Literal['mean', 'median', 'max'] agg_type: Type of aggregation for the curvature within each window.
+    :param float window_size: Duration of the window in seconds, used to define the size of each segment over which curvature is calculated.
+    :param float sample_rate: The rate at which path points were sampled (in points per second), used to convert the window size from seconds to frames.
+    :return: An array of shape (N,) containing the computed curvature values for each window position along the path. Each element represents the aggregated curvature within a specific window, with `NaN` values for frames where the window does not fit.
+ :rtype: np.ndarray + + :example: + >>> x = np.random.randint(0, 500, (91, 2)) + >>> sliding_path_curvature(x=x, agg_type='mean', window_size=1, sample_rate=30) + """ + + frame_step = int(max(1.0, window_size * sample_rate)) + results = np.full(shape=(x.shape[0]), fill_value=np.nan, dtype=np.float32) + for r in range(frame_step, x.shape[0]+1): + l = r - frame_step + sample_x = x[l:r] + dx, dy = np.diff(sample_x[:, 0]), np.diff(sample_x[:, 1]) + x_prime, y_prime = dx[:-1], dy[:-1] + x_double_prime, y_double_prime = dx[1:] - dx[:-1], dy[1:] - dy[:-1] + curvature = np.abs(x_prime * y_double_prime - y_prime * x_double_prime) / (x_prime ** 2 + y_prime ** 2) ** (3 / 2) + if agg_type == 'mean': + results[r-1] = np.float32(np.nanmean(curvature)) + elif agg_type == 'median': + results[r-1] = np.float32(np.nanmedian(curvature)) + else: + results[r-1] = np.float32(np.nanmax(curvature)) + + return results + + +x = np.random.randint(0, 500, (91, 2)) +sliding_path_curvature(x=x, agg_type='mean', window_size=1, sample_rate=30) + + + + +# x = np.array([[0, 0], [1, 0.1], [2, 0.2], [3, 0.3], [4, 0.4]]) +# #x = np.random.randint(0, 500, (100, 2)) +# low = path_curvature(x) +# x = np.array([[0, 0], [1, 1], [2, 0], [3, 1], [4, 0]]) +# high = path_curvature(x) +# print(low, high) \ No newline at end of file diff --git a/simba/sandbox/path_geometry.py b/simba/sandbox/path_geometry.py new file mode 100644 index 000000000..c414413cf --- /dev/null +++ b/simba/sandbox/path_geometry.py @@ -0,0 +1,99 @@ +from simba.mixins.geometry_mixin import GeometryMixin +from simba.mixins.config_reader import ConfigReader +from simba.utils.data import savgol_smoother +from simba.utils.read_write import read_df, read_frm_of_video +import numpy as np +import os +import cv2 + +FRAME_IDX = -1 +BODY_PART = 'Nose' +VIDEO_NAME = 'SI_DAY3_308_CD1_PRESENT' +CONFIG_PATH = r'/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/project_config.ini' + +config = ConfigReader(config_path=CONFIG_PATH, read_video_info=False) +config.read_roi_data() +frm = read_frm_of_video(os.path.join(config.video_dir, VIDEO_NAME +'.mp4'), frame_index=FRAME_IDX) +shapes, colors = GeometryMixin.simba_roi_to_geometries(rectangles_df=config.rectangles_df, circles_df=config.circles_df, polygons_df=config.polygon_df, color=True) +video_roi_shapes = list(shapes[VIDEO_NAME].values()) + +roi_shapes = GeometryMixin.view_shapes(shapes=video_roi_shapes, size=500, thickness=12, color_palette='Pastel1') +# cv2.imshow('sasd', roi_shapes) +# cv2.waitKey(10000) + +roi_shapes_w_bg = GeometryMixin.view_shapes(shapes=video_roi_shapes, size=500, bg_img=frm, thickness=12, color_palette='Pastel1') +# cv2.imshow('sasd', roi_shapes_w_bg) +# cv2.waitKey(10000) + +data_path = os.path.join(config.outlier_corrected_dir, VIDEO_NAME + f'.{config.file_type}') +df = read_df(data_path, file_type=config.file_type) +animal_data = df[['Nose_x', 'Nose_y']].values +animal_path = GeometryMixin.to_linestring(data=animal_data) + +nose_path = GeometryMixin.view_shapes(shapes=video_roi_shapes + [animal_path], size=500, bg_img=frm, thickness=12, color_palette='Pastel1') +# cv2.imshow('sasd', nose_path) +# cv2.waitKey(5000) + +animal_data = savgol_smoother(data=animal_data, fps=15, time_window=1000) +animal_path = GeometryMixin.to_linestring(data=animal_data) + + + + + + + + +length = GeometryMixin.length(shape=animal_path, pixels_per_mm=1.5, unit='m') + +dist = GeometryMixin.locate_line_point(path=animal_path, px_per_mm=1.5, fps=15, geometry=shapes[VIDEO_NAME]['Top_left']) 
+distances = dist['raw_distances']
+
+dist = GeometryMixin.locate_line_point(path=animal_path, px_per_mm=1.5, fps=15, geometry=shapes[VIDEO_NAME]['Bottom_left'])
+distances = dist['raw_distances']
+
+
+#TIME STAMPS WHEN ANIMAL IS 1CM OR LESS FROM THE CAGE ROI
+cage_dist = GeometryMixin.locate_line_point(path=animal_path, px_per_mm=1.5, fps=15, geometry=shapes[VIDEO_NAME]['Cage'])
+less_than_1cm_timepoints = np.argwhere(cage_dist['raw_distances'] < 10).flatten() / 15
+
+
+frm = read_frm_of_video(os.path.join(config.video_dir, VIDEO_NAME +'.mp4'), frame_index=-1)
+buffered_path = GeometryMixin.buffer_shape(shape=animal_path, size_mm=10, pixels_per_mm=1.5)
+buffered_path_img = GeometryMixin.view_shapes(shapes=[buffered_path], size=500, bg_img=frm, thickness=12, color_palette='Pastel1')
+# cv2.imshow('sasd', buffered_path_img)
+# cv2.waitKey(5000)
+
+
+#CHECK IF THE PATH TOUCHES A SPECIFIC POLYGON
+w = GeometryMixin.is_touching(shapes=[buffered_path, shapes[VIDEO_NAME]['Cage']])
+
+#GeometryMixin.compute_pct_shape_overlap
+frm = read_frm_of_video(os.path.join(config.video_dir, VIDEO_NAME +'.mp4'), frame_index=-1)
+not_crossed = GeometryMixin.difference(shapes=[shapes[VIDEO_NAME]['Top_left'], buffered_path])
+not_crossed_img = GeometryMixin.view_shapes(shapes=[not_crossed], size=500, bg_img=frm, thickness=12, color_palette='Set1')
+# cv2.imshow('sasd', not_crossed_img)
+# cv2.waitKey(5000)
+
+
+frm = read_frm_of_video(os.path.join(config.video_dir, VIDEO_NAME +'.mp4'), frame_index=-1)
+shifted_geos = GeometryMixin.adjust_geometry_locations(geometries=video_roi_shapes, shift=(100, 5))
+shifted_geos_img = GeometryMixin.view_shapes(shapes=shifted_geos, size=500, bg_img=frm, thickness=12, color_palette='Set1')
+cv2.imshow('sasd', shifted_geos_img)
+cv2.waitKey(5000)
+
+
diff --git a/simba/sandbox/path_geometry_2.py b/simba/sandbox/path_geometry_2.py
new file mode 100644
index 000000000..0495132d9
--- /dev/null
+++ b/simba/sandbox/path_geometry_2.py
@@ -0,0 +1,51 @@
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.data import savgol_smoother
+from simba.utils.read_write import read_df, read_frm_of_video
+import numpy as np
+import os
+import cv2
+
+FRAME_IDX = -1
+VIDEO_NAME = '2022-06-20_NOB_DOT_4'
+CONFIG_PATH = r'/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/project_config.ini'
+
+config = ConfigReader(config_path=CONFIG_PATH, read_video_info=False)
+config.read_roi_data()
+shapes, colors = GeometryMixin.simba_roi_to_geometries(rectangles_df=config.rectangles_df, circles_df=config.circles_df, polygons_df=config.polygon_df, color=True)
+video_roi_shapes = list(shapes[VIDEO_NAME].values())
+
+
+roi_shapes = GeometryMixin.view_shapes(shapes=video_roi_shapes, size=750, thickness=12, bg_clr=(0, 0, 0), color_palette='Pastel1')
+# cv2.imshow('sasd', roi_shapes)
+# cv2.waitKey(10000)
+
+frm = read_frm_of_video(os.path.join(config.video_dir, VIDEO_NAME +'.mp4'), frame_index=FRAME_IDX)
+roi_shape_with_bg = GeometryMixin.view_shapes(shapes=video_roi_shapes, size=750, thickness=12, bg_img=frm, color_palette='Pastel1')
+# cv2.imshow('sasd', roi_shape_with_bg)
+# cv2.waitKey(10000)
+
+
+data_path = os.path.join(config.outlier_corrected_dir, VIDEO_NAME + f'.{config.file_type}')
+df = read_df(data_path, file_type=config.file_type)
+animal_data = df[['Tail_base_x', 'Tail_base_y', 'Nose_x', 'Nose_y']].values
+animal_data = animal_data.reshape(-1, 2, 2)
+animal_lines = 
GeometryMixin().multiframe_bodyparts_to_line(data=animal_data) + +FRAME_IDX = 120 +frm = read_frm_of_video(os.path.join(config.video_dir, VIDEO_NAME +'.mp4'), frame_index=FRAME_IDX) +animal_line_img = GeometryMixin.view_shapes(shapes=[animal_lines[FRAME_IDX], video_roi_shapes[3]], size=750, thickness=12, bg_img=frm, color_palette='Pastel1') +cv2.imshow('sasd', animal_line_img) +cv2.waitKey(10000) + +to_the_left = GeometryMixin().static_point_lineside(lines=np.array(animal_lines[FRAME_IDX].coords.xy).reshape(-1, 2, 2).astype(np.float32), point=np.array(video_roi_shapes[3].centroid)) + + + + + + + + + +#view_image(roi_shapes) diff --git a/simba/sandbox/path_plots.py b/simba/sandbox/path_plots.py new file mode 100644 index 000000000..7d0a00407 --- /dev/null +++ b/simba/sandbox/path_plots.py @@ -0,0 +1,163 @@ + + +### CREATE PATH PLOTS + +""" +In this notebook, we will create a bunch of different styled path plots representing visualizations for where the animal spends its time. +""" + +#%matplotlib inline +from simba.plotting.path_plotter_mp import PathPlotterMulticore +from matplotlib import pyplot as plt +import cv2 + + + +#Path to SimBA project config file. +CONFIG_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/project_config.ini' +#List of data files containing pose-estimation data. In this example, we will create visualizations for just one file, but feel free to add the paths to more files. +DATA_PATHS = ['/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/csv/machine_results/Trial 10.csv'] +#The background color of the path plot. In this first example, we will use white, but we will come back to this with fancier examples. +BG_COLOR = (255, 255, 255) +#The number of the most recent moves to plot in the path. In this first example we will plot the entire path in the video, but we will come back to this too. +MAX_LINES = 'entire video' +# A dictionary containing the body-parts we want to use when visualizing the paths and the path color. +ANIMAL_ATTR = {0: {'bp': 'Ear_right_1', 'color': (255, 0, 0)}, 1: {'bp': 'Ear_right_2', 'color': (0, 0, 255)}} +# The width of the path plot image we want to create. If `As input`, then the image will be as wide as the original video. +WIDTH = 'As input' +# The height of the path plot image we want to create. If `As input`, then the image will be as high as the original video. +HEIGHT = 'As input' +# The width of the line(s) representing the animal path. +LINE_WIDTH = 2 +# If working with paths from several animals, it can be helpful to print the name of the animal next to the current location of the animal. If you want to do this, set this to True. We will leave this off. +PRINT_ANIMAL_NAMES = False +# If printing the animal names, this will be the size of the font of the animal names. +FONT_SIZE = 0.9 +# If printing the animal names, this will be the font thickness of the animal names. +FONT_THICKNESS = 2 +# When generating path videos and frames, the current location of the animal is shown as a circle. This number controls the size of this circle +CIRCLE_SIZE = 2 +# If you want to create a png image for every frame in your input video, set this to True. We will leave this as False. +FRAME_SETTING = False +# If you want to create a video representing the paths of your animals, set this to True. We will leave this as False. +VIDEO_SETTING = False +# If you want to show the locations of your classified behaviors on the path plot, use this variable. 
We will come back to this - for now, we leave this as None.
+CLF_ATTR = None
+# If you want to create a path plot representing a defined start and end time of your videos, use slicing. We will come back to this - for now, we leave this as None.
+SLICING = None
+# We use CORES to set how many of your CPU cores to use when creating the plots. In this notebook, we will only create one image per input video, so it won't have much effect. To use all your cores, set this value to -1.
+CORES = -1
+
+
+# WE PLACE THE STYLE SELECTIONS ABOVE IN A DICTIONARY THAT THE PATH PLOTTER ACCEPTS.
+STYLE_ATTR = {'width': WIDTH,
+              'height': HEIGHT,
+              'line width': LINE_WIDTH,
+              'font size': FONT_SIZE,
+              'font thickness': FONT_THICKNESS,
+              'circle size': CIRCLE_SIZE,
+              'bg color': BG_COLOR,
+              'max lines': MAX_LINES}
+
+
+# WE DEFINE AN INSTANCE OF THE PATH PLOTTER AND RUN IT
+path_plotter = PathPlotterMulticore(config_path=CONFIG_PATH,
+                                    frame_setting=FRAME_SETTING,
+                                    video_setting=VIDEO_SETTING,
+                                    last_frame=True,
+                                    clf_attr=CLF_ATTR,
+                                    input_style_attr=STYLE_ATTR,
+                                    animal_attr=ANIMAL_ATTR,
+                                    files_found=DATA_PATHS,
+                                    cores=CORES,
+                                    slicing=SLICING,
+                                    print_animal_names=PRINT_ANIMAL_NAMES)
+
+path_plotter.run()
+
+# ...AND VIEW THE IMAGE FROM THE FILE ON THE DISK
+image = cv2.imread("/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10_final_frame.png")
+plt.imshow(image)
+plt.show()
+
+# We may want to change the background to represent the actual arena, with some decrease in opacity.
+STYLE_ATTR['bg color'] = {'type': 'static', 'opacity': 50, 'frame_index': 1}
+path_plotter = PathPlotterMulticore(config_path=CONFIG_PATH,
+                                    frame_setting=FRAME_SETTING,
+                                    video_setting=VIDEO_SETTING,
+                                    last_frame=True,
+                                    clf_attr=CLF_ATTR,
+                                    input_style_attr=STYLE_ATTR,
+                                    animal_attr=ANIMAL_ATTR,
+                                    files_found=DATA_PATHS,
+                                    cores=CORES,
+                                    slicing=SLICING,
+                                    print_animal_names=PRINT_ANIMAL_NAMES)
+
+path_plotter.run()
+
+# ... AND VIEW THE NEW IMAGE FROM THE FILE ON THE DISK
+# image = cv2.imread("/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10_final_frame.png")
+# plt.imshow(image)
+# plt.show()
+
+# We may want to display only the path tracks from the first 2 minutes of the video.
+SLICING = {'start_time': '00:00:00', 'end_time': '00:02:00'}
+path_plotter = PathPlotterMulticore(config_path=CONFIG_PATH,
+                                    frame_setting=FRAME_SETTING,
+                                    video_setting=VIDEO_SETTING,
+                                    last_frame=True,
+                                    clf_attr=CLF_ATTR,
+                                    input_style_attr=STYLE_ATTR,
+                                    animal_attr=ANIMAL_ATTR,
+                                    files_found=DATA_PATHS,
+                                    cores=CORES,
+                                    slicing=SLICING,
+                                    print_animal_names=PRINT_ANIMAL_NAMES)
+
+path_plotter.run()
+
+# ... AND VIEW THE NEW IMAGE FROM THE FILE ON THE DISK
+# image = cv2.imread("/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10_final_frame.png")
+# plt.imshow(image)
+# plt.show()
+
+# We may want to display larger circles showing the location of the classified behaviors.
+CLF_ATTR = {'Nose to Nose': {'color': (155, 1, 10), 'size': 30}, 'Nose to Tailbase': {'color': (155, 90, 10), 'size': 30}}
+path_plotter = PathPlotterMulticore(config_path=CONFIG_PATH,
+                                    frame_setting=FRAME_SETTING,
+                                    video_setting=VIDEO_SETTING,
+                                    last_frame=True,
+                                    clf_attr=CLF_ATTR,
+                                    input_style_attr=STYLE_ATTR,
+                                    animal_attr=ANIMAL_ATTR,
+                                    files_found=DATA_PATHS,
+                                    cores=CORES,
+                                    slicing=SLICING,
+                                    print_animal_names=PRINT_ANIMAL_NAMES)
+
+path_plotter.run()
+
+# ... AND VIEW THE NEW IMAGE FROM THE FILE ON THE DISK
+image = cv2.imread("/Users/simon/Desktop/envs/simba/troubleshooting/beepboop174/project_folder/frames/output/path_plots/Trial 10_final_frame.png")
+plt.imshow(image)
+plt.show()
+
diff --git a/simba/sandbox/paths.py b/simba/sandbox/paths.py
new file mode 100644
index 000000000..abce42d29
--- /dev/null
+++ b/simba/sandbox/paths.py
@@ -0,0 +1,71 @@
+import pandas as pd
+import numpy as np
+from typing import Optional
+import cv2
+from simba.utils.read_write import read_df
+from itertools import groupby
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.utils.checks import check_valid_array
+
+
+def find_path_loops(data: np.ndarray):
+    """
+    Compute the loops detected within a 2-dimensional path.
+
+    :param np.ndarray data: Nx2 2-dimensional array with the x and y coordinates represented on axis 1.
+    :return: Dictionary with coordinate tuples (x, y) as keys, and lists of the frame numbers at which the animal visited and re-visited that coordinate as values.
+
+    :example:
+    >>> data = read_df(file_path='/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/csv/outlier_corrected_movement_location/SI_DAY3_308_CD1_PRESENT.csv', usecols=['Center_x', 'Center_y'], file_type='csv').values.astype(int)
+    >>> find_path_loops(data=data)
+    """
+
+    check_valid_array(data=data, source=find_path_loops.__name__, accepted_ndims=(2,), accepted_dtypes=(np.int32, np.int64, np.int8))
+
+    seen = {}
+    for i in range(data.shape[0]):
+        value = tuple(data[i])
+        if value not in seen.keys(): seen[value] = [i]
+        else: seen[value].append(i)
+    seen_dedup = {}
+    for k, v in seen.items():
+        # keep only the first frame of each separate visit (drop directly consecutive frames)
+        seen_dedup[k] = [x for cnt, x in enumerate(v) if cnt == 0 or v[cnt] > v[cnt-1] + 1]
+    return {k: v for k, v in seen_dedup.items() if len(v) > 1}
+
+
+data = read_df(file_path='/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/csv/outlier_corrected_movement_location/SI_DAY3_308_CD1_PRESENT.csv', usecols=['Center_x', 'Center_y'], file_type='csv').values.astype(int)
+find_path_loops(data=data)
+
+
+# linestring = GeometryMixin.to_linestring(data=data)
+#
+# img = GeometryMixin.view_shapes(shapes=[linestring], size=500)
+# cv2.imshow('asdasd', img)
+# cv2.waitKey(10000)
+
+
+# data = pd.read_csv('/Users/simon/Desktop/envs/simba/troubleshooting/zebrafish/project_folder/csv/outlier_corrected_movement_location/20200318_AB_7dpf_ctl_0003.csv')
+
+
+# data = np.load('/Users/simon/Desktop/envs/simba/simba/simba/sandbox/data.npy')
+# data[:, 0] = data[:, 0] + 10
+# data[:, 1] = data[:, 1] + 400
+
+
+# linestring = to_linestring(data=data)
+#
+# img = GeometryMixin.view_shapes(shapes=[linestring])
+# cv2.imshow('asdasd', img)
+# cv2.waitKey(10000)
+
+
+# plt.axis('equal')
+# plt.title('Path of Increasingly Larger Circles')
+# plt.show()
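A toy illustration of what `find_path_loops` returns (editor's sketch; the coordinate values are made up):

import numpy as np

# The animal sits at (0, 0) on frames 0-1, leaves on frame 2, and returns on frame 3.
toy = np.array([[0, 0], [0, 0], [5, 5], [0, 0]]).astype(np.int64)
# find_path_loops(data=toy) -> {(0, 0): [0, 3]}
# Frame 1 is dropped because it directly follows frame 0 (same visit), and (5, 5)
# is dropped because it was visited only once.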
diff --git a/simba/sandbox/pct_counts_in_top_N.py b/simba/sandbox/pct_counts_in_top_N.py
new file mode 100644
index 000000000..757838f01
--- /dev/null
+++ b/simba/sandbox/pct_counts_in_top_N.py
@@ -0,0 +1,54 @@
+import numpy as np
+
+from simba.utils.checks import check_valid_array, check_int, check_float
+
+def pct_in_top_n(x: np.ndarray, n: int) -> float:
+    """
+    Compute the percentage of elements in the top 'n' frequencies in the input array.
+
+    :param np.ndarray x: Input 1D array.
+    :param int n: Number of top frequencies.
+    :return: Percentage of elements in the top 'n' frequencies.
+
+    :example:
+    >>> x = np.random.randint(0, 10, (100,))
+    >>> pct_in_top_n(x=x, n=5)
+    """
+
+    check_valid_array(data=x, accepted_ndims=(1,), source=pct_in_top_n.__name__)
+    check_int(name=pct_in_top_n.__name__, value=n, max_value=x.shape[0])
+    cnts = np.sort(np.unique(x, return_counts=True)[1])[-n:]
+    return np.sum(cnts) / x.shape[0]
+
+def sliding_pct_in_top_n(x: np.ndarray, windows: np.ndarray, n: int, fps: float) -> np.ndarray:
+    """
+    Compute the percentage of elements in the top 'n' frequencies in sliding windows of the input array.
+
+    .. note::
+       To compute the percentage of elements in the top 'n' frequencies in the entire array, use ``simba.mixins.statistics_mixin.Statistics.pct_in_top_n``.
+
+    :param np.ndarray x: Input 1D array.
+    :param np.ndarray windows: Array of window sizes in seconds.
+    :param int n: Number of top frequencies.
+    :param float fps: Sampling frequency for time conversion.
+    :return np.ndarray: 2D array of computed percentages of elements in the top 'n' frequencies for each sliding window.
+
+    :example:
+    >>> x = np.random.randint(0, 10, (100000,))
+    >>> results = sliding_pct_in_top_n(x=x, windows=np.array([1.0]), n=4, fps=10)
+    """
+
+    check_valid_array(data=x, source=f'{sliding_pct_in_top_n.__name__} x', accepted_ndims=(1,), accepted_dtypes=(np.float32, np.float64, np.int64, np.int32, int, float))
+    check_valid_array(data=windows, source=f'{sliding_pct_in_top_n.__name__} windows', accepted_ndims=(1,), accepted_dtypes=(np.float32, np.float64, np.int64, np.int32, int, float))
+    check_int(name=f'{sliding_pct_in_top_n.__name__} n', value=n, min_value=1)
+    check_float(name=f'{sliding_pct_in_top_n.__name__} fps', value=fps, min_value=10e-6)
+    results = np.full((x.shape[0], windows.shape[0]), -1.0)
+    for i in range(windows.shape[0]):
+        W_s = int(windows[i] * fps)
+        for cnt, (l, r) in enumerate(zip(range(0, x.shape[0] + 1), range(W_s, x.shape[0] + 1))):
+            sample = x[l:r]
+            cnts = np.sort(np.unique(sample, return_counts=True)[1])[-n:]
+            results[int(r - 1), i] = np.sum(cnts) / sample.shape[0]
+    return results
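A hand-checkable example of the `pct_in_top_n` computation (editor's sketch):

import numpy as np

# 7 appears four times and 3 three times, so the top-2 frequencies cover (4 + 3) / 10 = 0.7.
x = np.array([7, 7, 7, 7, 3, 3, 3, 1, 2, 9])
cnts = np.sort(np.unique(x, return_counts=True)[1])[-2:]
assert np.sum(cnts) / x.shape[0] == 0.7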
diff --git a/simba/sandbox/piotr_120324 2.py.zip b/simba/sandbox/piotr_120324 2.py.zip
new file mode 100644
index 0000000000000000000000000000000000000000..adb07bfec9cc5ea6c1fcddf2b13af7f2395e56ba
GIT binary patch
(binary patch data omitted)

diff --git a/simba/sandbox/piotr_120324 3.py.zip b/simba/sandbox/piotr_120324 3.py.zip
new file mode 100644
index 0000000000000000000000000000000000000000..6d998e172ff244def35fd233894b2acce5fb2cf5
GIT binary patch
(binary patch data omitted)

diff --git a/simba/sandbox/piotr_120324 4.py.zip b/simba/sandbox/piotr_120324 4.py.zip
new file mode 100644
index 0000000000000000000000000000000000000000..0a557b71b82daf52c8a6519f18dae5e97a075bd3
GIT binary patch
(binary patch data omitted)
diff --git a/simba/sandbox/piotr_120324.py b/simba/sandbox/piotr_120324.py
new file mode 100644
index 000000000..550b80c74
--- /dev/null
+++ b/simba/sandbox/piotr_120324.py
@@ -0,0 +1,98 @@
+import os.path
+from copy import deepcopy
+import numpy as np
+
+import pandas as pd
+
+from simba.utils.read_write import read_df, get_fn_ext, write_df, find_core_cnt
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.mixins.abstract_classes import AbstractFeatureExtraction
+from simba.utils.errors import NoFilesFoundError
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.plotting.geometry_plotter import GeometryPlotter
+import argparse
+
+WHITE = 'white'
+BLACK = 'black'
+
+CORE_CNT = -1
+VISUALIZE = True
+BUFFER = 20
+RECTANGLES = True
+
+class PiotrFeatureExtractor(ConfigReader,
+                            FeatureExtractionMixin,
+                            GeometryMixin,
+                            AbstractFeatureExtraction):
+
+    def __init__(self,
+                 config_path: str):
+
+        ConfigReader.__init__(self, config_path=config_path)
+        FeatureExtractionMixin.__init__(self)
+        GeometryMixin.__init__(self)
+        if len(self.outlier_corrected_paths) == 0:
+            raise NoFilesFoundError(msg=f'No files found in {self.outlier_corrected_dir}')
+        self.session_timer = SimbaTimer(start=True)
+        self.config_path = config_path
+
+    def run(self):
+        print(f'Used core count: {CORE_CNT}')
+        for file_cnt, file_path in enumerate(self.outlier_corrected_paths):
+            video_timer = SimbaTimer(start=True)
+            _, video_name, _ = get_fn_ext(filepath=file_path)
+            print(f'Processing {video_name}...')
+            _, pixels_per_mm, _ = self.read_video_info(video_name=video_name)
+            data_df = read_df(file_path=file_path, file_type=self.file_type)
+            data_df = data_df.apply(pd.to_numeric, errors='coerce').fillna(0).replace(0, np.nan)
+            data_df = data_df.interpolate(method='nearest').bfill().ffill()
+            nan_cols = data_df.columns[data_df.isnull().all(0)] # FIND COLUMN NAMES THAT ARE ALL NaNs
+            #data_df = data_df.drop(nan_cols, axis=1) # DROP COLUMNS THAT ARE ALL NaNs FROM THE DATA
+            results = deepcopy(data_df)
+            save_path = os.path.join(self.features_dir, f'{video_name}.{self.file_type}')
+            white_animal_bp_names, black_animal_bp_names = 
self.animal_bp_dict[WHITE], self.animal_bp_dict[BLACK] + white_animal_cols, black_animal_cols = [], [] + for x, y in zip(white_animal_bp_names['X_bps'], white_animal_bp_names['Y_bps']): white_animal_cols.extend((x, y)) + for x, y in zip(black_animal_bp_names['X_bps'], black_animal_bp_names['Y_bps']): black_animal_cols.extend((x, y)) + black_animal_cols = [x for x in black_animal_cols if x not in nan_cols] # DROP COLUMN NAMES THAT ARE ALL NaNs FROM THE BLACK ANIMAL BODY-PART NAMES + white_animal_cols = [x for x in white_animal_cols if x not in nan_cols] # DROP COLUMN NAMES THAT ARE ALL NaNs FROM THE WHITE ANIMAL BODY-PART NAMES + white_animal_df, black_animal_df = data_df[white_animal_cols], data_df[black_animal_cols] + white_animal_df_arr = white_animal_df.values.reshape(len(white_animal_df), -1 , 2) + black_animal_df_arr = black_animal_df.values.reshape(len(black_animal_df), -1, 2) + white_animal_polygons = GeometryMixin().multiframe_bodyparts_to_polygon(data=white_animal_df_arr, pixels_per_mm=pixels_per_mm, parallel_offset=BUFFER, verbose=True, video_name=video_name, animal_name='white', core_cnt=CORE_CNT) + black_animal_polygons = GeometryMixin().multiframe_bodyparts_to_polygon(data=black_animal_df_arr, pixels_per_mm=pixels_per_mm, parallel_offset=BUFFER, verbose=True, video_name=video_name, animal_name='black', core_cnt=CORE_CNT) + if RECTANGLES: + white_animal_polygons = GeometryMixin().multiframe_minimum_rotated_rectangle(shapes=white_animal_polygons, video_name=video_name, animal_name='white', verbose=True, core_cnt=CORE_CNT) + black_animal_polygons = GeometryMixin().multiframe_minimum_rotated_rectangle(shapes=black_animal_polygons, video_name=video_name, animal_name='black', verbose=True, core_cnt=CORE_CNT) + results['polygon_pct_overlap'] = GeometryMixin().multiframe_compute_pct_shape_overlap(shape_1=white_animal_polygons, shape_2=black_animal_polygons, animal_names='black_white', video_name=video_name, verbose=True, core_cnt=CORE_CNT) + combined_list = [list(pair) for pair in list(zip(white_animal_polygons, black_animal_polygons))] + difference = GeometryMixin().multiframe_difference(shapes=combined_list, verbose=True, animal_names='white_black', video_name=video_name, core_cnt=CORE_CNT) + results['difference_area'] = GeometryMixin().multiframe_area(shapes=difference, pixels_per_mm=pixels_per_mm, verbose=True, video_name=video_name, core_cnt=CORE_CNT) + self.save(data=results, save_path=save_path) + if VISUALIZE: + geometry_plotter = GeometryPlotter(config_path=self.config_path, geometries=[white_animal_polygons, black_animal_polygons], video_name=video_name, core_cnt=CORE_CNT) + geometry_plotter.run() + video_timer.stop_timer() + stdout_success(msg=f'{video_name} complete!', elapsed_time=video_timer.elapsed_time_str) + + self.session_timer.stop_timer() + stdout_success(msg=f'{len(self.outlier_corrected_paths)} data files saved in {self.features_dir}') + + def save(self, + data: pd.DataFrame, + save_path: str): + + write_df(df=data, file_type=self.file_type, save_path=save_path) + +# if __name__ == "__main__": +# parser = argparse.ArgumentParser(description='SimBA Custom Feature Extractor') +# parser.add_argument('--config_path', type=str, help='SimBA project config path') +# args = parser.parse_args() +# feature_extractor = PiotrFeatureExtractor(config_path=args.config_path) +# feature_extractor.run() + + +feature_extractor = PiotrFeatureExtractor(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/piotr/project_folder/project_config.ini') +feature_extractor.run() \ No 
newline at end of file
diff --git a/simba/sandbox/piotr_120324.py.zip b/simba/sandbox/piotr_120324.py.zip
new file mode 100644
index 0000000000000000000000000000000000000000..274e58550b45b2252c88287d6dbb20a98b6afebc
GIT binary patch
(binary patch data omitted)
diff --git a/simba/sandbox/platea_video.py b/simba/sandbox/platea_video.py
new file mode 100644
index 000000000..a3aa3008b
--- /dev/null
+++ b/simba/sandbox/platea_video.py
@@ -0,0 +1,125 @@
+import cv2
+import numpy as np
+import pandas as pd
+from simba.utils.read_write import read_frm_of_video
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.plotting.geometry_plotter import GeometryPlotter
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+from simba.mixins.circular_statistics import CircularStatisticsMixin
+from simba.mixins.config_reader import ConfigReader
+
+
+CONFIG_PATH = '/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini'
+config = ConfigReader(config_path=CONFIG_PATH, create_logger=False)
+config.read_roi_data()
+
+response_windows = GeometryMixin.simba_roi_to_geometries(rectangles_df=config.rectangles_df)[0]['3A_Mouse_5-choice_MouseTouchBasic_s9_a6_grayscale']
+
+HEADERS = ['NOSE_X', 'NOSE_Y', 'NOSE_P', 'EAR_LEFT_X', 'EAR_LEFT_Y', 'EAR_LEFT_P', 'EAR_RIGHT_X', 'EAR_RIGHT_Y', 'EAR_RIGHT_P', 'LAT_LEFT_X', 'LAT_LEFT_Y', 'LAT_LEFT_P', 'LAT_RIGHT_X', 'LAT_RIGHT_Y', 'LAT_RIGHT_P', 'CENTER_X', 'CENTER_Y', 'CENTER_P', 'TAIL_BASE_X', 'TAIL_BASE_Y', 'TAIL_BASE_P']
+X_HEADERS = [x for x in HEADERS if x.endswith('_X')]
+Y_HEADERS = [x for x in HEADERS if x.endswith('_Y')]
+P_HEADERS = [x for x in HEADERS if x.endswith('_P')]
+BP_HEADERS = [x for x in HEADERS if x not in P_HEADERS]
+
+
+VIDEO_PATH = '/Users/simon/Desktop/envs/platea_data/3A_Mouse_5-choice_MouseTouchBasic_s9_a6_grayscale.mp4'
+DATA_PATH = '/Users/simon/Desktop/envs/platea_data/3A_Mouse_5-choice_MouseTouchBasic_s9_a6_grayscale_new.mp4.npy'
+data = pd.DataFrame(np.load(DATA_PATH).reshape(37420, 21), columns=HEADERS).head(200)
+
+bp_df = data[BP_HEADERS]
+bp_df.to_csv('/Users/simon/Desktop/envs/simba/troubleshooting/platea/project_folder/csv/outlier_corrected_movement_location/Video_1.csv')
+animal_polygons = GeometryMixin().multiframe_bodyparts_to_polygon(data=bp_df.values.reshape(-1, 7, 2), parallel_offset=10)
+
+GRID_SIZE = (5, 5)
+grid, aspect_ratio = GeometryMixin().bucket_img_into_grid_square(img_size=(1280, 720), bucket_grid_size_mm=10, px_per_mm=10)
+
+grid_lsts = []
+for k, v in grid.items():
+    x = []
+    for i in range(200):
+        x.append(v)
+    grid_lsts.append(x)
+
+nose_arr = data[['NOSE_X', 'NOSE_Y']].values.astype(np.int64)
+left_ear_arr = data[['EAR_LEFT_X', 'EAR_LEFT_Y']].values.astype(np.int64)
+right_ear_arr = data[['EAR_RIGHT_X', 'EAR_RIGHT_Y']].values.astype(np.int64)
+center_arr = data[['CENTER_X', 'CENTER_Y']].values.astype(np.int64)
+midpoints = FeatureExtractionMixin.find_midpoints(bp_1=left_ear_arr, bp_2=right_ear_arr, percentile=0.5)
+
+circles = GeometryMixin().multiframe_bodyparts_to_circle(data=center_arr.reshape(-1, 1, 2), parallel_offset=200, pixels_per_mm=1)
+circles = [x for xs in circles for x in xs]
+
+response_windows_list = []
+for i in response_windows.values():
+    x = []
+    for j in range(200):
+        x.append(i)
+    response_windows_list.append(x)
+
+
+geometries = []
+# for i in range(len(grid_lsts)):
+#     geometries.append(grid_lsts[i])
+
+for i in range(len(response_windows_list)):
+    geometries.append(response_windows_list[i])
+
+geometries.append(circles)
+geometries.append(animal_polygons)
+
+geometry_plotter = GeometryPlotter(config_path=CONFIG_PATH,
+                                   geometries=geometries,
+                                   video_name='3A_Mouse_5-choice_MouseTouchBasic_s9_a6_grayscale',
+                                   thickness=10,
+                                   palette='spring')
+geometry_plotter.run()
+
+#
+#
+# img = read_frm_of_video(video_path=VIDEO_PATH, frame_index=100)
+# img_shapes = grid + response_windows_lst[100] + circles[100]
+# img_shapes.append(animal_polygons[100])
+# img = GeometryMixin().view_shapes(shapes=img_shapes, bg_img=img, color_palette='set')
+#
+
+
+# lines_df = {}
+# for k, v in response_windows.items():
+#     target = np.array(v.centroid).astype(np.int64)
+#     line = FeatureExtractionMixin().jitted_line_crosses_to_static_targets(left_ear_array=left_ear_arr,
+#                                                                           right_ear_array=right_ear_arr,
+#                                                                           nose_array=nose_arr,
+#                                                                           target_array=target)
+#     target_x = np.full((line.shape[0], 1), target[0])
+#     target_y = np.full((line.shape[0], 1), target[1])
+#     line = np.hstack([line, target_x, target_y])
+#     lines_df[k] = line
+
+# lines_lst = []
+# for i in range(lines_df['Window_1'].shape[0]):
+#     lines_lst.append([])
+#
+# for k, v in lines_df.items():
+#     for i in range(v.shape[0]):
+#         if v[i][1] != -1.0:
+#             line = np.array([[v[i][1], v[i][2]], [v[i][-2], v[i][-1]]])
+#             line_str = GeometryMixin.to_linestring(data=line)
+#             lines_lst[i].append(line_str)
+#         else:
+#             line = np.array([[0, 0], [0, 0]])
+#             line_str = GeometryMixin.to_linestring(data=line)
+#             lines_lst[i].append(line_str)
+
+#direction = CircularStatisticsMixin.direction_two_bps(anterior_loc=nose_arr, posterior_loc=midpoints)
+
+
+
+# cv2.imshow('sdfsdf', img)
+# cv2.waitKey(5000)
diff --git a/simba/sandbox/plotly_gantt.py b/simba/sandbox/plotly_gantt.py
new file mode 100644
index 000000000..4db0e5ab5
--- /dev/null
+++ b/simba/sandbox/plotly_gantt.py
@@ -0,0 +1,72 @@
+
+import pandas as pd
+import numpy as np
+import plotly.express as px
+from PIL import Image
+import io
+import cv2
+from typing import Optional
+
+
+def gantt_plotly(bouts_df: pd.DataFrame,
+                 width: Optional[int] = 640,
+                 height: Optional[int] = 480,
+                 bg_clr: Optional[str] = 'white',
+                 title: Optional[str] = None,
+                 font_size: Optional[int] = 12,
+                 y_lbl: Optional[str] = 'Event',
+                 x_lbl: Optional[str] = 'Session time (s)',
+                 show_grid: Optional[bool] = False,
+                 x_length: Optional[int] = None):
+
+    last_bout_time = (bouts_df['Start_time'] + bouts_df['Bout_time']).max()  # end time of the last-ending bout
+    if x_length is not None:
+        last_bout_time = max(x_length, last_bout_time)
+
+    fig = px.bar(bouts_df,
+                 base="Start_time",
+                 x="Bout_time",
+                 y="Event",
+                 color=bouts_df.Event.astype(str),
+                 orientation="h")
+
+    fig.update_layout(
+        width=width,
+        height=height,
+        title=title,
+        yaxis_type="category",
+        xaxis=dict(
+            title=x_lbl,
+            tickfont=dict(size=font_size),
+            showgrid=show_grid,
+            range=[0, last_bout_time]
+        ),
+        yaxis=dict(
+            title=y_lbl,
+            tickfont=dict(size=font_size),
+            showgrid=show_grid,
+        ),
+        showlegend=False,
+    )
+
+    if bg_clr is not None:
+        fig.update_layout(plot_bgcolor=bg_clr)
+    img_bytes = fig.to_image(format="png")  # static export requires the kaleido package
+    img = Image.open(io.BytesIO(img_bytes))
+    fig = None
+    return np.array(img).astype(np.uint8)
+
+
+bouts_df = pd.read_csv('/Users/simon/Desktop/envs/simba/simba/simba/sandbox/bouts_df')
+img = gantt_plotly(bouts_df=bouts_df)
+cv2.imshow('img', img)
+cv2.waitKey(5000)
+
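For readers without the `bouts_df` file used above, a minimal table with the three columns `gantt_plotly` reads (`Event`, `Start_time`, `Bout_time`) can be built by hand (editor's sketch; the bout values are made up):

import pandas as pd

bouts_df = pd.DataFrame({'Event': ['groom', 'rear', 'groom'],
                         'Start_time': [0.5, 2.0, 4.2],
                         'Bout_time': [1.2, 0.8, 0.6]})
# img = gantt_plotly(bouts_df=bouts_df, title='toy example')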
diff --git a/simba/sandbox/plug_bouts.py b/simba/sandbox/plug_bouts.py
new file mode 100644
index 000000000..78bfb6524
--- /dev/null
+++ b/simba/sandbox/plug_bouts.py
@@ -0,0 +1,181 @@
+import time
+
+import pandas as pd
+import numpy as np
+from simba.utils.checks import check_valid_dataframe, check_float, check_int, check_instance, check_if_df_field_is_boolean
+from simba.utils.data import detect_bouts
+
+def plug_holes_shortest_bout(data_df: pd.DataFrame,
+                             clf_name: str,
+                             fps: float,
+                             shortest_bout: int) -> pd.DataFrame:
+
+    """
+    Removes behavior "bouts" that are shorter than the minimum user-specified length within a dataframe.
+
+    .. note::
+       In the initial step the function looks for behavior "interruptions" that are the length of the ``shortest_bout`` or shorter.
+       I.e., these are ``0`` sequences that are the length of the ``shortest_bout`` or shorter with trailing **and** leading `1`s.
+       These interruptions are filled with `1`s. Next, the behavioral bouts shorter than the ``shortest_bout`` are removed. These operations are performed as they help preserve longer sequences of the desired behavior,
+       ensuring they aren't fragmented by brief interruptions.
+
+    :param pd.DataFrame data_df: Pandas Dataframe with classifier prediction data.
+    :param str clf_name: Name of the classifier field.
+    :param float fps: The fps of the input video.
+    :param int shortest_bout: The shortest valid behavior bout in milliseconds.
+    :return pd.DataFrame data_df: Dataframe where behavior bouts with invalid lengths have been removed (< shortest_bout)
+
+    :example:
+    >>> data_df = pd.DataFrame(data=[1, 0, 1, 1, 1], columns=['target'])
+    >>> plug_holes_shortest_bout(data_df=data_df, clf_name='target', fps=10, shortest_bout=2000)
+    >>>    target
+    >>> 0       1
+    >>> 1       1
+    >>> 2       1
+    >>> 3       1
+    >>> 4       1
+    """
+
+    check_int(name=f'{plug_holes_shortest_bout.__name__} shortest_bout', value=shortest_bout, min_value=0)
+    check_float(name=f'{plug_holes_shortest_bout.__name__} fps', value=fps, min_value=10e-6)
+    shortest_bout_frms, shortest_bout_s = int(fps * (shortest_bout / 1000)), (shortest_bout / 1000)
+    if shortest_bout_frms <= 1:
+        return data_df
+    check_instance(source=plug_holes_shortest_bout.__name__, instance=clf_name, accepted_types=(str,))
+    check_valid_dataframe(df=data_df, source=f'{plug_holes_shortest_bout.__name__} data_df', required_fields=[clf_name])
+    check_if_df_field_is_boolean(df=data_df, field=clf_name, bool_values=(0, 1))
+
+    data = data_df[clf_name].to_frame()
+    data[f'{clf_name}_inverted'] = data[clf_name].apply(lambda x: ~x + 2)  # flip 0 <-> 1
+    clf_inverted_bouts = detect_bouts(data_df=data, target_lst=[f'{clf_name}_inverted'], fps=fps)
+    clf_inverted_bouts = clf_inverted_bouts[clf_inverted_bouts['Bout_time'] < shortest_bout_s]
+    if len(clf_inverted_bouts) > 0:
+        below_min_inverted = []
+        for i, j in zip(clf_inverted_bouts['Start_frame'].values, clf_inverted_bouts['End_frame'].values):
+            below_min_inverted.extend(np.arange(i, j+1))
+        data.loc[below_min_inverted, clf_name] = 1
+        data_df[clf_name] = data[clf_name]
+
+    clf_bouts = detect_bouts(data_df=data_df, target_lst=[clf_name], fps=fps)
+    below_min_bouts = clf_bouts[clf_bouts['Bout_time'] <= shortest_bout_s]
+    if len(below_min_bouts) == 0:
+        return data_df
+
+    result_clf, below_min_frms = data_df[clf_name].values, []
+    for i, j in zip(below_min_bouts['Start_frame'].values, below_min_bouts['End_frame'].values):
+        below_min_frms.extend(np.arange(i, j+1))
+    result_clf[below_min_frms] = 0
+    data_df[clf_name] = result_clf
+    return data_df
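# --- Editor's trace of the two passes above (not part of the patch), assuming
# fps=10 and shortest_bout=300, i.e. gaps and bouts of up to 3 frames count as short:
#
#   raw:    1 1 1 0 0 1 1 1 1 0 1 0 0 0 0
#   pass 1: 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0   (the 2-frame and 1-frame interruptions are bridged)
#   pass 2: unchanged, since the merged 11-frame bout (1.1 s) exceeds 300 ms and the
#           trailing 4-frame "0" stretch (400 ms) was never a short interruption.
#
#   import pandas as pd
#   df = pd.DataFrame({'target': [1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0]})
#   plug_holes_shortest_bout(data_df=df, clf_name='target', fps=10, shortest_bout=300)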
than the minimum user-specified length within a dataframe. + + :param pd.DataFrame data_df: Pandas Dataframe with classifier prediction data. + :param str clf_name: Name of the classifier field. + :param int fps: The fps of the input video. + :param int shortest_bout: The shortest valid behavior boat in milliseconds. + :return pd.DataFrame data_df: Dataframe where behavior bouts with invalid lengths have been removed (< shortest_bout) + + :example: + >>> data_df = pd.DataFrame(data=[1, 0, 1, 1, 1], columns=['target']) + >>> plug_holes_shortest_bout(data_df=data_df, clf_name='target', fps=10, shortest_bout=2000) + >>> target + >>> 0 1 + >>> 1 1 + >>> 2 1 + >>> 3 1 + >>> 4 1 + """ + + frames_to_plug = int(int(fps) * int(shortest_bout) / 1000) + frames_to_plug_lst = list(range(1, frames_to_plug + 1)) + frames_to_plug_lst.reverse() + patternListofLists, negPatternListofList = [], [] + for k in frames_to_plug_lst: + zerosInList, oneInlist = [0] * k, [1] * k + currList = [1] + currList.extend(zerosInList) + currList.extend([1]) + currListNeg = [0] + currListNeg.extend(oneInlist) + currListNeg.extend([0]) + patternListofLists.append(currList) + negPatternListofList.append(currListNeg) + fill_patterns = np.asarray(patternListofLists) + remove_patterns = np.asarray(negPatternListofList) + + for currPattern in fill_patterns: + n_obs = len(currPattern) + data_df["rolling_match"] = ( + data_df[clf_name] + .rolling(window=n_obs, min_periods=n_obs) + .apply(lambda x: (x == currPattern).all()) + .mask(lambda x: x == 0) + .bfill(limit=n_obs - 1) + .fillna(0) + .astype(bool) + ) + data_df.loc[data_df["rolling_match"] == True, clf_name] = 1 + data_df = data_df.drop(["rolling_match"], axis=1) + + print(remove_patterns) + for currPattern in remove_patterns: + n_obs = len(currPattern) + data_df["rolling_match"] = ( + data_df[clf_name] + .rolling(window=n_obs, min_periods=n_obs) + .apply(lambda x: (x == currPattern).all()) + .mask(lambda x: x == 0) + .bfill(limit=n_obs - 1) + .fillna(0) + .astype(bool) + ) + data_df.loc[data_df["rolling_match"] == True, clf_name] = 0 + data_df = data_df.drop(["rolling_match"], axis=1) + + return data_df + + +# d = pd.DataFrame(data=[1, 0, 1, 1, 1], columns=['target']) +# results_old = plug_holes_shortest_bout_old(data_df=d, clf_name='target', fps=10, shortest_bout=2000) +#print(results_old) +#results_new = plug_holes_shortest_bout(data_df=d, clf_name='target', fps=10, shortest_bout=2000) + + + +#pd.testing.assert_frame_equal(results, pd.DataFrame(data=[1, 1, 1, 1, 1], columns=['target'])) + + +# pd.testing.assert_frame_equal(results, pd.DataFrame(data=[1, 1, 1, 1, 1], columns=['target'])) +# +# +# +# data = pd.read_csv() +# +# data = np.random.randint(0, 2, (100000,)) +# data_df = pd.DataFrame(data=data, columns=['target']) +# +# start = time.time() +# df_1 = plug_holes_shortest_bout(data_df=data_df, clf_name='target', fps=16, shortest_bout=10000) +# new_time = time.time() - start +# start = time.time() +# df_2 = plug_holes_shortest_bout_old(data_df=data_df, clf_name='target', fps=16, shortest_bout=10000) +# old_time = time.time() - start +# +# out = pd.DataFrame() +# out['original'] = data +# out['new'] = df_1['target'] +# out['old'] = df_2['target'] +# out['diff'] = out['new'] - out['old'] +# +# +# out.to_csv('test.csv') +# +# +# +# out['diff'].sum() + +#assert data['new'] == data['old'] \ No newline at end of file diff --git a/simba/sandbox/profiler.py b/simba/sandbox/profiler.py new file mode 100644 index 000000000..e9fc30674 --- /dev/null +++ b/simba/sandbox/profiler.py @@ 
-0,0 +1,9 @@ +import cProfile +import App + + +profiler = cProfile.Profile() +profiler.enable() +App +profiler.disable() +profiler.print_stats() \ No newline at end of file diff --git a/simba/sandbox/px_to_mm.py b/simba/sandbox/px_to_mm.py new file mode 100644 index 000000000..1c23b882a --- /dev/null +++ b/simba/sandbox/px_to_mm.py @@ -0,0 +1,146 @@ +__author__ = "Simon Nilsson" + +import os +import cv2 +import numpy as np +from typing import Union +from simba.mixins.plotting_mixin import PlottingMixin +from simba.utils.checks import check_file_exist_and_readable, check_float +from simba.utils.read_write import get_video_meta_data, read_frm_of_video, get_fn_ext +from simba.utils.enums import TextOptions + +PIXEL_SENSITIVITY = 20 +DRAW_COLOR = (144, 0, 255) + +class GetPixelsPerMillimeterInterface(): + """ + Graphical interface to compute how many pixels represents a metric millimeter. + + .. video:: _static/img/vertical_concat.webm + :width: 800 + :autoplay: + :loop: + + :param Union[str, os.PathLike] video_path: Path to a video file on disk. + :param float known_metric_mm: Known millimeter distance to get the pixels conversion factor for. + :returns float: The number of pixels per metric millimeter. + + :example: + >>> runner = GetPixelsPerMillimeterInterface(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/Together_1.avi', known_metric_mm=140) + >>> runner.run() + """ + + + def __init__(self, + video_path: Union[str, os.PathLike], + known_metric_mm: float): + + check_file_exist_and_readable(file_path=video_path) + self.video_meta_data = get_video_meta_data(video_path=video_path) + check_float(name='distance', value=known_metric_mm, min_value=1) + self.video_path, self.known_metric_mm = video_path, known_metric_mm + self.frame = read_frm_of_video(video_path=video_path, frame_index=0) + self.video_dir, self.video_name, _ = get_fn_ext(filepath=self.video_path) + self.font_scale, self.spacing_x, self.spacing_y = PlottingMixin().get_optimal_font_scales(text='"Select coordinates: double left mouse click at two locations. 
Press ESC when done"', accepted_px_width=int(self.video_meta_data['width']), accepted_px_height=int(self.video_meta_data['height'] * 0.3)) + self.circle_scale = PlottingMixin().get_optimal_circle_size(frame_size=(int(self.video_meta_data['width']), int(self.video_meta_data['height'])), circle_frame_ratio=70) + self.original_frm = self.frame.copy() + self.overlay_frm = self.frame.copy() + self.ix, self.iy = -1, -1 + self.cord_results = [] + self.cord_status = False + self.move_status = False + self.insert_status = False + self.change_loop = False + self.coord_change = [] + self.new_cord_lst = [] + + def _draw_circle(self, event, x, y, flags, param): + if (event == cv2.EVENT_LBUTTONDBLCLK) and (len(self.cord_results) < 4): + cv2.circle(self.overlay_frm, (x, y), self.circle_scale, DRAW_COLOR, -1) + self.cord_results.append(x) + self.cord_results.append(y) + if len(self.cord_results) == 4: + self.cord_status = True + cv2.line(self.overlay_frm, (self.cord_results[0], self.cord_results[1]), (self.cord_results[2], self.cord_results[3]), DRAW_COLOR, 6) + + def _select_cord_to_change(self, event, x, y, flags, param): + if event == cv2.EVENT_LBUTTONDBLCLK: + if np.sqrt((x - self.cord_results[0]) ** 2 + (y - self.cord_results[1]) ** 2) <= PIXEL_SENSITIVITY: + self.coord_change = [1, self.cord_results[0], self.cord_results[1]] + self.move_status = True + elif np.sqrt((x - self.cord_results[2]) ** 2 + (y - self.cord_results[3]) ** 2) <= PIXEL_SENSITIVITY: + self.coord_change = [2, self.cord_results[2], self.cord_results[3]] + self.move_status = True + + def _select_new_dot_location(self, event, x, y, flags, param): + if event == cv2.EVENT_LBUTTONDBLCLK: + self.new_cord_lst.append(x) + self.new_cord_lst.append(y) + self.insert_status = True + + def run(self): + cv2.namedWindow("Select coordinates: double left mouse click at two locations. Press ESC when done", cv2.WINDOW_NORMAL) + while 1: + if self.cord_status == False and (self.move_status == False) and (self.insert_status == False): + cv2.setMouseCallback("Select coordinates: double left mouse click at two locations. Press ESC when done", self._draw_circle) + cv2.imshow("Select coordinates: double left mouse click at two locations. Press ESC when done", self.overlay_frm) + k = cv2.waitKey(20) & 0xFF + if k == 27: + break + if (self.cord_status == True) and (self.move_status == False) and (self.insert_status == False): + if self.change_loop == True: + self.overlay_frm = self.original_frm.copy() + cv2.circle(self.overlay_frm, (self.cord_results[0], self.cord_results[1]), self.circle_scale, DRAW_COLOR, -1) + cv2.circle(self.overlay_frm, (self.cord_results[2], self.cord_results[3]), self.circle_scale, DRAW_COLOR, -1) + cv2.line(self.overlay_frm, (self.cord_results[0], self.cord_results[1]), (self.cord_results[2], self.cord_results[3]), DRAW_COLOR, int(self.circle_scale / 3)) + cv2.putText(self.overlay_frm, "Click on circle to move", (TextOptions.BORDER_BUFFER_X.value, 50), cv2.FONT_HERSHEY_TRIPLEX, self.font_scale, DRAW_COLOR, 2) + cv2.putText(self.overlay_frm, "Press ESC to save and exit", (TextOptions.BORDER_BUFFER_X.value, 50 + self.spacing_y), cv2.FONT_HERSHEY_TRIPLEX, self.font_scale, DRAW_COLOR, 2) + cv2.imshow("Select coordinates: double left mouse click at two locations. Press ESC when done", self.overlay_frm) + cv2.setMouseCallback("Select coordinates: double left mouse click at two locations. 
Press ESC when done", self._select_cord_to_change) + + if (self.move_status == True) and (self.insert_status == False): + if self.change_loop == True: + self.frame = self.original_frm.copy() + self.change_loop = False + if self.coord_change[0] == 1: + cv2.circle(self.frame, (self.cord_results[2], self.cord_results[3]), self.circle_scale, DRAW_COLOR, -1) + if self.coord_change[0] == 2: + cv2.circle(self.frame, (self.cord_results[0], self.cord_results[1]), self.circle_scale, DRAW_COLOR, -1) + cv2.imshow("Select coordinates: double left mouse click at two locations. Press ESC when done", self.frame) + cv2.putText(self.frame, "Click on new circle location", (TextOptions.BORDER_BUFFER_X.value, 50), cv2.FONT_HERSHEY_TRIPLEX, self.font_scale, DRAW_COLOR, 2) + cv2.setMouseCallback("Select coordinates: double left mouse click at two locations. Press ESC when done", self._select_new_dot_location) + + if self.insert_status == True: + if self.coord_change[0] == 1: + cv2.circle(self.frame, (self.cord_results[2], self.cord_results[3]), self.circle_scale, DRAW_COLOR, -1) + cv2.circle(self.frame, (self.new_cord_lst[-2], self.new_cord_lst[-1]), self.circle_scale, DRAW_COLOR, -1) + cv2.line(self.frame, (self.cord_results[2], self.cord_results[3]), (self.new_cord_lst[-2], self.new_cord_lst[-1]), DRAW_COLOR, int(self.circle_scale / 3)) + self.cord_results = [self.new_cord_lst[-2], self.new_cord_lst[-1], self.cord_results[2], self.cord_results[3]] + self.cord_status = True + self.move_status = False + self.insert_status = False + self.change_loop = True + if self.coord_change[0] == 2: + cv2.circle(self.frame, (self.cord_results[0], self.cord_results[1]), self.circle_scale, DRAW_COLOR, -1) + cv2.circle(self.frame, (self.new_cord_lst[-2], self.new_cord_lst[-1]), self.circle_scale, DRAW_COLOR, -1) + cv2.line(self.frame, (self.cord_results[0], self.cord_results[1]), (self.new_cord_lst[-2], self.new_cord_lst[-1]), DRAW_COLOR, int(self.circle_scale / 3)) + self.cord_results = [self.cord_results[0], self.cord_results[1], self.new_cord_lst[-2], self.new_cord_lst[-1]] + self.cord_status = True + self.move_status = False + self.insert_status = False + self.change_loop = True + cv2.imshow("Select coordinates: double left mouse click at two locations. 
Press ESC when done", self.frame) + k = cv2.waitKey(20) & 0xFF + if k == 27: + break + + self.cord_status = False + self.move_status = False + self.insert_status = False + self.change_loop = False + cv2.destroyAllWindows() + euclidean_px_dist = np.sqrt((self.cord_results[0] - self.cord_results[2]) ** 2 + (self.cord_results[1] - self.cord_results[3]) ** 2) + self.ppm = euclidean_px_dist / self.known_metric_mm + +# runner = GetPixelsPerMillimeterInterface(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/videos/Together_1.avi', known_metric_mm=140) +# runner.run() \ No newline at end of file diff --git a/simba/sandbox/pypi_sizes.py b/simba/sandbox/pypi_sizes.py new file mode 100644 index 000000000..8e496bb5c --- /dev/null +++ b/simba/sandbox/pypi_sizes.py @@ -0,0 +1,27 @@ +import requests + +def get_package_sizes(package_name): + # Fetch package information from PyPI + url = f'https://pypi.org/pypi/{package_name}/json' + response = requests.get(url) + if response.status_code != 200: + print(f"Error fetching package data: {response.status_code}") + return + + package_data = response.json() + releases = package_data['releases'] + + # Iterate through all releases and print the size of each distribution file + results = {} + + for version, release_info in releases.items(): + for file_info in release_info: + size_kb = file_info['size'] / 1024 # Convert bytes to kilobytes + results[version] = size_kb + #print(f"Version: {version}, Filename: {file_info['filename']}, Size: {size_kb:.2f} KB") + return results +# Example usage +package_name = 'simba-uw-tf-dev' # Replace with the package name you want to check +r = get_package_sizes(package_name) + +r = {k: v for k, v in sorted(r.items(), key=lambda item: item[1])} \ No newline at end of file diff --git a/simba/sandbox/querter_circle.py b/simba/sandbox/querter_circle.py new file mode 100644 index 000000000..2b1e68132 --- /dev/null +++ b/simba/sandbox/querter_circle.py @@ -0,0 +1,87 @@ +import cv2 +import numpy as np + +# Initial values +center = None +radius = 0 +drawing = False +img = np.zeros((512, 512, 3), np.uint8) +overlay = img.copy() + + +def draw_circle(event, x, y, flags, param): + global center, radius, drawing, img, overlay + + if event == cv2.EVENT_LBUTTONDOWN: + center = (x, y) + drawing = True + + elif event == cv2.EVENT_MOUSEMOVE: + if drawing: + overlay = img.copy() + radius = int(((x - center[0]) ** 2 + (y - center[1]) ** 2) ** 0.5) + angle1, angle2 = 0, 0 + if x >= center[0] and y <= center[1]: # Top-right quadrant + angle1, angle2 = 0, 90 + # Draw the straight lines + cv2.line(overlay, center, (center[0] + radius, center[1]), (0, 255, 0), 2) + cv2.line(overlay, center, (center[0], center[1] - radius), (0, 255, 0), 2) + elif x <= center[0] and y <= center[1]: # Top-left quadrant + angle1, angle2 = 90, 180 + # Draw the straight lines + cv2.line(overlay, center, (center[0] - radius, center[1]), (0, 255, 0), 2) + cv2.line(overlay, center, (center[0], center[1] - radius), (0, 255, 0), 2) + elif x <= center[0] and y >= center[1]: # Bottom-left quadrant + angle1, angle2 = 180, 270 + # Draw the straight lines + cv2.line(overlay, center, (center[0] - radius, center[1]), (0, 255, 0), 2) + cv2.line(overlay, center, (center[0], center[1] + radius), (0, 255, 0), 2) + elif x >= center[0] and y >= center[1]: # Bottom-right quadrant + angle1, angle2 = 270, 360 + # Draw the straight lines + cv2.line(overlay, center, (center[0] + radius, center[1]), (0, 255, 0), 2) + cv2.line(overlay, center, (center[0], 
center[1] + radius), (0, 255, 0), 2)
+
+            # Draw the quarter circle arc
+            cv2.ellipse(overlay, center, (radius, radius), 0, angle1, angle2, (0, 255, 0), 2)
+
+    elif event == cv2.EVENT_LBUTTONUP:
+        drawing = False
+        radius = int(((x - center[0]) ** 2 + (y - center[1]) ** 2) ** 0.5)
+        angle1, angle2 = 0, 0
+        if x >= center[0] and y <= center[1]:  # Top-right quadrant
+            angle1, angle2 = 0, 90
+            # Draw the straight lines
+            cv2.line(img, center, (center[0] + radius, center[1]), (0, 255, 0), 2)
+            cv2.line(img, center, (center[0], center[1] - radius), (0, 255, 0), 2)
+        elif x <= center[0] and y <= center[1]:  # Top-left quadrant
+            angle1, angle2 = 90, 180
+            # Draw the straight lines
+            cv2.line(img, center, (center[0] - radius, center[1]), (0, 255, 0), 2)
+            cv2.line(img, center, (center[0], center[1] - radius), (0, 255, 0), 2)
+        elif x <= center[0] and y >= center[1]:  # Bottom-left quadrant
+            angle1, angle2 = 180, 270
+            # Draw the straight lines
+            cv2.line(img, center, (center[0] - radius, center[1]), (0, 255, 0), 2)
+            cv2.line(img, center, (center[0], center[1] + radius), (0, 255, 0), 2)
+        elif x >= center[0] and y >= center[1]:  # Bottom-right quadrant
+            angle1, angle2 = 270, 360
+            # Draw the straight lines
+            cv2.line(img, center, (center[0] + radius, center[1]), (0, 255, 0), 2)
+            cv2.line(img, center, (center[0], center[1] + radius), (0, 255, 0), 2)
+
+        # Draw the filled quarter circle
+        cv2.ellipse(img, center, (radius, radius), 0, angle1, angle2, (0, 255, 0), -1)
+        overlay = img.copy()
+
+
+# Create the display window and register the mouse callback
+cv2.namedWindow('image')
+cv2.setMouseCallback('image', draw_circle)
+
+while True:
+    cv2.imshow('image', overlay if drawing else img)
+    if cv2.waitKey(1) & 0xFF == 27:  # Press 'ESC' to exit
+        break
+
+cv2.destroyAllWindows()
diff --git a/simba/sandbox/quickselect.py b/simba/sandbox/quickselect.py
new file mode 100644
index 000000000..d0b136a97
--- /dev/null
+++ b/simba/sandbox/quickselect.py
@@ -0,0 +1,60 @@
+import numba
+import numpy as np
+from numba import cuda
+
+@cuda.jit
+def partition(arr, left, right, pivot_index, partition_index):
+    # Lomuto partition is inherently sequential, so only thread 0 does the work:
+    # letting every launched thread run the same loop would race on the array.
+    tid = cuda.grid(1)
+
+    if tid == 0:
+        pivot = arr[pivot_index]
+        # Move the pivot to the end of the range so the scan below is valid
+        arr[pivot_index], arr[right] = arr[right], arr[pivot_index]
+        i = left - 1
+        for j in range(left, right):
+            if arr[j] <= pivot:
+                i += 1
+                arr[i], arr[j] = arr[j], arr[i]
+        arr[i + 1], arr[right] = arr[right], arr[i + 1]
+        partition_index[0] = i + 1
+
+def quickselect(arr, left, right, k):
+    # If the subarray has one element, return it
+    if left == right:
+        return arr[left]
+
+    # Select pivot_index, can be improved with more sophisticated strategies
+    pivot_index = (left + right) // 2
+
+    # left, right and pivot_index pass to the kernel as plain scalars; only the
+    # partition index needs a device array, because the kernel writes it as output
+    partition_index_device = cuda.to_device(np.zeros(1, dtype=np.int32))
+
+    # Transfer the data to the device
+    arr_device = cuda.to_device(arr)
+
+    # Run the partition kernel on GPU
+    partition[1, 32](arr_device, left, right, pivot_index, partition_index_device)
+
+    # Copy the partially ordered data and the partition index back to the host
+    arr[:] = arr_device.copy_to_host()
+    partition_idx = int(partition_index_device.copy_to_host()[0])
+
+    # Check which side to recurse
+    if k == partition_idx:
+        return arr[k]
+    elif k < partition_idx:
+ return quickselect(arr, left, partition_idx - 1, k) + else: + return quickselect(arr, partition_idx + 1, right, k) + +# Testing the Quickselect with CUDA +arr = np.random.randint(0, 1000, size=1000) +k = 500 # Find the 500th smallest element + +# Call the quickselect function +result = quickselect(arr, 0, arr.size - 1, k) +print(f"The {k+1}-th smallest element is: {result}") diff --git a/simba/sandbox/read_boris.py b/simba/sandbox/read_boris.py new file mode 100644 index 000000000..de596dac2 --- /dev/null +++ b/simba/sandbox/read_boris.py @@ -0,0 +1,225 @@ +__author__ = "Simon Nilsson" + +import configparser +import glob +import multiprocessing +import os +import pickle +import platform +import re +import shutil +import subprocess +import webbrowser +from configparser import ConfigParser +from copy import deepcopy +from datetime import datetime, timedelta +from pathlib import Path +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union + +try: + from typing import Literal +except: + from typing_extensions import Literal + +from urllib.parse import urlparse + +import cv2 +import numpy as np +import pandas as pd +import pkg_resources +import pyarrow as pa +from numba import njit, prange +from pyarrow import csv +from shapely.geometry import (LineString, MultiLineString, MultiPolygon, Point, + Polygon) + +from simba.utils.checks import (check_file_exist_and_readable, check_float, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_instance, check_int, + check_nvidea_gpu_available, check_str, + check_valid_array, check_valid_boolean, + check_valid_dataframe, check_valid_lst) +from simba.utils.enums import ConfigKey, Dtypes, Formats, Keys, Options +from simba.utils.errors import (DataHeaderError, DuplicationError, + FFMPEGCodecGPUError, FileExistError, + FrameRangeError, IntegerError, + InvalidFilepathError, InvalidFileTypeError, + InvalidInputError, InvalidVideoFileError, + MissingProjectConfigEntryError, NoDataError, + NoFilesFoundError, NotDirectoryError, + ParametersFileError, PermissionError) +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.warnings import ( + FileExistWarning, InvalidValueWarning, NoDataFoundWarning, + NoFileFoundWarning, ThirdPartyAnnotationsInvalidFileFormatWarning, FrameRangeWarning) +from simba.utils.read_write import get_fn_ext, write_pickle + +# from simba.utils.keyboard_listener import KeyboardListener + + +PARSE_OPTIONS = csv.ParseOptions(delimiter=",") +READ_OPTIONS = csv.ReadOptions(encoding="utf8") + +def _is_new_boris_version(pd_df: pd.DataFrame): + """ + Check the format of a boris annotation file. + + In the new version, additional column names are present, while + others have slightly different name. Here, we check for the presence + of a column name present only in the newer version. 
+ + :return: True if newer version + """ + return "Media file name" in list(pd_df.columns) + + +def _find_cap_insensitive_name(target: str, values: List[str]) -> Union[None, str]: + check_str(name=f'{_find_cap_insensitive_name.__name__} target', value=target) + check_valid_lst(data=values, source=f'{_find_cap_insensitive_name.__name__} values', valid_dtypes=(str,), min_len=1) + target_lower, values_lower = target.lower(), [x.lower() for x in values] + if target_lower not in values_lower: + return None + else: + return values[values_lower.index(target_lower)] + + +def read_boris_file(file_path: Union[str, os.PathLike], + fps: Optional[Union[int, float]] = None, + orient: Optional[Literal['index', 'columns']] = 'index', + save_path: Optional[Union[str, os.PathLike]] = None, + raise_error: Optional[bool] = False, + log_setting: Optional[bool] = False) -> Union[None, Dict[str, Dict[str, pd.DataFrame]]]: + """ + Reads a BORIS behavioral annotation file, processes the data, and optionally saves the results to a file. + + :param Union[str, os.PathLike] file_path: The path to the BORIS file to be read. The file should be a CSV containing behavioral annotations. + :param Optional[Union[int, float]] fps: Frames per second (FPS) to convert time annotations into frame numbers. If not provided, it will be extracted from the BORIS file if available. + :param Optional[Literal['index', 'columns']] orient: Determines the orientation of the results. 'index' will organize data with start and stop times as indices, while 'columns' will store data in columns. + :param Optional[Union[str, os.PathLike] save_path: The path where the processed results should be saved as a pickle file. If not provided, the results will be returned instead. + :param Optional[bool] raise_error: Whether to raise errors if the file format or content is invalid. If False, warnings will be logged instead of raising exceptions. + :param Optional[bool] log_setting: Whether to log warnings and errors. This is relevant when `raise_error` is set to False. + :return: If `save_path` is None, returns a dictionary where keys are behaviors and values are dataframes containing start and stop frames for each behavior. If `save_path` is provided, the results are saved and nothing is returned. 
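+
+    :example:
+    >>> # A minimal illustrative call; the file path and fps below are hypothetical.
+    >>> results = read_boris_file(file_path='/data/boris_annotations.csv', fps=30)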
+ """ + + MEDIA_FILE_NAME = "Media file name" + BEHAVIOR_TYPE = 'Behavior type' + OBSERVATION_ID = "Observation id" + TIME = "Time" + FPS = 'FPS' + EVENT = 'EVENT' + BEHAVIOR = "Behavior" + START = 'START' + FRAME = 'FRAME' + STOP = 'STOP' + STATUS = "Status" + FRAME_INDEX = 'Image index' + MEDIA_FILE_PATH = "Media file path" + + check_file_exist_and_readable(file_path=file_path) + if fps is not None: + check_int(name=f'{read_boris_file.__name__} fps', min_value=1, value=fps) + check_str(name=f'{read_boris_file.__name__} orient', value=orient, options=('index', 'columns')) + if save_path is not None: + check_if_dir_exists(in_dir=os.path.dirname(save_path)) + boris_df = pd.read_csv(file_path) + if not _is_new_boris_version(boris_df): + expected_headers = [TIME, MEDIA_FILE_PATH, BEHAVIOR, STATUS] + if not OBSERVATION_ID in boris_df.columns: + if raise_error: + raise InvalidFileTypeError(msg=f'{file_path} is not a valid BORIS file', + source=read_boris_file.__name__) + else: + ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, + source=read_boris_file.__name__, log_status=log_setting) + return {} + start_idx = boris_df[boris_df[OBSERVATION_ID] == TIME].index.values + if len(start_idx) != 1: + if raise_error: + raise InvalidFileTypeError(msg=f'{file_path} is not a valid BORIS file', + source=read_boris_file.__name__) + else: + ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, + source=read_boris_file.__name__, log_status=log_setting) + return {} + df = pd.read_csv(file_path, skiprows=range(0, int(start_idx + 1))) + else: + MEDIA_FILE_PATH, STATUS = MEDIA_FILE_NAME, BEHAVIOR_TYPE + expected_headers = [TIME, MEDIA_FILE_PATH, BEHAVIOR, STATUS] + df = pd.read_csv(file_path) + check_valid_dataframe(df=df, source=f'{read_boris_file.__name__} {file_path}', required_fields=expected_headers) + numeric_check = pd.to_numeric(df[TIME], errors='coerce').notnull().all() + if not numeric_check: + if raise_error: + raise InvalidInputError( + msg=f'SimBA found TIME DATA annotation in file {file_path} that could not be interpreted as numeric values (seconds or frame numbers)') + else: + ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting) + return {} + df[TIME] = df[TIME].astype(np.float32) + media_file_names_in_file = df[MEDIA_FILE_PATH].unique() + FRAME_INDEX = _find_cap_insensitive_name(target=FRAME_INDEX, values=list(df.columns)) + if fps is None: + FPS = _find_cap_insensitive_name(target=FPS, values=list(df.columns)) + if not FPS in df.columns: + if raise_error: + raise FrameRangeError( + f'The annotations are in seconds and FPS was not passed. FPS could also not be read from the BORIS file', + source=read_boris_file.__name__) + else: + FrameRangeWarning( + msg=f'The annotations are in seconds and FPS was not passed. 
FPS could also not be read from the BORIS file', + source=read_boris_file.__name__) + ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting) + return {} + if len(media_file_names_in_file) == 1: + fps = df[FPS].iloc[0] + check_float(name='fps', value=fps, min_value=10e-6, raise_error=True) + fps = [float(fps)] + else: + fps_lst = df[FPS].iloc[0].split(';') + fps = [] + for fps_value in fps_lst: + check_float(name='fps', value=fps_value, min_value=10e-6, raise_error=True) + fps.append(float(fps_value)) + if FRAME_INDEX is not None: + expected_headers.append(FRAME_INDEX) + df = df[expected_headers] + results = {} + for video_cnt, video_file_name in enumerate(media_file_names_in_file): + video_name = get_fn_ext(filepath=video_file_name)[1] + results[video_name] = {} + video_fps = fps[video_cnt] + video_df = df[df[MEDIA_FILE_PATH] == video_file_name].reset_index(drop=True) + if FRAME_INDEX is None: + video_df['FRAME'] = (video_df[TIME] * video_fps).astype(int) + else: + video_df['FRAME'] = video_df[FRAME_INDEX] + video_df = video_df.drop([TIME, MEDIA_FILE_PATH], axis=1) + video_df = video_df.rename(columns={BEHAVIOR: 'BEHAVIOR', STATUS: EVENT}) + for clf in video_df['BEHAVIOR'].unique(): + video_clf_df = video_df[video_df['BEHAVIOR'] == clf].reset_index(drop=True) + if orient == 'index': + start_clf, stop_clf = video_clf_df[video_clf_df[EVENT] == START].reset_index(drop=True), video_clf_df[ + video_clf_df[EVENT] == STOP].reset_index(drop=True) + start_clf = start_clf.rename(columns={FRAME: START}).drop([EVENT, 'BEHAVIOR'], axis=1) + stop_clf = stop_clf.rename(columns={FRAME: STOP}).drop([EVENT], axis=1) + if len(start_clf) != len(stop_clf): + if raise_error: + raise FrameRangeError( + f'In file {file_path}, the number of start events ({len(start_clf)}) and stop events ({len(stop_clf)}) for behavior {clf} and video {video_name} is not equal', + source=read_boris_file.__name__) + else: + FrameRangeWarning( + msg=f'In file {file_path}, the number of start events ({len(start_clf)}) and stop events ({len(stop_clf)}) for behavior {clf} and video {video_name} is not equal', + source=read_boris_file.__name__) + return results + video_clf_df = pd.concat([start_clf, stop_clf], axis=1)[['BEHAVIOR', START, STOP]] + results[video_name][clf] = video_clf_df + if save_path is None: + return results + else: + write_pickle(data=results, save_path=save_path) + +read_boris_file(file_path=r"C:\troubleshooting\boris_test\project_folder\boris_files\tabular.trial.csv") diff --git a/simba/sandbox/read_boris_annotation_files.py b/simba/sandbox/read_boris_annotation_files.py new file mode 100644 index 000000000..96402ea04 --- /dev/null +++ b/simba/sandbox/read_boris_annotation_files.py @@ -0,0 +1,237 @@ +from typing import Dict, List, Union, Optional +try: + from typing import Literal +except: + from typing_extensions import Literal + +import numpy as np +import pandas as pd +import os + +from simba.utils.data import detect_bouts +from simba.utils.enums import Methods +from simba.utils.errors import ColumnNotFoundError, InvalidFileTypeError, InvalidInputError, FrameRangeError +from simba.utils.read_write import get_fn_ext, read_video_info, bento_file_reader, read_video_info_csv, find_files_of_filetypes_in_directory, write_pickle +from simba.utils.warnings import ThirdPartyAnnotationsInvalidFileFormatWarning +from simba.utils.checks import (check_valid_lst, + check_valid_dataframe, + 
check_all_file_names_are_represented_in_video_log, + check_str, + check_valid_boolean, + check_file_exist_and_readable, + check_if_dir_exists, + check_int) + + + + +def is_new_boris_version(pd_df: pd.DataFrame): + """ + Check the format of a boris annotation file. + + In the new version, additional column names are present, while + others have slightly different name. Here, we check for the presence + of a column name present only in the newer version. + + :return: True if newer version + """ + return "Media file name" in list(pd_df.columns) + +def read_boris_file(file_path: Union[str, os.PathLike], + fps: Optional[Union[int, float]] = None, + orient: Optional[Literal['index', 'columns']] = 'index', + save_path: Optional[Union[str, os.PathLike]] = None, + raise_error: Optional[bool] = False, + log_setting: Optional[bool] = False) -> Union[None, Dict[str, pd.DataFrame]]: + + """ + Reads a BORIS behavioral annotation file, processes the data, and optionally saves the results to a file. + + :param Union[str, os.PathLike] file_path: The path to the BORIS file to be read. The file should be a CSV containing behavioral annotations. + :param Optional[Union[int, float]] fps: Frames per second (FPS) to convert time annotations into frame numbers. If not provided, it will be extracted from the BORIS file if available. + :param Optional[Literal['index', 'columns']] orient: Determines the orientation of the results. 'index' will organize data with start and stop times as indices, while 'columns' will store data in columns. + :param Optional[Union[str, os.PathLike] save_path: The path where the processed results should be saved as a pickle file. If not provided, the results will be returned instead. + :param Optional[bool] raise_error: Whether to raise errors if the file format or content is invalid. If False, warnings will be logged instead of raising exceptions. + :param Optional[bool] log_setting: Whether to log warnings and errors. This is relevant when `raise_error` is set to False. + :return: If `save_path` is None, returns a dictionary where keys are behaviors and values are dataframes containing start and stop frames for each behavior. If `save_path` is provided, the results are saved and nothing is returned. 
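+
+    :example:
+    >>> # A minimal illustrative call; the paths and fps below are hypothetical.
+    >>> read_boris_file(file_path='/data/boris_annotations.csv', fps=30, save_path='/data/boris_annotations.pickle')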
+    """
+
+    MEDIA_FILE_NAME = "Media file name"
+    BEHAVIOR_TYPE = 'Behavior type'
+    OBSERVATION_ID = "Observation id"
+    TIME = "Time"
+    FPS = 'FPS'
+    EVENT = 'EVENT'
+    BEHAVIOR = "Behavior"
+    START = 'START'
+    FRAME = 'FRAME'
+    STOP = 'STOP'
+    STATUS = "Status"
+    MEDIA_FILE_PATH = "Media file path"
+
+    results = {}
+    check_file_exist_and_readable(file_path=file_path)
+    if fps is not None:
+        check_int(name=f'{read_boris_file.__name__} fps', min_value=1, value=fps)
+    check_str(name=f'{read_boris_file.__name__} orient', value=orient, options=('index', 'columns'))
+    if save_path is not None:
+        check_if_dir_exists(in_dir=os.path.dirname(save_path))
+    boris_df = pd.read_csv(file_path)
+    if not is_new_boris_version(boris_df):
+        expected_headers = [TIME, MEDIA_FILE_PATH, BEHAVIOR, STATUS]
+        if not OBSERVATION_ID in boris_df.columns:
+            if raise_error:
+                raise InvalidFileTypeError(msg=f'{file_path} is not a valid BORIS file', source=read_boris_file.__name__)
+            else:
+                ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting)
+                return results
+        start_idx = boris_df[boris_df[OBSERVATION_ID] == TIME].index.values
+        if len(start_idx) != 1:
+            if raise_error:
+                raise InvalidFileTypeError(msg=f'{file_path} is not a valid BORIS file', source=read_boris_file.__name__)
+            else:
+                ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting)
+                return results
+        df = pd.read_csv(file_path, skiprows=range(0, int(start_idx + 1)))
+    else:
+        MEDIA_FILE_PATH, STATUS = MEDIA_FILE_NAME, BEHAVIOR_TYPE
+        expected_headers = [TIME, MEDIA_FILE_PATH, BEHAVIOR, STATUS]
+        df = pd.read_csv(file_path)
+    check_valid_dataframe(df=df, source=f'{read_boris_file.__name__} {file_path}', required_fields=expected_headers)
+    _, video_base_name, _ = get_fn_ext(df.loc[0, MEDIA_FILE_PATH])
+    numeric_check = pd.to_numeric(df[TIME], errors='coerce').notnull().all()
+    if not numeric_check:
+        if raise_error:
+            raise InvalidInputError(msg=f'SimBA found TIME DATA annotation in file {file_path} that could not be interpreted as numeric values (seconds or frame numbers)')
+        else:
+            ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting)
+            return results
+    df[TIME] = df[TIME].astype(np.float32)
+    if fps is None:
+        if not FPS in df.columns:
+            if raise_error:
+                raise FrameRangeError(f'The annotations are in seconds and FPS was not passed. FPS could also not be read from the BORIS file', source=read_boris_file.__name__)
+            else:
+                ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting)
+                return results
+        fps = df[FPS].iloc[0]
+        if not isinstance(fps, (float, int)):
+            if raise_error:
+                raise FrameRangeError(f'The annotations are in seconds and FPS was not passed. FPS could also not be read from the BORIS file', source=read_boris_file.__name__)
+            else:
+                ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting)
+                return results
+    df = df[expected_headers]
+    df['FRAME'] = (df[TIME] * fps).astype(int)
+    df = df.drop([TIME, MEDIA_FILE_PATH], axis=1)
+    df = df.rename(columns={BEHAVIOR: 'BEHAVIOR', STATUS: EVENT})
+
+    for clf in df['BEHAVIOR'].unique():
+        clf_df = df[df['BEHAVIOR'] == clf].reset_index(drop=True)
+        if orient == 'columns':
+            results[clf] = clf_df
+        else:
+            start_clf, stop_clf = clf_df[clf_df[EVENT] == START].reset_index(drop=True), clf_df[clf_df[EVENT] == STOP].reset_index(drop=True)
+            start_clf = start_clf.rename(columns={FRAME: START}).drop([EVENT, 'BEHAVIOR'], axis=1)
+            stop_clf = stop_clf.rename(columns={FRAME: STOP}).drop([EVENT], axis=1)
+            if len(start_clf) != len(stop_clf):
+                if raise_error:
+                    raise FrameRangeError(f'In file {file_path}, the number of start events ({len(start_clf)}) and stop events ({len(stop_clf)}) for behavior {clf} is not equal', source=read_boris_file.__name__)
+                else:
+                    ThirdPartyAnnotationsInvalidFileFormatWarning(annotation_app="BORIS", file_path=file_path, source=read_boris_file.__name__, log_status=log_setting)
+                    return results
+            clf_df = pd.concat([start_clf, stop_clf], axis=1)[['BEHAVIOR', START, STOP]]
+            results[clf] = clf_df
+    if save_path is None:
+        return results
+    else:
+        write_pickle(data=results, save_path=save_path)
+
+
+def read_boris_annotation_files(data_paths: Union[List[str], str, os.PathLike],
+                                video_info_df: Union[str, os.PathLike, pd.DataFrame],
+                                error_setting: Literal[Union[None, Methods.ERROR.value, Methods.WARNING.value]] = None,
+                                log_setting: Optional[bool] = False) -> Dict[str, pd.DataFrame]:
+    """
+    Reads multiple BORIS behavioral annotation files and compiles the data into a dictionary of dataframes.
+
+    :param Union[List[str], str, os.PathLike] data_paths: Paths to the BORIS annotation files. This can be a list of file paths, a single directory containing the files, or a single file path.
+    :param Union[str, os.PathLike, pd.DataFrame] video_info_df: The path to a CSV file, an existing dataframe, or a file-like object containing video information (e.g., FPS, video name). This data is used to align the annotation files with their respective videos.
+    :param Literal[Union[None, Methods.ERROR.value, Methods.WARNING.value]] error_setting: Defines the behavior when encountering issues in the files. Options are `Methods.ERROR.value` to raise errors, `Methods.WARNING.value` to log warnings, or `None` for no action.
+    :param Optional[bool] log_setting: Whether to log warnings and errors when `error_setting` is set to `Methods.WARNING.value`. Defaults to `False`.
+    :return: A dictionary where each key is a video name, and each value is a dataframe containing the compiled behavioral annotations from the corresponding BORIS file.
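+
+    :example:
+    >>> # A minimal illustrative call; the paths below are hypothetical.
+    >>> dfs = read_boris_annotation_files(data_paths=['/data/annotations/video_1.csv'], video_info_df='/project_folder/logs/video_info.csv', error_setting='WARNING')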
+ """ + + + if error_setting is not None: + check_str(name=f'{read_boris_annotation_files.__name__} error_setting', value=error_setting, options=(Methods.ERROR.value, Methods.WARNING.value)) + check_valid_boolean(value=log_setting, source=f'{read_boris_annotation_files.__name__} log_setting') + raise_error = False + if error_setting == Methods.ERROR.value: + raise_error = True + if isinstance(video_info_df, str): + check_file_exist_and_readable(file_path=video_info_df) + video_info_df = read_video_info_csv(file_path=video_info_df) + if isinstance(data_paths, list): + check_valid_lst(data=data_paths, source=f'{read_boris_annotation_files.__name__} data_paths', min_len=1, valid_dtypes=(str,)) + elif isinstance(data_paths, str): + check_if_dir_exists(in_dir=data_paths, source=f'{read_boris_annotation_files.__name__} data_paths') + data_paths = find_files_of_filetypes_in_directory(directory=data_paths, extensions=['.csv'], raise_error=True) + check_all_file_names_are_represented_in_video_log(video_info_df=video_info_df, data_paths=data_paths) + check_valid_dataframe(df=video_info_df, source=read_boris_annotation_files.__name__) + dfs = {} + for file_cnt, file_path in enumerate(data_paths): + _, video_name, _ = get_fn_ext(file_path) + _, _, fps = read_video_info(vid_info_df=video_info_df, video_name=video_name) + boris_dict = read_boris_file(file_path=file_path, fps=fps, orient='columns', raise_error=raise_error, log_setting=log_setting) + dfs[video_name] = pd.concat(boris_dict.values(), ignore_index=True) + return dfs + + + + + # boris_df = pd.read_csv(file_path) + # try: + # if not is_new_boris_version(boris_df): + # expected_headers = [TIME, MEDIA_FILE_PATH, BEHAVIOR, STATUS] + # start_idx = boris_df[boris_df[OBSERVATION_ID] == TIME].index.values + # df = pd.read_csv(file_path, skiprows=range(0, int(start_idx + 1)))[ + # expected_headers + # ] + # else: + # # Adjust column names to newer BORIS annotation format + # MEDIA_FILE_PATH = "Media file name" + # STATUS = "Behavior type" + # expected_headers = [TIME, MEDIA_FILE_PATH, BEHAVIOR, STATUS] + # df = pd.read_csv(file_path)[expected_headers] + # _, video_base_name, _ = get_fn_ext(df.loc[0, MEDIA_FILE_PATH]) + # df.drop(MEDIA_FILE_PATH, axis=1, inplace=True) + # df.columns = ["TIME", "BEHAVIOR", "EVENT"] + # df["TIME"] = df["TIME"].astype(float) + # dfs[video_base_name] = df.sort_values(by="TIME") + # except Exception as e: + # print(e) + # if error_setting == Methods.WARNING.value: + # ThirdPartyAnnotationsInvalidFileFormatWarning( + # annotation_app="BORIS", file_path=file_path, log_status=log_setting + # ) + # elif error_setting == Methods.ERROR.value: + # raise InvalidFileTypeError( + # msg=f"{file_path} is not a valid BORIS file. See the docs for expected file format." 
+ # ) + # else: + # pass + # for video_name, video_df in dfs.items(): + # _, _, fps = read_video_info(vid_info_df=video_info_df, video_name=video_name) + # video_df["FRAME"] = (video_df["TIME"] * fps).astype(int) + # video_df.drop("TIME", axis=1, inplace=True) + # return dfs + + +# video_info_df = read_video_info_csv(file_path='/Users/simon/Desktop/envs/troubleshooting/two_black_animals_14bp/project_folder/logs/video_info.csv') +# +df = read_boris_annotation_files(data_paths=[r"C:\troubleshooting\boris_test\project_folder\boris_files\c_oxt23_190816_132617_s_trimmcropped.csv"], + error_setting='WARNING', + log_setting=False, + video_info_df=r"C:\troubleshooting\boris_test\project_folder\logs\video_info.csv") \ No newline at end of file diff --git a/simba/sandbox/read_img.py b/simba/sandbox/read_img.py new file mode 100644 index 000000000..4f6c0a21c --- /dev/null +++ b/simba/sandbox/read_img.py @@ -0,0 +1,167 @@ +__author__ = "Simon Nilsson" + +import base64 +import configparser +import glob +import io +import itertools +import json +import math +import multiprocessing +import os +import pickle +import platform +import re +import shutil +import ffmpeg +import subprocess +import webbrowser +from configparser import ConfigParser +from copy import deepcopy +from datetime import datetime, timedelta +from pathlib import Path +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union + +from simba.utils.read_write import get_video_meta_data +from PIL import Image + +try: + from typing import Literal +except: + from typing_extensions import Literal + +from urllib.parse import urlparse + +import cv2 +import numpy as np +import pandas as pd +import pkg_resources +import pyarrow as pa +from numba import njit, prange +from pyarrow import csv +from shapely.geometry import (LineString, MultiLineString, MultiPolygon, Point, + Polygon) + +from simba.utils.checks import (check_file_exist_and_readable, check_float, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_keys_exist_in_dict, + check_if_string_value_is_valid_video_timestamp, + check_if_valid_rgb_tuple, check_instance, + check_int, check_nvidea_gpu_available, + check_str, check_valid_array, + check_valid_boolean, check_valid_dataframe, + check_valid_lst, is_video_color) +from simba.utils.enums import ConfigKey, Dtypes, Formats, Keys, Options +from simba.utils.errors import (DataHeaderError, DuplicationError, + FFMPEGCodecGPUError, FileExistError, + FrameRangeError, IntegerError, + InvalidFilepathError, InvalidFileTypeError, + InvalidInputError, InvalidVideoFileError, + MissingProjectConfigEntryError, NoDataError, + NoFilesFoundError, NotDirectoryError, + ParametersFileError, PermissionError) +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.warnings import ( + FileExistWarning, FrameRangeWarning, InvalidValueWarning, + NoDataFoundWarning, NoFileFoundWarning, + ThirdPartyAnnotationsInvalidFileFormatWarning) + + + +def read_frm_of_video(video_path: Union[str, os.PathLike, cv2.VideoCapture], + frame_index: Optional[int] = 0, + opacity: Optional[float] = None, + size: Optional[Tuple[int, int]] = None, + greyscale: Optional[bool] = False, + clahe: Optional[bool] = False, + use_ffmpeg: Optional[bool] = False) -> np.ndarray: + + """ + Reads single image from video file. + + :param Union[str, os.PathLike] video_path: Path to video file, or cv2.VideoCapture object. + :param int frame_index: The frame of video to return. Default: 1. 
Note, if frame index -1 is passed, the last frame of the video is read in.
+    :param Optional[int] opacity: Value between 0 and 100 or None. If float value, returns image with opacity. 100 fully opaque. 0.0 fully transparent.
+    :param Optional[Tuple[int, int]] size: If tuple, resizes the image to size. Else, returns original image size.
+    :param Optional[bool] greyscale: If true, returns the greyscale image. Default False.
+    :param Optional[bool] clahe: If true, returns clahe enhanced image. Default False.
+    :param Optional[bool] use_ffmpeg: If true, reads the frame using FFmpeg rather than OpenCV. Default False.
+    :return: Image as numpy array.
+    :rtype: np.ndarray
+
+    :example:
+    >>> img = read_frm_of_video(video_path='/Users/simon/Desktop/envs/platea_featurizer/data/video/3D_Mouse_5-choice_MouseTouchBasic_s9_a4_grayscale.mp4', clahe=True)
+    >>> cv2.imshow('img', img)
+    >>> cv2.waitKey(5000)
+    """
+
+    check_instance(source=read_frm_of_video.__name__, instance=video_path, accepted_types=(str, cv2.VideoCapture))
+    if type(video_path) == str:
+        check_file_exist_and_readable(file_path=video_path)
+        video_meta_data = get_video_meta_data(video_path=video_path)
+    else:
+        video_meta_data = {"frame_count": int(video_path.get(cv2.CAP_PROP_FRAME_COUNT)),
+                           "fps": video_path.get(cv2.CAP_PROP_FPS),
+                           'width': int(video_path.get(cv2.CAP_PROP_FRAME_WIDTH)),
+                           'height': int(video_path.get(cv2.CAP_PROP_FRAME_HEIGHT))}
+
+    check_int(name='frame_index', value=frame_index, min_value=-1)
+    if frame_index == -1:
+        frame_index = video_meta_data["frame_count"] - 1
+    if (frame_index >= video_meta_data["frame_count"]) or (frame_index < 0):
+        raise FrameRangeError(msg=f'Frame {frame_index} is out of range: The video {video_path} contains {video_meta_data["frame_count"]} frames.', source=read_frm_of_video.__name__)
+    if not use_ffmpeg:
+        if type(video_path) == str:
+            capture = cv2.VideoCapture(video_path)
+        else:
+            capture = video_path
+        capture.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
+        ret, img = capture.read()
+        if not ret:
+            raise FrameRangeError(msg=f"Frame {frame_index} for video {video_path} could not be read.")
+    else:
+        if not isinstance(video_path, str):
+            raise NoDataError(msg='When using FFMpeg, pass video path', source=read_frm_of_video.__name__)
+        is_color = is_video_color(video=video_path)
+        timestamp = frame_index / video_meta_data['fps']
+        if is_color:
+            cmd = f"ffmpeg -hwaccel cuda -ss {timestamp:.10f} -i {video_path} -vframes 1 -f rawvideo -pix_fmt bgr24 -v error -"
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            img = np.frombuffer(result.stdout, np.uint8).reshape((video_meta_data["height"], video_meta_data["width"], 3))
+        else:
+            cmd = f"ffmpeg -hwaccel cuda -ss {timestamp:.10f} -i {video_path} -vframes 1 -f rawvideo -pix_fmt gray -v error -"
+            result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            img = np.frombuffer(result.stdout, np.uint8).reshape((video_meta_data["height"], video_meta_data["width"]))
+    if opacity:
+        opacity = float(opacity / 100)
+        check_float(name="Opacity", value=opacity, min_value=0.00, max_value=1.00, raise_error=True)
+        opacity = 1 - opacity
+        h, w, clr = img.shape[:3]
+        opacity_image = np.ones((h, w, clr), dtype=np.uint8) * int(255 * opacity)
+        img = cv2.addWeighted(img.astype(np.uint8), 1 - opacity, opacity_image.astype(np.uint8), opacity, 0)
+    if size:
+        img = cv2.resize(img, size, interpolation=cv2.INTER_LINEAR)
+    if greyscale:
+        if len(img.shape) > 2:
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    if clahe:
+        if len(img.shape) > 2:
+            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        img = cv2.createCLAHE(clipLimit=2, tileGridSize=(16, 16)).apply(img)
+
+    return img
+
+
+import time
+
+ffmpeg_times = []
+opencv_times = []
+for i in range(10):
+    start = time.time()
+    img = read_frm_of_video(video_path=r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\502_MA141_Gi_Saline_0513.mp4", use_ffmpeg=True)
+    ffmpeg_times.append(time.time() - start)
+for i in range(10):
+    start = time.time()
+    img = read_frm_of_video(video_path=r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\502_MA141_Gi_Saline_0513.mp4", use_ffmpeg=False)
+    opencv_times.append(time.time() - start)
+print(np.mean(ffmpeg_times), np.mean(opencv_times))
\ No newline at end of file
diff --git a/simba/sandbox/read_video_info.py b/simba/sandbox/read_video_info.py
new file mode 100644
index 000000000..4afcf7caa
--- /dev/null
+++ b/simba/sandbox/read_video_info.py
@@ -0,0 +1,78 @@
+import pandas as pd
+from typing import Union, Optional, Tuple
+import os
+import math
+from simba.utils.errors import NoFilesFoundError, ParametersFileError, DuplicationError
+from simba.utils.warnings import InvalidValueWarning
+from simba.utils.checks import check_valid_lst, check_float
+from simba.utils.enums import Formats
+from simba.utils.checks import check_str, check_valid_boolean, check_file_exist_and_readable, check_valid_dataframe
+from simba.utils.read_write import read_video_info_csv
+from copy import deepcopy
+
+
+def read_video_info(video_name: str,
+                    video_info_df: pd.DataFrame,
+                    raise_error: Optional[bool] = True) -> Union[Tuple[pd.DataFrame, float, float], Tuple[None, None, None]]:
+    """
+    Helper to read the metadata (pixels per mm, resolution, fps etc) from the video_info.csv for a single input file/video.
+
+    :parameter str video_name: Name of the video as represented in the ``Video`` column of the ``project_folder/logs/video_info.csv`` file.
+    :parameter pd.DataFrame video_info_df: Parsed ``project_folder/logs/video_info.csv`` file. This file can be parsed by :meth:`simba.utils.read_write.read_video_info_csv`.
+    :parameter Optional[bool] raise_error: If True, raises error if the video cannot be found in the ``video_info_df`` file. If False, returns None if the video cannot be found.
+    :returns: 3-part tuple: One row DataFrame representing the video in the ``project_folder/logs/video_info.csv`` file, the pixels per millimeter of the video, and the frame rate of the video.
+    :rtype: Union[Tuple[pd.DataFrame, float, float], Tuple[None, None, None]]
+
+    :example:
+    >>> video_info_df = read_video_info_csv(file_path='project_folder/logs/video_info.csv')
+    >>> read_video_info(video_info_df=video_info_df, video_name='Together_1')
+    """
+
+    check_str(name=f'{read_video_info.__name__} video_name', value=video_name, allow_blank=False)
+    check_valid_boolean(value=[raise_error], source=f'{read_video_info.__name__} raise_error')
+    check_valid_dataframe(df=video_info_df, source='', required_fields=["pixels/mm", "fps", "Video"])
+    video_settings = video_info_df.loc[video_info_df["Video"] == video_name]
+    if len(video_settings) > 1:
+        raise DuplicationError(msg=f"SimBA found multiple rows in `project_folder/logs/video_info.csv` for videos named {video_name}. Please make sure that each video name is represented ONCE in the file", source='')
+    elif len(video_settings) < 1:
+        if raise_error:
+            raise ParametersFileError(msg=f"SimBA could not find {video_name} in the `project_folder/logs/video_info.csv` file. 
Make sure all videos analyzed are represented in the file.", source='')
+        else:
+            return (None, None, None)
+    else:
+        px_per_mm = video_settings["pixels/mm"].values[0]
+        fps = video_settings["fps"].values[0]
+        if math.isnan(px_per_mm):
+            raise ParametersFileError(msg=f'Pixels per millimeter for video {video_name} in the `project_folder/logs/video_info.csv` file is not a valid number. Please correct it to proceed.')
+        if math.isnan(fps):
+            raise ParametersFileError(msg=f'The FPS for video {video_name} in the `project_folder/logs/video_info.csv` file is not a valid number. Please correct it to proceed.')
+        check_float(name=f'pixels per millimeter video {video_name}', value=px_per_mm)
+        check_float(name=f'fps video {video_name}', value=fps)
+        px_per_mm, fps = float(px_per_mm), float(fps)
+        if px_per_mm <= 0:
+            InvalidValueWarning(msg=f"Video {video_name} has a pixel per millimeter conversion factor of 0 or less. Correct the pixel/mm conversion factor values inside the `project_folder/logs/video_info.csv` file", source='')
+        if fps <= 1:
+            InvalidValueWarning(msg=f"Video {video_name} has an FPS of 1 or less. It is recommended to use videos with more than one frame per second. If inaccurate, correct the FPS values inside the `project_folder/logs/video_info.csv` file", source='')
+        return video_settings, px_per_mm, fps
+
+
+
+    #
+    # px_per_mm = float(video_settings["pixels/mm"])
+    # fps = float(video_settings["fps"])
+    # if math.isnan(px_per_mm):
+    #     raise ParametersFileError(
+    #         msg=f'Pixels per millimeter for video {video_name} in the {self.video_info_path} file is not a valid number.')
+    # if math.isnan(fps):
+    #     raise ParametersFileError(
+    #         msg=f'The FPS for video {video_name} in the {self.video_info_path} file is not a valid number.')
+    # return video_settings, px_per_mm, fps
+    # except TypeError:
+    #     raise ParametersFileError(
+    #         msg=f"Make sure the videos that are going to be analyzed are represented with APPROPRIATE VALUES inside the project_folder/logs/video_info.csv file in your SimBA project. Could not interpret the fps, pixels per millimeter and/or fps as numerical values for video {video_name}",
+    #         source=self.__class__.__name__,
+    #     )
+    # # return info_df
+
+
+
+read_video_info(video_name='501_MA142_Gi_CNO_0514', video_info_df=read_video_info_csv(file_path=r"C:\troubleshooting\mitra\project_folder\logs\video_info.csv"))
\ No newline at end of file
diff --git a/simba/sandbox/redis.py b/simba/sandbox/redis.py
new file mode 100644
index 000000000..0ef295e7d
--- /dev/null
+++ b/simba/sandbox/redis.py
@@ -0,0 +1,25 @@
+import redis
+import json
+import numpy as np
+import multiprocessing
+
+
+data = np.random.randint(0, 100, (1000, 5))
+r = redis.Redis(host='localhost', port=6379)
+data_split = np.array_split(data, 5)
+for i in range(len(data_split)):
+    arr_json = json.dumps(data_split[i].tolist())
+    r.set(i, arr_json)
+
+def test_func(val):
+    arr_loaded = np.array(json.loads(r.get(str(val))))
+    print(arr_loaded.shape)
+
+
+chunk_lst = [x for x in range(5)]
+with multiprocessing.Pool(7, maxtasksperchild=1) as pool:
+    for cnt, result in enumerate(pool.imap(test_func, chunk_lst, chunksize=1)):
+        pass
+
+
+
diff --git a/simba/sandbox/reduce_features.py b/simba/sandbox/reduce_features.py
new file mode 100644
index 000000000..166e63ea9
--- /dev/null
+++ b/simba/sandbox/reduce_features.py
@@ -0,0 +1,27 @@
+import os
+from simba.utils.read_write import read_df, write_df, find_files_of_filetypes_in_directory, get_fn_ext
+from simba.mixins.config_reader import ConfigReader
+
+SIMBA_PROJECT_CONFIG_PATH = r"C:\troubleshooting\mitra\project_folder\project_config.ini" #PATH TO THE SIMBA PROJECT CONFIG (USED TO FIND THE BODY PART NAMES AND CLASSIFIER NAMES)
+DATA_DIRECTORY = r'C:\troubleshooting\mitra\project_folder\csv\targets_inserted' #PATH TO A DIRECTORY CONTAINING SIMBA CSV FILES
+SAVE_DIRECTORY = r'C:\troubleshooting\mitra\project_folder\csv\targets_inserted\temp\new_targets_inserted' #PATH TO AN EMPTY DIRECTORY USED TO SAVE THE NEW CSV FILES.
+
+FIELD_TO_KEEP = ["MOVEMENT_SUM_2.0_NOSE",
+                 "GEOMETRY_MEAN_BODY_AREA_0.5",
+                 "GEOMETRY_MEAN_BODY_AREA_2.0",
+                 "GEOMETRY_SUM_HULL_WIDTH_2.0",
+                 "GEOMETRY_VAR_HULL_LENGTH_2.0",
+                 "GEOMETRY_SUM_HULL_LENGTH_0.25",
+                 "GEOMETRY_MEAN_HULL_LENGTH_0.25"] #LIST OF FEATURE NAMES TO KEEP (ALL FEATURES NOT IN THIS LIST WILL BE REMOVED).
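+# NOTE: body-part and classifier column names from the SimBA project are appended to
+# FIELD_TO_KEEP below, so pose-estimation and annotation fields always survive the reduction.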
+ +data_paths = find_files_of_filetypes_in_directory(directory=DATA_DIRECTORY, extensions=['.csv'], raise_error=True) +config = ConfigReader(config_path=SIMBA_PROJECT_CONFIG_PATH, read_video_info=False, create_logger=False) +fields_to_keep = config.bp_col_names + FIELD_TO_KEEP + config.clf_names + +for file_cnt, file_path in enumerate(data_paths): + df = read_df(file_path=file_path, file_type='csv', usecols=fields_to_keep) + video_name = get_fn_ext(filepath=file_path)[1] + print(f'Processing {video_name}...') + save_path = os.path.join(SAVE_DIRECTORY, f'{video_name}.csv') + write_df(df=df, file_type='csv', save_path=save_path) +print(f'COMPLETE: New files stored in {SAVE_DIRECTORY}.') diff --git a/simba/sandbox/reduce_features.zip b/simba/sandbox/reduce_features.zip new file mode 100644 index 0000000000000000000000000000000000000000..0d6ec9b09d5c61bc0114b93a1688c6ffe558bcf0 GIT binary patch literal 1006 zcmWIWW@Zs#-~dAQth7i5DEQ9Cz#zn+z)+N$Qkt9^pO%_fQd*Q+tXEJO8p6xKp0`Om z9fa@B*G@03;AUWC0qFx1YeQr6FB^!|uD|lXAXM&Fmxh$b9-mXr*LFlJ$cH}4optTx zDXHZWw{~p0o6GU>{!YJ3)0!HTCN7^XeZRQx{MP&L@DA$~z3vm{o3JnEgX@(wB3=4DA@MR1MTb|-`Y2v} zWV4w5qHFJ*u1$?F>OOb1FZ+_pX0_|@zgAaD#Q(^dw)FkUmQ9|sd=7h?eSRrgv4Cyz z=beE?zdNk#yfR((&RO3Yc(i8m6ZMdTT05N<{LS`$*%ig}Ahg*jctJRel+=ri1xp(* zmijU6PLfh{-26s~_g%uXbP*wYHOn6}=DXC*Yri}pCAa9#dse%zkpgKQQ`5@BUVaE! z%Kg{Gyw&OXnwhh-51)SMxSF3mBE<8r0q5k?e*aYVJFXNwdC}~D+aGt<;=|WxSDu}J zbN&`phoq|S%}!tVnf@eQ-NL5!NPhcty{Oa<8B+{UaN%< zj-9{N)S)t6J@k#{t}f&6Z#{4SSo(}H&Nb0baqgihKfYKg->p0vXJXd>N8?uQDMe4s z7S3<#XBMv9&HM2)Yh%xiIF`rD0#g&YwrQSm?~S#3`mg`n#PS+NnFE~%vUO9|?)|;6 zTRrc`g9fdZXFb}gQ{Asld+eI^Ui;Hu3o*C54>m;Yxqps*xooumq9;PBp_gWhX`hvi zNRV58!%J^^%vR5oRV?vSXGxy4E8gdLEHA_+uOMShMC_WKZPyEGqT=6&>4t^wJ|Y$B z$$Cw2+lt=HZedy-QWbk+i(9`eZl3-4^R~mOsp~VXv)q4c<;#}_TuaZ-&@E0_ZP|7C z-o_t=-PhmUS><%+uu9&7{)xxG=gZxi)hlGWPU75@n1Z#1ZU?S2pEJK++Ah0GZ{Dul z${*5&xligQEs~S6)-mE}d7#J9VFabzc0PSU90047Ey{`ZO literal 0 HcmV?d00001 diff --git a/simba/sandbox/reduce_imge_stack_size.py b/simba/sandbox/reduce_imge_stack_size.py new file mode 100644 index 000000000..88e9429e7 --- /dev/null +++ b/simba/sandbox/reduce_imge_stack_size.py @@ -0,0 +1,70 @@ +from typing import Optional, Union, Dict + +from numba import jit +from numba import typed, types, prange +import numpy as np +from simba.utils.checks import check_float +from simba.utils.errors import InvalidInputError +from simba.mixins.image_mixin import ImageMixin + +@jit(nopython=True) +def _reduce_img_sizes(imgs: typed.Dict, scale: float): + results = {} + img_keys = list(imgs.keys()) + for img_idx in prange(len(img_keys)): + img = imgs[img_keys[img_idx]] + target_h, target_w = int(img.shape[0] * scale), int(img.shape[1] * scale) + row_indices = np.linspace(0, int(img.shape[0] - 1), target_h).astype(np.int32) + col_indices = np.linspace(0, int(img.shape[1] - 1), target_w).astype(np.int32) + if img.ndim == 3: + img = img[row_indices][:, col_indices, :] + else: + img = img[row_indices][:, col_indices] + results[img_keys[img_idx]] = img + return results + +def reduce_img_sizes(imgs: Dict[Union[str, int], np.ndarray], scale: float): + check_float(name=f'{reduce_img_sizes.__name__} scale', value=scale, max_value=0.99, min_value=0.01) + if not isinstance(imgs, dict): + raise InvalidInputError(msg=f'imgs has to be a dict, got {type(imgs)}', source=reduce_img_sizes.__name__) + clrs = set() + key_types = set() + for k, v in imgs.items(): + clrs.add(v.ndim) + key_types.add(type(k)) + if len(clrs) > 1: + raise InvalidInputError(msg=f'imgs has to be uniform colors, got {clrs}', 
source=reduce_img_sizes.__name__)
+    if len(key_types) > 1:
+        raise InvalidInputError(msg=f'imgs keys has to be int or strings, got {key_types}', source=reduce_img_sizes.__name__)
+    if list(clrs)[0] == 3:
+        value_type = types.uint8[:, :, :]
+    elif list(clrs)[0] == 2:
+        value_type = types.uint8[:, :]
+    else:
+        raise InvalidInputError(msg=f'imgs has to be 2 or 3 dimensions, got {list(clrs)[0]}', source=reduce_img_sizes.__name__)
+    if list(key_types)[0] is str:
+        key_type = types.unicode_type
+    elif list(key_types)[0] is int:
+        key_type = types.int64
+    else:
+        raise InvalidInputError(msg=f'imgs keys has to be int or strings, got {list(key_types)[0]}', source=reduce_img_sizes.__name__)
+    results = typed.Dict.empty(key_type=key_type, value_type=value_type)
+    for k, v in imgs.items():
+        results[k] = v
+    return dict(_reduce_img_sizes(imgs=results, scale=scale))
+
+
+imgs = ImageMixin.read_all_img_in_dir(dir=r"C:\troubleshooting\two_animals_16_bp_JAG\project_folder\videos\Together_1")
+new_imgs = {}
+for k, v in imgs.items():
+    new_imgs[k] = ImageMixin.img_to_greyscale(v)
+
+new_imgs = reduce_img_sizes(imgs=new_imgs, scale=0.5)
+
+import cv2
+
+cv2.imshow('img', new_imgs['count_values_in_ranges_cuda'])
+cv2.waitKey(30000)
diff --git a/simba/sandbox/relative_risk.py b/simba/sandbox/relative_risk.py
new file mode 100644
index 000000000..48fd19a4a
--- /dev/null
+++ b/simba/sandbox/relative_risk.py
@@ -0,0 +1,64 @@
+import pandas as pd
+
+from simba.utils.checks import check_valid_array
+import numpy as np
+from numba import jit
+
+def relative_risk(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Calculate the relative risk between two binary arrays.
+
+    Relative risk (RR) is the ratio of the probability of an event occurring in one group/feature/cluster/variable (x)
+    to the probability of the event occurring in another group/feature/cluster/variable (y).
+
+    :param np.ndarray x: The first 1D binary array.
+    :param np.ndarray y: The second 1D binary array.
+    :return float: The relative risk between arrays x and y.
+
+    :example:
+    >>> relative_risk(x=np.array([0, 1, 1]), y=np.array([0, 1, 0]))
+    >>> 2.0
+    """
+    check_valid_array(data=x, source=f'{relative_risk.__name__} x', accepted_ndims=(1,), accepted_values=[0, 1])
+    check_valid_array(data=y, source=f'{relative_risk.__name__} y', accepted_ndims=(1,), accepted_values=[0, 1])
+    if np.sum(y) == 0:
+        return -1.0
+    elif np.sum(x) == 0:
+        return 0.0
+    else:
+        return (np.sum(x) / x.shape[0]) / (np.sum(y) / y.shape[0])
+
+
+@jit(nopython=True)
+def sliding_relative_risk(x: np.ndarray, y: np.ndarray, window_sizes: np.ndarray, sample_rate: int) -> np.ndarray:
+    """
+    Calculate sliding relative risk values between two binary arrays using different window sizes.
+
+    :param np.ndarray x: The first 1D binary array.
+    :param np.ndarray y: The second 1D binary array.
+    :param np.ndarray window_sizes: 1D array of window sizes in seconds.
+    :param int sample_rate: Number of samples per second (e.g., the video FPS).
+    :return np.ndarray: Array of size x.shape[0] x window_sizes.shape[0] with sliding relative risk values.
+    """
+    results = np.full((x.shape[0], window_sizes.shape[0]), -1.0)
+    for i in range(window_sizes.shape[0]):
+        window_size = int(window_sizes[i] * sample_rate)
+        for l, r in zip(range(0, x.shape[0] + 1), range(window_size, x.shape[0] + 1)):
+            sample_x, sample_y = x[l:r], y[l:r]
+            if np.sum(sample_y) == 0:
+                results[r - 1, i] = -1.0
+            elif np.sum(sample_x) == 0:
+                results[r - 1, i] = 0.0
+            else:
+                results[r - 1, i] = (np.sum(sample_x) / sample_x.shape[0]) / (np.sum(sample_y) / sample_y.shape[0])
+    return results
+
+
+x = np.array([0, 1, 1, 0])
+y = np.array([0, 1, 0, 0])
+sliding_relative_risk(x=x, y=y, window_sizes=np.array([1.0]), sample_rate=2)
\ No newline at end of file
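A hedged usage sketch for sliding_relative_risk above (assuming the module is on the path): each window size, given in seconds, produces one output column, and positions where the window is not yet full keep the -1.0 fill value.

import numpy as np
x = np.array([0, 1, 1, 0, 1, 1, 0, 0])
y = np.array([0, 1, 0, 0, 1, 0, 1, 0])
out = sliding_relative_risk(x=x, y=y, window_sizes=np.array([1.0, 2.0]), sample_rate=2)
print(out.shape)  # (8, 2): one row per sample, one column per window size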
diff --git a/simba/sandbox/reverse_popup.py b/simba/sandbox/reverse_popup.py
new file mode 100644
index 000000000..fbc87e608
--- /dev/null
+++ b/simba/sandbox/reverse_popup.py
@@ -0,0 +1,124 @@
+__author__ = "Simon Nilsson"
+
+import glob
+import os
+import subprocess
+import sys
+import threading
+from copy import deepcopy
+from datetime import datetime
+from tkinter import *
+from typing import Optional, Union
+
+import numpy as np
+from PIL import Image, ImageTk
+
+import simba
+from simba.labelling.extract_labelled_frames import AnnotationFrameExtractor
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.plotting.frame_mergerer_ffmpeg import FrameMergererFFmpeg
+from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon,
+                                        CreateToolTip, DropDownMenu, Entry_Box,
+                                        FileSelect, FolderSelect)
+from simba.utils.checks import (check_ffmpeg_available,
+                                check_file_exist_and_readable,
+                                check_if_dir_exists,
+                                check_if_filepath_list_is_empty,
+                                check_if_string_value_is_valid_video_timestamp,
+                                check_int, check_nvidea_gpu_available,
+                                check_str,
+                                check_that_hhmmss_start_is_before_end)
+from simba.utils.data import convert_roi_definitions
+from simba.utils.enums import Dtypes, Formats, Keys, Links, Options, Paths
+from simba.utils.errors import (CountError, DuplicationError, FrameRangeError,
+                                InvalidInputError, MixedMosaicError,
+                                NoChoosenClassifierError, NoFilesFoundError,
+                                NotDirectoryError)
+from simba.utils.lookups import get_color_dict, get_fonts
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import (
+    check_if_hhmmss_timestamp_is_valid_part_of_video,
+    concatenate_videos_in_folder, find_all_videos_in_directory,
+    find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data,
+    seconds_to_timestamp, str_2_bool)
+from simba.video_processors.brightness_contrast_ui import \
+    brightness_contrast_ui
+from simba.video_processors.clahe_ui import interactive_clahe_ui
+from simba.video_processors.extract_seqframes import extract_seq_frames
+from simba.video_processors.multi_cropper import MultiCropper
+from simba.video_processors.px_to_mm import get_coordinates_nilsson
+from simba.video_processors.video_processing import (
+    VideoRotator, batch_convert_video_format, batch_create_frames,
+    batch_video_to_greyscale, change_fps_of_multiple_videos, change_img_format,
+    change_single_video_fps, clahe_enhance_video, clip_video_in_range,
+    clip_videos_by_frame_ids, convert_to_avi, convert_to_bmp, convert_to_jpeg,
+    convert_to_mov, convert_to_mp4, convert_to_png, convert_to_tiff,
+    convert_to_webm, convert_to_webp, reverse_videos,
+    convert_video_powerpoint_compatible_format, copy_img_folder,
+    crop_multiple_videos, crop_multiple_videos_circles,
+    crop_multiple_videos_polygons,
crop_single_video, crop_single_video_circle, + crop_single_video_polygon, downsample_video, extract_frame_range, + extract_frames_single_video, frames_to_movie, gif_creator, + multi_split_video, remove_beginning_of_video, resize_videos_by_height, + resize_videos_by_width, roi_blurbox, superimpose_elapsed_time, + superimpose_frame_count, superimpose_freetext, superimpose_overlay_video, + superimpose_video_names, superimpose_video_progressbar, + video_bg_subtraction_mp, video_bg_subtraction, video_concatenator, + video_to_greyscale, watermark_video, rotate_video, flip_videos, upsample_fps) + +sys.setrecursionlimit(10**7) + + +class ReverseVideoPopUp(PopUpMixin): + def __init__(self): + PopUpMixin.__init__(self, title="REVERSE VIDEOS") + settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.MP4_CODEC_LK = {'HEVC (H.265)': 'libx265', 'H.264 (AVC)': 'libx264', 'VP9': 'vp9'} + self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY:", list(range(10, 110, 10)), labelwidth=25) + self.quality_dropdown.setChoices(60) + self.codec_dropdown = DropDownMenu(settings_frm, "COMPRESSION CODEC:", list(self.MP4_CODEC_LK.keys()), labelwidth=25) + self.codec_dropdown.setChoices('HEVC (H.265)') + settings_frm.grid(row=0, column=0, sticky=NW) + self.quality_dropdown.grid(row=0, column=0, sticky=NW) + self.codec_dropdown.grid(row=1, column=0, sticky=NW) + + single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - REVERSE", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)]) + single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False)) + + single_video_frm.grid(row=1, column=0, sticky="NW") + self.selected_video.grid(row=0, column=0, sticky="NW") + single_video_run.grid(row=1, column=0, sticky="NW") + + multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - REVERSE", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25) + multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True)) + + multiple_videos_frm.grid(row=2, column=0, sticky="NW") + self.selected_video_dir.grid(row=0, column=0, sticky="NW") + multiple_videos_run.grid(row=1, column=0, sticky="NW") + #self.main_frm.mainloop() + + def run(self, multiple: bool): + target_quality = int(self.quality_dropdown.getChoices()) + codec = self.MP4_CODEC_LK[self.codec_dropdown.getChoices()] + if not multiple: + data_path = self.selected_video.file_path + check_file_exist_and_readable(file_path=data_path) + else: + data_path = self.selected_video_dir.folder_path + check_if_dir_exists(in_dir=data_path) + + reverse_videos(path=data_path, quality=target_quality, codec=codec) + # + # threading.Thread(target=reverse_videos + # quality=target_quality)).start() + + +ReverseVideoPopUp() + + + + + diff --git a/simba/sandbox/reverse_videos.py b/simba/sandbox/reverse_videos.py new file mode 100644 index 000000000..8ecff0c50 --- /dev/null +++ b/simba/sandbox/reverse_videos.py @@ -0,0 +1,114 @@ +__author__ = "Simon Nilsson" + + +import 
functools +import glob +import multiprocessing +import os +import platform +import shutil +import subprocess +import time +from copy import deepcopy +from datetime import datetime +from tkinter import * +from typing import Any, Dict, List, Optional, Tuple, Union + +import cv2 +import numpy as np +from PIL import Image, ImageTk +from shapely.geometry import Polygon + +try: + from typing import Literal +except: + from typing_extensions import Literal + +import simba +from simba.mixins.config_reader import ConfigReader +from simba.mixins.image_mixin import ImageMixin +from simba.utils.checks import (check_ffmpeg_available, + check_file_exist_and_readable, check_float, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_instance, check_int, + check_nvidea_gpu_available, check_str, + check_that_hhmmss_start_is_before_end, + check_valid_lst, check_valid_tuple) +from simba.utils.data import find_frame_numbers_from_time_stamp +from simba.utils.enums import OS, ConfigKey, Formats, Options, Paths +from simba.utils.errors import (CountError, DirectoryExistError, + FFMPEGCodecGPUError, FFMPEGNotFoundError, + FileExistError, FrameRangeError, + InvalidFileTypeError, InvalidInputError, + InvalidVideoFileError, NoDataError, + NoFilesFoundError, NotDirectoryError) +from simba.utils.lookups import (get_ffmpeg_crossfade_methods, get_fonts, + percent_to_crf_lookup, percent_to_qv_lk) +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, find_core_cnt, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + read_config_entry, read_config_file, read_frm_of_video) +from simba.utils.warnings import (FileExistWarning, InValidUserInputWarning, + SameInputAndOutputWarning) +from simba.video_processors.extract_frames import video_to_frames +from simba.video_processors.roi_selector import ROISelector +from simba.video_processors.roi_selector_circle import ROISelectorCircle +from simba.video_processors.roi_selector_polygon import ROISelectorPolygon + +MAX_FRM_SIZE = 1080, 650 + +def reverse_videos(path: Union[str, os.PathLike], + save_dir: Optional[Union[str, os.PathLike]] = None, + quality: Optional[int] = 60) -> None: + """ + Reverses one or more video files located at the specified path and saves the reversed videos in the specified + directory. + + .. video:: _static/img/reverse_videos.webm + :width: 800 + :loop: + + :param Union[str, os.PathLike] path: Path to the video file or directory containing video files to be reversed. + :param Optional[Union[str, os.PathLike]] save_dir: Directory to save the reversed videos. If not provided, reversed videos will be saved in a subdirectory named 'reversed_' in the same directory as the input file(s). + :param Optional[int] quality: Output video quality expressed as a percentage. Default is 60. Values range from 1 (low quality, high compression) to 100 (high quality, low compression). 
+    :return: None
+
+    :example:
+    >>> reverse_videos(path='/Users/simon/Desktop/envs/simba/troubleshooting/open_field_below/project_folder/videos/reverse/TheVideoName_video_name_2_frame_no.mp4')
+    """
+
+    timer = SimbaTimer(start=True)
+    check_ffmpeg_available(raise_error=True)
+    check_instance(source=f'{reverse_videos.__name__} path', instance=path, accepted_types=(str,))
+    check_int(name=f'{reverse_videos.__name__} quality', value=quality)
+    datetime_ = datetime.now().strftime("%Y%m%d%H%M%S")
+    crf_lk = percent_to_crf_lookup()
+    crf = crf_lk[str(quality)]
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir, source=reverse_videos.__name__)
+    if os.path.isfile(path):
+        file_paths = [path]
+        if save_dir is None:
+            save_dir = os.path.join(os.path.dirname(path), f'reversed_{datetime_}')
+            os.makedirs(save_dir)
+    elif os.path.isdir(path):
+        file_paths = find_files_of_filetypes_in_directory(directory=path, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True)
+        if save_dir is None:
+            save_dir = os.path.join(path, f'reversed_{datetime_}')
+            os.makedirs(save_dir)
+    else:
+        raise InvalidInputError(msg=f'{path} is not a valid file or directory path.', source=reverse_videos.__name__)
+    for file_cnt, file_path in enumerate(file_paths):
+        _, video_name, ext = get_fn_ext(filepath=file_path)
+        print(f'Reversing video {video_name} (Video {file_cnt+1}/{len(file_paths)})...')
+        _ = get_video_meta_data(video_path=file_path)
+        out_path = os.path.join(save_dir, f'{video_name}{ext}')
+        cmd = f'ffmpeg -i "{file_path}" -vf reverse -af areverse -c:v libx264 -crf {crf} "{out_path}" -loglevel error -stats -hide_banner -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(file_paths)} video(s) reversed and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=reverse_videos.__name__,)
+
diff --git a/simba/sandbox/roi_definition_csvs_to_h5.py b/simba/sandbox/roi_definition_csvs_to_h5.py
new file mode 100644
index 000000000..bf403de70
--- /dev/null
+++ b/simba/sandbox/roi_definition_csvs_to_h5.py
@@ -0,0 +1,53 @@
+import pandas as pd
+import warnings
+warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
+import os
+import ast
+import numpy as np
+from simba.roi_tools.ROI_multiply import create_emty_df
+from simba.utils.checks import check_file_exist_and_readable, check_if_dir_exists
+
+RECTANGLES_CSV_PATH = '/Users/simon/Desktop/test/rectangles_20240314085952.csv'
+POLYGONS_CSV_PATH = '/Users/simon/Desktop/test/polygons_20240314085952.csv'
+CIRCLES_CSV_PATH = None
+SAVE_DIRECTORY = '/Users/simon/Desktop/test'
+
+
+##########################################################################################################################
+check_if_dir_exists(in_dir=SAVE_DIRECTORY)
+save_path = os.path.join(SAVE_DIRECTORY, 'ROI_definitions.h5')
+store = pd.HDFStore(save_path, mode="w")
+if RECTANGLES_CSV_PATH is not None:
+    check_file_exist_and_readable(file_path=RECTANGLES_CSV_PATH)
+    r = pd.read_csv(RECTANGLES_CSV_PATH, index_col=0).to_dict(orient='records')
+    for i in range(len(r)):
+        r[i]['Color BGR'] = ast.literal_eval(r[i]['Color BGR'])
+        r[i]['Tags'] = ast.literal_eval(r[i]['Tags'])
+else:
+    r = create_emty_df(shape_type='rectangles')
+r = pd.DataFrame.from_dict(r)
+
+if POLYGONS_CSV_PATH is not None:
+    p = pd.read_csv(POLYGONS_CSV_PATH, index_col=0).to_dict(orient='records')
+    for i in range(len(p)):
+        p[i]['Color BGR'] = ast.literal_eval(p[i]['Color BGR'])
+        p[i]['Tags'] =
ast.literal_eval(p[i]['Tags']) + p[i]['vertices'] = np.fromstring(p[i]['vertices'].replace('\n', '').replace('[', '').replace(']', '').replace(' ', ' '), sep=' ').reshape((-1, 2)).astype(np.int32) +else: + p = create_emty_df(shape_type='polygons') +p = pd.DataFrame.from_dict(p) + +if CIRCLES_CSV_PATH is not None: + c = pd.read_csv(CIRCLES_CSV_PATH, index_col=0) + for i in range(len(c)): + c[i]['Color BGR'] = ast.literal_eval(c[i]['Color BGR']) + c[i]['Tags'] = ast.literal_eval(c[i]['Tags']) +else: + c = create_emty_df(shape_type='circlesDf') +c = pd.DataFrame.from_dict(c) + +store["rectangles"] = r +store["circleDf"] = c +store["polygons"] = p +store.close() +print(f'ROI CSV definitions joined and saved at {save_path}') diff --git a/simba/sandbox/roi_definition_csvs_to_h5.py.zip b/simba/sandbox/roi_definition_csvs_to_h5.py.zip new file mode 100644 index 0000000000000000000000000000000000000000..1e496dd449275eedbdded5a6ad3f6a7d4eb42d13 GIT binary patch literal 1016 zcmWIWW@Zs#-~dA2@Vp2HD5&FLV31)@U?|GZj892T%goCx$;{7-PcAMijxWiN&oI?1 zs0>}JghA@)TwdmdLlo@t)%=C`)6F=Czf zJI;T(Gfy1*HrZPD>k;+S&hI|DIjV*-xbNh+^g?N>aIVY}S*41G&ce@!rde8~&pEGu zF!Y%3PO%SLIVb5ZmfmkuC_l~puff*OGg`NKK0Pz#LHGU2cReWwC7yo1cvNz>-3o=Z zvnI28SR9xx>2r9_)T>(W6g`e~SPOjrchS1ea1Ep2{w4f8+UXJtpWGKM5xc%AX=zDs zv;U-vCm;O!@#o9slcLfwwTaV@UzhiEy!0-e|6KV;-Nn0|KYTxUBacCT7oW?6sznDj z*RB7*>-(z@rSH#PyuG+i?~jhJp1$4PIdOCB&1acr*%?+H*`Tu%{JzB4MUrH1ecetD{ zC0;fw&3{_q?Sk880z$efXRVGj>ptSte{B}@>(d73Uy;+2U)>1os`QYJ`xX$-eIzW#gvYE#79O%;CA&F5?qt1F*=wC|zd?@r-MoO9+R zww~#{dxkS*U0lQC=H-Rf9i2Ot?(H;LleIb`YI^U=wc9p6bCTp2iEee`-qQ7Zn!?tU z)aR3S%v`9lL)i1qrV6en-yiS$+x+q$Yk)U9hgeI%T0Uk522kz_@MdHZVMgRUWI0gI dgMlTDAQmP0Fu None: + + """ + Converts SimBA roi definitions into annotations and images for training yolo network. + + :param Optional[Union[str, os.PathLike]] config_path: Optional path to the project config file in SimBA project. + :param Optional[Union[str, os.PathLike]] roi_path: Path to the SimBA roi definitions .h5 file. If None, then the ``roi_coordinates_path`` of the project. + :param Optional[Union[str, os.PathLike]] video_dir: Directory where to find the videos. If None, then the videos folder of the project. + :param Optional[Union[str, os.PathLike]] save_dir: Directory where to save the labels and images. If None, then the logs folder of the project. + :param Optional[int] roi_frm_cnt: Number of frames for each video to create bounding boxes for. + :param Optional[bool] obb: If True, created object-oriented yolo bounding boxes. Else, axis aligned yolo bounding boxes. Default False. + :param Optional[bool] greyscale: If True, converts the images to greyscale if rgb. Default: True. 
+ :return: None + + :example I: + >>> simba_rois_to_yolo(config_path=r"C:\troubleshooting\RAT_NOR\project_folder\project_config.ini") + + :example II: + >>> simba_rois_to_yolo(config_path=r"C:\troubleshooting\RAT_NOR\project_folder\project_config.ini", save_dir=r"C:\troubleshooting\RAT_NOR\project_folder\logs\yolo", video_dir=r"C:\troubleshooting\RAT_NOR\project_folder\videos", roi_path=r"C:\troubleshooting\RAT_NOR\project_folder\logs\measures\ROI_definitions.h5") + """ + + if roi_path is None or video_dir is None or save_dir is None: + config = ConfigReader(config_path=config_path) + roi_path = config.roi_coordinates_path + video_dir = config.video_dir + save_dir = config.logs_path + check_int(name=f'{simba_rois_to_yolo.__name__} roi_frm_cnt', value=roi_frm_cnt, min_value=1) + check_valid_boolean(value=[obb,greyscale], source=f'{simba_rois_to_yolo.__name__} obb') + roi_data = read_roi_data(roi_path=roi_path) + roi_geometries = GeometryMixin.simba_roi_to_geometries(rectangles_df=roi_data[0], circles_df=roi_data[1], polygons_df=roi_data[2])[0] + roi_geometries_rectangles = {} + roi_ids, roi_cnt = {}, 0 + save_img_dir = os.path.join(save_dir, 'images') + save_labels_dir = os.path.join(save_dir, 'labels') + if not os.path.isdir(save_img_dir): os.makedirs(save_img_dir) + if not os.path.isdir(save_labels_dir): os.makedirs(save_labels_dir) + for video_name, roi_data in roi_geometries.items(): + roi_geometries_rectangles[video_name] = {} + for roi_name, roi in roi_data.items(): + if obb: + roi_geometries_rectangles[video_name][roi_name] = GeometryMixin.minimum_rotated_rectangle(shape=roi) + else: + keypoints = np.array(roi.exterior.coords).astype(np.int32).reshape(1, -1, 2) + roi_geometries_rectangles[video_name][roi_name] = Polygon(GeometryMixin.keypoints_to_axis_aligned_bounding_box(keypoints=keypoints)[0]) + if roi_name not in roi_ids.keys(): + roi_ids[roi_name] = roi_cnt + roi_cnt += 1 + + roi_results = {} + img_results = {} + for video_name, roi_data in roi_geometries.items(): + roi_results[video_name] = {} + img_results[video_name] = [] + video_path = find_video_of_file(video_dir=video_dir, filename=video_name) + video_meta_data = get_video_meta_data(video_path) + if roi_frm_cnt > video_meta_data['frame_count']: + roi_frm_cnt = video_meta_data['frame_count'] + cap = cv2.VideoCapture(video_path) + frm_idx = np.sort(np.random.choice(np.arange(0, video_meta_data['frame_count']), size=roi_frm_cnt)) + for idx in frm_idx: + img_results[video_name].append(read_frm_of_video(video_path=cap, frame_index=idx, greyscale=greyscale)) + w, h = video_meta_data['width'], video_meta_data['height'] + for roi_name, roi in roi_data.items(): + roi_id = roi_ids[roi_name] + if not obb: + shape_stats = GeometryMixin.get_shape_statistics(shapes=roi) + x_center = shape_stats['centers'][0][0] / w + y_center = shape_stats['centers'][0][1] / h + width = shape_stats['widths'][0] / w + height = shape_stats['lengths'][0] / h + roi_str = ' '.join([str(roi_id), str(x_center), str(y_center), str(width), str(height)]) + else: + img_geometry = np.array(roi.exterior.coords).astype(np.int32)[1:] + x1, y1 = img_geometry[0][0] / w, img_geometry[0][1] / h + x2, y2 = img_geometry[1][0] / w, img_geometry[1][1] / h + x3, y3 = img_geometry[2][0] / w, img_geometry[2][1] / h + x4, y4 = img_geometry[3][0] / w, img_geometry[3][1] / h + roi_str = ' '.join([str(roi_id), str(x1), str(y1), str(x2), str(y2), str(x3), str(y3), str(x4), str(y4)]) + roi_results[video_name][roi_name] = roi_str + + for video_name, imgs in img_results.items(): + for 
img_cnt, img in enumerate(imgs):
+            img_save_path = os.path.join(save_img_dir, f'{video_name}_{img_cnt}.png')
+            cv2.imwrite(img_save_path, img)
+            label_save_path = os.path.join(save_labels_dir, f'{video_name}_{img_cnt}.txt')
+            x = list(roi_results[video_name].values())
+            with open(label_save_path, mode='wt', encoding='utf-8') as f:
+                f.write('\n'.join(x))
+
+simba_rois_to_yolo(config_path=r"C:\troubleshooting\RAT_NOR\project_folder\project_config.ini",
+                   save_dir=r"C:\troubleshooting\RAT_NOR\project_folder\logs\yolo",
+                   video_dir=r"C:\troubleshooting\RAT_NOR\project_folder\videos",
+                   roi_path=r"C:\troubleshooting\RAT_NOR\project_folder\logs\measures\ROI_definitions.h5")
\ No newline at end of file
diff --git a/simba/sandbox/rotate image.py b/simba/sandbox/rotate image.py
new file mode 100644
index 000000000..de16753bc
--- /dev/null
+++ b/simba/sandbox/rotate image.py
@@ -0,0 +1,39 @@
+import time
+
+import numpy as np
+from numba import njit, uint8, types, bool_
+import cv2
+
+
+@njit([(uint8[:,:,:], bool_)])
+def rotate_img(img: np.ndarray, right: bool) -> np.ndarray:
+    """
+    Rotate a color image 90 degrees to the left or right.
+
+    .. image:: _static/img/rotate_img.png
+       :width: 600
+       :align: center
+
+    :param np.ndarray img: Input image as numpy array in uint8 format.
+    :param bool right: If True, rotates 90 degrees to the right (clockwise). If False, rotates 90 degrees to the left (counter-clockwise).
+    :returns: The rotated image as a numpy array of uint8 format.
+
+    :example:
+    >>> img = cv2.imread('/Users/simon/Desktop/test.png')
+    >>> rotated_img = rotate_img(img=img, right=False)
+    """
+
+    if right:
+        img = np.transpose(img[:, ::-1, :], axes=(1, 0, 2))
+    else:
+        img = np.transpose(img[::-1, :, :], axes=(1, 0, 2))
+    return np.ascontiguousarray(img).astype(np.uint8)
+
+
+img = cv2.imread('/Users/simon/Desktop/test.png')
+start = time.time()
+for i in range(10000):
+    rotated_img = rotate_img(img=img, right=True)
+print(time.time() - start)
+# cv2.imshow('sdsf', rotated_img)
+# cv2.waitKey(5000)
\ No newline at end of file
diff --git a/simba/sandbox/rotate.py b/simba/sandbox/rotate.py
new file mode 100644
index 000000000..e89deed76
--- /dev/null
+++ b/simba/sandbox/rotate.py
@@ -0,0 +1,353 @@
+import functools
+import glob
+import multiprocessing
+import os
+import platform
+import shutil
+import subprocess
+import time
+from copy import deepcopy
+from datetime import datetime
+from tkinter import *
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import cv2
+import numpy as np
+from PIL import Image, ImageTk
+from shapely.geometry import Polygon
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+from simba.mixins.config_reader import ConfigReader
+from simba.mixins.image_mixin import ImageMixin
+from simba.utils.checks import (check_ffmpeg_available,
+                                check_file_exist_and_readable, check_float,
+                                check_if_dir_exists,
+                                check_if_filepath_list_is_empty,
+                                check_if_string_value_is_valid_video_timestamp,
+                                check_instance, check_int,
+                                check_nvidea_gpu_available, check_str,
+                                check_that_hhmmss_start_is_before_end,
+                                check_valid_lst, check_valid_tuple)
+from simba.utils.data import find_frame_numbers_from_time_stamp
+from simba.utils.enums import OS, ConfigKey, Formats, Options, Paths
+from simba.utils.errors import (CountError, DirectoryExistError,
+                                FFMPEGCodecGPUError, FFMPEGNotFoundError,
+                                FileExistError, FrameRangeError,
+                                InvalidFileTypeError, InvalidInputError,
+                                InvalidVideoFileError, NoDataError,
+                                NoFilesFoundError, NotDirectoryError)
+from simba.utils.lookups import
(get_ffmpeg_crossfade_methods, get_fonts, + percent_to_crf_lookup, percent_to_qv_lk) +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, find_core_cnt, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + read_config_entry, read_config_file, read_frm_of_video) +from simba.utils.warnings import (FileExistWarning, InValidUserInputWarning, + SameInputAndOutputWarning) +from simba.video_processors.extract_frames import video_to_frames +from simba.video_processors.roi_selector import ROISelector +from simba.video_processors.roi_selector_circle import ROISelectorCircle +from simba.video_processors.roi_selector_polygon import ROISelectorPolygon +__author__ = "Simon Nilsson" + +import glob +import os +import subprocess +import sys +import threading +from copy import deepcopy +from datetime import datetime +from tkinter import * +from typing import Optional, Union + +import numpy as np +from PIL import Image, ImageTk + +import simba +from simba.labelling.extract_labelled_frames import AnnotationFrameExtractor +from simba.mixins.config_reader import ConfigReader +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.plotting.frame_mergerer_ffmpeg import FrameMergererFFmpeg +from simba.ui.tkinter_functions import (CreateLabelFrameWithIcon, + CreateToolTip, DropDownMenu, Entry_Box, + FileSelect, FolderSelect) +from simba.utils.checks import (check_ffmpeg_available, + check_file_exist_and_readable, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_int, check_nvidea_gpu_available, + check_str, + check_that_hhmmss_start_is_before_end) +from simba.utils.data import convert_roi_definitions +from simba.utils.enums import Dtypes, Formats, Keys, Links, Options, Paths +from simba.utils.errors import (CountError, DuplicationError, FrameRangeError, + InvalidInputError, MixedMosaicError, + NoChoosenClassifierError, NoFilesFoundError, + NotDirectoryError) +from simba.utils.lookups import get_color_dict, get_fonts +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + seconds_to_timestamp, str_2_bool) +from simba.video_processors.brightness_contrast_ui import \ + brightness_contrast_ui +from simba.video_processors.clahe_ui import interactive_clahe_ui +from simba.video_processors.extract_seqframes import extract_seq_frames +from simba.video_processors.multi_cropper import MultiCropper +from simba.video_processors.px_to_mm import get_coordinates_nilsson +from simba.video_processors.video_processing import ( + VideoRotator, batch_convert_video_format, batch_create_frames, + batch_video_to_greyscale, change_fps_of_multiple_videos, change_img_format, + change_single_video_fps, clahe_enhance_video, clip_video_in_range, + clip_videos_by_frame_ids, convert_to_avi, convert_to_bmp, convert_to_jpeg, + convert_to_mov, convert_to_mp4, convert_to_png, convert_to_tiff, + convert_to_webm, convert_to_webp, + convert_video_powerpoint_compatible_format, copy_img_folder, + crop_multiple_videos, crop_multiple_videos_circles, + crop_multiple_videos_polygons, crop_single_video, crop_single_video_circle, + crop_single_video_polygon, downsample_video, 
extract_frame_range,
+    extract_frames_single_video, frames_to_movie, gif_creator,
+    multi_split_video, remove_beginning_of_video, resize_videos_by_height,
+    resize_videos_by_width, roi_blurbox, superimpose_elapsed_time,
+    superimpose_frame_count, superimpose_freetext, superimpose_overlay_video,
+    superimpose_video_names, superimpose_video_progressbar,
+    video_bg_subtraction_mp, video_bg_subtraction, video_concatenator,
+    video_to_greyscale, watermark_video, rotate_video, flip_videos)
+
+sys.setrecursionlimit(10**7)
+#
+#
+#
+#
+def rotate_video(video_path: Union[str, os.PathLike],
+                 degrees: int,
+                 gpu: Optional[bool] = False,
+                 quality: Optional[int] = 60,
+                 save_dir: Optional[Union[str, os.PathLike]] = None):
+
+    """
+    Rotate a video or a directory of videos by a specified number of degrees.
+
+    :param Union[str, os.PathLike] video_path: Path to the input video file or directory containing video files.
+    :param int degrees: Number of degrees (between 1 and 359, inclusive) to rotate the video clockwise.
+    :param Optional[bool] gpu: If True, attempt to use GPU acceleration for rotation (default is False).
+    :param Optional[int] quality: Quality of the output video, an integer between 1 and 100 (default is 60).
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory to save the rotated video(s). If None, the directory of the input video(s) will be used.
+    :return: None.
+
+    :example:
+    >>> rotate_video(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/reptile/rot_test.mp4', degrees=180)
+    """
+
+    check_ffmpeg_available(raise_error=True)
+    timer = SimbaTimer(start=True)
+    check_int(name=f'{rotate_video.__name__} degrees', value=degrees, min_value=1, max_value=359)
+    check_int(name=f'{rotate_video.__name__} quality', value=quality, min_value=1, max_value=100)
+    if gpu and not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="No GPU found (as evaluated by nvidia-smi returning None)", source=rotate_video.__name__)
+    crf_lk = percent_to_crf_lookup()
+    crf = crf_lk[str(quality)]
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=rotate_video.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, ext = get_fn_ext(video_path)
+        print(f'Rotating video {video_name} {degrees} degrees (Video {file_cnt + 1}/{len(video_paths)})...')
+        save_path = os.path.join(save_dir, f'{video_name}_rotated{ext}')
+        if gpu:
+            cmd = f'ffmpeg -hwaccel auto -i "{video_path}" -vf "hwupload_cuda,rotate={degrees}*(PI/180),format=nv12|cuda" -c:v h264_nvenc "{save_path}" -loglevel error -stats -y'
+        else:
+            cmd = f'ffmpeg -i "{video_path}" -vf "rotate={degrees}*(PI/180)" -c:v libx264 -crf {crf} "{save_path}" -loglevel error -stats -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f"{len(video_paths)} video(s) rotated {degrees} degrees and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=rotate_video.__name__,)
+
+
+
+# class RotateVideoSetDegreesPopUp(PopUpMixin):
+#     def __init__(self):
+#         PopUpMixin.__init__(self, title="ROTATE VIDEOS")
+#         settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm,
header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) +# self.degrees_dropdown = DropDownMenu(settings_frm, "CLOCKWISE DEGREES:", list(range(1, 360, 1)), labelwidth=25) +# self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY (%):", list(range(10, 110, 10)), labelwidth=25) +# self.quality_dropdown.setChoices(60) +# self.degrees_dropdown.setChoices('90') +# self.degrees_dropdown.grid(row=0, column=0, sticky=NW) +# +# settings_frm.grid(row=0, column=0, sticky="NW") +# self.degrees_dropdown.grid(row=0, column=0, sticky="NW") +# self.quality_dropdown.grid(row=1, column=0, sticky="NW") +# +# single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - SUPERIMPOSE TEXT", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) +# self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)]) +# single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False)) +# +# single_video_frm.grid(row=1, column=0, sticky="NW") +# self.selected_video.grid(row=0, column=0, sticky="NW") +# single_video_run.grid(row=1, column=0, sticky="NW") +# +# multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - SUPERIMPOSE TEXT", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) +# self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25) +# multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True)) +# +# multiple_videos_frm.grid(row=2, column=0, sticky="NW") +# self.selected_video_dir.grid(row=0, column=0, sticky="NW") +# multiple_videos_run.grid(row=1, column=0, sticky="NW") +# self.main_frm.mainloop() +# +# +# def run(self, multiple: bool): +# degrees = int(self.degrees_dropdown.getChoices()) +# quality = int(self.quality_dropdown.getChoices()) +# if not multiple: +# data_path = self.selected_video.file_path +# check_file_exist_and_readable(file_path=data_path) +# else: +# data_path = self.selected_video_dir.folder_path +# check_if_dir_exists(in_dir=data_path) +# +# threading.Thread(target=rotate_video(video_path=data_path, +# degrees=degrees, +# quality=quality)).start() + +#RotateVideoSetDegreesPopUp() + + +# +# def flip_videos(video_path: Union[str, os.PathLike], +# horizontal_flip: Optional[bool] = False, +# vertical_flip: Optional[bool] = False, +# quality: Optional[int] = 60, +# save_dir: Optional[Union[str, os.PathLike]] = None): +# """ +# Flip a video or directory of videos horizontally, vertically, or both, and save them to the specified directory. +# +# .. video:: _static/img/overlay_video_progressbar.webm +# :width: 900 +# :loop: +# +# :param Union[str, os.PathLike] video_path: Path to the input video file or directory containing video files. +# :param Optional[bool] horizontal_flip: If True, flip the video(s) horizontally (default is False). +# :param Optional[bool] vertical_flip: If True, flip the video(s) vertically (default is False). +# :param Optional[int] quality: Quality of the output video, an integer between 1 and 100 (default is 60). +# :param Optional[Union[str, os.PathLike]] save_dir: Directory to save the flipped video(s). If None, the directory of the input video(s) will be used. +# :return: None. 
+#     """
+#
+#
+#     check_ffmpeg_available(raise_error=True)
+#     timer = SimbaTimer(start=True)
+#     check_int(name=f'{rotate_video.__name__} quality', value=quality, min_value=1, max_value=100)
+#     if not horizontal_flip and not vertical_flip: raise InvalidInputError(msg='Flip videos vertically and/or horizontally. Got both as False', source=flip_videos.__name__)
+#     crf_lk = percent_to_crf_lookup()
+#     crf = crf_lk[str(quality)]
+#     if os.path.isfile(video_path):
+#         video_paths = [video_path]
+#     elif os.path.isdir(video_path):
+#         video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+#     else:
+#         raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=flip_videos.__name__)
+#     if save_dir is not None:
+#         check_if_dir_exists(in_dir=save_dir)
+#     else:
+#         save_dir = os.path.dirname(video_paths[0])
+#     for file_cnt, video_path in enumerate(video_paths):
+#         _, video_name, ext = get_fn_ext(video_path)
+#         print(f'Flipping video {video_name} (Video {file_cnt + 1}/{len(video_paths)})...')
+#         save_path = os.path.join(save_dir, f'{video_name}_flipped{ext}')
+#         if vertical_flip and not horizontal_flip:
+#             cmd = f'ffmpeg -i "{video_path}" -vf "vflip" -c:v libx264 -crf {crf} "{save_path}" -loglevel error -stats -y'
+#         elif horizontal_flip and not vertical_flip:
+#             cmd = f'ffmpeg -i "{video_path}" -vf "hflip" -c:v libx264 -crf {crf} "{save_path}" -loglevel error -stats -y'
+#         else:
+#             cmd = f'ffmpeg -i "{video_path}" -vf "hflip,vflip" -c:v libx264 -crf {crf} "{save_path}" -loglevel error -stats -y'
+#         subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+#     timer.stop_timer()
+#     stdout_success(msg=f"{len(video_paths)} video(s) flipped and saved in {save_dir} directory.", elapsed_time=timer.elapsed_time_str, source=flip_videos.__name__,)
+#
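+# (Hedged illustrative sketch, not part of the original file:) threading.Thread must be
+# given a callable; writing `Thread(target=f(...))` calls f immediately and passes its
+# return value (None) as the target. The corrected pattern used in the class below is:
+def _thread_demo(msg: str) -> None:
+    print(msg)
+
+_t = threading.Thread(target=_thread_demo, kwargs={'msg': 'runs on a worker thread'})
+_t.start()
+_t.join()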
+class FlipVideosPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="FLIP VIDEOS")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.horizontal_dropdown = DropDownMenu(settings_frm, "HORIZONTAL FLIP:", ['TRUE', 'FALSE'], labelwidth=25)
+        self.vertical_dropdown = DropDownMenu(settings_frm, "VERTICAL FLIP:", ['TRUE', 'FALSE'], labelwidth=25)
+        self.quality_dropdown = DropDownMenu(settings_frm, "OUTPUT VIDEO QUALITY (%):", list(range(10, 110, 10)), labelwidth=25)
+
+        self.horizontal_dropdown.setChoices('FALSE')
+        self.vertical_dropdown.setChoices('FALSE')
+        self.quality_dropdown.setChoices(60)
+
+        settings_frm.grid(row=0, column=0, sticky="NW")
+        self.vertical_dropdown.grid(row=0, column=0, sticky="NW")
+        self.horizontal_dropdown.grid(row=1, column=0, sticky="NW")
+        self.quality_dropdown.grid(row=2, column=0, sticky="NW")
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - FLIP", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - FLIP", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+
+    def run(self, multiple: bool):
+        vertical_flip = str_2_bool(self.vertical_dropdown.getChoices())
+        horizontal_flip = str_2_bool(self.horizontal_dropdown.getChoices())
+        if not vertical_flip and not horizontal_flip:
+            raise InvalidInputError(msg='Flip videos vertically and/or horizontally. Got both as False', source=self.__class__.__name__)
+        quality = int(self.quality_dropdown.getChoices())
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+
+        threading.Thread(target=flip_videos, kwargs=dict(video_path=data_path,
+                                                         vertical_flip=vertical_flip,
+                                                         horizontal_flip=horizontal_flip,
+                                                         quality=quality)).start()
+
+FlipVideosPopUp()
+
+
+
+
+
+
+
+
+#flip_videos(vertical_flip=True, horizontal_flip=True, video_path=f'/Users/simon/Desktop/envs/simba/troubleshooting/reptile/flip_test/flip_1.mp4')
+
+
+
+
+
+
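A hedged usage sketch for the rotate_video function defined in rotate.py above (the paths are hypothetical): rotate every video in a folder 90 degrees clockwise and write the results to a separate directory.

rotate_video(video_path=r'C:\videos\raw', degrees=90, save_dir=r'C:\videos\rotated')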
diff --git a/simba/sandbox/rotate_example_nb_egocentric.py b/simba/sandbox/rotate_example_nb_egocentric.py
new file mode 100644
index 000000000..863fdfed5
--- /dev/null
+++ b/simba/sandbox/rotate_example_nb_egocentric.py
@@ -0,0 +1,82 @@
+
+
+
+#In this notebook, we will egocentrically align pose estimation and associated video data.
+
+# Egocentric alignment is a crucial preprocessing step with various applications across different domains.
+
+# One primary use case is in unsupervised deep learning scenarios, where images of animals are used as inputs to train algorithms.
+# Standardizing the orientation of the subject (e.g., an animal) ensures that the model and subsequent analyses do not incorrectly interpret the same behavior in different orientations
+# (e.g., grooming while facing north versus south) as distinct behaviors. By aligning the animal to a consistent frame of reference, we eliminate orientation as a confounding variable.
+
+# While egocentric alignment is an essential first step, it is often insufficient by itself for comprehensive analyses. Additional preprocessing steps are typically required, such as:
+#
+# * Background subtraction to isolate the animal from its surroundings (see relevant methods and notebooks).
+# * Geometric segmentation to slice out and focus on the subject's body parts (see associated code and notebooks).
+
+# In this notebook, we will focus exclusively on performing egocentric alignment. Further preprocessing steps are outlined in related materials.
+
+from simba.data_processors.egocentric_aligner import EgocentricalAligner
+
+#SETTINGS
+ANCHOR_POINT_1 = 'center' # Name of the body-part which is the "primary" anchor point around which the alignment centers. In rodents, this is often the center of the tail-base of the animal.
+ANCHOR_POINT_2 = 'nose' # The name of the secondary anchor point defining the alignment direction. This is often the anterior body-part; in rodents it can be the nose or nape.
+DIRECTION = 0 # The egocentric alignment angle, in degrees. For example, with `0` the animal's `ANCHOR_POINT_2` ends up directly to the east (right) of `ANCHOR_POINT_1`; with `180` the animal's `ANCHOR_POINT_2` ends up directly to the west (left) of `ANCHOR_POINT_1`.
+ANCHOR_LOCATION = (250, 250) # The pixel location in the video where `ANCHOR_POINT_1` should be placed. For example, if the videos are 500x500, 250x250 will place the anchor right in the middle.
+GPU = True # If we have an NVIDIA GPU available, we can use it to speed up processing. Otherwise set this to `False`.
+FILL_COLOR = (0, 0, 0) # We are rotating videos, while at the same time retaining the original video size. Therefore, there will be some "new" areas exposed in the video (see below for more info). This is the color to give these new areas.
+VERBOSE = False # If True, prints progress (like which frame and video is being processed etc). However, this information would flood this notebook, so it is turned off here.
+
+
+#DIRECTORY WHICH IS HOLDING POSE-ESTIMATION DATA
+DATA_DIRECTORY = r'C:\Users\sroni\OneDrive\Desktop\rotate_ex\data'
+
+#DIRECTORY WHICH IS HOLDING VIDEOS, ONE FOR EACH FILE IN THE DATA_DIRECTORY
+VIDEOS_DIRECTORY = r'C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos'
+
+#DIRECTORY WHERE WE SHOULD SAVE THE ROTATED POSE-ESTIMATION AND ROTATED VIDEOS.
+SAVE_DIRECTORY = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\rotated"
+
+
+# Now we are good to go: using the information above, we define an instance of a SimBA EgocentricalAligner and run it.
+aligner = EgocentricalAligner(anchor_1=ANCHOR_POINT_1,
+                              anchor_2=ANCHOR_POINT_2,
+                              data_dir=DATA_DIRECTORY,
+                              videos_dir=VIDEOS_DIRECTORY,
+                              save_dir=SAVE_DIRECTORY,
+                              direction=DIRECTION,
+                              gpu=GPU,
+                              anchor_location=ANCHOR_LOCATION,
+                              fill_clr=FILL_COLOR,
+                              verbose=VERBOSE)
+aligner.run()
+
+
+
+#EXAMPLE VIDEO EXPECTED RESULTS
+####
+
+#Now, let's change a few settings, to get a feeling for how it behaves..
+ANCHOR_LOCATION = (500, 100)
+FILL_COLOR = (255, 0, 0)
+DIRECTION = 180
+
+
+# ... we create a new instance based on the updated information above, and run it.
+aligner = EgocentricalAligner(anchor_1=ANCHOR_POINT_1,
+                              anchor_2=ANCHOR_POINT_2,
+                              data_dir=DATA_DIRECTORY,
+                              videos_dir=VIDEOS_DIRECTORY,
+                              save_dir=SAVE_DIRECTORY,
+                              direction=DIRECTION,
+                              gpu=GPU,
+                              anchor_location=ANCHOR_LOCATION,
+                              fill_clr=FILL_COLOR,
+                              verbose=VERBOSE)
+aligner.run()
+
+#EXAMPLE VIDEO EXPECTED RESULTS
+####
+
+
+
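As a hedged aside on the DIRECTION convention above: with direction=0, the anchor_1 to anchor_2 vector should point due east after alignment. A standalone numpy check of that convention (not SimBA code; plain 2D coordinates, ignoring the image y-axis flip):

import numpy as np

anchor_1, anchor_2 = np.array([250, 250]), np.array([250, 150])   # anchor_2 starts due north of anchor_1
theta = -np.arctan2(anchor_2[1] - anchor_1[1], anchor_2[0] - anchor_1[0])  # rotation that zeroes the heading
rot = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
aligned = anchor_1 + rot @ (anchor_2 - anchor_1)
print(np.round(aligned))  # [350. 250.]: anchor_2 now lies due east (right) of anchor_1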
diff --git a/simba/sandbox/runs_test.py b/simba/sandbox/runs_test.py
new file mode 100644
index 000000000..7b2c1ddaa
--- /dev/null
+++ b/simba/sandbox/runs_test.py
@@ -0,0 +1,36 @@
+import numpy as np
+from typing import Optional
+from scipy import stats
+
+def runs_test_one_sample(x: np.ndarray):
+    cutoff = np.mean(x)
+    xindicator = (x >= cutoff).astype(int)
+    runstart = np.nonzero(np.diff(np.r_[[-np.inf], xindicator, [np.inf]]))[0]
+    runs = np.diff(runstart)
+    runs_sign = xindicator[runstart[:-1]]
+    runs_pos = runs[runs_sign == 1]
+    runs_neg = runs[runs_sign == 0]
+    n_runs = len(runs)
+    npo = runs_pos.sum()
+    nne = runs_neg.sum()
+    n = npo + nne
+    npn = npo * nne
+    rmean = 2. * npn / n + 1
+    rvar = 2. * npn * (2. * npn - n) / n ** 2. / (n - 1.)
+    rstd = np.sqrt(rvar)
+    rdemean = n_runs - rmean
+    z = rdemean / rstd
+    return z
+
+
+
+x = np.random.randint(0, 2, (1000,))
+#runs_test_one_sample(x=x)
+
+
+x = np.zeros((100,))
+x = np.concatenate((x, np.ones((100,))))
+for i in range(1, x.shape[0], 2): x[i] = 1
+runs_test_one_sample(x=x)
\ No newline at end of file
diff --git a/simba/sandbox/seekable.py b/simba/sandbox/seekable.py
new file mode 100644
index 000000000..16dc96a53
--- /dev/null
+++ b/simba/sandbox/seekable.py
@@ -0,0 +1,58 @@
+from typing import Union, Optional
+import os
+import cv2
+import numpy as np
+
+from simba.utils.checks import check_file_exist_and_readable, check_instance
+
+from simba.utils.errors import InvalidInputError, CorruptedFileError
+from simba.utils.warnings import CorruptedFileWarning
+
+def check_if_video_corrupted(video: Union[str, os.PathLike, cv2.VideoCapture],
+                             frame_interval: Optional[int] = None,
+                             frame_n: Optional[int] = 20,
+                             raise_error: Optional[bool] = True) -> None:
+
+    """
+    Check if a video file is corrupted by inspecting a set of its frames.
+
+    :param Union[str, os.PathLike, cv2.VideoCapture] video: Path to the video file or cv2.VideoCapture OpenCV object.
+    :param Optional[int] frame_interval: Interval between frames to be checked. If None, ``frame_n`` will be used.
+    :param Optional[int] frame_n: Number of frames to be checked. If None, ``frame_interval`` will be used.
+    :param Optional[bool] raise_error: Whether to raise an error if corruption is found. If False, prints warning.
+    :return None:
+
+    :example:
+    >>> check_if_video_corrupted(video='/Users/simon/Downloads/NOR ENCODING FExMP8.mp4')
+    """
+    check_instance(source=f'{check_if_video_corrupted.__name__} video', instance=video, accepted_types=(str, cv2.VideoCapture))
+    if isinstance(video, str):
+        check_file_exist_and_readable(file_path=video)
+        cap = cv2.VideoCapture(video)
+    else:
+        cap = video
+    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    if (frame_interval is not None and frame_n is not None) or (frame_interval is None and frame_n is None):
+        raise InvalidInputError(msg='Pass frame_interval OR frame_n', source=check_if_video_corrupted.__name__)
+    if frame_interval is not None:
+        frms_to_check = list(range(0, frame_count, frame_interval))
+    else:
+        frms_to_check = np.array_split(np.arange(0, frame_count), frame_n)
+        frms_to_check = [x[-1] for x in frms_to_check]
+    errors = []
+    for frm_id in frms_to_check:
+        cap.set(1, frm_id)
+        ret, _ = cap.read()
+        if not ret: errors.append(frm_id)
+    if len(errors) > 0:
+        if raise_error:
+            raise CorruptedFileError(msg=f'Found {len(errors)} corrupted frame(s) at indexes {errors} in video {video}', source=check_if_video_corrupted.__name__)
+        else:
+            CorruptedFileWarning(msg=f'Found {len(errors)} corrupted frame(s) at indexes {errors} in video {video}', source=check_if_video_corrupted.__name__)
+    else:
+        pass
+
+
+#check_if_video_corrupted(video='/Users/simon/Downloads/NOR ENCODING FExMP8.mp4')
+
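A hedged usage sketch for check_if_video_corrupted above (the path is hypothetical). Note that frame_n defaults to 20, so it must be explicitly set to None when probing by interval instead:

check_if_video_corrupted(video='/videos/session_1.mp4', frame_interval=500, frame_n=None, raise_error=False)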
diff --git a/simba/sandbox/segment_image_horizontal.py b/simba/sandbox/segment_image_horizontal.py
new file mode 100644
index 000000000..127047e4b
--- /dev/null
+++ b/simba/sandbox/segment_image_horizontal.py
@@ -0,0 +1,39 @@
+import time
+
+import numpy as np
+import cv2
+from simba.mixins.image_mixin import ImageMixin
+from numba import jit, prange
+
+@jit(nopython=True, parallel=True)
+def segment_img_stack_horizontal(imgs: np.ndarray, pct: int, lower: bool, both: bool) -> np.ndarray:
+    """
+    Segment a horizontal part of all images in stack.
+
+    :param np.ndarray imgs: Stack of images of shape (N, H, W, 3).
+    :param int pct: Size of the slice as a percentage (0-100) of the image height.
+    :param bool lower: If True, keep the bottom ``pct`` percent of each image; if False, keep the top ``pct`` percent.
+    :param bool both: If True, drop ``pct`` percent from both the top and the bottom and keep the middle.
+
+    :example:
+    >>> imgs = ImageMixin.read_img_batch_from_video(video_path='/Users/simon/Downloads/3A_Mouse_5-choice_MouseTouchBasic_a1.mp4', start_frm=0, end_frm=400)
+    >>> imgs = np.stack(imgs.values(), axis=0)
+    >>> sliced_imgs = segment_img_stack_horizontal(imgs=imgs, pct=50, lower=True, both=False)
+    """
+    results = []
+    for cnt in range(imgs.shape[0]):
+        img = imgs[cnt]
+        sliced_height = int(img.shape[0] * pct / 100)
+        if both:
+            sliced_img = img[sliced_height: img.shape[0] - sliced_height, :]
+        elif lower:
+            sliced_img = img[img.shape[0] - sliced_height:, :]
+        else:
+            sliced_img = img[:sliced_height, :]
+        results.append(sliced_img)
+    stacked_results = np.full((len(results), results[0].shape[0], results[0].shape[1], 3), np.nan)
+    for i in prange(len(results)): stacked_results[i] = results[i]
+    return stacked_results
+
+imgs = ImageMixin.read_img_batch_from_video(video_path='/Users/simon/Downloads/3A_Mouse_5-choice_MouseTouchBasic_a1.mp4', start_frm=0, end_frm=400)
+imgs = np.stack(imgs.values(), axis=0)
+imgs_gray = ImageMixin.img_stack_to_greyscale(imgs=imgs)
+cv2.imshow('img', imgs_gray[0])
+cv2.waitKey(5000)
+
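A hedged sketch of calling segment_img_stack_horizontal above on a small synthetic stack; with pct=25 and lower=True, each returned frame keeps only its bottom quarter (the shapes follow from the slicing above):

import numpy as np

stack = np.random.randint(0, 255, (10, 100, 200, 3)).astype(np.uint8)  # 10 color frames of 100x200
sliced = segment_img_stack_horizontal(imgs=stack, pct=25, lower=True, both=False)
print(sliced.shape)  # (10, 25, 200, 3)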
diff --git a/simba/sandbox/sequential_lag_analysis.py b/simba/sandbox/sequential_lag_analysis.py
new file mode 100644
index 000000000..88fb50365
--- /dev/null
+++ b/simba/sandbox/sequential_lag_analysis.py
@@ -0,0 +1,77 @@
+import numpy as np
+import pandas as pd
+
+from simba.utils.data import detect_bouts
+from simba.utils.read_write import read_df
+from simba.utils.checks import check_instance, check_str, check_that_column_exist, check_float
+from simba.utils.errors import CountError
+
+from statsmodels.sandbox.stats.runs import runstest_1samp
+
+def sequential_lag_analysis(data: pd.DataFrame, criterion: str, target: str, time_window: float, fps: float):
+    """
+    Perform sequential lag analysis to determine the temporal relationship between two events.
+
+    For every onset of behavior C, count the proportion of behavior T onsets in the time-window preceding the onset
+    of behavior C vs the proportion of behavior T onsets in the time-window following the onset of behavior C.
+
+    A value closer to 1.0 indicates that behavior T always precedes behavior C. A value closer to 0.0 indicates
+    that behavior T follows behavior C. A value of -1.0 indicates that behavior T neither precedes nor follows behavior C.
+
+    :example:
+    >>> df = read_df(file_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/csv/targets_inserted/Together_1.csv', file_type='csv')
+    >>> sequential_lag_analysis(data=df, criterion='Attack', target='Sniffing', fps=5, time_window=2.0)
+
+    References
+    ----------
+    .. [1] Casarrubea et al., Structural analyses in the study of behavior: From rodents to non-human primates, `Frontiers in Psychology`,
+           2022.
+    """
+
+    check_instance(source=sequential_lag_analysis.__name__, instance=data, accepted_types=(pd.DataFrame,))
+    check_str(name=f'{sequential_lag_analysis.__name__} criterion', value=criterion)
+    check_str(name=f'{sequential_lag_analysis.__name__} target', value=target)
+    check_float(name=f'{sequential_lag_analysis.__name__} fps', value=fps, min_value=1.0)
+    check_float(name=f'{sequential_lag_analysis.__name__} time-window', value=time_window, min_value=0.01)
+    check_that_column_exist(df=data, column_name=[criterion, target], file_name=sequential_lag_analysis.__name__)
+    bouts = detect_bouts(data_df=data, target_lst=[criterion, target], fps=fps)
+    if len(bouts) == 0:
+        raise CountError(msg=f'No events of behaviors {criterion} and {target} detected in data.', source=sequential_lag_analysis.__name__)
+    criterion_starts = bouts['Start_frame'][bouts['Event'] == criterion].values
+    target_starts = bouts['Start_frame'][bouts['Event'] == target].values
+    preceding_cnt, proceeding_cnt = 0, 0
+    window = int(fps * time_window)
+    if window < 1.0: window = 1
+    for criterion_start in criterion_starts:
+        preceding_events = target_starts[np.argwhere((target_starts < criterion_start) & (target_starts >= (criterion_start - window)))].flatten()
+        preceding_cnt += preceding_events.shape[0]
+        target_starts = np.array([x for x in target_starts if x not in preceding_events])
+        proceeding_events = target_starts[np.argwhere((target_starts > criterion_start) & (target_starts <= (criterion_start + window)))].flatten()
+        proceeding_cnt += proceeding_events.shape[0]
+        target_starts = np.array([x for x in target_starts if x not in proceeding_events])
+    if preceding_cnt == 0 and proceeding_cnt == 0:
+        return -1.0
+    elif preceding_cnt == 0:
+        return 0.0
+    elif proceeding_cnt == 0:
+        return 1.0
+    else:
+        return np.round(preceding_cnt / (preceding_cnt + proceeding_cnt), 3)
+
+
+df = read_df(file_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/csv/targets_inserted/Together_1.csv', file_type='csv')
+
+sequential_lag_analysis(data=df, criterion='Attack', target='Sniffing', fps=5, time_window=2.0)
+
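A hedged synthetic check of sequential_lag_analysis above (assumes SimBA's detect_bouts is importable): target onsets are planted exactly 1 s before each criterion onset, so the score should be 1.0.

import numpy as np
import pandas as pd

fps = 10
c, t = np.zeros(200, dtype=int), np.zeros(200, dtype=int)
c[[50, 120]] = 1   # two single-frame bouts of the criterion behavior
t[[40, 110]] = 1   # target bouts exactly 1 s (10 frames) earlier
df = pd.DataFrame({'C': c, 'T': t})
print(sequential_lag_analysis(data=df, criterion='C', target='T', time_window=2.0, fps=fps))  # -> 1.0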
diff --git a/simba/sandbox/shannon_diversity_index.py b/simba/sandbox/shannon_diversity_index.py
new file mode 100644
index 000000000..b23a39309
--- /dev/null
+++ b/simba/sandbox/shannon_diversity_index.py
@@ -0,0 +1,33 @@
+import numpy as np
+from numba import jit
+
+@jit(nopython=True)
+def shannon_diversity_index(x: np.ndarray) -> float:
+    """
+    Calculate the Shannon Diversity Index for a given array of categories. The Shannon Diversity Index is a measure of diversity in a
+    categorical feature, taking into account both the number of different categories (richness)
+    and their relative abundances (evenness).
+
+    :example:
+    >>> x = np.random.randint(0, 100, (100, ))
+    >>> shannon_diversity_index(x=x)
+    """
+
+    unique_v = np.unique(x)
+    n_unique = unique_v.shape[0]
+    results = np.full((n_unique,), np.nan)
+    for i in range(unique_v.shape[0]):
+        v = unique_v[i]
+        cnt = np.argwhere(x == v).flatten().shape[0]
+        pi = cnt / x.shape[0]
+        results[i] = pi * np.log(pi)
+    return np.sum(np.abs(results))
+
+x = np.random.randint(0, 100, (100, ))
+y = np.random.randint(0, 1, (300, ))
+x = np.append(x, y)
+
+shannon_diversity_index(x=x)
+
+
diff --git a/simba/sandbox/shap_2_nb.py b/simba/sandbox/shap_2_nb.py
new file mode 100644
index 000000000..3af7425ff
--- /dev/null
+++ b/simba/sandbox/shap_2_nb.py
@@ -0,0 +1,48 @@
+from simba.mixins.train_model_mixin import TrainModelMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.read_write import read_df, read_config_file
+import glob
+
+
+# DEFINITIONS
+CONFIG_PATH = r"C:\troubleshooting\mitra\project_folder\project_config.ini"
+CLASSIFIER_PATH = r"C:\troubleshooting\mitra\models\generated_models\grooming.sav"
+CLASSIFIER_NAME = 'grooming'
+COUNT_PRESENT = 250
+COUNT_ABSENT = 250
+
+
+# READ IN THE CONFIG AND THE CLASSIFIER
+config = read_config_file(config_path=CONFIG_PATH)
+config_object = ConfigReader(config_path=CONFIG_PATH)
+clf = read_df(file_path=CLASSIFIER_PATH, file_type='pickle')
+
+
+# READ IN THE DATA
+
+#Read in the path to all files inside the project_folder/csv/targets_inserted directory
+file_paths = glob.glob(config_object.targets_folder + '/*' + config_object.file_type)
+
+#Reads in the data held in all files in ``file_paths`` defined above
+data, _ = TrainModelMixin().read_all_files_in_folder_mp(file_paths=file_paths, file_type=config.get('General settings', 'workflow_file_type').strip())
+
+#We find all behavior annotations that are NOT the target. I.e., if SHAP values for Attack are going to be calculated, we need to find which other annotations exist in the data, e.g., Escape and Defensive.
+non_target_annotations = TrainModelMixin().read_in_all_model_names_to_remove(config=config, model_cnt=config_object.clf_cnt, clf_name=CLASSIFIER_NAME)
+
+# We remove the body-part coordinate columns and the annotations which are not the target from the data
+data = data.drop(non_target_annotations + config_object.bp_headers, axis=1)
+
+# We place the target data in its own variable
+target_df = data.pop(CLASSIFIER_NAME)
+
+
+TrainModelMixin().create_shap_log_mp(ini_file_path=CONFIG_PATH,
+                                     rf_clf=clf,
+                                     x_df=data,
+                                     y_df=target_df,
+                                     x_names=data.columns,
+                                     clf_name=CLASSIFIER_NAME,
+                                     cnt_present=COUNT_PRESENT,
+                                     cnt_absent=COUNT_ABSENT,
+                                     save_path=config_object.logs_path)
+
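+# (Hedged sanity-check sketch, not part of the original script; it belongs logically
+# before the create_shap_log_mp call above:) COUNT_PRESENT / COUNT_ABSENT cannot exceed
+# the number of annotated frames of each class in target_df.
+present_n = int(target_df.sum())
+absent_n = int(len(target_df) - present_n)
+assert COUNT_PRESENT <= present_n, f'only {present_n} present frames annotated'
+assert COUNT_ABSENT <= absent_n, f'only {absent_n} absent frames annotated'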
image:: _static/img/adjust_geometry_locations.png
+       :width: 600
+       :align: center
+
+    :param List[Polygon] geometries: List of input polygons to be adjusted.
+    :param Tuple[int, int] shift: Tuple specifying the shift distances in the x and y-axis.
+    :param Optional[Tuple[int, int]] minimum: Minimum allowed coordinates of Polygon points on the x and y axes. Default: (0, 0).
+    :param Optional[Tuple[int, int]] maximum: Maximum allowed coordinates of Polygon points on the x and y axes. Default: (np.inf, np.inf).
+    :return List[Polygon]: List of adjusted polygons.
+
+    :example:
+    >>> shapes = adjust_geometry_locations(geometries=shapes, shift=(0, 333))
+    """
+
+    check_valid_tuple(x=shift, source=f"{adjust_geometry_locations.__name__} shift", accepted_lengths=(2,), valid_dtypes=(int,))
+    check_valid_tuple(x=minimum, source=f"{adjust_geometry_locations.__name__} minimum", accepted_lengths=(2,), valid_dtypes=(int,))
+    check_valid_tuple(x=maximum, source=f"{adjust_geometry_locations.__name__} maximum", accepted_lengths=(2,), valid_dtypes=(int, float))
+    check_valid_lst(data=geometries, source=f"{adjust_geometry_locations.__name__} geometries", valid_dtypes=(Polygon,), min_len=1)
+    results = []
+    for shape_cnt, shape in enumerate(geometries):
+        shape_results = []
+        for x, y in list(shape.exterior.coords):
+            # Shift each vertex, then clamp it to the allowed coordinate range on its own axis.
+            x_shift, y_shift = int(np.ceil(x + shift[0])), int(np.ceil(y + shift[1]))
+            x_shift, y_shift = max(minimum[0], x_shift), max(minimum[1], y_shift)
+            x_shift, y_shift = min(maximum[0], x_shift), min(maximum[1], y_shift)
+            shape_results.append([x_shift, y_shift])
+        results.append(Polygon(shape_results))
+    return results
+
+
+    # results.append(Polygon([(int(abs(x + shift[0])), int(abs(y + shift[1]))) for x, y in list(shape.exterior.coords)]))
+    # return results
+
+
+geometries = GeometryMixin().bodyparts_to_polygon(np.array([[[50, 50],
+                                                             [100, 100],
+                                                             [50, 100],
+                                                             [100, 50]]]))
+geometries_shifted = adjust_geometry_locations(geometries=geometries, shift=(-25, 100), maximum=(500, 500))
+
+geometries = [geometries[0], geometries_shifted[0]]
+
+img = GeometryMixin.view_shapes(shapes=geometries)
+cv2.imshow('sdasdas', img)
+cv2.waitKey(3000)
\ No newline at end of file
diff --git a/simba/sandbox/siegel_tukey.py b/simba/sandbox/siegel_tukey.py
new file mode 100644
index 000000000..760de1821
--- /dev/null
+++ b/simba/sandbox/siegel_tukey.py
@@ -0,0 +1,95 @@
+import numpy as np
+#from simba.utils.data import rank
+
+def siegel_tukey(x: np.ndarray, y: np.ndarray):
+    """
+    Siegel-Tukey test: a non-parametric rank test for differences in variability (scale) between two groups.
+
+    Non-parametric test to check if the variability between two groups (e.g., two features) is similar.
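+
+    The rank sums :math:`W` of each group are converted to Mann-Whitney-style statistics, where :math:`n` is the group sample size:
+
+    .. math::
+
+       U = \\left| W - \\frac{n(n + 1)}{2} \\right|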
+ + :example: + >>> x = np.random.random((10,)) + >>> y = np.random.random((10,)) + >>> siegel_tukey(x=x, y=y) + """ + x = np.hstack((np.zeros(x.shape[0]).reshape(-1, 1), x.reshape(-1, 1))) + y = np.hstack((np.ones(y.shape[0]).reshape(-1, 1), y.reshape(-1, 1))) + data = np.vstack((x, y)) + sorted_data = data[data[:, 1].argsort()] + results = np.full((sorted_data.shape[0], 3), -1.0) + results[0, 0:2] = sorted_data[0, :] + results[0, 2] = 1 + top, bottom = np.array_split(sorted_data[1:, ], 2) + bottom = bottom[::-1] + start, end, c_rank = 1, 5, 2 + for i in range(1, max(bottom.shape[0], top.shape[0]), 2): + b_ = bottom[i-1:i+1] + b_ = np.hstack((b_, np.full((b_.shape[0], 1), c_rank))) + c_rank += 1 + t_ = top[i - 1:i + 1] + t_ = np.hstack((t_, np.full((t_.shape[0], 1), c_rank))) + c_rank += 1 + results[start:end, :] = np.vstack((b_, t_)) + start, end = end, end+4 + + w_a = np.sum(results[np.argwhere(results[:, 0] == 0)][:, -1][:, -1]) + w_b = np.sum(results[np.argwhere(results[:, 0] == 1)][:, -1][:, -1]) + + u_a = np.abs(w_a - (x.shape[0] * (x.shape[0] + 1) / 2)) + u_b = np.abs(w_b - (y.shape[0] * (y.shape[0] + 1) / 2)) + + return u_a, u_b, (np.max((u_b, u_a)) - np.min((u_b, u_a))) - x.shape[0] + + + + + + + + + + + #print(sorted_data) + # rank_0 = np.full((sorted_data.shape[0], 1), 0) + # visited = [] + # t, b, c_rank, dir, pos = 0, sorted_data.shape[0]-1, 1, 1, 0 + # while len(visited) < sorted_data.shape[0]: + # rank_0[pos] = c_rank + # visited.append(pos) + # if dir == 1: + # if sorted_data[pos+dir] == sorted_data[pos]: + # pos += 1 + # t += 1 + # else: + # pos = b + # dir = -1 + # elif dir == -1: + # if sorted_data[pos + dir] == sorted_data[pos]: + # pos -= 1 + # b -= 1 + # else: + # pos = t + # dir = 1 + # + + + + #print(visited, pos) + + + + + + + + #sorted_data = [i[::-1] for i in sorted_data[::-1]] + #print(sorted_data) + + #np.sort(data) + + + + +x = np.random.randint(0, 100, (10,)) +y = np.random.randint(50, 10000, (10,)) +siegel_tukey(x=y, y=x) diff --git a/simba/sandbox/silhouette_score.py b/simba/sandbox/silhouette_score.py new file mode 100644 index 000000000..4e88bfebe --- /dev/null +++ b/simba/sandbox/silhouette_score.py @@ -0,0 +1,28 @@ +import numpy as np +from sklearn.metrics import adjusted_rand_score +from simba.utils.checks import check_valid_array + +def adjusted_rand_index(x: np.ndarray, y: np.ndarray) -> float: + """ + Compute Adjusted Rand Index (ARI) + + A value close to 0.0 represent random labeling and exactly 1.0 when the clusterings are identical. + + :param np.ndarray x: 1D array representing cluster labels for model one. Shape (n_samples,). + :param np.ndarray y: 1D array representing cluster labels for model two. Shape (x.shape[0],). + :return float: Adjusted Rand Index value, ranges from -1 to 1. A value close to 1 indicates a perfect match between the two clusterings, while a value close to 0 indicates random labeling. 
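+
+    The index is the Rand Index (RI) corrected for chance agreement:
+
+    .. math::
+
+       ARI = \\frac{RI - E[RI]}{\\max(RI) - E[RI]}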
+
+    :example:
+    >>> x = np.random.randint(0, 2, (10000,))
+    >>> y = np.random.randint(0, 2, (10000,))
+    >>> adjusted_rand_index(x=x, y=y)
+    """
+    check_valid_array(data=x, source=adjusted_rand_index.__name__, accepted_ndims=(1,))
+    check_valid_array(data=y, source=adjusted_rand_index.__name__, accepted_shapes=[(x.shape)])
+    return adjusted_rand_score(x, y)
+
+
diff --git a/simba/sandbox/simpson_diversity_index.py b/simba/sandbox/simpson_diversity_index.py
new file mode 100644
index 000000000..46e4938c7
--- /dev/null
+++ b/simba/sandbox/simpson_diversity_index.py
@@ -0,0 +1,31 @@
+from typing import Optional
+import numpy as np
+from numba import jit
+
+@jit(nopython=True)
+def simpson_index(x: np.ndarray) -> float:
+    """
+    Calculate Simpson's diversity index for a given array of values.
+
+    Simpson's diversity index is a measure of diversity that takes into account the number of different categories
+    present in the input data as well as the relative abundance of each category. Note that the returned value is
+    Simpson's index :math:`\\lambda` (the probability that two randomly drawn samples share a category): higher
+    values indicate lower diversity, and :math:`1 - \\lambda` is often reported as the diversity.
+
+    :param np.ndarray x: 1-dimensional numpy array containing the values representing categories for which Simpson's index is calculated.
+    :return float: Simpson's diversity index value for the input array `x`.
+    """
+
+    unique_v = np.unique(x)
+    n_unique = unique_v.shape[0]
+    results = np.full((n_unique, 3), np.nan)
+    for i in range(unique_v.shape[0]):
+        v = unique_v[i]
+        cnt = np.argwhere(x == v).flatten().shape[0]
+        pairs = cnt * (cnt - 1)  # the n_i * (n_i - 1) term of Simpson's index
+        results[i, :] = np.array([v, cnt, pairs])
+    return (np.sum(results[:, 2])) / (x.shape[0] * (x.shape[0] - 1))
+
+
+x = np.random.randint(0, 5, (10000000,))
+simpson_index(x=x)
diff --git a/simba/sandbox/slic.py b/simba/sandbox/slic.py
new file mode 100644
index 000000000..53da092be
--- /dev/null
+++ b/simba/sandbox/slic.py
@@ -0,0 +1,140 @@
+import os
+
+import matplotlib.pyplot as plt
+from skimage import io
+from skimage.segmentation import slic
+from skimage.segmentation import mark_boundaries
+import numpy as np
+from typing import Optional, Union
+from simba.utils.checks import check_valid_array, check_int, check_if_valid_img, check_file_exist_and_readable
+from simba.utils.read_write import read_frm_of_video, get_video_meta_data, find_core_cnt, get_fn_ext, concatenate_videos_in_folder
+from skimage.color import label2rgb
+import cv2
+from copy import deepcopy
+import multiprocessing
+import functools
+from simba.utils.enums import Defaults, Formats
+from simba.utils.printing import SimbaTimer, stdout_success
+
+def get_img_slic(img: np.ndarray,
+                 n_segments: Optional[int] = 50,
+                 compactness: Optional[int] = 50,
+                 sigma: Optional[float] = 1) -> np.ndarray:
+
+    """
+    Simplify an image into superpixels using SLIC (Simple Linear Iterative Clustering).
+
+    :param np.ndarray img: Image to segment.
+    :param Optional[int] n_segments: Number of segments to produce.
+    :param Optional[int] compactness: How compact ("square") the output segments are.
+    :param Optional[float] sigma: Amount of Gaussian smoothing.
+    :return: Smoothed version of the input image.
+    :rtype: np.ndarray
+
+    :example:
+    >>> img = read_frm_of_video(video_path=r"C:\troubleshooting\mitra\project_folder\videos\FRR_gq_Saline_0626.mp4", frame_index=0)
+    >>> sliced_img = get_img_slic(img=img)
+    """
+
+    check_if_valid_img(data=img, source=f'{get_img_slic.__name__} img', raise_error=True)
+    check_int(name=f'{get_img_slic.__name__} n_segments', value=n_segments, min_value=2)
+    check_int(name=f'{get_img_slic.__name__} compactness', value=compactness, min_value=1)
+    check_int(name=f'{get_img_slic.__name__} sigma', value=sigma, min_value=0)
+    segments = slic(image=img, n_segments=n_segments, compactness=compactness, sigma=sigma, start_label=0)
+    segmented_image = label2rgb(segments, img, kind='avg', bg_label=0)
+    # label2rgb returns float averages; cast back to uint8 so the result can be displayed and written to video.
+    return segmented_image.astype(np.uint8)
+
+def _slic_helper(frm_range: np.ndarray,
+                 n_segments: int,
+                 sigma: float,
+                 compactness: int,
+                 save_dir: Union[str, os.PathLike],
+                 video_path: Union[str, os.PathLike]):
+
+    """ SLIC multiprocess helper called by slic.get_video_slic """
+
+    video_cap = cv2.VideoCapture(video_path)
+    video_meta_data = get_video_meta_data(video_path=video_path)
+    batch, start_frm, end_frm = frm_range[0], frm_range[1][0], frm_range[1][-1]
+    save_path = os.path.join(save_dir, f'{batch}.mp4')
+    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta_data["fps"], (video_meta_data["width"], video_meta_data["height"]))
+    for frm_idx in range(start_frm, end_frm + 1):
+        print(f'Frame {frm_idx}/{end_frm}, Batch {batch}...')
+        img = read_frm_of_video(video_path=video_cap, frame_index=frm_idx)
+        img = get_img_slic(img=img, n_segments=n_segments, compactness=compactness, sigma=sigma)
+        writer.write(img)
+    writer.release()
+    return batch
+
+
+def get_video_slic(video_path: Union[str, os.PathLike],
+                   save_path: Union[str, os.PathLike],
+                   n_segments: Optional[int] = 50,
+                   compactness: Optional[int] = 50,
+                   sigma: Optional[int] = 1,
+                   core_cnt: Optional[int] = -1) -> None:
+
+    """
+    Apply SLIC superpixel segmentation to all frames of a video and save the output as a new video.
+
+    .. video:: _static/img/get_video_slic.webm
+       :width: 800
+       :autoplay:
+       :loop:
+
+    :param Union[str, os.PathLike] video_path: Path to the input video file.
+    :param Union[str, os.PathLike] save_path: Path to save the processed video with SLIC superpixel segmentation.
+    :param Optional[int] n_segments: Approximate number of superpixels for each frame. Defaults to 50.
+    :param Optional[int] compactness: Balance of color and spatial proximity. Higher values result in more uniformly shaped superpixels. Defaults to 50.
+    :param Optional[int] sigma: Standard deviation for Gaussian smoothing applied to each frame before segmentation. Defaults to 1.
+    :param Optional[int] core_cnt: Number of CPU cores to use for parallel processing. Set to -1 to use all available cores. Defaults to -1.
+    :return: None. The segmented video is saved to `save_path`.
+
+    :example:
+    >>> #video_path = r"C:\troubleshooting\mitra\project_folder\videos\FRR_gq_Saline_0626.mp4"
+    """
+    timer = SimbaTimer(start=True)
+    check_int(name=f'{get_img_slic.__name__} n_segments', value=n_segments, min_value=2)
+    check_int(name=f'{get_img_slic.__name__} compactness', value=compactness, min_value=1)
+    check_int(name=f'{get_img_slic.__name__} sigma', value=sigma, min_value=0)
+    check_int(name=f'{get_img_slic.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
+    check_file_exist_and_readable(file_path=video_path)
+    video_meta_data = get_video_meta_data(video_path=video_path)
+    if core_cnt == -1 or core_cnt > find_core_cnt()[0]: core_cnt = find_core_cnt()[0]
+    frm_ranges = np.array_split(np.arange(0, video_meta_data['frame_count']), core_cnt)
+    frm_ranges = [(y, x) for y, x in enumerate(frm_ranges)]
+    out_dir, out_name, _ = get_fn_ext(filepath=save_path)
+    temp_folder = os.path.join(out_dir, "temp")
+    if not os.path.isdir(temp_folder): os.makedirs(temp_folder)
+    with multiprocessing.Pool(core_cnt, maxtasksperchild=Defaults.MAX_TASK_PER_CHILD.value) as pool:
+        constants = functools.partial(_slic_helper,
+                                      video_path=video_path,
+                                      save_dir=temp_folder,
+                                      n_segments=n_segments,
+                                      compactness=compactness,
+                                      sigma=sigma)
+        for cnt, core_batch in enumerate(pool.map(constants, frm_ranges, chunksize=1)):
+            print(f'Core batch {core_batch} complete...')
+        pool.terminate()
+        pool.join()
+    concatenate_videos_in_folder(in_folder=temp_folder, save_path=save_path)
+    timer.stop_timer()
+    stdout_success(msg=f'SLIC video saved at {save_path}', elapsed_time=timer.elapsed_time_str)
+
+
+if __name__=='__main__':
+    video_path = r"C:\troubleshooting\mitra\project_folder\videos\FRR_gq_Saline_0626.mp4"
+    get_video_slic(video_path=video_path, save_path=r"C:\Users\sroni\OneDrive\Desktop\test.mp4")
+
+# img = read_frm_of_video(video_path=r"C:\troubleshooting\mitra\project_folder\videos\FRR_gq_Saline_0626.mp4", frame_index=0)
+# sliced_img = get_img_slic(img=img)
+# cv2.imshow('sasdasd', sliced_img)
+# cv2.waitKey(5000)
diff --git a/simba/sandbox/sliding_autoc.py b/simba/sandbox/sliding_autoc.py
new file mode 100644
index 000000000..2e55d2de0
--- /dev/null
+++ b/simba/sandbox/sliding_autoc.py
@@ -0,0 +1,42 @@
+import numpy as np
+from numba import njit, prange
+import time
+
+@njit("(float32[:], float64, float64, float64)")
+def sliding_autocorrelation(data: np.ndarray, max_lag: float, time_window: float, fps: float):
+    """
+    Jitted compute of sliding auto-correlations (the correlation of a feature with itself using lagged windows).
+
+    :example:
+    >>> data = np.array([0,1,2,3,4, 5,6,7,8,1,10,11,12,13,14]).astype(np.float32)
+    >>> sliding_autocorrelation(data=data, max_lag=0.5, time_window=1.0, fps=10)
+    >>> [ 0., 0., 0., 0., 0., 0., 0., 0.
, 0., -3.686, -2.029, -1.323, -1.753, -3.807, -4.634] + """ + + max_frm_lag, time_window_frms = int(max_lag * fps), int(time_window * fps) + results = np.full((data.shape[0]), -1.0) + for right in prange(time_window_frms - 1, data.shape[0]): + left = right - time_window_frms + 1 + w_data = data[left: right + 1] + corrcfs = np.full((max_frm_lag), np.nan) + corrcfs[0] = 1 + for shift in range(1, max_frm_lag): + c = np.corrcoef(w_data[:-shift], w_data[shift:])[0][1] + if np.isnan(c): + corrcfs[shift] = 1 + else: + corrcfs[shift] = np.corrcoef(w_data[:-shift], w_data[shift:])[0][1] + mat_ = np.zeros(shape=(corrcfs.shape[0], 2)) + const = np.ones_like(corrcfs) + mat_[:, 0] = const + mat_[:, 1] = corrcfs + det_ = np.linalg.lstsq(mat_.astype(np.float32), np.arange(0, max_frm_lag).astype(np.float32))[0] + results[right] = det_[::-1][0] + return results + + +#data = np.array([0,1,2,3,4, 5,6,7,8,1,10,11,12,13,14]).astype(np.float32) +start = time.time() +data = np.random.randint(0, 100, (1000, )).astype(np.float32) +sliding_autocorrelation(data=data, max_lag=0.5, time_window=1.0, fps=10.0) +print(time.time() - start) \ No newline at end of file diff --git a/simba/sandbox/sliding_circular_hotspots.py b/simba/sandbox/sliding_circular_hotspots.py new file mode 100644 index 000000000..fafebc3ae --- /dev/null +++ b/simba/sandbox/sliding_circular_hotspots.py @@ -0,0 +1,55 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +from typing import Optional + +try: + from typing import Literal +except: + from typing_extensions import Literal + +import cupy as cp +import numpy as np + + +def sliding_circular_hotspots(x: np.ndarray, + time_window: float, + sample_rate: float, + bins: np.ndarray, + batch_size: Optional[int] = int(3.5e+7)) -> np.ndarray: + """ + Calculate the proportion of data points falling within specified circular bins over a sliding time window using GPU + + This function processes time series data representing angles (in degrees) and calculates the proportion of data + points within specified angular bins over a sliding window. The calculations are performed in batches to + accommodate large datasets efficiently. + + :param np.ndarray x: The input time series data in degrees. Should be a 1D numpy array. + :param float time_window: The size of the sliding window in seconds. + :param float sample_rate: The sample rate of the time series data (i.e., hz, fps). + :param ndarray bins: 2D array of shape representing circular bins defining [start_degree, end_degree] inclusive. + :param Optional[int] batch_size: The size of each batch for processing the data. Default is 5e+7 (50m). + :return: A 2D numpy array where each row corresponds to a time point in `data`, and each column represents a circular bin. The values in the array represent the proportion of data points within each bin at each time point. The first column represents the first bin. 
+ :rtype: np.ndarray + """ + + n = x.shape[0] + x = cp.asarray(x, dtype=cp.float16) + results = cp.full((x.shape[0], bins.shape[0]), dtype=cp.float16, fill_value=-1) + window_size = int(cp.ceil(time_window * sample_rate)) + for cnt, left in enumerate(range(0, n, batch_size)): + right = int(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size + 1 + x_batch = x[left:right] + x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16) + batch_results = cp.full((x_batch.shape[0], bins.shape[0]), dtype=cp.float16, fill_value=-1) + for bin_cnt in range(bins.shape[0]): + if bins[bin_cnt][0] > bins[bin_cnt][1]: + mask = ((x_batch >= bins[bin_cnt][0]) & (x_batch <= 360)) | ((x_batch >= 0) & (x_batch <= bins[bin_cnt][1])) + else: + mask = (x_batch >= bins[bin_cnt][0]) & (x_batch <= bins[bin_cnt][1]) + count_per_row = cp.array(mask.sum(axis=1) / window_size).reshape(-1, ) + batch_results[:, bin_cnt] = count_per_row + results[left + window_size - 1:right, ] = batch_results + return results.get() \ No newline at end of file diff --git a/simba/sandbox/sliding_circular_mean.py b/simba/sandbox/sliding_circular_mean.py new file mode 100644 index 000000000..0d670ae8a --- /dev/null +++ b/simba/sandbox/sliding_circular_mean.py @@ -0,0 +1,61 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +from typing import Optional + +import cupy +import numpy as np + + +def sliding_circular_mean(x: np.ndarray, + time_window: float, + sample_rate: int, + batch_size: Optional[int] = 3e+7) -> np.ndarray: + + """ + Calculate the sliding circular mean over a time window for a series of angles. + + This function computes the circular mean of angles in the input array `x` over a specified sliding window. + The circular mean is a measure of the average direction for angles, which is especially useful for angular data + where traditional averaging would not be meaningful due to the circular nature of angles (e.g., 359° and 1° should average to 0°). + + The calculation is performed using a sliding window approach, where the circular mean is computed for each window + of angles. The function leverages GPU acceleration via CuPy for efficiency when processing large datasets. + + The circular mean :math:`\\mu` for a set of angles is calculated using the following formula: + + .. math:: + + \\mu = \\text{atan2}\\left(\\frac{1}{N} \\sum_{i=1}^{N} \\sin(\\theta_i), \\frac{1}{N} \\sum_{i=1}^{N} \\cos(\\theta_i)\\right) + + - :math:`\\theta_i` are the angles in radians within the sliding window + - :math:`N` is the number of samples in the window + + + :param np.ndarray x: Input array containing angle values in degrees. The array should be 1-dimensional. + :param float time_window: Time duration for the sliding window, in seconds. This determines the number of samples in each window based on the `sample_rate`. + :param int sample_rate: The number of samples per second (i.e., FPS). This is used to calculate the window size in terms of array indices. + :param Optional[int] batch_size: The maximum number of elements to process in each batch. This is used to handle large arrays by processing them in chunks to avoid memory overflow. Defaults to 3e+7 (30 million elements). + :return np.ndarray: A 1D numpy array of the same length as `x`, containing the circular mean for each sliding window. Values before the window is fully populated will be set to -1. 
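+
+    .. note::
+       For a single window, the output can be sanity-checked against ``scipy.stats.circmean(window, high=360)``
+       (assuming SciPy is available; SciPy is not used by this function).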
+ + :example: + >>> x = np.random.randint(0, 361, (i, )).astype(np.int32) + >>> results = sliding_circular_mean(x, 1, 10) + """ + + + window_size = np.ceil(time_window * sample_rate).astype(np.int64) + n = x.shape[0] + results = cupy.full(x.shape[0], -1, dtype=np.int32) + for cnt, left in enumerate(range(0, int(n), int(batch_size))): + right = np.int32(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size+1 + x_batch = cupy.asarray(x[left:right]) + x_batch = cupy.lib.stride_tricks.sliding_window_view(x_batch, window_size) + x_batch = np.deg2rad(x_batch) + cos, sin = cupy.cos(x_batch).astype(np.float32), cupy.sin(x_batch).astype(np.float32) + r = cupy.rad2deg(cupy.arctan2(cupy.mean(sin, axis=1), cupy.mean(cos, axis=1))) + r = cupy.where(r < 0, r + 360, r) + results[left + window_size - 1:right] = r + return results.get() diff --git a/simba/sandbox/sliding_circular_range.py b/simba/sandbox/sliding_circular_range.py new file mode 100644 index 000000000..ecdd4aba7 --- /dev/null +++ b/simba/sandbox/sliding_circular_range.py @@ -0,0 +1,60 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +from typing import Optional + +try: + from typing import Literal +except: + from typing_extensions import Literal + +import cupy as cp +import numpy as np + + +def sliding_circular_range(x: np.ndarray, + time_window: float, + sample_rate: float, + batch_size: Optional[int] = int(5e+7)) -> np.ndarray: + """ + Computes the sliding circular range of a time series data array using GPU. + + This function calculates the circular range of a time series data array using a sliding window approach. + The input data is assumed to be in degrees, and the function handles the circular nature of the data + by considering the circular distance between angles. + + .. math:: + + R = \\min \\left( \\text{max}(\\Delta \\theta) - \\text{min}(\\Delta \\theta), \\, 360 - \\text{max}(\\Delta \\theta) + \\text{min}(\\Delta \\theta) \\right) + + where: + + - :math:`\\Delta \\theta` is the difference between angles within the window, + - :math:`360` accounts for the circular nature of the data (i.e., wrap-around at 360 degrees). + + :param np.ndarray x: The input time series data in degrees. Should be a 1D numpy array. + :param float time_window: The size of the sliding window in seconds. + :param float sample_rate: The sample rate of the time series data (i.e., hz, fps). + :param Optional[int] batch_size: The size of each batch for processing the data. Default is 5e+7 (50m). + :return: A numpy array containing the sliding circular range values. 
+ :rtype: np.ndarray + + :example: + >>> x = np.random.randint(0, 361, (19, )).astype(np.int32) + >>> p = sliding_circular_range(x, 1, 10) + """ + + n = x.shape[0] + x = cp.asarray(x, dtype=cp.float16) + results = cp.zeros_like(x, dtype=cp.int16) + x = cp.deg2rad(x).astype(cp.float16) + window_size = int(cp.ceil(time_window * sample_rate)) + for cnt, left in enumerate(range(0, n, batch_size)): + right = int(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size + 1 + x_batch = x[left:right] + x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16) + x_batch = cp.sort(x_batch) + results[left + window_size - 1:right] = cp.abs(cp.rint(cp.rad2deg(cp.amin(cp.vstack([x_batch[:, -1] - x_batch[:, 0], 2 * cp.pi - cp.max(cp.diff(x_batch), axis=1)]).T, axis=1)))) + return results.get() \ No newline at end of file diff --git a/simba/sandbox/sliding_circular_range_test.py b/simba/sandbox/sliding_circular_range_test.py new file mode 100644 index 000000000..44ea3810b --- /dev/null +++ b/simba/sandbox/sliding_circular_range_test.py @@ -0,0 +1,31 @@ +import numpy as np +from numba import njit, prange + + +@njit("(float32[:],)") +def circular_range(data: np.ndarray) -> float: + n = len(data) + if n < 2: + return 0.0 + data_sorted = np.sort(data) + diffs = np.empty(n) + for i in range(n - 1): + diffs[i] = data_sorted[i + 1] - data_sorted[i] + diffs[n - 1] = (data_sorted[0] + 360) - data_sorted[-1] + max_diff = np.max(diffs) + return 360 - max_diff + + +# Test cases +data1 = np.array([0, 350, 180, 275]).astype(np.float32) +data2 = np.array([350, 0, 10, 20, 30, 90, 190, 220, 250, 290, 320, 349, 90, 10]).astype(np.float32) + +data2 = np.array([0, 10, 350, 180, 45, 10, 300, 290, 100, 0]).astype(np.float32) + +data2 = np.arange(0, 370, 10).astype(np.float32) + +result1 = circular_range(data=data1) +result2 = circular_range(data=data2) + +print(f"Leftward Transition {data1}:", result1) +print(f"Rightward Transition {data2}:", result2) diff --git a/simba/sandbox/sliding_circular_std.py b/simba/sandbox/sliding_circular_std.py new file mode 100644 index 000000000..e32cb1fe3 --- /dev/null +++ b/simba/sandbox/sliding_circular_std.py @@ -0,0 +1,60 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +from typing import Optional + +try: + from typing import Literal +except: + from typing_extensions import Literal + +import cupy as cp +import numpy as np + + +def sliding_circular_std(x: np.ndarray, + time_window: float, + sample_rate: float, + batch_size: Optional[int] = int(5e+7)) -> np.ndarray: + """ + Calculate the sliding circular standard deviation of a time series data on GPU. + + This function computes the circular standard deviation over a sliding window for a given time series array. + The time series data is assumed to be in degrees, and the function converts it to radians for computation. + The sliding window approach is used to handle large datasets efficiently, processing the data in batches. + + The circular standard deviation (σ) is computed using the formula: + + .. math:: + + \sigma = \sqrt{-2 \cdot \log \left|\text{mean}\left(\exp(i \cdot x_{\text{batch}})\right)\right|} + + where :math:`x_{\text{batch}}` is the data within the current sliding window, and :math:`\text{mean}` and + :math:`\log` are computed in the circular (complex plane) domain. + + :param np.ndarray x: The input time series data in degrees. Should be a 1D numpy array. + :param float time_window: The size of the sliding window in seconds. 
+ :param float sample_rate: The sample rate of the time series data (i.e., hz, fps). + :param Optional[int] batch_size: The size of each batch for processing the data. Default is 5e+7 (50m). + + :return: A numpy array containing the sliding circular standard deviation values. + :rtype: np.ndarray + """ + + + n = x.shape[0] + x = cp.asarray(x, dtype=cp.float16) + results = cp.zeros_like(x, dtype=cp.float16) + x = np.deg2rad(x).astype(cp.float16) + window_size = int(np.ceil(time_window * sample_rate)) + for cnt, left in enumerate(range(0, n, batch_size)): + right = int(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size + 1 + x_batch = x[left:right] + x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16) + m = cp.log(cp.abs(cp.mean(cp.exp(1j * x_batch), axis=1))) + stdev = cp.rad2deg(cp.sqrt(-2 * m)) + results[left + window_size - 1:right] = stdev + + return results.get() diff --git a/simba/sandbox/sliding_crosscorrelation.py b/simba/sandbox/sliding_crosscorrelation.py new file mode 100644 index 000000000..7892cdfe7 --- /dev/null +++ b/simba/sandbox/sliding_crosscorrelation.py @@ -0,0 +1,65 @@ +import numpy as np +from numba import njit, prange +import time + +@njit('(float64[:], float64[:], float64[:], float64, boolean, float64)') +def sliding_two_signal_crosscorrelation(x: np.ndarray, + y: np.ndarray, + windows: np.ndarray, + sample_rate: float, + normalize: bool, + lag: float) -> np.ndarray: + """ + Calculate sliding (lagged) cross-correlation between two signals, e.g., the movement and velocity of two animals. + + .. note:: + If no lag needed, pass lag 0.0. + + :param np.ndarray x: The first input signal. + :param np.ndarray y: The second input signal. + :param np.ndarray windows: Array of window lengths in seconds. + :param float sample_rate: Sampling rate of the signals (in Hz or FPS). + :param bool normalize: If True, normalize the signals before computing the correlation. + :param float lag: Time lag between the signals in seconds. + + :return: 2D array of sliding cross-correlation values. Each row corresponds to a time index, and each column corresponds to a window size specified in the `windows` parameter. 
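+
+    With ``normalize=True``, each window value equals the Pearson correlation of the two (lagged) windows, where
+    :math:`W_s` is the window length in frames:
+
+    .. math::
+
+       r = \\frac{1}{W_s \\sigma_x \\sigma_y} \\sum_{i=1}^{W_s} (x_i - \\bar{x})(y_i - \\bar{y})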
+ + :example: + >>> x = np.random.randint(0, 10, size=(20,)) + >>> y = np.random.randint(0, 10, size=(20,)) + >>> sliding_two_signal_crosscorrelation(x=x, y=y, windows=np.array([1.0, 1.2]), sample_rate=10, normalize=True, lag=0.0) + """ + + + results = np.full((x.shape[0], windows.shape[0]), 0.0) + lag = int(sample_rate * lag) + for i in prange(windows.shape[0]): + W_s = int(windows[i] * sample_rate) + for cnt, (l1, r1) in enumerate(zip(range(0, x.shape[0] + 1), range(W_s, x.shape[0] + 1))): + l2 = l1 - lag + if l2 < 0: l2 = 0 + r2 = r1 - lag + if r2 - l2 < W_s: r2 = l2 + W_s + X_w = x[l1:r1] + Y_w = y[l2:r2] + if normalize: + X_w = (X_w - np.mean(X_w)) / (np.std(X_w) * X_w.shape[0]) + Y_w = (Y_w - np.mean(Y_w)) / np.std(Y_w) + v = np.correlate(a=X_w, v=Y_w)[0] + if np.isnan(v): + results[r1 - 1, i] = 0.0 + else: + results[int(r1 - 1), i] = v + return results.astype(np.float32) + +start = time.time() +x = np.random.randint(0, 10, size=(4000000,)).astype(np.float64) +y = np.random.randint(0, 10, size=(4000000,)).astype(np.float64) +p = sliding_two_signal_crosscorrelation(x=x, y=y, windows=np.array([1.0, 1.2]), sample_rate=10.0, normalize=True, lag=0.0) +print(time.time() - start) + + + + + + diff --git a/simba/sandbox/sliding_displacement.py b/simba/sandbox/sliding_displacement.py new file mode 100644 index 000000000..25dd717d5 --- /dev/null +++ b/simba/sandbox/sliding_displacement.py @@ -0,0 +1,40 @@ +import numpy as np +from numba import njit + +@njit('(int32[:,:], float64[:], float64, float64)') +def sliding_displacement(x: np.ndarray, + time_windows: np.ndarray, + fps: float, + px_per_mm: float) -> np.ndarray: + + """ + Calculate sliding Euclidean displacement of a body-part point over time windows. + + .. image:: _static/img/sliding_displacement.png + :width: 600 + :align: center + + :param np.ndarray x: An array of shape (n, 2) representing the time-series sequence of 2D points. + :param np.ndarray time_windows: Array of time windows (in seconds). + :param float fps: The sample rate (frames per second) of the sequence. + :param float px_per_mm: Pixels per millimeter conversion factor. + :return np.ndarray: 1D array containing the calculated displacements. + + :example: + >>> x = np.random.randint(0, 50, (100, 2)).astype(np.int32) + >>> sliding_displacement(x=x, time_windows=np.array([1.0]), fps=1.0, px_per_mm=1.0) + """ + + results = np.full((x.shape[0], time_windows.shape[0]), -1.0) + for i in range(time_windows.shape[0]): + w = int(time_windows[i] * fps) + for j in range(w, x.shape[0]): + c, s = x[j], x[j-w] + results[j, i] = (np.sqrt((s[0] - c[0]) ** 2 + (s[1] - c[1]) ** 2)) / px_per_mm + return results.astype(np.float32) + + + + + + diff --git a/simba/sandbox/sliding_iqr.py b/simba/sandbox/sliding_iqr.py new file mode 100644 index 000000000..86082356a --- /dev/null +++ b/simba/sandbox/sliding_iqr.py @@ -0,0 +1,39 @@ +import numpy as np +from numba import cuda, njit + + + +@njit("(float32[:], float64, float64)") +def sliding_iqr(x: np.ndarray, window_size: float, sample_rate: float) -> np.ndarray: + """ + Compute the sliding interquartile range (IQR) for a 1D array of feature values. + + :param ndarray x: 1D array representing the feature values for which the IQR will be calculated. + :param float window_size: Size of the sliding window, in seconds. This value determines how many samples are included in each window. + :param float sample_rate: The sampling rate in samples per second, e.g., fps. + :returns : Sliding IQR values + :rtype: np.ndarray + + :references: + .. 
[1] Hession, Leinani E., Gautam S. Sabnis, Gary A. Churchill, and Vivek Kumar. “A Machine-Vision-Based Frailty Index for Mice.” Nature Aging 2, no. 8 (August 16, 2022): 756–66. https://doi.org/10.1038/s43587-022-00266-0.
+
+    :example:
+    >>> data = np.random.randint(0, 50, (90,)).astype(np.float32)
+    >>> window_size = 0.5
+    >>> sliding_iqr(x=data, window_size=0.5, sample_rate=10.0)
+    """
+
+    frm_win = max(1, int(window_size * sample_rate))
+    results = np.full(shape=(x.shape[0],), dtype=np.float32, fill_value=-1.0)
+    for r in range(frm_win, x.shape[0]+1):
+        sorted_sample = np.sort(x[r - frm_win:r])
+        lower_idx = sorted_sample.shape[0] // 4
+        upper_idx = (3 * sorted_sample.shape[0]) // 4
+        lower_val = sorted_sample[lower_idx]
+        upper_val = sorted_sample[upper_idx]
+        results[r-1] = upper_val - lower_val
+    return results
+
+data = np.random.randint(0, 50, (90,)).astype(np.float32)
+window_size = 0.5
+sliding_iqr(x=data, window_size=0.5, sample_rate=10.0)
\ No newline at end of file
diff --git a/simba/sandbox/sliding_linearity_index.py b/simba/sandbox/sliding_linearity_index.py
new file mode 100644
index 000000000..48ce7110b
--- /dev/null
+++ b/simba/sandbox/sliding_linearity_index.py
@@ -0,0 +1,85 @@
+import numpy as np
+from numba import cuda
+import time
+from simba.utils.enums import Formats
+from simba.utils.checks import check_valid_array, check_float
+from simba.data_processors.cuda.utils import _euclid_dist, _cuda_available
+from simba.utils.errors import SimBAGPUError
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit()
+def _sliding_linearity_index_kernel(x, time_frms, results):
+    r = cuda.grid(1)
+    if r >= x.shape[0] or r < 0:
+        return
+    l = int(r - time_frms[0])
+    if l < 0 or l >= r:
+        # Window not yet fully populated: mark explicitly rather than leaving uninitialized device memory.
+        results[r] = -1.0
+        return
+    sample_x = x[l:r]
+    straight_line_distance = _euclid_dist(sample_x[0], sample_x[-1])
+    path_dist = 0
+    for i in range(1, sample_x.shape[0]):
+        path_dist += _euclid_dist(sample_x[i-1], sample_x[i])
+    if path_dist == 0:
+        results[r] = 0.0
+    else:
+        results[r] = straight_line_distance / path_dist
+
+
+def sliding_linearity_index_cuda(x: np.ndarray,
+                                 window_size: float,
+                                 sample_rate: float) -> np.ndarray:
+    """
+    Calculates the straightness (linearity) index of a path using CUDA acceleration.
+
+    The output is a value between 0 and 1, where 1 indicates a perfectly straight path.
+
+    .. csv-table::
+       :header: EXPECTED RUNTIMES
+       :file: ../../../docs/tables/sliding_spatial_density_cuda.csv
+       :widths: 10, 45, 45
+       :align: center
+       :header-rows: 1
+
+    :param np.ndarray x: An (N, M) array representing the path, where N is the number of points and M is the number of spatial dimensions (e.g., 2 for 2D or 3 for 3D). Each row represents the coordinates of a point along the path.
+    :param float window_size: The size of the sliding window in seconds. This defines the time window over which the linearity index is calculated.
+    :param float sample_rate: The sample rate in Hz (samples per second), which is used to convert the window size from seconds to frames.
+    :return: A 1D array of length N, where each element represents the linearity index of the path within a sliding window. The value is a ratio between the straight-line distance and the actual path length for each window. Values range from 0 to 1, with 1 indicating a perfectly straight path.
+    :rtype: np.ndarray
+
+    :example:
+    >>> x = np.random.randint(0, 500, (100, 2)).astype(np.float32)
+    >>> q = sliding_linearity_index_cuda(x=x, window_size=2, sample_rate=30)
+    """
+
+    check_valid_array(data=x, source=f'{sliding_linearity_index_cuda.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_float(name=f'{sliding_linearity_index_cuda.__name__} window_size', value=window_size)
+    check_float(name=f'{sliding_linearity_index_cuda.__name__} sample_rate', value=sample_rate)
+    x = np.ascontiguousarray(x)
+    time_window_frames = np.array([max(1.0, np.ceil(window_size * sample_rate))])
+    if not _cuda_available()[0]:
+        raise SimBAGPUError(msg='No GPU found', source=sliding_linearity_index_cuda.__name__)
+    x_dev = cuda.to_device(x)
+    time_window_frames_dev = cuda.to_device(time_window_frames)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    results = cuda.device_array(shape=x.shape[0], dtype=np.float16)
+    _sliding_linearity_index_kernel[bpg, THREADS_PER_BLOCK](x_dev, time_window_frames_dev, results)
+    return results.copy_to_host()
+
+
+for cnt in [1000000, 2000000, 4000000, 8000000, 16000000, 32000000, 64000000, 128000000, 256000000, 512000000, 1024000000]:
+    times = []
+    for i in range(3):
+        start = time.perf_counter()
+        x = np.random.randint(0, 500, (cnt, 2))
+        results_cuda = sliding_linearity_index_cuda(x=x, window_size=2.5, sample_rate=30)
+        elapsed = time.perf_counter() - start
+        times.append(elapsed)
+    print(cnt, '\t'*2, np.mean(times), np.std(times))
+
+# x = np.random.randint(0, 500, (100, 2)).astype(np.float32)
+# q = sliding_linearity_index_cuda(x=x, window_size=2, sample_rate=30)
diff --git a/simba/sandbox/sliding_mean.py b/simba/sandbox/sliding_mean.py
new file mode 100644
index 000000000..ff00fcd5e
--- /dev/null
+++ b/simba/sandbox/sliding_mean.py
@@ -0,0 +1,54 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit(device=True)
+def _cuda_sum(x: np.ndarray):
+    s = 0
+    for i in range(x.shape[0]):
+        s += x[i]
+    return s
+
+@cuda.jit
+def _cuda_sliding_mean(x: np.ndarray, d: np.ndarray, results: np.ndarray):
+    r = cuda.grid(1)
+    l = np.int32(r - (d[0] - 1))
+    if r >= results.shape[0]:
+        return
+    if l < 0:
+        results[r] = -1
+    else:
+        x_i = x[l:r+1]
+        s = _cuda_sum(x_i)
+        results[r] = s / x_i.shape[0]
+
+def sliding_mean(x: np.ndarray, time_window: float, sample_rate: int) -> np.ndarray:
+    """
+    Computes the mean of values within a sliding window over a 1D numpy array `x` using CUDA for acceleration.
+
+    .. image:: _static/img/sliding_mean_cuda.png
+       :width: 500
+       :align: center
+
+    :param np.ndarray x: The input 1D numpy array of floats. The array over which the sliding window mean is computed.
+    :param float time_window: The size of the sliding window in seconds. This window slides over the array `x` to compute the mean.
+    :param int sample_rate: The number of samples per second in the array `x`. This is used to convert the time-based window size into the number of samples.
+    :return np.ndarray: A numpy array containing the mean of values within each position of the sliding window.
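+
+    With window length :math:`W = \\lceil \\text{time window} \\times \\text{sample rate} \\rceil`, each output position :math:`t \\geq W - 1` holds:
+
+    .. math::
+
+       \\bar{x}_t = \\frac{1}{W} \\sum_{i=t-W+1}^{t} x_i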
+
+    :example:
+    >>> x = np.random.randint(1, 11, (100, )).astype(np.float32)
+    >>> time_window = 1
+    >>> sample_rate = 10
+    >>> r_x = sliding_mean(x=x, time_window=time_window, sample_rate=10)
+    """
+    x = np.ascontiguousarray(x).astype(np.float32)
+    window_size = np.array([np.ceil(time_window * sample_rate)])
+    x_dev = cuda.to_device(x)
+    delta_dev = cuda.to_device(window_size)
+    results = cuda.device_array(x.shape, dtype=np.float32)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_sliding_mean[bpg, THREADS_PER_BLOCK](x_dev, delta_dev, results)
+    results = results.copy_to_host()
+    return results
\ No newline at end of file
diff --git a/simba/sandbox/sliding_min.py b/simba/sandbox/sliding_min.py
new file mode 100644
index 000000000..a3db6769d
--- /dev/null
+++ b/simba/sandbox/sliding_min.py
@@ -0,0 +1,52 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit
+def _cuda_sliding_min(x: np.ndarray, d: np.ndarray, results: np.ndarray):
+    def _cuda_min(a, b):
+        return a if a < b else b
+    r = cuda.grid(1)
+    l = np.int32(r - (d[0]-1))
+    if r >= results.shape[0]:
+        return
+    if l < 0:
+        results[r] = -1
+    else:
+        x_i = x[l:r+1]
+        local_min = x_i[0]
+        for k in range(x_i.shape[0]):
+            local_min = _cuda_min(local_min, x_i[k])
+        results[r] = local_min
+
+def sliding_min(x: np.ndarray, time_window: float, sample_rate: int) -> np.ndarray:
+    """
+    Computes the minimum value within a sliding window over a 1D numpy array `x` using CUDA for acceleration.
+
+    .. image:: _static/img/sliding_min_cuda.png
+       :width: 500
+       :align: center
+
+    :param np.ndarray x: Input 1D numpy array of floats. The array over which the sliding window minimum is computed.
+    :param float time_window: The size of the sliding window in seconds.
+    :param int sample_rate: The sampling rate of the data, which determines the number of samples per second.
+    :return: A numpy array containing the minimum value for each position of the sliding window.
+
+    :example:
+    >>> x = np.arange(0, 10000000)
+    >>> time_window = 1
+    >>> sample_rate = 10
+    >>> sliding_min(x=x, time_window=time_window, sample_rate=sample_rate)
+    """
+
+    x = np.ascontiguousarray(x).astype(np.float32)
+    window_size = np.array([np.ceil(time_window * sample_rate)])
+    x_dev = cuda.to_device(x)
+    delta_dev = cuda.to_device(window_size)
+    results = cuda.device_array(x.shape, dtype=np.float32)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_sliding_min[bpg, THREADS_PER_BLOCK](x_dev, delta_dev, results)
+    results = results.copy_to_host()
+    return results
diff --git a/simba/sandbox/sliding_rayleigh_z.py b/simba/sandbox/sliding_rayleigh_z.py
new file mode 100644
index 000000000..ff0a65fc0
--- /dev/null
+++ b/simba/sandbox/sliding_rayleigh_z.py
@@ -0,0 +1,76 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+from typing import Optional, Tuple
+
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+import cupy as cp
+import numpy as np
+
+
+def sliding_rayleigh_z(x: np.ndarray,
+                       time_window: float,
+                       sample_rate: float,
+                       batch_size: Optional[int] = int(5e+7)) -> Tuple[np.ndarray, np.ndarray]:
+
+    """
+    Computes the Rayleigh Z-statistic over a sliding window for a given time series of angles.
+
+    This function calculates the Rayleigh Z-statistic, which tests the null hypothesis that the population of angles
+    is uniformly distributed around the circle.
The calculation is performed over a sliding window across the input + time series, and results are computed in batches for memory efficiency. + + Data is processed using GPU acceleration via CuPy, which allows for faster computation compared to a CPU-based approach. + + .. note:: + Adapted from ``pingouin.circular.circ_rayleigh`` and ``pycircstat.tests.rayleigh``. + + + **Rayleigh Z-statistic:** + + The Rayleigh Z-statistic is given by: + + .. math:: + + R = \frac{1}{n} \sqrt{\left(\sum_{i=1}^{n} \cos(\theta_i)\right)^2 + \left(\sum_{i=1}^{n} \sin(\theta_i)\right)^2} + + where: + - :math:`\theta_i` are the angles in the window. + - :math:`n` is the number of angles in the window. + + :param np.ndarray x: Input array of angles in degrees. Should be a 1D numpy array. + :param float time_window: The size of the sliding window in time units (e.g., seconds). + :param float sample_rate: The sampling rate of the input time series in samples per time unit (e.g., Hz, fps). + :param Optional[int] batch_size: The number of samples to process in each batch. Default is 5e7 (50m). Reducing this value may save memory at the cost of longer computation time. + :return: + A tuple containing two numpy arrays: + - **z_results**: Rayleigh Z-statistics for each position in the input array where the window was fully applied. + - **p_results**: Corresponding p-values for the Rayleigh Z-statistics. + :rtype: Tuple[np.ndarray, np.ndarray] + """ + + n = x.shape[0] + x = cp.asarray(x, dtype=cp.float16) + z_results = cp.zeros_like(x, dtype=cp.float16) + p_results = cp.zeros_like(x, dtype=cp.float16) + x = np.deg2rad(x).astype(cp.float16) + window_size = int(np.ceil(time_window * sample_rate)) + for cnt, left in enumerate(range(0, n, batch_size)): + right = int(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size + 1 + x_batch = x[left:right] + x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size).astype(cp.float16) + cos_sums = cp.nansum(cp.cos(x_batch), axis=1) ** 2 + sin_sums = cp.nansum(cp.sin(x_batch), axis=1) ** 2 + R = cp.sqrt(cos_sums + sin_sums) / window_size + Z = window_size * (R**2) + P = cp.exp(np.sqrt(1 + 4 * window_size + 4 * (window_size ** 2 - R ** 2)) - (1 + 2 * window_size)) + z_results[left + window_size - 1:right] = Z + p_results[left + window_size - 1:right] = P + + return z_results.get(), p_results.get() \ No newline at end of file diff --git a/simba/sandbox/sliding_resultant_vector_length.py b/simba/sandbox/sliding_resultant_vector_length.py new file mode 100644 index 000000000..4d85e21cf --- /dev/null +++ b/simba/sandbox/sliding_resultant_vector_length.py @@ -0,0 +1,63 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +from typing import Optional + +import cupy +import numpy as np + + +def sliding_resultant_vector_length(x: np.ndarray, + time_window: float, + sample_rate: int, + batch_size: Optional[int] = 3e+7) -> np.ndarray: + + """ + Calculate the sliding resultant vector length over a time window for a series of angles. + + This function computes the resultant vector length (R) for each window of angles in the input array `x`. + The resultant vector length is a measure of the concentration of angles, and it ranges from 0 to 1, where 1 + indicates all angles point in the same direction, and 0 indicates uniform distribution of angles. + + For a given sliding window of angles, the resultant vector length :math:`R` is calculated using the following formula: + + .. 
math:: + + R = \\frac{1}{N} \\sqrt{\\left(\\sum_{i=1}^{N} \\cos(\\theta_i)\\right)^2 + \\left(\\sum_{i=1}^{N} \\sin(\\theta_i)\\right)^2} + + where: + + - :math:`\\theta_i` are the angles in radians within the sliding window + - :math:`N` is the number of samples in the window + + The computation is performed in a sliding window manner over the entire array, utilizing GPU acceleration + with CuPy for efficiency, especially on large datasets. + + + :param np.ndarray x: Input array containing angle values in degrees. The array should be 1-dimensional. + :param float time_window: Time duration for the sliding window, in seconds. This determines the number of samples in each window based on the `sample_rate`. + :param int sample_rate: The number of samples per second (i.e., FPS). This is used to calculate the window size in terms of array indices. + :param Optional[int] batch_size: The maximum number of elements to process in each batch. This is used to handle large arrays by processing them in chunks to avoid memory overflow. Defaults to 3e+7 (30 million elements). + :return np.ndarray: A 1D numpy array of the same length as `x`, containing the resultant vector length for each sliding window. Values before the window is fully populated will be set to -1. + + + :example: + >>> x = np.random.randint(0, 361, (5000, )).astype(np.int32) + >>> results = sliding_resultant_vector_length(x, 1, 10) + """ + + window_size = np.ceil(time_window * sample_rate).astype(np.int64) + n = x.shape[0] + results = cupy.full(x.shape[0], -1, dtype=np.float32) + for cnt, left in enumerate(range(0, int(n), int(batch_size))): + right = np.int32(min(left + batch_size, n)) + if cnt > 0: + left = left - window_size+1 + x_batch = cupy.asarray(x[left:right]) + x_batch = cupy.lib.stride_tricks.sliding_window_view(x_batch, window_size) + x_batch = np.deg2rad(x_batch) + cos, sin = cupy.cos(x_batch).astype(np.float32), cupy.sin(x_batch).astype(np.float32) + cos_sum, sin_sum = cupy.sum(cos, axis=1), cupy.sum(sin, axis=1) + r = np.sqrt(cos_sum ** 2 + sin_sum ** 2) / window_size + results[left+window_size-1:right] = r + return results.get() \ No newline at end of file diff --git a/simba/sandbox/sliding_spatial_density.py b/simba/sandbox/sliding_spatial_density.py new file mode 100644 index 000000000..90457c1ca --- /dev/null +++ b/simba/sandbox/sliding_spatial_density.py @@ -0,0 +1,137 @@ +import time + +import numpy as np +import pandas as pd +from simba.utils.checks import check_valid_array, check_float +from simba.utils.enums import Formats +from simba.data_processors.cuda.utils import _euclid_dist +from numba import cuda + +THREADS_PER_BLOCK = 1024 + +@cuda.jit() +def _sliding_spatial_density_kernel(x, time_window, radius, results): + r = cuda.grid(1) + if r >= x.shape[0] or r < 0: + return + l = int(r - time_window[0]) + if l < 0 or l >= r: + return + total_neighbors = 0 + n_points = r - l + if n_points <= 0: + results[r] = 0 + return + for i in range(l, r): + for j in range(l, r): + if i != j: + dist = _euclid_dist(x[i], x[j]) + if dist <= radius[0]: + total_neighbors += 1 + + results[r] = total_neighbors / n_points if n_points > 0 else 0 + + +def sliding_spatial_density_cuda(x: np.ndarray, + radius: float, + pixels_per_mm: float, + window_size: float, + sample_rate: float) -> np.ndarray: + """ + Computes the spatial density of points within a moving window along a trajectory using CUDA for acceleration. 
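+
+    For a window of :math:`N` trajectory points and pixel radius :math:`r`, the density at time :math:`t` is the mean
+    number of within-radius neighbors per point:
+
+    .. math::
+
+       D_t = \\frac{1}{N} \\sum_{i \\neq j} \\mathbb{1}\\left[ \\lVert p_i - p_j \\rVert \\leq r \\right]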
+ + This function calculates a spatial density measure for each point along a 2D trajectory path by counting the number + of neighboring points within a specified radius. The computation is performed within a sliding window that moves + along the trajectory, using GPU acceleration to handle large datasets efficiently. + + .. csv-table:: + :header: EXPECTED RUNTIMES + :file: ../../../docs/tables/sliding_spatial_density_cuda.csv + :widths: 10, 45, 45 + :align: center + :header-rows: 1 + + :param np.ndarray x: A 2D array of shape (N, 2), where N is the number of points and each point has two spatial coordinates (x, y). The array represents the trajectory path of points in a 2D space (e.g., x and y positions in space). + :param float radius: The radius (in millimeters) within which to count neighboring points around each trajectory point. Defines the area of interest around each point. + :param float pixels_per_mm: The scaling factor that converts the physical radius (in millimeters) to pixel units for spatial density calculations. + :param float window_size: The size of the sliding window (in seconds or points) to compute the density of points. A larger window size will consider more points in each density calculation. + :param float sample_rate: The rate at which to sample the trajectory points (e.g., frames per second or samples per unit time). It adjusts the granularity of the sliding window. + :return: A 1D numpy array where each element represents the computed spatial density for the trajectory at the corresponding point in time (or frame). Higher values indicate more densely packed points within the specified radius, while lower values suggest more sparsely distributed points. + :rtype: np.ndarray + + :example: + >>> df = pd.read_csv("/mnt/c/troubleshooting/two_black_animals_14bp/project_folder/csv/outlier_corrected_movement_location/Test_3.csv") + >>> x = df[['Nose_1_x', 'Nose_1_y']].values + >>> results_cuda = sliding_spatial_density_cuda(x=x, radius=10.0, pixels_per_mm=4.0, window_size=1, sample_rate=20) + + """ + + check_valid_array(data=x, source=f'{sliding_spatial_density_cuda.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value) + check_float(name=f'{sliding_spatial_density_cuda.__name__} radius', value=radius) + check_float(name=f'{sliding_spatial_density_cuda.__name__} window_size', value=window_size) + check_float(name=f'{sliding_spatial_density_cuda.__name__} sample_rate', value=sample_rate) + check_float(name=f'{sliding_spatial_density_cuda.__name__} pixels_per_mm', value=pixels_per_mm) + + x = np.ascontiguousarray(x) + pixel_radius = np.array([np.ceil(max(1.0, (radius * pixels_per_mm)))]).astype(np.float64) + time_window_frames = np.array([np.ceil(window_size * sample_rate)]) + x_dev = cuda.to_device(x) + time_window_frames_dev = cuda.to_device(time_window_frames) + radius_dev = cuda.to_device(pixel_radius) + bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK + results = cuda.device_array(shape=x.shape[0], dtype=np.float16) + _sliding_spatial_density_kernel[bpg, THREADS_PER_BLOCK](x_dev, time_window_frames_dev, radius_dev, results) + return results.copy_to_host() + +def sliding_spatial_density(x: np.ndarray, + radius: float, + pixels_per_mm: float, + window_size: float, + sample_rate: float) -> np.ndarray: + + pixel_radius = np.ceil(max(1.0, (radius * pixels_per_mm))) + frame_window_size = int(np.ceil(max(1.0, (window_size * sample_rate)))) + results = np.full(shape=(x.shape[0]), fill_value=np.nan, 
dtype=np.float32) + for r in range(frame_window_size, x.shape[0] + 1): + l = r - frame_window_size + sample_x = x[l:r] + n_points, total_neighbors = sample_x.shape[0], 0 + for i in range(n_points): + distances = np.linalg.norm(sample_x - sample_x[i], axis=1) + neighbors = np.sum(distances <= pixel_radius) - 1 + total_neighbors += neighbors + results[r - 1] = total_neighbors / n_points + return results + + + +for cnt in [1000000, 2000000, 4000000, 8000000, 16000000, 32000000, 64000000, 128000000, 256000000, 512000000, 1024000000]: + times = [] + for i in range(3): + start = time.perf_counter() + x = np.random.randint(0, 500, (cnt, 2)) + results_cuda = sliding_spatial_density(x=x, radius=10.0, pixels_per_mm=4.0, window_size=2.5, sample_rate=30) + elapsed = time.perf_counter() - start + times.append(elapsed) + print(cnt, '\t'*2, np.mean(times), np.std(times)) + + + + + +# df = pd.read_csv("/mnt/c/troubleshooting/two_black_animals_14bp/project_folder/csv/outlier_corrected_movement_location/Test_3.csv") +# x = df[['Nose_1_x', 'Nose_1_y']].values +# results_cuda = sliding_spatial_density_cuda(x=x, radius=10.0, pixels_per_mm=4.0, window_size=1, sample_rate=20) +# results_numpy = sliding_spatial_density(x=x, radius=10.0, pixels_per_mm=4.0, window_size=1, sample_rate=20) +# print(results_cuda) +# print(results_numpy) + + + + +# +# x = np.array([[0, 100], [50, 98], [10, 872], [100, 27], [103, 2], [927, 286], [10, 10]]) +# +# +# #x = np.random.randint(0, 20, (20, 2)) # Example trajectory with 100 points in 2D space + diff --git a/simba/sandbox/sliding_spearmans_rank.py b/simba/sandbox/sliding_spearmans_rank.py new file mode 100644 index 000000000..85dae6af6 --- /dev/null +++ b/simba/sandbox/sliding_spearmans_rank.py @@ -0,0 +1,67 @@ +__author__ = "Simon Nilsson" +__email__ = "sronilsson@gmail.com" + +from typing import Optional + +import cupy as cp +import numpy as np + + +def sliding_spearmans_rank(x: np.ndarray, + y: np.ndarray, + time_window: float, + sample_rate: int, + batch_size: Optional[int] = int(1.6e+7)) -> np.ndarray: + """ + Computes the Spearman's rank correlation coefficient between two 1D arrays `x` and `y` + over sliding windows of size `time_window * sample_rate`. The computation is performed + in batches to optimize memory usage, leveraging GPU acceleration with CuPy. + + .. math:: + \rho = 1 - \frac{6 \sum d_i^2}{n_w(n_w^2 - 1)} + + .. math:: + The function uses CuPy to perform GPU-accelerated calculations. Ensure that your environment + supports GPU computation with CuPy installed. + + Where: + - \( \rho \) is the Spearman's rank correlation coefficient. + - \( d_i \) is the difference between the ranks of corresponding elements in the sliding window. + - \( n_w \) is the size of the sliding window. + + :param np.ndarray x: The first 1D array containing the values for Feature 1. + :param np.ndarray y: The second 1D array containing the values for Feature 2. + :param float time_window: The size of the sliding window in seconds. + :param int sample_rate: The sampling rate (samples per second) of the data. + :param Optional[int] batch_size: The size of each batch to process at a time for memory efficiency. Defaults to 1.6e7. + :return: A 1D numpy array containing the Spearman's rank correlation coefficient for each sliding window. 
+    :rtype: np.ndarray
+
+    :example:
+    >>> x = np.array([9, 10, 13, 22, 15, 18, 15, 19, 32, 11])
+    >>> y = np.array([11, 12, 15, 19, 21, 26, 19, 20, 22, 19])
+    >>> sliding_spearmans_rank(x, y, time_window=0.5, sample_rate=2)
+    """
+
+    window_size = int(np.ceil(time_window * sample_rate))
+    n = x.shape[0]
+    results = cp.full(n, -1, dtype=cp.float32)
+
+    for cnt, left in enumerate(range(0, n, batch_size)):
+        right = int(min(left + batch_size, n))
+        if cnt > 0:
+            left = left - window_size + 1
+        x_batch = cp.asarray(x[left:right])
+        y_batch = cp.asarray(y[left:right])
+        x_batch = cp.lib.stride_tricks.sliding_window_view(x_batch, window_size)
+        y_batch = cp.lib.stride_tricks.sliding_window_view(y_batch, window_size)
+        # Ranks via double argsort; note that ties receive ordinal (not averaged) ranks.
+        rank_x = cp.argsort(cp.argsort(x_batch, axis=1), axis=1)
+        rank_y = cp.argsort(cp.argsort(y_batch, axis=1), axis=1)
+        d_squared = cp.sum((rank_x - rank_y) ** 2, axis=1)
+        n_w = window_size
+        s = 1 - (6 * d_squared) / (n_w * (n_w ** 2 - 1))
+        results[left + window_size - 1:right] = s
+
+    return cp.asnumpy(results)
\ No newline at end of file
diff --git a/simba/sandbox/sliding_std.py b/simba/sandbox/sliding_std.py
new file mode 100644
index 000000000..d33ccb451
--- /dev/null
+++ b/simba/sandbox/sliding_std.py
@@ -0,0 +1,58 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import math
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit(device=True)
+def _cuda_sum(x: np.ndarray):
+    s = 0
+    for i in range(x.shape[0]):
+        s += x[i]
+    return s
+
+@cuda.jit(device=True)
+def _cuda_std(x: np.ndarray, x_hat: float):
+    # Sum of squared deviations from the window mean, converted to the population standard deviation.
+    ss = 0
+    for i in range(x.shape[0]):
+        ss += (x[i] - x_hat) ** 2
+    return math.sqrt(ss / x.shape[0])
+
+@cuda.jit(device=False)
+def _cuda_sliding_std(x: np.ndarray, d: np.ndarray, results: np.ndarray):
+    r = cuda.grid(1)
+    l = np.int32(r - (d[0] - 1))
+    if r >= results.shape[0]:
+        return
+    if l < 0:
+        results[r] = -1
+    else:
+        x_i = x[l:r + 1]
+        s = _cuda_sum(x_i)
+        m = s / x_i.shape[0]
+        results[r] = _cuda_std(x_i, m)
+
+def sliding_std(x: np.ndarray, time_window: float, sample_rate: int) -> np.ndarray:
+    """
+    Computes the standard deviation of values within a sliding window over a 1D numpy array `x` using CUDA for acceleration.
+
+    :param np.ndarray x: The input 1D numpy array of floats. The array over which the sliding window standard deviation is computed.
+    :param float time_window: The size of the sliding window in seconds. This window slides over the array `x` to compute the standard deviation.
+    :param int sample_rate: The number of samples per second in the array `x`. This is used to convert the time-based window size into the number of samples.
+    :return np.ndarray: A numpy array containing the standard deviation of values within each position of the sliding window.
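+
+    With window length :math:`W`, the population standard deviation at position :math:`t` is:
+
+    .. math::
+
+       \\sigma_t = \\sqrt{\\frac{1}{W} \\sum_{i=t-W+1}^{t} (x_i - \\bar{x}_t)^2}
+
+    Positions before the first full window are set to -1.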
+
+    :example:
+    >>> x = np.random.randint(1, 11, (100, )).astype(np.float32)
+    >>> time_window = 1
+    >>> sample_rate = 10
+    >>> r_x = sliding_std(x=x, time_window=time_window, sample_rate=sample_rate)
+    """
+    x = np.ascontiguousarray(x).astype(np.float32)
+    window_size = np.array([np.ceil(time_window * sample_rate)])
+    x_dev = cuda.to_device(x)
+    delta_dev = cuda.to_device(window_size)
+    results = cuda.device_array(x.shape, dtype=np.float32)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_sliding_std[bpg, THREADS_PER_BLOCK](x_dev, delta_dev, results)
+    results = results.copy_to_host()
+    return results
\ No newline at end of file
diff --git a/simba/sandbox/sliding_sum.py b/simba/sandbox/sliding_sum.py
new file mode 100644
index 000000000..89f15045e
--- /dev/null
+++ b/simba/sandbox/sliding_sum.py
@@ -0,0 +1,47 @@
+__author__ = "Simon Nilsson"
+__email__ = "sronilsson@gmail.com"
+
+import numpy as np
+from numba import cuda
+
+THREADS_PER_BLOCK = 1024
+
+@cuda.jit
+def _cuda_sliding_sum(x: np.ndarray, d: np.ndarray, results: np.ndarray):
+    r = cuda.grid(1)
+    if r >= results.shape[0]:
+        return
+    l = np.int32(r - (d[0] - 1))
+    if l < 0:
+        results[r] = -1
+    else:
+        x_i = x[l:r + 1]
+        local_sum = 0
+        for k in range(x_i.shape[0]):
+            local_sum += x_i[k]
+        results[r] = local_sum
+
+def sliding_sum(x: np.ndarray, time_window: float, sample_rate: int) -> np.ndarray:
+    """
+    Computes the sum of values within a sliding window over a 1D numpy array `x` using CUDA for acceleration.
+
+    :param np.ndarray x: The input 1D numpy array of floats. The array over which the sliding window sum is computed.
+    :param float time_window: The size of the sliding window in seconds. This window slides over the array `x` to compute the sum.
+    :param int sample_rate: The number of samples per second in the array `x`. This is used to convert the time-based window size into the number of samples.
+    :return np.ndarray: A numpy array containing the sum of values within each position of the sliding window.
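+
+    .. note::
+       The first ``window_size - 1`` positions cannot hold a complete window and are returned as ``-1``.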
+
+    :example:
+    >>> x = np.random.randint(1, 11, (100, )).astype(np.float32)
+    >>> time_window = 1
+    >>> sample_rate = 10
+    >>> r_x = sliding_sum(x=x, time_window=time_window, sample_rate=sample_rate)
+    """
+    x = np.ascontiguousarray(x).astype(np.float32)
+    window_size = np.array([np.ceil(time_window * sample_rate)])
+    x_dev = cuda.to_device(x)
+    delta_dev = cuda.to_device(window_size)
+    results = cuda.device_array(x.shape, dtype=np.float32)
+    bpg = (x.shape[0] + (THREADS_PER_BLOCK - 1)) // THREADS_PER_BLOCK
+    _cuda_sliding_sum[bpg, THREADS_PER_BLOCK](x_dev, delta_dev, results)
+    results = results.copy_to_host()
+    return results
+
+
diff --git a/simba/sandbox/smoothing.py b/simba/sandbox/smoothing.py
new file mode 100644
index 000000000..bb9ae8186
--- /dev/null
+++ b/simba/sandbox/smoothing.py
@@ -0,0 +1,120 @@
+__author__ = "Simon Nilsson"
+
+import os
+from typing import Union, List, Optional
+from copy import deepcopy
+
+import pandas as pd
+
+try:
+    from typing import Literal
+except ImportError:
+    from typing_extensions import Literal
+
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.checks import (check_str, check_int, check_valid_lst, check_file_exist_and_readable)
+from simba.utils.enums import TagNames
+from simba.utils.errors import InvalidInputError, NoFilesFoundError
+from simba.utils.printing import SimbaTimer, log_event, stdout_success
+from simba.utils.read_write import (find_files_of_filetypes_in_directory, find_video_of_file, get_fn_ext, read_video_info, get_video_meta_data, read_df, write_df, copy_files_to_directory)
+from simba.utils.data import savgol_smoother, df_smoother
+
+
+class Smoothing(ConfigReader):
+    """
+    Smooth pose-estimation data according to user-defined method.
+
+    .. image:: _static/img/smoothing.gif
+       :width: 600
+       :align: center
+
+    .. note::
+       `Smoothing tutorial `__.
+
+    .. important::
+       The smoothed data overwrites the original data on disk. If the original data is required, pass ``copy_originals = True`` to save a copy of the original data.
+
+    :param Union[str, os.PathLike] config_path: Path to SimBA project config file in Configparser format.
+    :param Union[str, os.PathLike, List[Union[str, os.PathLike]]] data_path: Path to a directory containing pose-estimation data, to a file containing pose-estimation data, or a list of paths containing pose-estimation data.
+    :param int time_window: Rolling time window in milliseconds to use when smoothing. Larger time-windows result in greater smoothing.
+    :param Optional[Literal["gaussian", "savitzky-golay"]] method: Type of smoothing method. OPTIONS: ``gaussian``, ``savitzky-golay``. Default: ``savitzky-golay``.
+    :param bool multi_index_df_headers: If True, the incoming data are multi-index column dataframes. Default: False.
+    :param bool copy_originals: If True, the pre-smoothing, original data will be stored in a subdirectory of the original data. The subdirectory is named according to the type of smoothing method and datetime of the operation.
+
+    :references:
+       .. [1] `Video expected output `__.
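+
+    .. note::
+       Savitzky-Golay smoothing is a rolling low-order polynomial fit. A minimal stand-alone sketch of the idea
+       on a single hypothetical coordinate column (SimBA's own ``savgol_smoother`` operates on complete pose
+       dataframes and derives the window length from each video's FPS):
+
+       >>> import numpy as np
+       >>> from scipy.signal import savgol_filter
+       >>> xs = np.random.randint(0, 500, (100,)).astype(np.float64)  # hypothetical body-part x-coordinates
+       >>> window_frames = 15  # odd frame count, roughly time_window (ms) / 1000 * fps
+       >>> smoothed = savgol_filter(xs, window_length=window_frames, polyorder=3)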
+
+    :examples:
+    >>> smoother = Smoothing(data_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/csv/input_csv/Together_1.csv', config_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/two_black_animals_14bp/project_folder/project_config.ini', method='Savitzky-Golay', time_window=500, multi_index_df_headers=True, copy_originals=True)
+    >>> smoother.run()
+    """
+
+    def __init__(self,
+                 config_path: Union[str, os.PathLike],
+                 data_path: Union[str, os.PathLike, List[Union[str, os.PathLike]]],
+                 time_window: int,
+                 method: Optional[Literal["gaussian", "savitzky-golay"]] = 'Savitzky-Golay',
+                 multi_index_df_headers: Optional[bool] = False,
+                 copy_originals: Optional[bool] = False) -> None:
+
+        ConfigReader.__init__(self, config_path=config_path, read_video_info=False)
+        log_event(logger_name=str(self.__class__.__name__), log_type=TagNames.CLASS_INIT.value, msg=self.create_log_msg_from_init_args(locals=locals()))
+        if isinstance(data_path, list):
+            check_valid_lst(data=data_path, source=self.__class__.__name__, valid_dtypes=(str,))
+            for i in data_path: check_file_exist_and_readable(file_path=i)
+            self.file_paths = deepcopy(data_path)
+        elif os.path.isdir(data_path):
+            self.file_paths = find_files_of_filetypes_in_directory(directory=data_path, extensions=[f'.{self.file_type}'], raise_error=True)
+        elif os.path.isfile(data_path):
+            check_file_exist_and_readable(file_path=data_path)
+            self.file_paths = [data_path]
+        else:
+            raise InvalidInputError(msg=f'{data_path} is not a valid data directory, a valid file path, or a valid list of file paths', source=self.__class__.__name__)
+        check_int(value=time_window, min_value=1, name=f'{self.__class__.__name__} time_window')
+        check_str(name=f'{self.__class__.__name__} method', value=method.lower(), options=("gaussian", "savitzky-golay"))
+        if copy_originals:
+            self.originals_dir = os.path.join(os.path.dirname(self.file_paths[0]), f"Pre_{method}_{time_window}_smoothing_{self.datetime}")
+            os.makedirs(self.originals_dir)
+        self.multi_index_df_headers, self.method, self.time_window, self.copy_originals = multi_index_df_headers, method.lower(), time_window, copy_originals
+
+    def __insert_multiindex_header(self, df: pd.DataFrame):
+        multi_idx_header = []
+        for i in range(len(df.columns)):
+            multi_idx_header.append(("IMPORTED_POSE", "IMPORTED_POSE", list(df.columns)[i]))
+        df.columns = pd.MultiIndex.from_tuples(multi_idx_header)
+        return df
+
+    def run(self):
+        for file_cnt, file_path in enumerate(self.file_paths):
+            df = read_df(file_path=file_path, file_type=self.file_type, check_multiindex=True)
+            video_timer = SimbaTimer(start=True)
+            _, video_name, _ = get_fn_ext(filepath=file_path)
+            video_path = find_video_of_file(video_dir=self.video_dir, filename=video_name, raise_error=False, warning=False)
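+            # SimBA needs the video FPS to convert the millisecond time-window into a frame count. The FPS is
+            # read from the video file when available, otherwise from the project's video_info csv (video_logs.csv).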
+            if video_path is None:
+                if not os.path.isfile(self.video_info_path):
+                    raise NoFilesFoundError(msg=f"To perform smoothing, SimBA needs to read the video FPS. SimBA could not find the video {video_name} represented in the {self.video_dir} directory or in the {self.video_info_path} file. Please import the video and/or include it in the video_logs.csv file so SimBA can know the video FPS", source=self.__class__.__name__)
+                else:
+                    self.video_info_df = self.read_video_info_csv(file_path=self.video_info_path)
+                    video_info = read_video_info(vid_info_df=self.video_info_df, video_name=video_name, raise_error=False)
+                    if video_info is None:
+                        raise NoFilesFoundError(msg=f"To perform smoothing, SimBA needs to read the video FPS. SimBA could not find the video {video_name} represented in the {self.video_dir} directory or in the {self.video_info_path} file. Please import the video and/or include it in the video_logs.csv file so SimBA can know the video FPS", source=self.__class__.__name__)
+                    fps = video_info[2]
+            else:
+                fps = get_video_meta_data(video_path=video_path)['fps']
+            if self.method == 'savitzky-golay':
+                df = savgol_smoother(data=df, fps=fps, time_window=self.time_window, source=video_name)
+            else:
+                df = df_smoother(data=df, fps=fps, time_window=self.time_window, source=video_name, method='gaussian')
+            if self.multi_index_df_headers:
+                df = self.__insert_multiindex_header(df=df)
+            if self.copy_originals:
+                copy_files_to_directory(file_paths=[file_path], dir=self.originals_dir)
+            write_df(df=df, file_type=self.file_type, save_path=file_path, multi_idx_header=self.multi_index_df_headers)
+            video_timer.stop_timer()
+            print(f"Video {video_name} smoothed ({self.method}: {str(self.time_window)}ms) (elapsed time {video_timer.elapsed_time_str})...")
+        self.timer.stop_timer()
+        if self.copy_originals:
+            msg = f"{len(self.file_paths)} data file(s) smoothed using {self.method} method and {self.time_window} time-window. Originals saved in the {self.originals_dir} directory."
+        else:
+            msg = f"{len(self.file_paths)} data file(s) smoothed using {self.method} method and {self.time_window} time-window."
+        stdout_success(msg=msg, elapsed_time=self.timer.elapsed_time_str, source=self.__class__.__name__)
\ No newline at end of file
diff --git a/simba/sandbox/sorensen_dice_coefficient.py b/simba/sandbox/sorensen_dice_coefficient.py
new file mode 100644
index 000000000..874d70801
--- /dev/null
+++ b/simba/sandbox/sorensen_dice_coefficient.py
@@ -0,0 +1,36 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+
+def sorensen_dice_coefficient(x: np.ndarray, y: np.ndarray) -> float:
+    """
+    Calculate Sørensen's Similarity Index between two communities/clusters.
+
+    The Sørensen similarity index, also known as the overlap index, quantifies the overlap between two populations by comparing the number of shared categories to the total number of categories in both populations. It ranges from zero, indicating no overlap, to one, representing perfect overlap.
+
+    Sørensen's Similarity Index (S) is calculated using the formula:
+
+    .. math::
+
+       S = \\frac{2 \\times |X \\cap Y|}{|X| + |Y|}
+
+    where:
+    - \( S \) is Sørensen's Similarity Index,
+    - \( X \) and \( Y \) are the sets representing the categories in the first and second communities, respectively,
+    - \( |X \cap Y| \) is the number of shared categories between the two communities,
+    - \( |X| \) and \( |Y| \) are the total number of categories in the first and second communities, respectively.
+
+    :param x: 1D numpy array with ordinal values for the first cluster/community.
+    :param y: 1D numpy array with ordinal values for the second cluster/community.
+    :return: Sørensen's Similarity Index between x and y.
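+
+    For example, if the first community contains categories ``{1, 2, 3}`` and the second ``{2, 3, 4}``, the
+    communities share two categories, giving :math:`S = (2 \\times 2) / (3 + 3) \\approx 0.67`.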
+
+    :example:
+    >>> x = np.random.randint(0, 10, (100,))
+    >>> y = np.random.randint(0, 10, (100,))
+    >>> sorensen_dice_coefficient(x=x, y=y)
+    """
+
+    check_valid_array(source=f"{sorensen_dice_coefficient.__name__} x", accepted_ndims=(1,), data=x, accepted_dtypes=(np.int32, np.int64, np.int8, int), min_axis_0=2)
+    check_valid_array(source=f"{sorensen_dice_coefficient.__name__} y", accepted_ndims=(1,), data=y, accepted_dtypes=(np.int32, np.int64, np.int8, int), min_axis_0=2)
+    x, y = set(x), set(y)
+    return 2 * len(x.intersection(y)) / (len(x) + len(y))
\ No newline at end of file
diff --git a/simba/sandbox/spatial_density_trajectory_points.py b/simba/sandbox/spatial_density_trajectory_points.py
new file mode 100644
index 000000000..dcb7f1797
--- /dev/null
+++ b/simba/sandbox/spatial_density_trajectory_points.py
@@ -0,0 +1,93 @@
+import numpy as np
+from simba.utils.enums import Formats
+from simba.utils.checks import check_valid_array, check_float
+
+def spatial_density(x: np.ndarray,
+                    radius: float,
+                    pixels_per_mm: float) -> float:
+
+    """
+    Computes the spatial density of trajectory points in a 2D array, based on the number of neighboring points
+    within a specified radius for each point in the trajectory.
+
+    Spatial density provides insights into the movement pattern along a trajectory. Higher density values indicate
+    areas where points are closely packed, which can suggest slower movement, lingering, or frequent changes in
+    direction. Lower density values suggest more spread-out points, often associated with faster, more linear movement.
+
+    :param np.ndarray x: A 2D array of shape (N, 2), where N is the number of points and each point has two spatial coordinates.
+    :param float radius: The radius (in millimeters) within which to count neighboring points around each point. Defines the area of interest around each trajectory point.
+    :param float pixels_per_mm: The scaling factor that converts the physical radius (in millimeters) to pixel units.
+    :return: A single float value representing the average spatial density of the trajectory.
+    :rtype: float
+
+    :example:
+    >>> x = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [1, 0.5], [1.5, 1.5]])
+    >>> density = spatial_density(x, pixels_per_mm=2.5, radius=5)
+    >>> high_density_points = np.array([[0, 0], [0.5, 0], [1, 0], [1.5, 0], [2, 0], [0, 0.5], [0.5, 0.5], [1, 0.5], [1.5, 0.5], [2, 0.5]])
+    >>> low_density_points = np.array([[0, 0], [5, 5], [10, 10], [15, 15], [20, 20]])
+    >>> high = spatial_density(x=high_density_points, radius=1, pixels_per_mm=1)
+    >>> low = spatial_density(x=low_density_points, radius=1, pixels_per_mm=1)
+    """
+
+    check_valid_array(data=x, source=f'{spatial_density.__name__} x', accepted_ndims=(2,), accepted_axis_1_shape=[2, ], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_float(name=f'{spatial_density.__name__} radius', value=radius)
+    check_float(name=f'{spatial_density.__name__} pixels_per_mm', value=pixels_per_mm)
+    pixel_radius = np.ceil(max(1.0, (radius * pixels_per_mm)))
+    n_points = x.shape[0]
+    total_neighbors = 0
+
+    for i in range(n_points):
+        distances = np.linalg.norm(x - x[i], axis=1)
+        neighbors = np.sum(distances <= pixel_radius) - 1
+        total_neighbors += neighbors
+
+    return total_neighbors / n_points
+
+
+
+def sliding_spatial_density(x: np.ndarray,
+                            radius: float,
+                            pixels_per_mm: float,
+                            window_size: float,
+                            sample_rate: float) -> np.ndarray:
+
+    """
+    Computes the sliding spatial density of trajectory points in a 2D array, based on the number of neighboring points
+    within a specified radius, considering the density over a moving window of points. This function accounts for the
+    spatial scale in pixels per millimeter, providing a density measurement that is adjusted for the physical scale
+    of the trajectory.
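+
+    For a window :math:`w` containing :math:`n_w` points, the reported density is the mean neighbour count within
+    the pixel radius :math:`r`:
+
+    .. math::
+       D_w = \frac{1}{n_w} \sum_{i \in w} \left| \{ j \neq i : \lVert p_j - p_i \rVert \leq r \} \right|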
+
+    :param np.ndarray x: A 2D array of shape (N, 2), where N is the number of points and each point has two spatial coordinates (x, y). The array represents the trajectory path of points in a 2D space (e.g., x and y positions in space).
+    :param float radius: The radius (in millimeters) within which to count neighboring points around each trajectory point. Defines the area of interest around each point.
+    :param float pixels_per_mm: The scaling factor that converts the physical radius (in millimeters) to pixel units for spatial density calculations.
+    :param float window_size: The size of the sliding window in seconds. A larger window size will consider more points in each density calculation.
+    :param float sample_rate: The rate at which the trajectory points were sampled (e.g., frames per second). It adjusts the granularity of the sliding window.
+    :return: A 1D numpy array where each element represents the computed spatial density for the trajectory at the corresponding point in time (or frame). Higher values indicate more densely packed points within the specified radius, while lower values suggest more sparsely distributed points.
+    :rtype: np.ndarray
+
+    :example:
+    >>> x = np.random.randint(0, 20, (100, 2))  # Example trajectory with 100 points in 2D space
+    >>> results = sliding_spatial_density(x=x, radius=5.0, pixels_per_mm=10.0, window_size=1, sample_rate=31)
+    """
+
+    pixel_radius = np.ceil(max(1.0, (radius * pixels_per_mm)))
+    frame_window_size = int(np.ceil(max(1.0, (window_size * sample_rate))))
+    results = np.full(shape=(x.shape[0]), fill_value=np.nan, dtype=np.float32)
+    for r in range(frame_window_size, x.shape[0] + 1):
+        l = r - frame_window_size
+        sample_x = x[l:r]
+        n_points, total_neighbors = sample_x.shape[0], 0
+        for i in range(n_points):
+            distances = np.linalg.norm(sample_x - sample_x[i], axis=1)
+            neighbors = np.sum(distances <= pixel_radius) - 1
+            total_neighbors += neighbors
+        results[r - 1] = total_neighbors / n_points
+
+    return results
+
+
+
+
+# high_density_points = np.array([[0, 0], [0.5, 0], [1, 0], [1.5, 0], [2, 0], [0, 0.5], [0.5, 0.5], [1, 0.5], [1.5, 0.5], [2, 0.5]])
+# low_density_points = np.array([[0, 0], [5, 5], [10, 10], [15, 15], [20, 20]])
+# high = spatial_density(x=high_density_points, radius=1, pixels_per_mm=1)
+# low = spatial_density(x=low_density_points, radius=1, pixels_per_mm=1)
diff --git a/simba/sandbox/spontaneous_alternation_calculator.py b/simba/sandbox/spontaneous_alternation_calculator.py
new file mode 100644
index 000000000..2f98a31a2
--- /dev/null
+++ b/simba/sandbox/spontaneous_alternation_calculator.py
@@ -0,0 +1,123 @@
+
+import os
+from typing import List, Tuple, Dict, Union, Optional
+import itertools
+import pandas as pd
+import numpy as np
+import warnings
+warnings.filterwarnings("ignore")
+
+from simba.utils.checks import check_instance, check_valid_lst, check_that_column_exist, check_file_exist_and_readable, check_if_dir_exists, check_int, check_float, check_if_keys_exist_in_dict, check_video_has_rois, check_str
+from simba.utils.errors import CountError, NoFilesFoundError, NoROIDataError, AnimalNumberError, InvalidInputError
+from simba.utils.read_write import get_file_name_info_in_directory, get_fn_ext, read_df, read_frm_of_video
+from simba.utils.warnings import NoFileFoundWarning
+from simba.mixins.geometry_mixin import GeometryMixin
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.data import detect_bouts
+from simba.utils.printing import stdout_success
+
+TAIL_END = 'tail_end'
+
+class SpontaneousAlternationCalculator(ConfigReader):
+    def __init__(self,
+                 config_path: Union[str, os.PathLike],
+                 arm_names: List[str],
+                 center_name: str,
+                 animal_area: Optional[int] = 80,
+                 threshold: Optional[float] = 0.0,
+                 buffer: Optional[int] = 2,
+                 verbose: Optional[bool] = False,
+                 detailed_data: Optional[bool] = False,
+                 data_path: Optional[Union[str, os.PathLike]] = None):
+
+        ConfigReader.__init__(self, config_path=config_path)
+        if self.animal_cnt != 1: raise AnimalNumberError(msg=f'Spontaneous alternation can only be calculated in 1 animal projects. Your project has {self.animal_cnt} animals.', source=self.__class__.__name__)
+        if len(self.body_parts_lst) < 3: raise InvalidInputError(msg=f'Spontaneous alternation can only be calculated in projects with 3 or more tracked body-parts. Found {len(self.body_parts_lst)}.', source=self.__class__.__name__)
+        check_valid_lst(data=arm_names, source=SpontaneousAlternationCalculator.__name__, valid_dtypes=(str,), min_len=2)
+        check_str(name='CENTER NAME', value=center_name)
+        check_int(name='ANIMAL AREA', value=animal_area, min_value=51, max_value=100)
+        check_float(name='THRESHOLD', value=threshold, min_value=0.0, max_value=1.0)
+        check_int(name='BUFFER', value=buffer, min_value=1)
+        if data_path is None:
+            data_path = self.outlier_corrected_dir
+            file_paths = get_file_name_info_in_directory(directory=data_path, file_type=self.file_type)
+        elif os.path.isdir(data_path):
+            check_if_dir_exists(in_dir=data_path)
+            file_paths = get_file_name_info_in_directory(directory=data_path, file_type=self.file_type)
+        else:
+            check_file_exist_and_readable(file_path=data_path)
+            file_paths = {get_fn_ext(filepath=data_path)[1]: data_path}
+        self.read_roi_data()
+        files_w_missing_rois = list(set(file_paths.keys()) - set(self.video_names_w_rois))
+        self.files_w_rois = [x for x in list(file_paths.keys()) if x in self.video_names_w_rois]
+        if len(self.files_w_rois) == 0: raise NoFilesFoundError(msg=f'No ROI definitions found for any of the data files at {data_path}', source=__class__.__name__)
+        if len(files_w_missing_rois) > 0: NoFileFoundWarning(msg=f'{len(files_w_missing_rois)} file(s) at {data_path} are missing ROI definitions and will be skipped when performing spontaneous alternation calculations: {files_w_missing_rois}', source=__class__.__name__)
+        check_video_has_rois(roi_dict=self.roi_dict, video_names=self.files_w_rois, roi_names=arm_names + [center_name])
+        self.file_paths = list({file_paths[k] for k in self.files_w_rois if k in file_paths})
+        self.threshold, self.buffer, self.animal_area, self.detailed_data = threshold, buffer, animal_area, detailed_data
+        self.verbose, self.center_name, self.arm_names = verbose, center_name, arm_names
+
+    def run(self):
+        roi_geos, self.roi_clrs = GeometryMixin.simba_roi_to_geometries(rectangles_df=self.rectangles_df, circles_df=self.circles_df, polygons_df=self.polygon_df, color=True)
+        self.roi_geos = {k: v for k, v in roi_geos.items() if k in self.files_w_rois}
+        self.results = {}
+        for file_path in self.file_paths:
+            _, self.video_name, _ = get_fn_ext(filepath=file_path)
+            _, px_per_mm, fps = self.read_video_info(video_name=self.video_name)
+            self.data_df = read_df(file_path=file_path, file_type=self.file_type).head(5000)
+            bp_df = self.data_df[[x for x in self.bp_headers if not x.endswith('_p') and TAIL_END not in x.lower()]]
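+            # Pose columns are reshaped into a (frames, body-parts, 2) coordinate array with a matching
+            # (frames, body-parts, 1) probability array before animal geometries are constructed.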
+            p_df = self.data_df[[x for x in self.bp_headers if x.endswith('_p') and TAIL_END not in x.lower()]]
+            bp_arr = bp_df.values.reshape(len(bp_df), int(len(bp_df.columns) / 2), 2).astype(np.int64)
+            p_arr = p_df.values.reshape(len(p_df), len(p_df.columns), 1)
+            if self.threshold > 0.0:
+                bp_arr = GeometryMixin.filter_low_p_bps_for_shapes(x=bp_arr, p=p_arr, threshold=self.threshold).reshape(bp_arr.shape[0], -1, 2)
+            self.animal_polygons = GeometryMixin().multiframe_bodyparts_to_polygon(data=bp_arr, parallel_offset=self.buffer, pixels_per_mm=1)
+            self.roi_df = pd.DataFrame()
+            for geo_name, geo in self.roi_geos[self.video_name].items():
+                roi_geo = [geo for x in range(len(self.animal_polygons))]
+                pct_overlap = np.array(GeometryMixin().multiframe_compute_pct_shape_overlap(shape_1=self.animal_polygons, shape_2=roi_geo, denominator='shape_1', verbose=self.verbose, animal_names=geo_name, video_name=self.video_name))
+                frames_in_roi = np.zeros(pct_overlap.shape)
+                frames_in_roi[np.argwhere(pct_overlap >= self.animal_area)] = 1
+                self.roi_df[geo_name] = frames_in_roi
+            self.video_results = spontaneous_alternations(data=self.roi_df, arm_names=self.arm_names, center_name=self.center_name)
+            self.results[self.video_name] = self.video_results
+
+    def save(self):
+        results_df = pd.DataFrame(columns=['VIDEO NAME', 'ALTERNATION RATE', 'ALTERNATION COUNT', 'ERROR COUNT', 'SAME ARM RETURN ERRORS', 'ALTERNATE ARM RETURN ERRORS'])
+        save_path = os.path.join(self.logs_path, f'spontaneous_alternation_{self.datetime}.csv')
+        for video_name, d in self.results.items():
+            results_df.loc[len(results_df)] = [video_name, d['pct_alternation'], d['alternation_cnt'], d['error_cnt'], d['same_arm_returns_cnt'], d['alternate_arm_returns_cnt']]
+        results_df.set_index('VIDEO NAME').to_csv(save_path)
+        stdout_success(msg=f'Spontaneous alternation data for {len(list(self.results.keys()))} video(s) saved at {save_path}')
+
+        if self.detailed_data:
+            save_dir = os.path.join(self.logs_path, f'detailed_spontaneous_alternation_data_{self.datetime}')
+            sliced_keys = ['same_arm_returns_dict', 'alternate_arm_returns_dict', 'alternations_dict']
+            replace_keys = {'same_arm_returns_dict': 'same arm return', 'alternate_arm_returns_dict': 'alternate arm return', 'alternations_dict': 'alternations'}
+            os.makedirs(save_dir)
+            for video_name, d in self.results.items():
+                save_path = os.path.join(save_dir, f'{video_name}.csv')
+                sliced_data = {k: v for k, v in d.items() if k in sliced_keys}
+                sliced_data = {replace_keys.get(key, key): value for key, value in sliced_data.items()}
+                row_idx = [(behavior, arm) for behavior, arms in sliced_data.items() for arm in arms.keys()]
+                values = [v for arms in sliced_data.values() for v in arms.values()]
+                values = [['' if len(sublist) == 0 else ', '.join(map(str, sublist))] for sublist in values]
+                multi_index = pd.MultiIndex.from_tuples(row_idx, names=['Behavior', 'Arm'])
+                df = pd.DataFrame(values, index=multi_index, columns=['Frames'])
+                df.to_csv(save_path)
+            stdout_success(msg=f'Detailed spontaneous alternation data for {len(list(self.results.keys()))} video(s) saved at {save_dir}')
+
+
+# x = SpontaneousAlternationCalculator(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/project_config.ini',
+#                                      arm_names=['A', 'B', 'C'],
+#                                      center_name='Center',
+#                                      threshold=0.0,
+#                                      animal_area=100,
+#                                      buffer=2,
+#                                      detailed_data=True)
+#
+# x.run()
+# x.save()
+
+# spontaneous_alternations(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/project_config.ini',
+#                          roi_names=['A', 'B', 'C'], body_part='Center')
\ No newline at end of file
diff --git a/simba/sandbox/spontaneuous_alternation_plotter.py b/simba/sandbox/spontaneuous_alternation_plotter.py
new file mode 100644
index 000000000..034801cd0
--- /dev/null
+++ b/simba/sandbox/spontaneuous_alternation_plotter.py
@@ -0,0 +1,162 @@
+import os
+from shapely.geometry import Polygon
+import cv2
+import numpy as np
+import pandas as pd
+from typing import Union, Optional, List, Dict, Tuple
+from simba.sandbox.spontaneous_alternation_calculator import SpontaneousAlternationCalculator
+from simba.utils.checks import check_valid_lst, check_int, check_float, check_file_exist_and_readable, check_str
+from simba.utils.errors import AnimalNumberError, InvalidInputError
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.read_write import find_core_cnt, find_video_of_file, get_video_meta_data, concatenate_videos_in_folder
+from simba.utils.data import detect_bouts
+from simba.utils.enums import Formats, TextOptions, Paths
+
+import multiprocessing
+import functools
+import platform
+import shutil
+
+ALTERNATION_COLOR = (0, 255, 0)
+ERROR_COLOR = (0, 0, 255)
+
+def spontaneous_alternator_video_mp(frm_index: np.ndarray,
+                                    video_path: Union[str, os.PathLike],
+                                    temp_save_dir: Union[str, os.PathLike],
+                                    event_txt: List[List[str]],
+                                    alt_dict: Dict[str, List[int]],
+                                    roi_geometries: Dict[str, Polygon],
+                                    roi_geometry_clrs: Dict[str, Tuple[int]],
+                                    animal_geometries: List[Polygon]):
+
+    core, frm_index = frm_index[0], frm_index[1:]
+    video_meta_data = get_video_meta_data(video_path=video_path)
+    cap = cv2.VideoCapture(video_path)
+    start_frm, current_frm, end_frm = frm_index[0], frm_index[0], frm_index[-1]
+    cap.set(1, start_frm)
+    fourcc = cv2.VideoWriter_fourcc(*Formats.MP4_CODEC.value)
+    save_path = os.path.join(temp_save_dir, f'{core}.mp4')
+    writer = cv2.VideoWriter(save_path, fourcc, video_meta_data["fps"], (int(video_meta_data["width"] + (video_meta_data["width"] / 2)), video_meta_data["height"]))
+    while current_frm < end_frm:
+        sequence_lst = event_txt[current_frm]
+        border = np.zeros((int(video_meta_data["height"]), int(video_meta_data["width"] / 2), 3), dtype=np.uint8)
+        ret, img = cap.read()
+        for shape_cnt, (k, v) in enumerate(roi_geometries.items()):
+            cv2.polylines(img, [np.array(v.exterior.coords).astype(np.int32)], True, (roi_geometry_clrs[k]), thickness=2)
+        cv2.polylines(img, [np.array(animal_geometries[current_frm].exterior.coords).astype(np.int32)], True, (178, 102, 255), thickness=2)
+        if len(list(set(sequence_lst))) == len(list(roi_geometries.keys())) - 1: txt_clr = ALTERNATION_COLOR
+        else: txt_clr = ERROR_COLOR
+        error_1 = len([x for x in alt_dict['same_arm_return_errors'] if x <= current_frm])
+        error_2 = len([x for x in alt_dict['alt_arm_return_errors'] if x <= current_frm])
+        alternations = len([x for x in alt_dict['alternations'] if x <= current_frm])
+        cv2.putText(border, 'Sequence:' + ','.join(sequence_lst), (10, 50), TextOptions.FONT.value, 1, txt_clr, 2)
+        cv2.putText(border, f'Alternation #: {alternations}', (10, 80), TextOptions.FONT.value, 1, txt_clr, 2)
+        cv2.putText(border, f'Errors #: {error_1 + error_2}', (10, 110), TextOptions.FONT.value, 1, txt_clr, 2)
+        img = np.hstack((img, border))
+        writer.write(img)
+        print(f'Writing frame {current_frm} (Core: {core})...')
+        current_frm += 1
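+    # Each worker writes its own numbered temp clip; the clips are later concatenated into the
+    # final video by concatenate_videos_in_folder() once all cores have finished.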
+    writer.release()
+
+class SpontaneousAlternationsPlotter(ConfigReader):
+
+    """
+    Create videos visualizing spontaneous alternation, overlaying the current arm-entry sequence together with
+    running alternation and error counts on each frame.
+
+    :example:
+    >>> config_path = '/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/project_config.ini'
+    >>> x = SpontaneousAlternationsPlotter(config_path=config_path, arm_names=['A', 'B', 'C'], center_name='Center', threshold=0.0, buffer=1, animal_area=60, data_path='/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/csv/outlier_corrected_movement_location/F1 HAB.csv')
+    >>> x.run()
+    """
+
+    def __init__(self,
+                 config_path: Union[str, os.PathLike],
+                 arm_names: List[str],
+                 center_name: str,
+                 animal_area: Optional[int] = 80,
+                 threshold: Optional[float] = 0.0,
+                 buffer: Optional[int] = 2,
+                 core_cnt: Optional[int] = -1,
+                 verbose: Optional[bool] = False,
+                 data_path: Optional[Union[str, os.PathLike]] = None):
+
+        ConfigReader.__init__(self, config_path=config_path)
+        if self.animal_cnt != 1: raise AnimalNumberError(msg=f'Spontaneous alternation can only be calculated in 1 animal projects. Your project has {self.animal_cnt} animals.', source=self.__class__.__name__)
+        if len(self.body_parts_lst) < 3: raise InvalidInputError(msg=f'Spontaneous alternation can only be calculated in projects with 3 or more tracked body-parts. Found {len(self.body_parts_lst)}.', source=self.__class__.__name__)
+        check_valid_lst(data=arm_names, source=SpontaneousAlternationCalculator.__name__, valid_dtypes=(str,), min_len=2)
+        check_int(name='ANIMAL AREA', value=animal_area, min_value=1, max_value=100)
+        check_float(name='THRESHOLD', value=threshold, min_value=0.0, max_value=1.0)
+        check_int(name='BUFFER', value=buffer, min_value=1)
+        check_file_exist_and_readable(file_path=data_path)
+        check_int(name="CORE COUNT", value=core_cnt, min_value=-1, max_value=find_core_cnt()[0], raise_error=True)
+        check_str(name='CENTER NAME', value=center_name)
+        if core_cnt == -1: core_cnt = find_core_cnt()[0]
+        self.threshold, self.buffer, self.animal_area = threshold, buffer, animal_area
+        self.verbose, self.arm_names, self.center_name = verbose, arm_names, center_name
+        self.data_path, self.core_cnt = data_path, core_cnt
+
+    def run(self):
+        sa_computer = SpontaneousAlternationCalculator(config_path=self.config_path,
+                                                       arm_names=self.arm_names,
+                                                       center_name=self.center_name,
+                                                       animal_area=self.animal_area,
+                                                       threshold=self.threshold,
+                                                       verbose=False,
+                                                       buffer=self.buffer,
+                                                       data_path=self.data_path)
+        sa_computer.run()
+        video_path = find_video_of_file(video_dir=self.video_dir, filename=sa_computer.video_name, raise_error=True)
+        bout_df = detect_bouts(data_df=sa_computer.roi_df, target_lst=self.arm_names + [self.center_name], fps=1)[['Event', 'Start_frame']].sort_values(['Start_frame']).reset_index(drop=True)
+        shifted_ = pd.concat([bout_df, bout_df.shift(-1).add_suffix('_shifted').reset_index(drop=True)], axis=1)[['Event', 'Event_shifted']].values
+        unique_counts = [len(list(set(list(x)))) for x in shifted_]
+        drop_idx = np.argwhere(np.array(unique_counts) == 1) + 1
+        bout_df = bout_df.drop(drop_idx.flatten(), axis=0).reset_index(drop=True)
+        bout_df = bout_df[bout_df['Event'] != self.center_name]
+        frm_index = np.arange(0, len(sa_computer.data_df))
+        frm_index = np.array_split(frm_index, self.core_cnt)
+        for cnt, i in enumerate(frm_index): frm_index[cnt] = np.insert(i, 0, cnt)
+        event_txt = []
+        for idx in range(len(sa_computer.data_df)):
+            preceding_entries = list(bout_df['Event'][bout_df['Start_frame'] <= idx].tail(len(self.arm_names)))
+            event_txt.append(preceding_entries)
+        self.temp_folder = os.path.join(self.project_path, Paths.SPONTANEOUS_ALTERNATION_VIDEOS_DIR.value, sa_computer.video_name, "temp")
+        if os.path.isdir(self.temp_folder):
+            shutil.rmtree(self.temp_folder)
+        os.makedirs(self.temp_folder)
+        alt_dict = {'same_arm_return_errors': [x for xs in sa_computer.results['same_arm_returns_dict'].values() for x in xs],
+                    'alt_arm_return_errors': [x for xs in sa_computer.results['alternate_arm_returns_dict'].values() for x in xs],
+                    'alternations': [x for xs in sa_computer.results['alternations_dict'].values() for x in xs]}
+        save_path = os.path.join(self.project_path, Paths.SPONTANEOUS_ALTERNATION_VIDEOS_DIR.value, f'{sa_computer.video_name}.mp4')
+        with multiprocessing.Pool(self.core_cnt, maxtasksperchild=self.maxtasksperchild) as pool:
+            constants = functools.partial(spontaneous_alternator_video_mp,
+                                          video_path=video_path,
+                                          event_txt=event_txt,
+                                          animal_geometries=sa_computer.animal_polygons,
+                                          temp_save_dir=self.temp_folder,
+                                          alt_dict=alt_dict,
+                                          roi_geometries=sa_computer.roi_geos[sa_computer.video_name],
+                                          roi_geometry_clrs=sa_computer.roi_clrs[sa_computer.video_name])
+            for cnt, result in enumerate(pool.imap(constants, frm_index, chunksize=self.multiprocess_chunksize)):
+                print(f'Section {cnt} complete...')
+            pool.terminate()
+            pool.join()
+        print(f"Joining {sa_computer.video_name} multiprocessed video...")
+        concatenate_videos_in_folder(in_folder=self.temp_folder, save_path=save_path)
+
+#
+x = SpontaneousAlternationsPlotter(
+    config_path='/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/project_config.ini',
+    arm_names=['A', 'B', 'C'],
+    center_name='Center',
+    threshold=0.0,
+    buffer=1,
+    animal_area=60,
+    data_path='/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/csv/outlier_corrected_movement_location/F1 HAB.csv')
+x.run()
+
+
+#
diff --git a/simba/sandbox/spontanous_alternations.py b/simba/sandbox/spontanous_alternations.py
new file mode 100644
index 000000000..d72af5c84
--- /dev/null
+++ b/simba/sandbox/spontanous_alternations.py
@@ -0,0 +1,225 @@
+
+import os
+from typing import List, Tuple, Dict, Union, Optional
+import pandas as pd
+import numpy as np
+from copy import deepcopy
+import itertools
+import warnings
+warnings.filterwarnings("ignore")
+from shapely.geometry import Polygon, Point
+
+from simba.utils.checks import check_instance, check_valid_lst, check_that_column_exist, check_file_exist_and_readable, check_if_dir_exists
+from simba.utils.errors import CountError, NoFilesFoundError, NoROIDataError
+from simba.mixins.config_reader import ConfigReader
+from simba.utils.read_write import get_file_name_info_in_directory, get_fn_ext, read_df
+from simba.utils.warnings import NoFileFoundWarning
+from simba.utils.enums import Keys
+from simba.roi_tools.ROI_analyzer import ROIAnalyzer
+from simba.mixins.geometry_mixin import GeometryMixin
+
+# def __spontaneous_alternations(data: pd.DataFrame,
+#                                roi_names: List[str]) -> Tuple[int, Dict[str, np.ndarray]]:
+#    """
+#    Detects spontaneous alternations between a set of user-defined ROIs.
+#
+#    :param pd.DataFrame data: DataFrame containing shape data where each row represents a frame and each column represents a shape where 0 represents not in ROI and 1 represents inside the ROI
+#    :param List[str] roi_names: List of column names in the DataFrame corresponding to shape names.
+#    :returns Dict[Union[str, Tuple[str]], Union[int, float, List[int]]]: Dict with the following keys and values:
+#         'pct_alternation': Percent alternation computed as spontaneous alternation cnt / (total number of arm entries − (number of arms − 1)) × 100
+#         'alternation_cnt': The sliding count of ROI entry sequences of len(shape_names) that are all unique.
+#         'same_arm_returns_cnt': Aggregate count of sequential visits to the same ROI.
+#         'alternate_arm_returns_cnt': Aggregate count of errors which are not same-arm-return errors.
+#         'error_cnt': Aggregate error count (same_arm_returns_cnt + alternate_arm_returns_cnt).
+#         'same_arm_returns_dict': Dictionary where the keys are the names of the ROIs and the values are lists of frames when the same-arm-return errors were committed.
+#         'alternate_arm_returns_dict': Dictionary where the keys are the names of the ROIs and the values are lists of frames when the alternate-arm-return errors were committed.
+#         'alternations_dict': Dictionary where the keys are unique ROI name tuple sequences of length len(shape_names) and the values are lists of frames when the sequence was completed.
+#
+#    :example:
+#    >>> data = np.zeros((100, 4), dtype=int)
+#    >>> random_indices = np.random.randint(0, 4, size=100)
+#    >>> for i in range(100): data[i, random_indices[i]] = 1
+#    >>> df = pd.DataFrame(data, columns=['left', 'top', 'right', 'bottom'])
+#    >>> spontanous_alternations = spontaneous_alternations(data=df, shape_names=['left', 'top', 'right', 'bottom'])
+#    """
+#
+#    def get_sliding_alternation(data: np.ndarray) -> Tuple[Dict[int, List[int]], Dict[int, List[int]], Dict[Tuple[int], List[int]]]:
+#        alt_cnt, stride = 0, data.shape[1]-1
+#        arm_visits = np.full((data.shape[0]), -1)
+#        same_arm_returns, alternations, alternate_arm_returns = {}, {}, {}
+#        for i in range(data.shape[1]-1): alternate_arm_returns[i], same_arm_returns[i] = [], []
+#        for i in list(itertools.permutations(list(range(0, data.shape[1]-1)))): alternations[i] = []
+#        for i in range(data.shape[0]): arm_visits[i] = np.argwhere(data[i, 1:] == 1).flatten()[0]
+#        for i in range(stride-1, arm_visits.shape[0]):
+#            current, priors = arm_visits[i], arm_visits[i-(stride-1):i]
+#            sequence = np.append(priors, [current])
+#            if np.unique(sequence).shape[0] == stride:
+#                alternations[tuple(sequence)].append(data[i, 0])
+#            else:
+#                if current == priors[-1]: same_arm_returns[current].append(data[i, 0])
+#                else: alternate_arm_returns[current].append(data[i, 0])
+#        return same_arm_returns, alternate_arm_returns, alternations
+#
+#    check_instance(source=spontaneous_alternations.__name__, instance=data, accepted_types=(pd.DataFrame,))
+#    check_valid_lst(data=roi_names, source=spontaneous_alternations.__name__, valid_dtypes=(str,))
+#    for shape_name in roi_names: check_that_column_exist(df=data, column_name=shape_name, file_name='')
+#    data = data[roi_names]
+#    additional_vals = list(set(np.unique(data.values.flatten())) - {0, 1})
+#    if len(additional_vals) > 0:
+#        raise CountError(msg=f'When computing spontaneous alternation, ROI fields can only be 0 or 1. Found {additional_vals}', source=spontaneous_alternations.__name__)
+#    above_1_idx = np.argwhere(np.sum(data.values, axis=1) > 1)
+#    if above_1_idx.shape[0] > 0:
+#        raise CountError(msg=f'When computing spontaneous alternation, animals should only exist in <=1 ROIs in any one frame. 
In {above_1_idx.shape[0]} frames, the animal exist in more than one ROI.', source=spontaneous_alternations.__name__) +# shape_map = {} +# for i in range(len(roi_names)): shape_map[i] = roi_names[i] +# data = np.hstack((np.arange(0, data.shape[0]).reshape(-1, 1), data.values)) +# data = data[np.sum(data[:, 1:], axis=1) != 0] +# data = data[np.concatenate(([0], np.where(~(data[:, 1:][1:] == data[:, 1:][:-1]).all(axis=1))[0] + 1))] +# # same_arm, alternate_arm, alt = get_sliding_alternation(data=data) +# # same_arm_returns, alternate_arm_returns = {}, {} +# # for k, v in same_arm.items(): same_arm_returns[shape_map[k]] = v +# # for k, v in alternate_arm.items(): alternate_arm_returns[shape_map[k]] = v +# # alternations = {} +# # for k, v in alt.items(): +# # new_k = [] +# # for i in k: new_k.append(shape_map[i]) +# # alternations[tuple(new_k)] = v +# # +# # same_arm_returns_cnt, alternation_cnt, alternate_arm_returns_cnt = 0, 0, 0 +# # for v in same_arm_returns.values(): +# # same_arm_returns_cnt += len(v) +# # for v in alternate_arm_returns.values(): +# # alternate_arm_returns_cnt += len(v) +# # for v in alternations.values(): alternation_cnt += len(v) +# # pct_alternation = alternation_cnt / (data.shape[0] - (data.shape[1] -1)) +# # +# # return {'pct_alternation': pct_alternation * 100, +# # 'alternation_cnt': alternation_cnt, +# # 'error_cnt': same_arm_returns_cnt + alternate_arm_returns_cnt, +# # 'same_arm_returns_cnt': same_arm_returns_cnt, +# # 'alternate_arm_returns_cnt': alternate_arm_returns_cnt, +# # 'same_arm_returns_dict': same_arm_returns, +# # 'alternate_arm_returns_dict': alternate_arm_returns, +# # 'alternations_dict': alternations} + + +def __spontaneous_alternations(data: Dict[str, List[int]]) -> Tuple[int, Dict[str, np.ndarray]]: + + d = {} + roi_names = data.keys() + for shape_name, shape_data in data.items(): + for entry_frm in shape_data: d[entry_frm] = shape_name + d = {k: d[k] for k in sorted(d)} + print(d) + pass + + +# def filter_low_p_bps_for_shapes(x: np.ndarray, p: np.ndarray, threshold: float): +# """ +# Filter body-part data for geometry construction while maintaining valid geometry arrays. +# +# Having a 3D array representing body-parts across time, and a second 3D array representing probabilities of those +# body-parts across time, we want to "remove" body-parts with low detection probabilities whilst also keeping the array sizes +# intact and suitable for geometry construction. To do this, we find body-parts with detection probabilities below the threshold, and replace these with a body-part +# that doesn't fall below the detection probability threshold within the same frame. However, to construct a geometry, we need >= 3 unique key-point locations. +# Thus, no substitution can be made to when there are less than three unique body-part locations within a frame that falls above the threshold. 
+# +# :example: +# >>> x = np.random.randint(0, 500, (18000, 7, 2)) +# >>> p = np.random.random(size=(18000, 7, 1)) +# >>> x = filter_low_p_bps_for_shapes(x=x, p=p, threshold=0.1) +# >>> x = x.reshape(x.shape[0], int(x.shape[1] * 2)) +# """ +# +# results = np.copy(x) +# for i in range(x.shape[0]): +# below_p_idx = np.argwhere(p[i].flatten() < threshold).flatten() +# above_p_idx = np.argwhere(p[i].flatten() >= threshold).flatten() +# if (below_p_idx.shape[0] > 0) and (above_p_idx.shape[0] >= 3): +# for j in below_p_idx: +# new_val = x[i][above_p_idx[0]] +# results[i][j] = new_val +# return results + + + +def spontaneous_alternations(config_path: Union[str, os.PathLike], + roi_names: List[str], + animal_area: Optional[int] = 80, + threshold: Optional[float] = 0.1, + data_dir: Optional[Union[str, os.PathLike]] = None): + + check_file_exist_and_readable(file_path=config_path) + config = ConfigReader(config_path=config_path) + if data_dir is None: + data_dir = config.outlier_corrected_dir + check_if_dir_exists(in_dir=data_dir) + file_paths = get_file_name_info_in_directory(directory=data_dir, file_type=config.file_type) + config.read_roi_data() + files_w_missing_rois = list(set(file_paths.keys()) - set(config.video_names_w_rois)) + files_w_rois = [x for x in list(file_paths.keys()) if x in config.video_names_w_rois] + if len(files_w_rois) == 0: raise NoFilesFoundError(msg=f'No ROI definitions found for any of the data files in {data_dir}', source=spontaneous_alternations.__name__) + if len(files_w_missing_rois) > 0: NoFileFoundWarning(msg=f'{len(files_w_missing_rois)} file(s) in {data_dir} are missing ROI definitions and will be skipped when performing spontaneous alternation calculations: {files_w_missing_rois}', source=spontaneous_alternations.__name__) + for video_name in files_w_rois: + video_rectangles = config.roi_dict[Keys.ROI_RECTANGLES.value][config.roi_dict[Keys.ROI_RECTANGLES.value]['Video'] == video_name] + video_circles = config.roi_dict[Keys.ROI_CIRCLES.value][config.roi_dict[Keys.ROI_CIRCLES.value]['Video'] == video_name] + video_polygons = config.roi_dict[Keys.ROI_POLYGONS.value][config.roi_dict[Keys.ROI_POLYGONS.value]['Video'] == video_name] + video_shape_names = list(video_circles['Name']) + list(video_rectangles['Name']) + list(video_polygons['Name']) + missing_rois = list(set(roi_names) - set(video_shape_names)) + if len(missing_rois) > 0: + raise NoROIDataError(msg=f'{len(missing_rois)} ROI(s) are missing from {video_name}: {missing_rois}', source=spontaneous_alternations.__name__) + roi_geometries = GeometryMixin.simba_roi_to_geometries(rectangles_df=config.rectangles_df, circles_df=config.circles_df, polygons_df=config.polygon_df) + roi_geometries = {k: v for k, v in roi_geometries.items() if k in files_w_rois} + print(roi_geometries) + # file_paths = list({file_paths[k] for k in files_w_rois if k in file_paths}) + # for file_path in file_paths: + # data_df = read_df(file_path=file_path, file_type=config.file_type) + # x = data_df[[x for x in config.bp_headers if not x.endswith('_p') and not 'tail_end' in x.lower()]] + # p = data_df[[x for x in config.bp_headers if x.endswith('_p') and not 'tail_end' in x.lower()]] + # x = x.values.reshape(len(x), int(len(x.columns)/2) , 2).astype(np.int64) + # p = p.values.reshape(len(p), len(p.columns) , 1) + # x = GeometryMixin.filter_low_p_bps_for_shapes(x=x, p=p, threshold=threshold) + # x = x.reshape(x.shape[0], -1, 2) + # print(x.shape) + # polygons = GeometryMixin.bodyparts_to_polygon(data=x) + + + + + + + # + # + # settings = 
{'body_parts': {'Animal_1': body_part}, 'threshold': threshold}
+
+
+
+
+
+    # for file_path in file_paths:
+    #     roi_analyzer = ROIAnalyzer(ini_path=config_path, file_path=file_path, settings=settings, detailed_bout_data=True)
+    #     roi_analyzer.run()
+    #     data_dict = roi_analyzer.entries_exit_dict[get_fn_ext(filepath=file_path)[1]]['Animal_1']
+    #     data_dict_cleaned = {}
+    #     for k, v in data_dict.items(): data_dict_cleaned[k] = v['Entry_times']
+    #     __spontaneous_alternations(data=data_dict_cleaned)
+    #     for shape in roi_names:
+    #         data_df[shape] = 0
+    #         roi_shape_df = roi_analyzer.detailed_df[roi_analyzer.detailed_df['SHAPE'] == shape]
+    #         inside_roi_idx = list(roi_shape_df.apply(lambda x: list(range(int(x["ENTRY FRAMES"]), int(x["EXIT FRAMES"]) + 1)), 1,))
+    #         inside_roi_idx = [x for xs in inside_roi_idx for x in xs]
+    #         data_df.loc[inside_roi_idx, shape] = 1
+    #     alternation = __spontaneous_alternations(data=data_df, roi_names=roi_names)
+    # print(data_df)
+
+
+spontaneous_alternations(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/spontenous_alternation/project_folder/project_config.ini',
+                         roi_names=['A', 'B', 'C'], body_part='Center')
+
+
+
+# data = np.zeros((50, 4), dtype=int)
+# random_indices = np.random.randint(0, 4, size=50)
+# for i in range(50): data[i, random_indices[i]] = 1
+# df = pd.DataFrame(data, columns=['left', 'top', 'right', 'bottom'])
+# results = spontanous_alternation = spontaneous_alternations(data=df, shape_names=['left', 'top', 'right', 'bottom'])
diff --git a/simba/sandbox/stabalize.py b/simba/sandbox/stabalize.py
new file mode 100644
index 000000000..fa0729406
--- /dev/null
+++ b/simba/sandbox/stabalize.py
@@ -0,0 +1,126 @@
+import os
+
+import cv2
+import numpy as np
+import pandas as pd
+from typing import Union
+from simba.utils.read_write import read_frm_of_video
+
+
+def stabalize_video(data_path: Union[str, os.PathLike], video_path: Union[str, os.PathLike]):
+    cap = cv2.VideoCapture(video_path)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+
+    df = pd.read_csv(data_path, index_col=0).head(500)
+    tail_base_points = df[['Tail_base_x', 'Tail_base_y']].values
+    nose_points = df[['Nose_x', 'Nose_y']].values
+
+    for img_idx in range(len(df)):
+        print(img_idx)
+        img = read_frm_of_video(video_path=cap, frame_index=img_idx)
+        point1_current = tail_base_points[img_idx]
+        point2_current = nose_points[img_idx]
+
+        dist = np.linalg.norm(point1_current - point2_current)
+
+        point1_fixed = (300, 300)  # Fixed location for Point 1
+        point2_fixed = (int(point1_fixed[0] - dist), 300)  # Fixed location for Point 2, placed `dist` pixels to the left
+
+        translation_x1 = point1_fixed[0] - point1_current[0]
+        translation_y1 = point1_fixed[1] - point1_current[1]
+
+        translation_x2 = point2_fixed[0] - point2_current[0]
+        translation_y2 = point2_fixed[1] - point2_current[1]
+
+        # Average translation (for simplicity, can also be calculated separately)
+        avg_translation_x = (translation_x1 + translation_x2) / 2
+        avg_translation_y = (translation_y1 + translation_y2) / 2
+
+        # Create a translation matrix
+        translation_matrix = np.array([[1, 0, avg_translation_x], [0, 1, avg_translation_y]])
+
+        # Apply the translation transformation to the frame
+        stabilized_frame = cv2.warpAffine(img, translation_matrix, (frame_width, frame_height))
+        cv2.imshow('stabilized', stabilized_frame)
+        cv2.waitKey(33)
+
+
+
+
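+# The translation matrix passed to cv2.warpAffine above is the 2x3 affine form
+#
+#   [[1, 0, tx],
+#    [0, 1, ty]]
+#
+# i.e., a pure shift by (tx, ty) pixels with no rotation or scaling, so each frame is moved
+# so that the tracked points land on their fixed target locations.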
+stabalize_video(data_path=r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\FL_gq_Saline_0626.csv", video_path=r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test\bg_temp\geometry_bg.mp4") + + + + +# +# +# # Input video path and output video path +# input_video_path = r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test\bg_temp\geometry_bg.mp4" +# output_video_path = r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test\bg_temp\geometry_bg_test.mp4" +# data_path = r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\FL_gq_Saline_0626.csv" +# +# df = pd.read_csv(data_path, index_col=0) +# +# +# # Load the video +# cap = cv2.VideoCapture(input_video_path) +# +# # Get video properties +# fps = cap.get(cv2.CAP_PROP_FPS) +# frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) +# frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) +# fourcc = cv2.VideoWriter_fourcc(*'mp4v') +# +# # Create a VideoWriter object to save the stabilized video +# out = cv2.VideoWriter(output_video_path, fourcc, fps, (frame_width, frame_height)) +# +# +# # Define the two pixels to stabilize (current locations) +# point1_current = (182, 250) # Current location of Point 1 (x, y) +# point2_current = (400, 300) # Current location of Point 2 (x, y) +# +# # Define the fixed locations for these points (where you want them to be) +# point1_fixed = (300, 300) # Fixed location for Point 1 +# point2_fixed = (500, 300) # Fixed location for Point 2 +# +# cnt = 0 +# # Loop through the video frames +# while cap.isOpened(): +# ret, frame = cap.read() +# print(cnt) +# if not ret: +# break +# +# # Calculate the translation needed for each point +# translation_x1 = point1_fixed[0] - point1_current[0] +# translation_y1 = point1_fixed[1] - point1_current[1] +# +# translation_x2 = point2_fixed[0] - point2_current[0] +# translation_y2 = point2_fixed[1] - point2_current[1] +# +# # Average translation (for simplicity, can also be calculated separately) +# avg_translation_x = (translation_x1 + translation_x2) / 2 +# avg_translation_y = (translation_y1 + translation_y2) / 2 +# +# # Create a translation matrix +# translation_matrix = np.array([[1, 0, avg_translation_x], +# [0, 1, avg_translation_y]]) +# +# # Apply the translation transformation to the frame +# stabilized_frame = cv2.warpAffine(frame, translation_matrix, (frame_width, frame_height)) +# +# # Write the stabilized frame to the output video +# out.write(stabilized_frame) +# cnt += 1 +# if cnt == 5000: +# break +# +# +# # Release resources +# cap.release() +# out.release() +# cv2.destroyAllWindows() diff --git a/simba/sandbox/structural_similarity_index.py b/simba/sandbox/structural_similarity_index.py new file mode 100644 index 000000000..4a28a7870 --- /dev/null +++ b/simba/sandbox/structural_similarity_index.py @@ -0,0 +1,146 @@ +from skimage.metrics import structural_similarity +import numpy as np +import cv2 +from simba.utils.checks import check_if_valid_img, check_valid_lst, check_int +from typing import List, Optional +from simba.mixins.image_mixin import ImageMixin + +def structural_similarity_index(img_1: np.ndarray, img_2: np.ndarray) -> float: + """ + Compute the Structural Similarity Index (SSI) between two images. + + The function evaluates the SSI between two input images `img_1` and `img_2`. If the images have different numbers + of channels, they are converted to greyscale before computing the SSI. 
If the images are multi-channel (e.g., RGB),
+    the SSI is computed for each channel.
+
+    :param np.ndarray img_1: The first input image represented as a NumPy array.
+    :param np.ndarray img_2: The second input image represented as a NumPy array.
+    :return float: The SSI value representing the similarity between the two images.
+    """
+    check_if_valid_img(data=img_1, source=f'{structural_similarity_index.__name__} img_1')
+    check_if_valid_img(data=img_2, source=f'{structural_similarity_index.__name__} img_2')
+    multichannel = False
+    if img_1.ndim != img_2.ndim:
+        # Only convert the multi-channel image: cv2.cvtColor raises on images that are already greyscale.
+        if img_1.ndim > 2: img_1 = cv2.cvtColor(img_1, cv2.COLOR_BGR2GRAY)
+        if img_2.ndim > 2: img_2 = cv2.cvtColor(img_2, cv2.COLOR_BGR2GRAY)
+    if img_1.ndim > 2: multichannel = True
+    return abs(structural_similarity(im1=img_1.astype(np.uint8), im2=img_2.astype(np.uint8), multichannel=multichannel))
+
+def img_to_greyscale(img: np.ndarray) -> np.ndarray:
+    """
+    Convert a single color image to greyscale.
+
+    The function takes an RGB image and converts it to a greyscale image using a weighted sum approach.
+    If the input image is already in greyscale (2D array), it is returned as is.
+
+    :param np.ndarray img: Input image represented as a NumPy array. For a color image, the array should have three channels (RGB).
+    :return np.ndarray: The greyscale image as a 2D NumPy array.
+    """
+    check_if_valid_img(data=img, source=img_to_greyscale.__name__)
+    if len(img.shape) != 2:
+        return (0.07 * img[:, :, 2] + 0.72 * img[:, :, 1] + 0.21 * img[:, :, 0])
+    else:
+        return img
+
+def sliding_structural_similarity_index(imgs: List[np.ndarray],
+                                        stride: Optional[int] = 1,
+                                        verbose: Optional[bool] = False) -> np.ndarray:
+
+    """
+    Computes the Structural Similarity Index (SSI) between consecutive images in a list with a specified stride.
+
+    The function evaluates the SSI between pairs of images in the input list `imgs` using a sliding window approach
+    with the specified `stride`. The SSI is computed for each pair of images and the results are stored in an output
+    array. If the images are multi-channel (e.g., RGB), the SSI is computed for each channel.
+
+    High SSI values (close to 1) indicate high similarity between images, while low SSI values (close to 0 or negative)
+    indicate low similarity.
+
+    :param List[np.ndarray] imgs: A list of images. Each element in the list is expected to be a numpy array representing an image.
+    :param Optional[int] stride: The number of images to skip between comparisons. Default is 1.
+    :param Optional[bool] verbose: If True, prints progress messages. Default is False.
+    :return np.ndarray: A numpy array containing the SSI values for each pair of images.
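+
+    .. note::
+       The first ``stride`` positions have no preceding image to compare against and are left as ``0.0``.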
+
+    :example:
+    >>> imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/test')
+    >>> imgs = {k: imgs[k] for k in sorted(imgs, key=lambda x: int(x.split('.')[0]))}
+    >>> imgs = list(imgs.values())
+    >>> results = sliding_structural_similarity_index(imgs=imgs, stride=1, verbose=True)
+    """
+
+    check_valid_lst(data=imgs, valid_dtypes=(np.ndarray,), min_len=2)
+    check_int(name=f'{sliding_structural_similarity_index.__name__} stride', min_value=1, max_value=len(imgs), value=stride)
+    ndims, multichannel = set(), False
+    for i in imgs:
+        check_if_valid_img(data=i, source=sliding_structural_similarity_index.__name__)
+        ndims.add(i.ndim)
+    if len(list(ndims)) > 1:
+        imgs = ImageMixin.img_stack_to_greyscale(imgs=imgs)
+    if imgs[0].ndim > 2: multichannel = True
+    results = np.zeros((len(imgs)), np.float32)
+    for cnt, i in enumerate(range(stride, len(imgs))):
+        img_1, img_2 = imgs[i-stride], imgs[i]
+        results[i] = structural_similarity(im1=img_1, im2=img_2, multichannel=multichannel)
+        if verbose:
+            print(f'SSI computed ({cnt+1}/{len(imgs)-stride})')
+    return results
+
+
+def structural_similarity_matrix(imgs: List[np.array], verbose: Optional[bool] = False) -> np.array:
+    """
+    Computes a matrix of Structural Similarity Index (SSI) values for a list of images.
+
+    This function takes a list of images and computes the SSI between each pair of images, producing a symmetric matrix.
+
+    :param List[np.array] imgs: A list of images represented as numpy arrays. If not all images are greyscale or color, they are converted and processed as greyscale.
+    :param Optional[bool] verbose: If True, prints progress messages showing which SSI values have been computed. Default is False.
+    :return np.array: A square numpy array where the element at [i, j] represents the SSI between imgs[i] and imgs[j].
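+
+    .. note::
+       The matrix is symmetric and its diagonal is left at ``1.0`` (every image is perfectly similar to itself).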
+
+
+def structural_similarity_matrix(imgs: List[np.array], verbose: Optional[bool] = False) -> np.array:
+    """
+    Computes a matrix of Structural Similarity Index (SSI) values for a list of images.
+
+    This function takes a list of images, computes the SSI between each pair of images, and produces a symmetric matrix.
+
+    :param List[np.array] imgs: A list of images represented as numpy arrays. If the images mix greyscale and color formats, they are all converted to and processed as greyscale.
+    :param Optional[bool] verbose: If True, prints progress messages showing which SSI values have been computed. Default is False.
+    :return np.array: A square numpy array where the element at [i, j] represents the SSI between imgs[i] and imgs[j].
+
+    :example:
+    >>> imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/test')
+    >>> imgs = {k: imgs[k] for k in sorted(imgs, key=lambda x: int(x.split('.')[0]))}
+    >>> imgs = list(imgs.values())[0:10]
+    >>> results = structural_similarity_matrix(imgs=imgs)
+    """
+
+    check_valid_lst(data=imgs, valid_dtypes=(np.ndarray,), min_len=2)
+    ndims, multichannel = set(), False
+    for i in imgs:
+        check_if_valid_img(data=i, source=structural_similarity_matrix.__name__)
+        ndims.add(i.ndim)
+    if len(list(ndims)) > 1:
+        imgs = ImageMixin.img_stack_to_greyscale(imgs=imgs)
+    if imgs[0].ndim > 2: multichannel = True
+    results = np.ones((len(imgs), len(imgs)), np.float32)
+    for i in range(len(imgs)):
+        for j in range(i + 1, len(imgs)):
+            val = structural_similarity(im1=imgs[i], im2=imgs[j], multichannel=multichannel)
+            results[i, j] = val
+            results[j, i] = val
+            if verbose:
+                print(f'SSI matrix position ({i}, {j}) complete...')
+    return results
+
+
+
+
+#
+# img_1 = cv2.imread('/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/a.png').astype(np.float32)
+# img_2 = cv2.imread('/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/b.png').astype(np.float32)
+# # ImageMixin.img_emd(img_1=img_1, img_2=img_2, lower_bound=0.5, verbose=True)
+
+# imgs = ImageMixin.read_all_img_in_dir(dir='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/test')
+# imgs = {k: imgs[k] for k in sorted(imgs, key=lambda x: int(x.split('.')[0]))}
+# imgs = list(imgs.values())[0:10]
+# results = structural_similarity_matrix(imgs=imgs)
+
+
+# results = sliding_structural_similarity_index(imgs=imgs, stride=1, verbose=True)
+#
+
+
+
+#
+# img_1 = cv2.imread('/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/a.png', 0).astype(np.float32)
+# img_2 = cv2.imread('/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/examples/e.png', 0).astype(np.float32)
+# structural_similarity_index(img_1=img_1, img_2=img_2)
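+
+# Possible use of the matrix (sketch): find the most similar distinct image pair by
+# masking the self-similarity diagonal before taking the argmax:
+# m = results.copy()
+# np.fill_diagonal(m, -1.0)
+# i, j = np.unravel_index(np.argmax(m), m.shape)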
diff --git a/simba/sandbox/summetry_index.py b/simba/sandbox/summetry_index.py
new file mode 100644
index 000000000..aa11566b0
--- /dev/null
+++ b/simba/sandbox/summetry_index.py
@@ -0,0 +1,49 @@
+
+import numpy as np
+from simba.utils.checks import check_str, check_valid_array
+from simba.utils.enums import Formats
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+def symmetry_index(x: np.ndarray, y: np.ndarray, agg_type: Literal['mean', 'median'] = 'mean') -> float:
+
+    """
+    Calculate the Symmetry Index (SI) between two arrays of measurements, `x` and `y`, over a given time series.
+    The Symmetry Index quantifies the relative difference between two measurements at each time point, expressed as a percentage.
+    The function returns either the mean or median Symmetry Index over the entire series, based on the specified aggregation type.
+
+    Zero indicates perfect symmetry. Positive values represent increasing asymmetry between the two measurements.
+
+    .. math::
+
+       SI_t = \\frac{|x_t - y_t|}{0.5 (x_t + y_t)} \\times 100
+
+    :param np.ndarray x: A 1-dimensional array of measurements from one side (e.g., left side), representing a time series or sequence of measurements.
+    :param np.ndarray y: A 1-dimensional array of measurements from the other side (e.g., right side), of the same length as `x`.
+    :param Literal['mean', 'median'] agg_type: The aggregation method used to summarize the Symmetry Index across all time points.
+    :return: The aggregated Symmetry Index over the series, either as the mean or median SI.
+    :rtype: float
+
+    :example:
+    >>> x = np.random.randint(0, 155, (100,))
+    >>> y = np.random.randint(0, 155, (100,))
+    >>> symmetry_index(x=x, y=y)
+    """
+
+    check_valid_array(data=x, source=f'{symmetry_index.__name__} x', accepted_ndims=(1,), min_axis_0=1, accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_valid_array(data=y, source=f'{symmetry_index.__name__} y', accepted_ndims=(1,), min_axis_0=1, accepted_axis_0_shape=[x.shape[0]], accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+    check_str(name=f'{symmetry_index.__name__} agg_type', value=agg_type, options=('mean', 'median'))
+    si_values = np.abs(x - y) / (0.5 * (x + y)) * 100
+    if agg_type == 'mean':
+        return np.float32(np.nanmean(si_values))
+    else:
+        return np.float32(np.nanmedian(si_values))
+
+
+
+
+
+
+
+
+# x = np.random.randint(0, 155, (100,))
+# y = np.random.randint(0, 155, (100,))
+# symmetry_index(x=x, y=y)
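+
+# Worked illustration (not from the module): for x_t = 10 and y_t = 8,
+# SI = |10 - 8| / (0.5 * (10 + 8)) * 100 = 2 / 9 * 100 = 22.22...
+# symmetry_index(x=np.array([10.0]), y=np.array([8.0]))  # ~22.22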
diff --git a/simba/sandbox/superimpose_elapsed_time.py b/simba/sandbox/superimpose_elapsed_time.py
new file mode 100644
index 000000000..3ab39b847
--- /dev/null
+++ b/simba/sandbox/superimpose_elapsed_time.py
@@ -0,0 +1,98 @@
+from typing import Union, Optional
+import os
+import subprocess
+from simba.utils.read_write import get_fn_ext, find_all_videos_in_directory, get_video_meta_data
+from simba.video_processors.roi_selector import ROISelector
+from simba.utils.checks import check_ffmpeg_available, check_float, check_if_dir_exists, check_file_exist_and_readable, check_str, check_int
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.errors import InvalidInputError
+from simba.utils.lookups import get_fonts
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+
+def superimpose_elapsed_time(video_path: Union[str, os.PathLike],
+                             font: Optional[str] = 'Arial',
+                             font_size: Optional[int] = 30,
+                             font_color: Optional[str] = 'white',
+                             font_border_color: Optional[str] = 'black',
+                             time_format: Optional[Literal['MM:SS', 'HH:MM:SS', 'SS.MMMMMM', 'HH:MM:SS.MMMM']] = 'HH:MM:SS.MMMM',
+                             font_border_width: Optional[int] = 2,
+                             position: Optional[Literal['top_left', 'top_right', 'bottom_left', 'bottom_right', 'top_middle', 'bottom_middle']] = 'top_left',
+                             save_dir: Optional[Union[str, os.PathLike]] = None,
+                             count_direction: Optional[Literal['up', 'down']] = 'up') -> None:
+    """
+    Superimposes elapsed time on the given video file(s) and saves the modified video(s).
+
+    .. video:: _static/img/superimpose_elapsed_time.webm
+       :width: 900
+       :loop:
+
+    :param Union[str, os.PathLike] video_path: Path to the input video file or directory containing video files.
+    :param Optional[str] font: Font name for the elapsed time text. Must be one of the fonts returned by ``get_fonts()``. Default ``Arial``.
+    :param Optional[int] font_size: Font size for the elapsed time text. Default 30.
+    :param Optional[str] font_color: Font color for the elapsed time text. Default white.
+    :param Optional[str] font_border_color: Font border color for the elapsed time text. Default black.
+    :param Optional[Literal['MM:SS', 'HH:MM:SS', 'SS.MMMMMM', 'HH:MM:SS.MMMM']] time_format: Format of the superimposed timestamp. Default ``HH:MM:SS.MMMM``.
+    :param Optional[int] font_border_width: Font border width for the elapsed time text in pixels. Default 2.
+    :param Optional[Literal['top_left', 'top_right', 'bottom_left', 'bottom_right', 'top_middle', 'bottom_middle']] position: Position where the elapsed time text will be superimposed. Default ``top_left``.
+    :param Optional[Union[str, os.PathLike]] save_dir: Directory where the modified video(s) will be saved. If not provided, the directory of the input video(s) will be used.
+    :param Optional[Literal['up', 'down']] count_direction: Count the time up from zero or down from the video length. Default ``up``.
+    :return: None
+
+    :example:
+    >>> superimpose_elapsed_time(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/mouse_open_field/project_folder/videos/test_4/1.mp4', position='top_middle', font_color='black', font_border_color='pink', font_border_width=5, font_size=30)
+    """
+
+    check_ffmpeg_available(raise_error=True)
+    timer = SimbaTimer(start=True)
+    POSITIONS = ['top_left', 'top_right', 'bottom_left', 'bottom_right', 'top_middle', 'bottom_middle']
+    check_str(name=f'{superimpose_elapsed_time.__name__} position', value=position, options=POSITIONS)
+    check_str(name=f'{superimpose_elapsed_time.__name__} time_format', value=time_format, options=['MM:SS', 'HH:MM:SS', 'SS.MMMMMM', 'HH:MM:SS.MMMM'])
+    check_int(name=f'{superimpose_elapsed_time.__name__} font_size', value=font_size, min_value=1)
+    check_int(name=f'{superimpose_elapsed_time.__name__} font_border_width', value=font_border_width, min_value=1)
+    font_color = ''.join(filter(str.isalnum, font_color)).lower()
+    font_border_color = ''.join(filter(str.isalnum, font_border_color)).lower()
+    font_dict = get_fonts()
+    check_str(name='font', value=font, options=tuple(font_dict.keys()))
+    font_path = font_dict[font]
+    time_format_map = {'MM:SS': '%{pts\\:mks}',
+                       'HH:MM:SS': '%{pts\\:hms}',
+                       'SS.MMMMMM': '%{pts}',
+                       'HH:MM:SS.MMMM': '%{pts\\:hms}.%{eif\\:mod(n\\,1000)\\:d\\:4}'}
+    position_map = {'top_left': 'x=5:y=5',
+                    'top_right': 'x=(w-tw-5):y=5',
+                    'bottom_left': 'x=5:y=(h-th-5)',
+                    'bottom_right': 'x=(w-tw-5):y=(h-th-5)',
+                    'top_middle': 'x=(w-tw)/2:y=10',
+                    'bottom_middle': 'x=(w-tw)/2:y=(h-th-10)'}
+    time_text = time_format_map[time_format]
+    pos = position_map[position]
+
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path',
+                                source=superimpose_elapsed_time.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, ext = get_fn_ext(video_path)
+        duration = get_video_meta_data(video_path=video_path)['video_length_s']
+        if count_direction == 'down':
+            time_text = f'%{{eif\\:({duration}-t)\\:d\\:0\\:2}}'
+        print(f'Superimposing time {video_name} (Video {file_cnt + 1}/{len(video_paths)})...')
+        save_path = os.path.join(save_dir, f'{video_name}_time_superimposed{ext}')
+        cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile={font_path}:text='{time_text}':{pos}:fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f'Super-imposed time on {len(video_paths)} video(s), saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
+
+
+
+# Superimpose Countdown Timer
+superimpose_elapsed_time(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/reptile/AGGRESSIVITY_4_11_21_Trial_2_camera1_progress_bar.mp4',
+                         position='top_middle',
+                         font_color='black',
+                         font_border_color='pink',
+                         font_border_width=5,
+                         font_size=30, time_format='HH:MM:SS',
+                         
count_direction='down') \ No newline at end of file diff --git a/simba/sandbox/superimpose_frame_count.py b/simba/sandbox/superimpose_frame_count.py new file mode 100644 index 000000000..fa8ad0c56 --- /dev/null +++ b/simba/sandbox/superimpose_frame_count.py @@ -0,0 +1,69 @@ +from tkinter import * +from simba.utils.checks import check_ffmpeg_available, check_int, check_file_exist_and_readable, check_if_dir_exists +from simba.mixins.pop_up_mixin import PopUpMixin +from simba.utils.enums import Keys, Links, Options +from simba.ui.tkinter_functions import FileSelect, CreateLabelFrameWithIcon, DropDownMenu, FolderSelect +from simba.video_processors.video_processing import superimpose_frame_count +from simba.utils.read_write import find_files_of_filetypes_in_directory +from simba.utils.printing import SimbaTimer, stdout_success + +class SuperImposeFrameCountPopUp(PopUpMixin): + def __init__(self): + super().__init__(title="SUPERIMPOSE FRAME COUNT") + settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.use_gpu_var = BooleanVar(value=False) + use_gpu_cb = Checkbutton(settings_frm, text="USE GPU (reduced runtime)", variable=self.use_gpu_var) + self.font_size_dropdown = DropDownMenu(settings_frm, "FONT SIZE:", list(range(1, 101, 2)), labelwidth=25) + settings_frm.grid(row=0, column=0, sticky="NW") + self.font_size_dropdown.grid(row=0, column=0, sticky="NW") + self.font_size_dropdown.setChoices('20') + use_gpu_cb.grid(row=1, column=0, sticky="NW") + + single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - SUPERIMPOSE FRAME COUNT", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)]) + single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run_single_video()) + + single_video_frm.grid(row=1, column=0, sticky="NW") + self.selected_video.grid(row=0, column=0, sticky="NW") + single_video_run.grid(row=1, column=0, sticky="NW") + + multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - SUPERIMPOSE FRAME COUNT", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value) + self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25) + multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run_multiple_videos()) + + multiple_videos_frm.grid(row=2, column=0, sticky="NW") + self.selected_video_dir.grid(row=0, column=0, sticky="NW") + multiple_videos_run.grid(row=1, column=0, sticky="NW") + self.main_frm.mainloop() + + def run_single_video(self): + video_path = self.selected_video.file_path + check_file_exist_and_readable(file_path=self.selected_video.file_path) + self.video_paths = [video_path] + self.apply() + + def run_multiple_videos(self): + video_dir = self.selected_video_dir.folder_path + check_if_dir_exists(in_dir=video_dir, source=self.__class__.__name__) + self.video_paths = find_files_of_filetypes_in_directory(directory=video_dir, extensions=Options.ALL_VIDEO_FORMAT_OPTIONS.value, raise_error=True) + self.apply() + + def apply(self): + check_ffmpeg_available(raise_error=True) + timer = SimbaTimer(start=True) + use_gpu = self.use_gpu_var.get() + font_size = 
int(self.font_size_dropdown.getChoices()) + for file_cnt, file_path in enumerate(self.video_paths): + check_file_exist_and_readable(file_path=file_path) + superimpose_frame_count(file_path=file_path, gpu=use_gpu, fontsize=font_size) + timer.stop_timer() + stdout_success(msg=f'Frame counts superimposed on {len(self.video_paths)} video(s)', elapsed_time=timer.elapsed_time_str) + +SuperImposeFrameCountPopUp() + + + + +# _ = superimpose_frame_count(file_path=r'/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/2022-06-26_NOB_IOT_1_grayscale_clipped.mp4', +# gpu=False, +# fontsize=90) diff --git a/simba/sandbox/superimpose_frm_cnt.py b/simba/sandbox/superimpose_frm_cnt.py new file mode 100644 index 000000000..6511d4418 --- /dev/null +++ b/simba/sandbox/superimpose_frm_cnt.py @@ -0,0 +1,91 @@ +import functools +import glob +import multiprocessing +import os +import platform +import shutil +import subprocess +import time +from copy import deepcopy +from datetime import datetime +from tkinter import * +from typing import Any, Dict, List, Optional, Tuple, Union + +import cv2 +import numpy as np +from PIL import Image, ImageTk +from shapely.geometry import Polygon + +try: + from typing import Literal +except: + from typing_extensions import Literal + +import simba +from simba.mixins.config_reader import ConfigReader +from simba.mixins.image_mixin import ImageMixin +from simba.utils.checks import (check_ffmpeg_available, + check_file_exist_and_readable, check_float, + check_if_dir_exists, + check_if_filepath_list_is_empty, + check_if_string_value_is_valid_video_timestamp, + check_instance, check_int, + check_nvidea_gpu_available, check_str, + check_that_hhmmss_start_is_before_end, + check_valid_lst, check_valid_tuple) +from simba.utils.data import find_frame_numbers_from_time_stamp +from simba.utils.enums import OS, ConfigKey, Formats, Options, Paths +from simba.utils.errors import (CountError, DirectoryExistError, + FFMPEGCodecGPUError, FFMPEGNotFoundError, + FileExistError, FrameRangeError, + InvalidFileTypeError, InvalidInputError, + InvalidVideoFileError, NoDataError, + NoFilesFoundError, NotDirectoryError) +from simba.utils.lookups import (get_ffmpeg_crossfade_methods, get_fonts, + percent_to_crf_lookup, percent_to_qv_lk) +from simba.utils.printing import SimbaTimer, stdout_success +from simba.utils.read_write import ( + check_if_hhmmss_timestamp_is_valid_part_of_video, + concatenate_videos_in_folder, find_all_videos_in_directory, find_core_cnt, + find_files_of_filetypes_in_directory, get_fn_ext, get_video_meta_data, + read_config_entry, read_config_file, read_frm_of_video) +from simba.utils.warnings import (FileExistWarning, InValidUserInputWarning, + SameInputAndOutputWarning) +from simba.video_processors.extract_frames import video_to_frames +from simba.video_processors.roi_selector import ROISelector +from simba.video_processors.roi_selector_circle import ROISelectorCircle +from simba.video_processors.roi_selector_polygon import ROISelectorPolygon + + +def extract_frames_single_video(file_path: Union[str, os.PathLike], + save_dir: Optional[Union[str, os.PathLike]]) -> None: + """ + Extract all frames for a single video. + + .. note:: + Image frames are saved as PNG files named with integers in order of appearance, i.e., ``0.png, 1.png ...`` + + :parameter Union[str, os.PathLike] file_path: Path to video file. + :parameter Optional[Union[str, os.PathLike]] save_dir: Optional directory where to save the frames. 
If ``save_dir`` is not passed,
+        results are stored within a subdirectory in the same directory as the input file.
+
+    :example:
+    >>> _ = extract_frames_single_video(file_path='project_folder/videos/Video_1.mp4')
+    >>> extract_frames_single_video(file_path='/Users/simon/Desktop/imgs_4/test.mp4', save_dir='/Users/simon/Desktop/imgs_4/frames')
+    """
+
+    timer = SimbaTimer(start=True)
+    check_file_exist_and_readable(file_path=file_path)
+    _ = get_video_meta_data(file_path)
+    dir_name, file_name, ext = get_fn_ext(filepath=file_path)
+    if save_dir is None:
+        save_dir = os.path.join(dir_name, file_name)
+        if not os.path.exists(save_dir): os.makedirs(save_dir)
+    else:
+        check_if_dir_exists(in_dir=save_dir, source=extract_frames_single_video.__name__, create_if_not_exist=True)
+    print(f"Processing video {file_name}...")
+    video_to_frames(file_path, save_dir, overwrite=True, every=1, chunk_size=1000)
+    timer.stop_timer()
+    stdout_success(msg=f"Video {file_name} converted to images in {save_dir} directory!", elapsed_time=timer.elapsed_time_str, source=extract_frames_single_video.__name__)
+
+
diff --git a/simba/sandbox/superimpose_popups.py b/simba/sandbox/superimpose_popups.py
new file mode 100644
index 000000000..933931336
--- /dev/null
+++ b/simba/sandbox/superimpose_popups.py
@@ -0,0 +1,427 @@
+import os
+
+from typing import Union
+from simba.mixins.pop_up_mixin import PopUpMixin
+from simba.ui.tkinter_functions import CreateLabelFrameWithIcon, FileSelect, FolderSelect, DropDownMenu, Entry_Box
+from simba.utils.enums import Keys, Links, Options
+from simba.utils.checks import check_file_exist_and_readable, check_if_dir_exists, check_str
+from simba.video_processors.video_processing import watermark_video, superimpose_elapsed_time, superimpose_video_progressbar, superimpose_overlay_video, superimpose_video_names, superimpose_freetext, roi_blurbox
+from simba.utils.lookups import get_color_dict
+import threading
+from tkinter import *
+import numpy as np
+from simba.utils.read_write import get_video_meta_data, str_2_bool
+
+
+
+class SuperimposeWatermarkPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="WATERMARK VIDEOS")
+        self.LOCATIONS = {'TOP LEFT': 'top_left', 'TOP RIGHT': 'top_right', 'BOTTOM LEFT': 'bottom_left', 'BOTTOM RIGHT': 'bottom_right', 'CENTER': 'center'}
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        opacities = [round(x, 1) for x in list(np.arange(0.1, 1.1, 0.1))]
+        self.selected_img = FileSelect(settings_frm, "WATERMARK IMAGE PATH:", title="Select an image file", file_types=[("IMAGE", Options.ALL_IMAGE_FORMAT_OPTIONS.value)], lblwidth=25)
+        self.location_dropdown = DropDownMenu(settings_frm, "WATERMARK LOCATION:", list(self.LOCATIONS.keys()), labelwidth=25)
+        self.opacity_dropdown = DropDownMenu(settings_frm, "WATERMARK OPACITY:", opacities, labelwidth=25)
+        self.size_dropdown = DropDownMenu(settings_frm, "WATERMARK SCALE %:", list(range(5, 100, 5)), labelwidth=25)
+
+        self.location_dropdown.setChoices('TOP LEFT')
+        self.opacity_dropdown.setChoices(0.5)
+        self.size_dropdown.setChoices(5)
+
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.selected_img.grid(row=0, column=0, sticky=NW)
+        self.location_dropdown.grid(row=1, column=0, sticky=NW)
+        self.opacity_dropdown.grid(row=2, column=0, sticky=NW)
+        self.size_dropdown.grid(row=3, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - SUPERIMPOSE WATERMARK", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - SUPERIMPOSE WATERMARK", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        img_path = self.selected_img.file_path
+        loc = self.location_dropdown.getChoices()
+        loc = self.LOCATIONS[loc]
+        opacity = float(self.opacity_dropdown.getChoices())
+        size = float(int(self.size_dropdown.getChoices()) / 100)
+        if size == 1.0: size = size - 0.001
+        check_file_exist_and_readable(file_path=img_path)
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+
+        # Pass the callable plus kwargs so the work runs on the worker thread; calling
+        # the function inline would block the GUI and hand Thread a None target.
+        threading.Thread(target=watermark_video,
+                         kwargs={'video_path': data_path,
+                                 'watermark_path': img_path,
+                                 'position': loc,
+                                 'opacity': opacity,
+                                 'scale': size}).start()
+#SuperimposeWatermarkPopUp()
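+
+# NOTE (generic Python pattern, not SimBA-specific): threading.Thread(target=f(...))
+# would call f() immediately on the GUI thread and hand Thread its return value (None)
+# as the target. The popups in this module therefore pass the callable plus kwargs:
+#
+# import threading
+# def f(x): print(x)
+# threading.Thread(target=f, kwargs={'x': 1}).start()  # f runs on the worker thread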
+
+
+class SuperimposeTimerPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="SUPER-IMPOSE TIME ON VIDEOS")
+        self.LOCATIONS = {'TOP LEFT': 'top_left', 'TOP RIGHT': 'top_right', 'TOP MIDDLE': 'top_middle', 'BOTTOM LEFT': 'bottom_left', 'BOTTOM RIGHT': 'bottom_right', 'BOTTOM MIDDLE': 'bottom_middle'}
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.color_dict = get_color_dict()
+
+        self.location_dropdown = DropDownMenu(settings_frm, "TIMER LOCATION:", list(self.LOCATIONS.keys()), labelwidth=25)
+        self.font_size_dropdown = DropDownMenu(settings_frm, "FONT SIZE:", list(range(20, 100, 5)), labelwidth=25)
+        self.font_color_dropdown = DropDownMenu(settings_frm, "FONT COLOR:", list(self.color_dict.keys()), labelwidth=25)
+        self.font_border_dropdown = DropDownMenu(settings_frm, "FONT BORDER COLOR:", list(self.color_dict.keys()), labelwidth=25)
+        self.font_border_width_dropdown = DropDownMenu(settings_frm, "FONT BORDER WIDTH:", list(range(2, 52, 2)), labelwidth=25)
+
+        self.location_dropdown.setChoices('TOP LEFT')
+        self.font_size_dropdown.setChoices(20)
+        self.font_color_dropdown.setChoices('White')
+        self.font_border_dropdown.setChoices('Black')
+        self.font_border_width_dropdown.setChoices(2)
+
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.location_dropdown.grid(row=0, column=0, sticky=NW)
+        self.font_size_dropdown.grid(row=1, column=0, sticky=NW)
+        self.font_color_dropdown.grid(row=2, column=0, sticky=NW)
+        self.font_border_dropdown.grid(row=3, column=0, sticky=NW)
+        self.font_border_width_dropdown.grid(row=4, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - SUPERIMPOSE TIMER", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - SUPERIMPOSE TIMER", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        loc = self.location_dropdown.getChoices()
+        loc = self.LOCATIONS[loc]
+        font_size = int(self.font_size_dropdown.getChoices())
+        font_clr = self.font_color_dropdown.getChoices()
+        font_border_clr = self.font_border_dropdown.getChoices()
+        font_border_width = int(self.font_border_width_dropdown.getChoices())
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+
+        threading.Thread(target=superimpose_elapsed_time,
+                         kwargs={'video_path': data_path,
+                                 'font_size': font_size,
+                                 'font_color': font_clr,
+                                 'font_border_color': font_border_clr,
+                                 'font_border_width': font_border_width,
+                                 'position': loc}).start()
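+
+# The popups are thin wrappers; the underlying simba.video_processors.video_processing
+# functions can also be scripted directly without the GUI (sketch, hypothetical path):
+# superimpose_elapsed_time(video_path='/path/to/video.mp4', position='top_middle', font_size=30)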
+
+class SuperimposeProgressBarPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="SUPER-IMPOSE PROGRESS BAR ON VIDEOS")
+        self.LOCATIONS = {'TOP': 'top', 'BOTTOM': 'bottom'}
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.color_dict = get_color_dict()
+        size_lst = list(range(0, 110, 5))
+        size_lst[0] = 1
+        self.bar_loc_dropdown = DropDownMenu(settings_frm, "PROGRESS BAR LOCATION:", list(self.LOCATIONS.keys()), labelwidth=25)
+        self.bar_color_dropdown = DropDownMenu(settings_frm, "PROGRESS BAR COLOR:", list(self.color_dict.keys()), labelwidth=25)
+        self.bar_size_dropdown = DropDownMenu(settings_frm, "PROGRESS BAR HEIGHT (%):", size_lst, labelwidth=25)
+        self.bar_color_dropdown.setChoices('Red')
+        self.bar_size_dropdown.setChoices(10)
+        self.bar_loc_dropdown.setChoices('BOTTOM')
+
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.bar_loc_dropdown.grid(row=0, column=0, sticky=NW)
+        self.bar_color_dropdown.grid(row=1, column=0, sticky=NW)
+        self.bar_size_dropdown.grid(row=2, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - SUPERIMPOSE PROGRESS BAR", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - SUPERIMPOSE PROGRESS BAR", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        loc = self.bar_loc_dropdown.getChoices()
+        loc = self.LOCATIONS[loc]
+        bar_clr = self.bar_color_dropdown.getChoices()
+        bar_size = int(self.bar_size_dropdown.getChoices())
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+
+        threading.Thread(target=superimpose_video_progressbar,
+                         kwargs={'video_path': data_path,
+                                 'bar_height': bar_size,
+                                 'color': bar_clr,
+                                 'position': loc}).start()
+
+class SuperimposeVideoPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="SUPER-IMPOSE VIDEO ON VIDEO")
+        self.LOCATIONS = {'TOP LEFT': 'top_left', 'TOP RIGHT': 'top_right', 'BOTTOM LEFT': 'bottom_left', 'BOTTOM RIGHT': 'bottom_right', 'CENTER': 'center'}
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        opacities = [round(x, 1) for x in list(np.arange(0.1, 1.1, 0.1))]
+        scales = [round(x, 2) for x in list(np.arange(0.05, 1.0, 0.05))]
+        self.main_video_path = FileSelect(settings_frm, "MAIN VIDEO PATH:", title="Select a video file", file_types=[("VIDEO", Options.ALL_VIDEO_FORMAT_OPTIONS.value)], lblwidth=25)
+        self.overlay_video_path = FileSelect(settings_frm, "OVERLAY VIDEO PATH:", title="Select a video file", file_types=[("VIDEO", Options.ALL_VIDEO_FORMAT_OPTIONS.value)], lblwidth=25)
+        self.location_dropdown = DropDownMenu(settings_frm, "OVERLAY VIDEO LOCATION:", list(self.LOCATIONS.keys()), labelwidth=25)
+        self.opacity_dropdown = DropDownMenu(settings_frm, "OVERLAY VIDEO OPACITY:", opacities, labelwidth=25)
+        self.size_dropdown = DropDownMenu(settings_frm, "OVERLAY VIDEO SCALE (%):", scales, labelwidth=25)
+
+        self.location_dropdown.setChoices('TOP LEFT')
+        self.opacity_dropdown.setChoices(0.5)
+        self.size_dropdown.setChoices(0.05)
+
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.main_video_path.grid(row=0, column=0, sticky=NW)
+        self.overlay_video_path.grid(row=1, column=0, sticky=NW)
+        self.location_dropdown.grid(row=2, column=0, sticky=NW)
+        self.opacity_dropdown.grid(row=3, column=0, sticky=NW)
+        self.size_dropdown.grid(row=4, column=0, sticky=NW)
+        self.create_run_frm(run_function=self.run)
+        self.main_frm.mainloop()
+    def run(self):
+        loc = self.location_dropdown.getChoices()
+        loc = self.LOCATIONS[loc]
+        opacity = float(self.opacity_dropdown.getChoices())
+        size = float(self.size_dropdown.getChoices())
+        video_path = self.main_video_path.file_path
+        overlay_path = self.overlay_video_path.file_path
+        check_file_exist_and_readable(file_path=video_path)
+        check_file_exist_and_readable(file_path=overlay_path)
+        threading.Thread(target=superimpose_overlay_video,
+                         kwargs={'video_path': video_path,
+                                 'overlay_video_path': overlay_path,
+                                 'position': loc,
+                                 'opacity': opacity,
+                                 'scale': size}).start()
+
+#SuperimposeVideoPopUp()
+
+
+class SuperimposeVideoNamesPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="SUPER-IMPOSE VIDEO NAMES ON VIDEOS")
+        self.LOCATIONS = {'TOP LEFT': 'top_left', 'TOP RIGHT': 'top_right', 'TOP MIDDLE': 'top_middle', 'BOTTOM LEFT': 'bottom_left', 'BOTTOM RIGHT': 'bottom_right', 'BOTTOM MIDDLE': 'bottom_middle'}
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.color_dict = get_color_dict()
+
+        # superimpose_video_names() derives the text from each video's file name, so no
+        # free-text entry box is needed here (a stray, unused Entry_Box was removed).
+        self.location_dropdown = DropDownMenu(settings_frm, "VIDEO NAME TEXT LOCATION:", list(self.LOCATIONS.keys()), labelwidth=25)
+        self.font_size_dropdown = DropDownMenu(settings_frm, "FONT SIZE:", list(range(5, 105, 5)), labelwidth=25)
+        self.font_color_dropdown = DropDownMenu(settings_frm, "FONT COLOR:", list(self.color_dict.keys()), labelwidth=25)
+        self.font_border_dropdown = DropDownMenu(settings_frm, "FONT BORDER COLOR:", list(self.color_dict.keys()), labelwidth=25)
+        self.font_border_width_dropdown = DropDownMenu(settings_frm, "FONT BORDER WIDTH:", list(range(2, 52, 2)), labelwidth=25)
+
+        self.location_dropdown.setChoices('TOP LEFT')
+        self.font_size_dropdown.setChoices(20)
+        self.font_color_dropdown.setChoices('White')
+        self.font_border_dropdown.setChoices('Black')
+        self.font_border_width_dropdown.setChoices(2)
+
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.location_dropdown.grid(row=0, column=0, sticky=NW)
+        self.font_size_dropdown.grid(row=1, column=0, sticky=NW)
+        self.font_color_dropdown.grid(row=2, column=0, sticky=NW)
+        self.font_border_dropdown.grid(row=3, column=0, sticky=NW)
+        self.font_border_width_dropdown.grid(row=4, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - SUPERIMPOSE VIDEO NAME", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - SUPERIMPOSE VIDEO NAME", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        loc = self.location_dropdown.getChoices()
+        loc = self.LOCATIONS[loc]
+        font_size = int(self.font_size_dropdown.getChoices())
+        font_clr = self.font_color_dropdown.getChoices()
+        font_border_clr = self.font_border_dropdown.getChoices()
+        font_border_width = int(self.font_border_width_dropdown.getChoices())
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+
+        threading.Thread(target=superimpose_video_names,
+                         kwargs={'video_path': data_path,
+                                 'font_size': font_size,
+                                 'font_color': font_clr,
+                                 'font_border_color': font_border_clr,
+                                 'font_border_width': font_border_width,
+                                 'position': loc}).start()
+
+#SuperimposeVideoNamesPopUp()
+
+
+
+class SuperimposeTextPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="SUPER-IMPOSE TEXT ON VIDEOS")
+        self.LOCATIONS = {'TOP LEFT': 'top_left', 'TOP RIGHT': 'top_right', 'TOP MIDDLE': 'top_middle', 'BOTTOM LEFT': 'bottom_left', 'BOTTOM RIGHT': 'bottom_right', 'BOTTOM MIDDLE': 'bottom_middle'}
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.color_dict = get_color_dict()
+
+        self.location_dropdown = DropDownMenu(settings_frm, "TEXT LOCATION:", list(self.LOCATIONS.keys()), labelwidth=25)
+        self.text_eb = Entry_Box(parent=settings_frm, labelwidth=25, entry_box_width=50, fileDescription='TEXT:')
+        self.font_size_dropdown = DropDownMenu(settings_frm, "FONT SIZE:", list(range(5, 105, 5)), labelwidth=25)
+        self.font_color_dropdown = DropDownMenu(settings_frm, "FONT COLOR:", list(self.color_dict.keys()), labelwidth=25)
+        self.font_border_dropdown = DropDownMenu(settings_frm, "FONT BORDER COLOR:", list(self.color_dict.keys()), labelwidth=25)
+        self.font_border_width_dropdown = DropDownMenu(settings_frm, "FONT BORDER WIDTH:", list(range(2, 52, 2)), labelwidth=25)
+
+        self.location_dropdown.setChoices('TOP LEFT')
+        self.font_size_dropdown.setChoices(20)
+        self.font_color_dropdown.setChoices('White')
+        self.font_border_dropdown.setChoices('Black')
+        self.font_border_width_dropdown.setChoices(2)
+
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.location_dropdown.grid(row=0, column=0, sticky=NW)
+        self.text_eb.grid(row=1, column=0, sticky=NW)
+        self.font_size_dropdown.grid(row=2, column=0, sticky=NW)
+        self.font_color_dropdown.grid(row=3, column=0, sticky=NW)
+        self.font_border_dropdown.grid(row=4, column=0, sticky=NW)
+        self.font_border_width_dropdown.grid(row=5, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SINGLE VIDEO - SUPERIMPOSE TEXT", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN - SINGLE VIDEO", command=lambda: self.run(multiple=False))
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        multiple_videos_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="MULTIPLE VIDEOS - SUPERIMPOSE TEXT", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video_dir = FolderSelect(multiple_videos_frm, "VIDEO DIRECTORY PATH:", title="Select a video directory", lblwidth=25)
+        multiple_videos_run = Button(multiple_videos_frm, text="RUN - MULTIPLE VIDEOS", command=lambda: self.run(multiple=True))
+
+        multiple_videos_frm.grid(row=2, column=0, sticky="NW")
+        self.selected_video_dir.grid(row=0, column=0, sticky="NW")
+        multiple_videos_run.grid(row=1, column=0, sticky="NW")
+        self.main_frm.mainloop()
+
+    def run(self, multiple: bool):
+        loc = self.location_dropdown.getChoices()
+        loc = self.LOCATIONS[loc]
+        text = self.text_eb.entry_get
+        check_str(name='text', value=text)
+        font_size = int(self.font_size_dropdown.getChoices())
+        font_clr = self.font_color_dropdown.getChoices()
+        font_border_clr = self.font_border_dropdown.getChoices()
+        font_border_width = int(self.font_border_width_dropdown.getChoices())
+        if not multiple:
+            data_path = self.selected_video.file_path
+            check_file_exist_and_readable(file_path=data_path)
+        else:
+            data_path = self.selected_video_dir.folder_path
+            check_if_dir_exists(in_dir=data_path)
+
+        threading.Thread(target=superimpose_freetext,
+                         kwargs={'video_path': data_path,
+                                 'text': text,
+                                 'font_size': font_size,
+                                 'font_color': font_clr,
+                                 'font_border_color': font_border_clr,
+                                 'font_border_width': font_border_width,
+                                 'position': loc}).start()
+
+class BoxBlurPopUp(PopUpMixin):
+    def __init__(self):
+        PopUpMixin.__init__(self, title="BOX BLUR VIDEOS")
+        settings_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="SETTINGS", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        blur_lvl = [round(x, 2) for x in list(np.arange(0.05, 1.0, 0.05))]
+        self.blur_lvl_dropdown = DropDownMenu(settings_frm, "BLUR LEVEL:", blur_lvl, labelwidth=25)
+        self.invert_dropdown = DropDownMenu(settings_frm, "INVERT BLUR REGION:", ['TRUE', 'FALSE'], labelwidth=25)
+
+        self.blur_lvl_dropdown.setChoices(0.05)  # smallest value in the dropdown (0.02 was not an available option)
+        self.invert_dropdown.setChoices('FALSE')
+        settings_frm.grid(row=0, column=0, sticky=NW)
+        self.blur_lvl_dropdown.grid(row=0, column=0, sticky=NW)
+        self.invert_dropdown.grid(row=1, column=0, sticky=NW)
+
+        single_video_frm = CreateLabelFrameWithIcon(parent=self.main_frm, header="APPLY BOX-BLUR", icon_name=Keys.DOCUMENTATION.value, icon_link=Links.VIDEO_TOOLS.value)
+        self.selected_video = FileSelect(single_video_frm, "VIDEO PATH:", title="Select a video file", lblwidth=25, file_types=[("VIDEO FILE", Options.ALL_VIDEO_FORMAT_STR_OPTIONS.value)])
+        single_video_run = Button(single_video_frm, text="RUN", command=lambda: self.run())
+
+        single_video_frm.grid(row=1, column=0, sticky="NW")
+        self.selected_video.grid(row=0, column=0, sticky="NW")
+        single_video_run.grid(row=1, column=0, sticky="NW")
+
+        self.main_frm.mainloop()
+
+    def run(self):
+        video_path = self.selected_video.file_path
+        check_file_exist_and_readable(file_path=video_path)
+        blur_lvl = float(self.blur_lvl_dropdown.getChoices())
+        invert = str_2_bool(self.invert_dropdown.getChoices())
+        threading.Thread(target=roi_blurbox,
+                         kwargs={'video_path': video_path,
+                                 'blur_level': blur_lvl,
+                                 'invert': invert}).start()
+
+
+BoxBlurPopUp()
+
+
+
+
+
+# def print_video_info(video_path: Union[str, os.PathLike]) -> None:
+#     video_meta_data = get_video_meta_data(video_path=video_path, fps_as_int=False)
+#     for k, v in video_meta_data.items():
+#         print(f'{k}: {v}')
+
+
+
+
+#print_video_info('/Users/simon/Desktop/Box2_IF19_7_20211109T173625_4_851_873_1_time_superimposed_video_name_superimposed.mp4')
+
diff --git a/simba/sandbox/superimpose_text.py b/simba/sandbox/superimpose_text.py
new file mode 100644
index 000000000..de3769829
--- /dev/null
+++ b/simba/sandbox/superimpose_text.py
@@ -0,0 +1,111 @@
+from typing import Union, Optional
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+import os
+import subprocess
+from simba.utils.read_write import get_fn_ext, find_all_videos_in_directory, get_video_meta_data
+from simba.video_processors.roi_selector import ROISelector
+from simba.utils.checks import check_ffmpeg_available, check_float, check_if_dir_exists, check_file_exist_and_readable, check_str, check_int
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.errors import InvalidInputError
+
+
+def superimpose_video_names(video_path: Union[str, os.PathLike],
+                            font_size: Optional[int] = 30,
+                            font_color: Optional[str] = 'white',
+                            font_border_color: Optional[str] = 'black',
+                            font_border_width: Optional[int] = 2,
+                            position: Optional[Literal['top_left', 'top_right', 'bottom_left', 'bottom_right', 'top_middle', 'bottom_middle']] = 'top_left',
+                            save_dir: Optional[Union[str, os.PathLike]] = None) -> None:
+
+    check_ffmpeg_available(raise_error=True)
+    timer = SimbaTimer(start=True)
+    POSITIONS = ['top_left', 'top_right', 'bottom_left', 'bottom_right', 'top_middle', 'bottom_middle']
+    check_str(name=f'{superimpose_video_names.__name__} position', value=position, options=POSITIONS)
+    check_int(name=f'{superimpose_video_names.__name__} font_size', value=font_size, min_value=1)
+    check_int(name=f'{superimpose_video_names.__name__} font_border_width', value=font_border_width, min_value=1)
+    font_color = ''.join(filter(str.isalnum, font_color)).lower()
+    font_border_color = ''.join(filter(str.isalnum, font_border_color)).lower()
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=superimpose_video_names.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, ext = get_fn_ext(video_path)
+        print(f'Superimposing video name on {video_name} (Video {file_cnt + 1}/{len(video_paths)})...')
+        save_path = os.path.join(save_dir, f'{video_name}_video_name_superimposed{ext}')
+        if position == POSITIONS[0]:
+            cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={video_name}:x=5:y=5:fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        elif position == POSITIONS[1]:
+            cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={video_name}:x=(w-tw-5):y=5:fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        elif position == POSITIONS[2]:
+            cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={video_name}:x=5:y=(h-th-5):fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a 
copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        elif position == POSITIONS[3]:
+            cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={video_name}:x=(w-tw-5):y=(h-th-5):fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        elif position == POSITIONS[4]:
+            cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={video_name}:x=(w-tw)/2:y=10:fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        else:
+            cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={video_name}:x=(w-tw)/2:y=(h-th-10):fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f'Super-imposed video name on {len(video_paths)} video(s), saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
+
+
+
+def superimpose_freetext(video_path: Union[str, os.PathLike],
+                         text: str,
+                         font_size: Optional[int] = 30,
+                         font_color: Optional[str] = 'white',
+                         font_border_color: Optional[str] = 'black',
+                         font_border_width: Optional[int] = 2,
+                         position: Optional[Literal['top_left', 'top_right', 'bottom_left', 'bottom_right', 'top_middle', 'bottom_middle']] = 'top_left',
+                         save_dir: Optional[Union[str, os.PathLike]] = None) -> None:
+
+    check_ffmpeg_available(raise_error=True)
+    timer = SimbaTimer(start=True)
+    POSITIONS = ['top_left', 'top_right', 'bottom_left', 'bottom_right', 'top_middle', 'bottom_middle']
+    check_str(name=f'{superimpose_freetext.__name__} position', value=position, options=POSITIONS)
+    check_int(name=f'{superimpose_freetext.__name__} font_size', value=font_size, min_value=1)
+    check_int(name=f'{superimpose_freetext.__name__} font_border_width', value=font_border_width, min_value=1)
+    check_str(name=f'{superimpose_freetext.__name__} text', value=text)
+    font_color = ''.join(filter(str.isalnum, font_color)).lower()
+    font_border_color = ''.join(filter(str.isalnum, font_border_color)).lower()
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=superimpose_freetext.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, ext = get_fn_ext(video_path)
+        print(f'Superimposing text on {video_name} (Video {file_cnt + 1}/{len(video_paths)})...')
+        save_path = os.path.join(save_dir, f'{video_name}_text_superimposed{ext}')
+        if position == POSITIONS[0]:
+            cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={text}:x=5:y=5:fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y"
+        elif position == POSITIONS[1]:
+            cmd = f"ffmpeg -i '{video_path}' -vf 
\"drawtext=fontfile=Arial.ttf:text={text}:x=(w-tw-5):y=5:fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y" + elif position == POSITIONS[2]: + cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={text}:x=5:y=(h-th-5):fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y" + elif position == POSITIONS[3]: + cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={text}:x=(w-tw-5):y=(h-th-5):fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y" + elif position == POSITIONS[4]: + cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={text}:x=(w-tw)/2:y=10:fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y" + else: + cmd = f"ffmpeg -i '{video_path}' -vf \"drawtext=fontfile=Arial.ttf:text={text}:x=(w-tw)/2:y=(h-th-10):fontsize={font_size}:fontcolor={font_color}:borderw={font_border_width}:bordercolor={font_border_color}\" -c:a copy '{save_path}' -loglevel error -stats -hide_banner -y" + subprocess.call(cmd, shell=True, stdout=subprocess.PIPE) + timer.stop_timer() + stdout_success(msg=f'Super-imposed text on {len(video_paths)} video(s), saved in {save_dir}', elapsed_time=timer.elapsed_time_str) + +superimpose_freetext(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/open_field_below/project_folder/videos/raw_clip1_fps_5_progress_bar.mp4', position='top_left', text='DUCK') \ No newline at end of file diff --git a/simba/sandbox/superpixels.py b/simba/sandbox/superpixels.py new file mode 100644 index 000000000..d66459a8c --- /dev/null +++ b/simba/sandbox/superpixels.py @@ -0,0 +1,46 @@ +import cv2 +import matplotlib.pyplot as plt +import numpy as np + +from skimage.data import astronaut +from skimage.color import rgb2gray +from skimage.filters import sobel +from skimage.segmentation import felzenszwalb, slic, quickshift, watershed +from skimage.segmentation import mark_boundaries +from skimage.util import img_as_float + + +img = cv2.imread('/Users/simon/Desktop/test.png').astype(np.float32)/255.0 + +#img = img_as_float(astronaut()[::2, ::2]) + +segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50) +segments_slic = slic(img, n_segments=250, compactness=10, sigma=1) +segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5) +gradient = sobel(rgb2gray(img)) +segments_watershed = watershed(gradient, markers=250, compactness=0.001) + +# cv2.imshow('asasd', segments_watershed) +# cv2.waitKey(10000) + +print(f'Felzenszwalb number of segments: {len(np.unique(segments_fz))}') +print(f'SLIC number of segments: {len(np.unique(segments_slic))}') +print(f'Quickshift number of segments: {len(np.unique(segments_quick))}') +print(f'Watershed number of segments: {len(np.unique(segments_watershed))}') + +fig, ax = plt.subplots(2, 2, figsize=(10, 10), sharex=True, sharey=True) + +ax[0, 0].imshow(mark_boundaries(img, segments_fz)) +ax[0, 0].set_title("Felzenszwalbs's method") +ax[0, 1].imshow(mark_boundaries(img, segments_slic)) +ax[0, 1].set_title('SLIC') +ax[1, 0].imshow(mark_boundaries(img, segments_quick)) +ax[1, 0].set_title('Quickshift') +ax[1, 
1].imshow(mark_boundaries(img, segments_watershed)) +ax[1, 1].set_title('Compact watershed') + +for a in ax.ravel(): + a.set_axis_off() + +plt.tight_layout() +plt.show() \ No newline at end of file diff --git a/simba/sandbox/total_variation_distance.py b/simba/sandbox/total_variation_distance.py new file mode 100644 index 000000000..b575e89eb --- /dev/null +++ b/simba/sandbox/total_variation_distance.py @@ -0,0 +1,43 @@ +from typing import Optional +try: + from typing import Literal +except: + from typing_extensions import Literal +import numpy as np + +from simba.utils.checks import check_str, check_valid_array +from simba.utils.enums import Options +from simba.mixins.statistics_mixin import Statistics +from simba.utils.data import bucket_data +from numba import jit + + + +def total_variation_distance(x: np.ndarray, y: np.ndarray, bucket_method: Optional[Literal["fd", "doane", "auto", "scott", "stone", "rice", "sturges", "sqrt"]] = "auto"): + """ + Calculate the total variation distance between two probability distributions. + + :param np.ndarray x: A 1-D array representing the first sample. + :param np.ndarray y: A 1-D array representing the second sample. + :param Optional[str] bucket_method: The method used to determine the number of bins for histogram computation. Supported methods are 'fd' (Freedman-Diaconis), 'doane', 'auto', 'scott', 'stone', 'rice', 'sturges', and 'sqrt'. Defaults to 'auto'. + :return float: The total variation distance between the two distributions. + + .. math:: + + TV(P, Q) = 0.5 \sum_i |P_i - Q_i| + + where :math:`P_i` and :math:`Q_i` are the probabilities assigned by the distributions :math:`P` and :math:`Q` + to the same event :math:`i`, respectively. + + :example: + >>> total_variation_distance(x=np.array([1, 5, 10, 20, 50]), y=np.array([1, 5, 10, 100, 110])) + >>> 0.3999999761581421 + """ + + check_valid_array(data=x, source=total_variation_distance.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, np.int8, np.float32, np.float64, int, float)) + check_valid_array(data=y, source=total_variation_distance.__name__, accepted_ndims=(1,), accepted_dtypes=(np.int64, np.int32, np.int8, np.float32, np.float64, int, float)) + check_str(name=f"{total_variation_distance.__name__} method", value=bucket_method, options=Options.BUCKET_METHODS.value) + bin_width, bin_count = bucket_data(data=x, method=bucket_method) + s1_h = Statistics._hist_1d(data=x, bin_count=bin_count, range=np.array([0, int(bin_width * bin_count)]), normalize=True) + s2_h = Statistics._hist_1d(data=y, bin_count=bin_count, range=np.array([0, int(bin_width * bin_count)]), normalize=True) + return 0.5 * np.sum(np.abs(s1_h - s2_h)) \ No newline at end of file diff --git a/simba/sandbox/train_yolo_2.py b/simba/sandbox/train_yolo_2.py new file mode 100644 index 000000000..9f6403059 --- /dev/null +++ b/simba/sandbox/train_yolo_2.py @@ -0,0 +1,72 @@ +import numpy as np +import os +from simba.third_party_label_appenders.converters import geometries_to_yolo +from simba.utils.read_write import find_files_of_filetypes_in_directory, read_df, get_fn_ext, find_video_of_file +from simba.mixins.geometry_mixin import GeometryMixin +from simba.bounding_box_tools.yolo.model import fit_yolo, inference_yolo +from simba.bounding_box_tools.yolo.visualize import YOLOVisualizer +from simba.third_party_label_appenders.converters import simba_rois_to_yolo, split_yolo_train_test_val + +TRAIN_DATA_DIR = r'/mnt/c/troubleshooting/mitra/project_folder/csv/yolo_train' +YOLO_SAVE_DIR = 
r"/mnt/c/troubleshooting/yolo_animal" +VIDEO_DIR = r'/mnt/c/troubleshooting/mitra/project_folder/videos' +BODY_PARTS_HULL = ['Nose_x', 'Nose_y', 'Tail_base_x', 'Tail_base_y', 'Left_side_x', 'Left_side_y', 'Right_side_x', 'Right_side_y'] + +# ######################### +# +# file_paths = find_files_of_filetypes_in_directory(directory=TRAIN_DATA_DIR, extensions=['.csv']) +# video_polygons = {} +# for file_path in file_paths: +# print(file_path) +# animal_data = read_df(file_path=file_path, file_type='csv', usecols=BODY_PARTS_HULL).values.reshape(-1, 4, 2).astype(np.int32)x +# animal_polygons = GeometryMixin().multiframe_bodyparts_to_polygon(data=animal_data) +# animal_polygons = GeometryMixin().multiframe_minimum_rotated_rectangle(shapes=animal_polygons) +# video_polygons[get_fn_ext(file_path)[1]] = GeometryMixin().geometries_to_exterior_keypoints(geometries=animal_polygons) +# +# ####################### +# +# for video_name, polygons in video_polygons.items(): +# polygons = {0: polygons} +# video_path = find_video_of_file(video_dir=VIDEO_DIR, filename=video_name) +# geometries_to_yolo(geometries=polygons, video_path=video_path, save_dir=YOLO_SAVE_DIR, sample=500, obb=True) +# +# +# ####################### +# YOLO_TRAINING_DIR = r"/mnt/c/troubleshooting/yolo_data_split_animal" # DIRECTORY WHERE WE SHOULD STORE THE SLIP DATA. +# split_yolo_train_test_val(data_dir=YOLO_SAVE_DIR, save_dir=YOLO_TRAINING_DIR, verbose=True) +# +# # +# # #NEXT, WE TRAIN A YOLO MODEL BASED ON THE SPLIT DATA +# INITIAL_WEIGHTS_PATH = r'/mnt/c/Users/sroni/Downloads/yolov8n.pt' #SOME INITIAL WEIGHTS TO START WITH, THEY CAN BE DOWNLOADED AT https://huggingface.co/Ultralytics +MODEL_SAVE_DIR = "/mnt/c/troubleshooting/yolo_mdl" #DIRECTORY WHERE TO SAVE THE TRAINED MODEL AND PERFORMANCE STATISTICS +# fit_yolo(initial_weights=INITIAL_WEIGHTS_PATH, model_yaml=os.path.join(YOLO_TRAINING_DIR, 'map.yaml'), save_path=MODEL_SAVE_DIR, epochs=25, batch=16, plots=True) + +#NEXT, WE USE THE TRAINED MODEL TO FIND THE ROIS IN A NEW VIDEO +INFERENCE_RESULTS = "/mnt/c/troubleshooting/yolo_results" #DIRECTORY WHERE TO STORE THE RESULTS IN CSV FORMAT +VIDEO_PATH = r'/mnt/c/troubleshooting/mitra/project_folder/videos/501_MA142_Gi_Saline_0513.mp4' #PATH TO VIDEO TO ANALYZE +inference_yolo(weights_path=os.path.join(MODEL_SAVE_DIR, 'train', 'weights', 'best.pt'), video_path=VIDEO_PATH, verbose=True, save_dir=INFERENCE_RESULTS, gpu=True, batch_size=16, interpolate=True) + + +# #FINALLY, WE VISUALIZE THE RESULTS TO CHECK THAT THE PREDICTIONS ARE ACCURATE +# DATA_PATH = r"/mnt/c/troubleshooting/yolo_results/501_MA142_Gi_CNO_0514_clipped.csv" #PATH TO ONE OF THE CSV FILES CREATED IN THE PRIOR STEP. +# VIDEO_PATH = r'/mnt/c/troubleshooting/mitra/project_folder/videos/clipped/501_MA142_Gi_CNO_0514_clipped.mp4' # PATH TO THE VIDEO REPRESENTING THE ``DATA_PATH`` FILE. 
+
+# #FINALLY, WE VISUALIZE THE RESULTS TO CHECK THAT THE PREDICTIONS ARE ACCURATE
+# DATA_PATH = r"/mnt/c/troubleshooting/yolo_results/501_MA142_Gi_CNO_0514_clipped.csv" #PATH TO ONE OF THE CSV FILES CREATED IN THE PRIOR STEP.
+# VIDEO_PATH = r'/mnt/c/troubleshooting/mitra/project_folder/videos/clipped/501_MA142_Gi_CNO_0514_clipped.mp4' # PATH TO THE VIDEO REPRESENTING THE ``DATA_PATH`` FILE.
+# SAVE_DIR = r"/mnt/c/troubleshooting/yolo_videos" # DIRECTORY WHERE TO SAVE THE YOLO VIDEO
+#
+# yolo_visualizer = YOLOVisualizer(data_path=DATA_PATH, video_path=VIDEO_PATH, save_dir=SAVE_DIR, palette='Accent', thickness=20, core_cnt=-1, verbose=False)
+# yolo_visualizer.run()
+
+#geometries_to_yolo(geometries=animal_polygons, video_path=r'C:\troubleshooting\mitra\project_folder\videos\501_MA142_Gi_CNO_0514.mp4', save_dir=r"C:\troubleshooting\coco_data", sample=500, obb=True)
diff --git a/simba/sandbox/train_yolo_rois.py b/simba/sandbox/train_yolo_rois.py
new file mode 100644
index 000000000..3acda29d2
--- /dev/null
+++ b/simba/sandbox/train_yolo_rois.py
@@ -0,0 +1,40 @@
+import os
+from simba.bounding_box_tools.yolo.model import fit_yolo, inference_yolo
+from simba.bounding_box_tools.yolo.visualize import YOLOVisualizer
+from simba.third_party_label_appenders.converters import simba_rois_to_yolo, split_yolo_train_test_val
+
+
+# FIRST WE CREATE SOME YOLO FORMATTED DATA LABELS AND EXTRACT THEIR ASSOCIATED IMAGES FROM THE VIDEOS AND SIMBA ROI DEFINITIONS
+ROI_PATH = r"/mnt/c/troubleshooting/RAT_NOR/project_folder/logs/measures/ROI_definitions.h5"
+VIDEO_DIR = r'/mnt/c/troubleshooting/RAT_NOR/project_folder/videos' #DIRECTORY HOLDING THE VIDEOS WITH DEFINED ROI'S
+YOLO_SAVE_DIR = r"/mnt/c/troubleshooting/yolo_data" # DIRECTORY WHICH SHOULD STORE THE YOLO FORMATTED DATA
+VIDEO_FRAME_COUNT = 200 # THE NUMBER OF LABELS WE SHOULD CREATE FROM EACH VIDEO
+GREYSCALE = False #IF TRUE, CONVERT COLOR VIDEOS TO GREYSCALE YOLO IMAGES
+
+#WE CREATE THE YOLO DATASET BASED ON THE ABOVE DEFINITIONS
+simba_rois_to_yolo(roi_path=ROI_PATH, video_dir=VIDEO_DIR, save_dir=YOLO_SAVE_DIR, roi_frm_cnt=VIDEO_FRAME_COUNT, greyscale=GREYSCALE, verbose=True, obb=True)
+
+
+#NEXT, WE SPLIT THE DATA CREATED IN THE PRIOR CELL INTO DATA FOR TRAINING, TESTING AND VALIDATION.
+YOLO_TRAINING_DIR = r"/mnt/c/troubleshooting/yolo_data_split" # DIRECTORY WHERE WE SHOULD STORE THE SPLIT DATA.
+split_yolo_train_test_val(data_dir=YOLO_SAVE_DIR, save_dir=YOLO_TRAINING_DIR, verbose=True)
+
+
+#NEXT, WE TRAIN A YOLO MODEL BASED ON THE SPLIT DATA
+INITIAL_WEIGHTS_PATH = r'/mnt/c/troubleshooting/coco_data/weights/yolov8n-obb.pt' #SOME INITIAL WEIGHTS TO START WITH, THEY CAN BE DOWNLOADED AT https://huggingface.co/Ultralytics
+MODEL_SAVE_DIR = "/mnt/c/troubleshooting/yolo_mdl" #DIRECTORY WHERE TO SAVE THE TRAINED MODEL AND PERFORMANCE STATISTICS
+fit_yolo(initial_weights=INITIAL_WEIGHTS_PATH, model_yaml=os.path.join(YOLO_TRAINING_DIR, 'map.yaml'), save_path=MODEL_SAVE_DIR, epochs=25, batch=16, plots=True)
+
+
+#NEXT, WE USE THE TRAINED MODEL TO FIND THE ROIS IN A NEW VIDEO
+INFERENCE_RESULTS = "/mnt/c/troubleshooting/yolo_results" #DIRECTORY WHERE TO STORE THE RESULTS IN CSV FORMAT
+VIDEO_PATH = r'/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/clipped/03152021_NOB_IOT_8_clipped.mp4' #PATH TO VIDEO TO ANALYZE
+inference_yolo(weights_path=os.path.join(MODEL_SAVE_DIR, 'train', 'weights', 'best.pt'), video_path=VIDEO_PATH, verbose=True, save_dir=INFERENCE_RESULTS, gpu=False, batch_size=16)
+
+#FINALLY, WE VISUALIZE THE RESULTS TO CHECK THAT THE PREDICTIONS ARE ACCURATE
+DATA_PATH = r"/mnt/c/troubleshooting/yolo_results/03152021_NOB_IOT_8_clipped.csv" #PATH TO ONE OF THE CSV FILES CREATED IN THE PRIOR STEP.
+VIDEO_PATH = r'/mnt/c/troubleshooting/RAT_NOR/project_folder/videos/clipped/03152021_NOB_IOT_8_clipped.mp4' # PATH TO THE VIDEO REPRESENTING THE ``DATA_PATH`` FILE.
+SAVE_DIR = r"/mnt/c/troubleshooting/yolo_videos" # DIRECTORY WHERE TO SAVE THE YOLO VIDEO + +yolo_visualizer = YOLOVisualizer(data_path=DATA_PATH, video_path=VIDEO_PATH, save_dir=SAVE_DIR, palette='Accent', thickness=20, core_cnt=-1, verbose=False) +yolo_visualizer.run() \ No newline at end of file diff --git a/simba/sandbox/two_fish_feature_extractor_040924.py b/simba/sandbox/two_fish_feature_extractor_040924.py new file mode 100644 index 000000000..4cbc34e8f --- /dev/null +++ b/simba/sandbox/two_fish_feature_extractor_040924.py @@ -0,0 +1,154 @@ +from __future__ import division + +import os.path +from copy import deepcopy +import numpy as np +import pandas as pd +from numba import typed + +from simba.data_processors.interpolation_smoothing import Interpolate +from simba.utils.checks import check_if_filepath_list_is_empty, check_all_file_names_are_represented_in_video_log +from simba.mixins.config_reader import ConfigReader +from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin +from simba.mixins.feature_extraction_supplement_mixin import FeatureExtractionSupplemental +from simba.mixins.timeseries_features_mixin import TimeseriesFeatureMixin +from simba.mixins.circular_statistics import CircularStatisticsMixin +from simba.utils.printing import SimbaTimer, stdout_success +from simba.mixins.statistics_mixin import Statistics +from simba.utils.read_write import get_fn_ext, read_df, read_video_info, write_df +from simba.utils.lookups import cardinality_to_integer_lookup +from simba.feature_extractors.perimeter_jit import jitted_hull + + +ANIMAL_NAMES = ['Cleaner', 'Client'] +MID_BODYPARTS = ['BodyMid_1', 'BodyMid_2'] +MOUTH_BODYPARTS = ['HeadTerminalMouth_1', 'HeadTerminalMouth_2'] +HEAD_MID = ['HeadMid_1', 'HeadMid_2'] +ROLL_WINDOWS_VALUES_S = np.array([10, 5, 2, 1, 0.5, 0.25]) + +class TwoFishFeatureExtractor(ConfigReader, FeatureExtractionMixin): + """ + + :example: + >>> feature_extractor = TwoFishFeatureExtractor(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_fish/project_folder/project_config.ini') + >>> feature_extractor.run() + """ + + def __init__(self, config_path: str): + ConfigReader.__init__(self, config_path=config_path, read_video_info=True) + check_if_filepath_list_is_empty(filepaths=self.outlier_corrected_paths, error_msg=f'No data files found in {self.outlier_corrected_dir} directory') + check_all_file_names_are_represented_in_video_log(video_info_df=self.video_info_df, data_paths=self.outlier_corrected_paths) + _ = Interpolate(input_path=self.outlier_corrected_dir, config_path=self.config_path, method='Body-parts: Quadratic') + print(f"Extracting features from {len(self.outlier_corrected_paths)} file(s)...") + + def run(self): + for file_cnt, file_path in enumerate(self.outlier_corrected_paths): + self.video_timer = SimbaTimer(start=True) + _, self.file_name, _ = get_fn_ext(file_path) + self.data = read_df(file_path=file_path, file_type=self.file_type) + self.results = deepcopy(self.data) + video_info, self.px_per_mm, self.fps = read_video_info(self.video_info_df, self.file_name) + self.angular_dispersion_windows = [] + for i in ROLL_WINDOWS_VALUES_S: self.angular_dispersion_windows.append(int(self.fps * i)) + self.data.columns = self.bp_headers + self.save_path = os.path.join(self.features_dir, f'{self.file_name}.{self.file_type}') + + print('Compute X relative to Y movement...') + for animal_name, body_part_name in zip(ANIMAL_NAMES, MID_BODYPARTS): + bp_arr = self.data[[f'{animal_name}_{body_part_name}_x', 
f'{animal_name}_{body_part_name}_y']].values + d = FeatureExtractionSupplemental.rolling_horizontal_vs_vertical_movement(data=bp_arr, pixels_per_mm=self.px_per_mm, time_windows=ROLL_WINDOWS_VALUES_S, fps=self.fps) + df = pd.DataFrame(d, columns=[f'X_relative_to_Y_animal_{animal_name}_{x}' for x in ROLL_WINDOWS_VALUES_S]) + self.results = pd.concat([self.results, df], axis=1) + + print('Compute velocity and acceleration...') + for animal_name, body_part_name in zip(ANIMAL_NAMES, MID_BODYPARTS): + bp_df = self.data[[f'{animal_name}_{body_part_name}_x', f'{animal_name}_{body_part_name}_y']] + shifted_ = self.create_shifted_df(df=bp_df) + self.results[f'framewise_metric_movement_{animal_name}'] = self.framewise_euclidean_distance(location_1=shifted_.values[:, [0, 1]], location_2=shifted_.values[:, [2, 3]], px_per_mm=self.px_per_mm).astype(np.float32) + velocity = TimeseriesFeatureMixin.sliding_descriptive_statistics(data=self.results[f'framewise_metric_movement_{animal_name}'].values.astype(np.float32), window_sizes=ROLL_WINDOWS_VALUES_S, sample_rate=int(self.fps), statistics=typed.List(['sum'])).astype(np.float32) + df = pd.DataFrame(velocity[0], columns=[f'sliding_sum_movement_{animal_name}_{x}' for x in ROLL_WINDOWS_VALUES_S]) + self.results = pd.concat([self.results, df], axis=1) + self.results[f'acceleration_{animal_name}'] = TimeseriesFeatureMixin.acceleration(data=self.results[f'framewise_metric_movement_{animal_name}'].values.astype(np.float32), pixels_per_mm=self.px_per_mm, fps=int(self.fps)) + #SLOW self.results[f'distance_moved_autocorrelation_{animal_name}'] = Statistics.sliding_autocorrelation(data=self.results[f'framewise_metric_movement_{animal_name}'].values.astype(np.float32), max_lag=1.0, time_window=float(self.fps*2), fps=float(self.fps)) + + acceleration_correlation = Statistics.sliding_spearman_rank_correlation(sample_1=self.results[f'acceleration_{ANIMAL_NAMES[0]}'].values.astype(np.float32), sample_2=self.results[f'acceleration_{ANIMAL_NAMES[1]}'].values.astype(np.float32), time_windows=ROLL_WINDOWS_VALUES_S, fps=int(self.fps)) + df = pd.DataFrame(acceleration_correlation, columns=[f'animal_acceleration_correlations_{x}' for x in ROLL_WINDOWS_VALUES_S]) + self.results = pd.concat([self.results, df], axis=1) + movement_autocorrelation = TimeseriesFeatureMixin.sliding_two_signal_crosscorrelation(x=self.results[f'framewise_metric_movement_{ANIMAL_NAMES[0]}'].values.astype(np.float64), y=self.results[f'framewise_metric_movement_{ANIMAL_NAMES[1]}'].values.astype(np.float64), windows=ROLL_WINDOWS_VALUES_S, sample_rate=float(self.fps), normalize=True, lag=self.fps) + df = pd.DataFrame(movement_autocorrelation, columns=[f'sliding_movement_autocorrelation_{x}' for x in ROLL_WINDOWS_VALUES_S]) + self.results = pd.concat([self.results, df], axis=1) + + print('Compute circular statistics...') + for (animal, bp1, bp2) in zip(ANIMAL_NAMES, MOUTH_BODYPARTS, HEAD_MID): + bp_1 = self.data[[f'{animal}_{bp1}_x', f'{animal}_{bp1}_y']].values + bp_2 = self.data[[f'{animal}_{bp2}_x', f'{animal}_{bp2}_y']].values + angle = CircularStatisticsMixin.direction_two_bps(anterior_loc=bp_1, posterior_loc=bp_2).astype(np.float32) + self.results[f'direction_degrees_{animal}'] = angle + compass_direction = [cardinality_to_integer_lookup()[d] for d in CircularStatisticsMixin.degrees_to_cardinal(data=angle)] + self.results[f'compass_direction_{animal}'] = compass_direction + sliding_unique = TimeseriesFeatureMixin.sliding_unique(x=np.array(compass_direction).astype(np.int64), 
time_windows=ROLL_WINDOWS_VALUES_S, fps=int(self.fps))
+                df = pd.DataFrame(sliding_unique, columns=[f'sliding_unique_compass_directions_{animal}_{x}' for x in ROLL_WINDOWS_VALUES_S])
+                self.results = pd.concat([self.results, df], axis=1)
+                self.results[f'instantaneous_angular_velocity_{animal}'] = CircularStatisticsMixin.instantaneous_angular_velocity(data=angle, bin_size=1)
+                self.results[f'instantaneous_rotational_direction_{animal}'] = CircularStatisticsMixin.rotational_direction(data=angle, stride=1)
+            sliding_circular_correlation = CircularStatisticsMixin.sliding_circular_correlation(sample_1=self.results[f'direction_degrees_{ANIMAL_NAMES[0]}'].values.astype(np.float32), sample_2=self.results[f'direction_degrees_{ANIMAL_NAMES[1]}'].values.astype(np.float32), time_windows=ROLL_WINDOWS_VALUES_S, fps=float(self.fps))
+            df = pd.DataFrame(sliding_circular_correlation, columns=[f'sliding_circular_correlation_{x}' for x in ROLL_WINDOWS_VALUES_S])
+            self.results = pd.concat([self.results, df], axis=1)
+            for s in ROLL_WINDOWS_VALUES_S:
+                window = int(self.fps * s)
+                for animal in ANIMAL_NAMES:
+                    c = f"animal_{animal}_instantaneous_rotational_direction_rolling_{window}_median"
+                    self.results[c] = self.results[f'instantaneous_rotational_direction_{animal}'].rolling(window, min_periods=1).median()
+                    c = f"animal_{animal}_instantaneous_angular_velocity_{window}_median"
+                    self.results[c] = self.results[f'instantaneous_angular_velocity_{animal}'].rolling(window, min_periods=1).median()
+
+            print('Computing pose probability scores...')
+            print('Computing animal sizes...')
+            for animal_name, animal_bps in self.animal_bp_dict.items():
+                x_y = [v for p in zip(animal_bps['X_bps'], animal_bps['Y_bps']) for v in p]
+                p = self.data[animal_bps['P_bps']].values
+                ranges = FeatureExtractionMixin.count_values_in_range(data=p, ranges=np.array([[0.0, 0.1], [0.0, 0.5], [0.0, 0.75], [0.0, 0.95], [0.0, 0.99]]))
+                df = pd.DataFrame(ranges, columns=[f'pose_probability_score_{animal_name}_below_{x}' for x in [0.1, 0.5, 0.75, 0.95, 0.99]])
+                self.results = pd.concat([self.results, df], axis=1)
+                animal_data = self.data[x_y].values.astype(np.float32).reshape(-1, len(animal_bps['Y_bps']), 2)
+                self.results[f'animal_{animal_name}_area'] = jitted_hull(points=animal_data, target='area') / self.px_per_mm
+            for s in ROLL_WINDOWS_VALUES_S:
+                window = int(self.fps * s)
+                for animal in ANIMAL_NAMES:
+                    c = f"animal_{animal}_area_median_rolling_{window}"
+                    self.results[c] = self.results[f'animal_{animal}_area'].rolling(window, min_periods=1).median()
+
+            print('Computing animal distances...')
+            distances = pd.DataFrame()
+            animal_1_bps = [v for p in zip(self.animal_bp_dict[ANIMAL_NAMES[0]]['X_bps'], self.animal_bp_dict[ANIMAL_NAMES[0]]['Y_bps']) for v in p]
+            animal_2_bps = [v for p in zip(self.animal_bp_dict[ANIMAL_NAMES[1]]['X_bps'], self.animal_bp_dict[ANIMAL_NAMES[1]]['Y_bps']) for v in p]
+            for i in range(1, len(animal_1_bps), 2):
+                bp_1_name = animal_1_bps[i][:-2]
+                animal_1_data = self.results[animal_1_bps[i-1:i+1]].values
+                for j in range(1, len(animal_2_bps), 2):
+                    bp_2_name = animal_2_bps[j][:-2]
+                    animal_2_data = self.results[animal_2_bps[j - 1:j + 1]].values
+                    animal_dist = self.framewise_euclidean_distance(location_1=animal_1_data, location_2=animal_2_data, px_per_mm=self.px_per_mm).astype(np.float32)
+                    distances[f'{bp_1_name}_{bp_2_name}'] = animal_dist
+            self.results['animal_minimum_body_part_distance'] = distances.min(axis=1)
+            self.results['animal_maximum_body_part_distance'] = distances.max(axis=1)
+            self.results['animal_variance_body_part_distance'] = distances.var(axis=1)
+            self.results['animal_median_body_part_distance'] = distances.median(axis=1)
+            for s in ROLL_WINDOWS_VALUES_S:
+                window = int(self.fps * s)
+                c = f"animal_minimum_body_part_distance_rolling_{window}"
+                self.results[c] = self.results["animal_minimum_body_part_distance"].rolling(window, min_periods=1).min()
+                c = f"animal_median_body_part_distance_rolling_{window}"
+                self.results[c] = self.results["animal_median_body_part_distance"].rolling(window, min_periods=1).median()
+            self.save()
+
+        self.timer.stop_timer()
+        stdout_success(msg=f'Feature extraction complete for {len(self.outlier_corrected_paths)} files.', elapsed_time=self.timer.elapsed_time_str)
+
+    def save(self):
+        _ = write_df(df=self.results.fillna(-1.0), file_type=self.file_type, save_path=self.save_path)
+        self.video_timer.stop_timer()
+        print(f'Feature extraction complete video {self.file_name} (elapsed time: {self.video_timer.elapsed_time_str})')
+
+# feature_extractor = TwoFishFeatureExtractor(config_path='/Users/simon/Desktop/envs/simba/troubleshooting/two_fish/project_folder/project_config.ini')
+# feature_extractor.run()
diff --git a/simba/sandbox/two_fish_feature_extractor_040924.py.zip b/simba/sandbox/two_fish_feature_extractor_040924.py.zip
new file mode 100644
index 0000000000000000000000000000000000000000..fdb4e7c5ff825719aea43be95be12086fccc0341
Binary files /dev/null and b/simba/sandbox/two_fish_feature_extractor_040924.py.zip differ
diff --git a/simba/sandbox/unsupervised/bout_aggregator.py b/simba/sandbox/unsupervised/bout_aggregator.py
new file mode 100644
--- /dev/null
+++ b/simba/sandbox/unsupervised/bout_aggregator.py
+import os
+from typing import List, Optional, Tuple, Union
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+import pandas as pd
+from joblib import Parallel, delayed
+from joblib.externals.loky import get_reusable_executor
+
+from simba.utils.checks import (check_all_file_names_are_represented_in_video_log, check_float, check_if_dir_exists, check_int, check_str, check_valid_boolean, check_valid_dataframe, check_valid_lst)
+from simba.utils.data import detect_bouts
+from simba.utils.enums import Formats, Methods
+from simba.utils.printing import SimbaTimer, stdout_success
+from simba.utils.read_write import (find_core_cnt, find_files_of_filetypes_in_directory, get_fn_ext, read_df, read_video_info, read_video_info_csv, write_pickle)
+
+
+def video_clf_bout_aggregator(data: Union[str, os.PathLike],
+                              feature_names: List[str],
+                              clf_names: Union[str, List[str]],
+                              pose_names: List[str],
+                              sample_rate: float,
+                              min_bout_length: Optional[float] = None,
+                              verbose: bool = True,
+                              agg_method: Literal["mean", "median"] = "mean") -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
+
+    """
+    Helper to aggregate features to bout-level representations.
+
+    :param Union[str, os.PathLike] data: Path to a CSV file with features, pose, and classifications.
+    :param List[str] feature_names: Names of feature columns.
+    :param Union[str, List[str]] clf_names: Names of classifier columns.
+    :param List[str] pose_names: Names of pose (body-part) columns.
+    :param float sample_rate: The sample rate (FPS) at which the data was collected.
+    :param Optional[float] min_bout_length: The length of the shortest allowed bout in milliseconds. Default 0, which means all bouts.
+    :param bool verbose: If True, prints progress.
+    :param Literal["mean", "median"] agg_method: Aggregation method. Default "mean".
+    :return: Three dataframe tuple: the aggregate feature values, metadata associated with the bout (i.e., classification probability), and the pose associated with the bout.
+    :rtype: Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]
+    """
+
+    timer = SimbaTimer(start=True)
+    check_valid_lst(source='video_bout_aggregator feature_names', valid_dtypes=(str,), min_len=1, data=feature_names)
+    check_valid_lst(source='video_bout_aggregator pose_names', valid_dtypes=(str,), min_len=1, data=pose_names)
+    if isinstance(clf_names, list):
+        check_valid_lst(source='video_bout_aggregator clf_names', valid_dtypes=(str,), min_len=1, data=clf_names)
+    else:
+        check_str(name='video_bout_aggregator clf_names', value=clf_names)
+        clf_names = [clf_names]
+    check_float(name='video_bout_aggregator sample_rate', value=sample_rate, min_value=10e-6)
+    if min_bout_length is not None:
+        check_float(name='video_bout_aggregator min_bout_length', value=min_bout_length, min_value=0)
+    else:
+        min_bout_length = 0
+    check_str(name='video_bout_aggregator agg_method', value=agg_method, options=Methods.AGG_METHODS.value)
+    name = get_fn_ext(filepath=data)[1]
+    if verbose:
+        print(f"Calculating bout aggregate statistics for video {name}...")
+    data = read_df(file_path=data, file_type='csv')
+    p_cols = [f'Probability_{x}' for x in clf_names]
+    check_valid_dataframe(df=data, source='video_bout_aggregator data', valid_dtypes=Formats.NUMERIC_DTYPES.value, required_fields=clf_names + feature_names + p_cols + pose_names)
+    bouts = detect_bouts(data_df=data, target_lst=clf_names, fps=sample_rate).sort_values(by="Start_frame")
+    feature_results, meta_results, pose_results = [], [], []
+    for clf_cnt, clf_name in enumerate(clf_names):
+        clf_bouts = bouts[bouts['Event'] == clf_name].reset_index(drop=True)
+        if len(clf_bouts) > 0:
+            clf_bouts = clf_bouts[clf_bouts["Bout_time"] >= (min_bout_length / 1000)][["Start_frame", "End_frame"]].values
+            for clf_bout_id, (start_idx, end_idx) in enumerate(zip(clf_bouts[:, 0], clf_bouts[:, 1])):
+                clf_bout = data.iloc[start_idx: end_idx+1]
+                if agg_method == "mean":
+                    clf_agg_df = pd.DataFrame(clf_bout[feature_names].mean()).T
+                    clf_meta_df = pd.DataFrame([clf_bout[f"Probability_{clf_name}"].mean()], columns=['PROBABILITY'])
+                else:
+                    clf_agg_df = pd.DataFrame(clf_bout[feature_names].median()).T
+                    clf_meta_df = pd.DataFrame([clf_bout[f"Probability_{clf_name}"].median()], columns=['PROBABILITY'])
+                for df in (clf_agg_df, clf_meta_df):
+                    df.insert(0, "BOUT_ID", clf_bout_id)
+                    df.insert(0, "CLASSIFIER", clf_name)
+                    df.insert(0, "BOUT_END_FRAME", end_idx)
+                    df.insert(0, "BOUT_START_FRAME", start_idx)
+                    df.insert(0, "VIDEO", name)
+                feature_results.append(clf_agg_df)
+                meta_results.append(clf_meta_df)
+
+    feature_results = pd.concat(feature_results, axis=0).reset_index(drop=True)
+    meta_results = pd.concat(meta_results, axis=0).reset_index(drop=True)
+    pose_results = data[pose_names].copy()
+    pose_results.insert(0, "VIDEO", name)
+    feature_results = feature_results.reset_index(drop=True).set_index(['BOUT_ID', "CLASSIFIER", "BOUT_END_FRAME", "BOUT_START_FRAME", "VIDEO"])
+    meta_results = meta_results.reset_index(drop=True).set_index(['BOUT_ID', "CLASSIFIER", "BOUT_END_FRAME", "BOUT_START_FRAME", "VIDEO"])
+    timer.stop_timer()
+    if verbose:
+        print(f'Bout statistics for video {name} complete. Elapsed time: {timer.elapsed_time_str}s')
+    return (feature_results, meta_results, pose_results)
+
+def directory_clf_bout_aggregator(dir: Union[str, os.PathLike],
+                                  save_path: Union[str, os.PathLike],
+                                  feature_names: List[str],
+                                  clf_names: Union[str, List[str]],
+                                  pose_names: List[str],
+                                  video_info: Union[str, os.PathLike, pd.DataFrame],
+                                  min_bout_length: Optional[float] = None,
+                                  core_cnt: int = -1,
+                                  verbose: bool = True,
+                                  agg_method: Literal["mean", "median"] = "mean") -> None:
+
+    """
+    Aggregate classifier bout-level data from multiple files in a directory.
+
+    :param Union[str, os.PathLike] dir: Path to the directory containing CSV files with machine learning classifier results.
+    :param Union[str, os.PathLike] save_path: Path to save the aggregated data as a pickle file.
+    :param List[str] feature_names: List of feature names to include in the aggregation.
+    :param Union[str, List[str]] clf_names: Classifier names to include in the aggregation. Can be a single string or a list of strings.
+    :param List[str] pose_names: Pose-estimation body-part column names to include in the aggregation.
+    :param Union[str, os.PathLike, pd.DataFrame] video_info: Path to a CSV file or a DataFrame containing metadata about the videos. This is used to match classifier results with video metadata (i.e., FPS/sample rate).
+    :param Optional[float] min_bout_length: Minimum length (in milliseconds) of a bout to be included in the aggregation. If None, no filtering is applied.
+    :param int core_cnt: Number of CPU cores to use for parallel processing. Use -1 to automatically select the maximum available cores.
+    :param bool verbose: If True, outputs progress and status messages during processing.
+    :param Literal["mean", "median"] agg_method: Aggregation method to use for summarizing bout data. Options are "mean" or "median".
+    :return: None. The aggregated data is saved to the specified save_path as a pickle file.
+    :rtype: None
+
+    :example:
+    >>> feature_names = list(pd.read_csv(r"C:\troubleshooting\nastacia_unsupervised\feature_names.csv", usecols=['FEATURE_NAMES']).values[:, 0])
+    >>> clf_names = ['Attack', 'Escape', 'Defensive', 'anogenital_prediction', 'face', 'body']
+    >>> bp_names = list(pd.read_csv(r"C:\troubleshooting\nastacia_unsupervised\bp_names.csv", usecols=['BP_NAMES']).values[:, 0])
+    >>> directory_clf_bout_aggregator(dir=r'C:\troubleshooting\nastacia_unsupervised\machine_results\machine_results', feature_names=feature_names, clf_names=clf_names, pose_names=bp_names, video_info=r"C:\troubleshooting\nastacia_unsupervised\video_info.csv", save_path=r"C:\troubleshooting\nastacia_unsupervised\datasets\data.pickle")
+    """
+
+    timer = SimbaTimer(start=True)
+    check_valid_lst(source='video_bout_aggregator feature_names', valid_dtypes=(str,), min_len=1, data=feature_names)
+    check_valid_lst(source='video_bout_aggregator pose_names', valid_dtypes=(str,), min_len=1, data=pose_names)
+    if isinstance(clf_names, list):
+        check_valid_lst(source='video_bout_aggregator clf_names', valid_dtypes=(str,), min_len=1, data=clf_names)
+    else:
+        check_str(name='video_bout_aggregator clf_names', value=clf_names)
+        clf_names = [clf_names]
+    if min_bout_length is not None:
+        check_float(name='video_bout_aggregator min_bout_length', value=min_bout_length, min_value=0)
+    else:
+        min_bout_length = 0
+    check_str(name='video_bout_aggregator agg_method', value=agg_method, options=Methods.AGG_METHODS.value)
+    if isinstance(video_info, str): video_info = read_video_info_csv(file_path=video_info)
+    check_if_dir_exists(in_dir=os.path.dirname(save_path))
+    check_int(name=f'{directory_clf_bout_aggregator.__name__} core_cnt', value=core_cnt, min_value=-1, unaccepted_vals=[0])
+    check_valid_boolean(value=[verbose], source='video_bout_aggregator verbose')
+    core_cnt = find_core_cnt()[0] if core_cnt == -1 or core_cnt > find_core_cnt()[0] else core_cnt
+    file_paths = find_files_of_filetypes_in_directory(directory=dir, extensions=['.csv'], raise_error=True)
+    check_all_file_names_are_represented_in_video_log(video_info_df=video_info, data_paths=file_paths)
+    file_names = [get_fn_ext(filepath=x)[1] for x in file_paths]
+    sample_rates = [read_video_info(video_name=x, video_info_df=video_info)[2] for x in file_names]
+    results = Parallel(n_jobs=core_cnt, verbose=1, backend="loky")(delayed(video_clf_bout_aggregator)(data=i,
+                                                                                                      feature_names=feature_names,
+                                                                                                      clf_names=clf_names,
+                                                                                                      pose_names=pose_names,
+                                                                                                      sample_rate=j,
+                                                                                                      min_bout_length=min_bout_length,
+                                                                                                      verbose=verbose,
+                                                                                                      agg_method=agg_method) for i, j in zip(file_paths, sample_rates))
+    get_reusable_executor().shutdown(wait=True)
+    feature_results = pd.concat([x[0] for x in results], axis=0)
+    metadata_results = pd.concat([x[1] for x in results], axis=0)
+    pose_results = pd.concat([x[2] for x in results], axis=0)
+    write_pickle(data={'POSE': pose_results, 'FEATURES': feature_results, 'META': metadata_results}, save_path=save_path)
+    timer.stop_timer()
+    stdout_success(msg=f"Data saved at {save_path}", elapsed_time=timer.elapsed_time_str)
+
+
+
+# name: str,
+# feature_names: List[str],
+# clf_names: Union[str, List[str]],
+# pose_names: Union[str, List[str]],
+# sample_rate: float,
+# min_bout_length: Optional[float] = None,
+# agg_method: Literal["mean", "median"] = "mean") -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:

+# feature_names = 
list(pd.read_csv(r"C:\troubleshooting\nastacia_unsupervised\feature_names.csv", usecols=['FEATURE_NAMES']).values[:, 0]) +# clf_names = ['Attack', 'Escape', 'Defensive', 'anogenital_prediction', 'face', 'body'] +# bp_names = list(pd.read_csv(r"C:\troubleshooting\nastacia_unsupervised\bp_names.csv", usecols=['BP_NAMES']).values[:, 0]) +# # #video_bout_aggregator(data=r"C:\troubleshooting\nastacia_unsupervised\machine_results\machine_results\Box2_IF19_7_20211109T173625_4.csv", name='Box2_IF19_7_20211109T173625_4', feature_names=feature_names, clf_names=clf_names, sample_rate=25, pose_names=bp_names) +# # # +# # # +# directory_clf_bout_aggregator(dir=r'C:\troubleshooting\nastacia_unsupervised\machine_results\machine_results', +# feature_names=feature_names, +# clf_names=clf_names, +# pose_names=bp_names, +# video_info=r"C:\troubleshooting\nastacia_unsupervised\video_info.csv", +# save_path=r"C:\troubleshooting\nastacia_unsupervised\datasets\data.pickle") +# +# +# +# + + + # + # + # + # + # + # + # + # + # + # + # + # + # + # check_instance( + # source=f"{bout_aggregator.__name__} data", + # instance=data, + # accepted_types=(pd.DataFrame,), + # ) + # check_instance( + # source=f"{bout_aggregator.__name__} clfs", instance=clfs, accepted_types=(list,) + # ) + # check_valid_lst( + # data=clfs, + # source=f"{bout_aggregator.__name__} clfs", + # valid_dtypes=(str,), + # min_len=1, + # ) + # check_instance( + # source=f"{bout_aggregator.__name__} video_info", + # instance=video_info, + # accepted_types=(pd.DataFrame,), + # ) + # check_int( + # name=f"{bout_aggregator.__name__} min_bout_length", + # value=min_bout_length, + # min_value=0, + # ) + # check_str( + # name=f"{bout_aggregator.__name__} aggregator", + # value=aggregator, + # options=("MEAN", "MEDIAN"), + # ) + # + # def bout_aggregator_mp(frms, data, clf_name): + # bout_df = data.iloc[frms[0] : frms[1] + 1] + # bout_video, start_frm, end_frm = ( + # bout_df["VIDEO"].values[0], + # bout_df["FRAME"].values[0], + # bout_df["FRAME"].values[-1], + # ) + # if aggregator == "MEAN": + # agg_df = pd.DataFrame(bout_df[feature_names].mean()).T + # agg_df["PROBABILITY"] = bout_df[f"Probability_{clf_name}"].mean() + # elif aggregator == "MEDIAN": + # agg_df = pd.DataFrame(bout_df[feature_names].median()).T + # agg_df["PROBABILITY"] = bout_df[f"Probability_{clf_name}"].median() + # agg_df["CLASSIFIER"] = clf_name + # agg_df.insert(0, "END_FRAME", end_frm) + # agg_df.insert(0, "START_FRAME", start_frm) + # agg_df.insert(0, "VIDEO", bout_video) + # return agg_df + # + # output = [] + # for cnt, video in enumerate(data["VIDEO"].unique()): + # print( + # f'Processing video {video} ({str(cnt+1)}/{str(len(data["VIDEO"].unique()))})...' 
+    # )
+    # video_df = data[data["VIDEO"] == video].reset_index(drop=True)
+    # for clf in clfs:
+    #     _, _, fps = read_video_info(vid_info_df=video_info, video_name=video)
+    #     bouts = detect_bouts(
+    #         data_df=video_df, target_lst=[clf], fps=fps
+    #     ).sort_values(by="Start_frame")
+    #     bouts = bouts[bouts["Bout_time"] >= min_bout_length / 1000][
+    #         ["Start_frame", "End_frame"]
+    #     ].values
+    #     if len(bouts) > 0:
+    #         bouts = [x.tolist() for x in bouts]
+    #         results = Parallel(n_jobs=core_cnt, verbose=0, backend="loky")(
+    #             delayed(bout_aggregator_mp)(j, video_df, clf) for j in bouts
+    #         )
+    #         results = pd.concat(results, axis=0).sort_values(
+    #             by=["VIDEO", "START_FRAME"]
+    #         )
+    #         output.append(results)
+    # get_reusable_executor().shutdown(wait=True)
+    # timer.stop_timer()
+    # stdout_success(
+    #     msg="Bout aggregation statistics complete!", elapsed_time=timer.elapsed_time_str
+    # )
+    # return pd.concat(output, axis=0).reset_index(drop=True)
diff --git a/simba/sandbox/unsupervised/cluster_frequentist_stats.py b/simba/sandbox/unsupervised/cluster_frequentist_stats.py
new file mode 100644
index 000000000..9dda544fc
--- /dev/null
+++ b/simba/sandbox/unsupervised/cluster_frequentist_stats.py
@@ -0,0 +1,121 @@
+from typing import Union
+import os
+from copy import deepcopy
+import pandas as pd
+import numpy as np
+import itertools
+from simba.mixins.train_model_mixin import TrainModelMixin
+from simba.utils.checks import check_valid_boolean, check_file_exist_and_readable, check_if_keys_exist_in_dict
+from simba.utils.read_write import find_files_of_filetypes_in_directory, read_pickle, get_unique_values_in_iterable
+from simba.utils.errors import InvalidInputError
+from simba.utils.enums import UML
+from simba.utils.printing import SimbaTimer
+from scipy.stats import f_oneway, kruskal
+from statsmodels.stats.multicomp import pairwise_tukeyhsd
+from statsmodels.stats.libqsturng import psturng
+
+
+
+class ClusterFrequentistCalculator():
+    """
+    Class for computing frequentist statistics based on cluster assignment labels for explainability purposes.
+
+    :param Union[str, os.PathLike] data_path: Path to pickle or directory of pickles holding unsupervised results in ``simba.unsupervised.data_map.yaml`` format.
+    :param Union[str, os.PathLike] save_path: Path to an Excel file where the results are saved.
+    :param bool scaled: If True, use the scaled training data; else the unscaled data.
+    :param bool anova: If True, compute one-way ANOVAs between cluster groups.
+    :param bool kruskal_wallis: If True, compute Kruskal-Wallis tests between cluster groups.
+    :param bool tukey: If True, compute pairwise Tukey HSD post-hoc tests.
+    :param bool descriptive: If True, compute descriptive statistics per cluster.
+    :param bool pairwise: If True, compare each cluster against each other cluster; else each cluster against all remaining clusters.
+
+    :example:
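+    >>> # mirrors the sandbox usage at the bottom of this file
+    >>> calculator = ClusterFrequentistCalculator(data_path=r"C:\troubleshooting\nastacia_unsupervised\cluster_data\dreamy_moser.pickle", scaled=True, save_path=r"C:\troubleshooting\nastacia_unsupervised\cluster_statistics\stats.xlsx", pairwise=False)
+    >>> calculator.run()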
+    """
+    def __init__(self,
+                 data_path: Union[str, os.PathLike],
+                 save_path: Union[str, os.PathLike],
+                 scaled: bool = True,
+                 anova: bool = True,
+                 kruskal_wallis: bool = True,
+                 tukey: bool = True,
+                 descriptive: bool = True,
+                 pairwise: bool = True):
+
+        check_valid_boolean(value=[scaled, anova, kruskal_wallis, tukey, descriptive, pairwise], source=self.__class__.__name__)
+        if not any([anova, kruskal_wallis, tukey, descriptive]):
+            raise InvalidInputError(msg='No statistical tests chosen', source=self.__class__.__name__)
+        if os.path.isdir(data_path):
+            self.data_paths = find_files_of_filetypes_in_directory(directory=data_path, extensions=['.pickle', '.pkl'], raise_error=True)
+        else:
+            check_file_exist_and_readable(file_path=data_path)
+            self.data_paths = [data_path]
+        self.scaled, self.anova, self.tukey, self.descriptive, self.pairwise, self.kruskal = scaled, anova, tukey, descriptive, pairwise, kruskal_wallis
+        with pd.ExcelWriter(save_path, mode="w") as writer:
+            pd.DataFrame().to_excel(writer, sheet_name=" ", index=True)
+
+    def run(self):
+        for file_cnt, file_path in enumerate(self.data_paths):
+            data = read_pickle(data_path=file_path)
+            check_if_keys_exist_in_dict(data=data, key=[UML.CLUSTER_MODEL.value, UML.DATA.value])
+            name = data[UML.CLUSTER_MODEL.value][UML.HASHED_NAME.value]
+            if not self.scaled:
+                features = data[UML.DATA.value][UML.UNSCALED_TRAIN_DATA.value].reset_index(drop=True)
+            else:
+                features = data[UML.DATA.value][UML.SCALED_TRAIN_DATA.value].reset_index(drop=True)
+            lbls = data[UML.CLUSTER_MODEL.value][UML.MODEL.value].labels_
+            cluster_cnt = get_unique_values_in_iterable(data=lbls, name=name, min=2)
+            unique_cluster_lbls = np.unique(lbls)
+            if self.pairwise:
+                cluster_comb = list(itertools.combinations(unique_cluster_lbls, 2))
+                cluster_comb = [(x[0], (x[1],)) for x in cluster_comb]
+            else:
+                cluster_comb = [(x, tuple(y for y in unique_cluster_lbls if y != x)) for x in unique_cluster_lbls]
+            anova_results, kruskal_results, tukey_results = [], [], []
+            for target, base in cluster_comb:
+                target_X = features.loc[np.argwhere(lbls == target).flatten()].values
+                non_target_X = features.loc[np.argwhere(np.isin(lbls, base)).flatten()].values
+                if self.anova:
+                    anova_df = pd.DataFrame(features.columns, columns=['FEATURE'])
+                    anova_df[['GROUP_1', 'GROUP_2']] = target, str(base)
+                    anova_df['F-STATISTIC'], anova_df['P-VALUE'] = f_oneway(target_X, non_target_X)
+                    anova_results.append(anova_df)
+                if self.kruskal:
+                    kruskal_df = pd.DataFrame(features.columns, columns=['FEATURE'])
+                    kruskal_df[['GROUP_1', 'GROUP_2']] = target, str(base)
+                    kruskal_df['STATISTIC'], kruskal_df['P-VALUE'] = kruskal(target_X, non_target_X)
+                    kruskal_results.append(kruskal_df)
+            if self.tukey:
+                tukey_df = deepcopy(features)
+                tukey_df['lbls'] = lbls
+                for x in features.columns:
+                    hsd = pairwise_tukeyhsd(tukey_df[x], tukey_df['lbls'])
+                    df = pd.DataFrame(data=hsd._results_table.data[1:], columns=hsd._results_table.data[0])
+                    df['P-VALUE'] = psturng(np.abs(hsd.meandiffs / hsd.std_pairs), len(hsd.groupsunique), hsd.df_total)
+                    df['FEATURE'] = x
+                    tukey_results.append(df)
+            if self.descriptive:
+                for cluster_id in unique_cluster_lbls:
+                    target_X = features.loc[np.argwhere(lbls == cluster_id).flatten()].values
+
+            #print()
+            #anova_df['F-STATISTIC', 'P-VALUE'] = f_vals, p_vals
+            #= pair
+        #
+        # if self.settings[ANOVA]:
+        #     self.__one_way_anovas()
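+
+# For orientation (a sketch, not part of the class): with cluster labels {0, 1, 2}, the two
+# comparison modes built inside ``run`` expand as follows --
+#   pairwise=True  -> [(0, (1,)), (0, (2,)), (1, (2,))]         # each cluster vs. each other cluster
+#   pairwise=False -> [(0, (1, 2)), (1, (0, 2)), (2, (0, 1))]   # each cluster vs. all remaining clusters
+# import itertools
+# lbls = [0, 1, 2]
+# pairwise = [(a, (b,)) for a, b in itertools.combinations(lbls, 2)]
+# one_vs_rest = [(a, tuple(b for b in lbls if b != a)) for a in lbls]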
+calculator = ClusterFrequentistCalculator(data_path=r"C:\troubleshooting\nastacia_unsupervised\cluster_data\dreamy_moser.pickle", scaled=True, save_path=r"C:\troubleshooting\nastacia_unsupervised\cluster_statistics\stats.xlsx", pairwise=False)
+calculator.run()
+
+
diff --git a/simba/sandbox/unsupervised/simba_hdbscan.py b/simba/sandbox/unsupervised/simba_hdbscan.py
new file mode 100644
index 000000000..383e6eb40
--- /dev/null
+++ b/simba/sandbox/unsupervised/simba_hdbscan.py
@@ -0,0 +1,105 @@
+import os
+import itertools
+import datetime
+import random
+import numpy as np
+from typing import Union, Tuple
+from simba.utils.checks import check_if_dir_exists, check_valid_tuple, check_if_keys_exist_in_dict, check_valid_array, check_int, check_float, check_valid_boolean
+from simba.utils.read_write import find_files_of_filetypes_in_directory, read_pickle, write_pickle
+from simba.utils.enums import UML, Formats
+from simba.utils.lookups import get_model_names
+from simba.mixins.unsupervised_mixin import UMLMixin
+from simba.utils.printing import SimbaTimer, stdout_success
+
+class SimBAHdbscan():
+
+    def __init__(self):
+        self.datetime = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+        self.mdl_names = get_model_names()
+
+    def fit(self,
+            data: np.ndarray,
+            alpha: float,
+            min_cluster_size: int,
+            min_samples: int,
+            cluster_selection_epsilon: float,
+            gpu: bool = False,
+            verbose: bool = True):
+
+        check_valid_array(data=data, source=f'{self.__class__.__name__} data', accepted_ndims=(2,), accepted_dtypes=Formats.NUMERIC_DTYPES.value)
+        check_int(name=f'{self.__class__.__name__} min_cluster_size', value=min_cluster_size, min_value=1)
+        check_int(name=f'{self.__class__.__name__} min_samples', value=min_samples, min_value=1)
+        check_float(name=f'{self.__class__.__name__} alpha', value=alpha, min_value=0.0)
+        check_float(name=f'{self.__class__.__name__} cluster_selection_epsilon', value=cluster_selection_epsilon, min_value=0.0)
+        check_valid_boolean(value=[gpu], source=f'{self.__class__.__name__} gpu')
+        check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
+        mdl = UMLMixin.hdbscan_define(alpha=alpha, min_samples=min_samples, cluster_selection_epsilon=cluster_selection_epsilon, min_cluster_size=min_cluster_size, verbose=verbose, gpu=gpu)
+        return UMLMixin.hdbscan_fit(mdl=mdl, data=data)
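+
+    # A minimal smoke test of ``fit`` on random data (a sketch; the parameter values below are
+    # arbitrary, not recommendations):
+    # >>> mdl = SimBAHdbscan().fit(data=np.random.random((100, 2)), alpha=1.0, min_cluster_size=15, min_samples=1, cluster_selection_epsilon=0.0, gpu=False, verbose=False)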
+
+    def fit_grid(self,
+                 data_path: Union[str, os.PathLike],
+                 save_dir: Union[str, os.PathLike],
+                 alpha: Tuple[float, ...],
+                 min_cluster_size: Tuple[int, ...],
+                 min_samples: Tuple[int, ...],
+                 cluster_selection_epsilon: Tuple[float, ...],
+                 gpu: bool = False,
+                 verbose: bool = True):
+
+        """
+        Fit one HDBSCAN model per combination of the passed hyperparameter tuples.
+
+        :param Union[str, os.PathLike] data_path: Path to a pickle, or directory of pickles, holding dimensionality-reduction results.
+        :param Union[str, os.PathLike] save_dir: Directory where the fitted cluster models are saved.
+        :param Tuple[float, ...] alpha: Alpha values to grid-search.
+        :param Tuple[int, ...] min_cluster_size: Minimum cluster sizes to grid-search.
+        :param Tuple[int, ...] min_samples: Minimum sample counts to grid-search.
+        :param Tuple[float, ...] cluster_selection_epsilon: Cluster selection epsilons to grid-search.
+        :param bool gpu: If True, use GPU-accelerated HDBSCAN.
+        :param bool verbose: If True, print progress.
+        :return: None. One pickle per hyperparameter combination is saved in ``save_dir``.
+
+        :example:
+        >>> alpha = (1.0,)
+        >>> min_cluster_size = (15,)
+        >>> min_samples = (1,)
+        >>> cluster_selection_epsilon = (1.0, 0.5)
+        >>> clusterer = SimBAHdbscan()
+        >>> clusterer.fit_grid(data_path=r'C:\troubleshooting\nastacia_unsupervised\embedding_data', save_dir=r"C:\troubleshooting\nastacia_unsupervised\cluster_data", alpha=alpha, min_cluster_size=min_cluster_size, min_samples=min_samples, cluster_selection_epsilon=cluster_selection_epsilon)
+        """
+
+        timer = SimbaTimer(start=True)
+        if os.path.isdir(data_path):
+            file_paths = find_files_of_filetypes_in_directory(directory=data_path, extensions=['.pkl', '.pickle'], raise_error=True)
+        else:
+            file_paths = [data_path]
+        check_if_dir_exists(in_dir=save_dir, source=self.__class__.__name__)
+        check_valid_tuple(x=alpha, source=f'{self.__class__.__name__} alpha', valid_dtypes=(float,), minimum_length=1)
+        check_valid_tuple(x=min_cluster_size, source=f'{self.__class__.__name__} min_cluster_size', valid_dtypes=(int,), minimum_length=1)
+        check_valid_tuple(x=min_samples, source=f'{self.__class__.__name__} min_samples', valid_dtypes=(int,), minimum_length=1)
+        check_valid_tuple(x=cluster_selection_epsilon, source=f'{self.__class__.__name__} cluster_selection_epsilon', valid_dtypes=(float,), minimum_length=1)
+        search_spaces = list(itertools.product(*[alpha, min_cluster_size, min_samples, cluster_selection_epsilon]))
+        for file_path in file_paths:
+            data = read_pickle(data_path=file_path)
+            check_if_keys_exist_in_dict(data=data, key=UML.DR_MODEL.value)
+            embedding = data[UML.DR_MODEL.value][UML.MODEL.value].embedding_
+            for search_space in search_spaces:
+                mdl_name = random.sample(self.mdl_names, 1)[0]
+                save_path = os.path.join(save_dir, f'{mdl_name}.pickle')
+                mdl = self.fit(data=embedding, alpha=search_space[0], min_cluster_size=search_space[1], min_samples=search_space[2], cluster_selection_epsilon=search_space[3], gpu=gpu, verbose=verbose)
+                data[UML.CLUSTER_MODEL.value] = {UML.HASHED_NAME.value: mdl_name,
+                                                 UML.PARAMETERS.value: {UML.ALPHA.value: search_space[0], UML.MIN_CLUSTER_SIZE.value: search_space[1], UML.MIN_SAMPLES.value: search_space[2], UML.EPSILON.value: search_space[3]},
+                                                 UML.MODEL.value: mdl}
+                write_pickle(data=data, save_path=save_path)
+        timer.stop_timer()
+        if verbose:
+            stdout_success(msg=f'{len(search_spaces)*len(file_paths)} model(s) saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
+
+# alpha = (1.0,)
+# min_cluster_size = (15,)
+# min_samples = (1,)
+# cluster_selection_epsilon = (1.0, 0.5)
+#
+# clusterer = SimBAHdbscan()
+# clusterer.fit_grid(data_path=r'C:\troubleshooting\nastacia_unsupervised\embedding_data', save_dir=r"C:\troubleshooting\nastacia_unsupervised\cluster_data", alpha=alpha, min_cluster_size=min_cluster_size, min_samples=min_samples, cluster_selection_epsilon=cluster_selection_epsilon)
+#
diff --git a/simba/sandbox/unsupervised/simba_umap.py b/simba/sandbox/unsupervised/simba_umap.py
new file mode 100644
index 000000000..76ef7cc80
--- /dev/null
+++ b/simba/sandbox/unsupervised/simba_umap.py
@@ -0,0 +1,130 @@
+import datetime
+import os
+import random
+import itertools
+from copy import deepcopy
+from typing import Union, Optional
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+import pandas as pd
+import numpy as np
+from numba import typed
+from simba.utils.checks import check_file_exist_and_readable, check_valid_dict, check_float, check_int, check_str, check_if_keys_exist_in_dict, check_valid_boolean
+from simba.utils.read_write import read_pickle, drop_df_fields, write_pickle
+from simba.utils.lookups import get_model_names
+from simba.utils.enums import Options, UML
+from simba.mixins.train_model_mixin import TrainModelMixin
+from simba.mixins.unsupervised_mixin import UMLMixin
+from simba.utils.printing import stdout_success, SimbaTimer
+
+class SimBAUmap():
+
+    def __init__(self):
+        self.datetime = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+        self.mdl_names = get_model_names()
+
+    def fit(self,
+            data: Union[np.ndarray, pd.DataFrame],
+            n_neighbors: int,
+            min_distance: float,
+            spread: float,
+            gpu: bool = False,
+            verbose: bool = True):
+
+        check_int(name=f'{self.__class__.__name__} n_neighbors', value=n_neighbors, min_value=1)
+        check_float(name=f'{self.__class__.__name__} min_distance', value=min_distance, min_value=0.0)
+        check_float(name=f'{self.__class__.__name__} spread', value=spread, min_value=min_distance)
+        check_valid_boolean(value=[gpu], source=f'{self.__class__.__name__} gpu')
+        check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
+        verbose = 1 if verbose else 0
+        mdl = UMLMixin.umap_define(n_neighbors=n_neighbors, min_distance=min_distance, spread=spread, gpu=gpu, verbose=verbose)
+        return UMLMixin.umap_fit(mdl=mdl, data=data)
+
+    def fit_grid(self,
+                 data_path: Union[str, os.PathLike],
+                 save_dir: Union[str, os.PathLike],
+                 hyperparameters: dict,
+                 gpu: bool = False,
+                 verbose: bool = True,
+                 scaler: Literal['min-max', 'standard', 'quantile'] = 'min-max',
+                 variance_threshold: Optional[float] = None,
+                 multicollinearity_threshold: Optional[float] = None):
+
+        """
+        Fit one UMAP model per combination of the passed hyperparameters.
+
+        :param Union[str, os.PathLike] data_path: Path to a pickle holding a 'FEATURES' DataFrame (e.g., as written by ``directory_clf_bout_aggregator``).
+        :param Union[str, os.PathLike] save_dir: Directory where the fitted embedding models are saved.
+        :param dict hyperparameters: Dict with tuple values for the keys ``n_neighbors``, ``min_distance``, and ``spread``.
+        :param bool gpu: If True, use GPU-accelerated UMAP.
+        :param bool verbose: If True, print progress.
+        :param Literal['min-max', 'standard', 'quantile'] scaler: Name of the scaler applied to the feature data.
+        :param Optional[float] variance_threshold: If not None, drop low-variance fields identified at this threshold.
+        :param Optional[float] multicollinearity_threshold: If not None, drop fields correlated above this threshold.
+        :return: None. One pickle per hyperparameter combination is saved in ``save_dir``.
+
+        :example:
+        >>> hyperparameters = {"n_neighbors": (5,), "min_distance": (0.1, 0.5, 0.0), "spread": (1.0,)}
+        >>> variance_threshold = 0.05
+        >>> multicollinearity = 0.9999
+        >>> scaler = "min-max"
+        >>> embedder = SimBAUmap()
+        >>> embedder.fit_grid(data_path=r"C:\troubleshooting\nastacia_unsupervised\datasets\data.pickle", save_dir=r"C:\troubleshooting\nastacia_unsupervised\embedding_data", hyperparameters=hyperparameters, gpu=False, scaler=scaler, variance_threshold=variance_threshold, multicollinearity_threshold=multicollinearity, verbose=True)
+        """
+
+        timer = SimbaTimer(start=True)
+        check_file_exist_and_readable(file_path=data_path)
+        check_valid_dict(x=hyperparameters, valid_key_dtypes=(str,), valid_values_dtypes=(tuple,), required_keys=UML.FIT_KEYS.value)
+        check_str(name=f'{self.__class__.__name__} scaler', value=scaler.upper(), options=Options.SCALER_OPTIONS.value)
+        check_valid_boolean(value=[gpu], source=f'{self.__class__.__name__} gpu')
+        check_valid_boolean(value=[verbose], source=f'{self.__class__.__name__} verbose')
+        data = read_pickle(data_path=data_path)
+        check_if_keys_exist_in_dict(data=data, key=['FEATURES'])
+        X_data = deepcopy(data['FEATURES'])
+
+        low_variance_fields, collinear_fields = None, None
+        if variance_threshold is not None:
+            check_float(name=f'{self.__class__.__name__} variance_threshold', value=variance_threshold, min_value=0, max_value=1.0)
+            low_variance_fields = TrainModelMixin.find_low_variance_fields(data=X_data, variance_threshold=variance_threshold)
+            X_data = drop_df_fields(data=X_data, fields=low_variance_fields)
+
+        if multicollinearity_threshold is not None:
+            check_float(name=f'{self.__class__.__name__} multicollinearity_threshold', value=multicollinearity_threshold, min_value=0, max_value=1.0)
+            collinear_fields = TrainModelMixin.find_highly_correlated_fields(data=X_data.values, threshold=multicollinearity_threshold, field_names=typed.List(X_data.columns))
+            X_data = drop_df_fields(data=X_data, fields=collinear_fields)
+
+        scaler = TrainModelMixin.define_scaler(scaler_name=scaler)
+        scaler = TrainModelMixin.fit_scaler(scaler=scaler, data=X_data)
+        X_data_scaled = TrainModelMixin.scaler_transform(data=X_data, scaler=scaler)
+        search_spaces = list(itertools.product(*[hyperparameters['n_neighbors'], hyperparameters['min_distance'], hyperparameters['spread']]))
+        for search_space in search_spaces:
+            mdl_name = random.sample(self.mdl_names, 1)[0]
save_path = os.path.join(save_dir, f'{mdl_name}.pickle') + mdl = self.fit(data=X_data_scaled, n_neighbors=search_space[0], min_distance=search_space[1], spread=search_space[2], gpu=gpu, verbose=verbose) + mdl_lk = {UML.DR_MODEL.value: {UML.HASHED_NAME.value: mdl_name, + UML.PARAMETERS.value: {UML.N_NEIGHBORS.value: search_space[0], UML.MIN_DISTANCE.value: search_space[1], UML.SPREAD.value: search_space[2]}, + UML.MODEL.value: mdl}, + UML.METHODS.value: {UML.SCALER.value: scaler, + UML.SCALER_TYPE.value: type(scaler), + UML.MULTICOLLINEARITY_THRESHOLD.value: multicollinearity_threshold, + UML.VARIANCE_THRESHOLD.value: variance_threshold}, + UML.DATA.value: {UML.COLLINEAR_FIELDS.value: collinear_fields, + UML.LOW_VARIANCE_FIELDS.value: low_variance_fields, + UML.RAW.value: data, + UML.SCALED_TRAIN_DATA.value: X_data_scaled, + UML.UNSCALED_TRAIN_DATA.value: X_data}} + write_pickle(data=mdl_lk, save_path=save_path) + timer.stop_timer() + if verbose: + stdout_success(msg=f'{len(search_spaces)} model(s) saved in {save_dir}', elapsed_time=timer.elapsed_time_str) + + + +# hyperparameters = {"n_neighbors": (5,), "min_distance": (0.1, 0.5, 0.0), "spread": (1.0,)} +# variance_threshold = 0.05 +# multicollinearity = 0.9999 +# scaler = "min-max" +# embedder = SimBAUmap() +# embedder.fit_grid(data_path=r"C:\troubleshooting\nastacia_unsupervised\datasets\data.pickle", save_dir=r"C:\troubleshooting\nastacia_unsupervised\embedding_data", hyperparameters=hyperparameters, gpu=False, scaler=scaler, variance_threshold=variance_threshold, multicollinearity_threshold=multicollinearity, verbose=True) diff --git a/simba/sandbox/unsupervised_lof.py b/simba/sandbox/unsupervised_lof.py new file mode 100644 index 000000000..d61599a31 --- /dev/null +++ b/simba/sandbox/unsupervised_lof.py @@ -0,0 +1,48 @@ +from typing import List, Union +import numpy as np +from sklearn.neighbors import LocalOutlierFactor + +import pandas as pd +from simba.utils.checks import check_valid_lst, check_valid_array +from simba.utils.errors import InvalidInputError + + +def embedding_local_outliers(data: List[np.ndarray], k: Union[int, float] = 5, contamination: float = 1e-10): + check_valid_lst(data=data, source=embedding_local_outliers.__name__, valid_dtypes=(np.ndarray,), min_len=1) + for i in data: check_valid_array(data=i, source=embedding_local_outliers.__name__, accepted_ndims=(2,)) + if not isinstance(k, (int, float)): + raise InvalidInputError(msg=f'k is invalid dtype. 
Found {type(k)}, accepted: int, float', source=embedding_local_outliers.__name__)
+    for i in data:
+        if isinstance(k, float):
+            K = int(i.shape[0] * k)
+        else:
+            K = k
+        if K > i.shape[0]:
+            K = i.shape[0]
+        lof_model = LocalOutlierFactor(n_neighbors=K, contamination=contamination)
+        _ = lof_model.fit_predict(i)
+        results = -lof_model.negative_outlier_factor_.astype(np.float32)
+        print(results)
+
+
+x = np.random.random(size=(500, 2))
+y = np.array([[99, 100]]).reshape(-1, 2)
+
+x = np.vstack([x, y])
+embedding_local_outliers(data=[x], k=200)
+
+
+#
+# class LOF():
+#     def __init__(self):
+#         pass
diff --git a/simba/sandbox/unsupervised_outliers.py b/simba/sandbox/unsupervised_outliers.py
new file mode 100644
index 000000000..7c7c3b1e6
--- /dev/null
+++ b/simba/sandbox/unsupervised_outliers.py
@@ -0,0 +1,329 @@
+from typing import Union, Optional
+import numpy as np
+import pandas as pd
+from sklearn.covariance import EllipticEnvelope
+from itertools import combinations
+
+from simba.utils.checks import check_valid_array, check_float, check_int
+from sklearn.neighbors import LocalOutlierFactor
+from sklearn.datasets import make_blobs
+from simba.mixins.plotting_mixin import PlottingMixin
+from simba.mixins.feature_extraction_mixin import FeatureExtractionMixin
+
+def local_outlier_factor(data: np.ndarray,
+                         k: Union[int, float] = 5,
+                         contamination: Optional[float] = 1e-10,
+                         normalize: Optional[bool] = False,
+                         groupby_idx: Optional[int] = None) -> np.ndarray:
+    """
+    Compute the local outlier factor of each observation.
+
+    .. note::
+       The final LOF scores are negated. Thus, higher values indicate more atypical (outlier) data points.
+       The method calls ``sklearn.neighbors.LocalOutlierFactor`` directly; an own jit-compiled
+       implementation was attempted, but its runtime was roughly 3x slower than
+       ``sklearn.neighbors.LocalOutlierFactor``.
+
+       If ``groupby_idx`` is not None, it gives the column index of the ``data`` array by which to group
+       the data, with LOF computed within each segment/cluster. E.g., the column can hold a cluster
+       identifier. Outliers are then computed within each segment/cluster, ensuring that observations in
+       other segments cannot affect the outlier scores inside the cluster being analyzed.
+
+       If ``groupby_idx`` is provided, all observations with cluster/segment variable ``-1`` are treated
+       as unclustered and assigned the max outlier score found within the clustered observations.
+
+    .. image:: _static/img/local_outlier_factor.png
+       :width: 700
+       :align: center
+
+    :param np.ndarray data: 2D array with feature values where rows represent frames and columns represent features.
+    :param Union[int, float] k: Number of neighbors to evaluate for each observation. If the value is a float, then interpreted as the ratio of data.shape[0]. If the value is an integer, then it represents the number of neighbours to evaluate.
+    :param Optional[float] contamination: Small pseudonumber to avoid DivisionByZero error.
+    :param Optional[bool] normalize: Whether to normalize the distances between 0 and 1. Defaults to False.
+    :param Optional[int] groupby_idx: If int, the column index of ``data`` by which to group the data and compute LOF on each segment. E.g., can be a field holding a cluster identifier.
+    :returns np.ndarray: Array of size data.shape[0] with local outlier scores.
+
+    :example:
+    >>> data, lbls = make_blobs(n_samples=2000, n_features=2, centers=10, random_state=42)
+    >>> data = np.hstack((data, lbls.reshape(-1, 1)))
+    >>> lof = local_outlier_factor(data=data, groupby_idx=2, k=100, normalize=True)
+    >>> results = np.hstack((data[:, 0:2], lof.reshape(lof.shape[0], 1)))
+    >>> PlottingMixin.continuous_scatter(data=results, palette='seismic', bg_clr='lightgrey', size=30)
+    """
+    def get_lof(data, k, contamination):
+        check_float(name=f"{local_outlier_factor.__name__} k", value=k)
+        if isinstance(k, int):
+            k = min(k, data.shape[0])
+        elif isinstance(k, float):
+            k = int(data.shape[0] * k)
+        lof_model = LocalOutlierFactor(n_neighbors=k, contamination=contamination)
+        _ = lof_model.fit_predict(data)
+        y = -lof_model.negative_outlier_factor_.astype(np.float32)
+        if normalize:
+            return (y - np.min(y)) / (np.max(y) - np.min(y))
+        else:
+            return y
+
+    if groupby_idx is not None:
+        check_int(name=f'{local_outlier_factor.__name__} groupby_idx', value=groupby_idx, min_value=0, max_value=data.shape[1]-1)
+        check_valid_array(source=f"{local_outlier_factor.__name__} data", data=data, accepted_sizes=[2], min_axis_1=3)
+    else:
+        check_valid_array(source=f"{local_outlier_factor.__name__} data", data=data, accepted_sizes=[2], min_axis_1=2)
+    check_float(name=f"{local_outlier_factor.__name__} contamination", value=contamination, min_value=0.0)
+
+    if groupby_idx is None:
+        return get_lof(data, k, contamination)
+    else:
+        results = []
+        data_w_idx = np.hstack((np.arange(0, data.shape[0]).reshape(-1, 1), data))
+        unique_c = np.unique(data[:, groupby_idx]).astype(np.float32)
+        if -1.0 in unique_c:
+            unique_c = unique_c[np.where(unique_c != -1)]
+            unclustered_idx = np.argwhere(data[:, groupby_idx] == -1.0).flatten()
+            unclustered = data_w_idx[unclustered_idx]
+            data_w_idx = np.delete(data_w_idx, unclustered_idx, axis=0)
+        else:
+            unclustered = None
+        for i in unique_c:
+            s_data = data_w_idx[np.argwhere(data_w_idx[:, groupby_idx+1] == i)].reshape(-1, data_w_idx.shape[1])
+            idx = s_data[:, 0].reshape(s_data.shape[0], 1)
+            s_data = np.delete(s_data, [0, groupby_idx+1], 1)
+            lof = get_lof(s_data, k, contamination).reshape(s_data.shape[0], 1)
+            results.append(np.hstack((idx, lof)))
+        x = np.concatenate(results, axis=0)
+        if unclustered is not None:
+            max_lof = np.full((unclustered.shape[0], 1), np.max(x[:, -1]))
+            unclustered = np.hstack((unclustered, max_lof))[:, [0, -1]]
+            x = np.vstack((x, unclustered))
+        return x[np.argsort(x[:, 0])][:, -1]
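+
+# A sketch of the ``groupby_idx`` path with unclustered points: rows labelled -1 in the group
+# column are scored last and inherit the maximum LOF found among the clustered rows (the label
+# values and sizes below are arbitrary):
+# >>> data, lbls = make_blobs(n_samples=500, n_features=2, centers=3, random_state=42)
+# >>> lbls[:10] = -1  # pretend the first ten observations were never assigned a cluster
+# >>> data = np.hstack((data, lbls.reshape(-1, 1).astype(np.float64)))
+# >>> lof = local_outlier_factor(data=data, k=50, groupby_idx=2, normalize=True)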
+
+
+def elliptic_envelope(data: np.ndarray,
+                      contamination: Optional[float] = 1e-1,
+                      normalize: Optional[bool] = False,
+                      groupby_idx: Optional[int] = None) -> np.ndarray:
+    """
+    Compute the Mahalanobis distances of each observation in the input array using the Elliptic Envelope method.
+
+    .. image:: _static/img/EllipticEnvelope.png
+       :width: 700
+       :align: center
+
+    .. image:: _static/img/elliptic_envelope.png
+       :width: 700
+       :align: center
+
+    :param np.ndarray data: Input data array of shape (n_samples, n_features).
+    :param Optional[float] contamination: The proportion of outliers to be assumed in the data. Defaults to 0.1.
+    :param Optional[bool] normalize: Whether to normalize the Mahalanobis distances between 0 and 1. Defaults to False.
+    :param Optional[int] groupby_idx: If int, the column index of ``data`` by which to group the data and compute the envelope scores on each segment.
+    :return np.ndarray: The Mahalanobis distances of each observation in array. Larger values indicate outliers.
+
+    :example:
+    >>> data, lbls = make_blobs(n_samples=2000, n_features=2, centers=1, random_state=42)
+    >>> envelope_score = elliptic_envelope(data=data, normalize=True)
+    >>> results = np.hstack((data[:, 0:2], envelope_score.reshape(envelope_score.shape[0], 1)))
+    >>> results = pd.DataFrame(results, columns=['X', 'Y', 'ENVELOPE SCORE'])
+    >>> PlottingMixin.continuous_scatter(data=results, palette='seismic', bg_clr='lightgrey', columns=['X', 'Y', 'ENVELOPE SCORE'], size=30)
+    """
+
+    def get_envelope(data, contamination) -> np.ndarray:
+        mdl = EllipticEnvelope(contamination=contamination).fit(data)
+        y = -mdl.score_samples(data)
+        if normalize:
+            y = (y - np.min(y)) / (np.max(y) - np.min(y))
+        return y
+
+    if groupby_idx is not None:
+        check_int(name=f'{elliptic_envelope.__name__} groupby_idx', value=groupby_idx, min_value=0, max_value=data.shape[1]-1)
+        check_valid_array(source=f"{elliptic_envelope.__name__} data", data=data, accepted_sizes=[2], min_axis_1=3)
+    else:
+        check_valid_array(source=f"{elliptic_envelope.__name__} data", data=data, accepted_sizes=[2], min_axis_1=2)
+    check_float(name=f"{elliptic_envelope.__name__} contamination", value=contamination, min_value=0.0, max_value=1.0)
+
+    if groupby_idx is None:
+        return get_envelope(data, contamination)
+    else:
+        results = []
+        data_w_idx = np.hstack((np.arange(0, data.shape[0]).reshape(-1, 1), data))
+        unique_c = np.unique(data[:, groupby_idx]).astype(np.float32)
+        if -1.0 in unique_c:
+            unique_c = unique_c[np.where(unique_c != -1)]
+            unclustered_idx = np.argwhere(data[:, groupby_idx] == -1.0).flatten()
+            unclustered = data_w_idx[unclustered_idx]
+            data_w_idx = np.delete(data_w_idx, unclustered_idx, axis=0)
+        else:
+            unclustered = None
+        for i in unique_c:
+            s_data = data_w_idx[np.argwhere(data_w_idx[:, groupby_idx+1] == i)].reshape(-1, data_w_idx.shape[1])
+            idx = s_data[:, 0].reshape(s_data.shape[0], 1)
+            s_data = np.delete(s_data, [0, groupby_idx+1], 1)
+            envelope_scores = get_envelope(s_data, contamination).reshape(s_data.shape[0], 1)
+            results.append(np.hstack((idx, envelope_scores)))
+        x = np.concatenate(results, axis=0)
+        if unclustered is not None:
+            max_score = np.full((unclustered.shape[0], 1), np.max(x[:, -1]))
+            unclustered = np.hstack((unclustered, max_score))[:, [0, -1]]
+            x = np.vstack((x, unclustered))
+        return x[np.argsort(x[:, 0])][:, -1]
+
+
+def angle_based_od(data: np.ndarray,
+                   k: Union[int, float] = 5,
+                   groupby_idx: Optional[int] = None,
+                   normalize: Optional[bool] = False) -> np.ndarray:
+    """
+    Angle-based outlier detection (ABOD): score each observation by the variance of the weighted
+    cosines between it and pairs of its k nearest neighbours. Smaller angle-variance values
+    generally indicate outliers.
+
+    :param np.ndarray data: 2D array with feature values where rows represent observations.
+    :param Union[int, float] k: Number (int) or ratio (float) of nearest neighbours to evaluate.
+    :param Optional[int] groupby_idx: If int, the column index of ``data`` by which to group the data (the grouped path is currently commented out below).
+    :param Optional[bool] normalize: Whether to normalize the scores between 0 and 1.
+    :return np.ndarray: Angle-based outlier scores of size data.shape[0].
+
+    Adopted from https://pyod.readthedocs.io/en/latest/_modules/pyod/models/abod.html#ABOD
+    """
+
+    def _wcos(x: np.ndarray, nn_s: np.ndarray):
+        nn_pair = list(combinations(list(range(nn_s.shape[0])), 2))
+        # #wcos = np.full((nn_s.shape[0], nn_s.shape[0]), 0.0)
+        w = []
+        for l in nn_pair:
+            a = nn_s[l[0]] - x
+            b = nn_s[l[1]] - x
+            val = np.dot(a, b) / (np.linalg.norm(a, 2) ** 2) / (np.linalg.norm(b, 2) ** 2)
+            w.append(val)
+        #     for g in range(i + 1, nn_s.shape[0]):
+        #         if (np.array_equal(nn_s[p], x)) or (np.array_equal(nn_s[g], x)):
+        #             continue
+        #         else:
+        #             a = nn_s[p] - x
+        #             b = nn_s[g] - x
+        #             val = np.dot(a, b) / (np.linalg.norm(a, 2) ** 2) / (np.linalg.norm(b, 2) ** 2)
+        #             w.append(val)
+        # print(w, nn_s.shape, x.shape, x)
+        # print(w)
+        return np.var(w)
+
+    if groupby_idx is not None:
+        check_int(name=f'{angle_based_od.__name__} groupby_idx', value=groupby_idx, min_value=0, max_value=data.shape[1]-1)
+        check_valid_array(source=f"{angle_based_od.__name__} data",
+    else:
+        check_valid_array(source=f"{angle_based_od.__name__} data", data=data, accepted_sizes=[2], min_axis_1=2)
+    check_float(name=f"{angle_based_od.__name__} k", value=k)
+
+    if groupby_idx is None:
+        if isinstance(k, int):
+            k = min(k, data.shape[0]-1)
+        elif isinstance(k, float):
+            k = int((data.shape[0]-1) * k)
+        distances = FeatureExtractionMixin.cdist(array_1=data, array_2=data)
+        results = np.full((data.shape[0],), np.nan)
+        for i in range(distances.shape[0]):
+            # Skip index 0 of the sorted distances: it is the observation itself.
+            idx = np.argsort(distances[i])[1:k+1]
+            nn_s = data[idx, :]
+            results[i] = _wcos(data[i], nn_s)
+        if normalize:
+            return (results - np.min(results)) / (np.max(results) - np.min(results))
+        else:
+            return results
+    else:
+        results = []
+        data_w_idx = np.hstack((np.arange(0, data.shape[0]).reshape(-1, 1), data))
+        unique_c = np.unique(data[:, groupby_idx]).astype(np.float32)
+        if -1.0 in unique_c:
+            unique_c = unique_c[np.where(unique_c != -1)]
+            unclustered_idx = np.argwhere(data[:, groupby_idx] == -1.0).flatten()
+            unclustered = data_w_idx[unclustered_idx]
+            data_w_idx = np.delete(data_w_idx, unclustered_idx, axis=0)
+        else:
+            unclustered = None
+        for i in unique_c:
+            c_data = data_w_idx[np.argwhere(data_w_idx[:, groupby_idx + 1] == i)].reshape(-1, data_w_idx.shape[1])
+            c_data_idx = c_data[:, 0].reshape(c_data.shape[0], 1)
+            c_data = np.delete(c_data, [0, groupby_idx + 1], 1)
+            distances = FeatureExtractionMixin.cdist(array_1=c_data, array_2=c_data)
+            c_results = np.full((c_data.shape[0], 1), np.nan)
+            # Resolve k per cluster, since cluster sizes differ; enforce at least one neighbour.
+            if isinstance(k, int):
+                c_k = max(1, min(k, c_data.shape[0] - 1))
+            else:
+                c_k = max(1, int((c_data.shape[0] - 1) * k))
+            for j in range(distances.shape[0]):
+                nn_idx = np.argsort(distances[j])[1:c_k + 1]
+                nn_s = c_data[nn_idx, :]
+                c_results[j] = _wcos(c_data[j], nn_s)
+            if normalize:
+                c_results = (c_results - np.min(c_results)) / (np.max(c_results) - np.min(c_results))
+            results.append(np.hstack((c_data_idx, c_results)))
+        results = np.concatenate(results, axis=0)
+        if unclustered is not None:
+            max_angle_od = np.full((unclustered.shape[0], 1), np.max(results[:, -1]))
+            unclustered = np.hstack((unclustered, max_angle_od))[:, [0, -1]]
+            results = np.vstack((results, unclustered))
+        return results[np.argsort(results[:, 0])][:, -1]
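+
+# Example usage (a minimal sketch, adapted from the scratch test that previously lived here;
+# assumes the same make_blobs and PlottingMixin helpers used in the docstring examples above):
+# data, lbls = make_blobs(n_samples=1000, n_features=2, centers=2, random_state=42)
+# score = angle_based_od(data=data, k=100, normalize=True)
+# results = np.hstack((data[:, 0:2], score.reshape(score.shape[0], 1)))
+# PlottingMixin.continuous_scatter(data=results, palette='seismic', bg_clr='lightgrey', size=10)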
\ No newline at end of file
diff --git a/simba/sandbox/vertical_video_concatenator.py b/simba/sandbox/vertical_video_concatenator.py
new file mode 100644
index 000000000..27a96d84a
--- /dev/null
+++ b/simba/sandbox/vertical_video_concatenator.py
@@ -0,0 +1,66 @@
+import os
+from typing import List, Union, Optional
+from simba.utils.read_write import get_video_meta_data
+from simba.utils.checks import check_valid_lst, check_if_dir_exists, check_int, check_ffmpeg_available, check_nvidea_gpu_available
+from simba.utils.errors import InvalidInputError, FFMPEGCodecGPUError
+from simba.utils.printing import SimbaTimer
+import subprocess
+
+def vertical_video_concatenator(video_paths: List[Union[str, os.PathLike]],
+                                save_path: Union[str, os.PathLike],
+                                width_px: Optional[int] = None,
+                                width_idx: Optional[int] = None,
+                                gpu: Optional[bool] = False,
+                                verbose: Optional[bool] = True) -> None:
+    """
+    Concatenates multiple videos vertically.
+
+    :param List[Union[str, os.PathLike]] video_paths: List of input video file paths.
+    :param Union[str, os.PathLike] save_path: File path to save the concatenated video.
+    :param Optional[int] width_px: Width of the output video in pixels.
+    :param Optional[int] width_idx: Index of the video to use for determining width.
+    :param Optional[bool] gpu: Whether to use GPU-accelerated codec (default: False).
+    :param Optional[bool] verbose: Whether to print progress messages (default: True).
+    :raises FFMPEGCodecGPUError: If GPU is requested but not available.
+    :raises InvalidInputError: If both or neither width_px and width_idx are provided.
+
+    :example:
+    >>> video_paths = ['/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat7_8(2).mp4',
+    >>>                '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12.mp4',
+    >>>                '/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/08102021_DOT_Rat11_12_1.mp4']
+    >>> _ = vertical_video_concatenator(video_paths=video_paths, width_idx=1, save_path='/Users/simon/Desktop/envs/simba/troubleshooting/RAT_NOR/project_folder/videos/test/new/08102021_DOT_Rat7_8(2)_.mp4', gpu=False)
+    """
+
+    check_ffmpeg_available()
+    if gpu and not check_nvidea_gpu_available():
+        raise FFMPEGCodecGPUError(msg="NVIDIA GPU not available", source=vertical_video_concatenator.__name__)
+    timer = SimbaTimer(start=True)
+    check_valid_lst(data=video_paths, source=vertical_video_concatenator.__name__, min_len=2)
+    check_if_dir_exists(in_dir=os.path.dirname(save_path), source=vertical_video_concatenator.__name__)
+    if ((width_px is None) and (width_idx is None)) or ((width_px is not None) and (width_idx is not None)):
+        raise InvalidInputError(msg='Provide a width_px OR width_idx', source=vertical_video_concatenator.__name__)
+    if width_idx is not None:
+        # width_idx is a zero-based index into video_paths.
+        check_int(name=f'{vertical_video_concatenator.__name__} width index', value=width_idx, min_value=0, max_value=len(video_paths)-1)
+        video_meta_data = [get_video_meta_data(video_path=video_path) for video_path in video_paths]
+        width = int(video_meta_data[width_idx]['width'])
+    else:
+        check_int(name=f'{vertical_video_concatenator.__name__} width', value=width_px, min_value=1)
+        width = int(width_px)
+    video_path_str = " ".join([f'-i "{path}"' for path in video_paths])
+    codec = 'h264_nvenc' if gpu else 'libx264'
+    # Scale every input to the target width (keeping aspect ratio), then stack the scaled streams vertically.
+    filter_complex = ";".join([f"[{idx}:v]scale={width}:-1[v{idx}]" for idx in range(len(video_paths))])
+    filter_complex += f";{''.join([f'[v{idx}]' for idx in range(len(video_paths))])}"
+    filter_complex += f"vstack=inputs={len(video_paths)}[v]"
+    if verbose:
+        print(f'Concatenating {len(video_paths)} video(s) vertically with a {width} pixel width...')
+    cmd = f'ffmpeg {video_path_str} -filter_complex "{filter_complex}" -map "[v]" -c:v {codec} -loglevel error -stats "{save_path}" -y'
+    subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    if verbose:
+        print(f'Vertical concatenation complete. Saved at {save_path} (Elapsed time: {timer.elapsed_time_str}s.)')
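+
+# For reference, with three inputs and width=640 the filter graph built above expands to
+# (a sketch of the constructed string, not output from a run):
+# [0:v]scale=640:-1[v0];[1:v]scale=640:-1[v1];[2:v]scale=640:-1[v2];[v0][v1][v2]vstack=inputs=3[v]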
diff --git a/simba/sandbox/violin.py b/simba/sandbox/violin.py
new file mode 100644
index 000000000..e19117244
--- /dev/null
+++ b/simba/sandbox/violin.py
@@ -0,0 +1,38 @@
+from typing import Union, Optional
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+import seaborn as sns
+import os
+import matplotlib.pyplot as plt
+import pandas as pd
+from simba.utils.lookups import get_named_colors
+from simba.utils.printing import stdout_success
+
+
+def violin_plot(data: pd.DataFrame,
+                x: str,
+                y: str,
+                save_path: Union[str, os.PathLike],
+                font_rotation: Optional[int] = 45,
+                font_size: Optional[int] = 10,
+                img_size: Optional[tuple] = (13.7, 8.27),
+                cut: Optional[int] = 0,
+                scale: Optional[Literal["area", "count", "width"]] = "area"):
+    """
+    Create a violin plot of the ``y`` values in ``data`` grouped by the categories in column ``x``,
+    ordered by descending group median, and save it to ``save_path``.
+    """
+    named_colors = get_named_colors()
+    palette = {}
+    for cnt, violin in enumerate(sorted(list(data[x].unique()))):
+        palette[violin] = named_colors[cnt]
+    plt.figure()
+    order = data.groupby(by=[x])[y].median().sort_values().iloc[::-1].index
+    figure_FSCTT = sns.violinplot(x=x, y=y, data=data, cut=cut, scale=scale, order=order, palette=palette)
+    figure_FSCTT.set_xticklabels(figure_FSCTT.get_xticklabels(), rotation=font_rotation, size=font_size)
+    figure_FSCTT.figure.set_size_inches(img_size)
+    figure_FSCTT.figure.savefig(save_path, bbox_inches="tight")
+    stdout_success(msg=f"Violin plot saved at {save_path}")
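+
+# Example usage (a minimal sketch; the CSV path and column names below are hypothetical):
+# df = pd.read_csv('/path/to/bout_lengths.csv')
+# violin_plot(data=df, x='CLASSIFIER', y='BOUT LENGTH (S)', save_path='/path/to/violin.png')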
diff --git a/simba/sandbox/visualize_networks.py b/simba/sandbox/visualize_networks.py
new file mode 100644
index 000000000..c6161bf20
--- /dev/null
+++ b/simba/sandbox/visualize_networks.py
@@ -0,0 +1,132 @@
+from simba.mixins.network_mixin import NetworkMixin
+import networkx as nx
+import os
+from typing import Union, Optional, Tuple, Dict
+from pyvis.network import Network
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+from simba.utils.checks import check_instance, check_valid_tuple, check_if_dir_exists, check_float, check_int, check_valid_hex_color, check_valid_lst
+from simba.utils.errors import InvalidInputError
+from simba.utils.data import create_color_palette
+
+
+def visualize(graph: Union[nx.Graph, nx.MultiGraph],
+              save_path: Optional[Union[str, os.PathLike]] = None,
+              node_size: Optional[Union[float, Dict[str, float]]] = 25.0,
+              palette: Optional[Union[str, Dict[str, str]]] = "magma",
+              node_shape: Optional[Literal['dot', 'ellipse', 'circle']] = 'dot',
+              smooth_type: Optional[Literal['dynamic', 'continuous', 'discrete', 'diagonalCross', 'straightCross', 'horizontal', 'vertical', 'curvedCW', 'curvedCCW', 'cubicBezier']] = 'dynamic',
+              img_size: Optional[Tuple[int, int]] = (500, 500)) -> Union[None, Network]:
+    """
+    Visualizes a network graph using the vis.js library and saves the result as an HTML file.
+
+    .. raw:: html
+       :file: ../docs/_static/img/network_ex.html
+
+    .. note::
+       Multi-networks created by ``simba.mixins.network_mixin.create_multigraph`` can be a little messy to look at. Instead,
+       this creates separate objects and files with single edges from each time-point.
+
+    :param Union[nx.Graph, nx.MultiGraph] graph: The input graph to be visualized.
+    :param Optional[Union[str, os.PathLike]] save_path: The path to save the HTML file. If multi-graph, pass a directory path. If None, the graph(s) are returned but not saved.
+    :param Optional[Union[float, Dict[str, float]]] node_size: The size of nodes. Can be a single float or a dictionary mapping node names to their respective sizes. Default: 25.0.
+    :param Optional[Union[str, Dict[str, str]]] palette: The color palette for nodes. Can be a single string representing a palette name or a dictionary mapping node names to their respective colors. Default: magma.
+    :param Optional[Tuple[int, int]] img_size: The size of the resulting image in pixels, represented as (width, height). Default: 500x500.
+    :param Optional[Literal['dot', 'ellipse', 'circle']] node_shape: The shape of the nodes. Default: `dot`.
+    :param Optional[Literal] smooth_type: The dynamics of the interactive graph.
+
+    :example:
+    >>> graph = NetworkMixin.create_graph(data={('Animal_1', 'Animal_2'): 1.0, ('Animal_1', 'Animal_3'): 0.2, ('Animal_2', 'Animal_3'): 0.5})
+    >>> graph_pg = visualize(graph=graph, node_size={'Animal_1': 10, 'Animal_2': 26, 'Animal_3': 50}, save_path='/Users/simon/Downloads/graph.html', node_shape='box', palette='spring')
+    >>> multigraph = NetworkMixin().create_multigraph(data={('Animal_1', 'Animal_2'): [0, 0, 0, 6], ('Animal_1', 'Animal_3'): [0, 0, 0, 0], ('Animal_1', 'Animal_4'): [0, 0, 0, 0], ('Animal_1', 'Animal_5'): [0, 0, 0, 0], ('Animal_2', 'Animal_3'): [0, 0, 0, 0], ('Animal_2', 'Animal_4'): [5, 0, 0, 2], ('Animal_2', 'Animal_5'): [0, 0, 0, 0], ('Animal_3', 'Animal_4'): [0, 0, 0, 0], ('Animal_3', 'Animal_5'): [0, 2, 22, 0], ('Animal_4', 'Animal_5'): [0, 0, 0, 0]})
+    >>> graph_pg = visualize(graph=multigraph, node_size={'Animal_1': 10, 'Animal_2': 26, 'Animal_3': 50, 'Animal_4': 50, 'Animal_5': 50}, save_path='/Users/simon/Downloads/graphs', node_shape='box', palette='spring', smooth_type='diagonalCross')
+    """
+
+    check_instance(source=NetworkMixin.visualize.__name__, instance=graph, accepted_types=(nx.MultiGraph, nx.Graph))
+    check_instance(source=NetworkMixin.visualize.__name__, instance=node_size, accepted_types=(int, float, dict))
+    multi_graph = graph.is_multigraph()
+    if multi_graph:
+        check_if_dir_exists(in_dir=save_path, source=NetworkMixin.visualize.__name__)
+    check_valid_tuple(x=img_size, accepted_lengths=(2,), valid_dtypes=(int,))
+    if isinstance(node_size, dict):
+        if sorted(graph.nodes) != sorted(list(node_size.keys())):
+            raise InvalidInputError(msg=f"node_size keys do not match graph node names: {sorted(graph.nodes)} != {sorted(list(node_size.keys()))}")
+        for v in node_size.values():
+            check_float(name=f"{NetworkMixin.visualize.__name__} node_size", value=v, min_value=0)
+    else:
+        check_float(name=f"{NetworkMixin.visualize.__name__} node size", value=node_size, min_value=1)
+    if isinstance(palette, dict):
+        if sorted(graph.nodes) != sorted(list(palette.keys())):
+            raise InvalidInputError(msg=f"palette keys do not match graph node names: {sorted(graph.nodes)} != {sorted(list(palette.keys()))}")
+        for v in palette.values():
+            check_valid_hex_color(color_hex=str(v))
+        clrs = palette
+    else:
+        clrs = create_color_palette(pallete_name=palette, as_hex=True, increments=len(list(graph.nodes())))
+
+    if not multi_graph:
+        # pyvis Network takes height first, then width.
+        network_graph = Network(height=f"{img_size[1]}px", width=f"{img_size[0]}px")
+        network_graph.set_edge_smooth(smooth_type)
+        for node_cnt, node_name in enumerate(graph):
+            node_node_size = node_size[node_name] if isinstance(node_size, dict) else node_size
+            node_clr = palette[node_name] if isinstance(palette, dict) else clrs[node_cnt]
+            network_graph.add_node(n_id=node_name, shape=node_shape, color=node_clr, size=node_node_size)
+        for source, target, edge_attrs in graph.edges(data=True):
+            network_graph.add_edge(source, target, value=edge_attrs["weight"])
+        if save_path is not None:
+            network_graph.save_graph(save_path)
+        return network_graph
+    else:
+        results = {}
+        edge_labels = list(set(data["label"] for _, _, data in graph.edges(data=True)))
+        check_valid_lst(source=f"{NetworkMixin.visualize.__name__} edge_labels", data=edge_labels, min_len=1)
+        for edge_label in edge_labels:
+            network_graph = Network(height=f"{img_size[1]}px", width=f"{img_size[0]}px")
+            network_graph.set_edge_smooth(smooth_type)
+            network_graph.force_atlas_2based()
+            graph_save_path = os.path.join(save_path, f"{edge_label}.html")
+            # Keep only the edges belonging to this time-point label and rebuild a simple graph from them.
+            filtered_graph = nx.Graph(graph.edge_subgraph([(u, v, k) for u, v, k, data in graph.edges(keys=True, data=True) if data.get("label") == edge_label]))
+            for node_cnt, node_name in enumerate(filtered_graph):
+                node_node_size = node_size[node_name] if isinstance(node_size, dict) else node_size
+                node_clr = palette[node_name] if isinstance(palette, dict) else clrs[node_cnt]
+                network_graph.add_node(n_id=node_name, shape=node_shape, color=node_clr, size=node_node_size)
+            for source, target, edge_attrs in filtered_graph.edges(data=True):
+                network_graph.add_edge(source, target, value=edge_attrs["weight"])
+            if save_path is not None:
+                network_graph.save_graph(graph_save_path)
+            results[edge_label] = network_graph
+        return results
diff --git a/simba/sandbox/wald_wolfowitz.py b/simba/sandbox/wald_wolfowitz.py
new file mode 100644
index 000000000..f9bc2f4bf
--- /dev/null
+++ b/simba/sandbox/wald_wolfowitz.py
@@ -0,0 +1,39 @@
+import numpy as np
+from scipy.stats import norm
+
+
+def wald_wolfowitz_runs_test(data: np.ndarray):
+    """
+    Wald-Wolfowitz runs test for randomness of a binary sequence.
+
+    With m zeros and n ones, the number of runs R has expectation E[R] = 2mn/(m+n) + 1 and
+    variance Var[R] = 2mn(2mn - m - n) / ((m+n)^2 (m+n-1)); the test statistic is
+    z = (R - E[R]) / sqrt(Var[R]) with a two-sided normal p-value.
+    """
+    diffs = np.diff(data)
+    r = np.count_nonzero(diffs != 0) + 1
+    m = np.argwhere(data == 0).shape[0]
+    n = np.argwhere(data == 1).shape[0]
+    expected_r = ((2 * m * n) / (m + n)) + 1
+    variance_r = ((2 * m * n) * ((2 * m * n) - (m + n))) / (np.square(m + n) * ((m + n) - 1))
+    z = (r - expected_r) / np.sqrt(variance_r)
+    p = 2 * (1 - norm.cdf(abs(z)))
+    return z, p
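+
+# Worked example (hand-checked): for data = [1, 1, 0, 0, 1, 0] we have m = 3 zeros, n = 3 ones and
+# r = 4 runs. E[R] = 2*3*3/6 + 1 = 4 and Var[R] = (18 * (18 - 6)) / (36 * 5) = 1.2, so
+# z = (4 - 4) / sqrt(1.2) = 0 and p = 1.0: no evidence against randomness.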
+
+data_1 = np.random.randint(0, 2, (100,))
+data_2 = np.array([1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1])
+
+print(wald_wolfowitz_runs_test(data=data_1))
\ No newline at end of file
diff --git a/simba/sandbox/warp_affine.py b/simba/sandbox/warp_affine.py
new file mode 100644
index 000000000..aeee3e254
--- /dev/null
+++ b/simba/sandbox/warp_affine.py
@@ -0,0 +1,78 @@
+import numpy as np
+import cv2
+
+# Original coordinates of the body parts (9 coordinates)
+original_coords = np.array([
+    [182.94406, 277.93008],   # Body part 1 (fixed)
+    [345.5105, 301.44055],    # Body part 2 (fixed)
+    [309.65036, 257.01398],   # Body part 3
+    [215, 260],               # Body part 4
+    [266.0979, 307.34265],    # Body part 5
+    [288.1888, 244.2098],     # Body part 6
+    [273.5944, 276.4965],     # Body part 7
+    [413.6084, 317.83917],    # Body part 8
+    [474.93707, 311.13287]    # Body part 9
+])
+
+# Load the image
+image = cv2.imread(r"C:\troubleshooting\mitra\project_folder\csv\outlier_corrected_movement_location\test\bg_temp\FL_gq_Saline_0626_frames\0.png")
+
+# Specify fixed locations for body parts 1 and 2 (x is pinned; y is kept)
+fixed_point1 = np.array([100, original_coords[0, 1]])
+fixed_point2 = np.array([300, original_coords[1, 1]])
+
+# Calculate the original anchor distances before overwriting the anchor points
+original_distance_x = original_coords[1, 0] - original_coords[0, 0]
+original_distance_y = original_coords[1, 1] - original_coords[0, 1]
+
+# Update the fixed points
+original_coords[0] = fixed_point1
+original_coords[1] = fixed_point2
+
+# Scale factors are constant, so compute them once outside the loop
+scale_x = (fixed_point2[0] - fixed_point1[0]) / original_distance_x
+scale_y = (original_coords[1, 1] - original_coords[0, 1]) / original_distance_y
+
+# Calculate new coordinates for the other points
+for i in range(2, len(original_coords)):
+    new_x = fixed_point1[0] + (original_coords[i, 0] - original_coords[0, 0]) * scale_x
+    new_y = fixed_point1[1] + (original_coords[i, 1] - original_coords[0, 1]) * scale_y
+    original_coords[i] = [new_x, new_y]
+
+# Calculate the center of the body parts for rotation
+center = np.mean(original_coords, axis=0)
+
+# Calculate the angle of the original line connecting body part 1 and body part 2
+dx = original_coords[1, 0] - original_coords[0, 0]
+dy = original_coords[1, 1] - original_coords[0, 1]
+original_angle = np.arctan2(dy, dx)  # Angle in radians
+
+# Target angle for facing right (0 radians)
+target_angle = 0
+
+# Calculate the angle of rotation needed
+angle_of_rotation = np.degrees(target_angle - original_angle)  # Convert to degrees
+
+# Create the rotation matrix
+rotation_matrix = cv2.getRotationMatrix2D(tuple(center), angle_of_rotation, 1)
+
+# Apply the rotation transformation to the coordinates
+for i in range(len(original_coords)):
+    coord_homogeneous = np.append(original_coords[i], 1)  # Convert to homogeneous coordinates
+    new_coord = rotation_matrix @ coord_homogeneous       # Apply the rotation
+    original_coords[i] = new_coord[:2]
+
+# Debug: Print out the new coordinates
+print("New coordinates:")
+print(original_coords)
+
+# Draw circles at the new body part locations
+for coord in original_coords:
+    cv2.circle(image, (int(coord[0]), int(coord[1])), 10, (0, 0, 255), -1)  # Larger red circles
+
+# Display the modified image without saving
+cv2.imshow('Modified Image', image)
+cv2.waitKey(0)  # Wait for a key press
+cv2.destroyAllWindows()  # Close the image window
diff --git a/simba/sandbox/warp_numba.py b/simba/sandbox/warp_numba.py
new file mode 100644
index 000000000..371ae6260
--- /dev/null
+++ b/simba/sandbox/warp_numba.py
@@ -0,0 +1,151 @@
+import numpy as np
+import cv2
+from numba import jit
+
+from simba.utils.read_write import read_df, read_img_batch_from_video_gpu
+from simba.utils.data import egocentrically_align_pose
+
+
+@jit(nopython=True)
+def center_rotation_warpaffine_vectors(rotation_vectors: np.ndarray, centers: np.ndarray):
+    """
+    Create warpAffine vectors for rotating a video around its per-frame centers. These are used for egocentric alignment of video.
+
+    .. note::
+       `rotation_vectors` and `centers` are returned by :func:`simba.utils.data.egocentrically_align_pose`, or :func:`simba.utils.data.egocentrically_align_pose_numba`.
+    """
+    results = np.full((rotation_vectors.shape[0], 2, 3), fill_value=np.nan, dtype=np.float64)
+    for idx in range(rotation_vectors.shape[0]):
+        R, center = rotation_vectors[idx], centers[idx]
+        # Same construction as cv2.getRotationMatrix2D: rotate around `center` rather than the origin.
+        top = np.hstack((R[0, :], np.array([-center[0] * R[0, 0] - center[1] * R[0, 1] + center[0]])))
+        bottom = np.hstack((R[1, :], np.array([-center[0] * R[1, 0] - center[1] * R[1, 1] + center[1]])))
+        results[idx] = np.vstack((top, bottom))
+    return results
+
+
+@jit(nopython=True)
+def align_target_warpaffine_vectors(centers: np.ndarray, target: np.ndarray):
+    """
+    Create warpAffine vectors for placing the original center at a new target position. These are used for egocentric alignment of video.
+
+    .. note::
+       `centers` are returned by :func:`simba.utils.data.egocentrically_align_pose`, or :func:`simba.utils.data.egocentrically_align_pose_numba`.
+       `target` is the location in the image where the anchor body-part should be placed.
+    """
+    results = np.full((centers.shape[0], 2, 3), fill_value=np.nan, dtype=np.float64)
+    for idx in range(centers.shape[0]):
+        translation_x = target[0] - centers[idx][0]
+        translation_y = target[1] - centers[idx][1]
+        results[idx] = np.array([[1, 0, translation_x], [0, 1, translation_y]])
+    return results
+
+
+@jit(nopython=True, cache=True)
+def _bilinear_interpolate(image: np.ndarray, x: int, y: int):
+    """
+    Helper called by :func:`simba.sandbox.warp_numba.egocentric_frm_rotator`. Performs bilinear interpolation on an image at fractional coordinates (x, y), returning 0 for out-of-bounds samples.
+    """
+    x0, y0 = int(np.floor(x)), int(np.floor(y))
+    dx, dy = x - x0, y - y0
+    if x0 < 0 or x0 + 1 >= image.shape[1] or y0 < 0 or y0 + 1 >= image.shape[0]:
+        return 0
+    I00, I01 = image[y0, x0], image[y0, x0+1]
+    I10, I11 = image[y0+1, x0], image[y0+1, x0+1]
+    return (I00 * (1 - dx) * (1 - dy) + I01 * dx * (1 - dy) + I10 * (1 - dx) * dy + I11 * dx * dy)
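+
+# The 2x3 matrices built above are in the same layout that OpenCV's warpAffine consumes, so a
+# single frame can be cross-checked against the numba path (a sketch, assuming `imgs` and
+# `rot_matrices_center` as created in the example further down; dsize is (width, height)):
+# check = cv2.warpAffine(imgs[0], rot_matrices_center[0], (imgs.shape[2], imgs.shape[1]))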
+
+@jit(nopython=True)
+def egocentric_frm_rotator(frames: np.ndarray, rotation_matrices: np.ndarray) -> np.ndarray:
+    """
+    Rotates a sequence of frames using the provided rotation matrices in an egocentric manner.
+
+    Applies a geometric transformation to each frame in the input sequence based on
+    its corresponding rotation matrix. The transformation includes rotation and translation,
+    followed by bilinear interpolation to map pixel values from the source frame to the output frame.
+
+    .. note::
+       To create the rotation matrices, see :func:`simba.sandbox.warp_numba.center_rotation_warpaffine_vectors` and :func:`simba.sandbox.warp_numba.align_target_warpaffine_vectors`.
+
+    :param np.ndarray frames: A 4D array of shape (N, H, W, C).
+    :param np.ndarray rotation_matrices: A 3D array of shape (N, 2, 3), where each 2x3 matrix represents an affine transformation (rotation plus translation) for the corresponding frame.
+    :return: A 4D array of shape (N, H, W, C), representing the warped frames after applying the transformations. The shape matches the input `frames`.
+    :rtype: np.ndarray
+
+    :example:
+    >>> DATA_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/data/501_MA142_Gi_Saline_0513.csv"
+    >>> VIDEO_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513.mp4"
+    >>> SAVE_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513_rotated.mp4"
+    >>> ANCHOR_LOC = np.array([300, 300])
+    >>>
+    >>> df = read_df(file_path=DATA_PATH, file_type='csv')
+    >>> bp_cols = [x for x in df.columns if not x.endswith('_p')]
+    >>> data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int64)
+    >>> data, centers, rotation_matrices = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=180)
+    >>> imgs = read_img_batch_from_video_gpu(video_path=VIDEO_PATH, start_frm=0, end_frm=100)
+    >>> imgs = np.stack(list(imgs.values()), axis=0)
+    >>>
+    >>> rot_matrices_center = center_rotation_warpaffine_vectors(rotation_vectors=rotation_matrices, centers=centers)
+    >>> rot_matrices_align = align_target_warpaffine_vectors(centers=centers, target=ANCHOR_LOC)
+    >>>
+    >>> imgs_centered = egocentric_frm_rotator(frames=imgs, rotation_matrices=rot_matrices_center)
+    >>> imgs_out = egocentric_frm_rotator(frames=imgs_centered, rotation_matrices=rot_matrices_align)
+    """
+
+    N, H, W, C = frames.shape
+    warped_frames = np.zeros_like(frames)
+    for i in range(N):
+        frame = frames[i]
+        rotation_matrix = rotation_matrices[i]
+        affine_matrix = rotation_matrix[:2, :2]
+        translation = np.ascontiguousarray(rotation_matrix[:2, 2])
+        # Invert the affine map so each destination pixel can be sampled back from the source frame.
+        inverse_affine_matrix = np.ascontiguousarray(np.linalg.inv(affine_matrix))
+        inverse_translation = -np.dot(inverse_affine_matrix, translation)
+        for r in range(H):
+            for c in range(W):
+                src_x = inverse_affine_matrix[0, 0] * c + inverse_affine_matrix[0, 1] * r + inverse_translation[0]
+                src_y = inverse_affine_matrix[1, 0] * c + inverse_affine_matrix[1, 1] * r + inverse_translation[1]
+                for ch in range(C):
+                    warped_frames[i, r, c, ch] = _bilinear_interpolate(frame[:, :, ch], src_x, src_y)
+    return warped_frames
+
+
+DATA_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/data/501_MA142_Gi_Saline_0513.csv"
+VIDEO_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513.mp4"
+SAVE_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/rotate_ex/videos/501_MA142_Gi_Saline_0513_rotated.mp4"
+ANCHOR_LOC = np.array([300, 300])
+
+df = read_df(file_path=DATA_PATH, file_type='csv')
+bp_cols = [x for x in df.columns if not x.endswith('_p')]
+data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int64)
+data, centers, rotation_matrices = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=180)
+imgs = read_img_batch_from_video_gpu(video_path=VIDEO_PATH, start_frm=0, end_frm=100)
+imgs = np.stack(list(imgs.values()), axis=0)
+
+rot_matrices_center = center_rotation_warpaffine_vectors(rotation_vectors=rotation_matrices, centers=centers)
+rot_matrices_align = align_target_warpaffine_vectors(centers=centers, target=ANCHOR_LOC)
+
+imgs_centered = egocentric_frm_rotator(frames=imgs, rotation_matrices=rot_matrices_center)
+imgs_out = egocentric_frm_rotator(frames=imgs_centered, rotation_matrices=rot_matrices_align)
+
+# Preview the aligned frames
+for i in range(imgs_out.shape[0]):
+    cv2.imshow('egocentric alignment', imgs_out[i])
+    cv2.waitKey(60)
+cv2.destroyAllWindows()
+
+
+# DATA_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\data\501_MA142_Gi_Saline_0513.csv"
+# VIDEO_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513.mp4"
+# SAVE_PATH = r"C:\Users\sroni\OneDrive\Desktop\rotate_ex\videos\501_MA142_Gi_Saline_0513_rotated.mp4"
+# ANCHOR_LOC = np.array([250, 250])
+#
+# df = read_df(file_path=DATA_PATH, file_type='csv')
+# bp_cols = [x for x in df.columns if not x.endswith('_p')]
+# data = df[bp_cols].values.reshape(len(df), int(len(bp_cols)/2), 2).astype(np.int32)
+# _, centers, rotation_vectors = egocentrically_align_pose(data=data, anchor_1_idx=6, anchor_2_idx=2, anchor_location=ANCHOR_LOC, direction=0)
+# p = center_rotation_warpaffine_vectors(rotation_vectors=rotation_vectors, centers=centers)
+# k = align_target_warpaffine_vectors(centers=centers, target=ANCHOR_LOC)
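+
+# SAVE_PATH is defined earlier in this script but unused; a minimal sketch for writing the aligned
+# frames to it instead of previewing (assumes 30 fps and that imgs_out is (N, H, W, C) uint8 BGR):
+# fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+# writer = cv2.VideoWriter(SAVE_PATH, fourcc, 30, (imgs_out.shape[2], imgs_out.shape[1]))
+# for i in range(imgs_out.shape[0]):
+#     writer.write(imgs_out[i])
+# writer.release()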
diff --git a/simba/sandbox/watermark.py b/simba/sandbox/watermark.py
new file mode 100644
index 000000000..01dfa4204
--- /dev/null
+++ b/simba/sandbox/watermark.py
@@ -0,0 +1,71 @@
+from typing import Union, Optional
+try:
+    from typing import Literal
+except:
+    from typing_extensions import Literal
+
+import os
+import subprocess
+from simba.utils.read_write import get_fn_ext, find_all_videos_in_directory, get_video_meta_data
+from simba.utils.checks import check_float, check_str, check_if_dir_exists, check_file_exist_and_readable
+from simba.utils.errors import InvalidInputError
+from simba.utils.printing import SimbaTimer, stdout_success
+
+def watermark_video(video_path: Union[str, os.PathLike],
+                    watermark_path: Union[str, os.PathLike],
+                    position: Optional[Literal['top_left', 'bottom_right', 'top_right', 'bottom_left', 'center']] = 'top_left',
+                    opacity: Optional[float] = 0.5,
+                    scale: Optional[float] = 0.05,
+                    save_dir: Optional[Union[str, os.PathLike]] = None) -> None:
+    """
+    Watermark a video file or a directory of video files.
+
+    .. video:: _static/img/watermark_video.webm
+       :loop:
+
+    :param Union[str, os.PathLike] video_path: The path to the video file, or to a directory of video files.
+    :param Union[str, os.PathLike] watermark_path: The path to the watermark .png file.
+    :param Optional[str] position: The position of the watermark. Options: 'top_left', 'bottom_right', 'top_right', 'bottom_left', 'center'.
+    :param Optional[float] opacity: The opacity of the watermark as a value between 0-1.0, with 1.0 meaning the same as the input image. Default: 0.5.
+    :param Optional[float] scale: The scale of the watermark as a ratio of the image size. Default: 0.05.
+    :param Optional[Union[str, os.PathLike]] save_dir: The location where to save the watermarked video(s). If None, then saves the video(s) in the same directory as the first video.
+    :return: None
+
+    :example:
+    >>> watermark_video(video_path='/Users/simon/Desktop/envs/simba/troubleshooting/multi_animal_dlc_two_c57/project_folder/videos/watermark/Together_1_powerpointready.mp4', watermark_path='/Users/simon/Desktop/splash.png', position='top_left', opacity=1.0, scale=0.2)
+    """
+    timer = SimbaTimer(start=True)
+    POSITIONS = ['top_left', 'bottom_right', 'top_right', 'bottom_left', 'center']
+    check_float(name=f'{watermark_video.__name__} opacity', value=opacity, min_value=0.001, max_value=1.0)
+    check_float(name=f'{watermark_video.__name__} scale', value=scale, min_value=0.001, max_value=0.999)
+    check_str(name=f'{watermark_video.__name__} position', value=position, options=POSITIONS)
+    check_file_exist_and_readable(file_path=watermark_path)
+
+    if os.path.isfile(video_path):
+        video_paths = [video_path]
+    elif os.path.isdir(video_path):
+        video_paths = list(find_all_videos_in_directory(directory=video_path, as_dict=True, raise_error=True).values())
+    else:
+        raise InvalidInputError(msg=f'{video_path} is not a valid file path or a valid directory path', source=watermark_video.__name__)
+    if save_dir is not None:
+        check_if_dir_exists(in_dir=save_dir)
+    else:
+        save_dir = os.path.dirname(video_paths[0])
+    for file_cnt, video_path in enumerate(video_paths):
+        _, video_name, video_ext = get_fn_ext(video_path)
+        _ = get_video_meta_data(video_path=video_path)
+        print(f'Watermarking {video_name} (Video {file_cnt+1}/{len(video_paths)})...')
+        out_path = os.path.join(save_dir, f'{video_name}_watermarked{video_ext}')
+        # The watermark stream is scaled to `scale` of its own width, its alpha multiplied by
+        # `opacity`, and then overlaid at the requested corner (or centered).
+        if position == POSITIONS[0]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{watermark_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[wm];[0:v][wm]overlay=0:0" "{out_path}" -loglevel error -stats -hide_banner -y'
+        elif position == POSITIONS[1]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{watermark_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[wm];[0:v][wm]overlay=W-w:H-h" "{out_path}" -loglevel error -stats -hide_banner -y'
+        elif position == POSITIONS[2]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{watermark_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[wm];[0:v][wm]overlay=W-w-0:0" "{out_path}" -loglevel error -stats -hide_banner -y'
+        elif position == POSITIONS[3]:
+            cmd = f'ffmpeg -i "{video_path}" -i "{watermark_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[wm];[0:v][wm]overlay=0:H-h-0" "{out_path}" -loglevel error -stats -hide_banner -y'
+        else:
+            cmd = f'ffmpeg -i "{video_path}" -i "{watermark_path}" -filter_complex "[1:v]scale=iw*{scale}:-1,format=rgba,colorchannelmixer=aa={opacity}[wm];[0:v][wm]overlay=(W-w)/2:(H-h)/2" "{out_path}" -loglevel error -stats -hide_banner -y'
+        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
+    timer.stop_timer()
+    stdout_success(msg=f'{len(video_paths)} watermarked video(s) saved in {save_dir}', elapsed_time=timer.elapsed_time_str)
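+
+# For reference, the 'center' branch above expands to a command of this shape (a sketch with
+# hypothetical file names and the default opacity/scale, not output from a run):
+# ffmpeg -i "in.mp4" -i "logo.png" -filter_complex "[1:v]scale=iw*0.05:-1,format=rgba,colorchannelmixer=aa=0.5[wm];[0:v][wm]overlay=(W-w)/2:(H-h)/2" "in_watermarked.mp4" -loglevel error -stats -hide_banner -y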
diff --git a/simba/sandbox/wilcoxon.py b/simba/sandbox/wilcoxon.py
new file mode 100644
index 000000000..c5e81ac9f
--- /dev/null
+++ b/simba/sandbox/wilcoxon.py
@@ -0,0 +1,43 @@
+import numpy as np
+from simba.utils.data import fast_mean_rank
+
+
+def wilcoxon(x: np.ndarray, y: np.ndarray):
+    """
+    Wilcoxon signed-rank test for two paired samples.
+
+    Returns the normal-approximation z statistic (with tie correction) and the effect size r = z / sqrt(n).
+    """
+    data = np.hstack((x.reshape(-1, 1), y.reshape(-1, 1)))
+    n = data.shape[0]
+    diff = np.diff(data).flatten()  # paired differences y - x
+    diff_abs = np.abs(diff)
+    rank_w_ties = fast_mean_rank(data=diff_abs, descending=False)
+    signed_rank_w_ties = np.full((rank_w_ties.shape[0]), np.nan)
+    t_plus, t_minus = 0, 0
+    for i in range(diff.shape[0]):
+        if diff[i] < 0:
+            signed_rank_w_ties[i] = -rank_w_ties[i]
+            t_minus += np.abs(rank_w_ties[i])
+        else:
+            signed_rank_w_ties[i] = rank_w_ties[i]
+            t_plus += np.abs(rank_w_ties[i])
+    u_w = (n * (n + 1)) / 4
+    # Tie correction: each group of t tied absolute ranks contributes (t**3 - t) / 2 once.
+    std_correction = 0
+    for val in np.unique(rank_w_ties):
+        same_rank_n = np.argwhere(rank_w_ties == val).flatten().shape[0]
+        if same_rank_n > 1:
+            std_correction += (((same_rank_n ** 3) - same_rank_n) / 2)
+    std = np.sqrt(((n * (n + 1)) * ((2 * n) + 1) - std_correction) / 24)
+    W = np.min((t_plus, t_minus))
+    z = (W - u_w) / std
+    r = (z / np.sqrt(n))
+    return z, r
+
+
+x = np.random.random(20)
+y = np.random.random(20)
+
+print(wilcoxon(x=x, y=y))
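+
+# Cross-check (a sketch): scipy implements the same test, and its `statistic` is the same
+# W = min(t_plus, t_minus) used above, so the two should broadly agree on the same paired data:
+# from scipy.stats import wilcoxon as scipy_wilcoxon
+# print(scipy_wilcoxon(x, y))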
diff --git a/simba/sandbox/yolo_torch_inference.py b/simba/sandbox/yolo_torch_inference.py
new file mode 100644
index 000000000..36ce77628
--- /dev/null
+++ b/simba/sandbox/yolo_torch_inference.py
@@ -0,0 +1,75 @@
+import torch
+import cv2
+import numpy as np
+import torchvision.transforms as transforms
+from PIL import Image
+
+# Paths
+MDL = r"/mnt/c/troubleshooting/yolo_mdl/train/weights/best.pt"
+IMG_PATH = r"/mnt/c/Users/sroni/OneDrive/Desktop/Screenshot 2024-11-15 123805.png"
+
+# Load the model (YOLOv8 model)
+checkpoint = torch.load(MDL, map_location=torch.device('cpu'))  # Load model weights
+model = checkpoint['model'].eval()  # Set to eval mode
+
+# Load and preprocess the image
+image = Image.open(IMG_PATH)  # PIL image loading
+if image.mode == 'RGBA':
+    image = image.convert('RGB')
+
+image_np = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)  # Convert to BGR format for OpenCV
+
+transform = transforms.Compose([
+    transforms.Resize((640, 640)),  # Resize to 640x640
+    transforms.ToTensor(),          # Convert to tensor
+])
+
+# Convert image to tensor, add batch dimension, and match the checkpoint's half precision
+image_tensor = transform(image).unsqueeze(0).half()
+
+# Run inference on the image
+with torch.no_grad():  # Disable gradient calculation
+    predictions = model(image_tensor)  # Inference
+
+# Raw YOLOv8 output has shape (1, 4 + n_classes, n_anchors); take the first batch element
+# and transpose so each row is one candidate box.
+predictions = predictions[0][0].permute(1, 0)
+boxes = predictions[:, :4]       # First 4 columns: [x_center, y_center, width, height]
+confidences = predictions[:, 4]  # 5th column: confidence score
+top_conf = torch.argmax(confidences)
+top_box = boxes[top_conf, :]
+
+# Convert the top box from center format to corner format
+xmin = top_box[0] - top_box[2] / 2
+ymin = top_box[1] - top_box[3] / 2
+xmax = top_box[0] + top_box[2] / 2
+ymax = top_box[1] + top_box[3] / 2
+
+original_height, original_width = image_np.shape[:2]  # Get the original image dimensions
+
+# Scale the bounding box from the 640x640 model input back to the original image size
+scale_x = original_width / 640
+scale_y = original_height / 640
+
+xmin = int(xmin * scale_x)
+ymin = int(ymin * scale_y)
+xmax = int(xmax * scale_x)
+ymax = int(ymax * scale_y)
+
+print(xmin, ymin, xmax, ymax)
+
+# # Draw the bounding box on the original image
+# cv2.rectangle(image_np, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)  # Green box, thickness=2
+#
+# # Display the image with the bounding box
+# cv2.imshow("Image with Bounding Box", image_np)
+# cv2.waitKey(0)
+# cv2.destroyAllWindows()
diff --git a/simba/sandbox/yolo_torch_train.py b/simba/sandbox/yolo_torch_train.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/simba/sandbox/youden_j.py b/simba/sandbox/youden_j.py
new file mode 100644
index 000000000..5980b49b7
--- /dev/null
+++ b/simba/sandbox/youden_j.py
@@ -0,0 +1,37 @@
+import numpy as np
+from simba.utils.checks import check_valid_array
+
+
+def youden_j(sample_1: np.ndarray, sample_2: np.ndarray) -> float:
+    """
+    Calculate Youden's J statistic (sensitivity + specificity - 1) from two binary arrays.
+
+    :param np.ndarray sample_1: The first binary array (treated as ground truth).
+    :param np.ndarray sample_2: The second binary array (treated as prediction).
+    :return float: Youden's J statistic, or np.nan if either class is absent from sample_1.
+    """
+    check_valid_array(data=sample_1, source=f'{youden_j.__name__} sample_1', accepted_ndims=(1,), accepted_values=[0, 1])
+    check_valid_array(data=sample_2, source=f'{youden_j.__name__} sample_2', accepted_ndims=(1,), accepted_shapes=[sample_1.shape], accepted_values=[0, 1])
+    tp = np.sum((sample_1 == 1) & (sample_2 == 1))
+    tn = np.sum((sample_1 == 0) & (sample_2 == 0))
+    fp = np.sum((sample_1 == 0) & (sample_2 == 1))
+    fn = np.sum((sample_1 == 1) & (sample_2 == 0))
+    if tp + fn == 0 or tn + fp == 0:
+        return np.nan
+    else:
+        sensitivity = tp / (tp + fn)
+        specificity = tn / (tn + fp)
+        return sensitivity + specificity - 1
+
+
+sample_1 = np.random.randint(0, 2, (100))
+sample_2 = np.random.randint(0, 2, (100))
+
+print(youden_j(sample_1=sample_1, sample_2=sample_2))
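+
+# Cross-check (a sketch): Youden's J equals 2 * balanced accuracy - 1, so scikit-learn can verify
+# the value on the same arrays (assumes scikit-learn is installed, as elsewhere in this patch):
+# from sklearn.metrics import balanced_accuracy_score
+# print(2 * balanced_accuracy_score(sample_1, sample_2) - 1)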