diff --git a/.gitignore b/.gitignore index edeffba6..360fd0bf 100644 --- a/.gitignore +++ b/.gitignore @@ -157,4 +157,6 @@ dmypy.json train_data/ train_cache.npz -autotune/ \ No newline at end of file +autotune/ + +gui-settings.json \ No newline at end of file diff --git a/BirdNET-Analyzer-full.spec b/BirdNET-Analyzer-full.spec index 8c133ed4..632e31ab 100644 --- a/BirdNET-Analyzer-full.spec +++ b/BirdNET-Analyzer-full.spec @@ -58,6 +58,8 @@ gui = Analysis( ("example/species_list.txt", "example"), ("labels", "labels"), ("gui", "gui"), + ("gui-settings.json", "."), + ("lang", "lang") ], hiddenimports=[], hookspath=["extra-hooks"], diff --git a/gui.py b/gui.py index 8c05e9ae..7f3f9e14 100644 --- a/gui.py +++ b/gui.py @@ -35,6 +35,9 @@ import species import utils from train import trainModel +import localization as loc + +loc.load_localization() _WINDOW: webview.Window OUTPUT_TYPE_MAP = { @@ -70,7 +73,7 @@ def validate(value, msg): def run_species_list(out_path, filename, lat, lon, week, use_yearlong, sf_thresh, sortby): - validate(out_path, "Please select a directory for the species list.") + validate(out_path, loc.localize("validation-no-directory-selected")) species.run( os.path.join(out_path, filename if filename else "species_list.txt"), @@ -81,7 +84,7 @@ def run_species_list(out_path, filename, lat, lon, week, use_yearlong, sf_thresh sortby, ) - gr.Info(f"Species list saved at {cfg.OUTPUT_PATH}") + gr.Info(f"{loc.localize('species-tab-finish-info')} {cfg.OUTPUT_PATH}") def runSingleFileAnalysis( @@ -101,7 +104,7 @@ def runSingleFileAnalysis( custom_classifier_file, locale, ): - validate(input_path, "Please select a file.") + validate(input_path, loc.localize("validation-no-file-selected")) return runAnalysis( input_path, @@ -155,12 +158,12 @@ def runBatchAnalysis( skip_existing, progress=gr.Progress(), ): - validate(input_dir, "Please select a directory.") + validate(input_dir, loc.localize("validation-no-directory-selected")) batch_size = int(batch_size) threads = int(threads) if species_list_choice == _CUSTOM_SPECIES: - validate(species_list_file, "Please select a species list.") + validate(species_list_file, loc.localize("validation-no-species-list-selected")) return runAnalysis( None, @@ -241,7 +244,7 @@ def runAnalysis( progress: The gradio progress bar. """ if progress is not None: - progress(0, desc="Preparing ...") + progress(0, desc=f"{loc.localize('progress-preparing')} ...") locale = locale.lower() # Load eBird codes, labels @@ -268,7 +271,7 @@ def runAnalysis( cfg.SPECIES_LIST = species.getSpeciesList(cfg.LATITUDE, cfg.LONGITUDE, cfg.WEEK, cfg.LOCATION_FILTER_THRESHOLD) elif species_list_choice == _CUSTOM_CLASSIFIER: if custom_classifier_file is None: - raise gr.Error("No custom classifier selected.") + raise gr.Error(loc.localize("validation-no-custom-classifier-selected")) # Set custom classifier? 
cfg.CUSTOM_CLASSIFIER = ( @@ -317,13 +320,13 @@ def runAnalysis( else: cfg.FILE_LIST = [cfg.INPUT_PATH] - validate(cfg.FILE_LIST, "No audio files found.") + validate(cfg.FILE_LIST, loc.localize("validation-no-audio-files-found")) # Set confidence threshold cfg.MIN_CONFIDENCE = confidence # Set sensitivity - cfg.SIGMOID_SENSITIVITY = sensitivity + cfg.SIGMOID_SENSITIVITY = max(0.5, min(1.0 - (float(sensitivity) - 1.0), 1.5)) # Set overlap cfg.SIG_OVERLAP = overlap @@ -363,7 +366,7 @@ def runAnalysis( result_list = [] if progress is not None: - progress(0, desc="Starting ...") + progress(0, desc=f"{loc.localize('progress-starting')} ...") # Analyze files if cfg.CPU_THREADS < 2: @@ -390,10 +393,10 @@ def runAnalysis( return [[os.path.relpath(r[0], input_dir), r[1]] for r in result_list] if input_dir else cfg.OUTPUT_PATH -_CUSTOM_SPECIES = "Custom species list" -_PREDICT_SPECIES = "Species by location" -_CUSTOM_CLASSIFIER = "Custom classifier" -_ALL_SPECIES = "all species" +_CUSTOM_SPECIES = loc.localize("species-list-radio-option-custom-list") +_PREDICT_SPECIES = loc.localize("species-list-radio-option-predict-list") +_CUSTOM_CLASSIFIER = loc.localize("species-list-radio-option-custom-classifier") +_ALL_SPECIES = loc.localize("species-list-radio-option-all") def show_species_choice(choice: str): @@ -563,27 +566,27 @@ def start_training( Returns: Returns a matplotlib.pyplot figure. """ - validate(data_dir, "Please select your Training data.") - validate(output_dir, "Please select a directory for the classifier.") - validate(classifier_name, "Please enter a valid name for the classifier.") + validate(data_dir, loc.localize("validation-no-training-data-selected")) + validate(output_dir, loc.localize("validation-no-directory-for-classifier-selected")) + validate(classifier_name, loc.localize("validation-no-valid-classifier-name")) if not epochs or epochs < 0: - raise gr.Error("Please enter a valid number of epochs.") + raise gr.Error(loc.localize("validation-no-valid-epoch-number")) if not batch_size or batch_size < 0: - raise gr.Error("Please enter a valid batch size.") + raise gr.Error(loc.localize("validation-no-valid-batch-size")) if not learning_rate or learning_rate < 0: - raise gr.Error("Please enter a valid learning rate.") + raise gr.Error(loc.localize("validation-no-valid-learning-rate")) if fmin < cfg.SIG_FMIN or fmax > cfg.SIG_FMAX or fmin > fmax: - raise gr.Error(f"Please enter valid frequency range in [{cfg.SIG_FMIN}, {cfg.SIG_FMAX}]") + raise gr.Error(f"{loc.localize('validation-no-valid-frequency')} [{cfg.SIG_FMIN}, {cfg.SIG_FMAX}]") if not hidden_units or hidden_units < 0: hidden_units = 0 if progress is not None: - progress((0, epochs), desc="Loading data & building classifier", unit="epochs") + progress((0, epochs), desc=loc.localize("progress-build-classifier"), unit="epochs") cfg.TRAIN_DATA_PATH = data_dir cfg.SAMPLE_CROP_MODE = crop_mode @@ -614,26 +617,36 @@ def start_training( def dataLoadProgression(num_files, num_total_files, label): if progress is not None: progress( - (num_files, num_total_files), total=num_total_files, unit="files", desc=f"Loading data for '{label}'" + (num_files, num_total_files), + total=num_total_files, + unit="files", + desc=f"{loc.localize('progress-loading-data')} '{label}'", ) def epochProgression(epoch, logs=None): if progress is not None: if epoch + 1 == epochs: - progress((epoch + 1, epochs), total=epochs, unit="epochs", desc=f"Saving at {cfg.CUSTOM_CLASSIFIER}") + progress( + (epoch + 1, epochs), + total=epochs, + unit="epochs", + 
desc=f"{loc.localize('progress-saving')} {cfg.CUSTOM_CLASSIFIER}", + ) else: - progress((epoch + 1, epochs), total=epochs, unit="epochs", desc=f"Training model") + progress((epoch + 1, epochs), total=epochs, unit="epochs", desc=loc.localize("progress-training")) def trialProgression(trial): if progress is not None: - progress((trial, autotune_trials), total=autotune_trials, unit="trials", desc=f"Autotune in progress") + progress( + (trial, autotune_trials), total=autotune_trials, unit="trials", desc=loc.localize("progress-autotune") + ) history = trainModel( on_epoch_end=epochProgression, on_trial_result=trialProgression, on_data_load_end=dataLoadProgression ) if len(history.epoch) < epochs: - gr.Info("Stopped early - validation metric not improving.") + gr.Info(loc.localize("training-tab-early-stoppage-msg")) auprc = history.history["val_AUPRC"] auroc = history.history["val_AUROC"] @@ -650,7 +663,7 @@ def trialProgression(trial): def extract_segments(audio_dir, result_dir, output_dir, min_conf, num_seq, seq_length, threads, progress=gr.Progress()): - validate(audio_dir, "No audio directory selected") + validate(audio_dir, loc.localize("validation-no-audio-directory-selected")) if not result_dir: result_dir = audio_dir @@ -659,7 +672,7 @@ def extract_segments(audio_dir, result_dir, output_dir, min_conf, num_seq, seq_l output_dir = audio_dir if progress is not None: - progress(0, desc="Searching files ...") + progress(0, desc=f"{loc.localize('progress-search')} ...") # Parse audio and result folders cfg.FILE_LIST = segments.parseFolders(audio_dir, result_dir) @@ -715,39 +728,44 @@ def sample_sliders(opened=True): A tuple with the created elements: (Slider (min confidence), Slider (sensitivity), Slider (overlap)) """ - with gr.Accordion("Inference settings", open=opened): + with gr.Accordion(loc.localize("inference-settings-accordion-label"), open=opened): with gr.Row(): confidence_slider = gr.Slider( minimum=0, maximum=1, value=0.5, step=0.01, - label="Minimum Confidence", - info="Minimum confidence threshold.", + label=loc.localize("inference-settings-confidence-slider-label"), + info=loc.localize("inference-settings-confidence-slider-info"), ) sensitivity_slider = gr.Slider( minimum=0.5, maximum=1.5, value=1, step=0.01, - label="Sensitivity", - info="Detection sensitivity; Higher values result in higher sensitivity.", + label=loc.localize("inference-settings-sensitivity-slider-label"), + info=loc.localize("inference-settings-sensitivity-slider-info"), ) overlap_slider = gr.Slider( - minimum=0, maximum=2.99, value=0, step=0.01, label="Overlap", info="Overlap of prediction segments." 
+ minimum=0, + maximum=2.99, + value=0, + step=0.01, + label=loc.localize("inference-settings-overlap-slider-label"), + info=loc.localize("inference-settings-overlap-slider-info"), ) with gr.Row(): fmin_number = gr.Number( cfg.SIG_FMIN, - label="Minimum bandpass frequency in Hz.", - info="Note that frequency cut-offs should also be used during training in order to be effective here.", + label=loc.localize("inference-settings-fmin-number-label"), + info=loc.localize("inference-settings-fmin-number-info"), ) fmax_number = gr.Number( cfg.SIG_FMAX, - label="Maximum bandpass frequency in Hz.", - info="Note that frequency cut-offs should also be used during training in order to be effective here.", + label=loc.localize("inference-settings-fmax-number-label"), + info=loc.localize("inference-settings-fmax-number-info"), ) return confidence_slider, sensitivity_slider, overlap_slider, fmin_number, fmax_number @@ -764,26 +782,41 @@ def locale(): label_files = os.listdir(os.path.join(os.path.dirname(sys.argv[0]), ORIGINAL_TRANSLATED_LABELS_PATH)) options = ["EN"] + [label_file.rsplit("_", 1)[-1].split(".")[0].upper() for label_file in label_files] - return gr.Dropdown(options, value="EN", label="Locale", info="Locale for the translated species common names.") + return gr.Dropdown( + options, + value="EN", + label=loc.localize("analyze-locale-dropdown-label"), + info=loc.localize("analyze-locale-dropdown-info"), + ) def species_list_coordinates(): lat_number = gr.Slider( - minimum=-90, maximum=90, value=0, step=1, label="Latitude", info="Recording location latitude." + minimum=-90, + maximum=90, + value=0, + step=1, + label=loc.localize("species-list-coordinates-lat-number-label"), + info=loc.localize("species-list-coordinates-lat-number-info"), ) lon_number = gr.Slider( - minimum=-180, maximum=180, value=0, step=1, label="Longitude", info="Recording location longitude." + minimum=-180, + maximum=180, + value=0, + step=1, + label=loc.localize("species-list-coordinates-lon-number-label"), + info=loc.localize("species-list-coordinates-lon-number-info"), ) with gr.Row(): - yearlong_checkbox = gr.Checkbox(True, label="Year-round") + yearlong_checkbox = gr.Checkbox(True, label=loc.localize("species-list-coordinates-yearlong-checkbox-label")) week_number = gr.Slider( minimum=1, maximum=48, value=1, step=1, interactive=False, - label="Week", - info="Week of the year when the recording was made. 
Values in [1, 48] (4 weeks per month).", + label=loc.localize("species-list-coordinates-week-slider-label"), + info=loc.localize("species-list-coordinates-week-slider-info"), ) def onChange(use_yearlong): @@ -795,8 +828,8 @@ def onChange(use_yearlong): maximum=0.99, value=0.03, step=0.01, - label="Location filter threshold", - info="Minimum species occurrence frequency threshold for location filter.", + label=loc.localize("species-list-coordinates-threshold-slider-label"), + info=loc.localize("species-list-coordinates-threshold-slider-info"), ) return lat_number, lon_number, week_number, sf_thresh_number, yearlong_checkbox @@ -812,24 +845,28 @@ def species_lists(opened=True): A tuple with the created elements: (Radio (choice), File (custom species list), Slider (lat), Slider (lon), Slider (week), Slider (threshold), Checkbox (yearlong?), State (custom classifier)) """ - with gr.Accordion("Species selection", open=opened): + with gr.Accordion(loc.localize("species-list-accordion-label"), open=opened): with gr.Row(): species_list_radio = gr.Radio( [_CUSTOM_SPECIES, _PREDICT_SPECIES, _CUSTOM_CLASSIFIER, _ALL_SPECIES], value=_ALL_SPECIES, - label="Species list", - info="List of all possible species", + label=loc.localize("species-list-radio-label"), + info=loc.localize("species-list-radio-info"), elem_classes="d-block", ) with gr.Column(visible=False) as position_row: lat_number, lon_number, week_number, sf_thresh_number, yearlong_checkbox = species_list_coordinates() - species_file_input = gr.File(file_types=[".txt"], visible=False) + species_file_input = gr.File( + file_types=[".txt"], visible=False, label=loc.localize("species-list-custom-list-file-label") + ) empty_col = gr.Column() with gr.Column(visible=False) as custom_classifier_selector: - classifier_selection_button = gr.Button("Select classifier") + classifier_selection_button = gr.Button( + loc.localize("species-list-custom-classifier-selection-button-label") + ) classifier_file_input = gr.Files(file_types=[".tflite"], visible=False, interactive=False) selected_classifier_state = gr.State() @@ -895,14 +932,14 @@ def build_footer():
Model version: {cfg.MODEL_VERSION}
K. Lisa Yang Center for Conservation Bioacoustics
Chemnitz University of Technology
- For docs and support visit:
birdnet.cornell.edu/analyzer
+ {loc.localize('footer-help')}:
birdnet.cornell.edu/analyzer
""" ) def build_single_analysis_tab(): - with gr.Tab("Single file"): - audio_input = gr.Audio(type="filepath", label="file", sources=["upload"]) + with gr.Tab(loc.localize("single-tab-title")): + audio_input = gr.Audio(type="filepath", label=loc.localize("single-audio-label"), sources=["upload"]) audio_path_state = gr.State() confidence_slider, sensitivity_slider, overlap_slider, fmin_number, fmax_number = sample_sliders(False) @@ -944,23 +981,36 @@ def get_audio_path(i): output_dataframe = gr.Dataframe( type="pandas", - headers=["Start (s)", "End (s)", "Scientific name", "Common name", "Confidence"], + headers=[ + loc.localize("single-tab-output-header-start"), + loc.localize("single-tab-output-header-end"), + loc.localize("single-tab-output-header-sci-name"), + loc.localize("single-tab-output-header-common-name"), + loc.localize("single-tab-output-header-confidence"), + ], elem_classes="mh-200", ) - single_file_analyze = gr.Button("Analyze") + single_file_analyze = gr.Button(loc.localize("analyze-start-button-label")) single_file_analyze.click(runSingleFileAnalysis, inputs=inputs, outputs=output_dataframe) def build_multi_analysis_tab(): - with gr.Tab("Multiple files"): + with gr.Tab(loc.localize("multi-tab-title")): input_directory_state = gr.State() output_directory_predict_state = gr.State() with gr.Row(): with gr.Column(): - select_directory_btn = gr.Button("Select directory (recursive)") - directory_input = gr.Matrix(interactive=False, elem_classes="mh-200", headers=["Subpath", "Length"]) + select_directory_btn = gr.Button(loc.localize("multi-tab-input-selection-button-label")) + directory_input = gr.Matrix( + interactive=False, + elem_classes="mh-200", + headers=[ + loc.localize("multi-tab-samples-dataframe-column-subpath-header"), + loc.localize("multi-tab-samples-dataframe-column-duration-header"), + ], + ) def select_directory_on_empty(): res = select_directory() @@ -971,18 +1021,18 @@ def select_directory_on_empty(): return res - return [res[0], [["No files found"]]] + return [res[0], [[loc.localize("multi-tab-samples-dataframe-no-files-found")]]] select_directory_btn.click( select_directory_on_empty, outputs=[input_directory_state, directory_input], show_progress=False ) with gr.Column(): - select_out_directory_btn = gr.Button("Select output directory.") + select_out_directory_btn = gr.Button(loc.localize("multi-tab-output-selection-button-label")) selected_out_textbox = gr.Textbox( - label="Output directory", + label=loc.localize("multi-tab-output-textbox-label"), interactive=False, - placeholder="If not selected, the input directory will be used.", + placeholder=loc.localize("multi-tab-output-textbox-placeholder"), ) def select_directory_wrapper(): @@ -1007,28 +1057,28 @@ def select_directory_wrapper(): selected_classifier_state, ) = species_lists() - with gr.Accordion("Output type", open=True): + with gr.Accordion(loc.localize("multi-tab-output-accordion-label"), open=True): output_type_radio = gr.Radio( list(OUTPUT_TYPE_MAP.keys()), value="Raven selection table", - label="Result type", - info="Specifies output format.", + label=loc.localize("multi-tab-output-radio-label"), + info=loc.localize("multi-tab-output-radio-info"), ) with gr.Row(): with gr.Column(): combine_tables_checkbox = gr.Checkbox( False, - label="Combine selection tables", - info="If checked, all selection tables are combined into one.", + label=loc.localize("multi-tab-output-combine-tables-checkbox-label"), + info=loc.localize("multi-tab-output-combine-tables-checkbox-info"), ) with 
gr.Column(visible=False) as output_filename_col: output_filename = gr.Textbox( "BirdNET_Results_Selection_Table.txt", - label="Output filename", - info="Name of the combined selection table.", + label=loc.localize("multi-tab-output-combined-table-name-textbox-label"), + info=loc.localize("multi-tab-output-combined-table-name-textbox-info"), ) def on_output_type_change(value, check): @@ -1053,20 +1103,36 @@ def on_combine_tables_change(value): with gr.Row(): skip_existing_checkbox = gr.Checkbox( - False, label="Skip existing results", info="Skip files that already have a result." + False, + label=loc.localize("multi-tab-skip-existing-checkbox-label"), + info=loc.localize("multi-tab-skip-existing-checkbox-info"), ) with gr.Row(): batch_size_number = gr.Number( - precision=1, label="Batch size", value=1, info="Number of samples to process at the same time." + precision=1, + label=loc.localize("multi-tab-batchsize-number-label"), + value=1, + info=loc.localize("multi-tab-batchsize-number-info"), + ) + threads_number = gr.Number( + precision=1, + label=loc.localize("multi-tab-threads-number-label"), + value=4, + info=loc.localize("multi-tab-threads-number-info"), ) - threads_number = gr.Number(precision=1, label="Threads", value=4, info="Number of CPU threads.") locale_radio = locale() - start_batch_analysis_btn = gr.Button("Analyze") + start_batch_analysis_btn = gr.Button(loc.localize("analyze-start-button-label")) - result_grid = gr.Matrix(headers=["File", "Execution"], elem_classes="mh-200") + result_grid = gr.Matrix( + headers=[ + loc.localize("multi-tab-result-dataframe-column-file-header"), + loc.localize("multi-tab-result-dataframe-column-execution-header"), + ], + elem_classes="mh-200", + ) inputs = [ output_directory_predict_state, @@ -1096,32 +1162,36 @@ def on_combine_tables_change(value): start_batch_analysis_btn.click(runBatchAnalysis, inputs=inputs, outputs=result_grid) def build_train_tab(): - with gr.Tab("Train"): + with gr.Tab(loc.localize("training-tab-title")): input_directory_state = gr.State() output_directory_state = gr.State() with gr.Row(): with gr.Column(): - select_directory_btn = gr.Button("Training data") - directory_input = gr.List(headers=["Classes"], interactive=False, elem_classes="mh-200") + select_directory_btn = gr.Button(loc.localize("training-tab-input-selection-button-label")) + directory_input = gr.List( + headers=[loc.localize("training-tab-classes-dataframe-column-classes-header")], + interactive=False, + elem_classes="mh-200", + ) select_directory_btn.click( select_subdirectories, outputs=[input_directory_state, directory_input], show_progress=False ) with gr.Column(): - select_directory_btn = gr.Button("Classifier output") + select_directory_btn = gr.Button(loc.localize("training-tab-select-output-button-label")) with gr.Column(): classifier_name = gr.Textbox( "CustomClassifier", visible=False, - info="The name of the new classifier.", + info=loc.localize("training-tab-classifier-textbox-info"), ) output_format = gr.Radio( - ["tflite", "raven", "both"], + ["tflite", "raven", (loc.localize("training-tab-output-format-both"), "both")], value="tflite", - label="Model output format", - info="Format for the trained classifier.", + label=loc.localize("training-tab-output-format-radio-label"), + info=loc.localize("training-tab-output-format-radio-info"), visible=False, ) @@ -1144,50 +1214,73 @@ def select_directory_and_update_tb(): ) autotune_cb = gr.Checkbox( - False, label="Use autotune", info="Searches best params, this will take more time." 
+ False, + label=loc.localize("training-tab-autotune-checkbox-label"), + info=loc.localize("training-tab-autotune-checkbox-info"), ) with gr.Column(visible=False) as autotune_params: with gr.Row(): autotune_trials = gr.Number( - 50, label="Trials", info="Number of training runs for hyperparameter tuning." + 50, + label=loc.localize("training-tab-autotune-trials-number-label"), + info=loc.localize("training-tab-autotune-trials-number-info"), ) autotune_executions_per_trials = gr.Number( 1, - label="Executions per trial", - info="The number of times a training run with a set of hyperparameters is repeated during hyperparameter tuning (this reduces the variance).", + label=loc.localize("training-tab-autotune-executions-number-label"), + info=loc.localize("training-tab-autotune-executions-number-info"), ) with gr.Column() as custom_params: with gr.Row(): - epoch_number = gr.Number(50, label="Epochs", info="Number of training epochs.") - batch_size_number = gr.Number(32, label="Batch size", info="Batch size.") - learning_rate_number = gr.Number(0.001, label="Learning rate", info="Learning rate.") + epoch_number = gr.Number( + 50, + label=loc.localize("training-tab-epochs-number-label"), + info=loc.localize("training-tab-epochs-number-info"), + ) + batch_size_number = gr.Number( + 32, + label=loc.localize("training-tab-batchsize-number-label"), + info=loc.localize("training-tab-batchsize-number-info"), + ) + learning_rate_number = gr.Number( + 0.001, + label=loc.localize("training-tab-learningrate-number-label"), + info=loc.localize("training-tab-learningrate-number-info"), + ) with gr.Row(): upsampling_mode = gr.Radio( - ["repeat", "mean", "smote"], + [ + (loc.localize("training-tab-upsampling-radio-option-repeat"), "repeat"), + (loc.localize("training-tab-upsampling-radio-option-mean"), "mean"), + ("SMOTE", "smote"), + ], value="repeat", - label="Upsampling mode", - info="Balance data through upsampling.", + label=loc.localize("training-tab-upsampling-radio-label"), + info=loc.localize("training-tab-upsampling-radio-info"), ) upsampling_ratio = gr.Slider( 0.0, 1.0, 0.0, step=0.01, - label="Upsampling ratio", - info="Balance train data and upsample minority classes.", + label=loc.localize("training-tab-upsampling-ratio-slider-label"), + info=loc.localize("training-tab-upsampling-ratio-slider-info"), ) with gr.Row(): hidden_units_number = gr.Number( 0, - label="Hidden units", - info="Number of hidden units. 
If set to >0, a two-layer classifier is used.", + label=loc.localize("training-tab-hiddenunits-number-label"), + info=loc.localize("training-tab-hiddenunits-number-info"), ) use_mixup = gr.Checkbox( - False, label="Use mixup", info="Whether to use mixup for training.", show_label=True + False, + label=loc.localize("training-tab-use-mixup-checkbox-label"), + info=loc.localize("training-tab-use-mixup-checkbox-info"), + show_label=True, ) def on_autotune_change(value): @@ -1201,25 +1294,32 @@ def on_autotune_change(value): fmin_number = gr.Number( cfg.SIG_FMIN, - label="Minimum bandpass frequency in Hz.", - info="Make sure that you apply the same frequency cut-off for inference.", + label=loc.localize("inference-settings-fmin-number-label"), + info=loc.localize("inference-settings-fmin-number-info"), ) fmax_number = gr.Number( cfg.SIG_FMAX, - label="Maximum bandpass frequency in Hz.", - info="Make sure that you apply the same frequency cut-off for inference.", + label=loc.localize("inference-settings-fmax-number-label"), + info=loc.localize("inference-settings-fmax-number-info"), ) with gr.Row(): crop_mode = gr.Radio( - ["center", "first", "segments"], + [ + (loc.localize("training-tab-crop-mode-radio-option-center"), "center"), + (loc.localize("training-tab-crop-mode-radio-option-first"), "first"), + (loc.localize("training-tab-crop-mode-radio-option-segments"), "segments"), + ], value="center", - label="Crop mode", - info="Crop mode for training data.", + label=loc.localize("training-tab-crop-mode-radio-label"), + info=loc.localize("training-tab-crop-mode-radio-info"), ) crop_overlap = gr.Number( - 0.0, label="Crop overlap", info="Overlap of training data segments", visible=False + 0.0, + label=loc.localize("training-tab-crop-overlap-number-label"), + info=loc.localize("training-tab-crop-overlap-number-info"), + visible=False, ) def on_crop_select(new_crop_mode): @@ -1228,25 +1328,37 @@ def on_crop_select(new_crop_mode): crop_mode.change(on_crop_select, inputs=crop_mode, outputs=crop_overlap) model_save_mode = gr.Radio( - ["replace", "append"], + [ + (loc.localize("training-tab-model-save-mode-radio-option-replace"), "replace"), + (loc.localize("training-tab-model-save-mode-radio-option-append"), "append"), + ], value="replace", - label="Model save mode", - info="'replace' will overwrite the original classification layer and 'append' will combine the original classification layer with the new one.", + label=loc.localize("training-tab-model-save-mode-radio-label"), + info=loc.localize("training-tab-model-save-mode-radio-info"), ) with gr.Row(): cache_file_state = gr.State() cache_mode = gr.Radio( - ["none", "load", "save"], value="none", label="Cache mode", info="Cache mode for training files." 
+ [ + (loc.localize("training-tab-cache-mode-radio-option-none"), "none"), + (loc.localize("training-tab-cache-mode-radio-option-load"), "load"), + (loc.localize("training-tab-cache-mode-radio-option-save"), "save"), + ], + value="none", + label=loc.localize("training-tab-cache-mode-radio-label"), + info=loc.localize("training-tab-cache-mode-radio-info"), ) with gr.Column(visible=False) as new_cache_file_row: - select_cache_file_directory_btn = gr.Button("Cache file directory") + select_cache_file_directory_btn = gr.Button( + loc.localize("training-tab-cache-select-directory-button-label") + ) with gr.Column(): cache_file_name = gr.Textbox( "train_cache.npz", visible=False, - info="The name of the cache_file.", + info=loc.localize("training-tab-cache-file-name-textbox-info"), ) def select_directory_and_update(): @@ -1267,7 +1379,7 @@ def select_directory_and_update(): ) with gr.Column(visible=False) as load_cache_file_row: - selected_cache_file_btn = gr.Button("Select cache file") + selected_cache_file_btn = gr.Button(loc.localize("training-tab-cache-select-file-button-label")) cache_file_input = gr.File(file_types=[".npz"], visible=False, interactive=False) def on_cache_file_selection_click(): @@ -1293,7 +1405,7 @@ def on_cache_mode_change(value): train_history_plot = gr.Plot() - start_training_button = gr.Button("Start training") + start_training_button = gr.Button(loc.localize("training-tab-start-training-button-label")) start_training_button.click( start_training, @@ -1325,7 +1437,7 @@ def on_cache_mode_change(value): ) def build_segments_tab(): - with gr.Tab("Segments"): + with gr.Tab(loc.localize("segments-tab-title")): audio_directory_state = gr.State() result_directory_state = gr.State() output_directory_state = gr.State() @@ -1334,7 +1446,9 @@ def select_directory_to_state_and_tb(): return (select_directory(collect_files=False),) * 2 with gr.Row(): - select_audio_directory_btn = gr.Button("Select audio directory (recursive)") + select_audio_directory_btn = gr.Button( + loc.localize("segments-tab-select-audio-input-directory-button-label") + ) selected_audio_directory_tb = gr.Textbox(show_label=False, interactive=False) select_audio_directory_btn.click( select_directory_to_state_and_tb, @@ -1343,9 +1457,13 @@ def select_directory_to_state_and_tb(): ) with gr.Row(): - select_result_directory_btn = gr.Button("Select result directory") + select_result_directory_btn = gr.Button( + loc.localize("segments-tab-select-results-input-directory-button-label") + ) selected_result_directory_tb = gr.Textbox( - show_label=False, interactive=False, placeholder="Same as audio directory if not selected" + show_label=False, + interactive=False, + placeholder=loc.localize("segments-tab-results-input-textbox-placeholder"), ) select_result_directory_btn.click( select_directory_to_state_and_tb, @@ -1354,9 +1472,11 @@ def select_directory_to_state_and_tb(): ) with gr.Row(): - select_output_directory_btn = gr.Button("Select output directory") + select_output_directory_btn = gr.Button(loc.localize("segments-tab-output-selection-button-label")) selected_output_directory_tb = gr.Textbox( - show_label=False, interactive=False, placeholder="Same as audio directory if not selected" + show_label=False, + interactive=False, + placeholder=loc.localize("segments-tab-output-selection-textbox-placeholder"), ) select_output_directory_btn.click( select_directory_to_state_and_tb, @@ -1365,17 +1485,37 @@ def select_directory_to_state_and_tb(): ) min_conf_slider = gr.Slider( - minimum=0.1, maximum=0.99, step=0.01, 
label="Minimum confidence", info="Minimum confidence threshold." + minimum=0.1, + maximum=0.99, + step=0.01, + label=loc.localize("segments-tab-min-confidence-slider-label"), + info=loc.localize("segments-tab-min-confidence-slider-info"), ) num_seq_number = gr.Number( - 100, label="Max number of segments", info="Maximum number of randomly extracted segments per species." + 100, + label=loc.localize("segments-tab-max-seq-number-label"), + info=loc.localize("segments-tab-max-seq-number-info"), + ) + seq_length_number = gr.Number( + 3.0, + label=loc.localize("segments-tab-seq-length-number-label"), + info=loc.localize("segments-tab-seq-length-number-info"), + ) + threads_number = gr.Number( + 4, + label=loc.localize("segments-tab-threads-number-label"), + info=loc.localize("segments-tab-threads-number-info"), ) - seq_length_number = gr.Number(3.0, label="Sequence length", info="Length of extracted segments in seconds.") - threads_number = gr.Number(4, label="Threads", info="Number of CPU threads.") - extract_segments_btn = gr.Button("Extract segments") + extract_segments_btn = gr.Button(loc.localize("segments-tab-extract-button-label")) - result_grid = gr.Matrix(headers=["File", "Execution"], elem_classes="mh-200") + result_grid = gr.Matrix( + headers=[ + loc.localize("segments-tab-result-dataframe-column-file-header"), + loc.localize("segments-tab-result-dataframe-column-execution-header"), + ], + elem_classes="mh-200", + ) extract_segments_btn.click( extract_segments, @@ -1392,14 +1532,13 @@ def select_directory_to_state_and_tb(): ) def build_species_tab(): - with gr.Tab("Species"): + with gr.Tab(loc.localize("species-tab-title")): output_directory_state = gr.State() - select_directory_btn = gr.Button("Output directory") - + select_directory_btn = gr.Button(loc.localize("species-tab-select-output-directory-button-label")) classifier_name = gr.Textbox( "species_list.txt", visible=False, - info="Name of the file, if not specified 'species_list.txt' will be used.", + info=loc.localize("species-tab-filename-textbox-label"), ) def select_directory_and_update_tb(): @@ -1422,13 +1561,16 @@ def select_directory_and_update_tb(): lat_number, lon_number, week_number, sf_thresh_number, yearlong_checkbox = species_list_coordinates() sortby = gr.Radio( - ["freq", "alpha"], + [ + (loc.localize("species-tab-sort-radio-option-frequency"), "freq"), + (loc.localize("species-tab-sort-radio-option-alphabetically"), "alpha"), + ], value="freq", - label="Sort by", - info="Sort species by occurrence frequency or alphabetically.", + label=loc.localize("species-tab-sort-radio-label"), + info=loc.localize("species-tab-sort-radio-info"), ) - start_btn = gr.Button("Generate species list") + start_btn = gr.Button(loc.localize("species-tab-start-button-label")) start_btn.click( run_species_list, inputs=[ @@ -1443,6 +1585,31 @@ def select_directory_and_update_tb(): ], ) + def build_settings(): + with gr.Tab(loc.localize("settings-tab-title")): + with gr.Row(): + options = [ + lang.rsplit(".", 1)[0] + for lang in os.listdir(os.path.join(os.path.dirname(sys.argv[0]), "lang")) + if lang.endswith(".json") + ] + languages_dropdown = gr.Dropdown( + options, + value=loc.TARGET_LANGUAGE, + label=loc.localize("settings-tab-language-dropdown-label"), + info=loc.localize("settings-tab-language-dropdown-info"), + interactive=True, + ) + + def on_language_change(value): + if value and value != loc.TARGET_LANGUAGE: + loc.set_language(value) + return gr.Button(visible=True) + + return gr.Button(visible=False) + + 
languages_dropdown.input(on_language_change, inputs=languages_dropdown, show_progress=False) + with gr.Blocks( css="gui/gui.css", js="gui/gui.js", @@ -1455,6 +1622,7 @@ def select_directory_and_update_tb(): build_train_tab() build_segments_tab() build_species_tab() + build_settings() build_footer() url = demo.queue(api_open=False).launch(prevent_thread_lock=True, quiet=True)[1] diff --git a/gui/gui.css b/gui/gui.css index 75551657..62354269 100644 --- a/gui/gui.css +++ b/gui/gui.css @@ -22,4 +22,4 @@ footer { border-radius: 50%; width: 1rem; height: 1rem; -} \ No newline at end of file +} diff --git a/gui/gui.js b/gui/gui.js index ea0f5229..6d380fc0 100644 --- a/gui/gui.js +++ b/gui/gui.js @@ -1,40 +1,54 @@ -function checkForNewerVersion() { - function sendGetRequest(url) { - return new Promise((resolve, reject) => { - const xhr = new XMLHttpRequest(); - xhr.open("GET", url); - xhr.onload = () => { - if (xhr.status === 200) { - resolve(xhr.responseText); - } else { - reject(new Error(`Request failed with status ${xhr.status}`)); - } - }; - xhr.onerror = () => { - reject(new Error("Request failed")); - }; - xhr.send(); - }); - } +function init() { + function checkForNewerVersion() { + console.log("Checking for newer version..."); + + function sendGetRequest(url) { + return new Promise((resolve, reject) => { + const xhr = new XMLHttpRequest(); + xhr.open("GET", url); + xhr.onload = () => { + if (xhr.status === 200) { + resolve(xhr.responseText); + } else { + reject(new Error(`Request failed with status ${xhr.status}`)); + } + }; + xhr.onerror = () => { + reject(new Error("Request failed")); + }; + xhr.send(); + }); + } + + const apiUrl = "https://api.github.com/repos/kahst/BirdNET-Analyzer/releases/latest"; - const apiUrl = "https://api.github.com/repos/kahst/BirdNET-Analyzer/releases/latest"; + sendGetRequest(apiUrl) + .then(response => { + const current_version = "v" + document.getElementById("current-version").textContent; + const response_object = JSON.parse(response); + const latest_version = response_object.tag_name; - sendGetRequest(apiUrl) - .then(response => { - const current_version = "v" + document.getElementById("current-version").textContent; - const response_object = JSON.parse(response); - const latest_version = response_object.tag_name; + if (current_version !== latest_version) { + const updateNotification = document.getElementById("update-available"); - if (current_version !== latest_version) { - const updateNotification = document.getElementById("update-available"); + updateNotification.style.display = "block"; + const linkElement = updateNotification.getElementsByTagName("a")[0] + linkElement.href = response_object.html_url; + linkElement.target = "_blank"; + } + }) + .catch(error => { + console.error(error); + }); + } + + function overwriteStyles() { + console.log("Overwriting styles..."); + const styles = document.createElement("style"); + styles.innerHTML = "@media (width <= 1024px) { .app {max-width: initial !important;}}"; + document.head.appendChild(styles); + } - updateNotification.style.display = "block"; - const linkElement = updateNotification.getElementsByTagName("a")[0] - linkElement.href = response_object.html_url; - linkElement.target = "_blank"; - } - }) - .catch(error => { - console.error(error); - }); + checkForNewerVersion(); + overwriteStyles(); } \ No newline at end of file diff --git a/lang/de.json b/lang/de.json new file mode 100644 index 00000000..2ae24bda --- /dev/null +++ b/lang/de.json @@ -0,0 +1,166 @@ +{ + "single-tab-title": "Einzelanalyse", + 
"single-audio-label": "Datei", + "single-tab-output-header-start": "Start (s)", + "single-tab-output-header-end": "Ende (s)", + "single-tab-output-header-sci-name": "Wissenschaftlicher Name", + "single-tab-output-header-common-name": "Trivialname", + "single-tab-output-header-confidence": "Konfidenz", + "inference-settings-accordion-label": "Inferenzeinstellungen", + "inference-settings-confidence-slider-label": "Konfidenz-Schwellenwert", + "inference-settings-confidence-slider-info": "Passen Sie den Schwellenwert an, um Ergebnisse mit einem Wert unter diesem Niveau zu ignorieren.", + "inference-settings-sensitivity-slider-label": "Sensitivität", + "inference-settings-sensitivity-slider-info": "Passen Sie die Verteilung der Vorhersagewerte an. Höhere Werte führen zu höheren Scores.", + "inference-settings-overlap-slider-label": "Überlappung (s)", + "inference-settings-overlap-slider-info": "BirdNET verwendet 3s-Segmente. Bestimmt die Überlappung mit dem vorherigen Segment.", + "inference-settings-fmin-number-label": "Minimale Frequenz des Bandpassfilters (Hz)", + "inference-settings-fmin-number-info": "Beachten Sie, dass Frequenzgrenzen auch während des Trainings verwendet werden sollten, um hier wirksam zu sein.", + "inference-settings-fmax-number-label": "Maximale Frequenz des Bandpassfilters (Hz)", + "inference-settings-fmax-number-info": "Beachten Sie, dass Frequenzgrenzen auch während des Trainings verwendet werden sollten, um hier wirksam zu sein.", + "species-list-accordion-label": "Artenauswahl", + "species-list-radio-label": "Artenliste", + "species-list-radio-info": "Filtern Sie Arten, die in der Ausgabe enthalten sind.", + "species-list-radio-option-custom-list": "Benutzerdefinierte Artenliste", + "species-list-radio-option-predict-list": "Arten nach Standort", + "species-list-radio-option-custom-classifier": "Benutzerdefinierter Klassifikator", + "species-list-radio-option-all": "Alle Arten", + "species-list-custom-list-file-label": "Datei", + "species-list-coordinates-lat-number-label": "Breitengrad", + "species-list-coordinates-lat-number-info": "Breitengrad des Aufnahmestandorts.", + "species-list-coordinates-lon-number-label": "Längengrad", + "species-list-coordinates-lon-number-info": "Längengrad des Aufnahmestandorts.", + "species-list-coordinates-yearlong-checkbox-label": "Ganzjährig", + "species-list-coordinates-week-slider-label": "Woche", + "species-list-coordinates-week-slider-info": "Geben Sie die Woche des Jahres an, in der die Aufnahme gemacht wurde. Jeder Monat ist dabei in vier Wochen unterteilt wird. 
Wählen Sie einen Wert von 1 bis 48.", + "species-list-coordinates-threshold-slider-label": "Schwellenwert für Standortfilter", + "species-list-coordinates-threshold-slider-info": "Mindestwahrscheinlichkeit für das Auftreten einer Art, um eingeschlossen zu werden.", + "species-list-custom-classifier-selection-button-label": "Klassifikator auswählen", + "analyze-locale-dropdown-label": "Lokalisierung", + "analyze-locale-dropdown-info": "Lokalisierung für die übersetzten Trivialnamen der Arten in der Ausgabe", + "analyze-start-button-label": "Analysieren", + "multi-tab-title": "Batch-Analyse", + "multi-tab-input-selection-button-label": "Eingabeverzeichnis auswählen (rekursiv)", + "multi-tab-samples-dataframe-column-subpath-header": "Unterpfad", + "multi-tab-samples-dataframe-column-duration-header": "Länge", + "multi-tab-samples-dataframe-no-files-found": "Keine Dateien gefunden", + "multi-tab-output-selection-button-label": "Ausgabeverzeichnis auswählen", + "multi-tab-output-textbox-label": "Ausgabeverzeichnis", + "multi-tab-output-textbox-placeholder": "Wenn nicht ausgewählt, wird das Eingabeverzeichnis verwendet.", + "multi-tab-output-accordion-label": "Ausgabeeinstellungen", + "multi-tab-output-radio-label": "Ergebnistyp", + "multi-tab-output-radio-info": "Geben Sie das Ausgabeformat der Klassifikationen an.", + "multi-tab-output-combine-tables-checkbox-label": "Auswahltabellen kombinieren", + "multi-tab-output-combine-tables-checkbox-info": "Aktivieren Sie diese Option, um alle Auswahltabellen in eine einzige Tabelle zusammenzuführen.", + "multi-tab-output-combined-table-name-textbox-label": "Dateiname der kombinierten Tabelle", + "multi-tab-output-combined-table-name-textbox-info": "Name der kombinierten Auswahltabelle.", + "multi-tab-skip-existing-checkbox-label": "Existierende Ergebnisse überspringen", + "multi-tab-skip-existing-checkbox-info": "Dateien überspringen, die bereits ein Ergebnis haben.", + "multi-tab-batchsize-number-label": "Batch-Größe", + "multi-tab-batchsize-number-info": "Anzahl der gleichzeitig zu verarbeitenden Proben.", + "multi-tab-threads-number-label": "Threads", + "multi-tab-threads-number-info": "Anzahl der CPU-Threads.", + "multi-tab-result-dataframe-column-file-header": "Datei", + "multi-tab-result-dataframe-column-execution-header": "Ausführung", + "training-tab-title": "Trainieren", + "training-tab-input-selection-button-label": "Trainingsdaten auswählen", + "training-tab-classes-dataframe-column-classes-header": "Klassen", + "training-tab-select-output-button-label": "Ausgabeverzeichnis für Klassifikator auswählen", + "training-tab-classifier-textbox-info": "Der Name des neuen Klassifikators.", + "training-tab-output-format-radio-label": "Modellausgabeformat", + "training-tab-output-format-radio-info": "Format des trainierten Klassifikators.", + "training-tab-output-format-both": "beide", + "training-tab-autotune-checkbox-label": "Autotune verwenden", + "training-tab-autotune-checkbox-info": "Sucht nach den besten Hyperparametern, benötigt jedoch mehr Zeit.", + "training-tab-autotune-trials-number-label": "Versuche", + "training-tab-autotune-trials-number-info": "Anzahl der Trainingsläufe für die Hyperparametersuche.", + "training-tab-autotune-executions-number-label": "Ausführungen pro Versuch", + "training-tab-autotune-executions-number-info": "Die Anzahl der Wiederholungen für den Trainingslauf mit einem Satz von Hyperparametern (dies reduziert die Varianz).", + "training-tab-epochs-number-label": "Epochen", + "training-tab-epochs-number-info": 
"Anzahl der Trainingsepochen.", + "training-tab-batchsize-number-label": "Batch-Größe", + "training-tab-batchsize-number-info": "Anzahl der Samples, die in einem Batch verarbeitet werden.", + "training-tab-learningrate-number-label": "Lernrate", + "training-tab-learningrate-number-info": "Lernrate für den Optimizer.", + "training-tab-upsampling-radio-label": "Upsampling-Modus", + "training-tab-upsampling-radio-info": "Balancieren Sie die Trainingsdaten durch Upsampling von Minderheitenklassen aus.", + "training-tab-upsampling-radio-option-repeat": "wiederholen", + "training-tab-upsampling-radio-option-mean": "Durchschnitt", + "training-tab-upsampling-ratio-slider-label": "Upsampling-Verhältnis", + "training-tab-upsampling-ratio-slider-info": "Das Mindestverhältnis für eine Minderheitenklasse im Vergleich zur Mehrheitsklasse nach dem Upsampling.", + "training-tab-hiddenunits-number-label": "Versteckte Einheiten", + "training-tab-hiddenunits-number-info": "Anzahl der versteckten Einheiten. Wenn auf >0 gesetzt, wird ein Klassifikator mit zwei Schichten verwendet.", + "training-tab-use-mixup-checkbox-label": "Mixup verwenden", + "training-tab-use-mixup-checkbox-info": "Mixup ist eine Technik zur Datenvermehrung, die neue Daten durch Mischen zweier Beispiele und ihrer Label erzeugt.", + "training-tab-crop-mode-radio-label": "Zuschneidemodus", + "training-tab-crop-mode-radio-info": "Anpassen, wie Audiodaten zugeschnitten werden, die länger als der Modellinput sind.", + "training-tab-crop-mode-radio-option-center": "Mitte", + "training-tab-crop-mode-radio-option-first": "Erster", + "training-tab-crop-mode-radio-option-segments": "Segmente", + "training-tab-crop-overlap-number-label": "Überlappung des Zuschnittsegments (s)", + "training-tab-crop-overlap-number-info": "Stellen Sie die Überlappung der Trainingssamples ein.", + "training-tab-model-save-mode-radio-label": "Modellspeichermodus", + "training-tab-model-save-mode-radio-info": "'Ersetzen' wird die ursprüngliche Klassifikationsschicht überschreiben, so dass nur die trainierten Klassen bleiben, 'Anhängen' wird die ursprüngliche Klassifikationsschicht mit der neuen kombinieren.", + "training-tab-model-save-mode-radio-option-replace": "ersetzen", + "training-tab-model-save-mode-radio-option-append": "anhängen", + "training-tab-cache-mode-radio-label": "Cache-Modus für Trainingsdaten", + "training-tab-cache-mode-radio-info": "Passen Sie an, wie Trainingsdaten zwischengespeichert werden. 
Wählen Sie 'keine' für kein Caching, 'laden' zum Laden aus einer Datei und 'speichern' zum Speichern der komprimierten Trainingsdaten.", + "training-tab-cache-mode-radio-option-none": "keine", + "training-tab-cache-mode-radio-option-load": "laden", + "training-tab-cache-mode-radio-option-save": "speichern", + "training-tab-cache-select-directory-button-label": "Wählen Sie das Verzeichnis der Cache-Datei", + "training-tab-cache-file-name-textbox-info": "Der Name der Cache-Datei.", + "training-tab-cache-select-file-button-label": "Wählen Sie den Speicherort der Cache-Datei", + "training-tab-start-training-button-label": "Training starten", + "training-tab-early-stoppage-msg": "Frühzeitig gestoppt - Validierungsmetrik verbessert sich nicht.", + "segments-tab-title": "Segmente", + "segments-tab-select-audio-input-directory-button-label": "Wählen Sie das Audioverzeichnis (rekursiv)", + "segments-tab-select-results-input-directory-button-label": "Wählen Sie das Ergebnisverzeichnis", + "segments-tab-results-input-textbox-placeholder": "Gleich wie das Audioverzeichnis, wenn nicht ausgewählt", + "segments-tab-output-selection-button-label": "Wählen Sie das Ausgabeverzeichnis", + "segments-tab-output-selection-textbox-placeholder": "Gleich wie das Audioverzeichnis, wenn nicht ausgewählt", + "segments-tab-min-confidence-slider-label": "Konfidenz-Schwellenwert", + "segments-tab-min-confidence-slider-info": "Wählt nur Segmente mit einem Wert über diesem Schwellenwert aus.", + "segments-tab-max-seq-number-label": "Maximale Anzahl von Segmenten", + "segments-tab-max-seq-number-info": "Maximale Anzahl von zufällig extrahierten Segmenten pro Art.", + "segments-tab-seq-length-number-label": "Sequenzlänge (s)", + "segments-tab-seq-length-number-info": "Länge der extrahierten Segmente in Sekunden.", + "segments-tab-threads-number-label": "Threads", + "segments-tab-threads-number-info": "Anzahl der CPU-Threads.", + "segments-tab-extract-button-label": "Segmente extrahieren", + "segments-tab-result-dataframe-column-file-header": "Datei", + "segments-tab-result-dataframe-column-execution-header": "Ausführung", + "species-tab-title": "Arten", + "species-tab-select-output-directory-button-label": "Wählen Sie das Ausgabeverzeichnis", + "species-tab-filename-textbox-label": "Name der Datei, wenn nicht angegeben, wird 'species_list.txt' verwendet.", + "species-tab-sort-radio-label": "Sortieren nach", + "species-tab-sort-radio-info": "Sortiert die Arten nach Häufigkeit des Vorkommens oder alphabetisch.", + "species-tab-sort-radio-option-frequency": "Häufigkeit", + "species-tab-sort-radio-option-alphabetically": "alphabetisch", + "species-tab-finish-info": "Artenliste gespeichert unter", + "species-tab-start-button-label": "Artenliste erstellen", + "settings-tab-title": "Einstellungen", + "settings-tab-language-dropdown-label": "GUI Sprache", + "settings-tab-language-dropdown-info": "Wird erst nach Neustart der Anwendung aktiv.", + "validation-no-file-selected": "Bitte wählen Sie eine Datei aus.", + "validation-no-directory-selected": "Bitte wählen Sie ein Verzeichnis.", + "validation-no-species-list-selected": "Bitte wählen Sie eine Artenliste.", + "validation-no-custom-classifier-selected": "Kein benutzerdefinierter Klassifikator ausgewählt.", + "validation-no-audio-files-found": "Keine Audiodateien gefunden.", + "validation-no-training-data-selected": "Bitte wählen Sie Ihre Trainingsdaten.", + "validation-no-directory-for-classifier-selected": "Bitte wählen Sie ein Verzeichnis für den Klassifikator.", + 
"validation-no-valid-classifier-name": "Bitte geben Sie einen gültigen Namen für den Klassifikator an.", + "validation-no-valid-epoch-number": "Bitte geben Sie eine gültige Anzahl von Epochen an.", + "validation-no-valid-batch-size": "Bitte geben Sie eine gültige Batch-Größe an.", + "validation-no-valid-learning-rate": "Bitte geben Sie eine gültige Lernrate an.", + "validation-no-valid-frequency": "Bitte geben Sie eine gültige Frequenz an", + "validation-no-audio-directory-selected": "Kein Audioverzeichnis ausgewählt", + "progress-preparing": "Vorbereiten", + "progress-starting": "Starten", + "progress-build-classifier": "Daten laden & Klassifikator erstellen", + "progress-loading-data": "Daten für", + "progress-saving": "Gespeichert unter", + "progress-training": "Modell trainieren", + "progress-autotune": "Autotune läuft", + "progress-search": "Dateien suchen", + "footer-help": "Dokumentation und Support finden Sie unter" +} diff --git a/lang/en.json b/lang/en.json new file mode 100644 index 00000000..bf022235 --- /dev/null +++ b/lang/en.json @@ -0,0 +1,166 @@ +{ + "single-tab-title": "Single analysis", + "single-audio-label": "file", + "single-tab-output-header-start": "Start (s)", + "single-tab-output-header-end": "End (s)", + "single-tab-output-header-sci-name": "Scientific name", + "single-tab-output-header-common-name": "Common name", + "single-tab-output-header-confidence": "Confidence", + "inference-settings-accordion-label": "Inference settings", + "inference-settings-confidence-slider-label": "Minimum confidence", + "inference-settings-confidence-slider-info": "Adjust the threshold to ignore results with confidence below this level.", + "inference-settings-sensitivity-slider-label": "Sensitivity", + "inference-settings-sensitivity-slider-info": "Adjust the distribution of prediction scores. Higher values result in higher scores.", + "inference-settings-overlap-slider-label": "Overlap (s)", + "inference-settings-overlap-slider-info": "BirdNET uses 3s segments. Determines the overlap with previous segment.", + "inference-settings-fmin-number-label": "Minimum bandpass frequency (Hz)", + "inference-settings-fmin-number-info": "Note that frequency cut-offs should also be used during training in order to be effective here.", + "inference-settings-fmax-number-label": "Maximum bandpass frequency (Hz)", + "inference-settings-fmax-number-info": "Note that frequency cut-offs should also be used during training in order to be effective here.", + "species-list-accordion-label": "Species selection", + "species-list-radio-label": "Species list", + "species-list-radio-info": "Filter species that are included in output.", + "species-list-radio-option-custom-list": "Custom species list", + "species-list-radio-option-predict-list": "Species by location", + "species-list-radio-option-custom-classifier": "Custom classifier", + "species-list-radio-option-all": "All species", + "species-list-custom-list-file-label": "File", + "species-list-coordinates-lat-number-label": "Latitude", + "species-list-coordinates-lat-number-info": "Recording location latitude.", + "species-list-coordinates-lon-number-label": "Longitude", + "species-list-coordinates-lon-number-info": "Recording location longitude.", + "species-list-coordinates-yearlong-checkbox-label": "Year-round", + "species-list-coordinates-week-slider-label": "Week", + "species-list-coordinates-week-slider-info": "Specify the week of the year the recording was made, using a simplified system where each month is divided into four weeks. 
Choose a value from 1 to 48.", + "species-list-coordinates-threshold-slider-label": "Location filter threshold", + "species-list-coordinates-threshold-slider-info": "Minimum occurrence probability for a species to be included.", + "species-list-custom-classifier-selection-button-label": "Select classifier", + "analyze-locale-dropdown-label": "Locale", + "analyze-locale-dropdown-info": "Locale for the translated species common names in the output", + "analyze-start-button-label": "Analyze", + "multi-tab-title": "Batch analysis", + "multi-tab-input-selection-button-label": "Select input directory (recursive)", + "multi-tab-samples-dataframe-column-subpath-header": "Subpath", + "multi-tab-samples-dataframe-column-duration-header": "Length", + "multi-tab-samples-dataframe-no-files-found": "No files found", + "multi-tab-output-selection-button-label": "Select output directory", + "multi-tab-output-textbox-label": "Output directory", + "multi-tab-output-textbox-placeholder": "If not selected, the input directory will be used.", + "multi-tab-output-accordion-label": "Output settings", + "multi-tab-output-radio-label": "Result type", + "multi-tab-output-radio-info": "Specify the output format of classifications.", + "multi-tab-output-combine-tables-checkbox-label": "Combine selection tables", + "multi-tab-output-combine-tables-checkbox-info": "Check this option to merge all selection tables into a single table.", + "multi-tab-output-combined-table-name-textbox-label": "Combined table filename", + "multi-tab-output-combined-table-name-textbox-info": "Name of the combined selection table.", + "multi-tab-skip-existing-checkbox-label": "Skip existing results", + "multi-tab-skip-existing-checkbox-info": "Skip files that already have a result.", + "multi-tab-batchsize-number-label": "Batch size", + "multi-tab-batchsize-number-info": "Number of samples to process at the same time.", + "multi-tab-threads-number-label": "Threads", + "multi-tab-threads-number-info": "Number of CPU threads.", + "multi-tab-result-dataframe-column-file-header": "File", + "multi-tab-result-dataframe-column-execution-header": "Execution", + "training-tab-title": "Train", + "training-tab-input-selection-button-label": "Select training data", + "training-tab-classes-dataframe-column-classes-header": "Classes", + "training-tab-select-output-button-label": "Select classifier output", + "training-tab-classifier-textbox-info": "The name of the new classifier.", + "training-tab-output-format-radio-label": "Model output format", + "training-tab-output-format-radio-info": "Format of the trained classifier.", + "training-tab-output-format-both": "both", + "training-tab-autotune-checkbox-label": "Use autotune", + "training-tab-autotune-checkbox-info": "Searches for the best hyperparameters, but takes more time.", + "training-tab-autotune-trials-number-label": "Trials", + "training-tab-autotune-trials-number-info": "Number of training runs for hyperparameter tuning.", + "training-tab-autotune-executions-number-label": "Executions per trial", + "training-tab-autotune-executions-number-info": "The number of times a training run with a set of hyperparameters is repeated during hyperparameter tuning (this reduces the variance).", + "training-tab-epochs-number-label": "Epochs", + "training-tab-epochs-number-info": "Number of training epochs.", + "training-tab-batchsize-number-label": "Batch size", + "training-tab-batchsize-number-info": "Number of samples to process in a batch.", + "training-tab-learningrate-number-label": "Learning rate", + 
"training-tab-learningrate-number-info": "Learning rate for the optimizer.", + "training-tab-upsampling-radio-label": "Upsampling mode", + "training-tab-upsampling-radio-info": "Balance train data by upsampling minority classes.", + "training-tab-upsampling-radio-option-repeat": "repeat", + "training-tab-upsampling-radio-option-mean": "mean", + "training-tab-upsampling-ratio-slider-label": "Upsampling ratio", + "training-tab-upsampling-ratio-slider-info": "The minimum ratio for a minority class compared to the majority class after upsampling.", + "training-tab-hiddenunits-number-label": "Hidden units", + "training-tab-hiddenunits-number-info": "Number of hidden units. If set to >0, a two-layer classifier is used.", + "training-tab-use-mixup-checkbox-label": "Use mixup", + "training-tab-use-mixup-checkbox-info": "Mixup is a data augmentation technique that generates new samples by mixing two samples and their labels.", + "training-tab-crop-mode-radio-label": "Crop mode", + "training-tab-crop-mode-radio-info": "Adjust how to crop samples that are longer than the model input.", + "training-tab-crop-mode-radio-option-center": "center", + "training-tab-crop-mode-radio-option-first": "first", + "training-tab-crop-mode-radio-option-segments": "segments", + "training-tab-crop-overlap-number-label": "Crop segment overlap (s)", + "training-tab-crop-overlap-number-info": "Adjust the overlap of training samples.", + "training-tab-model-save-mode-radio-label": "Model save mode", + "training-tab-model-save-mode-radio-info": "'replace' will overwrite the original classification layer, leaving only the trained classes, and 'append' will combine the original classification layer with the new one.", + "training-tab-model-save-mode-radio-option-replace": "replace", + "training-tab-model-save-mode-radio-option-append": "append", + "training-tab-cache-mode-radio-label": "Training data cache mode", + "training-tab-cache-mode-radio-info": "Adjust how to cache training data. 
Select 'none' for no caching, 'load' to load from file, and 'save' to save the compressed training data.", + "training-tab-cache-mode-radio-option-none": "none", + "training-tab-cache-mode-radio-option-load": "load", + "training-tab-cache-mode-radio-option-save": "save", + "training-tab-cache-select-directory-button-label": "Select the cache file directory", + "training-tab-cache-file-name-textbox-info": "The name of the cache file.", + "training-tab-cache-select-file-button-label": "Select cache file location", + "training-tab-start-training-button-label": "Start training", + "training-tab-early-stoppage-msg": "Stopped early - validation metric not improving.", + "segments-tab-title": "Segments", + "segments-tab-select-audio-input-directory-button-label": "Select audio directory (recursive)", + "segments-tab-select-results-input-directory-button-label": "Select result directory", + "segments-tab-results-input-textbox-placeholder": "Same as audio directory if not selected", + "segments-tab-output-selection-button-label": "Select output directory", + "segments-tab-output-selection-textbox-placeholder": "Same as audio directory if not selected", + "segments-tab-min-confidence-slider-label": "Minimum confidence", + "segments-tab-min-confidence-slider-info": "Select only segments with a confidence above this threshold.", + "segments-tab-max-seq-number-label": "Max number of segments", + "segments-tab-max-seq-number-info": "Maximum number of randomly extracted segments per species.", + "segments-tab-seq-length-number-label": "Sequence length (s)", + "segments-tab-seq-length-number-info": "Length of the extracted segments in seconds.", + "segments-tab-threads-number-label": "Threads", + "segments-tab-threads-number-info": "Number of CPU threads.", + "segments-tab-extract-button-label": "Extract segments", + "segments-tab-result-dataframe-column-file-header": "File", + "segments-tab-result-dataframe-column-execution-header": "Execution", + "species-tab-title": "Species", + "species-tab-select-output-directory-button-label": "Select output directory", + "species-tab-filename-textbox-label": "Name of the file, if not specified 'species_list.txt' will be used.", + "species-tab-sort-radio-label": "Sort by", + "species-tab-sort-radio-info": "Sort species by occurrence frequency or alphabetically.", + "species-tab-sort-radio-option-frequency": "frequency", + "species-tab-sort-radio-option-alphabetically": "alphabetically", + "species-tab-finish-info": "Species list saved at", + "species-tab-start-button-label": "Generate species list", + "settings-tab-title": "Settings", + "settings-tab-language-dropdown-label": "GUI Language", + "settings-tab-language-dropdown-info": "Changes will only take effect after restarting the application.", + "validation-no-file-selected": "Please select a file.", + "validation-no-directory-selected": "Please select a directory.", + "validation-no-species-list-selected": "Please select a species list.", + "validation-no-custom-classifier-selected": "No custom classifier selected.", + "validation-no-audio-files-found": "No audio files found.", + "validation-no-training-data-selected": "Please select your training data.", + "validation-no-directory-for-classifier-selected": "Please select a directory for the classifier.", + "validation-no-valid-classifier-name": "Please enter a valid name for the classifier.", + "validation-no-valid-epoch-number": "Please enter a valid number of epochs.", + "validation-no-valid-batch-size": "Please enter a valid batch size.", + 
"validation-no-valid-learning-rate": "Please enter a valid learning rate.", + "validation-no-valid-frequency": "Please enter a valid frequency in", + "validation-no-audio-directory-selected": "No audio directory selected", + "progress-preparing": "Preparing", + "progress-starting": "Starting", + "progress-build-classifier": "Loading data & building classifier", + "progress-loading-data": "Loading data for", + "progress-saving": "Saving at", + "progress-training": "Training model", + "progress-autotune": "Autotune in progress", + "progress-search": "Searching files", + "footer-help": "For docs and support visit" +} \ No newline at end of file diff --git a/localization.py b/localization.py new file mode 100644 index 00000000..2553508c --- /dev/null +++ b/localization.py @@ -0,0 +1,64 @@ +import json +import os + +FALLBACK_LANGUAGE = "en" +LANGUAGE_DIR = "lang/" +LANGUAGE_LOOKUP = {} +TARGET_LANGUAGE = FALLBACK_LANGUAGE +GUI_SETTINGS_PATH = "gui-settings.json" + +def ensure_settings_file(): + if not os.path.exists(GUI_SETTINGS_PATH): + with open(GUI_SETTINGS_PATH, "w") as f: + settings = {"language-id": FALLBACK_LANGUAGE} + f.write(json.dumps(settings, indent=4)) + + +def load_localization(): + global LANGUAGE_LOOKUP + global TARGET_LANGUAGE + + ensure_settings_file() + + try: + TARGET_LANGUAGE = json.load(open(GUI_SETTINGS_PATH, encoding="utf-8"))["language-id"] + except FileNotFoundError: + print(f"gui-settings.json not found. Using fallback language {FALLBACK_LANGUAGE}.") + + try: + with open(f"{LANGUAGE_DIR}/{TARGET_LANGUAGE}.json", "r", encoding="utf-8") as f: + LANGUAGE_LOOKUP = json.load(f) + except FileNotFoundError: + print( + f"Language file for {TARGET_LANGUAGE} not found in {LANGUAGE_DIR}. Using fallback language {FALLBACK_LANGUAGE}." + ) + + if TARGET_LANGUAGE != FALLBACK_LANGUAGE: + with open(f"{LANGUAGE_DIR}/{FALLBACK_LANGUAGE}.json", "r") as f: + fallback = json.load(f) + + for key, value in fallback.items(): + if key not in LANGUAGE_LOOKUP: + LANGUAGE_LOOKUP[key] = value + + +def localize(key: str) -> str: + return LANGUAGE_LOOKUP.get(key, key) + + +def set_language(language: str): + if language: + ensure_settings_file() + settings = {} + + try: + with open(GUI_SETTINGS_PATH, "r+", encoding="utf-8") as f: + settings = json.load(f) + settings["language-id"] = language + f.seek(0) + json.dump(settings, f, indent=4) + f.truncate() + + except FileNotFoundError: + pass +