diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..f8ec71a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,19 @@ +--- +name: Bug Report +about: 버그를 레포트 할 때 사용하는 템플릿 +title: "[BUG] " +labels: '' +assignees: '' + +--- + +## Describe the bug +- + +## To Reproduce +- + +## Expected Behavior +- + +## Screenshots (Optional) \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..2643aaa --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,14 @@ +--- +name: Feature Request +about: 새로운 기능을 추가할 때 사용하는 템플릿 +title: "[FEAT] " +labels: '' +assignees: '' + +--- + +## Background +- + +## To Do +- [ ] \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..c8f0cae --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,12 @@ +## Overview +- + +## Change Log +- + +## To Reviewer +- + +## Issue Tags +- closed: # +- see also: # (optional) diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5529439 --- /dev/null +++ b/.gitignore @@ -0,0 +1,312 @@ +### JupyterNotebooks ### +# gitignore template for Jupyter Notebooks +# website: http://jupyter.org/ + +.ipynb_checkpoints +*/.ipynb_checkpoints/* + +# IPython +profile_default/ +ipython_config.py + +# Remove previous ipynb_checkpoints +# git rm -r .ipynb_checkpoints/ +dataset/ +pts/ +wandb/ +*.ipynb +*.png +*.pt +datapreprocess/code_prac.ipynb +datapreprocess/rt_code_test.ipynb +datapreprocess/csv/abnormal/train/*.csv +datapreprocess/csv/abnormal/val/*.csv +datapreprocess/csv/normal/train/*.csv +datapreprocess/csv/normal/val/*.csv +datapreprocess/json/abnormal/train/ +datapreprocess/json/abnormal/val/ +datapreprocess/json/abnormal/TS_03.이상행동_14.교통약자_train/ 
+datapreprocess/json/abnormal/TS_03.이상행동_14.교통약자_val/ +datapreprocess/json/normal/train/ +datapreprocess/json/normal/val/ +datapreprocess/*.csv +datapreprocess/*.pt +datapreprocess/*.pth +datapreprocess/npy +datapreprocess/nohup.out +model_train/code_prac.ipynb +model_train/model.h5 +model_train/pytorch_model.pth +model_train/nohup.out +model_train/*.ipynb +model_train/wandb +app/testitems/ +pths/ +app/models/pts +app/models/yolov8n-pose.pt +app/yolov8n-pose.pt +.netrc + +### macOS ### +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +### macOS Patch ### +# iCloud generated files +*.icloud + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook + +# IPython + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +### Python Patch ### +# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration +poetry.toml + +# ruff +.ruff_cache/ + +# LSP config files +pyrightconfig.json + +### VirtualEnv ### +# Virtualenv +# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ +[Bb]in +[Ii]nclude +[Ll]ib +[Ll]ib64 +[Ll]ocal +[Ss]cripts +pyvenv.cfg +pip-selfcheck.json + +### VisualStudioCode ### +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +!.vscode/*.code-snippets + +# Local History for Visual Studio Code +.history/ + +# Built Visual Studio Code Extensions +*.vsix + +### VisualStudioCode Patch ### +# Ignore all local history of files +.history +.ionide + +### Windows ### +# Windows thumbnail cache files +Thumbs.db +Thumbs.db:encryptable +ehthumbs.db +ehthumbs_vista.db + +# Dump file +*.stackdump + +# Folder config file +[Dd]esktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msix +*.msm +*.msp + +# Windows shortcuts +*.lnk + 
+# End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,macos,windows,jupyternotebooks,virtualenv,python \ No newline at end of file diff --git a/.gitmessage b/.gitmessage new file mode 100644 index 0000000..c10e929 --- /dev/null +++ b/.gitmessage @@ -0,0 +1,24 @@ +################ +# <타입> : <제목> 의 형식으로 제목을 아래 공백줄에 작성 +# 제목은 50자 이내 / 변경사항이 "무엇"인지 명확히 작성 / 끝에 마침표 금지 +# 예) feat : 로그인 기능 추가 + +# 바로 아래 공백은 지우지 마세요 (제목과 본문의 분리를 위함) + +################ +# 본문(구체적인 내용)을 아랫줄에 작성 +# 여러 줄의 메시지를 작성할 땐 "-"로 구분 (한 줄은 72자 이내) + +################ +# 꼬릿말(footer)을 아랫줄에 작성 (현재 커밋과 관련된 이슈 번호 추가 등) +# 예) Close #7 + +################ +# feat : 새로운 기능 추가 +# fix : 버그 수정 +# docs : 문서 수정 +# test : 테스트 코드 추가 +# refact : 코드 리팩토링 +# style : 코드 의미에 영향을 주지 않는 변경사항 +# chore : 빌드 부분 혹은 패키지 매니저 수정사항 +################ diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..6c7e06b --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,52 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files +- repo: https://github.com/pycqa/flake8 + rev: 7.0.0 + hooks: + - id: flake8 +- repo: https://github.com/myint/autoflake + rev: v2.2.1 + hooks: + - id: autoflake + args: ["--in-place", "--remove-all-unused-imports", "--remove-unused-variables", "--expand-star-imports", "--ignore-init-module-imports"] + files: \.py$ + exclude: | + (?x)( + ^.git/| + ^output/| + ^plugins + ) +- repo: https://github.com/pycqa/isort + rev: '5.13.2' + hooks: + - id: isort + language: python + args: ["--filter-files"] + files: \.py$ +- repo: https://github.com/psf/black + rev: '24.2.0' + 
hooks: + - id: black + args: ["--line-length=120"] + exclude: | + (?x)( + ^.git/| + ^output/| + ^plugins| + ^plugins + ) +exclude: | + (?x)( + ^.git/| + ^output/| + ^plugins| + ^plugins + ) \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..c001125 --- /dev/null +++ b/README.md @@ -0,0 +1,349 @@ +# 실시간 무인매장 이상행동 탐지 +cv-06 혁신비전테크(IVT) 최종 프로젝트 + +## 🎞️ 프로젝트 개요 + + + +## 💁🏻‍♂️ 팀 구성 및 역할 +| 이 름 | 역 할 | +| ----- | ----- | +| 김시웅 | 데이터셋 구성 및 코드 작성, 데이터 전처리(백본 feature 추출), VMAE 계열 모델 구현 | +| 박정민 | 데이터 탐색, FastAPI 기반 백엔드 기능 구현, 배포 | +| 백광현 | 프로젝트 기획 / 서비스 아키텍처 설계, API 구현 및 실시간 개발, LSTM-AE 모델링 / 학습 및 추론, 웹 서버 / 모델서버 분리, 배포 | +| 이동형 | 웹 개발 부분 버그 수정 및 리팩토링 | +| 조형서 | 웹 개발, 모델 조사 | +| 최수진 | 모델링, 데이터 전처리, YOLO 계열 모델 구현 | +- 개발 타임라인 +

+ +

+ +## 📍 문제 정의 + + +

+ + + +

+ +⇒ CCTV 를 활용하여 이상행동을 자동으로 탐지하고 증거확보 및 실시간 알람을 준다면 이 문제를 해소할 수 있지 않을까? + +## 💡 해결 방안 + + + +- 녹화된 영상을 직접 돌려보는 **시간과 비용 발생** + + ⇒ 업로드 영상을 분석 후 **타임 스탬프와 스크린 샷** 제공 + +- CCTV가 있더라도 관리자가 24시간 확인할 수 없어 **현장을 놓치는 문제** 발생 + + ⇒ **실시간 영상 분석**을 통해 **타임 스탬프, 스크린 샷** 그리고 **알람** 기능을 제공 + + + + +- **대용량, 장시간** CCTV 데이터를 사람보다 빠르게 처리하도록 속도 개선 +- 무인 매장에서 발생할 수 있는 **다양한 상황**들을 잘 감지할 수 있도록 개선 +- 이상 상황은 정확하게 판단하면서 **오탐률을 낮추는 방향**으로 개선 + +--- + +## 📼 Data + +### **AI Hub 실내(편의점, 매장) 사람 [이상](https://www.aihub.or.kr/aihubdata/data/view.do?currMenu=115&topMenu=100&aihubDataSe=data&dataSetSn=71550) / [정상](https://www.aihub.or.kr/aihubdata/data/view.do?currMenu=115&topMenu=100&aihubDataSe=data&dataSetSn=71549) 행동 데이터** + +- 특징 + - 이상 상황 여부를 프레임 단위 라벨링 + - 객체 바운딩 박스 + 포즈 스켈레톤 키포인트 라벨링 제공 + +- 카테고리 + - 구매 행동 : 매장 이동, 구매, 반품, 비교 등 + - 이상 행동 : 파손, 방화, 흡연, 절도, 폭행 등 + +

+ + +

+ +## 🔨 Data Preprocessing + +

+ +

+ +- 비디오 데이터는 이미지 데이터에 비해 매우 큰 용량 + + → 주어진 AI Stages V100 서버는 100 GB **용량 제한**이 있어 모델 전체 End-to-End 학습이 아닌 기학습 가중치를 사용한 백본 네트워크로 영상의 **Feature를 미리 계산**해 학습을 진행 + +- Backbone 네트워크의 기학습된 가중치는 고정하고, +영상 Feature들을 미리 계산해 csv(YOLO v8), npy(Video MAE v2) 파일에 저장해 학습 데이터 용량을 `353 GB` → `2.42 GB` 로 줄여서 학습을 진행 + +--- + +## 🤖 Model + + + +▶️ Backbone Network : **Video Masked Auto Encoder v2** + +- 일반적인 영상 이상 탐지 모델의 영상 Feature 추출에 사용되는 Backbone Network는 **I3D** + +⛔ I3D 방식은 Optical Flow를 추가로 계산하기 때문에 실시간성 확보에 어려움이 있다고 판단 + +👉 **Optical Flow 사용하지 않고** + +👉 **Action Recognition** 분야에서 좋은 성능을 낸 Backbone Network 선정 + +▶️ **YOLO v8** + +- 주어진 데이터 셋의 라벨링에서 객체별 **바운딩 박스**와 **포즈 스켈레톤 키포인트**를 제공하기 때문에 활용할 수 있는 모델을 선정 + +▶️ Classifier : [**LSTM Auto-Encoder](https://github.com/surya2003-real/anomaly-detection-and-object-tracking?tab=readme-ov-file), [MGFN](https://arxiv.org/abs/2211.15098), [Deep MIL Ranking](https://arxiv.org/abs/1801.04264), [BN-WVAD](https://arxiv.org/abs/2311.15367)** + +- 영상 Feature를 정상 / 이상 영상으로 **이진 분류**하는 Classifier + +⛔ 데이터 셋이 프레임 단위로 정상 / 이상 여부를 제공해 지도 학습이 가능하지만 + +👉 장기적으로 **Continual Learning**과 **영상 Feature 관리**를 위해 + +👉 **비지도 학습** 또는 **비디오 단위 라벨링**을 활용한 **약한 지도 학습**이 가능한 구조를 사용 + + + +1️⃣ **YOLO v8 + [LSTM autoencoder](https://github.com/surya2003-real/anomaly-detection-and-object-tracking?tab=readme-ov-file)** + +

+ +

+ +- 데이터에서 제공하는 **포즈 스켈레톤** 정보를 활용하고자 함 + - Feature 추출 : **YOLO v8 Pose** + - 프레임 별 사람을 탐지해 Bbox, Keypoints 출력 + - 입력 영상의 Feature 추출 +- YOLO v8 (실시간 객체 탐지) + LSTM (시간적 특성 모델링) 역할로 구성 +- Classifier : **LSTM AE** + - 비지도 학습을 활용 + - **정상 행동만 학습**하고, **Encoder 입력**과 **Decoder 출력**의 차이를 줄이도록 학습 + - 학습 데이터와 다른 **이상 행동 입력이 주어지면**, 복원된 출력은 입력과 많은 차이가 발생 + + → **MSE**로 계산된 **복원 오차**가 임계치를 넘게 되면 이상으로 판단 + +- 장점: **실시간 데이터 처리, 스켈레톤 기반 행동 인식** 가능 +- 한계: 정상 영상이어도 학습 과정에서 배우지 않은 경우 이상으로 판단 + +2️⃣ **YOLO v8 + [MGFN](https://arxiv.org/abs/2211.15098)** + +

+ +

+ +- LSTM의 한계를 극복하고자 +**MGFN**(Magnitude-Contrastive-Glance-and-Focus Network)을 활용 +- **약한 지도 학습** 방식을 도입 + + → 라벨이 **부정확, 불완전한 데이터**에서도 **학습이 가능**하도록 개선 + +- Classifier : **MGFN** + - 약한 지도 학습 활용 + - **어텐션 메커니즘** + - 비디오 내의 다양한 **시간적 및 공간적 특징**을 분석하기 위해 설계 + - 정상 / 이상 행동의 **차이**를 더 잘 포착 +- 장점 : MGFN 사용으로 더 **정교한 Feature 추출** 및 **성능 향상, 빠른 추론 속도** +- 한계: 학습 과정에서 **높은 계산 비용**과 **많은 시간** 소요 + +--- + +3️⃣ **Video MAE v2 + [Deep MIL ranking model](https://arxiv.org/abs/1801.04264)** + +

+ +

+ +- Optical Flow 사용하지 않는 **Video MAE v2** 선정 + - Feature 추출 : **Video MAE v2** + - 영상을 16프레임으로 나눠 710 차원의 Feature Vector로 추출 +- 비디오 단위 라벨링으로 **약한 지도 학습** 방식 적용 가능. +- Classifier : **Deep MIL Ranking** + - UCF-Crime 데이터 셋의 베이스라인 모델 + - 영상을 여러 조각으로 나눠 **조각 별** **이상 예측 점수**를 출력 + - 정상 / 이상 영상을 1:1로 병행해 점수를 예측한 뒤 + 이상 영상의 모든 조각 예측 점수 중 최대값이 + 정상 영상의 모든 조각 예측 점수 중 최대값보다 커지도록 학습 + - BN-WVAD의 **Feature Enhancer** 구조를 추가 적용한 실험도 진행 +- 장점 + - 학습 시 이상 영상도 학습해 비지도 방식보다 **일반화 성능 향상** +- 한계 : 이상 영상 중 이상 행동 토막의 위치를 잘못 예측하는 등 **라벨링 노이즈 발생** 가능 + +4️⃣ **Video MAE v2 + [BN-WVAD](https://arxiv.org/abs/2311.15367)** + +

+ +

+ +- UCF-Crime 데이터 셋 기준 SOTA 성능의 **BN-WVAD** 선정 + - ROC AUC = 0.8724 + - Deep MIL Ranking Model = 0.7541 +- Classifier : **BN-WVAD** + - Transformer 계열 **Feature enhancer**를 사용해 Video MAE v2가 추출한 Feature Vector의 품질을 향상 + - 영상의 각 조각의 **최종 예측 점수**는 + 해당 조각의 Anomaly Classifier 결과와 + Feature Vector들의 Mahalanobis Distance 평균을 **곱한 결과** + - 각 Layer의 Output Feature 벡터들을 + 배치 정규화 과정에서 구해진 특정 벡터의 평균과 **[Mahalanobis distance](https://en.wikipedia.org/wiki/Mahalanobis_distance)**로 거리 계산 +- 장점 + - Deep MIL ranking model의 **라벨링 노이즈** 문제 **개선** +- 한계 : Triplet 계열 Loss를 사용해 **Batch Size**가 다른 모델에 비해 **매우 커야** 학습이 잘 진행됨 + +--- + + + +- **ROC AUC score** + - 이상 탐지 모델은 + **탐지율(True Postive Rate)**도 중요하지만 + **오탐율(False Postive Rate)** 또한 매우 중요 + - ⇒ Threshold 값에 따른 **오탐율, 탐지율 값**을 + 곡선으로 표현한 ROC Curve의 면적인 **ROC AUC**로 성능 평가 +- **FPS** + - 30 FPS 이상의 실시간 탐지를 위해 + **1 프레임 당 처리 속도(FPS)**로 속도 평가 + +

+ +

+

TP, FP 에 따른 ROC Curve

+ + + +- 실험 기록 및 관리는 WandB를 사용하였으며, ROC AUC, FPS 외에도 정확도, 정상 / 이상 영상 예측 스코어 평균, 예측 스코어 최대값 평균 등 다양한 결과 값들을 기록 + +

+ + +

+ +- 최종 결과 +

+ +

+ + - ROC AUC 기준 가장 좋은 성능을 보인 VMAEv2+FE+MIL 구성은 실제 이상 행동을 배우기보다는 데이터셋의 이상행동 발생 프레임 위치의 패턴만을 배운 것을 발견하여 **최종 모델**로는 **VMAEv2+MIL** 구조를 채용 + +## 🌏 Service + + + +- **Web Server - Front** + - BootStrap, HTML + - 설계한 와이어 프레임 기반으로 페이지별 기능 구현 + - 웹캠 기능 + - 세션 토큰을 활용한 사용자 검증 + - 실시간 탐지 시 일정 시간에 따라 탐지된 프레임 자동 업데이트 +- **Web Server - Back** + - Fast API + - 클라이언트와 효율적 통신을 위한 **RestAPI** 설계 + - 모델 서버의 **트래픽 최소화**를 위해 DB 저장 및 읽기, 영상 저장 작업은 **웹 서버에서 진행** + - **Websocket**을 이용해 모델 서버에 **실시간 프레임 전달** + - 웹캠, RTSP, Youtube 동영상 등 다양한 소스 처리 가능하도록 구현 + - SMTP 를 활용한 **이메일 알람** 로직 구현 +- **Web Server - Database** + - MySQL, S3 + - DB에 대해 쓰기 작업보다는 **읽기 작업이 많고**, 복잡한 쿼리가 없기 때문에 **속도와 안정성이 좋은 MySQL** 선정 + - SQLAlchemy 의 ORM 활용 + - **용량이 큰** 비디오, 프레임 이미지들을 위한 저장소로 **AWS S3** 선정. DB에는 S3 URL 을 적재하여 접근 가능하도록 함. + - 모델 추론 **결과(bounding** **box, keypoints)를 저장**하여 이후 추가 기능 혹은 모델 학습에 사용할 수 있도록 함. +- **Model Server** + - FastAPI, Pytorch + - GPU 사용 서버 + - 녹화 영상의 경우, 추론 이후 **OpenCV** 와 **FFMPEG** 를 이용, 후처리(코덱 설정)하여 html 에서 송출 가능하도록 함 + - **실시간 추론 서버**와 **녹화영상 추론 서버**로 나누어 운영. + - 추론 시 이상행동 프레임을 AWS S3 에 저장하고, DB frame 테이블을 갱신 + +
+서비스 아키텍처 +

+ +

+ +
+서비스 파이프라인 +

+ +

+ +## 🛎️ Product Serving + +- AI Stages 서버는 도커 컨테이너 환경으로 **외부 접속 및 방화벽 설정** **불가** + - VPN에서 외부 접속이 가능하도록 하는 **우회 경로 오픈**이 **금지** + - 제공되는 .ovpn 파일과 **SSH 포트 포워딩**을 통해 **AWS EC2** 환경에서 배포를 시도했으나 VPN 관련 오류인지, SSH 오류인지 로그를 확인하기 어려웠습니다. +- 우선, API 엔드포인트를 활용하여 웹 -모델 서버를 분리한 상태로 서비스를 완성시켜 놓았고, 이후 로컬 혹은 AWS 환경에서 배포를 지속적으로 시도하고 있습니다. +- 추가로 로드밸런싱을 이용하여 서버의 부하를 더 줄이는 방안도 공부하고 있습니다. +- 우리 서비스는 실시간 영상 분석을 제공하고 있는데, 다른 네트워크에 위치한 웹 서버 - 모델 서버 간 통신이 “실시간” 구현에 있어 문제되는지 면밀히 검토할 예정입니다. + +## 📸 실제 모습 + + + +1. 이상행동으로 판단된 **장면들과 타임스탬프를 저장**하고, 해당 시간대로 이동해 쉽게 확인할 수 있도록 제공 +2. 특정 장면을 자료로 사용하기 위해 **화질 개선 혹은 몽타주 생성** 등의 기능을 추가할 수 있음 + +

+ +

+ + + +1. **웹캠**, **동영상 스트리밍** 또는 **외부 CCTV** 와 연결하여 **실시간 이상행동 분석** 실시 +2. 이상 행동이 일정 시간 지속되면 가입된 이메일로 **발생 시간** 전송 + +

+ +

+ + + +1. 분석 단위로 앨범 기능을 제공하여 관리에 용이하고 결과를 재사용할 수 있다. + +

+ +

\ No newline at end of file diff --git a/app/.env b/app/.env new file mode 100644 index 0000000..fd02435 --- /dev/null +++ b/app/.env @@ -0,0 +1,24 @@ +# RDS +# MYSQL_SERVER_HOST = "cv06-database.xxxxxxxxxxxx.us-east-1.rds.amazonaws.com" +# MYSQL_SERVER_PORT = 3306 +# MYSQL_SERVER_USER = "bkh" +# MYSQL_SERVER_PASSWORD = "cv06-database" +# MYSQL_DATABASE = "cv06_database" + +AWS_ACCESS_KEY = "AWS access key" +AWS_SECRET_KEY = "AWS secret key" +BUCKET = "bucket name" + +SMTP_ADDRESS = "smtp.gmail.com" +SMTP_PORT = 465 +MAIL_ACCOUNT = "aitech06ivt@gmail.com" +MAIL_PASSWORD = "gmail app password" + +UPLOAD_MODEL_SERVER_IP = "upload video model server" +STREAM_MODEL_SERVER_IP = "real time video model server(ws)" + +MYSQL_SERVER_IP = "10.28.xxx.xxx" +MYSQL_SERVER_PORT = 30xxx +MYSQL_SERVER_USER = "bkh" +MYSQL_SERVER_PASSWORD = "bkh" +MYSQL_DATABASE = "cv06_database" \ No newline at end of file diff --git a/app/README.md b/app/README.md new file mode 100644 index 0000000..b6ec7d0 --- /dev/null +++ b/app/README.md @@ -0,0 +1,57 @@ +# Project Structure + +```bash +. 
+├── README.md +├── __init__.py +├── api +│   ├── __init__.py +│   ├── album_router.py +│   ├── real_time_router.py +│   ├── upload_router.py +│   └── user_router.py +├── database +│   ├── __init__.py +│   ├── crud.py +│   ├── database.py +│   ├── models.py +│   └── schemas.py +├── inference +│   ├── __init__.py +│   ├── anomaly_detector.py +│   └── rt_anomaly_detector.py +├── main.py +├── templates +│   ├── album_detail.html +│   ├── album_list.html +│   ├── base.html +│   ├── frame.html +│   ├── login.html +│   ├── main.html +│   ├── real_time.html +│   ├── signup.html +│   ├── src +│   │   ├── album_detail.js +│   │   ├── album_list.js +│   │   └── video.js +│   ├── stream.html +│   ├── upload.html +│   └── video.html +└── utils +    ├── __init__.py +    ├── config.py +    ├── security.py +    └── utils.py +``` + +# Description + +- api: URL별 로직 구현 + +- database: 데이터베이스 관련 설정 및 함수 + +- inference: 모델 추론 코드(녹화영상, 실시간) + +- templates: UI 템플릿. Bootstrap 사용 + +- utils: config 및 기타 함수 \ No newline at end of file diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/api/__init__.py b/app/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/api/album_router.py b/app/api/album_router.py new file mode 100644 index 0000000..04a29a4 --- /dev/null +++ b/app/api/album_router.py @@ -0,0 +1,199 @@ +from typing import Optional + +from database import crud, models +from database.database import get_db +from fastapi import ( + APIRouter, + Cookie, + Depends, + File, + Form, + HTTPException, + Query, + Request, + Response, + UploadFile, +) +from fastapi.responses import RedirectResponse +from fastapi.templating import Jinja2Templates +from sqlalchemy.orm import Session +from utils.config import settings +from utils.security import get_current_user +from utils.utils import s3 + +templates = Jinja2Templates(directory="templates") + +router = APIRouter( + prefix="/album", +) + + +@router.get("") +async def 
upload_get(request: Request, db: Session = Depends(get_db)): + user = get_current_user(request) + if not user: + return RedirectResponse(url="/user/login") + + album_list = crud.get_uploads(db=db, user_id=user.user_id) + print(album_list[0].completes[0].completed) + return templates.TemplateResponse( + "album_list.html", + {"request": request, "token": user.email, "album_list": album_list}, + ) + + +@router.post("") +async def modify_name( + request: Request, + check_code: str = Form(...), + upload_id: Optional[int] = Form(...), + origin_name: Optional[str] = Form(None), + new_name: Optional[str] = Form(None), + is_real_time: Optional[bool] = Form(None), + db: Session = Depends(get_db), +): + user = get_current_user(request) + + if check_code == "edit": + upload_info = ( + db.query(models.Upload) + .filter( + (models.Upload.name == origin_name) + & (models.Upload.upload_id == upload_id) + ) + .first() + ) + upload_info.name = new_name + + db.add(upload_info) + db.commit() + db.refresh(upload_info) + elif check_code == "delete": + upload_info = crud.get_upload(db, upload_id) + if upload_info: + db.delete(upload_info) + + db.commit() + album_list = crud.get_uploads(db=db, user_id=user.user_id) + + return templates.TemplateResponse( + "album_list.html", + {"request": request, "token": user.email, "album_list": album_list}, + ) + + +@router.get("/details") +async def upload_get_one( + request: Request, + user_id: int = Query(...), + upload_id: int = Query(...), + db: Session = Depends(get_db), +): + + user = get_current_user(request) + + video_info = { + "user_id": user_id, + "upload_id": upload_id, + "date": None, + "upload_name": None, + "is_realtime": None, + "video_id": None, + "video_url": None, + "frame_urls": None, + "score_url": None, + "complete": None, + } + + video = crud.get_video(db=db, upload_id=upload_id) + video_info["video_id"] = video.video_id + uploaded = crud.get_upload(db=db, upload_id=video.upload_id) + video_info["upload_name"] = uploaded.name + 
video_info["is_realtime"] = uploaded.is_realtime + video_info["date"] = uploaded.date.strftime("%Y-%m-%d %H:%M:%S") + + # frames = crud.get_frames(db=db, video_id=video.video_id) + frames = crud.get_frames_with_highest_score(db=db, video_id=video.video_id) + frame_ids = [frame.frame_id for frame in frames] + frame_urls = [frame.frame_url for frame in frames] + frame_timestamps = [frame.time_stamp for frame in frames] + frame_objs = [] + + video_obj = s3.generate_presigned_url( + "get_object", + Params={"Bucket": settings.BUCKET, "Key": video.video_url}, + ExpiresIn=3600, + ) + + video_info["video_url"] = video_obj + video_info["complete"] = crud.get_complete(db=db, upload_id=upload_id).completed + if not video_info["complete"]: + return templates.TemplateResponse( + "album_detail.html", + { + "request": request, + "token": user.email, + "video_info": video_info, + "loading": True, + }, + ) + + if frame_ids != []: + for frame_id, frame_url, frame_timestamp in zip( + frame_ids, frame_urls, frame_timestamps + ): + frame_obj = s3.generate_presigned_url( + "get_object", + Params={"Bucket": settings.BUCKET, "Key": frame_url}, + ExpiresIn=3600, + ) + frame_objs.append( + (frame_id, frame_obj, frame_timestamp.strftime("%H:%M:%S")) + ) + + score_graph_url = "/".join(frame_urls[0].split("/")[:-1]) + "/score_graph.png" + score_obj = s3.generate_presigned_url( + "get_object", + Params={"Bucket": settings.BUCKET, "Key": score_graph_url}, + ExpiresIn=3600, + ) + + video_info["frame_urls"] = frame_objs + video_info["score_url"] = score_obj + + # print(video_info) + return templates.TemplateResponse( + "album_detail.html", + { + "request": request, + "token": user.email, + "video_info": video_info, + "loading": False, + }, + ) + + +@router.get("/details/images") +async def image_get( + request: Request, frame_id: int = Query(...), db: Session = Depends(get_db) +): + + user = get_current_user(request) + frame = crud.get_frame(db=db, frame_id=frame_id) + frame_obj = 
s3.generate_presigned_url( + "get_object", + Params={"Bucket": settings.BUCKET, "Key": frame.frame_url}, + ExpiresIn=3600, + ) + print(frame_obj) + print(frame.box_kp_json) + frame_info = { + "frame_url": frame_obj, + "time_stamp": frame.time_stamp, + "frame_json": frame.box_kp_json, + } + + return templates.TemplateResponse( + "frame.html", + {"request": request, "token": user.email, "frame_info": frame_info}, + ) diff --git a/app/api/inference_router.py b/app/api/inference_router.py new file mode 100644 index 0000000..28510db --- /dev/null +++ b/app/api/inference_router.py @@ -0,0 +1,165 @@ +import asyncio +import json +import os +import sys +from datetime import date, datetime, timedelta + +import cv2 +import numpy as np +import pytz +from cap_from_youtube import cap_from_youtube +from fastapi import BackgroundTasks, Depends, FastAPI, WebSocket, WebSocketDisconnect +from fastapi.middleware.cors import CORSMiddleware +from pydantic import BaseModel +from sqlalchemy.orm import Session + +current_dir = os.path.dirname(os.path.abspath(__file__)) +parent_dir = os.path.dirname(current_dir) + +sys.path.append(parent_dir) + +from database import crud +from database.database import get_db +from inference.rt_anomaly_detector_lstmae import RT_AnomalyDetector +from utils.config import settings +from utils.utils import run_model, s3 + +app = FastAPI() + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +class ModelInfo(BaseModel): + user_id: int + upload_id: int + threshold: float + video_uuid_name: str + video_ext: str + video_id: int + video_url: str + + +@app.get("/") +def root(): + return {"message": "모델이 돌아가용"} + + +# 녹화영상용 +@app.post("/run_model") +async def run_model_endpoint( + info: ModelInfo, background_tasks: BackgroundTasks, db: Session = Depends(get_db) +): + + info = info.dict() + + def run_model_task(): + run_model(info["video_url"], info, settings, db) + + 
background_tasks.add_task(run_model_task) + + return {"message": "Model execution started."} + + +# 메일을 보내야하는지 판단하는 함수 +async def check_and_send_email(db, video_id, user_id, last_point, smtp): + global last_emailed_time + + frames = crud.get_frames_with_highest_score(db=db, video_id=video_id) + frame_timestamps = [frame.time_stamp.strftime("%H:%M:%S") for frame in frames] + + if len(frame_timestamps) < 6: + return False + + last = datetime.strptime(frame_timestamps[-2], "%H:%M:%S") + check = datetime.strptime(frame_timestamps[-6], "%H:%M:%S") + + if (last - check) == timedelta(seconds=4): # 연속적으로 5초간 지속되면 + if not check <= last_point <= last: + crud.send_email( + db, frame_timestamps[-6], frame_timestamps[-2], user_id, smtp + ) + last_emailed_time = last + + +# 과연 웹 서버와 실시간을 분리하는 것이 더 빠른가? +@app.websocket("/ws") +async def websocket_endpoint(websocket: WebSocket, db: Session = Depends(get_db)): + + await websocket.accept() + smtp = await crud.create_smtp_server() + + try: + video_info_str = await websocket.receive_text() + print("Received video info:", video_info_str) + video_info = json.loads(video_info_str) + global detector, last_emailed_time + if detector is None: + detector = RT_AnomalyDetector(video_info, s3, settings, db, websocket) + detector.ready() + + if video_info["video_url"] == "web": + while True: + timestamp = datetime.now(pytz.timezone("Asia/Seoul")) + # Receive bytes from the websocket + bytes = await websocket.receive_bytes() + data = np.frombuffer(bytes, dtype=np.uint8) + frame = cv2.imdecode(data, cv2.IMREAD_COLOR) + await detector.run(frame, timestamp) + await check_and_send_email( + db=db, + video_id=video_info["video_id"], + user_id=video_info["user_id"], + last_point=last_emailed_time, + smtp=smtp, + ) + + else: + if "youtube" in video_info["video_url"]: + cap = cap_from_youtube(video_info["video_url"], "240p") + + else: + cap = cv2.VideoCapture(video_info["video_url"]) + + while True: + success, frame = cap.read() + if not success: + await 
websocket.send_text(f"카메라 연결에 실패했습니다.") + break + else: + timestamp = datetime.now(pytz.timezone("Asia/Seoul")) + await detector.run(frame, timestamp) + await check_and_send_email( + db=db, + video_id=video_info["video_id"], + user_id=video_info["user_id"], + last_point=last_emailed_time, + smtp=smtp, + ) + + ret, buffer = cv2.imencode(".jpg", frame) + await websocket.send_bytes(buffer.tobytes()) + + await asyncio.sleep(0.042) + + except WebSocketDisconnect: + await websocket.close() + await smtp.quit() + + except Exception as e: + # 예외 발생 시 로그 기록 및 연결 종료 + print(f"WebSocket error: {e}") + await websocket.close() + await smtp.quit() + + finally: + try: + detector.upload_score_graph_s3() + except: + pass + detector = None diff --git a/app/api/real_time_router.py b/app/api/real_time_router.py new file mode 100644 index 0000000..69b3c7c --- /dev/null +++ b/app/api/real_time_router.py @@ -0,0 +1,293 @@ +import asyncio +import json +from datetime import date, datetime, timedelta + +import cv2 +import numpy as np +import pytz +import websockets +from cap_from_youtube import cap_from_youtube +from database import crud, schemas +from database.database import get_db +from fastapi import ( + APIRouter, + Depends, + Form, + Query, + Request, + WebSocket, + WebSocketDisconnect, + status, +) +from fastapi.responses import RedirectResponse +from fastapi.templating import Jinja2Templates + +# from inference.rt_anomaly_detector import RT_AnomalyDetector +from inference.rt_anomaly_detector_lstmae import RT_AnomalyDetector +from sqlalchemy.orm import Session +from utils.config import settings +from utils.security import get_current_user +from utils.utils import s3 +from websockets.exceptions import ConnectionClosed + +templates = Jinja2Templates(directory="templates") +router = APIRouter( + prefix="/real_time", +) + +detector = None +last_emailed_time = datetime.strptime("0:00:00", "%H:%M:%S") + + +@router.get("") +async def real_time_get(request: Request): + user = 
get_current_user(request) + if not user: + return RedirectResponse(url="/user/login") + + return templates.TemplateResponse( + "real_time.html", {"request": request, "token": user.email} + ) + + +@router.post("") +async def realtime_post( + request: Request, + name: str = Form(...), + real_time_video: str = Form(...), + datetime: datetime = Form(...), + thr: float = Form(...), + db: Session = Depends(get_db), +): + + user = get_current_user(request) + user = crud.get_user_by_email(db=db, email=user.email) + + # Form 과 user_id 를 이용하여 upload row insert + _upload_create = schemas.UploadCreate( + name=name, date=datetime, is_realtime=True, thr=thr, user_id=user.user_id + ) + crud.create_upload(db=db, upload=_upload_create) + + # 지금 업로드된 id 획득, 클라이언트로부터 작성된 실시간 스트리밍 영상 url 획득 + uploaded = crud.get_upload_id( + db=db, user_id=user.user_id, name=name, date=datetime + )[-1] + + # db 에는 실시간임을 알 수 있게만 함 + video_url = f"{real_time_video}" + _video_create = schemas.VideoCreate( + video_url=video_url, upload_id=uploaded.upload_id + ) + crud.create_video(db=db, video=_video_create) + _complete_create = schemas.Complete(completed=True, upload_id=uploaded.upload_id) + crud.create_complete(db=db, complete=_complete_create) + + # model inference 에서 사용할 정보 + info = { + "user_id": user.user_id, + "email": user.email, + "upload_id": uploaded.upload_id, + "name": uploaded.name, + "date": uploaded.date, + "threshold": uploaded.thr, + "video_url": video_url, + "video_id": crud.get_video(db=db, upload_id=uploaded.upload_id).video_id, + } + + redirect_url = ( + f"/real_time/stream?user_id={info['user_id']}&upload_id={info['upload_id']}" + ) + + return RedirectResponse(url=redirect_url, status_code=status.HTTP_303_SEE_OTHER) + + +@router.get("/stream") +async def get_stream( + request: Request, + user_id: int = Query(...), + upload_id: int = Query(...), + db: Session = Depends(get_db), +): + + user = get_current_user(request) + + video = crud.get_video(db=db, upload_id=upload_id) + uploaded 
= crud.get_upload(db=db, upload_id=video.upload_id) + + video_info = { + "user_id": user_id, + "upload_id": upload_id, + "date": uploaded.date.strftime("%Y-%m-%d %H:%M:%S"), + "upload_name": uploaded.name, + "thr": uploaded.thr, + "video_id": video.video_id, + "video_url": video.video_url, + "is_realtime": True, + "model_server_ip": settings.STREAM_MODEL_SERVER_IP, + } + + # video_info = json.dumps(video_info) + + return templates.TemplateResponse( + "stream.html", + {"request": request, "token": user.email, "video_info": video_info}, + ) + + +# 메일을 보내야하는지 판단하는 함수 +async def check_and_send_email(db, video_id, user_id, last_point, smtp): + global last_emailed_time + + frames = crud.get_frames_with_highest_score(db=db, video_id=video_id) + frame_timestamps = [frame.time_stamp.strftime("%H:%M:%S") for frame in frames] + + if len(frame_timestamps) < 6: + return False + + last = datetime.strptime(frame_timestamps[-2], "%H:%M:%S") + check = datetime.strptime(frame_timestamps[-6], "%H:%M:%S") + + if (last - check) == timedelta(seconds=4): # 연속적으로 5초간 지속되면 + if not check <= last_point <= last: + crud.send_email( + db, frame_timestamps[-6], frame_timestamps[-2], user_id, smtp + ) + last_emailed_time = last + + +@router.websocket("/ws") +async def websocket_endpoint(websocket: WebSocket, db: Session = Depends(get_db)): + await websocket.accept() + smtp = await crud.create_smtp_server() + + try: + video_info_str = await websocket.receive_text() + print("Received video info:", video_info_str) + video_info = json.loads(video_info_str) + global detector, last_emailed_time + if detector is None: + detector = RT_AnomalyDetector(video_info, s3, settings, db, websocket) + detector.ready() + + if video_info["video_url"] == "web": + while True: + timestamp = datetime.now(pytz.timezone("Asia/Seoul")) + # Receive bytes from the websocket + bytes = await websocket.receive_bytes() + data = np.frombuffer(bytes, dtype=np.uint8) + frame = cv2.imdecode(data, cv2.IMREAD_COLOR) + await 
detector.run(frame, timestamp) + await check_and_send_email( + db=db, + video_id=video_info["video_id"], + user_id=video_info["user_id"], + last_point=last_emailed_time, + smtp=smtp, + ) + + else: + if "youtube" in video_info["video_url"]: + cap = cap_from_youtube(video_info["video_url"], "240p") + + else: + cap = cv2.VideoCapture(video_info["video_url"]) + + while True: + success, frame = cap.read() + if not success: + await websocket.send_text(f"카메라 연결에 실패했습니다.") + break + else: + timestamp = datetime.now(pytz.timezone("Asia/Seoul")) + await detector.run(frame, timestamp) + await check_and_send_email( + db=db, + video_id=video_info["video_id"], + user_id=video_info["user_id"], + last_point=last_emailed_time, + smtp=smtp, + ) + + ret, buffer = cv2.imencode(".jpg", frame) + await websocket.send_bytes(buffer.tobytes()) + + await asyncio.sleep(0.042) + + except WebSocketDisconnect: + await websocket.close() + await smtp.quit() + + except Exception as e: + # 예외 발생 시 로그 기록 및 연결 종료 + print(f"WebSocket error: {e}") + await websocket.close() + await smtp.quit() + + finally: + try: + detector.upload_score_graph_s3() + except: + pass + detector = None + + +@router.get("/stream") +async def get_stream( + request: Request, + user_id: int = Query(...), + upload_id: int = Query(...), + db: Session = Depends(get_db), +): + + user = get_current_user(request) + + video = crud.get_video(db=db, upload_id=upload_id) + uploaded = crud.get_upload(db=db, upload_id=video.upload_id) + + video_info = { + "user_id": user_id, + "upload_id": upload_id, + "date": uploaded.date.strftime("%Y-%m-%d %H:%M:%S"), + "upload_name": uploaded.name, + "thr": uploaded.thr, + "video_id": video.video_id, + "video_url": video.video_url, + "is_realtime": True, + } + + # video_info = json.dumps(video_info) + + return templates.TemplateResponse( + "stream.html", + {"request": request, "token": user.email, "video_info": video_info}, + ) + + +# db 에서 실시간에서 저장되는 frame url 불러오는 코드 +def fetch_data(db, upload_id): + 

    video = crud.get_video(db=db, upload_id=upload_id)
    frames = crud.get_frames_with_highest_score(db=db, video_id=video.video_id)
    frame_ids = [frame.frame_id for frame in frames]
    frame_urls = [frame.frame_url for frame in frames]
    frame_timestamps = [frame.time_stamp for frame in frames]
    frame_objs = []

    # Convert each stored S3 key into a 1-hour presigned GET URL for the UI.
    for frame_id, frame_url, frame_timestamp in zip(
        frame_ids, frame_urls, frame_timestamps
    ):
        frame_obj = s3.generate_presigned_url(
            "get_object",
            Params={"Bucket": settings.BUCKET, "Key": frame_url},
            ExpiresIn=3600,
        )
        frame_objs.append((frame_id, frame_obj, frame_timestamp.strftime("%H:%M:%S")))

    return {"frame_urls": frame_objs}


@router.get("/fetch_data")
async def fetch_frame_data(upload_id: int = Query(...), db: Session = Depends(get_db)):
    # Thin JSON endpoint over fetch_data for the stream page to poll.
    frame_data = fetch_data(db, upload_id)
    return frame_data
diff --git a/app/api/upload_router.py b/app/api/upload_router.py
new file mode 100644
index 0000000..6a73ddc
--- /dev/null
+++ b/app/api/upload_router.py
@@ -0,0 +1,141 @@
import os
import uuid
from datetime import datetime

import requests
from database import crud, schemas
from database.database import get_db
from fastapi import (
    APIRouter,
    BackgroundTasks,
    Depends,
    File,
    Form,
    HTTPException,
    Request,
    Response,
    UploadFile,
    status,
)
from fastapi.responses import RedirectResponse
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session
from utils.config import settings
from utils.security import get_current_user
from utils.utils import s3

templates = Jinja2Templates(directory="templates")

router = APIRouter(
    prefix="/upload",
)


@router.get("")
async def upload_get(request: Request):
    # Render the upload form; redirect anonymous visitors to the login page.
    user = get_current_user(request)
    err_msg = {"file_ext": None}
    if not user:
        return RedirectResponse(url="/user/login")

    return templates.TemplateResponse(
        "upload.html", {"request": request, "token": user.email, "err": err_msg}
    )


@router.post("")
async def
upload_post( + request: Request, + name: str = Form(...), + upload_file: UploadFile = File(...), + datetime: datetime = Form(...), + thr: float = Form(...), + db: Session = Depends(get_db), +): + + user = get_current_user(request) + err_msg = {"file_ext": None} + + if not user: + return RedirectResponse(url="/user/login") + + file_ext = os.path.splitext(upload_file.filename)[-1] + if file_ext != ".mp4": + err_msg["file_ext"] = "파일 형식이 다릅니다.(mp4만 지원 가능)" + return templates.TemplateResponse( + "upload.html", {"request": request, "token": user.email, "err": err_msg} + ) + + _upload_create = schemas.UploadCreate( + name=name, date=datetime, is_realtime=False, thr=thr, user_id=user.user_id + ) + crud.create_upload(db=db, upload=_upload_create) + + uploaded = crud.get_upload_id( + db=db, user_id=user.user_id, name=name, date=datetime + )[-1] + + video_name = uuid.uuid1() + + # model inference 에서 s3 에 올릴 주소 그대로 db 에 insert + video_url = f"video/{user.user_id}/{uploaded.upload_id}/{video_name}{file_ext}" + _video_create = schemas.VideoCreate( + video_url=video_url, upload_id=uploaded.upload_id + ) + crud.create_video(db=db, video=_video_create) + _complete_create = schemas.Complete(completed=False, upload_id=uploaded.upload_id) + crud.create_complete(db=db, complete=_complete_create) + + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="video 를 s3 저장소 업로드에 실패했습니다.", + ) + + try: + s3.upload_fileobj( + upload_file.file, + settings.BUCKET, + video_url, + ExtraArgs={"ContentType": "video/mp4"}, + ) + except: + raise s3_upload_exception + + info = { + "user_id": user.user_id, + "email": user.email, + "upload_id": uploaded.upload_id, + "name": name, + "date": datetime, + "threshold": uploaded.thr, + "video_name": upload_file.filename, + "video_uuid_name": video_name, + "video_ext": file_ext, + "video_id": crud.get_video(db=db, upload_id=uploaded.upload_id).video_id, + "video_url": video_url, + } + + model_data = { + "user_id": 
user.user_id,
        "upload_id": uploaded.upload_id,
        "threshold": uploaded.thr,
        "video_uuid_name": str(video_name),
        "video_ext": file_ext,
        "video_id": crud.get_video(db=db, upload_id=uploaded.upload_id).video_id,
        "video_url": video_url,
    }

    # Hand the job off to the model server; failure surfaces as a 500.
    model_server_url = settings.UPLOAD_MODEL_SERVER_IP
    try:
        response = requests.post(model_server_url, json=model_data)
        response.raise_for_status()  # raise on any non-2xx status code
        print("Model execution started successfully.")
    except requests.RequestException:
        e = "모델 서버에서 오류가 발생했습니다."
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=e)

    redirect_url = (
        f"/album/details?user_id={info['user_id']}&upload_id={info['upload_id']}"
    )

    return RedirectResponse(url=redirect_url, status_code=status.HTTP_303_SEE_OTHER)
diff --git a/app/api/user_router.py b/app/api/user_router.py
new file mode 100644
index 0000000..d0e107f
--- /dev/null
+++ b/app/api/user_router.py
@@ -0,0 +1,92 @@
from database import crud, models
from database.database import get_db
from fastapi import APIRouter, Depends, Request
from fastapi.responses import RedirectResponse
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session
from utils.security import pwd_context

templates = Jinja2Templates(directory="templates")
router = APIRouter(prefix="/user")


@router.get("/signup")
async def signup_get(request: Request, db: Session = Depends(get_db)):
    # Render the empty signup form.
    err_msg = {"user": None, "pw": None, "check_pw": None}
    return templates.TemplateResponse(
        "signup.html", {"request": request, "err": err_msg}
    )


@router.post("/signup")
async def signup_post(request: Request, db: Session = Depends(get_db)):
    # Validate the signup form; on success create the user and redirect to
    # login, otherwise re-render the form with field-level error messages.
    body = await request.form()
    user, pw, check_pw = body["email"], body["pw"], body["check_pw"]
    err_msg = {"user": None, "pw": None, "check_pw": None}

    if not user:
        err_msg["user"] = "empty email"
    elif not pw:
        err_msg["pw"] = "empty password"
    elif pw != check_pw:
err_msg["check_pw"] = "not equal password and check_password"
    else:
        user = db.query(models.User).filter(models.User.email == body["email"]).first()

        if user:
            # E-mail already registered.
            err_msg["user"] = "invalid email"
        else:
            # NOTE(review): a models.User instance is passed where
            # crud.create_user expects a UserCreate schema; it works because
            # both expose .email/.password (create_user hashes the password),
            # but passing schemas.UserCreate would be cleaner — confirm.
            user_info = models.User(email=body["email"], password=body["pw"])

            crud.create_user(db, user_info)
            return RedirectResponse(url="/user/login")

    return templates.TemplateResponse(
        "signup.html", {"request": request, "err": err_msg}
    )


@router.get("/login")
async def login_get(request: Request):
    # Render the empty login form.
    err_msg = {"user": None, "pw": None}
    return templates.TemplateResponse(
        "login.html", {"request": request, "err": err_msg}
    )


@router.post("/login")
async def login_post(request: Request, db: Session = Depends(get_db)):
    # Validate credentials; re-render the form with errors on failure.
    body = await request.form()
    user, pw = body["email"], body["pw"]
    err_msg = {"user": None, "pw": None}

    # A submitted check_pw field means this was a signup form post; bounce it.
    if body.get("check_pw", None):
        return templates.TemplateResponse(
            "login.html", {"request": request, "err": err_msg}
        )

    if not user:
        err_msg["user"] = "empty email"
    elif not pw:
        err_msg["pw"] = "empty password"
    else:
        user = db.query(models.User).filter(models.User.email == body["email"]).first()
        if not user:
            err_msg["user"] = "invalid email"
        elif not pwd_context.verify(body["pw"], user.password):
            err_msg["pw"] = "invalid password"
        else:
            # NOTE(review): no access_token cookie is set here although
            # logout deletes one — presumably the token is issued elsewhere
            # (middleware?); verify, otherwise login never authenticates.
            return RedirectResponse(url="/")

    return templates.TemplateResponse(
        "login.html", {"request": request, "err": err_msg}
    )


@router.get("/logout")
async def logout_get(request: Request):
    # Drop the auth cookie (if any) and return to the landing page.
    access_token = request.cookies.get("access_token", None)
    template = RedirectResponse(url="/")
    if access_token:
        template.delete_cookie(key="access_token")
    return template
diff --git a/app/database/__init__.py b/app/database/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app/database/crud.py b/app/database/crud.py
new file mode 100644
index 0000000..0f66ee6
--- /dev/null
+++ b/app/database/crud.py
@@ -0,0 +1,236 @@
import
smtplib
from datetime import date, datetime, timedelta
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

from database import models
from database.schemas import (
    Complete,
    FrameCreate,
    UploadCreate,
    UserBase,
    UserCreate,
    VideoCreate,
)
from sqlalchemy import func
from sqlalchemy.orm import Session, aliased
from utils.config import settings
from utils.security import get_password_hash, verify_password

# from email.mime.image import MIMEImage
# from passlib.context import CryptContext



## User
def create_user(db: Session, user: UserCreate):
    # Hash the plaintext password before persisting the new user row.
    hashed_password = get_password_hash(user.password)
    db_user = models.User(email=user.email, password=hashed_password)
    db.add(db_user)
    db.commit()
    db.refresh(db_user)
    return db_user


def get_user(db: Session, user_id: int):
    # Look up a user by primary key; None when absent.
    return db.query(models.User).filter(models.User.user_id == user_id).first()


def get_user_by_email(db: Session, email: str):
    # Look up a user by unique e-mail; None when absent.
    return db.query(models.User).filter(models.User.email == email).first()


def get_existing_user(db: Session, user_create: UserCreate):
    # Duplicate-signup check: any user already holding this e-mail.
    return (
        db.query(models.User).filter((models.User.email == user_create.email)).first()
    )


def authenticate(db: Session, *, email: str, password: str):
    # Return the user when e-mail exists and password verifies, else None.
    user = get_user_by_email(db, email=email)
    if not user:
        return None
    if not verify_password(password, user.password):
        return None
    return user


def is_active(user: UserBase) -> bool:
    return user.is_active


## Upload
def create_upload(db: Session, upload: UploadCreate):
    # Insert an upload row from the pydantic payload and return it refreshed.
    db_upload = models.Upload(**upload.dict())
    db.add(db_upload)
    db.commit()
    db.refresh(db_upload)
    return db_upload


def delete_upload(db: Session, upload_id: int):
    # Delete by id; True when a row was removed, False when not found.
    db_upload = (
        db.query(models.Upload).filter(models.Upload.upload_id == upload_id).first()
    )

    if db_upload:
        db.delete(db_upload)
        db.commit()
        return True
    return False


def get_upload(db: Session, upload_id: int):
    return db.query(models.Upload).filter(models.Upload.upload_id == upload_id).first()


def get_upload_id(
    db: Session,
    user_id: int,
    name: str,
    date: datetime,
):
    # All uploads matching (user, name, date) — callers take [-1] as newest.
    return (
        db.query(models.Upload)
        .filter(
            (models.Upload.user_id == user_id)
            & (models.Upload.name == name)
            & (models.Upload.date == date)
        )
        .all()
    )


def get_uploads(db: Session, user_id: int):
    # All uploads for a user, newest first.
    return (
        db.query(models.Upload)
        .filter(models.Upload.user_id == user_id)
        .order_by(models.Upload.upload_id.desc())
        .all()
    )


def get_upload_by_name(db: Session, name: str):
    return db.query(models.Upload).filter(models.Upload.name == name).first()


## Video
def create_video(db: Session, video: VideoCreate):
    db_video = models.Video(**video.dict())
    db.add(db_video)
    db.commit()
    db.refresh(db_video)
    return db_video


def get_video(db: Session, upload_id: int):
    # First video row belonging to an upload (uploads hold a single video).
    return db.query(models.Video).filter(models.Video.upload_id == upload_id).first()


## Frame
def create_frame(db: Session, frame: FrameCreate):
    db_frame = models.Frame(**frame.dict())
    db.add(db_frame)
    db.commit()
    db.refresh(db_frame)
    return db_frame


def get_frame(db: Session, frame_id: int):
    return db.query(models.Frame).filter(models.Frame.frame_id == frame_id).first()


def get_frames(db: Session, video_id: int):
    return db.query(models.Frame).filter(models.Frame.video_id == video_id).all()


def get_frames_with_highest_score(db: Session, video_id: int):
    # For each (video, timestamp) keep only the frame with the maximal score:
    # join the video's frames against a max-score-per-timestamp subquery.

    subquery = (
        db.query(
            models.Frame.video_id,
            models.Frame.time_stamp,
            func.max(models.Frame.score).label("max_score"),
        )
        .group_by(models.Frame.video_id, models.Frame.time_stamp)
        .subquery()
    )

    subq_alias = aliased(subquery)

    frames = (
        db.query(models.Frame)
        .join(
            subq_alias,
            (models.Frame.video_id == subq_alias.c.video_id)
            & (models.Frame.time_stamp == subq_alias.c.time_stamp)
            & (models.Frame.score == subq_alias.c.max_score),
        )
        .filter(models.Frame.video_id == video_id)
        .all()
    )

    return frames


def create_complete(db: Session, complete: Complete):
    db_complete = models.Complete(**complete.dict())
    db.add(db_complete)
    db.commit()
    db.refresh(db_complete)
    return db_complete


def get_complete(db: Session, upload_id: int):
    return (
        db.query(models.Complete).filter(models.Complete.upload_id == upload_id).first()
    )


def update_complete_status(db: Session, upload_id: int):
    # Idempotently flip an upload's completed flag to True.

    complete_record = (
        db.query(models.Complete).filter(models.Complete.upload_id == upload_id).first()
    )

    if complete_record and not complete_record.completed:
        complete_record.completed = True
        db.commit()


# email feat
async def create_smtp_server():
    # NOTE(review): declared async but smtplib is blocking — these calls will
    # block the event loop; consider running in a thread executor.

    smtp = smtplib.SMTP_SSL(
        settings.SMTP_ADDRESS, settings.SMTP_PORT
    )  # connect to the SMTP server
    smtp.login(
        settings.MAIL_ACCOUNT, settings.MAIL_PASSWORD
    )  # authenticate with the project account

    return smtp


def send_email(db, check, last, user_id, smtp):
    # Compose and send the anomaly-window alert mail to the upload's owner.
    user = get_user(db, user_id)

    # Basic mail headers.
    msg = MIMEMultipart()
    msg["subject"] = f"[IVT] 이상행동 분석 중 결과 전달 메일입니다."
    msg["from"] = settings.MAIL_ACCOUNT
    msg["To"] = user.email

    # Mail body (Korean, user-facing — do not translate).
    content = f"""안녕하세요. Naver AI Tech 6기 '혁신비전테크' 팀 입니다.

실시간 이상행동 탐지 중 이상행동이 발견되어 해당 시간대를 전달 드립니다.

{check} ~ {last} 시간을 확인해주세요.
"""

    content_part = MIMEText(content, "plain")
    msg.attach(content_part)

    smtp.sendmail(settings.MAIL_ACCOUNT, user.email, msg.as_string())

    # Attaching detection images is planned for a future update.
diff --git a/app/database/database.py b/app/database/database.py
new file mode 100644
index 0000000..e34c55e
--- /dev/null
+++ b/app/database/database.py
@@ -0,0 +1,26 @@
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from utils.config import settings

SQLALCHEMY_DATABASE_URL = "mysql+pymysql://{}:{}@{}:{}/{}".format(
    settings.MYSQL_SERVER_USER,
    settings.MYSQL_SERVER_PASSWORD,
    settings.MYSQL_SERVER_IP,
    settings.MYSQL_SERVER_PORT,
    settings.MYSQL_DATABASE,
)

# NOTE(review): echo=True logs every SQL statement — noisy for production.
engine = create_engine(SQLALCHEMY_DATABASE_URL, echo=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Base = declarative_base()


# Dependency
def get_db():
    # FastAPI dependency: yield a session and always close it afterwards.
    db = SessionLocal()
    try:
        yield db
    finally:
        db.close()
diff --git a/app/database/models.py b/app/database/models.py
new file mode 100644
index 0000000..b201bb8
--- /dev/null
+++ b/app/database/models.py
@@ -0,0 +1,77 @@
from database.database import Base
from sqlalchemy import (
    JSON,
    Boolean,
    Column,
    DateTime,
    Float,
    ForeignKey,
    Integer,
    String,
    Time,
)
from sqlalchemy.orm import relationship


class User(Base):
    """Account that owns uploads."""

    __tablename__ = "user"

    user_id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    email = Column(String(50), unique=True, nullable=False)
    password = Column(String(200), nullable=False)  # bcrypt/argon hash
    is_active = Column(Boolean, default=True)

    uploads = relationship("Upload", back_populates="user")


class Upload(Base):
    """One analysis job (file upload or real-time session)."""

    __tablename__ = "upload"

    upload_id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    name = Column(String(50), nullable=False)
    date = Column(DateTime, nullable=False)
    is_realtime = Column(Boolean, default=False)
    thr = Column(Float, nullable=False)  # anomaly score threshold
    user_id = Column(Integer, ForeignKey("user.user_id"), nullable=False)

    user = relationship("User", back_populates="uploads")
    videos = relationship(
        "Video", back_populates="upload", cascade="all, delete-orphan"
    )
    completes = relationship(
        "Complete", back_populates="upload", cascade="all, delete-orphan"
    )


class Video(Base):
    """S3 key (or stream source) of an upload's video."""

    __tablename__ = "video"

    video_id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    video_url = Column(String(255), nullable=False)
    upload_id = Column(Integer, ForeignKey("upload.upload_id"), nullable=False)

    upload = relationship("Upload", back_populates="videos")
    frames = relationship("Frame", back_populates="video", cascade="all, delete-orphan")


class Frame(Base):
    """Per-frame detection record: S3 key, timestamp, boxes/keypoints, score."""

    __tablename__ = "frame"

    frame_id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    frame_url = Column(String(255), nullable=False)
    time_stamp = Column(Time, nullable=False)
    box_kp_json = Column(JSON, nullable=False)
    score = Column(Float, nullable=False)
    video_id = Column(Integer, ForeignKey("video.video_id"), nullable=False)

    video = relationship("Video", back_populates="frames")


class Complete(Base):
    """Completion flag for an upload's inference job."""

    __tablename__ = "complete"

    complete_id = Column(Integer, primary_key=True, index=True, autoincrement=True)
    completed = Column(Boolean, default=False)
    upload_id = Column(Integer, ForeignKey("upload.upload_id"), nullable=False)

    upload = relationship("Upload", back_populates="completes")
diff --git a/app/database/schemas.py b/app/database/schemas.py
new file mode 100644
index 0000000..62ea29d
--- /dev/null
+++ b/app/database/schemas.py
@@ -0,0 +1,58 @@
from datetime import datetime, time
from typing import Dict, List, Optional

from pydantic import BaseModel, EmailStr, field_validator
from pydantic_core.core_schema import FieldValidationInfo


class UserBase(BaseModel):
    email: EmailStr
    is_active: Optional[bool] = True


class
UserCreate(UserBase):
    password: str


# Upload POST schema
class UploadCreate(BaseModel):
    name: str
    date: datetime
    is_realtime: Optional[bool] = None
    thr: float
    user_id: int


# Video POST schema
class VideoCreate(BaseModel):
    video_url: str
    upload_id: int


class Video(VideoCreate):
    video_id: int
    frames: List["Frame"] = []

    # NOTE(review): `orm_mode` is pydantic-v1 style while the
    # pydantic_core/field_validator imports above are v2-style; under v2 this
    # should be model_config = ConfigDict(from_attributes=True) — confirm the
    # installed pydantic version.
    class Config:
        orm_mode = True


# Frame POST schema
class FrameCreate(BaseModel):
    frame_url: str
    time_stamp: time
    box_kp_json: Dict
    score: float
    video_id: int


class Frame(FrameCreate):
    frame_id: int

    class Config:
        orm_mode = True


class Complete(BaseModel):
    completed: bool
    upload_id: int
diff --git a/app/inference/__init__.py b/app/inference/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/app/inference/anomaly_detector.py b/app/inference/anomaly_detector.py
new file mode 100644
index 0000000..3c979cb
--- /dev/null
+++ b/app/inference/anomaly_detector.py
@@ -0,0 +1,458 @@
import json
import os
import sys
import uuid
from collections import defaultdict
from datetime import datetime, time
from io import BytesIO

import albumentations as A
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from database import crud, schemas
from fastapi import HTTPException
from sklearn.preprocessing import MinMaxScaler
from starlette import status
from ultralytics import YOLO

current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))

sys.path.append(os.path.join(parent_dir, "model"))
from copy import deepcopy

import vmae

# @@ timm must be pinned to version 0.4.12
from timm.models import create_model


class AnomalyDetector:
    """Offline (non-realtime) anomaly detection over an uploaded video.

    Scores frames with a VideoMAE backbone + MIL classifier, annotates them
    with YOLOv8-pose tracks, and uploads frames/video/score-graph to S3 while
    persisting per-frame records to the DB.
    """

    def __init__(self, video_file, info, s3_client, settings, db):
        # Presigned GET URL so OpenCV can read the source video from S3.
        self.video = s3_client.generate_presigned_url(
            ClientMethod="get_object",
            Params={"Bucket": settings.BUCKET, "Key": video_file},
            ExpiresIn=3600,
        )
+ # print(self.video) + self.info = info + self.s3 = s3_client + self.settings = settings + self.thr = info["threshold"] + self.video_url = f"video/{info['user_id']}/{info['upload_id']}/{info['video_uuid_name']}{info['video_ext']}" + self.frame_url_base = f"frame/{info['user_id']}/{info['upload_id']}/" + self.db = db + + def display_text(self, frame, text, position): + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = 1 + font_color = (0, 255, 0) # Green color + font_thickness = 2 + cv2.putText( + frame, + text, + position, + font, + font_scale, + font_color, + font_thickness, + cv2.LINE_AA, + ) + + def upload_frame_db(self, db, temp_for_db, frame_url): + + temp_json_path = "./temp.json" + + with open(temp_json_path, "w") as f: + json.dump(temp_for_db, f) + + with open(temp_json_path, "r") as f: + box_kp_json = json.load(f) + + _frame_create = schemas.FrameCreate( + frame_url=frame_url, + time_stamp=temp_for_db["timestamp"], + box_kp_json=box_kp_json, + score=temp_for_db["score"], + video_id=self.info["video_id"], + ) + + crud.create_frame(db=db, frame=_frame_create) + + os.remove(temp_json_path) + + def upload_frame_s3(self, s3, frame): + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Frame 을 s3 저장소 업로드에 실패했습니다.", + ) + frame_name = uuid.uuid1() + frame_url = self.frame_url_base + f"{frame_name}" + ".png" + # print(frame_url) + + try: + s3.upload_fileobj( + BytesIO(cv2.imencode(".png", frame)[1].tobytes()), + self.settings.BUCKET, + frame_url, + ExtraArgs={"ContentType": "image/png"}, + ) + except Exception as e: + # print(e) + raise s3_upload_exception + + return frame_url + + def upload_video_s3(self, s3, video): + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="video 를 s3 저장소 업로드에 실패했습니다.", + ) + video_url = self.video_url + + video_change_codec = "./temp_video_path_change_codec.mp4" + + os.system('ffmpeg -i "%s" -vcodec libx264 "%s"' % (video, video_change_codec)) + + try: + 
            with open(video_change_codec, "rb") as video_file:
                s3.upload_fileobj(
                    video_file,
                    self.settings.BUCKET,
                    video_url,
                    ExtraArgs={"ContentType": "video/mp4"},
                )
        except Exception as e:
            # print(e)
            raise s3_upload_exception

        os.remove(video_change_codec)

    def upload_score_graph_s3(self, s3, scores):
        """Plot the per-chunk anomaly scores and upload the PNG to S3."""
        # NOTE(review): the figure is never closed (plt.close()); repeated
        # calls in one process would overlay plots — confirm single-use.
        plt.plot(scores, color="red")
        plt.title("Anomaly Scores Over Time")
        plt.xlabel(" ")
        plt.ylabel(" ")

        plt.xticks([])
        plt.yticks([])

        save_path = "./model_scores_plot.png"
        plt.savefig(save_path)

        with open(save_path, "rb") as image_file:
            graph = image_file.read()

        s3_upload_exception = HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="score Graph 를 s3 저장소 업로드에 실패했습니다.",
        )
        score_graph_name = "score_graph.png"
        score_graph_url = self.frame_url_base + score_graph_name

        try:
            s3.upload_fileobj(
                BytesIO(graph),
                self.settings.BUCKET,
                score_graph_url,
                ExtraArgs={"ContentType": "image/png"},
            )
        except:
            raise s3_upload_exception

        os.remove(save_path)

    def run(self):
        """Full inference pass: track, score, annotate, and upload."""
        # YOLO
        tracker_model = YOLO("yolov8n-pose.pt")

        # VMAE v2
        backbone = model = create_model(
            "vit_small_patch16_224",
            img_size=224,
            pretrained=False,
            num_classes=710,
            all_frames=16,
        )

        # NOTE(review): hard-coded absolute checkpoint paths — move to config.
        load_dict = torch.load(
            "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/model/pts/vit_s_k710_dl_from_giant.pth"
        )

        backbone.load_state_dict(load_dict["module"])

        tf = A.Resize(224, 224)

        # Define sequence_length, prediction_time, and n_features
        # sequence_length = 20
        # prediction_time = 1
        # n_features = 38

        # LSTM autoencoder
        checkpoint = torch.load(
            "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/model/pts/MIL_20240325_202019_best_auc.pth"
        )
        classifier = vmae.MILClassifier(input_dim=710, drop_p=0.3)
        classifier.load_state_dict(checkpoint["model_state_dict"])

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        tracker_model.to(device)
        backbone.to(device)
        backbone.eval()
        classifier.to(device)
        classifier.eval()

        # Define the standard frame size
        standard_width = 640
        standard_height = 480

        # Open the video file; fall back to downloading from S3 when the
        # presigned URL cannot be opened directly by OpenCV.
        cap = cv2.VideoCapture(self.video)
        temp_name = None
        if not cap.isOpened():
            temp_name = f"{uuid.uuid4()}.mp4"
            self.s3.download_file(self.settings.BUCKET, self.video_url, temp_name)
            cap = cv2.VideoCapture(temp_name)
        fps = cap.get(cv2.CAP_PROP_FPS)

        # NOTE(review): typo "inteval" (interval) — local only, kept as-is here.
        frame_inteval = fps // 3

        # Store the track history
        track_history = defaultdict(lambda: [])

        # Initialize a dictionary to store separate buffers for each ID
        # id_buffers = defaultdict(lambda: [])

        # HTML -> H.264 codec
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")

        # video writer: per-ID score/bbox annotated video, uploaded to S3.
        output_video_path = "./temp_video_path.mp4"
        output_video = cv2.VideoWriter(
            output_video_path, fourcc, fps, (standard_width, standard_height)
        )

        # Define a function to calculate MSE between two sequences
        def calculate_mse(seq1, seq2):
            return np.mean(np.power(seq1 - seq2, 2))

        # anomaly threshold (default 0.02)
        # NOTE(review): self.thr (user-chosen threshold) is ignored in favor
        # of a hard-coded 0.3 — confirm whether intentional.
        # threshold = self.thr
        threshold = 0.3

        # Loop through the video frames
        frame_count = 0
        # net_mse = 0
        # avg_mse = 0
        # score list for the score graph
        scores = []
        # frames to be fed into the VMAE backbone
        v_frames = []

        # temporary buffers, flushed every 16 * frame_inteval frames
        frame_list = []
        # yolo results buffer
        results_list = []
        # temp_for_db buffer
        tfdb_list = []

        while cap.isOpened():
            # Read a frame from the video
            success, frame = cap.read()
            frame_count += 1  # Increment frame count

            if success:
                frame = cv2.resize(frame, (standard_width, standard_height))

                temp_for_db = {
                    "timestamp": None,
                    "bbox": {},
                    "keypoints": {},
                    "score": None,
                }

                # track id (사람) 별로 mse 점수가 나오기 때문에 한 frame 에 여러 mse 점수가 나옴.
이를 frame 별 점수로 구하기 위해서 변수 설정
                # mse_unit = 0

                # Frame position in seconds -> "HH:MM:SS" timestamp string.
                s_timestamp = round(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000, 2)
                datetime_object = datetime.utcfromtimestamp(s_timestamp)
                timestamp = datetime_object.strftime("%H:%M:%S")
                temp_for_db["timestamp"] = timestamp

                # Run YOLOv8 tracking on the frame, persisting tracks between frames
                results = tracker_model.track(frame, persist=True)

                frame_list.append(frame.copy())
                results_list.append(deepcopy(results))
                tfdb_list.append(deepcopy(temp_for_db))

                # Sample 3 frames per second for the VMAE+MIL scorer.
                if (frame_count - 1) % frame_inteval == 0:
                    v_frame = tf(image=frame)["image"]
                    # (224, 224, 3)
                    v_frame = np.expand_dims(v_frame, axis=0)
                    # (1, 224, 224, 3)
                    v_frames.append(v_frame.copy())

                    # Once 16 sampled frames are buffered, run VMAE+MIL.
                    if len(v_frames) == 16:
                        in_frames = np.concatenate(v_frames)
                        # (16, 224, 224, 3)
                        in_frames = in_frames.transpose(3, 0, 1, 2)
                        # (RGB 3, frame T=16, H=224, W=224)
                        in_frames = np.expand_dims(in_frames, axis=0)
                        # (1, 3, 16 * segments_num, 224, 224)
                        in_frames = torch.from_numpy(in_frames).float()
                        # torch.Size([1, 3, 16, 224, 224])

                        in_frames = in_frames.to(device)

                        with torch.no_grad():
                            v_output = backbone(in_frames)
                            # torch.Size([1, 710])
                            v_score = classifier(v_output)
                            # torch.Size([1, 1])
                            scores.append(v_score.cpu().item())

                        v_frames = []

                # Flush the buffered chunk once it covers the same time span
                # as the 16 sampled frames.
                if len(frame_list) == 16 * frame_inteval:
                    for f_step, (frame_i, results_i, temp_for_db_i) in enumerate(
                        zip(frame_list, results_list, tfdb_list)
                    ):
                        if scores[-1] > threshold:
                            anomaly_text = f"Anomaly detected, score: {scores[-1]}"
                            if (
                                results_i[0].boxes is not None
                            ):  # Check if there are results and boxes

                                # Get the boxes
                                boxes = results_i[0].boxes.xywh.cpu()

                                if results_i[0].boxes.id is not None:
                                    # If 'int' attribute exists (there are detections), get the track IDs

                                    track_ids = (
                                        results_i[0].boxes.id.int().cpu().tolist()
                                    )

                                    # Loop through the detections and add data to the DataFrame
                                    # anomaly_text = ""  # Initialize the anomaly text

                                    # Only the frames sampled for VMAE are persisted to the DB.
                                    if f_step % frame_inteval == 0:
                                        # One iteration per person detected in this frame.
                                        for i, box in zip(
                                            range(0, len(track_ids)),
                                            results_i[0].boxes.xywhn.cpu(),
                                        ):

                                            x, y, w, h = box
                                            keypoints = (
                                                results_i[0]
                                                .keypoints.xyn[i]
                                                .cpu()
                                                .numpy()
                                                .flatten()
                                                .tolist()
                                            )

                                            xywhk = np.array(
                                                [float(x), float(y), float(w), float(h)]
                                                + keypoints
                                            )

                                            xywhk = list(
                                                map(lambda x: str(round(x, 4)), xywhk)
                                            )

                                            temp_for_db_i["bbox"][f"id {i}"] = " ".join(
                                                xywhk[:4]
                                            )

                                            temp_for_db_i["keypoints"][f"id {i}"] = (
                                                " ".join(xywhk[4:])
                                            )

                                else:
                                    # If 'int' attribute doesn't exist (no detections), set track_ids to an empty list
                                    track_ids = []

                                # Visualize the results on the frame
                                annotated_frame = results_i[0].plot()
                                self.display_text(
                                    annotated_frame, anomaly_text, (10, 30)
                                )  # Display the anomaly text

                                # Plot the tracks
                                # for box, track_id in zip(boxes, track_ids):
                                #     x, y, w, h = box
                                #     track = track_history[track_id]
                                #     track.append((float(x), float(y)))  # x, y center point
                                #     if len(track) > 30:  # retain 90 tracks for 90 frames
                                #         track.pop(0)

                                #     # Draw the tracking lines
                                #     points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
                                #     cv2.polylines(
                                #         annotated_frame,
                                #         [points],
                                #         isClosed=False,
                                #         color=(230, 230, 230),
                                #         thickness=10,
                                #     )

                                # Display the annotated frame
                                output_video.write(annotated_frame)
                                # cv2.imshow("YOLOv8 Tracking", annotated_frame)
                            else:
                                self.display_text(
                                    frame_i, anomaly_text, (10, 30)
                                )  # Display the anomaly text
                                output_video.write(frame_i)

                            # Only the frames sampled for VMAE are persisted to the DB.
                            if f_step % frame_inteval == 0:
                                temp_for_db_i["score"] = scores[-1]

                                # upload frame to s3
                                frame_url = self.upload_frame_s3(self.s3, frame_i)

                                # upload frame, ts, bbox, kp to db
                                self.upload_frame_db(self.db, temp_for_db_i, frame_url)

                        else:
                            anomaly_text = ""
                            output_video.write(frame_i)
                            # cv2.imshow("YOLOv8 Tracking", frame)

                    # Reset buffers after emitting 16 * frame_inteval frames.
                    frame_list = []
                    results_list = []
                    tfdb_list = []

            else:
                # Flush any trailing frames that never filled a full chunk.
                if len(frame_list) != 0:
                    for f in frame_list:
                        output_video.write(f)

                # Break the loop if the end of the video is reached
                break

        # Release the video capture, video writer object
        cap.release()
        output_video.release()
        # cv2.destroyAllWindows()

        # upload video to s3
        self.upload_video_s3(self.s3, output_video_path)

        # upload score graph to s3
        self.upload_score_graph_s3(self.s3, scores)
        if temp_name:
            os.remove(temp_name)
        os.remove(output_video_path)
diff --git a/app/inference/anomaly_detector_lstmae.py b/app/inference/anomaly_detector_lstmae.py
new file mode 100644
index 0000000..e38e316
--- /dev/null
+++ b/app/inference/anomaly_detector_lstmae.py
@@ -0,0 +1,421 @@
import json
import os
import sys
import uuid
from collections import defaultdict
from datetime import datetime, time
from io import BytesIO

import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from database import crud, schemas
from fastapi import HTTPException
from sklearn.preprocessing import MinMaxScaler
from starlette import status
from ultralytics import YOLO

current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(current_dir))

sys.path.append(os.path.join(parent_dir, "model"))
from lstmae.lstm_ae import LSTMAutoEncoder


# NOTE(review): this class duplicates app/inference/anomaly_detector.py almost
# verbatim (LSTM-AE scorer instead of VMAE+MIL); consider a shared base class.
class AnomalyDetector:
    def __init__(self, video_file, info, s3_client, settings, db):
        # Presigned GET URL so OpenCV can read the source video from S3.
        self.video = s3_client.generate_presigned_url(
            ClientMethod="get_object",
            Params={"Bucket": settings.BUCKET, "Key": video_file},
            ExpiresIn=3600,
        )
        # print(self.video)
        self.info = info
        self.s3 = s3_client
        self.settings = settings
        self.thr = info["threshold"]
        self.video_url =
f"video/{info['user_id']}/{info['upload_id']}/{info['video_uuid_name']}{info['video_ext']}" + self.frame_url_base = f"frame/{info['user_id']}/{info['upload_id']}/" + self.db = db + + def display_text(self, frame, text, position): + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = 1 + font_color = (0, 255, 0) # Green color + font_thickness = 2 + cv2.putText( + frame, + text, + position, + font, + font_scale, + font_color, + font_thickness, + cv2.LINE_AA, + ) + + def upload_frame_db(self, db, temp_for_db, frame_url): + + temp_json_path = "./temp.json" + + with open(temp_json_path, "w") as f: + json.dump(temp_for_db, f) + + with open(temp_json_path, "r") as f: + box_kp_json = json.load(f) + + _frame_create = schemas.FrameCreate( + frame_url=frame_url, + time_stamp=temp_for_db["timestamp"], + box_kp_json=box_kp_json, + score=temp_for_db["score"], + video_id=self.info["video_id"], + ) + + crud.create_frame(db=db, frame=_frame_create) + + os.remove(temp_json_path) + + def upload_frame_s3(self, s3, frame): + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Frame 을 s3 저장소 업로드에 실패했습니다.", + ) + frame_name = uuid.uuid1() + frame_url = self.frame_url_base + f"{frame_name}" + ".png" + # print(frame_url) + + try: + s3.upload_fileobj( + BytesIO(cv2.imencode(".png", frame)[1].tobytes()), + self.settings.BUCKET, + frame_url, + ExtraArgs={"ContentType": "image/png"}, + ) + except Exception as e: + # print(e) + raise s3_upload_exception + + return frame_url + + def upload_video_s3(self, s3, video): + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="video 를 s3 저장소 업로드에 실패했습니다.", + ) + video_url = self.video_url + + video_change_codec = "./temp_video_path_change_codec.mp4" + + os.system('ffmpeg -i "%s" -vcodec libx264 "%s"' % (video, video_change_codec)) + + try: + with open(video_change_codec, "rb") as video_file: + s3.upload_fileobj( + video_file, + self.settings.BUCKET, + video_url, + 
ExtraArgs={"ContentType": "video/mp4"}, + ) + except Exception as e: + # print(e) + raise s3_upload_exception + + os.remove(video_change_codec) + + def upload_score_graph_s3(self, s3, scores): + plt.plot(scores, color="red") + plt.title("Anomaly Scores Over Time") + plt.xlabel(" ") + plt.ylabel(" ") + + plt.xticks([]) + plt.yticks([]) + + save_path = "./model_scores_plot.png" + plt.savefig(save_path) + + with open(save_path, "rb") as image_file: + graph = image_file.read() + + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="score Graph 를 s3 저장소 업로드에 실패했습니다.", + ) + score_graph_name = "score_graph.png" + score_graph_url = self.frame_url_base + score_graph_name + + try: + s3.upload_fileobj( + BytesIO(graph), + self.settings.BUCKET, + score_graph_url, + ExtraArgs={"ContentType": "image/png"}, + ) + except: + raise s3_upload_exception + + os.remove(save_path) + + def run(self): + # YOLO + tracker_model = YOLO("yolov8n-pose.pt") + + # Define sequence_length, prediction_time, and n_features + sequence_length = 20 + prediction_time = 1 + n_features = 38 + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + # LSTM autoencoder + checkpoint = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/model/pts/LSTM_20240324_222238_best.pth" + ) + autoencoder_model = LSTMAutoEncoder( + num_layers=2, hidden_size=50, n_features=n_features, device=device + ) + autoencoder_model.load_state_dict(checkpoint["model_state_dict"]) + tracker_model.to(device) + autoencoder_model.to(device) + + # Define the standard frame size + standard_width = 640 + standard_height = 480 + + # Open the video file + cap = cv2.VideoCapture(self.video) + if not cap.isOpened(): + temp_name = f"{uuid.uuid4()}.mp4" + self.s3.download_file(self.settings.BUCKET, self.video_url, temp_name) + cap = cv2.VideoCapture(temp_name) + fps = cap.get(cv2.CAP_PROP_FPS) + + # Store the track history + track_history = defaultdict(lambda: []) + + # 
Initialize a dictionary to store separate buffers for each ID + id_buffers = defaultdict(lambda: []) + + # HTML -> H.264 codec + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + + # video writer -> ID 별 score, bbox 가 나온 영상을 s3 에 업로드 + output_video_path = "./temp_video_path.mp4" + output_video = cv2.VideoWriter( + output_video_path, fourcc, fps, (standard_width, standard_height) + ) + + # Define a function to calculate MSE between two sequences + def calculate_mse(seq1, seq2): + return np.mean(np.power(seq1 - seq2, 2)) + + # anomaly threshold (default 0.02) + threshold = self.thr + + # Loop through the video frames + frame_count = 0 + net_mse = 0 + avg_mse = 0 + # score graph 를 위한 score list + scores = [] + + while cap.isOpened(): + # Read a frame from the video + success, frame = cap.read() + frame_count += 1 # Increment frame count + + if success: + frame = cv2.resize(frame, (standard_width, standard_height)) + + temp_for_db = { + "timestamp": None, + "bbox": {}, + "keypoints": {}, + "score": None, + } + + # track id (사람) 별로 mse 점수가 나오기 때문에 한 frame 에 여러 mse 점수가 나옴. 이를 frame 별 점수로 구하기 위해서 변수 설정 + mse_unit = 0 + + s_timestamp = round(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000, 2) + datetime_object = datetime.utcfromtimestamp(s_timestamp) + timestamp = datetime_object.strftime("%H:%M:%S") + temp_for_db["timestamp"] = timestamp + + # Run YOLOv8 tracking on the frame, persisting tracks between frames + results = tracker_model.track(frame, persist=True) + + if results[0].boxes is not None: # Check if there are results and boxes + + # Get the boxes + boxes = results[0].boxes.xywh.cpu() + + if results[0].boxes.id is not None: + # If 'int' attribute exists (there are detections), get the track IDs + + track_ids = results[0].boxes.id.int().cpu().tolist() + + # Loop through the detections and add data to the DataFrame + anomaly_text = "" # Initialize the anomaly text + + # 한 프레임에서 검출된 사람만큼 돌아가는 반복문. 2명이면 각 id 별로 아래 연산들이 진행됨. 
+ for i, box in zip( + range(0, len(track_ids)), results[0].boxes.xywhn.cpu() + ): + + x, y, w, h = box + keypoints = ( + results[0] + .keypoints.xyn[i] + .cpu() + .numpy() + .flatten() + .tolist() + ) + + # Append the keypoints to the corresponding ID's buffer + # bbox(4), keypoints per id(34) + id_buffers[track_ids[i]].append( + [float(x), float(y), float(w), float(h)] + keypoints + ) + + # If the buffer size reaches the threshold (e.g., 20 data points), perform anomaly detection + # track_id 별 20프레임이 쌓이면 아래 연산 진행 + if len(id_buffers[track_ids[i]]) >= 20: + # Convert the buffer to a NumPy array + buffer_array = np.array(id_buffers[track_ids[i]]) + + # Scale the data (you can use the same scaler you used during training) + scaler = MinMaxScaler() + buffer_scaled = scaler.fit_transform(buffer_array) + + # Create sequences for prediction + # x_pred: [1,20,38] + x_pred = buffer_scaled[-sequence_length:].reshape( + 1, sequence_length, n_features + ) + + # Predict the next values using the autoencoder model + x_pred = torch.tensor(x_pred, dtype=torch.float32).to( + device + ) + x_pred = autoencoder_model.forward(x_pred) + + # Inverse transform the predicted data to the original scale + x_pred_original = scaler.inverse_transform( + x_pred.cpu() + .detach() + .numpy() + .reshape(-1, n_features) + ) + + # Calculate the MSE between the predicted and actual values + mse = calculate_mse( + buffer_array[-prediction_time:], x_pred_original + ) + + # print(mse) + + net_mse = mse + net_mse + avg_mse = net_mse / frame_count + + mse_unit += mse + + # Check if the MSE exceeds the threshold to detect an anomaly + if mse > 1.5 * avg_mse * 0.25 + 0.75 * threshold: + if anomaly_text == "": + anomaly_text = f"Focus ID {track_ids[i]}" + else: + anomaly_text = f"{anomaly_text}, {track_ids[i]}" + + # print(anomaly_text) + + temp_for_db["bbox"][f"id {i}"] = " ".join( + map( + lambda x: str(round(x, 4)), + buffer_array[-prediction_time:][0, :4], + ) + ) + + temp_for_db["keypoints"][f"id 
{i}"] = " ".join( + map( + lambda x: str(round(x, 4)), + buffer_array[-prediction_time:][0, 4:], + ) + ) + + # Remove the oldest data point from the buffer to maintain its size + id_buffers[track_ids[i]].pop(0) + + if temp_for_db["bbox"] != {}: + + temp_for_db["score"] = mse_unit + + # upload frame to s3 + frame_url = self.upload_frame_s3(self.s3, frame) + + # upload frame, ts, bbox, kp to db + self.upload_frame_db(self.db, temp_for_db, frame_url) + + else: + anomaly_text = "" + # If 'int' attribute doesn't exist (no detections), set track_ids to an empty list + track_ids = [] + + # Visualize the results on the frame + annotated_frame = results[0].plot() + self.display_text( + annotated_frame, anomaly_text, (10, 30) + ) # Display the anomaly text + + # Plot the tracks + for box, track_id in zip(boxes, track_ids): + x, y, w, h = box + track = track_history[track_id] + track.append((float(x), float(y))) # x, y center point + if len(track) > 30: # retain 90 tracks for 90 frames + track.pop(0) + + # Draw the tracking lines + points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2)) + cv2.polylines( + annotated_frame, + [points], + isClosed=False, + color=(230, 230, 230), + thickness=10, + ) + + scores.append(mse_unit) + + # Display the annotated frame + output_video.write(annotated_frame) + # cv2.imshow("YOLOv8 Tracking", annotated_frame) + + else: + # If no detections, display the original frame without annotations + scores.append(mse_unit) + output_video.write(frame) + # cv2.imshow("YOLOv8 Tracking", frame) + + else: + # Break the loop if the end of the video is reached + break + + # Release the video capture, video writer object + cap.release() + output_video.release() + # cv2.destroyAllWindows() + + # upload video to s3 + self.upload_video_s3(self.s3, output_video_path) + + # upload score graph to s3 + self.upload_score_graph_s3(self.s3, scores) + + try: + os.remove(temp_name) + except: + pass + os.remove(output_video_path) diff --git 
a/app/inference/rt_anomaly_detector.py b/app/inference/rt_anomaly_detector.py new file mode 100644 index 0000000..9b70305 --- /dev/null +++ b/app/inference/rt_anomaly_detector.py @@ -0,0 +1,362 @@ +import json +import os +import sys +import uuid +from collections import defaultdict +from datetime import datetime, time, timedelta +from io import BytesIO + +import albumentations as A +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +from database import crud, schemas +from fastapi import HTTPException +from sklearn.preprocessing import MinMaxScaler +from starlette import status +from ultralytics import YOLO + +current_dir = os.path.dirname(os.path.abspath(__file__)) +parent_dir = os.path.dirname(os.path.dirname(current_dir)) + +sys.path.append(os.path.join(parent_dir, "model")) +from copy import deepcopy + +import vmae + +# @@ timm은 0.4.12 버전 사용 필수 +from timm.models import create_model + + +class RT_AnomalyDetector: + def __init__(self, info, s3_client, settings, db, websocket): + self.info = info + self.s3 = s3_client + self.settings = settings + self.frame_url_base = f"frame/{info['user_id']}/{info['upload_id']}/" + self.db = db + self.websocket = websocket + + async def upload_frame_db(self, db, temp_for_db, frame_url): + + temp_json_path = "./temp.json" + + with open(temp_json_path, "w") as f: + json.dump(temp_for_db, f) + + with open(temp_json_path, "r") as f: + box_kp_json = json.load(f) + + _frame_create = schemas.FrameCreate( + frame_url=frame_url, + time_stamp=temp_for_db["timestamp"], + box_kp_json=box_kp_json, + score=temp_for_db["score"], + video_id=self.info["video_id"], + ) + + crud.create_frame(db=db, frame=_frame_create) + + os.remove(temp_json_path) + + async def upload_frame_s3(self, s3, frame): + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Frame 을 s3 저장소 업로드에 실패했습니다.", + ) + frame_name = uuid.uuid1() + frame_url = 
self.frame_url_base + f"{frame_name}" + ".png" + # print(frame_url) + + try: + s3.upload_fileobj( + BytesIO(cv2.imencode(".png", frame)[1].tobytes()), + self.settings.BUCKET, + frame_url, + ExtraArgs={"ContentType": "image/png"}, + ) + except Exception as e: + # print(e) + raise s3_upload_exception + + return frame_url + + def upload_score_graph_s3(self): + plt.plot(self.scores, color="red") + plt.title("Anomaly Scores Over Time") + plt.xlabel(" ") + plt.ylabel(" ") + + plt.xticks([]) + plt.yticks([]) + + save_path = "./model_scores_plot.png" + plt.savefig(save_path) + + with open(save_path, "rb") as image_file: + graph = image_file.read() + + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="score Graph 를 s3 저장소 업로드에 실패했습니다.", + ) + score_graph_name = "score_graph.png" + score_graph_url = self.frame_url_base + score_graph_name + + try: + self.s3.upload_fileobj( + BytesIO(graph), + self.settings.BUCKET, + score_graph_url, + ExtraArgs={"ContentType": "image/png"}, + ) + except: + raise s3_upload_exception + + os.remove(save_path) + + def ready(self): + # YOLO + self.tracker_model = YOLO("yolov8n-pose.pt") + + # VMAE v2 + self.backbone = model = create_model( + "vit_small_patch16_224", + img_size=224, + pretrained=False, + num_classes=710, + all_frames=16, + ) + + load_dict = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/model/pts/vit_s_k710_dl_from_giant.pth" + ) + + self.backbone.load_state_dict(load_dict["module"]) + + self.tf = A.Resize(224, 224) + + # Define sequence_length, prediction_time, and n_features + # sequence_length = 20 + # prediction_time = 1 + # n_features = 38 + + # classifier + checkpoint = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/model/pts/MIL_20240325_202019_best_auc.pth" + ) + self.classifier = vmae.MILClassifier(input_dim=710, drop_p=0.3) + self.classifier.load_state_dict(checkpoint["model_state_dict"]) + + device = torch.device("cuda" if 
torch.cuda.is_available() else "cpu") + self.tracker_model.to(device) + self.backbone.to(device) + self.backbone.eval() + self.classifier.to(device) + self.classifier.eval() + + # Store the track history + self.track_history = defaultdict(lambda: []) + + # Initialize a dictionary to store separate buffers for each ID + self.id_buffers = defaultdict(lambda: []) + + # Loop through the video frames + self.frame_count = 0 + # self.net_mse = 0 + # self.avg_mse = 0 + + # score graph 를 위한 score list + self.scores = [] + + # vmae에 입력할 frame들을 저장할 list + self.v_frames = [] + + # 영상 frame 임시 저장 list + self.frame_list = [] + # yolo results 임시 저장 list + self.results_list = [] + # temp_for_db 임시 저장 list + self.tfdb_list = [] + + # timestamp 저장 + self.prv_timestamp = 0 + self.fps3_delta = timedelta(seconds=1 / 3) + + async def run(self, frame, timestamp): + + # Define the standard frame size + standard_width = 640 + standard_height = 480 + + # Define sequence_length, prediction_time, and n_features + # sequence_length = 20 + # prediction_time = 1 + # n_features = 38 + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # Define a function to calculate MSE between two sequences + def calculate_mse(seq1, seq2): + return np.mean(np.power(seq1 - seq2, 2)) + + # anomaly threshold (default 0.02) + # threshold = self.info["thr"] + threshold = 0.1 + + self.frame_count += 1 # Increment frame count + + frame = cv2.resize(frame, (standard_width, standard_height)) + + temp_for_db = {"timestamp": None, "bbox": {}, "keypoints": {}, "score": None} + + # track id (사람) 별로 mse 점수가 나오기 때문에 한 frame 에 여러 mse 점수가 나옴. 이를 frame 별 점수로 구하기 위해서 변수 설정 + # mse_unit = 0 + + frame_checker = False + + # 이전에 frame을 저장한 시점에서 0.3333.. 
초 이상 경과했는지 확인 + if self.prv_timestamp == 0: + frame_checker = True + self.prv_timestamp = timestamp + else: + time_delta = timestamp - self.prv_timestamp + if time_delta > self.fps3_delta: + frame_checker = True + self.prv_timestamp = timestamp + + timestamp = timestamp.strftime("%H:%M:%S") + temp_for_db["timestamp"] = timestamp + + # Run YOLOv8 tracking on the frame, persisting tracks between frames + + # print(f"==>> frame_checker: {frame_checker}") + # frame_checker = True + # 1초에 3 frame만 저장해서 vmae+MIL에 사용 + if frame_checker: + results = self.tracker_model.track(frame, persist=True, verbose=False) + # print("yolo 1frame inference") + self.frame_list.append(frame.copy()) + self.results_list.append(deepcopy(results)) + self.tfdb_list.append(deepcopy(temp_for_db)) + + v_frame = self.tf(image=frame)["image"] + # (224, 224, 3) + v_frame = np.expand_dims(v_frame, axis=0) + # (1, 224, 224, 3) + self.v_frames.append(v_frame.copy()) + print(f"==>> len(self.v_frames): {len(self.v_frames)}") + + # 16 frame이 모이면 vmae+MIL 계산 + if len(self.v_frames) == 176: + print("VMAE 176frame inference") + in_frames = np.concatenate(self.v_frames) + # (176, 224, 224, 3) + in_frames = in_frames.reshape(11, 16, 224, 224, 3) + in_frames = in_frames.transpose(0, 4, 1, 2, 3) + # (11, RGB 3, frame T=16, H=224, W=224) + in_frames = torch.from_numpy(in_frames).float() + # torch.Size([11, 3, 16, 224, 224]) + + in_frames = in_frames.to(device) + + with torch.no_grad(): + v_output = self.backbone(in_frames) + # torch.Size([11, 710]) + v_output = v_output.view(1, 11, -1) + v_score = self.classifier(v_output) + v_score = v_score.view(1, 11) + print(f"==>> v_score: {v_score}") + print(f"==>> v_score.shape: {v_score.shape}") + # torch.Size([1, 11]) + s_list = [v_score[0, i].cpu().item() for i in range(11)] + + self.v_frames = [] + for f_step, (frame_i, results_i, temp_for_db_i) in enumerate( + zip(self.frame_list, self.results_list, self.tfdb_list) + ): + if s_list[f_step // 16] > threshold: + # if 
True: + anomaly_text = ( + f"Anomaly detected, score: {s_list[f_step // 16]}" + ) + + if ( + results_i[0].boxes is not None + ): # Check if there are results and boxes + + # Get the boxes + boxes = results_i[0].boxes.xywh.cpu() + + if results_i[0].boxes.id is not None: + # If 'int' attribute exists (there are detections), get the track IDs + + track_ids = results_i[0].boxes.id.int().cpu().tolist() + + # Loop through the detections and add data to the DataFrame + # anomaly_text = "" # Initialize the anomaly text + + # 한 프레임에서 검출된 사람만큼 돌아가는 반복문. 2명이면 각 id 별로 아래 연산들이 진행됨. + for i, box in zip( + range(0, len(track_ids)), + results_i[0].boxes.xywhn.cpu(), + ): + + x, y, w, h = box + keypoints = ( + results_i[0] + .keypoints.xyn[i] + .cpu() + .numpy() + .flatten() + .tolist() + ) + + xywhk = np.array( + [float(x), float(y), float(w), float(h)] + + keypoints + ) + + xywhk = list(map(lambda x: str(round(x, 4)), xywhk)) + + temp_for_db_i["bbox"][f"id {i}"] = " ".join( + xywhk[:4] + ) + + temp_for_db_i["keypoints"][f"id {i}"] = " ".join( + xywhk[4:] + ) + + else: + # If 'int' attribute doesn't exist (no detections), set track_ids to an empty list + track_ids = [] + + # self.scores.append(mse_unit) + + # Display the annotated frame + # cv2.imshow("YOLOv8 Tracking", annotated_frame) + + # else: + # If no detections, display the original frame without annotations + # self.scores.append(mse_unit) + # cv2.imshow("YOLOv8 Tracking", frame) + + temp_for_db_i["score"] = s_list[f_step // 16] + + # upload frame to s3 + frame_url = await self.upload_frame_s3(self.s3, frame_i) + + # upload frame, ts, bbox, kp to db + await self.upload_frame_db(self.db, temp_for_db_i, frame_url) + + await self.websocket.send_text(f"{timestamp}: {anomaly_text}") + + # 초기화 + self.scores.extend(deepcopy(s_list)) + s_list = [] + self.frame_list = [] + self.results_list = [] + self.tfdb_list = [] diff --git a/app/inference/rt_anomaly_detector_lstmae.py b/app/inference/rt_anomaly_detector_lstmae.py new file 
mode 100644 index 0000000..6035a43 --- /dev/null +++ b/app/inference/rt_anomaly_detector_lstmae.py @@ -0,0 +1,304 @@ +import json +import os +import sys +import uuid +from collections import defaultdict +from datetime import datetime, time +from io import BytesIO + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +from database import crud, schemas +from fastapi import HTTPException +from sklearn.preprocessing import MinMaxScaler +from starlette import status +from ultralytics import YOLO + +current_dir = os.path.dirname(os.path.abspath(__file__)) +parent_dir = os.path.dirname(os.path.dirname(current_dir)) + +sys.path.append(os.path.join(parent_dir, "model")) + +from lstmae.lstm_ae import LSTMAutoEncoder + + +class RT_AnomalyDetector: + def __init__(self, info, s3_client, settings, db, websocket): + self.info = info + self.s3 = s3_client + self.settings = settings + self.frame_url_base = f"frame/{info['user_id']}/{info['upload_id']}/" + self.db = db + self.websocket = websocket + + async def upload_frame_db(self, db, temp_for_db, frame_url): + + temp_json_path = "./temp.json" + + with open(temp_json_path, "w") as f: + json.dump(temp_for_db, f) + + with open(temp_json_path, "r") as f: + box_kp_json = json.load(f) + + _frame_create = schemas.FrameCreate( + frame_url=frame_url, + time_stamp=temp_for_db["timestamp"], + box_kp_json=box_kp_json, + score=temp_for_db["score"], + video_id=self.info["video_id"], + ) + + crud.create_frame(db=db, frame=_frame_create) + + os.remove(temp_json_path) + + async def upload_frame_s3(self, s3, frame): + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Frame 을 s3 저장소 업로드에 실패했습니다.", + ) + frame_name = uuid.uuid1() + frame_url = self.frame_url_base + f"{frame_name}" + ".png" + # print(frame_url) + + try: + s3.upload_fileobj( + BytesIO(cv2.imencode(".png", frame)[1].tobytes()), + self.settings.BUCKET, + frame_url, + 
ExtraArgs={"ContentType": "image/png"}, + ) + except Exception as e: + # print(e) + raise s3_upload_exception + + return frame_url + + def upload_score_graph_s3(self): + plt.plot(self.scores, color="red") + plt.title("Anomaly Scores Over Time") + plt.xlabel(" ") + plt.ylabel(" ") + + plt.xticks([]) + plt.yticks([]) + + save_path = "./model_scores_plot.png" + plt.savefig(save_path) + + with open(save_path, "rb") as image_file: + graph = image_file.read() + + s3_upload_exception = HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="score Graph 를 s3 저장소 업로드에 실패했습니다.", + ) + score_graph_name = "score_graph.png" + score_graph_url = self.frame_url_base + score_graph_name + + try: + self.s3.upload_fileobj( + BytesIO(graph), + self.settings.BUCKET, + score_graph_url, + ExtraArgs={"ContentType": "image/png"}, + ) + except: + raise s3_upload_exception + + os.remove(save_path) + + def ready(self): + # YOLO + self.tracker_model = YOLO("yolov8n-pose.pt") + + # Define sequence_length, prediction_time, and n_features + sequence_length = 20 + prediction_time = 1 + n_features = 38 + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # LSTM autoencoder + # LSTM autoencoder + checkpoint = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/model/pts/LSTM_20240324_222238_best.pth" + ) + self.autoencoder_model = LSTMAutoEncoder( + num_layers=2, hidden_size=50, n_features=n_features, device=device + ) + self.autoencoder_model.load_state_dict(checkpoint["model_state_dict"]) + + self.tracker_model.to(device) + self.autoencoder_model.to(device) + + # Store the track history + self.track_history = defaultdict(lambda: []) + + # Initialize a dictionary to store separate buffers for each ID + self.id_buffers = defaultdict(lambda: []) + + # Loop through the video frames + self.frame_count = 0 + self.net_mse = 0 + self.avg_mse = 0 + # score graph 를 위한 score list + self.scores = [] + + async def run(self, frame, timestamp): + + # Define the 
standard frame size + standard_width = 640 + standard_height = 480 + + # Define sequence_length, prediction_time, and n_features + sequence_length = 20 + prediction_time = 1 + n_features = 38 + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + # Define a function to calculate MSE between two sequences + def calculate_mse(seq1, seq2): + return np.mean(np.power(seq1 - seq2, 2)) + + # anomaly threshold (default 0.02) + threshold = self.info["thr"] + + self.frame_count += 1 # Increment frame count + + frame = cv2.resize(frame, (standard_width, standard_height)) + + temp_for_db = {"timestamp": None, "bbox": {}, "keypoints": {}, "score": None} + + # track id (사람) 별로 mse 점수가 나오기 때문에 한 frame 에 여러 mse 점수가 나옴. 이를 frame 별 점수로 구하기 위해서 변수 설정 + mse_unit = 0 + + timestamp = timestamp.strftime("%H:%M:%S") + temp_for_db["timestamp"] = timestamp + + # Run YOLOv8 tracking on the frame, persisting tracks between frames + results = self.tracker_model.track(frame, persist=True) + + if results[0].boxes is not None: # Check if there are results and boxes + + # Get the boxes + boxes = results[0].boxes.xywh.cpu() + + if results[0].boxes.id is not None: + # If 'int' attribute exists (there are detections), get the track IDs + + track_ids = results[0].boxes.id.int().cpu().tolist() + + # Loop through the detections and add data to the DataFrame + anomaly_text = "" # Initialize the anomaly text + + # 한 프레임에서 검출된 사람만큼 돌아가는 반복문. 2명이면 각 id 별로 아래 연산들이 진행됨. 
+ for i, box in zip( + range(0, len(track_ids)), results[0].boxes.xywhn.cpu() + ): + + x, y, w, h = box + keypoints = ( + results[0].keypoints.xyn[i].cpu().numpy().flatten().tolist() + ) + + # Append the keypoints to the corresponding ID's buffer + # bbox(4), keypoints per id(34) + self.id_buffers[track_ids[i]].append( + [float(x), float(y), float(w), float(h)] + keypoints + ) + + # If the buffer size reaches the threshold (e.g., 20 data points), perform anomaly detection + # track_id 별 20프레임이 쌓이면 아래 연산 진행 + if len(self.id_buffers[track_ids[i]]) >= 20: + # Convert the buffer to a NumPy array + buffer_array = np.array(self.id_buffers[track_ids[i]]) + + # Scale the data (you can use the same scaler you used during training) + scaler = MinMaxScaler() + buffer_scaled = scaler.fit_transform(buffer_array) + + # Create sequences for prediction + x_pred = buffer_scaled[-sequence_length:].reshape( + 1, sequence_length, n_features + ) + + # Predict the next values using the autoencoder model + x_pred = torch.tensor(x_pred, dtype=torch.float32).to(device) + x_pred = self.autoencoder_model.forward(x_pred) + + # Inverse transform the predicted data to the original scale + x_pred_original = scaler.inverse_transform( + x_pred.cpu().detach().numpy().reshape(-1, n_features) + ) + + # Calculate the MSE between the predicted and actual values + mse = calculate_mse( + buffer_array[-prediction_time:], x_pred_original + ) + + # print(mse) + + self.net_mse = mse + self.net_mse + self.avg_mse = self.net_mse / self.frame_count + + mse_unit += mse + + # Check if the MSE exceeds the threshold to detect an anomaly + if mse > 1.5 * (self.avg_mse) * 0.25 + 0.75 * threshold: + + if anomaly_text == "": + anomaly_text = f"이상행동이 감지되었습니다." + else: + anomaly_text = f"이상행동이 감지되었습니다." 
+ + # print(anomaly_text) + + temp_for_db["bbox"][f"id {i}"] = " ".join( + map( + lambda x: str(round(x, 4)), + buffer_array[-prediction_time:][0, :4], + ) + ) + + temp_for_db["keypoints"][f"id {i}"] = " ".join( + map( + lambda x: str(round(x, 4)), + buffer_array[-prediction_time:][0, 4:], + ) + ) + + # Remove the oldest data point from the buffer to maintain its size + self.id_buffers[track_ids[i]].pop(0) + + if temp_for_db["bbox"] != {}: + + temp_for_db["score"] = mse_unit + + # upload frame to s3 + frame_url = await self.upload_frame_s3(self.s3, frame) + + # upload frame, ts, bbox, kp to db + await self.upload_frame_db(self.db, temp_for_db, frame_url) + + await self.websocket.send_text(f"{timestamp}: {anomaly_text}") + + else: + anomaly_text = "" + # If 'int' attribute doesn't exist (no detections), set track_ids to an empty list + track_ids = [] + + self.scores.append(mse_unit) + + # Display the annotated frame + # cv2.imshow("YOLOv8 Tracking", annotated_frame) + + else: + # If no detections, display the original frame without annotations + self.scores.append(mse_unit) + # cv2.imshow("YOLOv8 Tracking", frame) diff --git a/app/main.py b/app/main.py new file mode 100644 index 0000000..7152aa0 --- /dev/null +++ b/app/main.py @@ -0,0 +1,74 @@ +import os +from datetime import datetime, timedelta + +import uvicorn +from api import album_router, real_time_router, upload_router, user_router +from fastapi import FastAPI, Form, Request, Response +from fastapi.middleware.cors import CORSMiddleware +from fastapi.staticfiles import StaticFiles +from fastapi.templating import Jinja2Templates +from jose import jwt +from utils.config import settings +from utils.security import get_current_user + +templates = Jinja2Templates(directory="templates") + +app = FastAPI() +static_dir = os.path.join(os.path.dirname(__file__), "templates", "src") +app.mount("/src", StaticFiles(directory=static_dir), name="src") + +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + 
allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + + +@app.get("/") +async def main_get(request: Request): + user = get_current_user(request) + if user: + return templates.TemplateResponse( + "main.html", {"request": request, "token": user.email} + ) + else: + return templates.TemplateResponse( + "main.html", {"request": request, "token": None} + ) + + +@app.post("/") +async def main_post(request: Request): + body = await request.form() + email = body["email"] + data = { + "sub": email, + "exp": datetime.utcnow() + + timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES), + } + token = jwt.encode(data, settings.SECRET_KEY, algorithm=settings.ALGORITHM) + + template_response = templates.TemplateResponse( + "main.html", {"request": request, "token": email} + ) + + # 쿠키 저장 + template_response.set_cookie( + key="access_token", + value=token, + expires=timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES), + httponly=True, + ) + + return template_response + + +app.include_router(user_router.router) +app.include_router(upload_router.router) +app.include_router(album_router.router) +app.include_router(real_time_router.router) + +if __name__ == "__main__": + uvicorn.run("main:app", host="0.0.0.0", port=30011, reload=True) diff --git a/app/templates/album_detail.html b/app/templates/album_detail.html new file mode 100644 index 0000000..03f06d5 --- /dev/null +++ b/app/templates/album_detail.html @@ -0,0 +1,100 @@ +{% extends 'base.html' %} + +{% block title %} +Album_Detail +{% endblock %} + +{% block style %} +{{ super() }} +{% endblock %} + +{% block header %} +{{ super() }} +{% endblock %} + +{% block main %} + {% if loading %} + + + + {% endif %} + +
+
+

Video

+ {% if video_info.is_realtime %} +
실시간 서비스는 스크린샷만 제공됩니다.
+ {% else %} + + {% endif %} +
+
+
+
+

+ +

+
+
+
+
+
+

사용자 ID: {{ video_info.user_id }}

+

업로드 ID: {{ video_info.upload_id }}

+

업로드 이름: {{ video_info.upload_name }}

+

날짜: {{ video_info.date }}

+ {% if video_info.is_realtime %} +

종류: 실시간
실시간 서비스는 영상을 녹화하지 않습니다. 따라서 앨범에서 전체 영상은 보이지 않습니다.

+ {% else %} +

종류: 녹화 영상

+ {% endif %} +
+
+
+
+
+
+
+
+
+
+

Screenshot

+
+
+
+
+{% endblock %} + +{% block script %} + + +{% endblock %} \ No newline at end of file diff --git a/app/templates/album_list.html b/app/templates/album_list.html new file mode 100644 index 0000000..be5747c --- /dev/null +++ b/app/templates/album_list.html @@ -0,0 +1,121 @@ +{% extends 'base.html' %} + +{% block title %} +Album List +{% endblock %} + +{% block header %} +{{ super() }} +{% endblock %} + +{% block style %} +{% endblock %} + +{% block main %} +
+
+ + + + + + + + + + + + + {% for upload in album_list %} + + + + + + + + + {% endfor %} + +
#이름날짜&시간이상탐지분석 여부Button
{{ loop.index }} + + + {{ upload.name }} + + + {{ upload.date }} + + {% if upload.is_realtime %} + 실시간 + {% else %} + 녹화 영상 + {% endif %} + + {% if upload.completes[0].completed %} +
분석 완료
+ {% else %} +
분석중
+ {% endif %} +
+ + +
+
+
+ + + + + +{% endblock %} + +{% block script %} + +{% endblock %} + \ No newline at end of file diff --git a/app/templates/base.html b/app/templates/base.html new file mode 100644 index 0000000..4763758 --- /dev/null +++ b/app/templates/base.html @@ -0,0 +1,130 @@ + + + + + + {% block title %} {% endblock %} + + {% block style %} + + + {% endblock %} + + + + {% block header %} + + {% endblock %} + {% block main %} {% endblock %} + {% block footer %} {% endblock %} + {% block script %} {% endblock %} + + + diff --git a/app/templates/frame.html b/app/templates/frame.html new file mode 100644 index 0000000..295ae4d --- /dev/null +++ b/app/templates/frame.html @@ -0,0 +1,76 @@ + + + + + + + Screenshot Details + + + +
+ +
+ Large Image +
+ + +
+

{{ frame_info.time_stamp }} 이미지

+ + + + + +
+
+ + + + diff --git a/app/templates/login.html b/app/templates/login.html new file mode 100644 index 0000000..b0171de --- /dev/null +++ b/app/templates/login.html @@ -0,0 +1,52 @@ +{% extends 'base.html' %} + +{% block title%} +Login +{% endblock %} + +{% block style %} +{% endblock %} + +{% block header %} + +{% endblock %} + + + +{% block main %} +
+
+ +
+ +
+ + + {% if err.user %} +

{{ err.user }}

+ {% endif %} +
+ +
+ + + {% if err.pw %} +

{{ err.pw }}

+ {% endif %} +
+ +
+ +
+ +
+ + Sign up + +
+
+{% endblock %} diff --git a/app/templates/main.html b/app/templates/main.html new file mode 100644 index 0000000..7732860 --- /dev/null +++ b/app/templates/main.html @@ -0,0 +1,70 @@ +{% extends 'base.html' %} + +{% block title %} +Main Page +{% endblock %} + +{% block style %} +{% endblock %} + +{% block header %} +{{ super() }} +{% endblock %} + +{% block main %} +
+
+
+

무인매장 이상행동 탐지

+

동영상 파일과 실시간 영상에서 이상행동을 탐지해 드립니다.

+
+ {% if not token %} +

로그인 후 무료로 이용해보세요

+ {% endif %} + 지금 시작하기 +
+
+ +
+
+ +
+
+ Image 1 +

녹화 영상

+

녹화된 영상을 업로드하여
이상행동을 탐지합니다.

+ {% if token %} +

바로가기 »

+ {% else %} +

바로가기 »

+ {% endif %} +
+
+ Image 1 +

실시간

+

웹캠, CCTV 등을 연결하여
실시간으로 이상행동을 탐지합니다.

+ {% if token %} +

바로가기 »

+ {% else %} +

바로가기 »

+ {% endif %} +
+
+ Image 1 +

앨범 기능

+

녹화 영상과 실시간에서 탐지된 이상행동을
스크린샷으로 저장하여 보관합니다.

+ {% if token %} +

바로가기 »

+ {% else %} +

바로가기 »

+ {% endif %} +
+
+
+
+ +
+{% endblock %} \ No newline at end of file diff --git a/app/templates/real_time.html b/app/templates/real_time.html new file mode 100644 index 0000000..8e92354 --- /dev/null +++ b/app/templates/real_time.html @@ -0,0 +1,43 @@ +{% extends 'base.html' %} + +{% block title %} +Real Time +{% endblock %} + +{% block style %} +{% endblock %} + +{% block header %} +{{ super() }} +{% endblock %} + +{% block main %} +
+

이상행동을 탐지할 cctv를 연결 해주세요.

+
+
+ + +
+
+ + +
+
+ +

이상행동 판단 정도입니다. 느슨할수록 정확한 이상행동이 아니더라도 잡아냅니다. 엄격할수록 정확한 이상행동일 때만 잡아냅니다.

+ +
+

느슨하게

+

보통

+

엄격하게

+
+
+
+ + +
+ +
+
+{% endblock %} diff --git a/app/templates/signup.html b/app/templates/signup.html new file mode 100644 index 0000000..b023df1 --- /dev/null +++ b/app/templates/signup.html @@ -0,0 +1,58 @@ +{% extends 'base.html' %} + +{% block title%} +Sign Up +{% endblock %} + +{% block style %} +{% endblock %} + +{% block header %} + +{% endblock %} + +{% block main %} +
+
+ +
+ +
+ + + {% if err.user %} +

{{ err.user }}

+ {% endif %} +
+ +
+ + + {% if err.pw %} +

{{ err.pw }}

+ {% endif %} +
+ +
+ + + {% if err.check_pw %} +

{{ err.check_pw }}

+ {% endif %} +
+ +
+ +
+ +
+ + Log In + +
+
+{% endblock %} diff --git a/app/templates/src/album_detail.js b/app/templates/src/album_detail.js new file mode 100644 index 0000000..26c654c --- /dev/null +++ b/app/templates/src/album_detail.js @@ -0,0 +1,72 @@ +if (videoInfo.frame_urls === "Nothing") { + const messageDiv = document.createElement('div'); + messageDiv.classList.add('container-xl', 'text-center', 'mt-2', 'py-4'); + const messageHeader = document.createElement('h4'); + messageHeader.classList.add('text-start'); + messageHeader.innerText = '아무런 이상행동이 탐지되지 않았습니다.'; + messageDiv.appendChild(messageHeader); + screenshotContainer.parentNode.insertBefore(messageDiv, screenshotContainer.nextSibling); +} else { + videoInfo.frame_urls.forEach(frameInfo => { + const col = document.createElement('div'); + col.classList.add('col'); + + const image = document.createElement('img'); + image.classList.add('img-thumbnail'); + image.src = frameInfo[1]; + + const timestampLink = document.createElement('a'); + timestampLink.classList.add('link-underline-light', 'text-reset'); + timestampLink.href = '#'; + timestampLink.innerText = frameInfo[2]; + + col.appendChild(image); + col.appendChild(timestampLink); + screenshotContainer.appendChild(col); + + // 이미지를 누르면 확대된 이미지와 버튼이 있는 팝업창을 띄우기 + image.onclick = () => showPopup(frameInfo[0]); + + // 타임스탬프를 누르면 비디오가 해당 타임스탬프로 이동 + timestampLink.onclick = () => seekVideo(frameInfo[2]); + }); +} + +function showPopup(imageUrl) { + // 팝업창 가운데 정렬을 위한 스크린 가로, 세로 크기 계산 + const screenWidth = window.screen.width; + const screenHeight = window.screen.height; + + // 팝업창 크기 조정 + const popupWidth = Math.min(screenWidth * 0.8, 800); // 최대 80% 화면 크기 또는 800px + const popupHeight = Math.min(screenHeight * 0.8, 600); // 최대 80% 화면 크기 또는 600px + + // 팝업창 가운데 정렬을 위한 위치 계산 + const left = (screenWidth - popupWidth) / 2; + const top = (screenHeight - popupHeight) / 2; + + // 팝업창 열기 + const popupWindow = window.open(`/album/details/images?frame_id=${imageUrl}`, 'ImagePopup', `width=${popupWidth}, 
height=${popupHeight}, top=${top}, left=${left}`); + if (popupWindow) { + popupWindow.focus(); + } +} + +function seekVideo(timestamp) { + // timestamp 문자열을 시, 분, 초로 분해 + const timeParts = timestamp.split(':'); + + // 각 부분을 정수로 변환 + const hours = parseInt(timeParts[0], 10); + const minutes = parseInt(timeParts[1], 10); + const seconds = parseInt(timeParts[2], 10); + + // 비디오의 currentTime 설정 + const video = document.getElementById('vid'); + video.currentTime = hours * 3600 + minutes * 60 + seconds; +} + +var loading = '{{ loading|tojson|safe }}'; +if (loading) { + document.body.classList.add('loading'); +} \ No newline at end of file diff --git a/app/templates/src/album_list.js b/app/templates/src/album_list.js new file mode 100644 index 0000000..43c7bb2 --- /dev/null +++ b/app/templates/src/album_list.js @@ -0,0 +1,33 @@ +function redirectToDetails(user_id, upload_id) { + // URL 생성 + let url = `/album/details?user_id=${user_id}&upload_id=${upload_id}`; + + // 페이지 리디렉션 + window.location.href = url; +} + +document.addEventListener('DOMContentLoaded', function() { + let editButtons = document.querySelectorAll('.edit-btn'); + let deleteButtons = document.querySelectorAll('.delete-btn'); + + editButtons.forEach(function(button) { + button.addEventListener('click', function() { + let uploadId = this.getAttribute('data-uploadid'); + let origin_name = this.getAttribute('data-name'); + + document.getElementById('modifyUploadID').value = uploadId; + document.getElementById('originName').value = origin_name; + }); + }); + + + deleteButtons.forEach(function(button) { + button.addEventListener('click', function() { + let uploadId = this.getAttribute('data-uploadid'); + let isRealTime = this.getAttribute('data-is-real-time'); + + document.getElementById('deleteUploadID').value = uploadId; + document.getElementById('isRealTime').value = isRealTime; + }); + }); +}); \ No newline at end of file diff --git a/app/templates/src/video.js b/app/templates/src/video.js new file mode 
100644 index 0000000..26c654c --- /dev/null +++ b/app/templates/src/video.js @@ -0,0 +1,72 @@ +if (videoInfo.frame_urls === "Nothing") { + const messageDiv = document.createElement('div'); + messageDiv.classList.add('container-xl', 'text-center', 'mt-2', 'py-4'); + const messageHeader = document.createElement('h4'); + messageHeader.classList.add('text-start'); + messageHeader.innerText = '아무런 이상행동이 탐지되지 않았습니다.'; + messageDiv.appendChild(messageHeader); + screenshotContainer.parentNode.insertBefore(messageDiv, screenshotContainer.nextSibling); +} else { + videoInfo.frame_urls.forEach(frameInfo => { + const col = document.createElement('div'); + col.classList.add('col'); + + const image = document.createElement('img'); + image.classList.add('img-thumbnail'); + image.src = frameInfo[1]; + + const timestampLink = document.createElement('a'); + timestampLink.classList.add('link-underline-light', 'text-reset'); + timestampLink.href = '#'; + timestampLink.innerText = frameInfo[2]; + + col.appendChild(image); + col.appendChild(timestampLink); + screenshotContainer.appendChild(col); + + // 이미지를 누르면 확대된 이미지와 버튼이 있는 팝업창을 띄우기 + image.onclick = () => showPopup(frameInfo[0]); + + // 타임스탬프를 누르면 비디오가 해당 타임스탬프로 이동 + timestampLink.onclick = () => seekVideo(frameInfo[2]); + }); +} + +function showPopup(imageUrl) { + // 팝업창 가운데 정렬을 위한 스크린 가로, 세로 크기 계산 + const screenWidth = window.screen.width; + const screenHeight = window.screen.height; + + // 팝업창 크기 조정 + const popupWidth = Math.min(screenWidth * 0.8, 800); // 최대 80% 화면 크기 또는 800px + const popupHeight = Math.min(screenHeight * 0.8, 600); // 최대 80% 화면 크기 또는 600px + + // 팝업창 가운데 정렬을 위한 위치 계산 + const left = (screenWidth - popupWidth) / 2; + const top = (screenHeight - popupHeight) / 2; + + // 팝업창 열기 + const popupWindow = window.open(`/album/details/images?frame_id=${imageUrl}`, 'ImagePopup', `width=${popupWidth}, height=${popupHeight}, top=${top}, left=${left}`); + if (popupWindow) { + popupWindow.focus(); + } +} + +function 
seekVideo(timestamp) { + // timestamp 문자열을 시, 분, 초로 분해 + const timeParts = timestamp.split(':'); + + // 각 부분을 정수로 변환 + const hours = parseInt(timeParts[0], 10); + const minutes = parseInt(timeParts[1], 10); + const seconds = parseInt(timeParts[2], 10); + + // 비디오의 currentTime 설정 + const video = document.getElementById('vid'); + video.currentTime = hours * 3600 + minutes * 60 + seconds; +} + +var loading = '{{ loading|tojson|safe }}'; +if (loading) { + document.body.classList.add('loading'); +} \ No newline at end of file diff --git a/app/templates/stream.html b/app/templates/stream.html new file mode 100644 index 0000000..d1761cb --- /dev/null +++ b/app/templates/stream.html @@ -0,0 +1,374 @@ + + + + + + IVT Main Page(Login) + + + + + + + + + +
+
+

Video

+
+ {% if video_info.video_url == "web" %} + + + {% else %} +
+ 연결 중... +
+ + {% endif %} +
+
실시간 서비스는 영상이 저장되지 않습니다.
+ +

Log

+
+
+
+
+
+
+
+
+

+ +

+
+
+
+
+
+

사용자 ID: {{ video_info.user_id }}

+

업로드 ID: {{ video_info.upload_id }}

+

업로드 이름: {{ video_info.upload_name }}

+

날짜: {{ video_info.date }}

+ {% if video_info.is_realtime %} +

종류: 실시간
실시간 서비스는 영상을 녹화하지 않습니다. 따라서 앨범에서 접근할 시 전체 영상은 + 보이지 않습니다.

+ {% else %} +

종류: 녹화 영상

+ {% endif %} +
+ +
+
+
+
+
+
+
+
+
+

Screenshot

+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/app/templates/upload.html b/app/templates/upload.html new file mode 100644 index 0000000..02bcda3 --- /dev/null +++ b/app/templates/upload.html @@ -0,0 +1,60 @@ +{% extends 'base.html' %} + +{% block title %} +Upload +{% endblock %} + +{% block style %} +{% endblock %} + +{% block header %} +{{ super() }} +{% endblock %} + +{% block main %} +
+

이상행동을 탐지할 동영상을 업로드 해주세요.

+
+
+ + +
+
+ + +
+
+ +

이상행동 판단 정도입니다. 느슨할수록 정확한 이상행동이 아니더라도 잡아냅니다. 엄격할수록 정확한 이상행동일 때만 잡아냅니다.

+ +
+

느슨하게

+

보통

+

엄격하게

+
+
+
+ + + {% if err.file_ext %} +

{{ err.file_ext }}

+ {% endif %} +
+ +
+ +
+
+ +
+{% endblock %} diff --git a/app/templates/video.html b/app/templates/video.html new file mode 100644 index 0000000..d4bb92d --- /dev/null +++ b/app/templates/video.html @@ -0,0 +1,109 @@ +{% extends 'base.html' %} + +{% block title %} +Video +{% endblock %} + +{% block style %} +{{ super() }} +{% endblock %} + +{% block header %} +{{ super() }} +{% endblock %} + +{% block main %} + {% if loading %} + + + + {% endif %} + +
+
+

Video

+ {% if video_info.is_realtime %} +
실시간 서비스는 스크린샷만 제공됩니다.
+ {% else %} + + {% endif %} +
+
+
+
+

+ +

+
+
+
+
+
+

사용자 ID: {{ video_info.user_id }}

+

업로드 ID: {{ video_info.upload_id }}

+

업로드 이름: {{ video_info.upload_name }}

+

날짜: {{ video_info.date }}

+ {% if video_info.is_realtime %} +

종류: 실시간
실시간 서비스는 영상을 녹화하지 않습니다. 따라서 앨범에서 전체 영상은 보이지 않습니다.

+ {% else %} +

종류: 녹화 영상

+ {% endif %} +
+ +
+
+
+
+
+
+
+
+
+

Screenshot

+
+ +
+
+
+{% endblock %} + +{% block script %} + + +{% endblock %} \ No newline at end of file diff --git a/app/utils/__init__.py b/app/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/app/utils/config.py b/app/utils/config.py new file mode 100644 index 0000000..7fbf892 --- /dev/null +++ b/app/utils/config.py @@ -0,0 +1,36 @@ +from dotenv import load_dotenv +from pydantic_settings import BaseSettings + +load_dotenv() + + +class Settings(BaseSettings): + API_V1_STR: str = "/api/v1" + SECRET_KEY: str = "55c84cbfa7f9e183da2179cb34cc45526bea05ee80b5bef66ed950534730bf5d" + ALGORITHM: str = "HS256" + # 60 minutes * 24 hours * 7 days = 7 days + ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 7 + + MYSQL_SERVER_IP: str + MYSQL_SERVER_PORT: int + MYSQL_SERVER_USER: str + MYSQL_SERVER_PASSWORD: str + MYSQL_DATABASE: str + + AWS_ACCESS_KEY: str + AWS_SECRET_KEY: str + BUCKET: str + + SMTP_ADDRESS: str + SMTP_PORT: int + MAIL_ACCOUNT: str + MAIL_PASSWORD: str + + UPLOAD_MODEL_SERVER_IP: str + STREAM_MODEL_SERVER_IP: str + + class Config: + env_file = ".env" + + +settings = Settings() diff --git a/app/utils/security.py b/app/utils/security.py new file mode 100644 index 0000000..3dcf312 --- /dev/null +++ b/app/utils/security.py @@ -0,0 +1,42 @@ +from datetime import datetime, timedelta +from typing import Any, Union + +from database import models +from database.database import engine +from fastapi import Request +from jose import JWTError, jwt +from passlib.context import CryptContext +from sqlalchemy.orm import Session +from utils.config import settings + +pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto") + + +def verify_password(plain_password: str, hashed_password: str) -> bool: + return pwd_context.verify(plain_password, hashed_password) + + +def get_password_hash(password: str) -> str: + return pwd_context.hash(password) + + +def get_current_user(request: Request): + token = request.cookies.get("access_token", None) + + try: + if token: + 
payload = jwt.decode( + token, settings.SECRET_KEY, algorithms=[settings.ALGORITHM] + ) + email = payload.get("sub", None) + session = Session(engine) + user = session.query(models.User).filter(models.User.email == email).first() + session.close() + if user: + return user + else: + return None + else: + return None + except: + return JWTError() diff --git a/app/utils/utils.py b/app/utils/utils.py new file mode 100644 index 0000000..e774f05 --- /dev/null +++ b/app/utils/utils.py @@ -0,0 +1,31 @@ +import boto3 +from botocore.config import Config +from database import crud +from fastapi import HTTPException, status +from inference.anomaly_detector import AnomalyDetector +from utils.config import settings + +# from inference.anomaly_detector_lstmae import AnomalyDetector + +boto_config = Config( + signature_version="v4", +) + +s3 = boto3.client( + "s3", + config=boto_config, + region_name="ap-northeast-2", + aws_access_key_id=settings.AWS_ACCESS_KEY, + aws_secret_access_key=settings.AWS_SECRET_KEY, +) + + +def run_model(video_url, info, settings, db, s3=s3): + + model = AnomalyDetector( + video_file=video_url, info=info, s3_client=s3, settings=settings, db=db + ) + model.run() + + crud.update_complete_status(db=db, upload_id=info["upload_id"]) + return diff --git a/asset/94d6ffb9-0567-43c0-b90a-aa4030a14655.png b/asset/94d6ffb9-0567-43c0-b90a-aa4030a14655.png new file mode 100644 index 0000000..33ca67b Binary files /dev/null and b/asset/94d6ffb9-0567-43c0-b90a-aa4030a14655.png differ diff --git a/asset/Untitled 1.png b/asset/Untitled 1.png new file mode 100644 index 0000000..6a5e054 Binary files /dev/null and b/asset/Untitled 1.png differ diff --git a/asset/Untitled 10.png b/asset/Untitled 10.png new file mode 100644 index 0000000..ac4fc1f Binary files /dev/null and b/asset/Untitled 10.png differ diff --git a/asset/Untitled 11.jpg b/asset/Untitled 11.jpg new file mode 100644 index 0000000..9607970 Binary files /dev/null and b/asset/Untitled 11.jpg differ diff --git 
a/asset/Untitled 12.png b/asset/Untitled 12.png new file mode 100644 index 0000000..ec67ec5 Binary files /dev/null and b/asset/Untitled 12.png differ diff --git a/asset/Untitled 13.png b/asset/Untitled 13.png new file mode 100644 index 0000000..7c98f3e Binary files /dev/null and b/asset/Untitled 13.png differ diff --git a/asset/Untitled 14.jpg b/asset/Untitled 14.jpg new file mode 100644 index 0000000..cc26986 Binary files /dev/null and b/asset/Untitled 14.jpg differ diff --git a/asset/Untitled 15.png b/asset/Untitled 15.png new file mode 100644 index 0000000..9a07ddb Binary files /dev/null and b/asset/Untitled 15.png differ diff --git a/asset/Untitled 16.png b/asset/Untitled 16.png new file mode 100644 index 0000000..c053e5a Binary files /dev/null and b/asset/Untitled 16.png differ diff --git a/asset/Untitled 17.png b/asset/Untitled 17.png new file mode 100644 index 0000000..b413d32 Binary files /dev/null and b/asset/Untitled 17.png differ diff --git a/asset/Untitled 18.png b/asset/Untitled 18.png new file mode 100644 index 0000000..945b8c3 Binary files /dev/null and b/asset/Untitled 18.png differ diff --git a/asset/Untitled 2.png b/asset/Untitled 2.png new file mode 100644 index 0000000..d909a08 Binary files /dev/null and b/asset/Untitled 2.png differ diff --git a/asset/Untitled 3.png b/asset/Untitled 3.png new file mode 100644 index 0000000..89132cd Binary files /dev/null and b/asset/Untitled 3.png differ diff --git a/asset/Untitled 4.png b/asset/Untitled 4.png new file mode 100644 index 0000000..09bf962 Binary files /dev/null and b/asset/Untitled 4.png differ diff --git a/asset/Untitled 5.png b/asset/Untitled 5.png new file mode 100644 index 0000000..9200e11 Binary files /dev/null and b/asset/Untitled 5.png differ diff --git a/asset/Untitled 6.png b/asset/Untitled 6.png new file mode 100644 index 0000000..9216072 Binary files /dev/null and b/asset/Untitled 6.png differ diff --git a/asset/Untitled 7.png b/asset/Untitled 7.png new file mode 100644 index 
0000000..be72608 Binary files /dev/null and b/asset/Untitled 7.png differ diff --git a/asset/Untitled 8.png b/asset/Untitled 8.png new file mode 100644 index 0000000..c177df5 Binary files /dev/null and b/asset/Untitled 8.png differ diff --git a/asset/Untitled 9.png b/asset/Untitled 9.png new file mode 100644 index 0000000..c03bdeb Binary files /dev/null and b/asset/Untitled 9.png differ diff --git a/asset/timeline.png b/asset/timeline.png new file mode 100644 index 0000000..16407d0 Binary files /dev/null and b/asset/timeline.png differ diff --git a/datapreprocess/feature_extraction.ipynb b/datapreprocess/feature_extraction.ipynb new file mode 100644 index 0000000..37b26eb --- /dev/null +++ b/datapreprocess/feature_extraction.ipynb @@ -0,0 +1,423 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from collections import defaultdict\n", + "import cv2\n", + "import numpy as np\n", + "from ultralytics import YOLO\n", + "import csv\n", + "import os\n", + "from copy import deepcopy\n", + "\n", + "from datetime import datetime" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "root = \"/data/ephemeral/home/datasets/UCFCrime/normal/\"\n", + "\n", + "\n", + "header = [\n", + " \"Filename\",\n", + " \"Frame\",\n", + " \"ID\",\n", + " \"X\",\n", + " \"Y\",\n", + " \"Width\",\n", + " \"Height\",\n", + " \"Keypoint_0\",\n", + " \"Keypoint_1\",\n", + " \"Keypoint_2\",\n", + " \"Keypoint_3\",\n", + " \"Keypoint_4\",\n", + " \"Keypoint_5\",\n", + " \"Keypoint_6\",\n", + " \"Keypoint_7\",\n", + " \"Keypoint_8\",\n", + " \"Keypoint_9\",\n", + " \"Keypoint_10\",\n", + " \"Keypoint_11\",\n", + " \"Keypoint_12\",\n", + " \"Keypoint_13\",\n", + " \"Keypoint_14\",\n", + " \"Keypoint_15\",\n", + " \"Keypoint_16\",\n", + " \"Keypoint_17\",\n", + " \"Keypoint_18\",\n", + " \"Keypoint_19\",\n", + " \"Keypoint_20\",\n", + " \"Keypoint_21\",\n", + " 
\"Keypoint_22\",\n", + " \"Keypoint_23\",\n", + " \"Keypoint_24\",\n", + " \"Keypoint_25\",\n", + " \"Keypoint_26\",\n", + " \"Keypoint_27\",\n", + " \"Keypoint_28\",\n", + " \"Keypoint_29\",\n", + " \"Keypoint_30\",\n", + " \"Keypoint_31\",\n", + " \"Keypoint_32\",\n", + " \"Keypoint_33\",\n", + "]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# Load the YOLOv8 model\n", + "model = YOLO(\"yolov8n-pose.pt\")\n", + "\n", + "# Define the standard frame size (change these values as needed)\n", + "standard_width = 320\n", + "standard_height = 240" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "==>> folder_list: ['Abuse', 'Arrest', 'Arson', 'Assault', 'Burglary', 'Explosion', 'Fighting', 'RoadAccidents', 'Robbery', 'Shooting', 'Shoplifting', 'Stealing', 'Vandalism']\n" + ] + } + ], + "source": [ + "folder_list = os.listdir(root)\n", + "folder_list.sort()\n", + "print(f\"==>> folder_list: {folder_list}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Abuse feature extracting starts\n", + "==>> file_list: ['Abuse001_x264.mp4', 'Abuse002_x264.mp4', 'Abuse003_x264.mp4', 'Abuse004_x264.mp4', 'Abuse005_x264.mp4', 'Abuse006_x264.mp4', 'Abuse007_x264.mp4', 'Abuse008_x264.mp4', 'Abuse009_x264.mp4', 'Abuse010_x264.mp4', 'Abuse011_x264.mp4', 'Abuse012_x264.mp4', 'Abuse013_x264.mp4', 'Abuse014_x264.mp4', 'Abuse015_x264.mp4', 'Abuse016_x264.mp4', 'Abuse017_x264.mp4', 'Abuse018_x264.mp4', 'Abuse019_x264.mp4', 'Abuse020_x264.mp4', 'Abuse021_x264.mp4', 'Abuse022_x264.mp4', 'Abuse023_x264.mp4', 'Abuse024_x264.mp4', 'Abuse025_x264.mp4', 'Abuse026_x264.mp4', 'Abuse027_x264.mp4', 'Abuse028_x264.mp4', 'Abuse029_x264.mp4', 'Abuse030_x264.mp4', 'Abuse031_x264.mp4', 'Abuse032_x264.mp4', 
'Abuse033_x264.mp4', 'Abuse034_x264.mp4', 'Abuse035_x264.mp4', 'Abuse036_x264.mp4', 'Abuse037_x264.mp4', 'Abuse038_x264.mp4', 'Abuse039_x264.mp4', 'Abuse040_x264.mp4', 'Abuse041_x264.mp4', 'Abuse042_x264.mp4', 'Abuse043_x264.mp4', 'Abuse044_x264.mp4', 'Abuse045_x264.mp4', 'Abuse046_x264.mp4', 'Abuse047_x264.mp4', 'Abuse048_x264.mp4', 'Abuse049_x264.mp4', 'Abuse050_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Abuse feature extracting ended. 
Elapsed time: 0:49:53\n", + "Arrest feature extracting starts\n", + "==>> file_list: ['Arrest001_x264.mp4', 'Arrest002_x264.mp4', 'Arrest003_x264.mp4', 'Arrest004_x264.mp4', 'Arrest005_x264.mp4', 'Arrest006_x264.mp4', 'Arrest007_x264.mp4', 'Arrest008_x264.mp4', 'Arrest009_x264.mp4', 'Arrest010_x264.mp4', 'Arrest011_x264.mp4', 'Arrest012_x264.mp4', 'Arrest013_x264.mp4', 'Arrest014_x264.mp4', 'Arrest015_x264.mp4', 'Arrest016_x264.mp4', 'Arrest017_x264.mp4', 'Arrest018_x264.mp4', 'Arrest019_x264.mp4', 'Arrest020_x264.mp4', 'Arrest021_x264.mp4', 'Arrest022_x264.mp4', 'Arrest023_x264.mp4', 'Arrest024_x264.mp4', 'Arrest025_x264.mp4', 'Arrest026_x264.mp4', 'Arrest027_x264.mp4', 'Arrest028_x264.mp4', 'Arrest029_x264.mp4', 'Arrest030_x264.mp4', 'Arrest031_x264.mp4', 'Arrest032_x264.mp4', 'Arrest033_x264.mp4', 'Arrest034_x264.mp4', 'Arrest035_x264.mp4', 'Arrest036_x264.mp4', 'Arrest037_x264.mp4', 'Arrest038_x264.mp4', 'Arrest039_x264.mp4', 'Arrest040_x264.mp4', 'Arrest041_x264.mp4', 'Arrest042_x264.mp4', 'Arrest043_x264.mp4', 'Arrest044_x264.mp4', 'Arrest046_x264.mp4', 'Arrest047_x264.mp4', 'Arrest048_x264.mp4', 'Arrest049_x264.mp4', 'Arrest050_x264.mp4', 'Arrest051_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Arrest feature extracting ended. 
Elapsed time: 1:14:14\n", + "Arson feature extracting starts\n", + "==>> file_list: ['Arson001_x264.mp4', 'Arson002_x264.mp4', 'Arson003_x264.mp4', 'Arson005_x264.mp4', 'Arson006_x264.mp4', 'Arson007_x264.mp4', 'Arson008_x264.mp4', 'Arson009_x264.mp4', 'Arson010_x264.mp4', 'Arson011_x264.mp4', 'Arson012_x264.mp4', 'Arson013_x264.mp4', 'Arson014_x264.mp4', 'Arson015_x264.mp4', 'Arson016_x264.mp4', 'Arson017_x264.mp4', 'Arson018_x264.mp4', 'Arson019_x264.mp4', 'Arson020_x264.mp4', 'Arson021_x264.mp4', 'Arson022_x264.mp4', 'Arson023_x264.mp4', 'Arson024_x264.mp4', 'Arson025_x264.mp4', 'Arson026_x264.mp4', 'Arson027_x264.mp4', 'Arson028_x264.mp4', 'Arson029_x264.mp4', 'Arson030_x264.mp4', 'Arson031_x264.mp4', 'Arson032_x264.mp4', 'Arson034_x264.mp4', 'Arson035_x264.mp4', 'Arson036_x264.mp4', 'Arson037_x264.mp4', 'Arson038_x264.mp4', 'Arson039_x264.mp4', 'Arson040_x264.mp4', 'Arson041_x264.mp4', 'Arson042_x264.mp4', 'Arson044_x264.mp4', 'Arson045_x264.mp4', 'Arson046_x264.mp4', 'Arson047_x264.mp4', 'Arson048_x264.mp4', 'Arson049_x264.mp4', 'Arson050_x264.mp4', 'Arson051_x264.mp4', 'Arson052_x264.mp4', 'Arson053_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", 
+ "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Arson feature extracting ended. Elapsed time: 1:00:21\n", + "Assault feature extracting starts\n", + "==>> file_list: ['Assault001_x264.mp4', 'Assault002_x264.mp4', 'Assault003_x264.mp4', 'Assault004_x264.mp4', 'Assault005_x264.mp4', 'Assault006_x264.mp4', 'Assault007_x264.mp4', 'Assault008_x264.mp4', 'Assault009_x264.mp4', 'Assault010_x264.mp4', 'Assault011_x264.mp4', 'Assault012_x264.mp4', 'Assault013_x264.mp4', 'Assault014_x264.mp4', 'Assault015_x264.mp4', 'Assault016_x264.mp4', 'Assault017_x264.mp4', 'Assault018_x264.mp4', 'Assault019_x264.mp4', 'Assault020_x264.mp4', 'Assault022_x264.mp4', 'Assault023_x264.mp4', 'Assault024_x264.mp4', 'Assault025_x264.mp4', 'Assault026_x264.mp4', 'Assault027_x264.mp4', 'Assault028_x264.mp4', 'Assault029_x264.mp4', 'Assault030_x264.mp4', 'Assault031_x264.mp4', 'Assault032_x264.mp4', 'Assault033_x264.mp4', 'Assault034_x264.mp4', 'Assault035_x264.mp4', 'Assault036_x264.mp4', 'Assault037_x264.mp4', 'Assault038_x264.mp4', 'Assault039_x264.mp4', 'Assault040_x264.mp4', 'Assault041_x264.mp4', 'Assault042_x264.mp4', 'Assault044_x264.mp4', 'Assault045_x264.mp4', 'Assault046_x264.mp4', 'Assault047_x264.mp4', 'Assault048_x264.mp4', 'Assault049_x264.mp4', 'Assault050_x264.mp4', 'Assault051_x264.mp4', 'Assault052_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Assault feature extracting ended. 
Elapsed time: 0:35:45\n", + "Burglary feature extracting starts\n", + "==>> file_list: ['Burglary001_x264.mp4', 'Burglary002_x264.mp4', 'Burglary003_x264.mp4', 'Burglary004_x264.mp4', 'Burglary005_x264.mp4', 'Burglary006_x264.mp4', 'Burglary007_x264.mp4', 'Burglary008_x264.mp4', 'Burglary009_x264.mp4', 'Burglary010_x264.mp4', 'Burglary011_x264.mp4', 'Burglary012_x264.mp4', 'Burglary013_x264.mp4', 'Burglary014_x264.mp4', 'Burglary015_x264.mp4', 'Burglary016_x264.mp4', 'Burglary017_x264.mp4', 'Burglary018_x264.mp4', 'Burglary019_x264.mp4', 'Burglary020_x264.mp4', 'Burglary021_x264.mp4', 'Burglary022_x264.mp4', 'Burglary023_x264.mp4', 'Burglary024_x264.mp4', 'Burglary025_x264.mp4', 'Burglary026_x264.mp4', 'Burglary027_x264.mp4', 'Burglary028_x264.mp4', 'Burglary029_x264.mp4', 'Burglary030_x264.mp4', 'Burglary031_x264.mp4', 'Burglary032_x264.mp4', 'Burglary033_x264.mp4', 'Burglary034_x264.mp4', 'Burglary035_x264.mp4', 'Burglary036_x264.mp4', 'Burglary037_x264.mp4', 'Burglary038_x264.mp4', 'Burglary039_x264.mp4', 'Burglary040_x264.mp4', 'Burglary041_x264.mp4', 'Burglary042_x264.mp4', 'Burglary043_x264.mp4', 'Burglary044_x264.mp4', 'Burglary045_x264.mp4', 'Burglary046_x264.mp4', 'Burglary047_x264.mp4', 'Burglary048_x264.mp4', 'Burglary049_x264.mp4', 'Burglary050_x264.mp4', 'Burglary051_x264.mp4', 'Burglary052_x264.mp4', 'Burglary053_x264.mp4', 'Burglary054_x264.mp4', 'Burglary055_x264.mp4', 'Burglary056_x264.mp4', 'Burglary057_x264.mp4', 'Burglary058_x264.mp4', 'Burglary059_x264.mp4', 'Burglary060_x264.mp4', 'Burglary061_x264.mp4', 'Burglary062_x264.mp4', 'Burglary063_x264.mp4', 'Burglary064_x264.mp4', 'Burglary065_x264.mp4', 'Burglary066_x264.mp4', 'Burglary067_x264.mp4', 'Burglary068_x264.mp4', 'Burglary069_x264.mp4', 'Burglary070_x264.mp4', 'Burglary071_x264.mp4', 'Burglary072_x264.mp4', 'Burglary073_x264.mp4', 'Burglary074_x264.mp4', 'Burglary075_x264.mp4', 'Burglary076_x264.mp4', 'Burglary077_x264.mp4', 'Burglary078_x264.mp4', 'Burglary079_x264.mp4', 
'Burglary080_x264.mp4', 'Burglary081_x264.mp4', 'Burglary082_x264.mp4', 'Burglary083_x264.mp4', 'Burglary084_x264.mp4', 'Burglary085_x264.mp4', 'Burglary086_x264.mp4', 'Burglary087_x264.mp4', 'Burglary088_x264.mp4', 'Burglary089_x264.mp4', 'Burglary090_x264.mp4', 'Burglary091_x264.mp4', 'Burglary092_x264.mp4', 'Burglary093_x264.mp4', 'Burglary094_x264.mp4', 'Burglary095_x264.mp4', 'Burglary096_x264.mp4', 'Burglary097_x264.mp4', 'Burglary098_x264.mp4', 'Burglary099_x264.mp4', 'Burglary100_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Burglary feature extracting ended. 
Elapsed time: 1:46:44\n", + "Explosion feature extracting starts\n", + "==>> file_list: ['Explosion001_x264.mp4', 'Explosion002_x264.mp4', 'Explosion003_x264.mp4', 'Explosion004_x264.mp4', 'Explosion005_x264.mp4', 'Explosion006_x264.mp4', 'Explosion007_x264.mp4', 'Explosion008_x264.mp4', 'Explosion009_x264.mp4', 'Explosion010_x264.mp4', 'Explosion011_x264.mp4', 'Explosion012_x264.mp4', 'Explosion013_x264.mp4', 'Explosion014_x264.mp4', 'Explosion015_x264.mp4', 'Explosion016_x264.mp4', 'Explosion017_x264.mp4', 'Explosion018_x264.mp4', 'Explosion019_x264.mp4', 'Explosion020_x264.mp4', 'Explosion021_x264.mp4', 'Explosion022_x264.mp4', 'Explosion023_x264.mp4', 'Explosion024_x264.mp4', 'Explosion025_x264.mp4', 'Explosion026_x264.mp4', 'Explosion027_x264.mp4', 'Explosion028_x264.mp4', 'Explosion029_x264.mp4', 'Explosion030_x264.mp4', 'Explosion032_x264.mp4', 'Explosion033_x264.mp4', 'Explosion034_x264.mp4', 'Explosion035_x264.mp4', 'Explosion036_x264.mp4', 'Explosion037_x264.mp4', 'Explosion038_x264.mp4', 'Explosion039_x264.mp4', 'Explosion040_x264.mp4', 'Explosion041_x264.mp4', 'Explosion042_x264.mp4', 'Explosion043_x264.mp4', 'Explosion044_x264.mp4', 'Explosion045_x264.mp4', 'Explosion046_x264.mp4', 'Explosion047_x264.mp4', 'Explosion048_x264.mp4', 'Explosion050_x264.mp4', 'Explosion051_x264.mp4', 'Explosion052_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: 
not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Explosion feature extracting ended. Elapsed time: 0:55:08\n", + "Fighting feature extracting starts\n", + "==>> file_list: ['Fighting002_x264.mp4', 'Fighting003_x264.mp4', 'Fighting004_x264.mp4', 'Fighting005_x264.mp4', 'Fighting006_x264.mp4', 'Fighting007_x264.mp4', 'Fighting008_x264.mp4', 'Fighting009_x264.mp4', 'Fighting010_x264.mp4', 'Fighting011_x264.mp4', 'Fighting012_x264.mp4', 'Fighting013_x264.mp4', 'Fighting014_x264.mp4', 'Fighting015_x264.mp4', 'Fighting016_x264.mp4', 'Fighting017_x264.mp4', 'Fighting018_x264.mp4', 'Fighting019_x264.mp4', 'Fighting020_x264.mp4', 'Fighting021_x264.mp4', 'Fighting022_x264.mp4', 'Fighting023_x264.mp4', 'Fighting024_x264.mp4', 'Fighting025_x264.mp4', 'Fighting026_x264.mp4', 'Fighting027_x264.mp4', 'Fighting028_x264.mp4', 'Fighting029_x264.mp4', 'Fighting030_x264.mp4', 'Fighting031_x264.mp4', 'Fighting032_x264.mp4', 'Fighting033_x264.mp4', 'Fighting034_x264.mp4', 'Fighting035_x264.mp4', 'Fighting036_x264.mp4', 'Fighting037_x264.mp4', 'Fighting038_x264.mp4', 'Fighting039_x264.mp4', 'Fighting040_x264.mp4', 'Fighting041_x264.mp4', 'Fighting042_x264.mp4', 'Fighting043_x264.mp4', 'Fighting044_x264.mp4', 'Fighting045_x264.mp4', 'Fighting046_x264.mp4', 'Fighting047_x264.mp4', 'Fighting048_x264.mp4', 'Fighting049_x264.mp4', 'Fighting050_x264.mp4', 'Fighting051_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Fighting feature extracting ended. 
Elapsed time: 1:12:11\n", + "RoadAccidents feature extracting starts\n", + "==>> file_list: ['RoadAccidents001_x264.mp4', 'RoadAccidents002_x264.mp4', 'RoadAccidents003_x264.mp4', 'RoadAccidents004_x264.mp4', 'RoadAccidents005_x264.mp4', 'RoadAccidents006_x264.mp4', 'RoadAccidents007_x264.mp4', 'RoadAccidents008_x264.mp4', 'RoadAccidents009_x264.mp4', 'RoadAccidents010_x264.mp4', 'RoadAccidents011_x264.mp4', 'RoadAccidents012_x264.mp4', 'RoadAccidents013_x264.mp4', 'RoadAccidents014_x264.mp4', 'RoadAccidents015_x264.mp4', 'RoadAccidents016_x264.mp4', 'RoadAccidents017_x264.mp4', 'RoadAccidents018_x264.mp4', 'RoadAccidents019_x264.mp4', 'RoadAccidents020_x264.mp4', 'RoadAccidents021_x264.mp4', 'RoadAccidents022_x264.mp4', 'RoadAccidents023_x264.mp4', 'RoadAccidents024_x264.mp4', 'RoadAccidents025_x264.mp4', 'RoadAccidents026_x264.mp4', 'RoadAccidents027_x264.mp4', 'RoadAccidents028_x264.mp4', 'RoadAccidents029_x264.mp4', 'RoadAccidents030_x264.mp4', 'RoadAccidents031_x264.mp4', 'RoadAccidents032_x264.mp4', 'RoadAccidents033_x264.mp4', 'RoadAccidents034_x264.mp4', 'RoadAccidents035_x264.mp4', 'RoadAccidents036_x264.mp4', 'RoadAccidents037_x264.mp4', 'RoadAccidents038_x264.mp4', 'RoadAccidents039_x264.mp4', 'RoadAccidents040_x264.mp4', 'RoadAccidents041_x264.mp4', 'RoadAccidents042_x264.mp4', 'RoadAccidents043_x264.mp4', 'RoadAccidents044_x264.mp4', 'RoadAccidents046_x264.mp4', 'RoadAccidents047_x264.mp4', 'RoadAccidents048_x264.mp4', 'RoadAccidents049_x264.mp4', 'RoadAccidents050_x264.mp4', 'RoadAccidents051_x264.mp4', 'RoadAccidents052_x264.mp4', 'RoadAccidents053_x264.mp4', 'RoadAccidents054_x264.mp4', 'RoadAccidents055_x264.mp4', 'RoadAccidents056_x264.mp4', 'RoadAccidents057_x264.mp4', 'RoadAccidents058_x264.mp4', 'RoadAccidents059_x264.mp4', 'RoadAccidents060_x264.mp4', 'RoadAccidents061_x264.mp4', 'RoadAccidents062_x264.mp4', 'RoadAccidents063_x264.mp4', 'RoadAccidents064_x264.mp4', 'RoadAccidents065_x264.mp4', 'RoadAccidents066_x264.mp4', 
'RoadAccidents067_x264.mp4', 'RoadAccidents068_x264.mp4', 'RoadAccidents069_x264.mp4', 'RoadAccidents070_x264.mp4', 'RoadAccidents071_x264.mp4', 'RoadAccidents072_x264.mp4', 'RoadAccidents073_x264.mp4', 'RoadAccidents074_x264.mp4', 'RoadAccidents075_x264.mp4', 'RoadAccidents076_x264.mp4', 'RoadAccidents077_x264.mp4', 'RoadAccidents078_x264.mp4', 'RoadAccidents079_x264.mp4', 'RoadAccidents080_x264.mp4', 'RoadAccidents081_x264.mp4', 'RoadAccidents082_x264.mp4', 'RoadAccidents083_x264.mp4', 'RoadAccidents084_x264.mp4', 'RoadAccidents085_x264.mp4', 'RoadAccidents086_x264.mp4', 'RoadAccidents087_x264.mp4', 'RoadAccidents088_x264.mp4', 'RoadAccidents089_x264.mp4', 'RoadAccidents090_x264.mp4', 'RoadAccidents091_x264.mp4', 'RoadAccidents092_x264.mp4', 'RoadAccidents093_x264.mp4', 'RoadAccidents094_x264.mp4', 'RoadAccidents095_x264.mp4', 'RoadAccidents096_x264.mp4', 'RoadAccidents097_x264.mp4', 'RoadAccidents098_x264.mp4', 'RoadAccidents099_x264.mp4', 'RoadAccidents100_x264.mp4', 'RoadAccidents101_x264.mp4', 'RoadAccidents102_x264.mp4', 'RoadAccidents103_x264.mp4', 'RoadAccidents104_x264.mp4', 'RoadAccidents105_x264.mp4', 'RoadAccidents106_x264.mp4', 'RoadAccidents107_x264.mp4', 'RoadAccidents108_x264.mp4', 'RoadAccidents109_x264.mp4', 'RoadAccidents110_x264.mp4', 'RoadAccidents111_x264.mp4', 'RoadAccidents112_x264.mp4', 'RoadAccidents113_x264.mp4', 'RoadAccidents114_x264.mp4', 'RoadAccidents115_x264.mp4', 'RoadAccidents116_x264.mp4', 'RoadAccidents117_x264.mp4', 'RoadAccidents118_x264.mp4', 'RoadAccidents119_x264.mp4', 'RoadAccidents120_x264.mp4', 'RoadAccidents121_x264.mp4', 'RoadAccidents122_x264.mp4', 'RoadAccidents123_x264.mp4', 'RoadAccidents124_x264.mp4', 'RoadAccidents125_x264.mp4', 'RoadAccidents126_x264.mp4', 'RoadAccidents127_x264.mp4', 'RoadAccidents128_x264.mp4', 'RoadAccidents129_x264.mp4', 'RoadAccidents130_x264.mp4', 'RoadAccidents131_x264.mp4', 'RoadAccidents132_x264.mp4', 'RoadAccidents133_x264.mp4', 'RoadAccidents134_x264.mp4', 
'RoadAccidents135_x264.mp4', 'RoadAccidents136_x264.mp4', 'RoadAccidents137_x264.mp4', 'RoadAccidents138_x264.mp4', 'RoadAccidents139_x264.mp4', 'RoadAccidents140_x264.mp4', 'RoadAccidents141_x264.mp4', 'RoadAccidents142_x264.mp4', 'RoadAccidents143_x264.mp4', 'RoadAccidents144_x264.mp4', 'RoadAccidents145_x264.mp4', 'RoadAccidents146_x264.mp4', 'RoadAccidents147_x264.mp4', 'RoadAccidents148_x264.mp4', 'RoadAccidents149_x264.mp4', 'RoadAccidents150_x264.mp4', 'RoadAccidents151_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "RoadAccidents feature extracting ended. 
Elapsed time: 0:55:41\n", + "Robbery feature extracting starts\n", + "==>> file_list: ['Robbery001_x264.mp4', 'Robbery002_x264.mp4', 'Robbery003_x264.mp4', 'Robbery004_x264.mp4', 'Robbery005_x264.mp4', 'Robbery006_x264.mp4', 'Robbery007_x264.mp4', 'Robbery008_x264.mp4', 'Robbery009_x264.mp4', 'Robbery010_x264.mp4', 'Robbery011_x264.mp4', 'Robbery012_x264.mp4', 'Robbery013_x264.mp4', 'Robbery014_x264.mp4', 'Robbery015_x264.mp4', 'Robbery016_x264.mp4', 'Robbery017_x264.mp4', 'Robbery018_x264.mp4', 'Robbery019_x264.mp4', 'Robbery020_x264.mp4', 'Robbery021_x264.mp4', 'Robbery022_x264.mp4', 'Robbery023_x264.mp4', 'Robbery024_x264.mp4', 'Robbery025_x264.mp4', 'Robbery026_x264.mp4', 'Robbery027_x264.mp4', 'Robbery028_x264.mp4', 'Robbery029_x264.mp4', 'Robbery030_x264.mp4', 'Robbery031_x264.mp4', 'Robbery032_x264.mp4', 'Robbery033_x264.mp4', 'Robbery034_x264.mp4', 'Robbery035_x264.mp4', 'Robbery036_x264.mp4', 'Robbery037_x264.mp4', 'Robbery038_x264.mp4', 'Robbery039_x264.mp4', 'Robbery040_x264.mp4', 'Robbery041_x264.mp4', 'Robbery042_x264.mp4', 'Robbery043_x264.mp4', 'Robbery044_x264.mp4', 'Robbery045_x264.mp4', 'Robbery046_x264.mp4', 'Robbery047_x264.mp4', 'Robbery048_x264.mp4', 'Robbery049_x264.mp4', 'Robbery050_x264.mp4', 'Robbery051_x264.mp4', 'Robbery052_x264.mp4', 'Robbery053_x264.mp4', 'Robbery054_x264.mp4', 'Robbery055_x264.mp4', 'Robbery056_x264.mp4', 'Robbery057_x264.mp4', 'Robbery058_x264.mp4', 'Robbery059_x264.mp4', 'Robbery060_x264.mp4', 'Robbery061_x264.mp4', 'Robbery062_x264.mp4', 'Robbery063_x264.mp4', 'Robbery064_x264.mp4', 'Robbery065_x264.mp4', 'Robbery066_x264.mp4', 'Robbery067_x264.mp4', 'Robbery068_x264.mp4', 'Robbery069_x264.mp4', 'Robbery070_x264.mp4', 'Robbery071_x264.mp4', 'Robbery072_x264.mp4', 'Robbery073_x264.mp4', 'Robbery074_x264.mp4', 'Robbery075_x264.mp4', 'Robbery076_x264.mp4', 'Robbery077_x264.mp4', 'Robbery078_x264.mp4', 'Robbery079_x264.mp4', 'Robbery080_x264.mp4', 'Robbery081_x264.mp4', 'Robbery082_x264.mp4', 'Robbery083_x264.mp4', 
'Robbery084_x264.mp4', 'Robbery085_x264.mp4', 'Robbery086_x264.mp4', 'Robbery087_x264.mp4', 'Robbery088_x264.mp4', 'Robbery089_x264.mp4', 'Robbery090_x264.mp4', 'Robbery091_x264.mp4', 'Robbery092_x264.mp4', 'Robbery093_x264.mp4', 'Robbery094_x264.mp4', 'Robbery095_x264.mp4', 'Robbery096_x264.mp4', 'Robbery097_x264.mp4', 'Robbery098_x264.mp4', 'Robbery099_x264.mp4', 'Robbery100_x264.mp4', 'Robbery101_x264.mp4', 'Robbery102_x264.mp4', 'Robbery103_x264.mp4', 'Robbery104_x264.mp4', 'Robbery105_x264.mp4', 'Robbery106_x264.mp4', 'Robbery107_x264.mp4', 'Robbery108_x264.mp4', 'Robbery109_x264.mp4', 'Robbery110_x264.mp4', 'Robbery111_x264.mp4', 'Robbery112_x264.mp4', 'Robbery113_x264.mp4', 'Robbery114_x264.mp4', 'Robbery115_x264.mp4', 'Robbery116_x264.mp4', 'Robbery117_x264.mp4', 'Robbery118_x264.mp4', 'Robbery119_x264.mp4', 'Robbery120_x264.mp4', 'Robbery121_x264.mp4', 'Robbery122_x264.mp4', 'Robbery123_x264.mp4', 'Robbery124_x264.mp4', 'Robbery125_x264.mp4', 'Robbery126_x264.mp4', 'Robbery127_x264.mp4', 'Robbery128_x264.mp4', 'Robbery129_x264.mp4', 'Robbery130_x264.mp4', 'Robbery131_x264.mp4', 'Robbery132_x264.mp4', 'Robbery133_x264.mp4', 'Robbery134_x264.mp4', 'Robbery135_x264.mp4', 'Robbery136_x264.mp4', 'Robbery137_x264.mp4', 'Robbery138_x264.mp4', 'Robbery139_x264.mp4', 'Robbery140_x264.mp4', 'Robbery141_x264.mp4', 'Robbery142_x264.mp4', 'Robbery143_x264.mp4', 'Robbery144_x264.mp4', 'Robbery145_x264.mp4', 'Robbery146_x264.mp4', 'Robbery147_x264.mp4', 'Robbery148_x264.mp4', 'Robbery149_x264.mp4', 'Robbery150_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough 
matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Robbery feature extracting ended. Elapsed time: 1:55:40\n", + "Shooting feature extracting starts\n", + "==>> file_list: ['Shooting001_x264.mp4', 'Shooting002_x264.mp4', 'Shooting003_x264.mp4', 'Shooting004_x264.mp4', 'Shooting005_x264.mp4', 'Shooting006_x264.mp4', 'Shooting007_x264.mp4', 'Shooting008_x264.mp4', 'Shooting009_x264.mp4', 'Shooting010_x264.mp4', 'Shooting011_x264.mp4', 'Shooting012_x264.mp4', 'Shooting013_x264.mp4', 'Shooting014_x264.mp4', 'Shooting015_x264.mp4', 'Shooting017_x264.mp4', 'Shooting018_x264.mp4', 'Shooting019_x264.mp4', 'Shooting020_x264.mp4', 'Shooting021_x264.mp4', 'Shooting022_x264.mp4', 'Shooting023_x264.mp4', 'Shooting024_x264.mp4', 'Shooting025_x264.mp4', 'Shooting026_x264.mp4', 'Shooting027_x264.mp4', 'Shooting028_x264.mp4', 'Shooting029_x264.mp4', 'Shooting030_x264.mp4', 'Shooting031_x264.mp4', 'Shooting032_x264.mp4', 'Shooting033_x264.mp4', 'Shooting034_x264.mp4', 'Shooting036_x264.mp4', 'Shooting037_x264.mp4', 'Shooting038_x264.mp4', 'Shooting039_x264.mp4', 'Shooting040_x264.mp4', 'Shooting041_x264.mp4', 'Shooting042_x264.mp4', 'Shooting043_x264.mp4', 'Shooting044_x264.mp4', 'Shooting046_x264.mp4', 'Shooting047_x264.mp4', 'Shooting048_x264.mp4', 'Shooting050_x264.mp4', 'Shooting051_x264.mp4', 'Shooting052_x264.mp4', 'Shooting053_x264.mp4', 'Shooting054_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Shooting feature extracting ended. 
Elapsed time: 0:35:42\n", + "Shoplifting feature extracting starts\n", + "==>> file_list: ['Shoplifting001_x264.mp4', 'Shoplifting003_x264.mp4', 'Shoplifting004_x264.mp4', 'Shoplifting005_x264.mp4', 'Shoplifting006_x264.mp4', 'Shoplifting007_x264.mp4', 'Shoplifting008_x264.mp4', 'Shoplifting009_x264.mp4', 'Shoplifting010_x264.mp4', 'Shoplifting012_x264.mp4', 'Shoplifting013_x264.mp4', 'Shoplifting014_x264.mp4', 'Shoplifting015_x264.mp4', 'Shoplifting016_x264.mp4', 'Shoplifting017_x264.mp4', 'Shoplifting018_x264.mp4', 'Shoplifting019_x264.mp4', 'Shoplifting020_x264.mp4', 'Shoplifting021_x264.mp4', 'Shoplifting022_x264.mp4', 'Shoplifting024_x264.mp4', 'Shoplifting025_x264.mp4', 'Shoplifting026_x264.mp4', 'Shoplifting027_x264.mp4', 'Shoplifting028_x264.mp4', 'Shoplifting029_x264.mp4', 'Shoplifting030_x264.mp4', 'Shoplifting031_x264.mp4', 'Shoplifting032_x264.mp4', 'Shoplifting033_x264.mp4', 'Shoplifting034_x264.mp4', 'Shoplifting036_x264.mp4', 'Shoplifting037_x264.mp4', 'Shoplifting038_x264.mp4', 'Shoplifting039_x264.mp4', 'Shoplifting040_x264.mp4', 'Shoplifting041_x264.mp4', 'Shoplifting042_x264.mp4', 'Shoplifting043_x264.mp4', 'Shoplifting044_x264.mp4', 'Shoplifting045_x264.mp4', 'Shoplifting047_x264.mp4', 'Shoplifting048_x264.mp4', 'Shoplifting049_x264.mp4', 'Shoplifting050_x264.mp4', 'Shoplifting051_x264.mp4', 'Shoplifting052_x264.mp4', 'Shoplifting053_x264.mp4', 'Shoplifting054_x264.mp4', 'Shoplifting055_x264.mp4']\n", + "Shoplifting feature extracting ended. 
Elapsed time: 1:42:18\n", + "Stealing feature extracting starts\n", + "==>> file_list: ['Stealing002_x264.mp4', 'Stealing003_x264.mp4', 'Stealing004_x264.mp4', 'Stealing006_x264.mp4', 'Stealing007_x264.mp4', 'Stealing008_x264.mp4', 'Stealing009_x264.mp4', 'Stealing010_x264.mp4', 'Stealing011_x264.mp4', 'Stealing012_x264.mp4', 'Stealing013_x264.mp4', 'Stealing014_x264.mp4', 'Stealing015_x264.mp4', 'Stealing016_x264.mp4', 'Stealing017_x264.mp4', 'Stealing018_x264.mp4', 'Stealing019_x264.mp4', 'Stealing020_x264.mp4', 'Stealing021_x264.mp4', 'Stealing022_x264.mp4', 'Stealing023_x264.mp4', 'Stealing024_x264.mp4', 'Stealing025_x264.mp4', 'Stealing026_x264.mp4', 'Stealing027_x264.mp4', 'Stealing028_x264.mp4', 'Stealing029_x264.mp4', 'Stealing030_x264.mp4', 'Stealing031_x264.mp4', 'Stealing032_x264.mp4', 'Stealing035_x264.mp4', 'Stealing036_x264.mp4', 'Stealing037_x264.mp4', 'Stealing042_x264.mp4', 'Stealing043_x264.mp4', 'Stealing044_x264.mp4', 'Stealing045_x264.mp4', 'Stealing046_x264.mp4', 'Stealing047_x264.mp4', 'Stealing048_x264.mp4', 'Stealing049_x264.mp4', 'Stealing050_x264.mp4', 'Stealing051_x264.mp4', 'Stealing052_x264.mp4', 'Stealing053_x264.mp4', 'Stealing054_x264.mp4', 'Stealing055_x264.mp4', 'Stealing057_x264.mp4', 'Stealing058_x264.mp4', 'Stealing059_x264.mp4', 'Stealing060_x264.mp4', 'Stealing061_x264.mp4', 'Stealing062_x264.mp4', 'Stealing063_x264.mp4', 'Stealing065_x264.mp4', 'Stealing066_x264.mp4', 'Stealing067_x264.mp4', 'Stealing068_x264.mp4', 'Stealing069_x264.mp4', 'Stealing070_x264.mp4', 'Stealing071_x264.mp4', 'Stealing072_x264.mp4', 'Stealing073_x264.mp4', 'Stealing074_x264.mp4', 'Stealing075_x264.mp4', 'Stealing077_x264.mp4', 'Stealing078_x264.mp4', 'Stealing079_x264.mp4', 'Stealing080_x264.mp4', 'Stealing081_x264.mp4', 'Stealing082_x264.mp4', 'Stealing083_x264.mp4', 'Stealing084_x264.mp4', 'Stealing086_x264.mp4', 'Stealing087_x264.mp4', 'Stealing088_x264.mp4', 'Stealing089_x264.mp4', 'Stealing091_x264.mp4', 'Stealing092_x264.mp4', 
'Stealing093_x264.mp4', 'Stealing094_x264.mp4', 'Stealing095_x264.mp4', 'Stealing096_x264.mp4', 'Stealing097_x264.mp4', 'Stealing098_x264.mp4', 'Stealing100_x264.mp4', 'Stealing101_x264.mp4', 'Stealing102_x264.mp4', 'Stealing103_x264.mp4', 'Stealing104_x264.mp4', 'Stealing105_x264.mp4', 'Stealing106_x264.mp4', 'Stealing107_x264.mp4', 'Stealing108_x264.mp4', 'Stealing109_x264.mp4', 'Stealing110_x264.mp4', 'Stealing111_x264.mp4', 'Stealing112_x264.mp4', 'Stealing113_x264.mp4', 'Stealing114_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Stealing feature extracting ended. 
Elapsed time: 1:44:04\n", + "Vandalism feature extracting starts\n", + "==>> file_list: ['Vandalism001_x264.mp4', 'Vandalism002_x264.mp4', 'Vandalism003_x264.mp4', 'Vandalism004_x264.mp4', 'Vandalism005_x264.mp4', 'Vandalism006_x264.mp4', 'Vandalism007_x264.mp4', 'Vandalism008_x264.mp4', 'Vandalism009_x264.mp4', 'Vandalism010_x264.mp4', 'Vandalism011_x264.mp4', 'Vandalism012_x264.mp4', 'Vandalism013_x264.mp4', 'Vandalism014_x264.mp4', 'Vandalism015_x264.mp4', 'Vandalism016_x264.mp4', 'Vandalism017_x264.mp4', 'Vandalism018_x264.mp4', 'Vandalism019_x264.mp4', 'Vandalism020_x264.mp4', 'Vandalism021_x264.mp4', 'Vandalism022_x264.mp4', 'Vandalism023_x264.mp4', 'Vandalism024_x264.mp4', 'Vandalism025_x264.mp4', 'Vandalism026_x264.mp4', 'Vandalism027_x264.mp4', 'Vandalism028_x264.mp4', 'Vandalism029_x264.mp4', 'Vandalism030_x264.mp4', 'Vandalism031_x264.mp4', 'Vandalism032_x264.mp4', 'Vandalism033_x264.mp4', 'Vandalism034_x264.mp4', 'Vandalism035_x264.mp4', 'Vandalism036_x264.mp4', 'Vandalism037_x264.mp4', 'Vandalism038_x264.mp4', 'Vandalism039_x264.mp4', 'Vandalism040_x264.mp4', 'Vandalism041_x264.mp4', 'Vandalism042_x264.mp4', 'Vandalism043_x264.mp4', 'Vandalism044_x264.mp4', 'Vandalism045_x264.mp4', 'Vandalism046_x264.mp4', 'Vandalism047_x264.mp4', 'Vandalism048_x264.mp4', 'Vandalism049_x264.mp4', 'Vandalism050_x264.mp4']\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "WARNING: not enough matching points\n", + "Vandalism feature extracting ended. 
Elapsed time: 0:36:05\n" + ] + } + ], + "source": [ + "for folder_name in folder_list:\n", + " time_start = datetime.now()\n", + "\n", + " print(f\"{folder_name} feature extracting starts\")\n", + "\n", + " with open(f\"{folder_name}.csv\", \"w\") as c_file:\n", + " writer = csv.writer(c_file, delimiter=\",\")\n", + "\n", + " writer.writerow(header)\n", + "\n", + " folder_path = root + folder_name + \"/\"\n", + "\n", + " file_list = os.listdir(root+folder_name)\n", + " file_list.sort()\n", + " print(f\"==>> file_list: {file_list}\")\n", + "\n", + " id_count = 0\n", + "\n", + " for file_name in file_list:\n", + " path = folder_path + file_name\n", + "\n", + " cap = cv2.VideoCapture(path)\n", + "\n", + " # Loop through the video frames\n", + " frame_count = 0\n", + "\n", + " # Store the track history\n", + " track_history = defaultdict(lambda: [])\n", + "\n", + " while cap.isOpened():\n", + " # Read a frame from the video\n", + " success, frame = cap.read()\n", + "\n", + " frame_count += 1 # Increment frame count\n", + "\n", + " if success:\n", + " frame = cv2.resize(frame, (standard_width, standard_height))\n", + "\n", + " # Run YOLOv8 tracking on the frame, persisting tracks between frames\n", + " results = model.track(frame, persist=True, verbose=False)\n", + "\n", + " if results[0].boxes is not None: # Check if there are results and boxes\n", + " # Get the boxes\n", + " # boxes = results[0].boxes.xywh.cpu()\n", + "\n", + " if results[0].boxes.id is not None:\n", + " # If 'int' attribute exists (there are detections), get the track IDs\n", + " track_ids = results[0].boxes.id.int().cpu().tolist()\n", + "\n", + " for i, box in zip(range(0, len(track_ids)), results[0].boxes.xywhn.cpu()):\n", + " keypoints = results[0].keypoints.xyn[i].cpu().numpy().flatten().tolist()\n", + " box_list = box.numpy().flatten().tolist()\n", + " if type(box_list) == \"float\" or type(keypoints) == \"float\":\n", + " print(f\"==>> box_list: {box_list}\")\n", + " print(f\"==>> keypoints: 
{keypoints}\")\n", + " box_and_keypoints = box_list + keypoints\n", + " track_history[track_ids[i]].append([[frame_count], deepcopy(box_and_keypoints)])\n", + " else:\n", + " # Break the loop if the end of the video is reached\n", + " break\n", + "\n", + " with open(f\"{folder_name}.csv\", \"a\") as c_file:\n", + " writer = csv.writer(c_file, delimiter=\",\")\n", + " for key in track_history.keys():\n", + " for f_count, b_and_k in track_history[key]:\n", + " row = [file_name] + f_count + [id_count + key] + b_and_k\n", + "\n", + " writer.writerow(row)\n", + "\n", + " id_count = id_count + len(track_history.keys())\n", + "\n", + " cap.release()\n", + "\n", + " time_end = datetime.now()\n", + " total_time = time_end - time_start\n", + " total_time = str(total_time).split(\".\")[0]\n", + "\n", + " print(f\"{folder_name} feature extracting ended. Elapsed time: {total_time}\")\n", + "\n", + "# cv2.destroyAllWindows()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/datapreprocess/feature_extraction.py b/datapreprocess/feature_extraction.py new file mode 100644 index 0000000..e608a6e --- /dev/null +++ b/datapreprocess/feature_extraction.py @@ -0,0 +1,181 @@ +import argparse +import csv +import os +from collections import defaultdict +from copy import deepcopy +from datetime import datetime + +import cv2 +import numpy as np +from tqdm import tqdm +from ultralytics import YOLO + +parser = argparse.ArgumentParser(description="Feature Extraction") + +parser.add_argument( + "--root", + 
type=str, + help="root folder path", + default="/data/ephemeral/home/datasets/UCFCrime/normal/", +) + +args = parser.parse_args() + +# root = args.root + +root = "/data/ephemeral/home/datasets/UCFCrime/normal/" + +header = [ + "Filename", + "Frame", + "ID", + "X", + "Y", + "Width", + "Height", + "Keypoint_0", + "Keypoint_1", + "Keypoint_2", + "Keypoint_3", + "Keypoint_4", + "Keypoint_5", + "Keypoint_6", + "Keypoint_7", + "Keypoint_8", + "Keypoint_9", + "Keypoint_10", + "Keypoint_11", + "Keypoint_12", + "Keypoint_13", + "Keypoint_14", + "Keypoint_15", + "Keypoint_16", + "Keypoint_17", + "Keypoint_18", + "Keypoint_19", + "Keypoint_20", + "Keypoint_21", + "Keypoint_22", + "Keypoint_23", + "Keypoint_24", + "Keypoint_25", + "Keypoint_26", + "Keypoint_27", + "Keypoint_28", + "Keypoint_29", + "Keypoint_30", + "Keypoint_31", + "Keypoint_32", + "Keypoint_33", +] + + +def feat_extraction(): + # Load the YOLOv8 model + model = YOLO("yolov8n-pose.pt") + + # Define the standard frame size (change these values as needed) + standard_width = 320 + standard_height = 240 + + folder_list = os.listdir(root) + folder_list.sort() + print(f"==>> folder_list: {folder_list}") + + for folder_name in folder_list: + time_start = datetime.now() + + print(f"{folder_name} feature extracting starts") + + with open(f"{folder_name}.csv", "w") as c_file: + writer = csv.writer(c_file, delimiter=",") + + writer.writerow(header) + + folder_path = root + folder_name + "/" + + file_list = os.listdir(root + folder_name) + file_list.sort() + print(f"==>> file_list: {file_list}") + + id_count = 0 + + for file_name in tqdm(file_list, total=len(file_list)): + path = folder_path + file_name + + cap = cv2.VideoCapture(path) + + # Loop through the video frames + frame_count = 0 + + # Store the track history + track_history = defaultdict(lambda: []) + + while cap.isOpened(): + # Read a frame from the video + success, frame = cap.read() + + frame_count += 1 # Increment frame count + + if success: + frame = 
cv2.resize(frame, (standard_width, standard_height)) + + # Run YOLOv8 tracking on the frame, persisting tracks between frames + results = model.track(frame, persist=True, verbose=False) + + if ( + results[0].boxes is not None + ): # Check if there are results and boxes + # Get the boxes + # boxes = results[0].boxes.xywh.cpu() + + if results[0].boxes.id is not None: + # If 'int' attribute exists (there are detections), get the track IDs + track_ids = results[0].boxes.id.int().cpu().tolist() + + for i, box in zip( + range(0, len(track_ids)), results[0].boxes.xywhn.cpu() + ): + keypoints = ( + results[0] + .keypoints.xyn[i] + .cpu() + .numpy() + .flatten() + .tolist() + ) + box_list = box.numpy().flatten().tolist() + if ( + type(box_list) == "float" + or type(keypoints) == "float" + ): + print(f"==>> box_list: {box_list}") + print(f"==>> keypoints: {keypoints}") + box_and_keypoints = box_list + keypoints + track_history[track_ids[i]].append( + [[frame_count], deepcopy(box_and_keypoints)] + ) + else: + # Break the loop if the end of the video is reached + break + + with open(f"{folder_name}.csv", "a") as c_file: + writer = csv.writer(c_file, delimiter=",") + for key in track_history.keys(): + for f_count, b_and_k in track_history[key]: + row = [file_name] + f_count + [id_count + key] + b_and_k + + writer.writerow(row) + + id_count = id_count + len(track_history.keys()) + + cap.release() + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + + print(f"{folder_name} feature extracting ended. 
Elapsed time: {total_time}") + + +feat_extraction() diff --git a/datapreprocess/feature_extraction_onebyone.py b/datapreprocess/feature_extraction_onebyone.py new file mode 100644 index 0000000..9b353ea --- /dev/null +++ b/datapreprocess/feature_extraction_onebyone.py @@ -0,0 +1,194 @@ +import argparse +import csv +import os +from collections import defaultdict +from copy import deepcopy +from datetime import datetime + +import cv2 +import numpy as np +from tqdm import tqdm +from ultralytics import YOLO + +parser = argparse.ArgumentParser(description="Feature Extraction") + +parser.add_argument( + "--root", + type=str, + help="root folder path", + default="/data/ephemeral/home/datasets/UCFCrime/normal/", +) + +args = parser.parse_args() + +# root = args.root + +root = "/data/ephemeral/home/datasets/UCFCrime/normal/" + +header = [ + "Filename", + "Frame", + "ID", + "X", + "Y", + "Width", + "Height", + "Keypoint_0", + "Keypoint_1", + "Keypoint_2", + "Keypoint_3", + "Keypoint_4", + "Keypoint_5", + "Keypoint_6", + "Keypoint_7", + "Keypoint_8", + "Keypoint_9", + "Keypoint_10", + "Keypoint_11", + "Keypoint_12", + "Keypoint_13", + "Keypoint_14", + "Keypoint_15", + "Keypoint_16", + "Keypoint_17", + "Keypoint_18", + "Keypoint_19", + "Keypoint_20", + "Keypoint_21", + "Keypoint_22", + "Keypoint_23", + "Keypoint_24", + "Keypoint_25", + "Keypoint_26", + "Keypoint_27", + "Keypoint_28", + "Keypoint_29", + "Keypoint_30", + "Keypoint_31", + "Keypoint_32", + "Keypoint_33", +] + +csv_root = "./UCF_csv/" + +if not os.path.exists(csv_root): + os.makedirs(csv_root) + + +def feat_extraction(): + # Load the YOLOv8 model + model = YOLO("yolov8n-pose.pt") + + # Define the standard frame size (change these values as needed) + standard_width = 320 + standard_height = 240 + + folder_list = os.listdir(root) + folder_list.sort() + print(f"==>> folder_list: {folder_list}") + + for folder_name in folder_list: + time_start = datetime.now() + + print(f"{folder_name} feature extracting starts") + + 
if not os.path.exists(csv_root + folder_name): + os.makedirs(csv_root + folder_name) + + # with open(f"{folder_name}.csv", "w") as c_file: + # writer = csv.writer(c_file, delimiter=",") + + # writer.writerow(header) + + folder_path = root + folder_name + "/" + + file_list = os.listdir(root + folder_name) + file_list.sort() + print(f"==>> file_list: {file_list}") + + id_count = 0 + + for file_name in tqdm(file_list, total=len(file_list)): + path = folder_path + file_name + + with open(csv_root + folder_name + "/" + f"{file_name}.csv", "w") as c_file: + writer = csv.writer(c_file, delimiter=",") + + writer.writerow(header) + + cap = cv2.VideoCapture(path) + + # Loop through the video frames + frame_count = 0 + + # Store the track history + track_history = defaultdict(lambda: []) + + while cap.isOpened(): + # Read a frame from the video + success, frame = cap.read() + + frame_count += 1 # Increment frame count + + if success: + frame = cv2.resize(frame, (standard_width, standard_height)) + + # Run YOLOv8 tracking on the frame, persisting tracks between frames + results = model.track(frame, persist=True, verbose=False) + + if ( + results[0].boxes is not None + ): # Check if there are results and boxes + # Get the boxes + # boxes = results[0].boxes.xywh.cpu() + + if results[0].boxes.id is not None: + # If 'int' attribute exists (there are detections), get the track IDs + track_ids = results[0].boxes.id.int().cpu().tolist() + + for i, box in zip( + range(0, len(track_ids)), results[0].boxes.xywhn.cpu() + ): + keypoints = ( + results[0] + .keypoints.xyn[i] + .cpu() + .numpy() + .flatten() + .tolist() + ) + box_list = box.numpy().flatten().tolist() + if ( + type(box_list) == "float" + or type(keypoints) == "float" + ): + print(f"==>> box_list: {box_list}") + print(f"==>> keypoints: {keypoints}") + box_and_keypoints = box_list + keypoints + track_history[track_ids[i]].append( + [[frame_count], deepcopy(box_and_keypoints)] + ) + else: + # Break the loop if the end of the 
video is reached + break + + with open(csv_root + folder_name + "/" + f"{file_name}.csv", "a") as c_file: + writer = csv.writer(c_file, delimiter=",") + for key in track_history.keys(): + for f_count, b_and_k in track_history[key]: + row = [file_name] + f_count + [id_count + key] + b_and_k + + writer.writerow(row) + + id_count = id_count + len(track_history.keys()) + + cap.release() + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + + print(f"{folder_name} feature extracting ended. Elapsed time: {total_time}") + + +feat_extraction() diff --git a/datapreprocess/feature_extraction_videoMAEv2.ipynb b/datapreprocess/feature_extraction_videoMAEv2.ipynb new file mode 100644 index 0000000..89f355a --- /dev/null +++ b/datapreprocess/feature_extraction_videoMAEv2.ipynb @@ -0,0 +1,553 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# from collections import defaultdict\n", + "import cv2\n", + "import numpy as np\n", + "import os\n", + "from copy import deepcopy\n", + "\n", + "from datetime import datetime\n", + "\n", + "import torch\n", + "# import torch.nn as nn\n", + "# import torch.nn.functional as F\n", + "\n", + "import albumentations as A\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "# from datetime import datetime\n", + "\n", + "import models\n", + "from timm.models import create_model" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "root = \"/data/ephemeral/home/datasets/UCFCrime/normal/\"\n", + "\n", + "\n", + "npy_root = \"./npy/\"\n", + "\n", + "if not os.path.exists(npy_root):\n", + " os.makedirs(npy_root)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "==>> folder_list: ['VS_03.이상행동_07.전도', 'VS_03.이상행동_08.파손', 'VS_03.이상행동_09.방화', 
'VS_03.이상행동_10.흡연', 'VS_03.이상행동_11.유기', 'VS_03.이상행동_12.절도', 'VS_03.이상행동_13.폭행', 'VS_03.이상행동_14.교통약자']\n" + ] + } + ], + "source": [ + "folder_list = os.listdir(root)\n", + "folder_list.sort()\n", + "print(f\"==>> folder_list: {folder_list}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "segments_num = 1\n", + "# 모델에 들어갈 frame수는 16 * segments_num\n", + "\n", + "model = create_model(\n", + " \"vit_small_patch16_224\",\n", + " # \"vit_base_patch16_224\",\n", + " img_size=224,\n", + " pretrained=False,\n", + " num_classes=710,\n", + " all_frames=16 * segments_num,\n", + " # tubelet_size=args.tubelet_size,\n", + " # drop_rate=args.drop,\n", + " # drop_path_rate=args.drop_path,\n", + " # attn_drop_rate=args.attn_drop_rate,\n", + " # head_drop_rate=args.head_drop_rate,\n", + " # drop_block_rate=None,\n", + " # use_mean_pooling=args.use_mean_pooling,\n", + " # init_scale=args.init_scale,\n", + " # with_cp=args.with_checkpoint,\n", + ")\n", + "\n", + "load_dict = torch.load(\n", + " \"/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/vit_s_k710_dl_from_giant.pth\"\n", + ")\n", + "# load_dict = torch.load(\n", + "# \"/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/vit_b_k710_dl_from_giant.pth\"\n", + "# )\n", + "# backbone pth 경로\n", + "\n", + "model.load_state_dict(load_dict[\"module\"])\n", + "\n", + "model.to(\"cuda\")\n", + "model.eval()\n", + "\n", + "tf = A.Resize(224, 224)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "VS_03.이상행동_07.전도 feature extracting starts\n", + "==>> file_list: ['C_3_7_48_BU_DYB_10-17_10-23-53_CB_RGB_DF2_M2.mp4', 'C_3_7_48_BU_DYB_10-17_10-23-53_CC_RGB_DF2_M2.mp4', 'C_3_7_48_BU_DYB_10-17_10-23-53_CD_RGB_DF2_M2.mp4', 'C_3_7_48_BU_DYB_10-17_10-23-53_CE_RGB_DF2_M2.mp4', 'C_3_7_48_BU_SMC_10-14_09-52-07_CB_RGB_DF2_M2.mp4', 
'C_3_7_48_BU_SMC_10-14_09-52-07_CD_RGB_DF2_M2.mp4', 'C_3_7_48_BU_SMC_10-14_09-52-07_CE_RGB_DF2_M2.mp4', 'C_3_7_49_BU_DYB_10-17_10-25-31_CB_RGB_DF2_M2.mp4', 'C_3_7_49_BU_DYB_10-17_10-25-31_CC_RGB_DF2_M2.mp4', 'C_3_7_49_BU_DYB_10-17_10-25-31_CD_RGB_DF2_M2.mp4', 'C_3_7_49_BU_DYB_10-17_10-25-31_CE_RGB_DF2_M2.mp4', 'C_3_7_49_BU_SMC_10-14_09-59-53_CA_RGB_DF2_F2.mp4', 'C_3_7_49_BU_SMC_10-14_09-59-53_CC_RGB_DF2_F2.mp4', 'C_3_7_49_BU_SMC_10-14_09-59-53_CD_RGB_DF2_F2.mp4', 'C_3_7_50_BU_DYB_10-17_10-28-37_CA_RGB_DF2_F2.mp4', 'C_3_7_50_BU_DYB_10-17_10-28-37_CB_RGB_DF2_F2.mp4', 'C_3_7_50_BU_DYB_10-17_10-28-37_CC_RGB_DF2_F2.mp4', 'C_3_7_50_BU_DYB_10-17_10-28-37_CD_RGB_DF2_F2.mp4', 'C_3_7_50_BU_DYB_10-17_10-28-37_CE_RGB_DF2_F2.mp4', 'C_3_7_50_BU_SMC_10-14_10-16-50_CA_RGB_DF2_F2.mp4', 'C_3_7_50_BU_SMC_10-14_10-16-50_CB_RGB_DF2_F2.mp4', 'C_3_7_50_BU_SMC_10-14_10-16-50_CC_RGB_DF2_F2.mp4', 'C_3_7_50_BU_SMC_10-14_10-16-50_CD_RGB_DF2_F2.mp4', 'C_3_7_50_BU_SMC_10-14_10-16-50_CE_RGB_DF2_F2.mp4', 'C_3_7_51_BU_DYB_10-17_10-30-20_CA_RGB_DF2_F2.mp4', 'C_3_7_51_BU_DYB_10-17_10-30-20_CB_RGB_DF2_F2.mp4', 'C_3_7_51_BU_DYB_10-17_10-30-20_CC_RGB_DF2_F2.mp4', 'C_3_7_51_BU_DYB_10-17_10-30-20_CD_RGB_DF2_F2.mp4', 'C_3_7_51_BU_DYB_10-17_10-30-20_CE_RGB_DF2_F2.mp4', 'C_3_7_51_BU_SMC_10-14_11-51-24_CA_RGB_DF2_F2.mp4', 'C_3_7_51_BU_SMC_10-14_11-51-24_CB_RGB_DF2_F2.mp4', 'C_3_7_51_BU_SMC_10-14_11-51-24_CC_RGB_DF2_F2.mp4', 'C_3_7_51_BU_SMC_10-14_11-51-24_CD_RGB_DF2_F2.mp4', 'C_3_7_52_BU_DYB_10-17_10-31-52_CA_RGB_DF2_F2.mp4', 'C_3_7_52_BU_DYB_10-17_10-31-52_CB_RGB_DF2_F2.mp4', 'C_3_7_52_BU_DYB_10-17_10-31-52_CC_RGB_DF2_F2.mp4', 'C_3_7_52_BU_DYB_10-17_10-31-52_CD_RGB_DF2_F2.mp4', 'C_3_7_52_BU_DYB_10-17_10-31-52_CE_RGB_DF2_F2.mp4', 'C_3_7_52_BU_SMC_10-14_11-51-12_CC_RGB_DF2_F2.mp4', 'C_3_7_52_BU_SMC_10-14_11-51-12_CD_RGB_DF2_F2.mp4', 'C_3_7_53_BU_DYB_10-17_10-33-45_CA_RGB_DF2_F2.mp4', 'C_3_7_53_BU_DYB_10-17_10-33-45_CB_RGB_DF2_F2.mp4', 'C_3_7_53_BU_DYB_10-17_10-33-45_CC_RGB_DF2_F2.mp4', 
'C_3_7_53_BU_DYB_10-17_10-33-45_CD_RGB_DF2_F2.mp4', 'C_3_7_53_BU_DYB_10-17_10-33-45_CE_RGB_DF2_F2.mp4', 'C_3_7_53_BU_SMC_10-14_11-55-57_CA_RGB_DF2_F2.mp4', 'C_3_7_53_BU_SMC_10-14_11-55-57_CB_RGB_DF2_F2.mp4', 'C_3_7_53_BU_SMC_10-14_11-55-57_CD_RGB_DF2_F2.mp4', 'C_3_7_54_BU_DYB_10-17_10-35-21_CA_RGB_DF2_F2.mp4', 'C_3_7_54_BU_DYB_10-17_10-35-21_CC_RGB_DF2_F2.mp4', 'C_3_7_54_BU_DYB_10-17_10-35-21_CD_RGB_DF2_F2.mp4', 'C_3_7_54_BU_DYB_10-17_10-35-21_CE_RGB_DF2_F2.mp4', 'C_3_7_54_BU_SMC_10-14_11-57-38_CC_RGB_DF2_F2.mp4', 'C_3_7_54_BU_SMC_10-14_11-57-38_CD_RGB_DF2_F2.mp4', 'C_3_7_55_BU_DYB_10-17_10-36-59_CA_RGB_DF2_F2.mp4', 'C_3_7_55_BU_DYB_10-17_10-36-59_CB_RGB_DF2_F2.mp4', 'C_3_7_55_BU_DYB_10-17_10-36-59_CC_RGB_DF2_F2.mp4', 'C_3_7_55_BU_DYB_10-17_10-36-59_CD_RGB_DF2_F2.mp4', 'C_3_7_55_BU_DYB_10-17_10-36-59_CE_RGB_DF2_F2.mp4', 'C_3_7_55_BU_SMC_10-14_09-42-39_CC_RGB_DF2_M3.mp4', 'C_3_7_56_BU_DYB_10-17_12-04-06_CA_RGB_DF2_M3.mp4', 'C_3_7_56_BU_DYB_10-17_12-04-06_CB_RGB_DF2_M3.mp4', 'C_3_7_56_BU_DYB_10-17_12-04-06_CC_RGB_DF2_M3.mp4', 'C_3_7_56_BU_DYB_10-17_12-04-06_CD_RGB_DF2_M3.mp4', 'C_3_7_56_BU_DYB_10-17_12-04-06_CE_RGB_DF2_M3.mp4', 'C_3_7_56_BU_SMC_10-14_15-45-26_CA_RGB_DF2_M3.mp4', 'C_3_7_56_BU_SMC_10-14_15-45-26_CB_RGB_DF2_M3.mp4', 'C_3_7_56_BU_SMC_10-14_15-45-26_CC_RGB_DF2_M3.mp4', 'C_3_7_56_BU_SMC_10-14_15-45-26_CD_RGB_DF2_M3.mp4', 'C_3_7_56_BU_SMC_10-14_15-45-26_CE_RGB_DF2_M3.mp4', 'C_3_7_57_BU_DYB_10-17_12-05-32_CA_RGB_DF2_M3.mp4', 'C_3_7_57_BU_DYB_10-17_12-05-32_CB_RGB_DF2_M3.mp4', 'C_3_7_57_BU_DYB_10-17_12-05-32_CC_RGB_DF2_M3.mp4', 'C_3_7_57_BU_DYB_10-17_12-05-32_CD_RGB_DF2_M3.mp4', 'C_3_7_57_BU_DYB_10-17_12-05-32_CE_RGB_DF2_M3.mp4', 'C_3_7_57_BU_SMC_10-14_15-47-05_CA_RGB_DF2_M3.mp4', 'C_3_7_57_BU_SMC_10-14_15-47-05_CB_RGB_DF2_M3.mp4', 'C_3_7_57_BU_SMC_10-14_15-47-05_CC_RGB_DF2_M3.mp4', 'C_3_7_57_BU_SMC_10-14_15-47-05_CD_RGB_DF2_M3.mp4', 'C_3_7_57_BU_SMC_10-14_15-47-05_CE_RGB_DF2_M3.mp4', 'C_3_7_58_BU_DYB_10-17_12-06-56_CA_RGB_DF2_M3.mp4']\n", + "VS_03.이상행동_07.전도 
feature extracting ended. Elapsed time: 0:01:34\n", + "VS_03.이상행동_08.파손 feature extracting starts\n", + "==>> file_list: ['C_3_8_53_BU_SMC_10-14_13-30-48_CB_RGB_DF2_F3.mp4', 'C_3_8_54_BU_SMC_10-14_13-32-37_CA_RGB_DF2_F3.mp4', 'C_3_8_54_BU_SMC_10-14_13-32-37_CC_RGB_DF2_F3.mp4', 'C_3_8_54_BU_SMC_10-14_13-32-37_CD_RGB_DF2_F3.mp4', 'C_3_8_54_BU_SMC_10-14_13-32-37_CE_RGB_DF2_F3.mp4', 'C_3_8_55_BU_DYB_10-17_14-19-01_CA_RGB_DF2_M3.mp4', 'C_3_8_55_BU_DYB_10-17_14-19-01_CB_RGB_DF2_M3.mp4', 'C_3_8_55_BU_DYB_10-17_14-19-01_CC_RGB_DF2_M3.mp4', 'C_3_8_55_BU_DYB_10-17_14-19-01_CD_RGB_DF2_M3.mp4', 'C_3_8_55_BU_DYB_10-17_14-19-01_CE_RGB_DF2_M3.mp4', 'C_3_8_55_BU_SMC_10-14_13-35-06_CA_RGB_DF2_F3.mp4', 'C_3_8_55_BU_SMC_10-14_13-35-06_CB_RGB_DF2_F3.mp4', 'C_3_8_55_BU_SMC_10-14_13-35-06_CC_RGB_DF2_F3.mp4', 'C_3_8_55_BU_SMC_10-14_13-35-06_CD_RGB_DF2_F3.mp4', 'C_3_8_55_BU_SMC_10-14_13-35-06_CE_RGB_DF2_F3.mp4', 'C_3_8_56_BU_DYB_10-17_13-16-04_CA_RGB_DF2_M3.mp4', 'C_3_8_56_BU_DYB_10-17_13-16-04_CB_RGB_DF2_M3.mp4', 'C_3_8_56_BU_DYB_10-17_13-16-04_CC_RGB_DF2_M3.mp4', 'C_3_8_56_BU_DYB_10-17_13-16-04_CD_RGB_DF2_M3.mp4', 'C_3_8_56_BU_DYB_10-17_13-16-04_CE_RGB_DF2_M3.mp4', 'C_3_8_56_BU_SMC_10-14_13-39-12_CA_RGB_DF2_F3.mp4', 'C_3_8_56_BU_SMC_10-14_13-39-12_CB_RGB_DF2_F3.mp4', 'C_3_8_56_BU_SMC_10-14_13-39-12_CC_RGB_DF2_F3.mp4', 'C_3_8_56_BU_SMC_10-14_13-39-12_CD_RGB_DF2_F3.mp4', 'C_3_8_56_BU_SMC_10-14_13-39-12_CE_RGB_DF2_F3.mp4', 'C_3_8_57_BU_DYA_08-23_15-06-11_CD_RGB_DF2_M3.mp4', 'C_3_8_57_BU_DYA_08-23_15-06-11_CE_RGB_DF2_M3.mp4', 'C_3_8_57_BU_DYA_08-23_15-06-11_CF_RGB_DF2_M3.mp4', 'C_3_8_57_BU_DYB_10-17_13-18-24_CA_RGB_DF2_M3.mp4', 'C_3_8_57_BU_DYB_10-17_13-18-24_CB_RGB_DF2_M3.mp4', 'C_3_8_57_BU_DYB_10-17_13-18-24_CC_RGB_DF2_M3.mp4', 'C_3_8_57_BU_DYB_10-17_13-18-24_CD_RGB_DF2_M3.mp4', 'C_3_8_57_BU_DYB_10-17_13-18-24_CE_RGB_DF2_M3.mp4', 'C_3_8_57_BU_SMC_10-14_13-40-47_CA_RGB_DF2_F3.mp4', 'C_3_8_57_BU_SMC_10-14_13-40-47_CB_RGB_DF2_F3.mp4', 'C_3_8_57_BU_SMC_10-14_13-40-47_CC_RGB_DF2_F3.mp4', 
'C_3_8_57_BU_SMC_10-14_13-40-47_CD_RGB_DF2_F3.mp4', 'C_3_8_57_BU_SMC_10-14_13-40-47_CE_RGB_DF2_F3.mp4', 'C_3_8_58_BU_DYB_10-17_13-20-28_CA_RGB_DF2_M3.mp4', 'C_3_8_58_BU_DYB_10-17_13-20-28_CB_RGB_DF2_M3.mp4', 'C_3_8_58_BU_DYB_10-17_13-20-28_CC_RGB_DF2_M3.mp4', 'C_3_8_58_BU_DYB_10-17_13-20-28_CD_RGB_DF2_M3.mp4', 'C_3_8_58_BU_DYB_10-17_13-20-28_CE_RGB_DF2_M3.mp4', 'C_3_8_58_BU_SMC_10-14_13-44-22_CA_RGB_DF2_F3.mp4', 'C_3_8_58_BU_SMC_10-14_13-44-22_CB_RGB_DF2_F3.mp4', 'C_3_8_58_BU_SMC_10-14_13-44-22_CC_RGB_DF2_F3.mp4', 'C_3_8_58_BU_SMC_10-14_13-44-22_CD_RGB_DF2_F3.mp4', 'C_3_8_58_BU_SMC_10-14_13-44-22_CE_RGB_DF2_F3.mp4', 'C_3_8_59_BU_DYB_10-17_13-22-05_CB_RGB_DF2_M3.mp4', 'C_3_8_59_BU_DYB_10-17_13-22-05_CC_RGB_DF2_M3.mp4', 'C_3_8_59_BU_DYB_10-17_13-22-05_CD_RGB_DF2_M3.mp4', 'C_3_8_59_BU_DYB_10-17_13-22-05_CE_RGB_DF2_M3.mp4', 'C_3_8_59_BU_SMC_10-13_16-36-18_CC_RGB_DF2_M4.mp4', 'C_3_8_59_BU_SMC_10-13_16-36-18_CD_RGB_DF2_M4.mp4', 'C_3_8_59_BU_SMC_10-13_16-36-18_CE_RGB_DF2_M4.mp4', 'C_3_8_60_BU_SMC_10-13_16-39-42_CA_RGB_DF2_M4.mp4', 'C_3_8_60_BU_SMC_10-13_16-39-42_CB_RGB_DF2_M4.mp4', 'C_3_8_60_BU_SMC_10-13_16-39-42_CC_RGB_DF2_M4.mp4', 'C_3_8_60_BU_SMC_10-13_16-39-42_CD_RGB_DF2_M4.mp4', 'C_3_8_60_BU_SMC_10-13_16-39-42_CE_RGB_DF2_M4.mp4', 'C_3_8_61_BU_DYB_10-17_13-26-05_CA_RGB_DF2_F3.mp4', 'C_3_8_61_BU_DYB_10-17_13-26-05_CB_RGB_DF2_F3.mp4', 'C_3_8_61_BU_DYB_10-17_13-26-05_CC_RGB_DF2_F3.mp4', 'C_3_8_61_BU_DYB_10-17_13-26-05_CD_RGB_DF2_F3.mp4', 'C_3_8_61_BU_DYB_10-17_13-26-05_CE_RGB_DF2_F3.mp4', 'C_3_8_61_BU_SMC_10-13_16-41-21_CB_RGB_DF2_M4.mp4', 'C_3_8_61_BU_SMC_10-13_16-41-21_CC_RGB_DF2_M4.mp4', 'C_3_8_61_BU_SMC_10-13_16-41-21_CD_RGB_DF2_M4.mp4', 'C_3_8_61_BU_SMC_10-13_16-41-21_CE_RGB_DF2_M4.mp4', 'C_3_8_62_BU_DYB_10-17_13-27-37_CA_RGB_DF2_F3.mp4', 'C_3_8_62_BU_DYB_10-17_13-27-37_CB_RGB_DF2_F3.mp4', 'C_3_8_62_BU_DYB_10-17_13-27-37_CC_RGB_DF2_F3.mp4', 'C_3_8_62_BU_DYB_10-17_13-27-37_CD_RGB_DF2_F3.mp4', 'C_3_8_62_BU_DYB_10-17_13-27-37_CE_RGB_DF2_F3.mp4', 
'C_3_8_62_BU_SMC_10-13_16-43-09_CB_RGB_DF2_M4.mp4', 'C_3_8_62_BU_SMC_10-13_16-43-09_CC_RGB_DF2_M4.mp4', 'C_3_8_62_BU_SMC_10-13_16-43-09_CD_RGB_DF2_M4.mp4', 'C_3_8_62_BU_SMC_10-13_16-43-09_CE_RGB_DF2_M4.mp4', 'C_3_8_63_BU_DYB_10-17_13-29-39_CA_RGB_DF2_F3.mp4', 'C_3_8_63_BU_DYB_10-17_13-29-39_CB_RGB_DF2_F3.mp4']\n", + "VS_03.이상행동_08.파손 feature extracting ended. Elapsed time: 0:01:30\n", + "VS_03.이상행동_09.방화 feature extracting starts\n", + "==>> file_list: ['C_3_9_34_BU_SYB_10-04_12-44-35_CC_RGB_DF2_F3.mp4', 'C_3_9_34_BU_SYB_10-04_12-44-35_CD_RGB_DF2_F3.mp4', 'C_3_9_35_BU_DYA_08-02_16-49-33_CA_RGB_DF2_F2.mp4', 'C_3_9_35_BU_DYA_08-02_16-49-34_CB_RGB_DF2_F2.mp4', 'C_3_9_35_BU_DYA_08-02_16-49-35_CC_RGB_DF2_F2.mp4', 'C_3_9_35_BU_SMA_09-27_14-10-34_CA_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SMA_09-27_14-10-34_CB_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SMA_09-27_14-10-34_CC_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SMA_09-27_14-10-34_CD_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SMB_09-02_16-06-31_CA_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SMB_09-02_16-06-31_CB_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SMB_09-02_16-06-31_CC_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SMB_09-02_16-06-31_CD_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SYA_10-06_13-15-10_CA_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SYA_10-06_13-15-10_CB_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SYA_10-06_13-15-10_CC_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SYA_10-06_13-15-10_CD_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SYB_10-04_12-46-25_CB_RGB_DF2_F3.mp4', 'C_3_9_35_BU_SYB_10-04_12-46-25_CD_RGB_DF2_F3.mp4', 'C_3_9_36_BU_DYA_08-02_16-51-36_CA_RGB_DF2_F2.mp4', 'C_3_9_36_BU_DYA_08-02_16-51-37_CB_RGB_DF2_F2.mp4', 'C_3_9_36_BU_DYA_08-02_16-51-37_CC_RGB_DF2_F2.mp4', 'C_3_9_36_BU_SMA_09-27_14-12-28_CA_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SMA_09-27_14-12-28_CB_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SMA_09-27_14-12-28_CC_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SMA_09-27_14-12-28_CD_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SMB_09-02_10-43-45_CA_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SMB_09-02_10-43-45_CB_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SMB_09-02_10-43-45_CC_RGB_DF2_F3.mp4', 
'C_3_9_36_BU_SMB_09-02_10-43-45_CD_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SYA_10-06_13-16-51_CA_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SYA_10-06_13-16-51_CB_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SYA_10-06_13-16-51_CC_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SYA_10-06_13-16-51_CD_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SYB_10-04_12-48-32_CA_RGB_DF2_F3.mp4', 'C_3_9_36_BU_SYB_10-04_12-48-32_CB_RGB_DF2_F3.mp4', 'C_3_9_37_BU_DYA_08-02_16-32-26_CB_RGB_DF2_F2.mp4', 'C_3_9_37_BU_DYA_08-02_16-32-27_CC_RGB_DF2_F2.mp4', 'C_3_9_37_BU_DYA_08-02_16-49-35_CA_RGB_DF2_F2.mp4', 'C_3_9_37_BU_SMA_09-27_14-12-28_CA_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SMA_09-27_14-12-28_CB_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SMA_09-27_14-12-28_CC_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SMA_09-27_14-12-28_CD_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SMB_09-02_16-17-08_CA_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SMB_09-02_16-17-08_CB_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SMB_09-02_16-17-08_CC_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SMB_09-02_16-17-08_CD_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SYA_10-06_13-18-41_CA_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SYA_10-06_13-18-41_CB_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SYA_10-06_13-18-41_CC_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SYA_10-06_13-18-41_CD_RGB_DF2_F3.mp4', 'C_3_9_37_BU_SYB_10-04_12-50-14_CB_RGB_DF2_F3.mp4', 'C_3_9_38_BU_DYA_08-02_16-40-27_CA_RGB_DF2_F2.mp4', 'C_3_9_38_BU_DYA_08-02_16-40-28_CB_RGB_DF2_F2.mp4', 'C_3_9_38_BU_DYA_08-02_16-40-28_CC_RGB_DF2_F2.mp4', 'C_3_9_38_BU_SMA_09-27_14-16-00_CA_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SMA_09-27_14-16-00_CB_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SMA_09-27_14-16-00_CC_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SMA_09-27_14-16-00_CD_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SMB_09-02_16-19-06_CA_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SMB_09-02_16-19-06_CB_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SMB_09-02_16-19-06_CC_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SMB_09-02_16-19-06_CD_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SYA_10-06_13-20-24_CA_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SYA_10-06_13-20-24_CB_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SYA_10-06_13-20-24_CC_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SYA_10-06_13-20-24_CD_RGB_DF2_F3.mp4', 
'C_3_9_38_BU_SYB_10-04_12-52-11_CA_RGB_DF2_F3.mp4', 'C_3_9_38_BU_SYB_10-04_12-52-11_CB_RGB_DF2_F3.mp4', 'C_3_9_39_BU_DYA_08-02_16-40-27_CA_RGB_DF2_F2.mp4', 'C_3_9_39_BU_DYA_08-02_16-43-11_CB_RGB_DF2_F2.mp4', 'C_3_9_39_BU_DYA_08-02_16-43-11_CC_RGB_DF2_F2.mp4', 'C_3_9_39_BU_SMA_09-27_14-17-52_CA_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SMA_09-27_14-17-52_CB_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SMA_09-27_14-17-52_CC_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SMA_09-27_14-17-52_CD_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SMB_09-02_16-21-15_CA_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SMB_09-02_16-21-15_CB_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SMB_09-02_16-21-15_CC_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SMB_09-02_16-21-15_CD_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SYA_10-06_13-26-50_CA_RGB_DF2_F3.mp4', 'C_3_9_39_BU_SYA_10-06_13-26-50_CB_RGB_DF2_F3.mp4']\n", + "VS_03.이상행동_09.방화 feature extracting ended. Elapsed time: 0:01:32\n", + "VS_03.이상행동_10.흡연 feature extracting starts\n", + "==>> file_list: ['C_3_10_33_BU_SYB_10-04_13-56-16_CD_RGB_DF2_F3.mp4', 'C_3_10_34_BU_DYA_08-04_14-52-39_CA_RGB_DF2_F2.mp4', 'C_3_10_34_BU_DYA_08-04_14-52-40_CC_RGB_DF2_F2.mp4', 'C_3_10_34_BU_DYA_08-04_14-52-41_CB_RGB_DF2_F2.mp4', 'C_3_10_34_BU_SMA_09-27_14-59-25_CA_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SMA_09-27_14-59-25_CB_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SMA_09-27_14-59-25_CC_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SMA_09-27_14-59-25_CD_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SMB_09-02_16-58-39_CA_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SMB_09-02_16-58-39_CB_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SMB_09-02_16-58-39_CC_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SMB_09-02_16-58-39_CD_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SYA_10-06_13-40-39_CA_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SYA_10-06_13-40-39_CB_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SYB_10-04_13-59-24_CA_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SYB_10-04_13-59-24_CB_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SYB_10-04_13-59-24_CC_RGB_DF2_F3.mp4', 'C_3_10_34_BU_SYB_10-04_13-59-24_CD_RGB_DF2_F3.mp4', 'C_3_10_35_BU_DYA_08-04_14-54-45_CA_RGB_DF2_F2.mp4', 'C_3_10_35_BU_DYA_08-04_14-54-47_CB_RGB_DF2_F2.mp4', 
'C_3_10_35_BU_DYA_08-04_14-54-47_CC_RGB_DF2_F2.mp4', 'C_3_10_35_BU_SMA_09-27_15-00-58_CA_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SMA_09-27_15-00-58_CB_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SMA_09-27_15-00-58_CC_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SMA_09-27_15-00-58_CD_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SMB_09-02_17-00-45_CA_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SMB_09-02_17-00-45_CB_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SMB_09-02_17-00-45_CC_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SMB_09-02_17-00-45_CD_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SYA_10-06_13-42-26_CA_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SYA_10-06_13-42-26_CB_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SYA_10-06_13-42-26_CD_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SYB_10-04_14-01-10_CA_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SYB_10-04_14-01-10_CB_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SYB_10-04_14-01-10_CC_RGB_DF2_F3.mp4', 'C_3_10_35_BU_SYB_10-04_14-01-10_CD_RGB_DF2_F3.mp4', 'C_3_10_36_BU_DYA_08-04_14-56-41_CA_RGB_DF2_F2.mp4', 'C_3_10_36_BU_DYA_08-04_14-56-43_CB_RGB_DF2_F2.mp4', 'C_3_10_36_BU_DYA_08-04_14-56-43_CC_RGB_DF2_F2.mp4', 'C_3_10_36_BU_SMA_09-27_15-03-01_CA_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SMA_09-27_15-03-01_CB_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SMA_09-27_15-03-01_CC_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SMA_09-27_15-03-01_CD_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SMB_09-02_17-04-01_CA_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SMB_09-02_17-04-01_CB_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SMB_09-02_17-04-01_CC_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SMB_09-02_17-04-01_CD_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SYA_10-06_13-44-23_CA_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SYA_10-06_13-44-23_CB_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SYB_10-04_14-04-23_CA_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SYB_10-04_14-04-23_CB_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SYB_10-04_14-04-23_CC_RGB_DF2_F3.mp4', 'C_3_10_36_BU_SYB_10-04_14-04-23_CD_RGB_DF2_F3.mp4', 'C_3_10_37_BU_DYA_08-04_14-40-17_CA_RGB_DF2_F2.mp4', 'C_3_10_37_BU_DYA_08-04_14-40-18_CB_RGB_DF2_F2.mp4', 'C_3_10_37_BU_DYA_08-04_14-40-18_CC_RGB_DF2_F2.mp4', 'C_3_10_37_BU_SMA_09-27_15-04-41_CB_RGB_DF2_F3.mp4', 
'C_3_10_37_BU_SMA_09-27_15-04-41_CC_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SMB_09-02_17-05-41_CA_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SMB_09-02_17-05-41_CB_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SMB_09-02_17-05-41_CC_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SMB_09-02_17-05-41_CD_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SYA_10-06_13-45-43_CA_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SYA_10-06_13-45-43_CC_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SYA_10-06_13-45-43_CD_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SYB_10-04_14-02-41_CA_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SYB_10-04_14-02-41_CB_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SYB_10-04_14-02-41_CC_RGB_DF2_F3.mp4', 'C_3_10_37_BU_SYB_10-04_14-02-41_CD_RGB_DF2_F3.mp4', 'C_3_10_38_BU_DYA_08-04_14-46-22_CA_RGB_DF2_F2.mp4', 'C_3_10_38_BU_DYA_08-04_14-46-24_CB_RGB_DF2_F2.mp4', 'C_3_10_38_BU_DYA_08-04_14-46-24_CC_RGB_DF2_F2.mp4', 'C_3_10_38_BU_SMA_09-27_14-35-50_CA_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SMA_09-27_14-35-50_CB_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SMA_09-27_14-35-50_CC_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SMA_09-27_14-35-50_CD_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SMB_09-02_17-07-45_CA_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SMB_09-02_17-07-45_CB_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SYA_10-06_13-56-00_CA_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SYA_10-06_13-56-00_CB_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SYB_10-04_13-33-33_CA_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SYB_10-04_13-33-33_CB_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SYB_10-04_13-33-33_CC_RGB_DF2_F3.mp4', 'C_3_10_38_BU_SYB_10-04_13-33-33_CD_RGB_DF2_F3.mp4', 'C_3_10_39_BU_DYA_08-04_14-48-32_CA_RGB_DF2_F2.mp4', 'C_3_10_39_BU_DYA_08-04_14-48-34_CB_RGB_DF2_F2.mp4']\n", + "VS_03.이상행동_10.흡연 feature extracting ended. 
Elapsed time: 0:01:36\n", + "VS_03.이상행동_11.유기 feature extracting starts\n", + "==>> file_list: ['C_3_11_34_BU_DYA_08-10_16-33-05_CA_RGB_DF2_M2_F2.mp4', 'C_3_11_34_BU_DYA_08-10_16-33-10_CB_RGB_DF2_M2_F2.mp4', 'C_3_11_34_BU_DYA_08-10_16-33-10_CC_RGB_DF2_M2_F2.mp4', 'C_3_11_34_BU_SMA_09-05_15-28-52_CA_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMA_09-05_15-28-52_CB_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMA_09-05_15-28-55_CC_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMA_09-05_15-28-55_CD_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMB_09-05_13-36-37_CA_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMB_09-05_13-36-37_CC_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMB_09-05_13-36-40_CB_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMB_09-05_13-36-40_CD_RGB_DF2_M4_F4.mp4', 'C_3_11_34_BU_SMC_10-16_10-56-45_CA_RGB_DF2_M1_F1.mp4', 'C_3_11_34_BU_SMC_10-16_10-56-45_CB_RGB_DF2_M1_F1.mp4', 'C_3_11_34_BU_SMC_10-16_10-56-45_CD_RGB_DF2_M1_F1.mp4', 'C_3_11_35_BU_DYA_07-29_11-49-41_CD_RGB_DF2_M2.mp4', 'C_3_11_35_BU_DYA_07-29_11-49-41_CE_RGB_DF2_M2.mp4', 'C_3_11_35_BU_DYA_07-29_11-49-41_CF_RGB_DF2_M2.mp4', 'C_3_11_35_BU_DYA_08-10_16-35-44_CA_RGB_DF2_M2_F2.mp4', 'C_3_11_35_BU_DYA_08-10_16-35-49_CB_RGB_DF2_M2_F2.mp4', 'C_3_11_35_BU_DYA_08-10_16-35-49_CC_RGB_DF2_M2_F2.mp4', 'C_3_11_35_BU_SMA_09-05_15-30-21_CA_RGB_DF2_M4_F4.mp4', 'C_3_11_35_BU_SMA_09-05_15-30-21_CB_RGB_DF2_M4_F4.mp4', 'C_3_11_35_BU_SMA_09-05_15-30-24_CC_RGB_DF2_M4_F4.mp4', 'C_3_11_35_BU_SMA_09-05_15-30-25_CD_RGB_DF2_M4_F4.mp4', 'C_3_11_35_BU_SMB_09-05_13-38-08_CC_RGB_DF2_M4_F4.mp4', 'C_3_11_35_BU_SMB_09-05_13-38-11_CD_RGB_DF2_M4_F4.mp4', 'C_3_11_35_BU_SMC_10-14_10-20-53_CA_RGB_DF2_M2_F2.mp4', 'C_3_11_35_BU_SMC_10-14_10-20-53_CB_RGB_DF2_M2_F2.mp4', 'C_3_11_35_BU_SMC_10-14_10-20-53_CC_RGB_DF2_M2_F2.mp4', 'C_3_11_35_BU_SMC_10-14_10-20-53_CD_RGB_DF2_M2_F2.mp4', 'C_3_11_35_BU_SMC_10-14_10-20-53_CE_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_DYA_07-29_11-52-11_CD_RGB_DF2_M2.mp4', 'C_3_11_36_BU_DYA_07-29_11-52-11_CE_RGB_DF2_M2.mp4', 'C_3_11_36_BU_DYA_07-29_11-52-11_CF_RGB_DF2_M2.mp4', 
'C_3_11_36_BU_DYA_08-10_16-42-21_CA_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_DYA_08-10_16-42-26_CB_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_DYA_08-10_16-42-26_CC_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_SMA_09-05_15-33-23_CA_RGB_DF2_M4_F4.mp4', 'C_3_11_36_BU_SMA_09-05_15-33-23_CB_RGB_DF2_M4_F4.mp4', 'C_3_11_36_BU_SMA_09-05_15-33-26_CC_RGB_DF2_M4_F4.mp4', 'C_3_11_36_BU_SMA_09-05_15-33-27_CD_RGB_DF2_M4_F4.mp4', 'C_3_11_36_BU_SMB_09-05_13-42-40_CA_RGB_DF2_M4_F4.mp4', 'C_3_11_36_BU_SMB_09-05_13-42-43_CB_RGB_DF2_M4_F4.mp4', 'C_3_11_36_BU_SMC_10-14_10-24-36_CA_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_SMC_10-14_10-24-36_CB_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_SMC_10-14_10-24-36_CC_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_SMC_10-14_10-24-36_CD_RGB_DF2_M2_F2.mp4', 'C_3_11_36_BU_SMC_10-14_10-24-36_CE_RGB_DF2_M2_F2.mp4', 'C_3_11_37_BU_DYA_07-29_11-53-57_CD_RGB_DF2_M2.mp4', 'C_3_11_37_BU_DYA_07-29_11-53-57_CE_RGB_DF2_M2.mp4', 'C_3_11_37_BU_DYA_07-29_11-53-57_CF_RGB_DF2_M2.mp4', 'C_3_11_37_BU_DYB_10-16_14-39-23_CB_RGB_DF2_F1_M1.mp4', 'C_3_11_37_BU_DYB_10-16_14-39-23_CD_RGB_DF2_F1_M1.mp4', 'C_3_11_37_BU_SMC_10-14_11-19-25_CA_RGB_DF2_M2_F2.mp4', 'C_3_11_37_BU_SMC_10-14_11-19-25_CB_RGB_DF2_M2_F2.mp4', 'C_3_11_37_BU_SMC_10-14_11-19-25_CC_RGB_DF2_M2_F2.mp4', 'C_3_11_37_BU_SMC_10-14_11-19-25_CD_RGB_DF2_M2_F2.mp4', 'C_3_11_37_BU_SMC_10-14_11-19-25_CE_RGB_DF2_M2_F2.mp4', 'C_3_11_38_BU_DYA_07-29_15-53-58_CD_RGB_DF2_F2.mp4', 'C_3_11_38_BU_DYA_07-29_15-53-58_CE_RGB_DF2_F2.mp4', 'C_3_11_38_BU_DYA_07-29_15-53-58_CF_RGB_DF2_F2.mp4', 'C_3_11_38_BU_DYB_10-16_14-41-46_CA_RGB_DF2_M1_F1.mp4', 'C_3_11_38_BU_DYB_10-16_14-41-46_CB_RGB_DF2_M1_F1.mp4', 'C_3_11_38_BU_DYB_10-16_14-41-46_CC_RGB_DF2_M1_F1.mp4', 'C_3_11_38_BU_DYB_10-16_14-41-46_CD_RGB_DF2_M1_F1.mp4', 'C_3_11_38_BU_DYB_10-16_14-41-46_CE_RGB_DF2_M1_F1.mp4', 'C_3_11_38_BU_SMC_10-14_15-22-40_CA_RGB_DF2_F3_M3.mp4', 'C_3_11_38_BU_SMC_10-14_15-22-40_CB_RGB_DF2_F3_M3.mp4', 'C_3_11_38_BU_SMC_10-14_15-22-40_CC_RGB_DF2_F3_M3.mp4', 'C_3_11_38_BU_SMC_10-14_15-22-40_CD_RGB_DF2_F3_M3.mp4', 
'C_3_11_38_BU_SMC_10-14_15-22-40_CE_RGB_DF2_F3_M3.mp4', 'C_3_11_39_BU_DYA_07-29_15-56-22_CD_RGB_DF2_F2.mp4', 'C_3_11_39_BU_DYA_07-29_15-56-22_CE_RGB_DF2_F2.mp4', 'C_3_11_39_BU_DYA_07-29_15-56-22_CF_RGB_DF2_F2.mp4', 'C_3_11_39_BU_DYB_10-16_14-43-25_CD_RGB_DF2_F1_M1.mp4', 'C_3_11_39_BU_DYB_10-16_14-43-25_CE_RGB_DF2_F1_M1.mp4', 'C_3_11_39_BU_SMC_10-14_15-26-36_CA_RGB_DF2_F3_M3.mp4', 'C_3_11_39_BU_SMC_10-14_15-26-36_CB_RGB_DF2_F3_M3.mp4', 'C_3_11_39_BU_SMC_10-14_15-26-36_CC_RGB_DF2_F3_M3.mp4', 'C_3_11_39_BU_SMC_10-14_15-26-36_CD_RGB_DF2_F3_M3.mp4']\n", + "VS_03.이상행동_11.유기 feature extracting ended. Elapsed time: 0:01:46\n", + "VS_03.이상행동_12.절도 feature extracting starts\n", + "==>> file_list: ['C_3_12_40_BU_SMC_10-14_11-43-44_CD_RGB_DF2_M2.mp4', 'C_3_12_40_BU_SMC_10-14_11-43-44_CE_RGB_DF2_M2.mp4', 'C_3_12_41_BU_DYA_07-29_15-20-29_CD_RGB_DF2_F2.mp4', 'C_3_12_41_BU_DYA_07-29_15-20-29_CE_RGB_DF2_F2.mp4', 'C_3_12_41_BU_DYA_07-29_15-20-29_CF_RGB_DF2_F2.mp4', 'C_3_12_41_BU_DYB_10-16_14-50-58_CA_RGB_DF2_M1.mp4', 'C_3_12_41_BU_DYB_10-16_14-50-58_CB_RGB_DF2_M1.mp4', 'C_3_12_41_BU_DYB_10-16_14-50-58_CD_RGB_DF2_M1.mp4', 'C_3_12_41_BU_SMC_10-14_11-45-31_CA_RGB_DF2_M2.mp4', 'C_3_12_41_BU_SMC_10-14_11-45-31_CB_RGB_DF2_M2.mp4', 'C_3_12_41_BU_SMC_10-14_11-45-31_CC_RGB_DF2_M2.mp4', 'C_3_12_41_BU_SMC_10-14_11-45-31_CD_RGB_DF2_M2.mp4', 'C_3_12_41_BU_SMC_10-14_11-45-31_CE_RGB_DF2_M2.mp4', 'C_3_12_42_BU_DYA_07-29_15-23-55_CD_RGB_DF2_F2.mp4', 'C_3_12_42_BU_DYA_07-29_15-23-55_CE_RGB_DF2_F2.mp4', 'C_3_12_42_BU_DYA_07-29_15-23-55_CF_RGB_DF2_F2.mp4', 'C_3_12_42_BU_SMC_10-14_12-15-24_CA_RGB_DF2_F2.mp4', 'C_3_12_42_BU_SMC_10-14_12-15-24_CB_RGB_DF2_F2.mp4', 'C_3_12_42_BU_SMC_10-14_12-15-24_CC_RGB_DF2_F2.mp4', 'C_3_12_42_BU_SMC_10-14_12-15-24_CD_RGB_DF2_F2.mp4', 'C_3_12_42_BU_SMC_10-14_12-15-24_CE_RGB_DF2_F2.mp4', 'C_3_12_43_BU_DYB_10-16_14-55-05_CA_RGB_DF2_F1.mp4', 'C_3_12_43_BU_DYB_10-16_14-55-05_CB_RGB_DF2_F1.mp4', 'C_3_12_43_BU_DYB_10-16_14-55-05_CC_RGB_DF2_F1.mp4', 
'C_3_12_43_BU_DYB_10-16_14-55-05_CD_RGB_DF2_F1.mp4', 'C_3_12_43_BU_DYB_10-16_14-55-05_CE_RGB_DF2_F1.mp4', 'C_3_12_43_BU_SMC_10-14_12-17-14_CC_RGB_DF2_F2.mp4', 'C_3_12_44_BU_DYB_10-16_14-56-44_CA_RGB_DF2_F1.mp4', 'C_3_12_44_BU_DYB_10-16_14-56-44_CB_RGB_DF2_F1.mp4', 'C_3_12_44_BU_DYB_10-16_14-56-44_CC_RGB_DF2_F1.mp4', 'C_3_12_44_BU_DYB_10-16_14-56-44_CD_RGB_DF2_F1.mp4', 'C_3_12_44_BU_DYB_10-16_14-56-44_CE_RGB_DF2_F1.mp4', 'C_3_12_44_BU_SMC_10-14_12-19-55_CA_RGB_DF2_F2.mp4', 'C_3_12_44_BU_SMC_10-14_12-19-55_CB_RGB_DF2_F2.mp4', 'C_3_12_44_BU_SMC_10-14_12-19-55_CC_RGB_DF2_F2.mp4', 'C_3_12_44_BU_SMC_10-14_12-19-55_CD_RGB_DF2_F2.mp4', 'C_3_12_44_BU_SMC_10-14_12-19-55_CE_RGB_DF2_F2.mp4', 'C_3_12_45_BU_DYB_10-16_15-02-02_CA_RGB_DF2_F1.mp4', 'C_3_12_45_BU_DYB_10-16_15-02-02_CB_RGB_DF2_F1.mp4', 'C_3_12_45_BU_DYB_10-16_15-02-02_CD_RGB_DF2_F1.mp4', 'C_3_12_45_BU_DYB_10-16_15-02-02_CE_RGB_DF2_F1.mp4', 'C_3_12_45_BU_SMC_10-14_16-07-18_CA_RGB_DF2_M3.mp4', 'C_3_12_45_BU_SMC_10-14_16-07-18_CB_RGB_DF2_M3.mp4', 'C_3_12_45_BU_SMC_10-14_16-07-18_CC_RGB_DF2_M3.mp4', 'C_3_12_45_BU_SMC_10-14_16-07-18_CD_RGB_DF2_M3.mp4', 'C_3_12_45_BU_SMC_10-14_16-07-18_CE_RGB_DF2_M3.mp4', 'C_3_12_46_BU_SMC_10-14_16-09-57_CB_RGB_DF2_M3.mp4', 'C_3_12_46_BU_SMC_10-14_16-09-57_CC_RGB_DF2_M3.mp4', 'C_3_12_46_BU_SMC_10-14_16-09-57_CE_RGB_DF2_M3.mp4', 'C_3_12_47_BU_DYB_10-17_13-44-21_CA_RGB_DF2_M3.mp4', 'C_3_12_47_BU_DYB_10-17_13-44-21_CB_RGB_DF2_M3.mp4', 'C_3_12_47_BU_DYB_10-17_13-44-21_CC_RGB_DF2_M3.mp4', 'C_3_12_47_BU_DYB_10-17_13-44-21_CD_RGB_DF2_M3.mp4', 'C_3_12_47_BU_DYB_10-17_13-44-21_CE_RGB_DF2_M3.mp4', 'C_3_12_47_BU_SMC_10-14_16-15-55_CA_RGB_DF2_M3.mp4', 'C_3_12_47_BU_SMC_10-14_16-15-55_CB_RGB_DF2_M3.mp4', 'C_3_12_47_BU_SMC_10-14_16-15-55_CC_RGB_DF2_M3.mp4', 'C_3_12_47_BU_SMC_10-14_16-15-55_CD_RGB_DF2_M3.mp4', 'C_3_12_47_BU_SMC_10-14_16-15-55_CE_RGB_DF2_M3.mp4', 'C_3_12_48_BU_DYA_07-29_14-11-06_CD_RGB_DF2_F2.mp4', 'C_3_12_48_BU_DYA_07-29_14-11-06_CE_RGB_DF2_F2.mp4', 
'C_3_12_48_BU_DYA_07-29_14-11-06_CF_RGB_DF2_F2.mp4', 'C_3_12_48_BU_DYB_10-17_13-46-19_CA_RGB_DF2_M3.mp4', 'C_3_12_48_BU_DYB_10-17_13-46-19_CB_RGB_DF2_M3.mp4', 'C_3_12_48_BU_DYB_10-17_13-46-19_CC_RGB_DF2_M3.mp4', 'C_3_12_48_BU_DYB_10-17_13-46-19_CD_RGB_DF2_M3.mp4', 'C_3_12_48_BU_DYB_10-17_13-46-19_CE_RGB_DF2_M3.mp4', 'C_3_12_48_BU_SMC_10-14_13-46-19_CA_RGB_DF2_F3.mp4', 'C_3_12_48_BU_SMC_10-14_13-46-19_CB_RGB_DF2_F3.mp4', 'C_3_12_48_BU_SMC_10-14_13-46-19_CC_RGB_DF2_F3.mp4', 'C_3_12_48_BU_SMC_10-14_13-46-19_CD_RGB_DF2_F3.mp4', 'C_3_12_48_BU_SMC_10-14_13-46-19_CE_RGB_DF2_F3.mp4', 'C_3_12_49_BU_DYA_07-29_14-13-29_CD_RGB_DF2_F3.mp4', 'C_3_12_49_BU_DYA_07-29_14-13-29_CE_RGB_DF2_F3.mp4', 'C_3_12_49_BU_DYA_07-29_14-13-29_CF_RGB_DF2_F3.mp4', 'C_3_12_49_BU_DYB_10-17_13-47-59_CA_RGB_DF2_M3.mp4', 'C_3_12_49_BU_DYB_10-17_13-47-59_CB_RGB_DF2_M3.mp4', 'C_3_12_49_BU_DYB_10-17_13-47-59_CC_RGB_DF2_M3.mp4', 'C_3_12_49_BU_DYB_10-17_13-47-59_CD_RGB_DF2_M3.mp4', 'C_3_12_49_BU_DYB_10-17_13-47-59_CE_RGB_DF2_M3.mp4']\n", + "VS_03.이상행동_12.절도 feature extracting ended. 
Elapsed time: 0:01:48\n", + "VS_03.이상행동_13.폭행 feature extracting starts\n", + "==>> file_list: ['C_3_13_31_BU_SMA_09-05_15-48-20_CD_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SMB_09-05_14-08-01_CA_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SMB_09-05_14-08-01_CC_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SMB_09-05_14-08-04_CB_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SMB_09-05_14-08-04_CD_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYA_09-14_14-05-04_CA_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYA_09-14_14-05-04_CB_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYA_09-14_14-05-07_CC_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYA_09-14_14-05-07_CD_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYB_09-14_15-31-44_CA_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYB_09-14_15-31-44_CB_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYB_09-14_15-31-44_CC_RGB_DF2_F4.mp4', 'C_3_13_31_BU_SYB_09-14_15-31-44_CD_RGB_DF2_F4.mp4', 'C_3_13_32_BU_DYA_08-23_17-10-37_CD_RGB_DF2_F2_M2.mp4', 'C_3_13_32_BU_DYA_08-23_17-10-37_CE_RGB_DF2_F2_M2.mp4', 'C_3_13_32_BU_SMA_09-05_15-51-19_CA_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SMA_09-05_15-51-20_CB_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SMA_09-05_15-51-22_CC_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SMA_09-05_15-51-23_CD_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SMB_09-05_14-10-31_CA_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SMB_09-05_14-10-31_CC_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SMB_09-05_14-10-34_CB_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SMB_09-05_14-10-34_CD_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYA_09-14_14-06-33_CA_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYA_09-14_14-06-33_CB_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYA_09-14_14-06-36_CC_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYA_09-14_14-06-36_CD_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYB_09-14_15-33-04_CA_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYB_09-14_15-33-04_CB_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYB_09-14_15-33-04_CC_RGB_DF2_F4.mp4', 'C_3_13_32_BU_SYB_09-14_15-33-04_CD_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SMA_09-05_15-53-00_CA_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SMA_09-05_15-53-00_CB_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SMA_09-05_15-53-03_CC_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SMA_09-05_15-53-03_CD_RGB_DF2_F4.mp4', 
'C_3_13_33_BU_SMB_09-05_14-12-29_CA_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SMB_09-05_14-12-29_CC_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SMB_09-05_14-12-32_CB_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SMB_09-05_14-12-32_CD_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SYA_09-14_14-08-41_CA_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SYA_09-14_14-08-41_CB_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SYA_09-14_14-08-44_CC_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SYA_09-14_14-08-44_CD_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SYB_09-14_15-37-07_CB_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SYB_09-14_15-37-07_CC_RGB_DF2_F4.mp4', 'C_3_13_33_BU_SYB_09-14_15-37-07_CD_RGB_DF2_F4.mp4', 'C_3_13_34_BU_SMA_09-05_15-56-55_CA_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SMA_09-05_15-56-56_CB_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SMA_09-05_15-56-59_CC_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SMA_09-05_15-56-59_CD_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SMB_09-05_14-18-04_CA_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SMB_09-05_14-18-04_CC_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SMB_09-05_14-18-07_CD_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SMB_09-05_14-18-08_CB_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SYB_09-14_15-53-50_CA_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SYB_09-14_15-53-50_CB_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SYB_09-14_15-53-50_CC_RGB_DF2_M4.mp4', 'C_3_13_34_BU_SYB_09-14_15-53-50_CD_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMA_09-05_15-58-26_CA_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMA_09-05_15-58-26_CB_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMA_09-05_15-58-29_CC_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMA_09-05_15-58-29_CD_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMB_09-05_14-19-43_CA_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMB_09-05_14-19-43_CC_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMB_09-05_14-19-46_CD_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SMB_09-05_14-19-47_CB_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SYB_09-14_15-58-55_CA_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SYB_09-14_15-58-55_CB_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SYB_09-14_15-58-55_CC_RGB_DF2_M4.mp4', 'C_3_13_35_BU_SYB_09-14_15-58-55_CD_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SMA_09-05_16-00-06_CA_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SMA_09-05_16-00-07_CB_RGB_DF2_M4.mp4', 
'C_3_13_36_BU_SMA_09-05_16-00-09_CC_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SMA_09-05_16-00-10_CD_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SMB_09-05_14-21-13_CC_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SMB_09-05_14-21-14_CA_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SMB_09-05_14-21-16_CD_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SMB_09-05_14-21-17_CB_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SYB_09-14_16-02-20_CA_RGB_DF2_M4.mp4', 'C_3_13_36_BU_SYB_09-14_16-02-20_CB_RGB_DF2_M4.mp4']\n", + "VS_03.이상행동_13.폭행 feature extracting ended. Elapsed time: 0:01:47\n", + "VS_03.이상행동_14.교통약자 feature extracting starts\n", + "==>> file_list: ['C_3_14_65_BU_DYA_07-19_12-30-50_d_DF6_F4.mp4', 'C_3_14_65_BU_DYA_07-19_12-30-50_e_DF6_F4.mp4', 'C_3_14_65_BU_DYA_07-19_12-30-50_f_DF6_F4.mp4', 'C_3_14_65_BU_DYB_10-13_12-03-56_CA_DF2_F4.mp4', 'C_3_14_65_BU_DYB_10-13_12-03-56_CB_DF2_F4.mp4', 'C_3_14_65_BU_DYB_10-13_12-03-56_CC_DF2_F4.mp4', 'C_3_14_65_BU_DYB_10-13_12-03-56_CD_DF2_F4.mp4', 'C_3_14_65_BU_DYB_10-13_12-03-56_CE_DF2_F4.mp4', 'C_3_14_65_BU_DYB_10-13_12-03-56_CF_DF2_F4.mp4', 'C_3_14_66_BU_DYA_07-19_11-46-00_a_DF6_F4.mp4', 'C_3_14_66_BU_DYA_07-19_11-46-00_b_DF6_F4.mp4', 'C_3_14_66_BU_DYA_07-19_11-46-00_c_DF6_F4.mp4', 'C_3_14_66_BU_DYA_07-19_11-46-00_d_DF6_F4.mp4', 'C_3_14_66_BU_DYA_07-19_11-46-00_e_DF6_F4.mp4', 'C_3_14_66_BU_DYA_07-19_11-46-00_f_DF6_F4.mp4', 'C_3_14_66_BU_DYB_10-13_12-06-02_CA_DF2_F4.mp4', 'C_3_14_66_BU_DYB_10-13_12-06-02_CB_DF2_F4.mp4', 'C_3_14_66_BU_DYB_10-13_12-06-02_CC_DF2_F4.mp4', 'C_3_14_66_BU_DYB_10-13_12-06-02_CD_DF2_F4.mp4', 'C_3_14_66_BU_DYB_10-13_12-06-02_CE_DF2_F4.mp4', 'C_3_14_66_BU_DYB_10-13_12-06-02_CF_DF2_F4.mp4', 'C_3_14_67_BU_DYA_07-19_11-54-57_a_DF6_F4.mp4', 'C_3_14_67_BU_DYA_07-19_11-54-57_b_DF6_F4.mp4', 'C_3_14_67_BU_DYA_07-19_11-54-57_c_DF6_F4.mp4', 'C_3_14_67_BU_DYA_07-19_11-54-57_d_DF6_F4.mp4', 'C_3_14_67_BU_DYA_07-19_11-54-57_e_DF6_F4.mp4', 'C_3_14_67_BU_DYA_07-19_11-54-57_f_DF6_F4.mp4', 'C_3_14_67_BU_DYB_10-13_10-14-49_CA_DF2_M4.mp4', 'C_3_14_67_BU_DYB_10-13_10-14-49_CB_DF2_M4.mp4', 
'C_3_14_67_BU_DYB_10-13_10-14-49_CC_DF2_M4.mp4', 'C_3_14_67_BU_DYB_10-13_10-14-49_CD_DF2_M4.mp4', 'C_3_14_67_BU_DYB_10-13_10-14-49_CE_DF2_M4.mp4', 'C_3_14_67_BU_DYB_10-13_10-14-49_CF_DF2_M4.mp4', 'C_3_14_68_BU_DYA_07-19_12-04-44_a_DF6_F4.mp4', 'C_3_14_68_BU_DYA_07-19_12-04-44_b_DF6_F4.mp4', 'C_3_14_68_BU_DYA_07-19_12-04-44_c_DF6_F4.mp4', 'C_3_14_68_BU_DYA_07-19_12-04-44_d_DF6_F4.mp4', 'C_3_14_68_BU_DYA_07-19_12-04-44_e_DF6_F4.mp4', 'C_3_14_68_BU_DYA_07-19_12-04-44_f_DF6_F4.mp4', 'C_3_14_68_BU_DYB_10-13_10-17-17_CA_DF2_M4.mp4', 'C_3_14_68_BU_DYB_10-13_10-17-17_CB_DF2_M4.mp4', 'C_3_14_68_BU_DYB_10-13_10-17-17_CC_DF2_M4.mp4', 'C_3_14_68_BU_DYB_10-13_10-17-17_CD_DF2_M4.mp4', 'C_3_14_68_BU_DYB_10-13_10-17-17_CE_DF2_M4.mp4', 'C_3_14_68_BU_DYB_10-13_10-17-17_CF_DF2_M4.mp4', 'C_3_14_69_BU_DYA_07-20_14-59-57_a_DF6_M4.mp4', 'C_3_14_69_BU_DYA_07-20_14-59-57_b_DF6_M4.mp4', 'C_3_14_69_BU_DYA_07-20_14-59-57_c_DF6_M4.mp4', 'C_3_14_69_BU_DYA_07-20_14-59-57_d_DF6_M4.mp4', 'C_3_14_69_BU_DYA_07-20_14-59-57_e_DF6_M4.mp4', 'C_3_14_69_BU_DYA_07-20_14-59-57_f_DF6_M4.mp4', 'C_3_14_69_BU_DYB_10-13_10-19-42_CA_DF2_M4.mp4', 'C_3_14_69_BU_DYB_10-13_10-19-42_CB_DF2_M4.mp4', 'C_3_14_69_BU_DYB_10-13_10-19-42_CC_DF2_M4.mp4', 'C_3_14_69_BU_DYB_10-13_10-19-42_CD_DF2_M4.mp4', 'C_3_14_69_BU_DYB_10-13_10-19-42_CE_DF2_M4.mp4', 'C_3_14_69_BU_DYB_10-13_10-19-42_CF_DF2_M4.mp4', 'C_3_14_70_BU_DYA_07-20_15-01-52_a_DF6_M4.mp4', 'C_3_14_70_BU_DYA_07-20_15-01-52_b_DF6_M4.mp4', 'C_3_14_70_BU_DYA_07-20_15-01-52_c_DF6_M4.mp4', 'C_3_14_70_BU_DYA_07-20_15-01-52_d_DF6_M4.mp4', 'C_3_14_70_BU_DYA_07-20_15-01-52_e_DF6_M4.mp4', 'C_3_14_70_BU_DYA_07-20_15-01-52_f_DF6_M4.mp4', 'C_3_14_70_BU_DYB_10-13_10-21-44_CA_DF2_M4.mp4', 'C_3_14_70_BU_DYB_10-13_10-21-44_CB_DF2_M4.mp4', 'C_3_14_70_BU_DYB_10-13_10-21-44_CC_DF2_M4.mp4', 'C_3_14_70_BU_DYB_10-13_10-21-44_CD_DF2_M4.mp4', 'C_3_14_70_BU_DYB_10-13_10-21-44_CE_DF2_M4.mp4', 'C_3_14_70_BU_DYB_10-13_10-21-44_CF_DF2_M4.mp4', 'C_3_14_71_BU_DYA_07-20_15-04-31_a_DF6_M4.mp4', 
'C_3_14_71_BU_DYA_07-20_15-04-31_b_DF6_M4.mp4', 'C_3_14_71_BU_DYA_07-20_15-04-31_c_DF6_M4.mp4', 'C_3_14_71_BU_DYB_10-13_10-45-53_CA_DF2_M4.mp4', 'C_3_14_71_BU_DYB_10-13_10-45-53_CB_DF2_M4.mp4', 'C_3_14_71_BU_DYB_10-13_10-45-53_CC_DF2_M4.mp4', 'C_3_14_71_BU_DYB_10-13_10-45-53_CD_DF2_M4.mp4', 'C_3_14_71_BU_DYB_10-13_10-45-53_CE_DF2_M4.mp4', 'C_3_14_71_BU_DYB_10-13_10-45-53_CF_DF2_M4.mp4', 'C_3_14_72_BU_DYA_07-20_15-07-07_a_DF6_M4.mp4', 'C_3_14_72_BU_DYA_07-20_15-07-07_b_DF6_M4.mp4', 'C_3_14_72_BU_DYA_07-20_15-07-07_c_DF6_M4.mp4']\n", + "VS_03.이상행동_14.교통약자 feature extracting ended. Elapsed time: 0:01:52\n" + ] + } + ], + "source": [ + "for folder_name in folder_list:\n", + " \n", + " time_start = datetime.now()\n", + "\n", + " print(f\"{folder_name} feature extracting starts\")\n", + "\n", + " if not os.path.exists(npy_root+folder_name):\n", + " os.makedirs(npy_root+folder_name)\n", + "\n", + " folder_path = root + folder_name + \"/\"\n", + "\n", + " file_list = os.listdir(root + folder_name)\n", + " file_list.sort()\n", + " print(f\"==>> file_list: {file_list}\")\n", + "\n", + " batch_size = 16\n", + " # Loop through the video frames\n", + " for file_name in file_list:\n", + " path = folder_path + file_name\n", + "\n", + " cap = cv2.VideoCapture(path)\n", + "\n", + " # 710차원 feature array 저장할 list\n", + " np_list = []\n", + "\n", + " # 16 * segments_num 프레임씩 저장할 list\n", + " frames = []\n", + " frame_count = 0\n", + "\n", + " # input tensor 저장할 list\n", + " input_list = []\n", + " input_count = 0\n", + "\n", + " while cap.isOpened():\n", + " # Read a frame from the video\n", + " success, frame = cap.read()\n", + " # frame.shape = (height, width, 3)\n", + "\n", + " frame_count += 1 # Increment frame count\n", + "\n", + " if success:\n", + " frame = tf(image=frame)[\"image\"]\n", + " # frame.shape = (224, 224, 3)\n", + "\n", + " frame = np.expand_dims(frame, axis=0)\n", + " # frame.shape = (1, 224, 224, 3)\n", + " frames.append(frame.copy())\n", + "\n", + " if 
frame_count == 16 * segments_num:\n", + " assert len(frames) == 16 * segments_num\n", + " frames = np.concatenate(frames)\n", + " # in_frames.shape = (16 * segments_num, 224, 224, 3)\n", + " in_frames = frames.transpose(3, 0, 1, 2)\n", + " # # in_frames.shape = (RGB 3, frame T=16 * segments_num, H=224, W=224)\n", + " in_frames = np.expand_dims(in_frames, axis=0)\n", + " # in_frames.shape = (1, 3, 16 * segments_num, 224, 224)\n", + " in_frames = torch.from_numpy(in_frames).float()\n", + " # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224])\n", + "\n", + " input_list.append(in_frames.detach().clone())\n", + "\n", + " frame_count = 0\n", + " frames = []\n", + "\n", + " input_count += 1\n", + "\n", + " if input_count == batch_size:\n", + " # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224])\n", + " input_batch = torch.cat(input_list, dim=0).to(\"cuda\")\n", + "\n", + " with torch.no_grad():\n", + " output = model(input_batch)\n", + " # output.shape == torch.Size([batch_size, 710])\n", + "\n", + " np_list.append(output.cpu().numpy())\n", + "\n", + " input_count = 0\n", + " input_list = []\n", + " else:\n", + " # 남은 프레임, input_list가 지정 개수에서 모자를 때 예외 처리\n", + " if frame_count != 0 and len(frames) != 0:\n", + " # @@ success가 false 일때도 frame_count는 +1이 된다\n", + " # @@ => frames = []로 초기화 된 바로 다음 frame에 success가 false가 되면\n", + " # @@ => frame_count == 1 이지만 len(frames) == 0\n", + " len_frames_left = 16 * segments_num - len(frames)\n", + " # len_input_list_left = batch_size - len(input_list)\n", + "\n", + " # assert len(frames) != 0\n", + "\n", + " for i in range(len_frames_left):\n", + " try:\n", + " frames.append(frames[-1].copy())\n", + " except IndexError:\n", + " print(f\"==>> len(frames): {len(frames)}\")\n", + " print(f\"==>> len_frames_left: {len_frames_left}\")\n", + "\n", + " assert len(frames) == 16 * segments_num\n", + "\n", + " frames = np.concatenate(frames)\n", + " # in_frames.shape = (16 * segments_num, 224, 224, 
3)\n", + " in_frames = frames.transpose(3, 0, 1, 2)\n", + " # # in_frames.shape = (RGB 3, frame T=16 * segments_num, H=224, W=224)\n", + " in_frames = np.expand_dims(in_frames, axis=0)\n", + " # in_frames.shape = (1, 3, 16 * segments_num, 224, 224)\n", + " in_frames = torch.from_numpy(in_frames).float()\n", + " # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224])\n", + "\n", + " input_list.append(in_frames.detach().clone())\n", + "\n", + " # assert len(input_list) == batch_size\n", + "\n", + " # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224])\n", + " input_batch = torch.cat(input_list, dim=0).to(\"cuda\")\n", + "\n", + " with torch.no_grad():\n", + " output = model(input_batch)\n", + " # output.shape == torch.Size([len(input_list), 710])\n", + "\n", + " np_list.append(output.cpu().numpy())\n", + "\n", + " frame_count = 0\n", + " frames = []\n", + " input_count = 0\n", + " input_list = []\n", + "\n", + " # Break the loop if the end of the video is reached\n", + " break\n", + " try:\n", + " file_outputs = np.concatenate(np_list)\n", + " # print(f\"==>> file_outputs.shape: {file_outputs.shape}\")\n", + " np.save((npy_root + folder_name + \"/\" + file_name), file_outputs)\n", + " except ValueError:\n", + " print(f\"{file_name} ValueError: need at least one array to concatenate\")\n", + "\n", + " cap.release()\n", + "\n", + " time_end = datetime.now()\n", + " total_time = time_end - time_start\n", + " total_time = str(total_time).split(\".\")[0]\n", + "\n", + " print(f\"{folder_name} feature extracting ended. 
Elapsed time: {total_time}\")\n", + "\n", + " # cv2.destroyAllWindows()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "segments_num = 1\n", + "# 모델에 들어갈 frame수는 16 * segments_num\n", + "\n", + "model = create_model(\n", + " # \"vit_small_patch16_224\",\n", + " \"vit_base_patch16_224\",\n", + " img_size=224,\n", + " pretrained=False,\n", + " num_classes=710,\n", + " all_frames=16 * segments_num,\n", + " # tubelet_size=args.tubelet_size,\n", + " # drop_rate=args.drop,\n", + " # drop_path_rate=args.drop_path,\n", + " # attn_drop_rate=args.attn_drop_rate,\n", + " # head_drop_rate=args.head_drop_rate,\n", + " # drop_block_rate=None,\n", + " # use_mean_pooling=args.use_mean_pooling,\n", + " # init_scale=args.init_scale,\n", + " # with_cp=args.with_checkpoint,\n", + ")\n", + "\n", + "# load_dict = torch.load(\n", + "# \"/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/vit_s_k710_dl_from_giant.pth\"\n", + "# )\n", + "load_dict = torch.load(\n", + " \"/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/vit_b_k710_dl_from_giant.pth\"\n", + ")\n", + "# backbone pth 경로\n", + "\n", + "model.load_state_dict(load_dict[\"module\"])\n", + "\n", + "model.to(\"cuda\")\n", + "model.eval()\n", + "\n", + "tf = A.Resize(224, 224)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Abuse feature extracting starts\n", + "==>> file_list: ['Abuse001_x264.mp4', 'Abuse002_x264.mp4', 'Abuse003_x264.mp4', 'Abuse004_x264.mp4', 'Abuse005_x264.mp4', 'Abuse006_x264.mp4', 'Abuse007_x264.mp4', 'Abuse008_x264.mp4', 'Abuse009_x264.mp4', 'Abuse010_x264.mp4', 'Abuse011_x264.mp4', 'Abuse012_x264.mp4', 'Abuse013_x264.mp4', 'Abuse014_x264.mp4', 'Abuse015_x264.mp4', 'Abuse016_x264.mp4', 'Abuse017_x264.mp4', 'Abuse018_x264.mp4', 'Abuse019_x264.mp4', 'Abuse020_x264.mp4', 'Abuse021_x264.mp4', 
'Abuse022_x264.mp4', 'Abuse023_x264.mp4', 'Abuse024_x264.mp4', 'Abuse025_x264.mp4', 'Abuse026_x264.mp4', 'Abuse027_x264.mp4', 'Abuse028_x264.mp4', 'Abuse029_x264.mp4', 'Abuse030_x264.mp4', 'Abuse031_x264.mp4', 'Abuse032_x264.mp4', 'Abuse033_x264.mp4', 'Abuse034_x264.mp4', 'Abuse035_x264.mp4', 'Abuse036_x264.mp4', 'Abuse037_x264.mp4', 'Abuse038_x264.mp4', 'Abuse039_x264.mp4', 'Abuse040_x264.mp4', 'Abuse041_x264.mp4', 'Abuse042_x264.mp4', 'Abuse043_x264.mp4', 'Abuse044_x264.mp4', 'Abuse045_x264.mp4', 'Abuse046_x264.mp4', 'Abuse047_x264.mp4', 'Abuse048_x264.mp4', 'Abuse049_x264.mp4', 'Abuse050_x264.mp4']\n", + "Abuse feature extracting ended. Elapsed time: 0:12:04\n", + "Arrest feature extracting starts\n", + "==>> file_list: ['Arrest001_x264.mp4', 'Arrest002_x264.mp4', 'Arrest003_x264.mp4', 'Arrest004_x264.mp4', 'Arrest005_x264.mp4', 'Arrest006_x264.mp4', 'Arrest007_x264.mp4', 'Arrest008_x264.mp4', 'Arrest009_x264.mp4', 'Arrest010_x264.mp4', 'Arrest011_x264.mp4', 'Arrest012_x264.mp4', 'Arrest013_x264.mp4', 'Arrest014_x264.mp4', 'Arrest015_x264.mp4', 'Arrest016_x264.mp4', 'Arrest017_x264.mp4', 'Arrest018_x264.mp4', 'Arrest019_x264.mp4', 'Arrest020_x264.mp4', 'Arrest021_x264.mp4', 'Arrest022_x264.mp4', 'Arrest023_x264.mp4', 'Arrest024_x264.mp4', 'Arrest025_x264.mp4', 'Arrest026_x264.mp4', 'Arrest027_x264.mp4', 'Arrest028_x264.mp4', 'Arrest029_x264.mp4', 'Arrest030_x264.mp4', 'Arrest031_x264.mp4', 'Arrest032_x264.mp4', 'Arrest033_x264.mp4', 'Arrest034_x264.mp4', 'Arrest035_x264.mp4', 'Arrest036_x264.mp4', 'Arrest037_x264.mp4', 'Arrest038_x264.mp4', 'Arrest039_x264.mp4', 'Arrest040_x264.mp4', 'Arrest041_x264.mp4', 'Arrest042_x264.mp4', 'Arrest043_x264.mp4', 'Arrest044_x264.mp4', 'Arrest046_x264.mp4', 'Arrest047_x264.mp4', 'Arrest048_x264.mp4', 'Arrest049_x264.mp4', 'Arrest050_x264.mp4', 'Arrest051_x264.mp4']\n", + "Arrest feature extracting ended. 
Elapsed time: 0:18:42\n", + "Arson feature extracting starts\n", + "==>> file_list: ['Arson001_x264.mp4', 'Arson002_x264.mp4', 'Arson003_x264.mp4', 'Arson005_x264.mp4', 'Arson006_x264.mp4', 'Arson007_x264.mp4', 'Arson008_x264.mp4', 'Arson009_x264.mp4', 'Arson010_x264.mp4', 'Arson011_x264.mp4', 'Arson012_x264.mp4', 'Arson013_x264.mp4', 'Arson014_x264.mp4', 'Arson015_x264.mp4', 'Arson016_x264.mp4', 'Arson017_x264.mp4', 'Arson018_x264.mp4', 'Arson019_x264.mp4', 'Arson020_x264.mp4', 'Arson021_x264.mp4', 'Arson022_x264.mp4', 'Arson023_x264.mp4', 'Arson024_x264.mp4', 'Arson025_x264.mp4', 'Arson026_x264.mp4', 'Arson027_x264.mp4', 'Arson028_x264.mp4', 'Arson029_x264.mp4', 'Arson030_x264.mp4', 'Arson031_x264.mp4', 'Arson032_x264.mp4', 'Arson034_x264.mp4', 'Arson035_x264.mp4', 'Arson036_x264.mp4', 'Arson037_x264.mp4', 'Arson038_x264.mp4', 'Arson039_x264.mp4', 'Arson040_x264.mp4', 'Arson041_x264.mp4', 'Arson042_x264.mp4', 'Arson044_x264.mp4', 'Arson045_x264.mp4', 'Arson046_x264.mp4', 'Arson047_x264.mp4', 'Arson048_x264.mp4', 'Arson049_x264.mp4', 'Arson050_x264.mp4', 'Arson051_x264.mp4', 'Arson052_x264.mp4', 'Arson053_x264.mp4']\n", + "Arson feature extracting ended. 
Elapsed time: 0:17:06\n", + "Assault feature extracting starts\n", + "==>> file_list: ['Assault001_x264.mp4', 'Assault002_x264.mp4', 'Assault003_x264.mp4', 'Assault004_x264.mp4', 'Assault005_x264.mp4', 'Assault006_x264.mp4', 'Assault007_x264.mp4', 'Assault008_x264.mp4', 'Assault009_x264.mp4', 'Assault010_x264.mp4', 'Assault011_x264.mp4', 'Assault012_x264.mp4', 'Assault013_x264.mp4', 'Assault014_x264.mp4', 'Assault015_x264.mp4', 'Assault016_x264.mp4', 'Assault017_x264.mp4', 'Assault018_x264.mp4', 'Assault019_x264.mp4', 'Assault020_x264.mp4', 'Assault022_x264.mp4', 'Assault023_x264.mp4', 'Assault024_x264.mp4', 'Assault025_x264.mp4', 'Assault026_x264.mp4', 'Assault027_x264.mp4', 'Assault028_x264.mp4', 'Assault029_x264.mp4', 'Assault030_x264.mp4', 'Assault031_x264.mp4', 'Assault032_x264.mp4', 'Assault033_x264.mp4', 'Assault034_x264.mp4', 'Assault035_x264.mp4', 'Assault036_x264.mp4', 'Assault037_x264.mp4', 'Assault038_x264.mp4', 'Assault039_x264.mp4', 'Assault040_x264.mp4', 'Assault041_x264.mp4', 'Assault042_x264.mp4', 'Assault044_x264.mp4', 'Assault045_x264.mp4', 'Assault046_x264.mp4', 'Assault047_x264.mp4', 'Assault048_x264.mp4', 'Assault049_x264.mp4', 'Assault050_x264.mp4', 'Assault051_x264.mp4', 'Assault052_x264.mp4']\n", + "Assault feature extracting ended. 
Elapsed time: 0:08:08\n", + "Burglary feature extracting starts\n", + "==>> file_list: ['Burglary001_x264.mp4', 'Burglary002_x264.mp4', 'Burglary003_x264.mp4', 'Burglary004_x264.mp4', 'Burglary005_x264.mp4', 'Burglary006_x264.mp4', 'Burglary007_x264.mp4', 'Burglary008_x264.mp4', 'Burglary009_x264.mp4', 'Burglary010_x264.mp4', 'Burglary011_x264.mp4', 'Burglary012_x264.mp4', 'Burglary013_x264.mp4', 'Burglary014_x264.mp4', 'Burglary015_x264.mp4', 'Burglary016_x264.mp4', 'Burglary017_x264.mp4', 'Burglary018_x264.mp4', 'Burglary019_x264.mp4', 'Burglary020_x264.mp4', 'Burglary021_x264.mp4', 'Burglary022_x264.mp4', 'Burglary023_x264.mp4', 'Burglary024_x264.mp4', 'Burglary025_x264.mp4', 'Burglary026_x264.mp4', 'Burglary027_x264.mp4', 'Burglary028_x264.mp4', 'Burglary029_x264.mp4', 'Burglary030_x264.mp4', 'Burglary031_x264.mp4', 'Burglary032_x264.mp4', 'Burglary033_x264.mp4', 'Burglary034_x264.mp4', 'Burglary035_x264.mp4', 'Burglary036_x264.mp4', 'Burglary037_x264.mp4', 'Burglary038_x264.mp4', 'Burglary039_x264.mp4', 'Burglary040_x264.mp4', 'Burglary041_x264.mp4', 'Burglary042_x264.mp4', 'Burglary043_x264.mp4', 'Burglary044_x264.mp4', 'Burglary045_x264.mp4', 'Burglary046_x264.mp4', 'Burglary047_x264.mp4', 'Burglary048_x264.mp4', 'Burglary049_x264.mp4', 'Burglary050_x264.mp4', 'Burglary051_x264.mp4', 'Burglary052_x264.mp4', 'Burglary053_x264.mp4', 'Burglary054_x264.mp4', 'Burglary055_x264.mp4', 'Burglary056_x264.mp4', 'Burglary057_x264.mp4', 'Burglary058_x264.mp4', 'Burglary059_x264.mp4', 'Burglary060_x264.mp4', 'Burglary061_x264.mp4', 'Burglary062_x264.mp4', 'Burglary063_x264.mp4', 'Burglary064_x264.mp4', 'Burglary065_x264.mp4', 'Burglary066_x264.mp4', 'Burglary067_x264.mp4', 'Burglary068_x264.mp4', 'Burglary069_x264.mp4', 'Burglary070_x264.mp4', 'Burglary071_x264.mp4', 'Burglary072_x264.mp4', 'Burglary073_x264.mp4', 'Burglary074_x264.mp4', 'Burglary075_x264.mp4', 'Burglary076_x264.mp4', 'Burglary077_x264.mp4', 'Burglary078_x264.mp4', 'Burglary079_x264.mp4', 
'Burglary080_x264.mp4', 'Burglary081_x264.mp4', 'Burglary082_x264.mp4', 'Burglary083_x264.mp4', 'Burglary084_x264.mp4', 'Burglary085_x264.mp4', 'Burglary086_x264.mp4', 'Burglary087_x264.mp4', 'Burglary088_x264.mp4', 'Burglary089_x264.mp4', 'Burglary090_x264.mp4', 'Burglary091_x264.mp4', 'Burglary092_x264.mp4', 'Burglary093_x264.mp4', 'Burglary094_x264.mp4', 'Burglary095_x264.mp4', 'Burglary096_x264.mp4', 'Burglary097_x264.mp4', 'Burglary098_x264.mp4', 'Burglary099_x264.mp4', 'Burglary100_x264.mp4']\n", + "Burglary feature extracting ended. Elapsed time: 0:29:33\n", + "Explosion feature extracting starts\n", + "==>> file_list: ['Explosion001_x264.mp4', 'Explosion002_x264.mp4', 'Explosion003_x264.mp4', 'Explosion004_x264.mp4', 'Explosion005_x264.mp4', 'Explosion006_x264.mp4', 'Explosion007_x264.mp4', 'Explosion008_x264.mp4', 'Explosion009_x264.mp4', 'Explosion010_x264.mp4', 'Explosion011_x264.mp4', 'Explosion012_x264.mp4', 'Explosion013_x264.mp4', 'Explosion014_x264.mp4', 'Explosion015_x264.mp4', 'Explosion016_x264.mp4', 'Explosion017_x264.mp4', 'Explosion018_x264.mp4', 'Explosion019_x264.mp4', 'Explosion020_x264.mp4', 'Explosion021_x264.mp4', 'Explosion022_x264.mp4', 'Explosion023_x264.mp4', 'Explosion024_x264.mp4', 'Explosion025_x264.mp4', 'Explosion026_x264.mp4', 'Explosion027_x264.mp4', 'Explosion028_x264.mp4', 'Explosion029_x264.mp4', 'Explosion030_x264.mp4', 'Explosion032_x264.mp4', 'Explosion033_x264.mp4', 'Explosion034_x264.mp4', 'Explosion035_x264.mp4', 'Explosion036_x264.mp4', 'Explosion037_x264.mp4', 'Explosion038_x264.mp4', 'Explosion039_x264.mp4', 'Explosion040_x264.mp4', 'Explosion041_x264.mp4', 'Explosion042_x264.mp4', 'Explosion043_x264.mp4', 'Explosion044_x264.mp4', 'Explosion045_x264.mp4', 'Explosion046_x264.mp4', 'Explosion047_x264.mp4', 'Explosion048_x264.mp4', 'Explosion050_x264.mp4', 'Explosion051_x264.mp4', 'Explosion052_x264.mp4']\n", + "Explosion feature extracting ended. 
Elapsed time: 0:15:57\n", + "Fighting feature extracting starts\n", + "==>> file_list: ['Fighting002_x264.mp4', 'Fighting003_x264.mp4', 'Fighting004_x264.mp4', 'Fighting005_x264.mp4', 'Fighting006_x264.mp4', 'Fighting007_x264.mp4', 'Fighting008_x264.mp4', 'Fighting009_x264.mp4', 'Fighting010_x264.mp4', 'Fighting011_x264.mp4', 'Fighting012_x264.mp4', 'Fighting013_x264.mp4', 'Fighting014_x264.mp4', 'Fighting015_x264.mp4', 'Fighting016_x264.mp4', 'Fighting017_x264.mp4', 'Fighting018_x264.mp4', 'Fighting019_x264.mp4', 'Fighting020_x264.mp4', 'Fighting021_x264.mp4', 'Fighting022_x264.mp4', 'Fighting023_x264.mp4', 'Fighting024_x264.mp4', 'Fighting025_x264.mp4', 'Fighting026_x264.mp4', 'Fighting027_x264.mp4', 'Fighting028_x264.mp4', 'Fighting029_x264.mp4', 'Fighting030_x264.mp4', 'Fighting031_x264.mp4', 'Fighting032_x264.mp4', 'Fighting033_x264.mp4', 'Fighting034_x264.mp4', 'Fighting035_x264.mp4', 'Fighting036_x264.mp4', 'Fighting037_x264.mp4', 'Fighting038_x264.mp4', 'Fighting039_x264.mp4', 'Fighting040_x264.mp4', 'Fighting041_x264.mp4', 'Fighting042_x264.mp4', 'Fighting043_x264.mp4', 'Fighting044_x264.mp4', 'Fighting045_x264.mp4', 'Fighting046_x264.mp4', 'Fighting047_x264.mp4', 'Fighting048_x264.mp4', 'Fighting049_x264.mp4', 'Fighting050_x264.mp4', 'Fighting051_x264.mp4']\n", + "Fighting feature extracting ended. 
Elapsed time: 0:16:24\n", + "RoadAccidents feature extracting starts\n", + "==>> file_list: ['RoadAccidents001_x264.mp4', 'RoadAccidents002_x264.mp4', 'RoadAccidents003_x264.mp4', 'RoadAccidents004_x264.mp4', 'RoadAccidents005_x264.mp4', 'RoadAccidents006_x264.mp4', 'RoadAccidents007_x264.mp4', 'RoadAccidents008_x264.mp4', 'RoadAccidents009_x264.mp4', 'RoadAccidents010_x264.mp4', 'RoadAccidents011_x264.mp4', 'RoadAccidents012_x264.mp4', 'RoadAccidents013_x264.mp4', 'RoadAccidents014_x264.mp4', 'RoadAccidents015_x264.mp4', 'RoadAccidents016_x264.mp4', 'RoadAccidents017_x264.mp4', 'RoadAccidents018_x264.mp4', 'RoadAccidents019_x264.mp4', 'RoadAccidents020_x264.mp4', 'RoadAccidents021_x264.mp4', 'RoadAccidents022_x264.mp4', 'RoadAccidents023_x264.mp4', 'RoadAccidents024_x264.mp4', 'RoadAccidents025_x264.mp4', 'RoadAccidents026_x264.mp4', 'RoadAccidents027_x264.mp4', 'RoadAccidents028_x264.mp4', 'RoadAccidents029_x264.mp4', 'RoadAccidents030_x264.mp4', 'RoadAccidents031_x264.mp4', 'RoadAccidents032_x264.mp4', 'RoadAccidents033_x264.mp4', 'RoadAccidents034_x264.mp4', 'RoadAccidents035_x264.mp4', 'RoadAccidents036_x264.mp4', 'RoadAccidents037_x264.mp4', 'RoadAccidents038_x264.mp4', 'RoadAccidents039_x264.mp4', 'RoadAccidents040_x264.mp4', 'RoadAccidents041_x264.mp4', 'RoadAccidents042_x264.mp4', 'RoadAccidents043_x264.mp4', 'RoadAccidents044_x264.mp4', 'RoadAccidents046_x264.mp4', 'RoadAccidents047_x264.mp4', 'RoadAccidents048_x264.mp4', 'RoadAccidents049_x264.mp4', 'RoadAccidents050_x264.mp4', 'RoadAccidents051_x264.mp4', 'RoadAccidents052_x264.mp4', 'RoadAccidents053_x264.mp4', 'RoadAccidents054_x264.mp4', 'RoadAccidents055_x264.mp4', 'RoadAccidents056_x264.mp4', 'RoadAccidents057_x264.mp4', 'RoadAccidents058_x264.mp4', 'RoadAccidents059_x264.mp4', 'RoadAccidents060_x264.mp4', 'RoadAccidents061_x264.mp4', 'RoadAccidents062_x264.mp4', 'RoadAccidents063_x264.mp4', 'RoadAccidents064_x264.mp4', 'RoadAccidents065_x264.mp4', 'RoadAccidents066_x264.mp4', 
'RoadAccidents067_x264.mp4', 'RoadAccidents068_x264.mp4', 'RoadAccidents069_x264.mp4', 'RoadAccidents070_x264.mp4', 'RoadAccidents071_x264.mp4', 'RoadAccidents072_x264.mp4', 'RoadAccidents073_x264.mp4', 'RoadAccidents074_x264.mp4', 'RoadAccidents075_x264.mp4', 'RoadAccidents076_x264.mp4', 'RoadAccidents077_x264.mp4', 'RoadAccidents078_x264.mp4', 'RoadAccidents079_x264.mp4', 'RoadAccidents080_x264.mp4', 'RoadAccidents081_x264.mp4', 'RoadAccidents082_x264.mp4', 'RoadAccidents083_x264.mp4', 'RoadAccidents084_x264.mp4', 'RoadAccidents085_x264.mp4', 'RoadAccidents086_x264.mp4', 'RoadAccidents087_x264.mp4', 'RoadAccidents088_x264.mp4', 'RoadAccidents089_x264.mp4', 'RoadAccidents090_x264.mp4', 'RoadAccidents091_x264.mp4', 'RoadAccidents092_x264.mp4', 'RoadAccidents093_x264.mp4', 'RoadAccidents094_x264.mp4', 'RoadAccidents095_x264.mp4', 'RoadAccidents096_x264.mp4', 'RoadAccidents097_x264.mp4', 'RoadAccidents098_x264.mp4', 'RoadAccidents099_x264.mp4', 'RoadAccidents100_x264.mp4', 'RoadAccidents101_x264.mp4', 'RoadAccidents102_x264.mp4', 'RoadAccidents103_x264.mp4', 'RoadAccidents104_x264.mp4', 'RoadAccidents105_x264.mp4', 'RoadAccidents106_x264.mp4', 'RoadAccidents107_x264.mp4', 'RoadAccidents108_x264.mp4', 'RoadAccidents109_x264.mp4', 'RoadAccidents110_x264.mp4', 'RoadAccidents111_x264.mp4', 'RoadAccidents112_x264.mp4', 'RoadAccidents113_x264.mp4', 'RoadAccidents114_x264.mp4', 'RoadAccidents115_x264.mp4', 'RoadAccidents116_x264.mp4', 'RoadAccidents117_x264.mp4', 'RoadAccidents118_x264.mp4', 'RoadAccidents119_x264.mp4', 'RoadAccidents120_x264.mp4', 'RoadAccidents121_x264.mp4', 'RoadAccidents122_x264.mp4', 'RoadAccidents123_x264.mp4', 'RoadAccidents124_x264.mp4', 'RoadAccidents125_x264.mp4', 'RoadAccidents126_x264.mp4', 'RoadAccidents127_x264.mp4', 'RoadAccidents128_x264.mp4', 'RoadAccidents129_x264.mp4', 'RoadAccidents130_x264.mp4', 'RoadAccidents131_x264.mp4', 'RoadAccidents132_x264.mp4', 'RoadAccidents133_x264.mp4', 'RoadAccidents134_x264.mp4', 
'RoadAccidents135_x264.mp4', 'RoadAccidents136_x264.mp4', 'RoadAccidents137_x264.mp4', 'RoadAccidents138_x264.mp4', 'RoadAccidents139_x264.mp4', 'RoadAccidents140_x264.mp4', 'RoadAccidents141_x264.mp4', 'RoadAccidents142_x264.mp4', 'RoadAccidents143_x264.mp4', 'RoadAccidents144_x264.mp4', 'RoadAccidents145_x264.mp4', 'RoadAccidents146_x264.mp4', 'RoadAccidents147_x264.mp4', 'RoadAccidents148_x264.mp4', 'RoadAccidents149_x264.mp4', 'RoadAccidents150_x264.mp4', 'RoadAccidents151_x264.mp4']\n", + "RoadAccidents005_x264.mp4 ValueError: need at least one array to concatenate\n", + "RoadAccidents feature extracting ended. Elapsed time: 0:16:31\n", + "Robbery feature extracting starts\n", + "==>> file_list: ['Robbery001_x264.mp4', 'Robbery002_x264.mp4', 'Robbery003_x264.mp4', 'Robbery004_x264.mp4', 'Robbery005_x264.mp4', 'Robbery006_x264.mp4', 'Robbery007_x264.mp4', 'Robbery008_x264.mp4', 'Robbery009_x264.mp4', 'Robbery010_x264.mp4', 'Robbery011_x264.mp4', 'Robbery012_x264.mp4', 'Robbery013_x264.mp4', 'Robbery014_x264.mp4', 'Robbery015_x264.mp4', 'Robbery016_x264.mp4', 'Robbery017_x264.mp4', 'Robbery018_x264.mp4', 'Robbery019_x264.mp4', 'Robbery020_x264.mp4', 'Robbery021_x264.mp4', 'Robbery022_x264.mp4', 'Robbery023_x264.mp4', 'Robbery024_x264.mp4', 'Robbery025_x264.mp4', 'Robbery026_x264.mp4', 'Robbery027_x264.mp4', 'Robbery028_x264.mp4', 'Robbery029_x264.mp4', 'Robbery030_x264.mp4', 'Robbery031_x264.mp4', 'Robbery032_x264.mp4', 'Robbery033_x264.mp4', 'Robbery034_x264.mp4', 'Robbery035_x264.mp4', 'Robbery036_x264.mp4', 'Robbery037_x264.mp4', 'Robbery038_x264.mp4', 'Robbery039_x264.mp4', 'Robbery040_x264.mp4', 'Robbery041_x264.mp4', 'Robbery042_x264.mp4', 'Robbery043_x264.mp4', 'Robbery044_x264.mp4', 'Robbery045_x264.mp4', 'Robbery046_x264.mp4', 'Robbery047_x264.mp4', 'Robbery048_x264.mp4', 'Robbery049_x264.mp4', 'Robbery050_x264.mp4', 'Robbery051_x264.mp4', 'Robbery052_x264.mp4', 'Robbery053_x264.mp4', 'Robbery054_x264.mp4', 'Robbery055_x264.mp4', 'Robbery056_x264.mp4', 
'Robbery057_x264.mp4', 'Robbery058_x264.mp4', 'Robbery059_x264.mp4', 'Robbery060_x264.mp4', 'Robbery061_x264.mp4', 'Robbery062_x264.mp4', 'Robbery063_x264.mp4', 'Robbery064_x264.mp4', 'Robbery065_x264.mp4', 'Robbery066_x264.mp4', 'Robbery067_x264.mp4', 'Robbery068_x264.mp4', 'Robbery069_x264.mp4', 'Robbery070_x264.mp4', 'Robbery071_x264.mp4', 'Robbery072_x264.mp4', 'Robbery073_x264.mp4', 'Robbery074_x264.mp4', 'Robbery075_x264.mp4', 'Robbery076_x264.mp4', 'Robbery077_x264.mp4', 'Robbery078_x264.mp4', 'Robbery079_x264.mp4', 'Robbery080_x264.mp4', 'Robbery081_x264.mp4', 'Robbery082_x264.mp4', 'Robbery083_x264.mp4', 'Robbery084_x264.mp4', 'Robbery085_x264.mp4', 'Robbery086_x264.mp4', 'Robbery087_x264.mp4', 'Robbery088_x264.mp4', 'Robbery089_x264.mp4', 'Robbery090_x264.mp4', 'Robbery091_x264.mp4', 'Robbery092_x264.mp4', 'Robbery093_x264.mp4', 'Robbery094_x264.mp4', 'Robbery095_x264.mp4', 'Robbery096_x264.mp4', 'Robbery097_x264.mp4', 'Robbery098_x264.mp4', 'Robbery099_x264.mp4', 'Robbery100_x264.mp4', 'Robbery101_x264.mp4', 'Robbery102_x264.mp4', 'Robbery103_x264.mp4', 'Robbery104_x264.mp4', 'Robbery105_x264.mp4', 'Robbery106_x264.mp4', 'Robbery107_x264.mp4', 'Robbery108_x264.mp4', 'Robbery109_x264.mp4', 'Robbery110_x264.mp4', 'Robbery111_x264.mp4', 'Robbery112_x264.mp4', 'Robbery113_x264.mp4', 'Robbery114_x264.mp4', 'Robbery115_x264.mp4', 'Robbery116_x264.mp4', 'Robbery117_x264.mp4', 'Robbery118_x264.mp4', 'Robbery119_x264.mp4', 'Robbery120_x264.mp4', 'Robbery121_x264.mp4', 'Robbery122_x264.mp4', 'Robbery123_x264.mp4', 'Robbery124_x264.mp4', 'Robbery125_x264.mp4', 'Robbery126_x264.mp4', 'Robbery127_x264.mp4', 'Robbery128_x264.mp4', 'Robbery129_x264.mp4', 'Robbery130_x264.mp4', 'Robbery131_x264.mp4', 'Robbery132_x264.mp4', 'Robbery133_x264.mp4', 'Robbery134_x264.mp4', 'Robbery135_x264.mp4', 'Robbery136_x264.mp4', 'Robbery137_x264.mp4', 'Robbery138_x264.mp4', 'Robbery139_x264.mp4', 'Robbery140_x264.mp4', 'Robbery141_x264.mp4', 'Robbery142_x264.mp4', 
'Robbery143_x264.mp4', 'Robbery144_x264.mp4', 'Robbery145_x264.mp4', 'Robbery146_x264.mp4', 'Robbery147_x264.mp4', 'Robbery148_x264.mp4', 'Robbery149_x264.mp4', 'Robbery150_x264.mp4']\n", + "Robbery feature extracting ended. Elapsed time: 0:26:40\n", + "Shooting feature extracting starts\n", + "==>> file_list: ['Shooting001_x264.mp4', 'Shooting002_x264.mp4', 'Shooting003_x264.mp4', 'Shooting004_x264.mp4', 'Shooting005_x264.mp4', 'Shooting006_x264.mp4', 'Shooting007_x264.mp4', 'Shooting008_x264.mp4', 'Shooting009_x264.mp4', 'Shooting010_x264.mp4', 'Shooting011_x264.mp4', 'Shooting012_x264.mp4', 'Shooting013_x264.mp4', 'Shooting014_x264.mp4', 'Shooting015_x264.mp4', 'Shooting017_x264.mp4', 'Shooting018_x264.mp4', 'Shooting019_x264.mp4', 'Shooting020_x264.mp4', 'Shooting021_x264.mp4', 'Shooting022_x264.mp4', 'Shooting023_x264.mp4', 'Shooting024_x264.mp4', 'Shooting025_x264.mp4', 'Shooting026_x264.mp4', 'Shooting027_x264.mp4', 'Shooting028_x264.mp4', 'Shooting029_x264.mp4', 'Shooting030_x264.mp4', 'Shooting031_x264.mp4', 'Shooting032_x264.mp4', 'Shooting033_x264.mp4', 'Shooting034_x264.mp4', 'Shooting036_x264.mp4', 'Shooting037_x264.mp4', 'Shooting038_x264.mp4', 'Shooting039_x264.mp4', 'Shooting040_x264.mp4', 'Shooting041_x264.mp4', 'Shooting042_x264.mp4', 'Shooting043_x264.mp4', 'Shooting044_x264.mp4', 'Shooting046_x264.mp4', 'Shooting047_x264.mp4', 'Shooting048_x264.mp4', 'Shooting050_x264.mp4', 'Shooting051_x264.mp4', 'Shooting052_x264.mp4', 'Shooting053_x264.mp4', 'Shooting054_x264.mp4']\n", + "Shooting feature extracting ended. 
Elapsed time: 0:09:18\n", + "Shoplifting feature extracting starts\n", + "==>> file_list: ['Shoplifting001_x264.mp4', 'Shoplifting003_x264.mp4', 'Shoplifting004_x264.mp4', 'Shoplifting005_x264.mp4', 'Shoplifting006_x264.mp4', 'Shoplifting007_x264.mp4', 'Shoplifting008_x264.mp4', 'Shoplifting009_x264.mp4', 'Shoplifting010_x264.mp4', 'Shoplifting012_x264.mp4', 'Shoplifting013_x264.mp4', 'Shoplifting014_x264.mp4', 'Shoplifting015_x264.mp4', 'Shoplifting016_x264.mp4', 'Shoplifting017_x264.mp4', 'Shoplifting018_x264.mp4', 'Shoplifting019_x264.mp4', 'Shoplifting020_x264.mp4', 'Shoplifting021_x264.mp4', 'Shoplifting022_x264.mp4', 'Shoplifting024_x264.mp4', 'Shoplifting025_x264.mp4', 'Shoplifting026_x264.mp4', 'Shoplifting027_x264.mp4', 'Shoplifting028_x264.mp4', 'Shoplifting029_x264.mp4', 'Shoplifting030_x264.mp4', 'Shoplifting031_x264.mp4', 'Shoplifting032_x264.mp4', 'Shoplifting033_x264.mp4', 'Shoplifting034_x264.mp4', 'Shoplifting036_x264.mp4', 'Shoplifting037_x264.mp4', 'Shoplifting038_x264.mp4', 'Shoplifting039_x264.mp4', 'Shoplifting040_x264.mp4', 'Shoplifting041_x264.mp4', 'Shoplifting042_x264.mp4', 'Shoplifting043_x264.mp4', 'Shoplifting044_x264.mp4', 'Shoplifting045_x264.mp4', 'Shoplifting047_x264.mp4', 'Shoplifting048_x264.mp4', 'Shoplifting049_x264.mp4', 'Shoplifting050_x264.mp4', 'Shoplifting051_x264.mp4', 'Shoplifting052_x264.mp4', 'Shoplifting053_x264.mp4', 'Shoplifting054_x264.mp4', 'Shoplifting055_x264.mp4']\n", + "Shoplifting feature extracting ended. 
Elapsed time: 0:20:30\n", + "Stealing feature extracting starts\n", + "==>> file_list: ['Stealing002_x264.mp4', 'Stealing003_x264.mp4', 'Stealing004_x264.mp4', 'Stealing006_x264.mp4', 'Stealing007_x264.mp4', 'Stealing008_x264.mp4', 'Stealing009_x264.mp4', 'Stealing010_x264.mp4', 'Stealing011_x264.mp4', 'Stealing012_x264.mp4', 'Stealing013_x264.mp4', 'Stealing014_x264.mp4', 'Stealing015_x264.mp4', 'Stealing016_x264.mp4', 'Stealing017_x264.mp4', 'Stealing018_x264.mp4', 'Stealing019_x264.mp4', 'Stealing020_x264.mp4', 'Stealing021_x264.mp4', 'Stealing022_x264.mp4', 'Stealing023_x264.mp4', 'Stealing024_x264.mp4', 'Stealing025_x264.mp4', 'Stealing026_x264.mp4', 'Stealing027_x264.mp4', 'Stealing028_x264.mp4', 'Stealing029_x264.mp4', 'Stealing030_x264.mp4', 'Stealing031_x264.mp4', 'Stealing032_x264.mp4', 'Stealing035_x264.mp4', 'Stealing036_x264.mp4', 'Stealing037_x264.mp4', 'Stealing042_x264.mp4', 'Stealing043_x264.mp4', 'Stealing044_x264.mp4', 'Stealing045_x264.mp4', 'Stealing046_x264.mp4', 'Stealing047_x264.mp4', 'Stealing048_x264.mp4', 'Stealing049_x264.mp4', 'Stealing050_x264.mp4', 'Stealing051_x264.mp4', 'Stealing052_x264.mp4', 'Stealing053_x264.mp4', 'Stealing054_x264.mp4', 'Stealing055_x264.mp4', 'Stealing057_x264.mp4', 'Stealing058_x264.mp4', 'Stealing059_x264.mp4', 'Stealing060_x264.mp4', 'Stealing061_x264.mp4', 'Stealing062_x264.mp4', 'Stealing063_x264.mp4', 'Stealing065_x264.mp4', 'Stealing066_x264.mp4', 'Stealing067_x264.mp4', 'Stealing068_x264.mp4', 'Stealing069_x264.mp4', 'Stealing070_x264.mp4', 'Stealing071_x264.mp4', 'Stealing072_x264.mp4', 'Stealing073_x264.mp4', 'Stealing074_x264.mp4', 'Stealing075_x264.mp4', 'Stealing077_x264.mp4', 'Stealing078_x264.mp4', 'Stealing079_x264.mp4', 'Stealing080_x264.mp4', 'Stealing081_x264.mp4', 'Stealing082_x264.mp4', 'Stealing083_x264.mp4', 'Stealing084_x264.mp4', 'Stealing086_x264.mp4', 'Stealing087_x264.mp4', 'Stealing088_x264.mp4', 'Stealing089_x264.mp4', 'Stealing091_x264.mp4', 'Stealing092_x264.mp4', 
'Stealing093_x264.mp4', 'Stealing094_x264.mp4', 'Stealing095_x264.mp4', 'Stealing096_x264.mp4', 'Stealing097_x264.mp4', 'Stealing098_x264.mp4', 'Stealing100_x264.mp4', 'Stealing101_x264.mp4', 'Stealing102_x264.mp4', 'Stealing103_x264.mp4', 'Stealing104_x264.mp4', 'Stealing105_x264.mp4', 'Stealing106_x264.mp4', 'Stealing107_x264.mp4', 'Stealing108_x264.mp4', 'Stealing109_x264.mp4', 'Stealing110_x264.mp4', 'Stealing111_x264.mp4', 'Stealing112_x264.mp4', 'Stealing113_x264.mp4', 'Stealing114_x264.mp4']\n", + "Stealing feature extracting ended. Elapsed time: 0:29:19\n", + "Vandalism feature extracting starts\n", + "==>> file_list: ['Vandalism001_x264.mp4', 'Vandalism002_x264.mp4', 'Vandalism003_x264.mp4', 'Vandalism004_x264.mp4', 'Vandalism005_x264.mp4', 'Vandalism006_x264.mp4', 'Vandalism007_x264.mp4', 'Vandalism008_x264.mp4', 'Vandalism009_x264.mp4', 'Vandalism010_x264.mp4', 'Vandalism011_x264.mp4', 'Vandalism012_x264.mp4', 'Vandalism013_x264.mp4', 'Vandalism014_x264.mp4', 'Vandalism015_x264.mp4', 'Vandalism016_x264.mp4', 'Vandalism017_x264.mp4', 'Vandalism018_x264.mp4', 'Vandalism019_x264.mp4', 'Vandalism020_x264.mp4', 'Vandalism021_x264.mp4', 'Vandalism022_x264.mp4', 'Vandalism023_x264.mp4', 'Vandalism024_x264.mp4', 'Vandalism025_x264.mp4', 'Vandalism026_x264.mp4', 'Vandalism027_x264.mp4', 'Vandalism028_x264.mp4', 'Vandalism029_x264.mp4', 'Vandalism030_x264.mp4', 'Vandalism031_x264.mp4', 'Vandalism032_x264.mp4', 'Vandalism033_x264.mp4', 'Vandalism034_x264.mp4', 'Vandalism035_x264.mp4', 'Vandalism036_x264.mp4', 'Vandalism037_x264.mp4', 'Vandalism038_x264.mp4', 'Vandalism039_x264.mp4', 'Vandalism040_x264.mp4', 'Vandalism041_x264.mp4', 'Vandalism042_x264.mp4', 'Vandalism043_x264.mp4', 'Vandalism044_x264.mp4', 'Vandalism045_x264.mp4', 'Vandalism046_x264.mp4', 'Vandalism047_x264.mp4', 'Vandalism048_x264.mp4', 'Vandalism049_x264.mp4', 'Vandalism050_x264.mp4']\n", + "Vandalism feature extracting ended. 
Elapsed time: 0:09:15\n" + ] + } + ], + "source": [ + "for folder_name in folder_list:\n", + " time_start = datetime.now()\n", + "\n", + " print(f\"{folder_name} feature extracting starts\")\n", + "\n", + " if not os.path.exists(npy_root + folder_name + \"_base\"):\n", + " os.makedirs(npy_root + folder_name + \"_base\")\n", + "\n", + " folder_path = root + folder_name + \"/\"\n", + "\n", + " file_list = os.listdir(root + folder_name)\n", + " file_list.sort()\n", + " print(f\"==>> file_list: {file_list}\")\n", + "\n", + " batch_size = 16\n", + " # Loop through the video frames\n", + " for file_name in file_list:\n", + " path = folder_path + file_name\n", + "\n", + " cap = cv2.VideoCapture(path)\n", + "\n", + " # 710차원 feature array 저장할 list\n", + " np_list = []\n", + "\n", + " # 16 * segments_num 프레임씩 저장할 list\n", + " frames = []\n", + " frame_count = 0\n", + "\n", + " # input tensor 저장할 list\n", + " input_list = []\n", + " input_count = 0\n", + "\n", + " while cap.isOpened():\n", + " # Read a frame from the video\n", + " success, frame = cap.read()\n", + " # frame.shape = (height, width, 3)\n", + "\n", + " frame_count += 1 # Increment frame count\n", + "\n", + " if success:\n", + " frame = tf(image=frame)[\"image\"]\n", + " # frame.shape = (224, 224, 3)\n", + "\n", + " frame = np.expand_dims(frame, axis=0)\n", + " # frame.shape = (1, 224, 224, 3)\n", + " frames.append(frame.copy())\n", + "\n", + " if frame_count == 16 * segments_num:\n", + " assert len(frames) == 16 * segments_num\n", + " frames = np.concatenate(frames)\n", + " # in_frames.shape = (16 * segments_num, 224, 224, 3)\n", + " in_frames = frames.transpose(3, 0, 1, 2)\n", + " # # in_frames.shape = (RGB 3, frame T=16 * segments_num, H=224, W=224)\n", + " in_frames = np.expand_dims(in_frames, axis=0)\n", + " # in_frames.shape = (1, 3, 16 * segments_num, 224, 224)\n", + " in_frames = torch.from_numpy(in_frames).float()\n", + " # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224])\n", + "\n", + 
" input_list.append(in_frames.detach().clone())\n", + "\n", + " frame_count = 0\n", + " frames = []\n", + "\n", + " input_count += 1\n", + "\n", + " if input_count == batch_size:\n", + " # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224])\n", + " input_batch = torch.cat(input_list, dim=0).to(\"cuda\")\n", + "\n", + " with torch.no_grad():\n", + " output = model(input_batch)\n", + " # output.shape == torch.Size([batch_size, 710])\n", + "\n", + " np_list.append(output.cpu().numpy())\n", + "\n", + " input_count = 0\n", + " input_list = []\n", + " else:\n", + " # 남은 프레임, input_list가 지정 개수에서 모자를 때 예외 처리\n", + " if frame_count != 0 and len(frames) != 0:\n", + " # @@ success가 false 일때도 frame_count는 +1이 된다\n", + " # @@ => frames = []로 초기화 된 바로 다음 frame에 success가 false가 되면\n", + " # @@ => frame_count == 1 이지만 len(frames) == 0\n", + " len_frames_left = 16 * segments_num - len(frames)\n", + " # len_input_list_left = batch_size - len(input_list)\n", + " for i in range(len_frames_left):\n", + " frames.append(frames[-1].copy())\n", + "\n", + " assert len(frames) == 16 * segments_num\n", + "\n", + " frames = np.concatenate(frames)\n", + " # in_frames.shape = (16 * segments_num, 224, 224, 3)\n", + " in_frames = frames.transpose(3, 0, 1, 2)\n", + " # # in_frames.shape = (RGB 3, frame T=16 * segments_num, H=224, W=224)\n", + " in_frames = np.expand_dims(in_frames, axis=0)\n", + " # in_frames.shape = (1, 3, 16 * segments_num, 224, 224)\n", + " in_frames = torch.from_numpy(in_frames).float()\n", + " # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224])\n", + "\n", + " input_list.append(in_frames.detach().clone())\n", + "\n", + " # assert len(input_list) == batch_size\n", + "\n", + " # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224])\n", + " input_batch = torch.cat(input_list, dim=0).to(\"cuda\")\n", + "\n", + " with torch.no_grad():\n", + " output = model(input_batch)\n", + " # output.shape == 
torch.Size([len(input_list), 710])\n", + "\n", + " np_list.append(output.cpu().numpy())\n", + "\n", + " frame_count = 0\n", + " frames = []\n", + " input_count = 0\n", + " input_list = []\n", + "\n", + " # Break the loop if the end of the video is reached\n", + " break\n", + "\n", + " try:\n", + " file_outputs = np.concatenate(np_list)\n", + " # print(f\"==>> file_outputs.shape: {file_outputs.shape}\")\n", + " np.save((npy_root + folder_name + \"_base/\" + file_name), file_outputs)\n", + " except ValueError:\n", + " print(f\"{file_name} ValueError: need at least one array to concatenate\")\n", + "\n", + " cap.release()\n", + "\n", + " time_end = datetime.now()\n", + " total_time = time_end - time_start\n", + " total_time = str(total_time).split(\".\")[0]\n", + "\n", + " print(f\"{folder_name} feature extracting ended. Elapsed time: {total_time}\")\n", + "\n", + " # cv2.destroyAllWindows()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/datapreprocess/feature_extraction_videoMAEv2.py b/datapreprocess/feature_extraction_videoMAEv2.py new file mode 100644 index 0000000..0bf2a69 --- /dev/null +++ b/datapreprocess/feature_extraction_videoMAEv2.py @@ -0,0 +1,368 @@ +# from collections import defaultdict +import argparse +import os +from copy import deepcopy +from datetime import datetime + +# import torch.nn as nn +# import torch.nn.functional as F +import albumentations as A +import cv2 +import matplotlib.pyplot as plt + +# from datetime import datetime +import models +import 
numpy as np +import torch +from timm.models import create_model +from tqdm import tqdm + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +print(torch.cuda.is_available()) + + +parser = argparse.ArgumentParser(description="Feature Extraction") + +parser.add_argument( + "--root", + type=str, + help="root folder path", + default="/data/ephemeral/home/datasets/UCFCrime/normal/", +) + +args = parser.parse_args() + + +root = "/data/ephemeral/home/datasets/UCFCrime/normal/" + +npy_root = "./npy/" + +if not os.path.exists(npy_root): + os.makedirs(npy_root) + + +folder_list = os.listdir(root) +folder_list.sort() +print(f"==>> folder_list: {folder_list}") + +segments_num = 1 +# 모델에 들어갈 frame수는 16 * segments_num + +model = create_model( + "vit_small_patch16_224", + img_size=224, + pretrained=False, + num_classes=710, + all_frames=16 * segments_num, +) + +load_dict = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/vit_s_k710_dl_from_giant.pth" +) + +model.load_state_dict(load_dict["module"]) + +model.to("cuda") +model.eval() + +tf = A.Resize(224, 224) + +for folder_name in folder_list: + + time_start = datetime.now() + + print(f"{folder_name} feature extracting starts") + + if not os.path.exists(npy_root + folder_name): + os.makedirs(npy_root + folder_name) + + folder_path = root + folder_name + "/" + + file_list = os.listdir(root + folder_name) + file_list.sort() + print(f"==>> file_list: {file_list}") + + batch_size = 16 + # Loop through the video frames + for file_name in tqdm(file_list, total=len(file_list)): + path = folder_path + file_name + + cap = cv2.VideoCapture(path) + + # 710차원 feature array 저장할 list + np_list = [] + + # 16 * segments_num 프레임씩 저장할 list + frames = [] + frame_count = 0 + + # input tensor 저장할 list + input_list = [] + input_count = 0 + + while cap.isOpened(): + # Read a frame from the video + success, frame = cap.read() + # frame.shape = (height, width, 3) + + frame_count += 1 # Increment frame 
count + + if success: + frame = tf(image=frame)["image"] + # frame.shape = (224, 224, 3) + + frame = np.expand_dims(frame, axis=0) + # frame.shape = (1, 224, 224, 3) + frames.append(frame.copy()) + + if frame_count == 16 * segments_num: + assert len(frames) == 16 * segments_num + frames = np.concatenate(frames) + # in_frames.shape = (16 * segments_num, 224, 224, 3) + in_frames = frames.transpose(3, 0, 1, 2) + # # in_frames.shape = (RGB 3, frame T=16 * segments_num, H=224, W=224) + in_frames = np.expand_dims(in_frames, axis=0) + # in_frames.shape = (1, 3, 16 * segments_num, 224, 224) + in_frames = torch.from_numpy(in_frames).float() + # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224]) + + input_list.append(in_frames.detach().clone()) + + frame_count = 0 + frames = [] + + input_count += 1 + + if input_count == batch_size: + # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224]) + input_batch = torch.cat(input_list, dim=0).to("cuda") + + with torch.no_grad(): + output = model(input_batch) + # output.shape == torch.Size([batch_size, 710]) + + np_list.append(output.cpu().numpy()) + + input_count = 0 + input_list = [] + else: + # 남은 프레임, input_list가 지정 개수에서 모자를 때 예외 처리 + if frame_count != 0 and len(frames) != 0: + # @@ success가 false 일때도 frame_count는 +1이 된다 + # @@ => frames = []로 초기화 된 바로 다음 frame에 success가 false가 되면 + # @@ => frame_count == 1 이지만 len(frames) == 0 + len_frames_left = 16 * segments_num - len(frames) + # len_input_list_left = batch_size - len(input_list) + + # assert len(frames) != 0 + + for i in range(len_frames_left): + try: + frames.append(frames[-1].copy()) + except IndexError: + print(f"==>> len(frames): {len(frames)}") + print(f"==>> len_frames_left: {len_frames_left}") + + assert len(frames) == 16 * segments_num + + frames = np.concatenate(frames) + # in_frames.shape = (16 * segments_num, 224, 224, 3) + in_frames = frames.transpose(3, 0, 1, 2) + # # in_frames.shape = (RGB 3, frame T=16 * segments_num, 
H=224, W=224) + in_frames = np.expand_dims(in_frames, axis=0) + # in_frames.shape = (1, 3, 16 * segments_num, 224, 224) + in_frames = torch.from_numpy(in_frames).float() + # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224]) + + input_list.append(in_frames.detach().clone()) + + # assert len(input_list) == batch_size + + # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224]) + input_batch = torch.cat(input_list, dim=0).to("cuda") + + with torch.no_grad(): + output = model(input_batch) + # output.shape == torch.Size([len(input_list), 710]) + + np_list.append(output.cpu().numpy()) + + frame_count = 0 + frames = [] + input_count = 0 + input_list = [] + + # Break the loop if the end of the video is reached + break + try: + file_outputs = np.concatenate(np_list) + # print(f"==>> file_outputs.shape: {file_outputs.shape}") + np.save((npy_root + folder_name + "/" + file_name), file_outputs) + except ValueError: + print(f"{file_name} ValueError: need at least one array to concatenate") + + cap.release() + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + + print(f"{folder_name} feature extracting ended. 
Elapsed time: {total_time}") + +# segments_num = 1 +# 모델에 들어갈 frame수는 16 * segments_num + +model = create_model( + "vit_base_patch16_224", + img_size=224, + pretrained=False, + num_classes=710, + all_frames=16 * segments_num, +) + +load_dict = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/vit_b_k710_dl_from_giant.pth" +) +# backbone pth 경로 + +model.load_state_dict(load_dict["module"]) + +model.to("cuda") +model.eval() + +tf = A.Resize(224, 224) + +for folder_name in folder_list: + time_start = datetime.now() + + print(f"{folder_name} feature extracting starts") + + if not os.path.exists(npy_root + folder_name + "_base"): + os.makedirs(npy_root + folder_name + "_base") + + folder_path = root + folder_name + "/" + + file_list = os.listdir(root + folder_name) + file_list.sort() + print(f"==>> file_list: {file_list}") + + batch_size = 16 + # Loop through the video frames + for file_name in tqdm(file_list, total=len(file_list)): + path = folder_path + file_name + + cap = cv2.VideoCapture(path) + + # 710차원 feature array 저장할 list + np_list = [] + + # 16 * segments_num 프레임씩 저장할 list + frames = [] + frame_count = 0 + + # input tensor 저장할 list + input_list = [] + input_count = 0 + + while cap.isOpened(): + # Read a frame from the video + success, frame = cap.read() + # frame.shape = (height, width, 3) + + frame_count += 1 # Increment frame count + + if success: + frame = tf(image=frame)["image"] + # frame.shape = (224, 224, 3) + + frame = np.expand_dims(frame, axis=0) + # frame.shape = (1, 224, 224, 3) + frames.append(frame.copy()) + + if frame_count == 16 * segments_num: + assert len(frames) == 16 * segments_num + frames = np.concatenate(frames) + # in_frames.shape = (16 * segments_num, 224, 224, 3) + in_frames = frames.transpose(3, 0, 1, 2) + # # in_frames.shape = (RGB 3, frame T=16 * segments_num, H=224, W=224) + in_frames = np.expand_dims(in_frames, axis=0) + # in_frames.shape = (1, 3, 16 * segments_num, 224, 224) + in_frames = 
torch.from_numpy(in_frames).float() + # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224]) + + input_list.append(in_frames.detach().clone()) + + frame_count = 0 + frames = [] + + input_count += 1 + + if input_count == batch_size: + # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224]) + input_batch = torch.cat(input_list, dim=0).to("cuda") + + with torch.no_grad(): + output = model(input_batch) + # output.shape == torch.Size([batch_size, 710]) + + np_list.append(output.cpu().numpy()) + + input_count = 0 + input_list = [] + else: + # 남은 프레임, input_list가 지정 개수에서 모자를 때 예외 처리 + if frame_count != 0 and len(frames) != 0: + # @@ success가 false 일때도 frame_count는 +1이 된다 + # @@ => frames = []로 초기화 된 바로 다음 frame에 success가 false가 되면 + # @@ => frame_count == 1 이지만 len(frames) == 0 + len_frames_left = 16 * segments_num - len(frames) + # len_input_list_left = batch_size - len(input_list) + for i in range(len_frames_left): + frames.append(frames[-1].copy()) + + assert len(frames) == 16 * segments_num + + frames = np.concatenate(frames) + # in_frames.shape = (16 * segments_num, 224, 224, 3) + in_frames = frames.transpose(3, 0, 1, 2) + # # in_frames.shape = (RGB 3, frame T=16 * segments_num, H=224, W=224) + in_frames = np.expand_dims(in_frames, axis=0) + # in_frames.shape = (1, 3, 16 * segments_num, 224, 224) + in_frames = torch.from_numpy(in_frames).float() + # in_frames.shape == torch.Size([1, 3, 16 * segments_num, 224, 224]) + + input_list.append(in_frames.detach().clone()) + + # assert len(input_list) == batch_size + + # input_batch.shape == torch.Size([batch_size, 3, 16 * segments_num, 224, 224]) + input_batch = torch.cat(input_list, dim=0).to("cuda") + + with torch.no_grad(): + output = model(input_batch) + # output.shape == torch.Size([len(input_list), 710]) + + np_list.append(output.cpu().numpy()) + + frame_count = 0 + frames = [] + input_count = 0 + input_list = [] + + # Break the loop if the end of the video is reached + break + + 
try: + file_outputs = np.concatenate(np_list) + # print(f"==>> file_outputs.shape: {file_outputs.shape}") + np.save((npy_root + folder_name + "_base/" + file_name), file_outputs) + except ValueError: + print(f"{file_name} ValueError: need at least one array to concatenate") + + cap.release() + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + + print(f"{folder_name} feature extracting ended. Elapsed time: {total_time}") diff --git a/datapreprocess/models/__init__.py b/datapreprocess/models/__init__.py new file mode 100644 index 0000000..7172750 --- /dev/null +++ b/datapreprocess/models/__init__.py @@ -0,0 +1,27 @@ +from .modeling_finetune import ( + vit_base_patch16_224, + vit_giant_patch14_224, + vit_huge_patch16_224, + vit_large_patch16_224, + vit_small_patch16_224, +) +from .modeling_pretrain import ( + pretrain_videomae_base_patch16_224, + pretrain_videomae_giant_patch14_224, + pretrain_videomae_huge_patch16_224, + pretrain_videomae_large_patch16_224, + pretrain_videomae_small_patch16_224, +) + +__all__ = [ + "pretrain_videomae_small_patch16_224", + "pretrain_videomae_base_patch16_224", + "pretrain_videomae_large_patch16_224", + "pretrain_videomae_huge_patch16_224", + "pretrain_videomae_giant_patch14_224", + "vit_small_patch16_224", + "vit_base_patch16_224", + "vit_large_patch16_224", + "vit_huge_patch16_224", + "vit_giant_patch14_224", +] diff --git a/datapreprocess/models/modeling_finetune.py b/datapreprocess/models/modeling_finetune.py new file mode 100644 index 0000000..01d824e --- /dev/null +++ b/datapreprocess/models/modeling_finetune.py @@ -0,0 +1,574 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# 
--------------------------------------------------------' +from functools import partial + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from timm.models.layers import drop_path, to_2tuple, trunc_normal_ +from timm.models.registry import register_model + + +def _cfg(url="", **kwargs): + return { + "url": url, + "num_classes": 400, + "input_size": (3, 224, 224), + "pool_size": None, + "crop_pct": 0.9, + "interpolation": "bicubic", + "mean": (0.5, 0.5, 0.5), + "std": (0.5, 0.5, 0.5), + **kwargs, + } + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class Mlp(nn.Module): + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class CosAttention(nn.Module): + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + attn_head_dim=None, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + # self.scale = qk_scale or head_dim**-0.5 + # DO NOT 
RENAME [self.scale] (for no weight decay) + if qk_scale is None: + self.scale = nn.Parameter( + torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True + ) + else: + self.scale = qk_scale + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + ( + self.q_bias, + torch.zeros_like(self.v_bias, requires_grad=False), + self.v_bias, + ) + ) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1) + + # torch.log(torch.tensor(1. 
/ 0.01)) = 4.6052 + logit_scale = torch.clamp(self.scale, max=4.6052).exp() + + attn = attn * logit_scale + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Attention(nn.Module): + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + attn_head_dim=None, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + ( + self.q_bias, + torch.zeros_like(self.v_bias, requires_grad=False), + self.v_bias, + ) + ) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + init_values=None, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + attn_head_dim=None, + 
cos_attn=False, + ): + super().__init__() + self.norm1 = norm_layer(dim) + if cos_attn: + self.attn = CosAttention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + attn_head_dim=attn_head_dim, + ) + else: + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + attn_head_dim=attn_head_dim, + ) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + if init_values > 0: + self.gamma_1 = nn.Parameter( + init_values * torch.ones((dim)), requires_grad=True + ) + self.gamma_2 = nn.Parameter( + init_values * torch.ones((dim)), requires_grad=True + ) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding""" + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + num_frames=16, + tubelet_size=2, + ): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_spatial_patches = (img_size[0] // patch_size[0]) * ( + img_size[1] // patch_size[1] + ) + num_patches = num_spatial_patches * (num_frames // tubelet_size) + + self.img_size = img_size + self.tubelet_size = tubelet_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv3d( + in_channels=in_chans, + 
out_channels=embed_dim, + kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]), + stride=(self.tubelet_size, patch_size[0], patch_size[1]), + ) + + def forward(self, x, **kwargs): + B, C, T, H, W = x.shape + assert ( + H == self.img_size[0] and W == self.img_size[1] + ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + # b, c, l -> b, l, c + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +# sin-cos position encoding +# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 +def get_sinusoid_encoding_table(n_position, d_hid): + """Sinusoid position encoding table""" + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)] + ) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.tensor( + sinusoid_table, dtype=torch.float, requires_grad=False + ).unsqueeze(0) + + +class VisionTransformer(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + head_drop_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=0.0, + use_learnable_pos_emb=False, + init_scale=0.0, + all_frames=16, + tubelet_size=2, + use_mean_pooling=True, + with_cp=False, + cos_attn=False, + ): + super().__init__() + self.num_classes = num_classes + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + self.tubelet_size = tubelet_size + 
self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + num_frames=all_frames, + tubelet_size=tubelet_size, + ) + num_patches = self.patch_embed.num_patches + self.with_cp = with_cp + + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + else: + # sine-cosine positional embeddings is on the way + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + cos_attn=cos_attn, + ) + for i in range(depth) + ] + ) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head_dropout = nn.Dropout(head_drop_rate) + self.head = ( + nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = 
num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def forward_features(self, x): + B = x.size(0) + + x = self.patch_embed(x) + + if self.pos_embed is not None: + x = ( + x + + self.pos_embed.expand(B, -1, -1) + .type_as(x) + .to(x.device) + .clone() + .detach() + ) + x = self.pos_drop(x) + + for blk in self.blocks: + if self.with_cp: + x = cp.checkpoint(blk, x) + else: + x = blk(x) + + if self.fc_norm is not None: + return self.fc_norm(x.mean(1)) + else: + return self.norm(x[:, 0]) + + def forward(self, x): + x = self.forward_features(x) + x = self.head_dropout(x) + x = self.head(x) + return x + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_huge_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1280, + depth=32, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_giant_patch14_224(pretrained=False, **kwargs): + model = VisionTransformer( + 
patch_size=14, + embed_dim=1408, + depth=40, + num_heads=16, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model diff --git a/datapreprocess/models/modeling_pretrain.py b/datapreprocess/models/modeling_pretrain.py new file mode 100644 index 0000000..d5c3539 --- /dev/null +++ b/datapreprocess/models/modeling_pretrain.py @@ -0,0 +1,493 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +from functools import partial + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from timm.models.layers import trunc_normal_ as __call_trunc_normal_ +from timm.models.registry import register_model + +from .modeling_finetune import Block, PatchEmbed, _cfg, get_sinusoid_encoding_table + + +def trunc_normal_(tensor, mean=0.0, std=1.0): + __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) + + +class PretrainVisionTransformerEncoder(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=0, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=None, + tubelet_size=2, + use_learnable_pos_emb=False, + with_cp=False, + all_frames=16, + cos_attn=False, + ): + super().__init__() + self.num_classes = num_classes + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + self.patch_embed = PatchEmbed( + img_size=img_size, + 
patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + num_frames=all_frames, + tubelet_size=tubelet_size, + ) + num_patches = self.patch_embed.num_patches + self.with_cp = with_cp + + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + # sine-cosine positional embeddings + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + cos_attn=cos_attn, + ) + for i in range(depth) + ] + ) + self.norm = norm_layer(embed_dim) + self.head = ( + nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def forward_features(self, x, mask): + x = self.patch_embed(x) + + x = x + self.pos_embed.type_as(x).to(x.device).clone().detach() + + B, _, C = x.shape + x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible + + for blk in 
self.blocks: + if self.with_cp: + x_vis = cp.checkpoint(blk, x_vis) + else: + x_vis = blk(x_vis) + + x_vis = self.norm(x_vis) + return x_vis + + def forward(self, x, mask): + x = self.forward_features(x, mask) + x = self.head(x) + return x + + +class PretrainVisionTransformerDecoder(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + patch_size=16, + num_classes=768, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=None, + num_patches=196, + tubelet_size=2, + with_cp=False, + cos_attn=False, + ): + super().__init__() + self.num_classes = num_classes + assert num_classes == 3 * tubelet_size * patch_size**2 + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + self.patch_size = patch_size + self.with_cp = with_cp + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + cos_attn=cos_attn, + ) + for i in range(depth) + ] + ) + self.norm = norm_layer(embed_dim) + self.head = ( + nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", 
"cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def forward(self, x, return_token_num): + for blk in self.blocks: + if self.with_cp: + x = cp.checkpoint(blk, x) + else: + x = blk(x) + + if return_token_num > 0: + # only return the mask tokens predict pixels + x = self.head(self.norm(x[:, -return_token_num:])) + else: + # [B, N, 3*16^2] + x = self.head(self.norm(x)) + return x + + +class PretrainVisionTransformer(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + img_size=224, + patch_size=16, + encoder_in_chans=3, + encoder_num_classes=0, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + decoder_num_classes=1536, # decoder_num_classes=768 + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=8, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=0.0, + use_learnable_pos_emb=False, + tubelet_size=2, + num_classes=0, # avoid the error from create_fn in timm + in_chans=0, # avoid the error from create_fn in timm + with_cp=False, + all_frames=16, + cos_attn=False, + ): + super().__init__() + self.encoder = PretrainVisionTransformerEncoder( + img_size=img_size, + patch_size=patch_size, + in_chans=encoder_in_chans, + num_classes=encoder_num_classes, + embed_dim=encoder_embed_dim, + depth=encoder_depth, + num_heads=encoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size, + use_learnable_pos_emb=use_learnable_pos_emb, + with_cp=with_cp, + all_frames=all_frames, + 
cos_attn=cos_attn, + ) + + self.decoder = PretrainVisionTransformerDecoder( + patch_size=patch_size, + num_patches=self.encoder.patch_embed.num_patches, + num_classes=decoder_num_classes, + embed_dim=decoder_embed_dim, + depth=decoder_depth, + num_heads=decoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size, + with_cp=with_cp, + cos_attn=cos_attn, + ) + + self.encoder_to_decoder = nn.Linear( + encoder_embed_dim, decoder_embed_dim, bias=False + ) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) + + self.pos_embed = get_sinusoid_encoding_table( + self.encoder.patch_embed.num_patches, decoder_embed_dim + ) + + trunc_normal_(self.mask_token, std=0.02) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token", "mask_token"} + + def forward(self, x, mask, decode_mask=None): + decode_vis = mask if decode_mask is None else ~decode_mask + + x_vis = self.encoder(x, mask) # [B, N_vis, C_e] + x_vis = self.encoder_to_decoder(x_vis) # [B, N_vis, C_d] + B, N_vis, C = x_vis.shape + + # we don't unshuffle the correct visible token order, + # but shuffle the pos embedding accorddingly. 
+ expand_pos_embed = ( + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() + ) + pos_emd_vis = expand_pos_embed[~mask].reshape(B, -1, C) + pos_emd_mask = expand_pos_embed[decode_vis].reshape(B, -1, C) + + # [B, N, C_d] + x_full = torch.cat([x_vis + pos_emd_vis, self.mask_token + pos_emd_mask], dim=1) + # NOTE: if N_mask==0, the shape of x is [B, N_mask, 3 * 16 * 16] + x = self.decoder(x_full, pos_emd_mask.shape[1]) + + return x + + +@register_model +def pretrain_videomae_small_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=384, + encoder_depth=12, + encoder_num_heads=6, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + decoder_embed_dim=192, + decoder_num_heads=3, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_base_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + decoder_embed_dim=384, + decoder_num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_large_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + 
decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_huge_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_giant_patch14_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=14, + encoder_embed_dim=1408, + encoder_depth=40, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1176, # 14 * 14 * 3 * 2, + decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model diff --git a/datapreprocess/run.sh b/datapreprocess/run.sh new file mode 100755 index 0000000..0423a39 --- /dev/null +++ b/datapreprocess/run.sh @@ -0,0 +1,2 @@ +python feature_extraction_videoMAEv2.py +python feature_extraction.py \ No newline at end of file diff --git a/model/lstmae/__init__.py b/model/lstmae/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/model/lstmae/calc_abnormal_thr.py 
b/model/lstmae/calc_abnormal_thr.py new file mode 100644 index 0000000..ae99433 --- /dev/null +++ b/model/lstmae/calc_abnormal_thr.py @@ -0,0 +1,170 @@ +import random + +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from dataset import AbnormalDataset +from lstm_ae import LSTMAutoEncoder +from sklearn.metrics import precision_recall_curve, roc_auc_score +from sklearn.preprocessing import MinMaxScaler +from torch.utils.data import DataLoader, random_split +from tqdm import tqdm + +""" +MSE loss +평균과 공분산 이용 +# 평균[0.000071] +# 중간[0.000050] +# 최소[0.000002] +# 최대[0.001509] + +loss 그대로 이용 +# 평균[0.026619] +# 중간[0.026682] +# 최소[0.022427] +# 최대[0.030678] + +MAE loss +# 평균[0.002338] +# 중간[0.001776] +# 최소[0.000088] +# 최대[0.025161] + +=> MSE 는 너무 작아서 MAE 로 실시 +""" + + +def set_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if use multi-GPU + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + random.seed(seed) + + +set_seed(666) + + +def calculate_mse(seq1, seq2): + return np.mean(np.power(seq1 - seq2, 2)) + + +def main(): + root_dir = "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/abnormal" + json_dir = "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/label" + abnormal_dataset = AbnormalDataset( + root=root_dir, + label_root=json_dir, + ) + abnormal_loader = DataLoader( + dataset=abnormal_dataset, batch_size=1, shuffle=False, num_workers=0 + ) + + sequence_length = 20 + n_features = 38 + + device = "cuda" if torch.cuda.is_available() else "cpu" + model = LSTMAutoEncoder(num_layers=2, hidden_size=50, n_features=38, device=device) + load_dict = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/pts/LSTM_20240324_222238_best.pth", + map_location="cpu", + ) + + 
model.load_state_dict(load_dict["model_state_dict"]) + model.to(device) + val_criterion = nn.MSELoss(reduction="none") + + label_list = [] + mse_list = [] + + model.eval() + + with torch.no_grad(): + + for i, (data, label) in tqdm( + enumerate(abnormal_loader), total=len(abnormal_loader) + ): + scaler = MinMaxScaler() + + label = label.reshape(-1).cpu().numpy() + + if sum(label) >= 1: + label_list.append(1) + else: + label_list.append(0) + + data = data.cpu().detach().numpy() + data = data.reshape(sequence_length, n_features) + data = scaler.fit_transform(data) + scaled_data = data.reshape(1, sequence_length, n_features) + scaled_data = torch.from_numpy(scaled_data).float().to(device) + + pred = model(scaled_data) + pred = pred.cpu().detach().numpy().reshape(-1, n_features) + # pred_original = scaler.inverse_transform(pred.cpu().detach().numpy().reshape(-1, n_features)) + + mse = calculate_mse(data, pred) + mse_list.append(mse) + + precision_rt, recall_rt, threshold_rt = precision_recall_curve(label_list, mse_list) + + plt.figure(figsize=(8, 5)) + plt.plot(threshold_rt, precision_rt[1:], label="Precision") + plt.plot(threshold_rt, recall_rt[1:], label="Recall") + plt.xlabel("Threshold") + plt.ylabel("Precision/Recall") + plt.legend() + plt.savefig( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/pr_curve.png" + ) + + # best position of threshold + index_cnt = [ + cnt for cnt, (p, r) in enumerate(zip(precision_rt, recall_rt)) if p == r + ][0] + print("precision: ", precision_rt[index_cnt], ", recall: ", recall_rt[index_cnt]) + + # fixed Threshold + threshold_fixed = threshold_rt[index_cnt] + print("threshold: ", threshold_fixed) + + print("mse mean: ", np.mean(mse)) + + +if __name__ == "__main__": + + main() + +# 이상행동 20 중 5프레임 이상 +# precision: 0.591683741111245 , recall: 0.591683741111245 +# threshold: 0.02313113 + +# 이상행동 20 중 1프레임 이상 +# precision: 0.6168715461824743 , recall: 0.6168715461824743 +# threshold: 0.02235273 + +# 이상행동 20 중 
20프레임 모두 +# precision: 0.4549022511848341 , recall: 0.4549022511848341 +# threshold: 0.02781844 + +# 이상행동 20 중 3프레임 이상 +# precision: 0.6046858260748056 , recall: 0.6046858260748056 +# threshold: 0.022715755 + +# min_max_scaler inverse 후 + +# 이상행동 20 중 1프레임 이상 +# precision: 0.46159918800045113 , recall: 0.46159918800045113 +# threshold: 0.0026742023210807914 + +# 이상행동 20 중 10프레임 이상 +# precision: 0.37000934704232874 , recall: 0.37000934704232874 +# threshold: 0.003047690912489323 + +# 이상행동 20 중 3프레임 이상 +# precision: 0.44426848013414694 , recall: 0.44426848013414694 +# threshold: 0.002750229995391417 diff --git a/model/lstmae/calc_metric.py b/model/lstmae/calc_metric.py new file mode 100644 index 0000000..05036df --- /dev/null +++ b/model/lstmae/calc_metric.py @@ -0,0 +1,268 @@ +import os +import os.path as osp +import random +from argparse import ArgumentParser +from datetime import datetime + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns +import torch +import torch.nn as nn +import torch.nn.functional as F +import wandb +from dataset import AbnormalDataset, NormalDataset +from lstm_ae import LSTMAutoEncoder +from PIL import Image +from sklearn.metrics import auc, confusion_matrix, roc_curve +from sklearn.preprocessing import MinMaxScaler +from torch.utils.data import DataLoader, Dataset, random_split +from tqdm import tqdm + + +def parse_args(): + parser = ArgumentParser() + + parser.add_argument( + "--abnormal_root_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_CSV", + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/abnormal", + ), + ) + parser.add_argument( + "--json_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_VAL_JSON", + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/label", + ), + ) + + parser.add_argument( + "--model_dir", + type=str, + default=os.environ.get( + "SM_MODEL_DIR", + 
"/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/pts", + ), + ) + + parser.add_argument("--model_name", type=str, default="LSTM") + parser.add_argument("--pth_name", type=str, default="LSTM_20240324_222238_best") + parser.add_argument("--seed", type=int, default=666) + + parser.add_argument( + "--device", default="cuda" if torch.cuda.is_available() else "cpu" + ) + parser.add_argument("--num_workers", type=int, default=8) + + parser.add_argument("--batch_size", type=int, default=64) + parser.add_argument("--thr", type=float, default=0.02) + + parser.add_argument("--wandb_mode", type=str, default="online") + parser.add_argument("--wandb_run_name", type=str, default="LSTM_auc") + + args = parser.parse_args() + + return args + + +def set_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if use multi-GPU + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + random.seed(seed) + + +def save_image_to_wandb(image_path): + with open(image_path, "rb") as file: + img = Image.open(file) + wandb.log( + { + image_path.split("/")[-1].split(".")[0]: [ + wandb.Image(img, caption=f"{image_path.split('/')[-1]}") + ] + } + ) + + +def calculate_mse(seq1, seq2): + return np.mean(np.power(seq1 - seq2, 2)) + + +def train( + abnormal_root_dir, + json_dir, + model_dir, + model_name, + pth_name, + device, + num_workers, + batch_size, + thr, + seed, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + # Define parameters + n_features = 38 # Number of features to predict + sequence_length = 20 + + batch_size = batch_size + + abnormal_dataset = AbnormalDataset( + root=abnormal_root_dir, + label_root=json_dir, + ) + + abnormal_loader = DataLoader( + dataset=abnormal_dataset, batch_size=1, shuffle=False, num_workers=0 + ) 
+ + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> data_load_time: {data_load_time}") + + # Initialize the LSTM autoencoder model + model = LSTMAutoEncoder( + num_layers=2, hidden_size=50, n_features=n_features, device=device + ) + + load_dict = torch.load(osp.join(model_dir, f"{pth_name}.pth"), map_location="cpu") + + model.load_state_dict(load_dict["model_state_dict"]) + model.to(device) + + val_criterion = nn.MSELoss(reduction="none") + + print(f"Start calculation auc..") + + wandb.init( + project="VAD", + config={ + "dataset": "무인매장", + "loss": "MSE", + "notes": "LSTM auc 구하기", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + model.eval() + + label_list = [] + mse_list = [] + pred_list = [] + + with torch.no_grad(): + for step, (data, label) in tqdm( + enumerate(abnormal_loader), total=len(abnormal_loader) + ): + scaler = MinMaxScaler() + + label = label.reshape(-1).cpu().numpy() + if sum(label) >= 1: + label_list.append(1) + else: + label_list.append(0) + + data = data.cpu().detach().numpy() + data = data.reshape(sequence_length, n_features) + data = scaler.fit_transform(data) + scaled_data = data.reshape(1, sequence_length, n_features) + scaled_data = torch.from_numpy(scaled_data).float().to(device) + + pred = model(scaled_data) + pred = pred.cpu().detach().numpy().reshape(-1, n_features) + + # pred_original = scaler.inverse_transform(pred.cpu().detach().numpy().reshape(-1, n_features)) + + mse = calculate_mse(data, pred) + mse_list.append(mse) + + pred_list.append(1 if mse > thr else 0) + + conf_matrix_path = "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/confusion_matrix.png" + roc_curve_path = "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/roc_curve.png" + pr_curve_path = "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/pr_curve.png" + + 
conf_matrix = confusion_matrix(label_list, pred_list) + plt.figure(figsize=(7, 7)) + sns.heatmap( + conf_matrix, + xticklabels=["Normal", "Abnormal"], + yticklabels=["Normal", "Abnormal"], + annot=True, + fmt="d", + ) + plt.title("Confusion Matrix") + plt.xlabel("Predicted Class") + plt.ylabel("True Class") + plt.savefig(conf_matrix_path) + + false_pos_rate, true_pos_rate, thresholds = roc_curve(label_list, mse_list) + roc_auc = auc( + false_pos_rate, + true_pos_rate, + ) + + plt.plot(false_pos_rate, true_pos_rate, linewidth=5, label="AUC = %0.3f" % roc_auc) + plt.plot([0, 1], [0, 1], linewidth=5) + + plt.xlim([-0.01, 1]) + plt.ylim([0, 1.01]) + plt.legend(loc="lower right") + plt.title("ROC curve") + plt.ylabel("True Positive Rate") + plt.xlabel("False Positive Rate") + plt.savefig(roc_curve_path) + + pred_list = np.array(pred_list) + label_list = np.array(label_list) + acc = sum((pred_list == label_list)) / len(pred_list) + print("accuracy 점수: {}".format(acc)) + print("roc_auc 점수: {}".format(roc_auc)) + + new_wandb_metric_dict = { + "thr": thr, + "auc": roc_auc, + "accuracy": acc, + } + + wandb.log(new_wandb_metric_dict) + + save_image_to_wandb(conf_matrix_path) + save_image_to_wandb(roc_curve_path) + save_image_to_wandb(pr_curve_path) + + os.remove(conf_matrix_path) + os.remove(roc_curve_path) + os.remove(pr_curve_path) + + +def main(args): + train(**args.__dict__) + + +if __name__ == "__main__": + args = parse_args() + print(args) + main(args) diff --git a/model/lstmae/calc_normal_thr.py b/model/lstmae/calc_normal_thr.py new file mode 100644 index 0000000..2d88934 --- /dev/null +++ b/model/lstmae/calc_normal_thr.py @@ -0,0 +1,128 @@ +import random + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from dataset import NormalDataset +from lstm_ae import LSTMAutoEncoder +from torch.utils.data import DataLoader, random_split +from tqdm import tqdm + +""" +MSE loss +평균과 공분산 이용 +# 평균[0.000071] +# 중간[0.000050] +# 최소[0.000002] 
+# 최대[0.001509] + +loss 그대로 이용 +# 평균[0.026619] +# 중간[0.026682] +# 최소[0.022427] +# 최대[0.030678] + +MAE loss +# 평균[0.002338] +# 중간[0.001776] +# 최소[0.000088] +# 최대[0.025161] + +=> MSE 는 너무 작아서 MAE 로 실시 +""" + + +def set_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if use multi-GPU + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + random.seed(seed) + + +set_seed(666) + + +def main(): + root_dir = "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/normal" + dataset = NormalDataset( + root=root_dir, + ) + valid_data_size = len(dataset) // 10 + train_data_size = len(dataset) - valid_data_size + + train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + ) + + valid_loader = DataLoader( + dataset=valid_dataset, batch_size=64, shuffle=False, num_workers=8 + ) + + device = "cuda" if torch.cuda.is_available() else "cpu" + model = LSTMAutoEncoder(num_layers=2, hidden_size=50, n_features=38, device=device) + load_dict = torch.load( + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/pts/LSTM_20240324_222238_best.pth", + map_location="cpu", + ) + + model.load_state_dict(load_dict["model_state_dict"]) + model.to(device) + val_criterion = nn.MSELoss(reduction="none") + + loss_list = [] + loss_m_list = [] + model.eval() + + with torch.no_grad(): + total_loss = 0 + for i, batch_data in tqdm(enumerate(valid_loader), total=len(valid_loader)): + + batch_data = batch_data.to(device) + predict_values = model(batch_data) + + loss = val_criterion(predict_values, batch_data) + loss_m = torch.mean(loss) + loss_m_list.append(loss_m.cpu().numpy()) + + loss_mae = F.l1_loss(predict_values, batch_data, reduction="none") + loss_mae = loss_mae.mean() + loss_list.append(loss_mae.cpu().numpy()) + + total_loss += loss_m + + loss_list = np.array(loss_list) + loss_m_list = np.array(loss_m_list) + + 
val_abnormal_mean_loss = (total_loss / len(valid_loader)).item() + + ## 정상구간에서 mse 점수 분포 + print("mae") + print( + "평균[{:.6f}]\n중간[{:.6f}]\n최소[{:.6f}]\n최대[{:.6f}]".format( + np.mean(loss_list), + np.median(loss_list), + np.min(loss_list), + np.max(loss_list), + ) + ) + print("=" * 40) + print("mse") + print( + "평균[{:.6f}]\n중간[{:.6f}]\n최소[{:.6f}]\n최대[{:.6f}]".format( + np.mean(loss_m_list), + np.median(loss_m_list), + np.min(loss_m_list), + np.max(loss_m_list), + ) + ) + print("=" * 40) + print("total_loss: {}".format(val_abnormal_mean_loss)) + + +if __name__ == "__main__": + + main() diff --git a/model/lstmae/dataset.py b/model/lstmae/dataset.py new file mode 100644 index 0000000..b3269a2 --- /dev/null +++ b/model/lstmae/dataset.py @@ -0,0 +1,220 @@ +import json +import os +import os.path as osp +from collections import defaultdict as dd + +import numpy as np +import pandas as pd +import torch +from sklearn.preprocessing import MinMaxScaler +from torch.utils.data import Dataset + + +class NormalDataset(Dataset): + + def __init__( + self, + sequence_length=20, + root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset", + ): + super().__init__() + self.sequence_length = sequence_length + + self.scaler = MinMaxScaler() + + # Load the dataset + file_list = os.listdir(root) + + df_list = [] + + self.length = 0 + self.range_table = [] + + self.real_length = 0 + self.real_idx_table = [] + + for i, file_name in enumerate(file_list): + dat = pd.read_csv(root + "/" + file_name) + dat.drop(columns=["Frame"], inplace=True) + + print(f"==>>{i}번째 dat.shape: {dat.shape}") + + id_counter = pd.Series(dat["ID"]).value_counts(sort=False) + + for id_to_del in id_counter[id_counter < sequence_length].index: + dat.drop(dat[dat["ID"] == id_to_del].index, inplace=True) + + id_counter = pd.Series(dat["ID"]).value_counts(sort=False) + + print(f"==>>{i}번째 처리 후 dat.shape: {dat.shape}") + assert len(id_counter[id_counter < sequence_length].index) == 0 + + for count 
in id_counter: + cur_id_length = count - sequence_length + 1 + self.range_table.append(self.length + cur_id_length) + self.real_idx_table.append(self.real_length + count) + self.length += cur_id_length + self.real_length += count + + dat["ID"] = dat["ID"].astype("str") + f"_{i}" + df_list.append(dat.copy()) + + self.dat = pd.concat(df_list, ignore_index=True) + + def __len__(self): + return self.length + + def __getitem__(self, idx): + real_idx = self.find_real_idx(idx) + + sequence = self.dat[real_idx : real_idx + self.sequence_length].copy() + sequence.drop(columns=["ID"], inplace=True) + sequence = self.scaler.fit_transform(sequence.values) + sequence = np.array(sequence) + + return torch.from_numpy(sequence).float() + + def find_real_idx(self, idx): + + start = 0 + end = len(self.range_table) - 1 + while start <= end: + mid = (start + end) // 2 + if self.range_table[mid] == idx: + real_idx = idx + ((mid + 1) * (self.sequence_length - 1)) + return real_idx + + if self.range_table[mid] > idx: + end = mid - 1 + else: + start = mid + 1 + + real_idx = idx + (start * (self.sequence_length - 1)) + + return real_idx + + +class AbnormalDataset(Dataset): + + def __init__( + self, + sequence_length=20, + root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/abnormal", + label_root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/label", + ): + super().__init__() + self.sequence_length = sequence_length + + self.scaler = MinMaxScaler() + # 데이터 값 [0,1] 범위로 scaling할때 사용 + + # Load the dataset + file_list = os.listdir(root) + + df_list = [] + + self.length = 0 + self.range_table = [] + + self.real_length = 0 + self.real_idx_table = [] + + for i, file_name in enumerate(file_list): + dat = pd.read_csv(root + "/" + file_name) + # dat.drop(columns=["Frame"], inplace=True) # Remove the 'Frame' column + + print(f"==>>{i}번째 dat.shape: {dat.shape}") + + id_counter = pd.Series(dat["ID"]).value_counts(sort=False) + + for 
id_to_del in id_counter[id_counter < sequence_length].index: + dat.drop(dat[dat["ID"] == id_to_del].index, inplace=True) + + id_counter = pd.Series(dat["ID"]).value_counts(sort=False) + + print(f"==>>{i}번째 처리 후 dat.shape: {dat.shape}") + assert len(id_counter[id_counter < sequence_length].index) == 0 + + for count in id_counter: + cur_id_length = count - sequence_length + 1 + self.range_table.append(self.length + cur_id_length) + self.real_idx_table.append(self.real_length + count) + self.length += cur_id_length + self.real_length += count + + dat["ID"] = dat["ID"].astype("str") + f"_{i}" + df_list.append(dat.copy()) + + self.dat = pd.concat(df_list, ignore_index=True) + + # 정답 frame 담은 dict 만들기 + self.frame_label = dd(lambda: dd(lambda: [-1, -1])) + + folder_list = os.listdir(label_root) + + for folder in folder_list: + json_list = os.listdir(label_root + "/" + folder) + + for js in json_list: + with open(label_root + "/" + folder + "/" + js, "r") as j: + json_dict = json.load(j) + + for dict in json_dict["annotations"]["track"]: + if dict["@label"].endswith("_start"): + cur_id = dict["@id"] + self.frame_label[js[:-5]][cur_id][0] = dict["box"][0]["@frame"] + elif dict["@label"].endswith("_end"): + cur_id = dict["@id"] + self.frame_label[js[:-5]][cur_id][1] = dict["box"][0]["@frame"] + + def __len__(self): + return self.length + + def __getitem__(self, idx): + real_idx = self.find_real_idx(idx) + + sequence = self.dat[real_idx : real_idx + self.sequence_length].copy() + target_frames = sequence["Frame"].values + target_filename = sequence["Filename"].unique()[0].split(".")[0] + sequence.drop(columns=["ID"], inplace=True) + sequence.drop(columns=["Frame"], inplace=True) + sequence.drop(columns=["Filename"], inplace=True) + # sequence = self.scaler.fit_transform(sequence.values) + sequence = np.array(sequence) + + target_labels = [] + + for target_frame in target_frames: + temp = 0 + for cur_id in range(0, len(self.frame_label[target_filename].keys()), 2): + if 
int(target_frame) >= int( + self.frame_label[target_filename][str(int(cur_id))][0] + ) and int(target_frame) <= int( + self.frame_label[target_filename][str(int(cur_id) + 1)][1] + ): + temp = 1 + + target_labels.append(temp) + + target_labels = torch.LongTensor(target_labels) + + return (sequence, target_labels) + + def find_real_idx(self, idx): + + start = 0 + end = len(self.range_table) - 1 + while start <= end: + mid = (start + end) // 2 + if self.range_table[mid] == idx: + real_idx = idx + ((mid + 1) * (self.sequence_length - 1)) + return real_idx + + if self.range_table[mid] > idx: + end = mid - 1 + else: + start = mid + 1 + + real_idx = idx + (start * (self.sequence_length - 1)) + + return real_idx diff --git a/model/lstmae/lstm_ae.py b/model/lstmae/lstm_ae.py new file mode 100644 index 0000000..cd47351 --- /dev/null +++ b/model/lstmae/lstm_ae.py @@ -0,0 +1,117 @@ +import torch +import torch.nn as nn + +""" +LSTM output +- N : number of batches +- L : sequence lengh +- Q : input dim +- K : number of layers +- D : LSTM feature dimension + +Y,(hn,cn) = LSTM(X) + +- X : [N x L x Q] - `N` input sequnce of length `L` with `Q` dim. +- Y : [N x L x D] - `N` output sequnce of length `L` with `D` feature dim. +- hn : [K x N x D] - `K` (per each layer) of `N` final hidden state with `D` feature dim. +- cn : [K x N x D] - `K` (per each layer) of `N` final hidden state with `D` cell dim. 
+""" + + +class Encoder(nn.Module): + """ + input: input_seq: (batch_size, seq_len, n_features) -> (1, 20, 38) + output: hidden_cell -> (hn, cn) + -> ((num_layers, batch_size, hidden_size), (num_layers, batch_size, hidden_size)) + """ + + def __init__(self, num_layers, hidden_size, n_features, device): + super(Encoder, self).__init__() + + self.input_size = n_features + self.hidden_size = hidden_size + self.num_layers = num_layers + self.device = device + + self.lstm = nn.LSTM( + input_size=n_features, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=True, + ) + + def initHidden(self, batch_size): + """ + intialize hn, cn + """ + self.hidden_cell = ( + torch.randn( + (self.num_layers, batch_size, self.hidden_size), dtype=torch.float + ).to(self.device), + torch.randn( + (self.num_layers, batch_size, self.hidden_size), dtype=torch.float + ).to(self.device), + ) + + def forward(self, input_seq): + self.initHidden(input_seq.shape[0]) + _, self.hidden_cell = self.lstm(input_seq, self.hidden_cell) + return self.hidden_cell + + +class Decoder(nn.Module): + """ + input: (input_seq, hidden_cell) + input_seq: + hidden_cell: encoder 에서 넘어온 hidden_cell (hn, cn) + output: + decoder output: (batch_size, seq_len, n_features) -> (1, 1, 38) + linear output: (batch_size, n_features) -> (1, 38) + """ + + def __init__(self, num_layers, hidden_size, n_features, device): + super(Decoder, self).__init__() + + self.input_size = n_features + self.hidden_size = hidden_size + self.num_layers = num_layers + self.device = device + + self.lstm = nn.LSTM( + input_size=n_features, + hidden_size=hidden_size, + num_layers=num_layers, + batch_first=True, + ) + self.linear = nn.Linear(in_features=hidden_size, out_features=n_features) + + def forward(self, input_seq, hidden_cell): + output, hidden_cell = self.lstm(input_seq, hidden_cell) + output = self.linear(output) + return output, hidden_cell + + +class LSTMAutoEncoder(nn.Module): + """ + output: input seq_len(20) 모두 복원 + 
reconstruction 순서는 입력의 반대. + """ + + def __init__(self, num_layers, hidden_size, n_features, device): + super(LSTMAutoEncoder, self).__init__() + self.device = device + self.encoder = Encoder(num_layers, hidden_size, n_features, device) + self.decoder = Decoder(num_layers, hidden_size, n_features, device) + + def forward(self, input_seq): + output = torch.zeros(size=input_seq.shape, dtype=torch.float) + hidden_cell = self.encoder(input_seq) + input_decoder = torch.zeros( + (input_seq.shape[0], 1, input_seq.shape[2]), dtype=torch.float + ).to(self.device) + for i in range(input_seq.shape[1] - 1, -1, -1): + output_decoder, hidden_cell = self.decoder(input_decoder, hidden_cell) + input_decoder = output_decoder + output[:, i, :] = output_decoder[:, 0, :] + + return output.to(self.device) diff --git a/model/lstmae/lstm_ae_old.py b/model/lstmae/lstm_ae_old.py new file mode 100644 index 0000000..87c2ba2 --- /dev/null +++ b/model/lstmae/lstm_ae_old.py @@ -0,0 +1,129 @@ +import torch +import torch.nn as nn + +""" +LSTM output +- N : number of batches +- L : sequence lengh +- Q : input dim +- K : number of layers +- D : LSTM feature dimension + +Y,(hn,cn) = LSTM(X) + +- X : [N x L x Q] - `N` input sequnce of length `L` with `Q` dim. +- Y : [N x L x D] - `N` output sequnce of length `L` with `D` feature dim. +- hn : [K x N x D] - `K` (per each layer) of `N` final hidden state with `D` feature dim. +- cn : [K x N x D] - `K` (per each layer) of `N` final hidden state with `D` cell dim. 
+""" + + +class Encoder(nn.Module): + def __init__(self, seq_len, n_features, embedding_dim=64): + super(Encoder, self).__init__() + self.seq_len, self.n_features = seq_len, n_features + self.embedding_dim, self.hidden_dim = (embedding_dim, 2 * embedding_dim) + + self.rnn1 = nn.LSTM( + input_size=n_features, + hidden_size=self.hidden_dim, + num_layers=1, + batch_first=True, + ) + + self.rnn2 = nn.LSTM( + input_size=self.hidden_dim, + hidden_size=embedding_dim, + num_layers=1, + batch_first=True, + ) + + def forward(self, x): + """ + input(x): [batch_size, seq_len, n_features] - [1,20,38] + rnn1 output(x1): [batch_size, seq_len, hidden_dim] - [1,20,128] + rnn2 output(x2): [batch_size, seq_len, embedding_dim] - [1,20,64] + decoder input: 마지막 sequence hidden state + """ + x1, (hidden_n, cell_n) = self.rnn1(x) + x2, (hidden_n, cell_n) = self.rnn2(x1) + return x2[:, -1, :] + # return (hidden_n, cell_n) + + +class TimeDistributed(nn.Module): + def __init__(self, module, batch_first=True): + super(TimeDistributed, self).__init__() + self.module = module + self.batch_first = batch_first + + def forward(self, x): + if len(x.size()) <= 2: + return self.module(x) + # Squash batch_size and seq_len into a single axis + x_reshape = x.contiguous().view( + -1, x.size(-1) + ) # (batch_size * seq_len, input_size) => (1, 128) + y = self.module(x_reshape) # linear layer, output: batch_siz, n_features(38) + # We have to reshape Y + if self.batch_first: + y = y.contiguous().view( + x.size(0), -1, y.size(-1) + ) # (batch_size, seq_len, n_features) + else: + y = y.view(-1, x.size(1), y.size(-1)) # (seq_len, batch_size, n_features) + return y + + +class Decoder(nn.Module): + def __init__(self, prediction_time=1, input_dim=64, n_features=38): + super(Decoder, self).__init__() + # input 은 encoder 에서 나온 embedding_dim + self.prediction_time, self.input_dim = prediction_time, input_dim + self.hidden_dim, self.n_features = 2 * input_dim, n_features + + self.rnn1 = nn.LSTM( + 
input_size=input_dim, hidden_size=input_dim, num_layers=1, batch_first=True + ) + + self.rnn2 = nn.LSTM( + input_size=input_dim, + hidden_size=self.hidden_dim, + num_layers=1, + batch_first=True, + ) + + # time_distributed + # linear 로 1차원으로 복원 후 각 time 에서 출력된 아웃풋을 linear layer 와 연결 + self.output_layer = torch.nn.Linear(self.hidden_dim, n_features) + self.timedist = TimeDistributed(self.output_layer) + + def forward(self, x): + """ + repeat_x: prediction_time 만큼 encoder에서 온 input(hidden_n) 을 늘림. prediction_time이 1일 때 (1,1,64) -> (1,1,64). 입력 그대로 복원할 때는 seq_len. 그러면 (1,20,64). + 즉, + rnn1 output(x1): [batch_size, prediction_time, input_dim] - [1,1,64] + rnn2 output(x2): [batch_size, prediction_time, hidden_dim] - [1,1,128] + timedist output(return output): [batch_size, prediction_time, n_features] - [1,1,38] + """ + repeat_x = x.reshape(-1, 1, self.input_dim).repeat(1, self.prediction_time, 1) + x1, (hidden_n, cell_n) = self.rnn1(repeat_x) + x2, (hidden_n, cell_n) = self.rnn2(x1) + return self.timedist(x2) + + +class LSTMAutoencoder(nn.Module): + def __init__(self, seq_len, prediction_time, n_features, embedding_dim=64): + super(LSTMAutoencoder, self).__init__() + self.encoder = Encoder(seq_len, n_features, embedding_dim) + self.decoder = Decoder(prediction_time, embedding_dim, n_features) + + def forward(self, x): + """ + input(x): [1, seq_len, n_features] + encoder output(x_e): [batch_size, last seq_len, embedding_dim] + final output(x_d): [1, predict_time, n_features] + """ + x_e = self.encoder(x) + x_d = self.decoder(x_e) + return x_d diff --git a/model/lstmae/train.py b/model/lstmae/train.py new file mode 100644 index 0000000..34c5255 --- /dev/null +++ b/model/lstmae/train.py @@ -0,0 +1,330 @@ +import os +import os.path as osp +import random +from argparse import ArgumentParser +from datetime import datetime + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import wandb +from dataset import NormalDataset +from lstm_ae 
# train.py — training entry point for the LSTM autoencoder.
import os
import os.path as osp
import random
from argparse import ArgumentParser
from datetime import datetime

import numpy as np
import torch
import torch.nn as nn


def parse_args():
    """Build and parse the command-line arguments for training."""
    parser = ArgumentParser()

    # Training data path
    parser.add_argument(
        "--root_dir",
        type=str,
        default=os.environ.get(
            "SM_CHANNEL_TRAIN_CSV",
            "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/lstmae/dataset/normal",
        ),
    )
    # Checkpoint (.pth) output directory
    parser.add_argument(
        "--model_dir",
        type=str,
        default=os.environ.get(
            "SM_MODEL_DIR",
            "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/pts",
        ),
    )
    # Model name used in checkpoint filenames
    parser.add_argument("--model_name", type=str, default="LSTM")
    # Checkpoint filename to resume from (currently unused downstream)
    parser.add_argument("--resume_name", type=str, default="")
    parser.add_argument("--seed", type=int, default=666)

    parser.add_argument(
        "--device", default="cuda" if torch.cuda.is_available() else "cpu"
    )
    parser.add_argument("--num_workers", type=int, default=8)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--val_batch_size", type=int, default=64)
    parser.add_argument("--val_num_workers", type=int, default=8)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--max_epoch", type=int, default=50)

    parser.add_argument("--save_interval", type=int, default=1)
    parser.add_argument("--val_interval", type=int, default=1)
    parser.add_argument("--thr", type=float, default=0.02)

    parser.add_argument("--patience", type=int, default=10)

    parser.add_argument("--wandb_mode", type=str, default="online")
    parser.add_argument("--wandb_run_name", type=str, default="LSTM-AE")

    return parser.parse_args()


def set_seed(seed):
    """Seed every RNG in use (torch, CUDA, numpy, random) for reproducibility."""
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if using multi-GPU
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(seed)
    random.seed(seed)


def train(
    root_dir,
    model_dir,
    model_name,
    device,
    num_workers,
    batch_size,
    val_num_workers,
    val_batch_size,
    learning_rate,
    max_epoch,
    val_interval,
    save_interval,
    thr,
    patience,
    resume_name,
    seed,
    wandb_mode,
    wandb_run_name,
):
    """Train the LSTM autoencoder on normal sequences with early stopping.

    Saves a "latest" checkpoint every ``save_interval`` epochs and a "best"
    checkpoint whenever validation loss improves.  ``thr`` and ``resume_name``
    are accepted for interface compatibility but are currently unused.
    """
    # Heavy/optional dependencies are imported here so importing this module
    # (e.g. to reuse set_seed) does not require them.
    import wandb
    from torch.utils.data import DataLoader, random_split
    from tqdm import tqdm
    from dataset import NormalDataset
    from lstm_ae import LSTMAutoEncoder

    time_start = datetime.now()
    train_start = time_start.strftime("%Y%m%d_%H%M%S")

    set_seed(seed)

    if not osp.exists(model_dir):
        os.makedirs(model_dir)

    # bbox (4) + 17 keypoints x 2 coords = 38 — TODO confirm against dataset
    n_features = 38

    counter = 0  # early-stopping counter (epochs without improvement)

    dataset = NormalDataset(root=root_dir)

    # 90/10 train/validation split.
    valid_data_size = len(dataset) // 10
    train_data_size = len(dataset) - valid_data_size
    train_dataset, valid_dataset = random_split(
        dataset, lengths=[train_data_size, valid_data_size]
    )

    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
    )
    valid_loader = DataLoader(
        dataset=valid_dataset,
        batch_size=val_batch_size,
        shuffle=False,
        num_workers=val_num_workers,
    )

    data_load_end = datetime.now()
    data_load_time = str(data_load_end - time_start).split(".")[0]
    print(f"==>> data_load_time: {data_load_time}")

    model = LSTMAutoEncoder(
        num_layers=2, hidden_size=50, n_features=n_features, device=device
    )
    model.to(device)

    optimizer = torch.optim.Adam(
        model.parameters(), lr=learning_rate, weight_decay=1e-6
    )
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[15, 40], gamma=0.1
    )

    criterion = nn.MSELoss()
    val_criterion = nn.MSELoss(reduction="none")

    print("Start training..")

    wandb.init(
        project="VAD",
        config={
            "lr": learning_rate,
            "dataset": "무인매장",
            "n_epochs": max_epoch,
            "loss": "MSE",
            "notes": "VAD 실험",
        },
        name=wandb_run_name + "_" + train_start,
        mode=wandb_mode,
    )
    wandb.watch((model,))

    best_loss = np.inf
    total_batches = len(train_loader)

    for epoch in range(max_epoch):
        model.train()
        epoch_start = datetime.now()
        epoch_loss = 0.0

        for step, data in tqdm(enumerate(train_loader), total=total_batches):
            data = data.to(device)
            optimizer.zero_grad()

            pred = model(data)
            loss = criterion(pred, data)
            loss.backward()
            optimizer.step()

            # .item() detaches the value; the original accumulated the loss
            # tensor itself, keeping every batch's autograd graph alive.
            epoch_loss += loss.item()

        epoch_mean_loss = epoch_loss / total_batches

        train_end = datetime.now()
        train_time = str(train_end - epoch_start).split(".")[0]
        print(
            f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)}"
        )

        if (epoch + 1) % save_interval == 0:
            ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth")
            states = {
                "epoch": epoch,
                "model_name": model_name,
                "model_state_dict": model.state_dict(),
                "optimizer_state_dict": optimizer.state_dict(),
                "scheduler_state_dict": scheduler.state_dict(),
            }
            torch.save(states, ckpt_fpath)

        # Validate every val_interval epochs; with the default of 1 this block
        # (including scheduler.step and early stopping) runs every epoch.
        if (epoch + 1) % val_interval == 0:
            print(f"Start validation #{epoch+1:2d}")
            model.eval()

            with torch.no_grad():
                total_loss = 0.0
                for step, data in tqdm(
                    enumerate(valid_loader), total=len(valid_loader)
                ):
                    data = data.to(device)
                    pred = model(data)
                    val_loss = val_criterion(pred, data)
                    total_loss += torch.mean(val_loss).item()

                val_mean_loss = total_loss / len(valid_loader)

                if best_loss > val_mean_loss:
                    print(
                        f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}"
                    )
                    print(f"Save model in {model_dir}")
                    # Best checkpoint keeps only the weights (inference use).
                    states = {
                        "epoch": epoch,
                        "model_name": model_name,
                        "model_state_dict": model.state_dict(),
                    }
                    best_ckpt_fpath = osp.join(
                        model_dir, f"{model_name}_{train_start}_best.pth"
                    )
                    torch.save(states, best_ckpt_fpath)
                    best_loss = val_mean_loss
                    counter = 0
                else:
                    counter += 1

            wandb.log(
                {
                    "train_loss": epoch_mean_loss,
                    "valid_loss": val_mean_loss,
                    # get_last_lr() replaces the deprecated get_lr(), which
                    # reports a wrong value with MultiStepLR after step().
                    "learning_rate": scheduler.get_last_lr()[0],
                }
            )

            scheduler.step()

            epoch_end = datetime.now()
            epoch_time = str(epoch_end - epoch_start).split(".")[0]
            print(
                f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)}"
            )

            if counter > patience:
                print("Early Stopping...")
                break

    time_end = datetime.now()
    total_time = str(time_end - time_start).split(".")[0]
    print(f"==>> total time: {total_time}")


def main(args):
    train(**args.__dict__)


if __name__ == "__main__":
    args = parse_args()
    main(args)
# anomaly_detector_LSTM.py — run YOLOv8 pose tracking on a video and flag
# anomalous tracks by LSTM-autoencoder reconstruction error.
import argparse
from collections import defaultdict

import numpy as np


def calculate_mse(seq1, seq2):
    """Mean squared error between two equally shaped arrays."""
    return np.mean(np.power(seq1 - seq2, 2))


def display_text(frame, text, position):
    """Draw green annotation text onto ``frame`` in place."""
    import cv2  # local import: keeps this module importable without OpenCV

    cv2.putText(
        frame,
        text,
        position,
        cv2.FONT_HERSHEY_SIMPLEX,
        1,
        (0, 255, 0),
        2,
        cv2.LINE_AA,
    )


def parse_args():
    """Parse command-line options for the detector."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--video_path",
        type=str,
        default="../videos/anomaly_1.mp4",
        help="Path to the video file",
    )
    parser.add_argument(
        "--threshold", type=float, default=0.02, help="Anomaly detection threshold"
    )
    return parser.parse_args()


def main():
    """Track people, buffer 20-frame pose windows per ID, and flag anomalies.

    The original script executed all of this at module import time; wrapping
    it in main() makes the helpers importable and testable.
    """
    import cv2
    import torch
    from sklearn.preprocessing import MinMaxScaler
    from ultralytics import YOLO

    args = parse_args()

    model = YOLO("yolov8n-pose.pt")
    # NOTE(review): loaded with torch.load but used below via .predict(...)
    # like a Keras model — confirm which framework produced 'model.h5'.
    autoencoder_model = torch.load("model.h5")

    standard_width = 640
    standard_height = 480

    # "0" selects the default webcam instead of a file.
    cap = cv2.VideoCapture(0 if args.video_path == "0" else args.video_path)

    track_history = defaultdict(list)  # per-ID center-point trail for drawing

    # Must match the trained autoencoder's configuration.
    sequence_length = 20
    prediction_time = 1
    n_features = 38

    id_buffers = defaultdict(list)  # per-ID rolling window of pose features

    threshold = args.threshold
    frame_count = 0
    net_mse = 0
    # NOTE(review): avg_mse divides by the total frame count, not the number
    # of scored windows — confirm this running-average is intended.
    avg_mse = 0

    while cap.isOpened():
        success, frame = cap.read()
        frame_count += 1
        if not success:
            break  # end of video

        frame = cv2.resize(frame, (standard_width, standard_height))
        # Persist tracks so IDs remain stable across frames.
        results = model.track(frame, persist=True)

        if results[0].boxes is not None:
            boxes = results[0].boxes.xywh.cpu()

            if results[0].boxes.id is not None:
                track_ids = results[0].boxes.id.int().cpu().tolist()
                anomaly_text = ""

                for i, box in zip(range(len(track_ids)), results[0].boxes.xywhn.cpu()):
                    x, y, w, h = box
                    keypoints = (
                        results[0].keypoints.xyn[i].cpu().numpy().flatten().tolist()
                    )
                    # Feature vector: normalized bbox + flattened keypoints.
                    id_buffers[track_ids[i]].append(
                        [float(x), float(y), float(w), float(h)] + keypoints
                    )

                    # Score once the window is full.
                    if len(id_buffers[track_ids[i]]) >= 20:
                        buffer_array = np.array(id_buffers[track_ids[i]])

                        # Per-window scaling, as done during training.
                        scaler = MinMaxScaler()
                        buffer_scaled = scaler.fit_transform(buffer_array)

                        x_pred = buffer_scaled[-sequence_length:].reshape(
                            1, sequence_length, n_features
                        )
                        x_pred = autoencoder_model.predict(x_pred)
                        x_pred_original = scaler.inverse_transform(
                            x_pred.reshape(-1, n_features)
                        )

                        mse = calculate_mse(
                            buffer_array[-prediction_time:], x_pred_original
                        )
                        print(mse)
                        net_mse = mse + net_mse
                        avg_mse = net_mse / frame_count

                        # Adaptive threshold: blend of running average and
                        # the fixed CLI threshold (weights are empirical).
                        if mse > 1.5 * avg_mse * 0.25 + 0.75 * threshold:
                            if anomaly_text == "":
                                anomaly_text = (
                                    f"Anomaly detected for ID {track_ids[i]}"
                                )
                            else:
                                anomaly_text = f"{anomaly_text}, {track_ids[i]}"
                            print(anomaly_text)

                        # Slide the window forward by one frame.
                        id_buffers[track_ids[i]].pop(0)
            else:
                anomaly_text = ""
                track_ids = []

            annotated_frame = results[0].plot()
            display_text(annotated_frame, anomaly_text, (10, 30))

            # Draw each track's recent center-point trail.
            for box, track_id in zip(boxes, track_ids):
                x, y, w, h = box
                track = track_history[track_id]
                track.append((float(x), float(y)))
                if len(track) > 30:
                    track.pop(0)
                points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
                cv2.polylines(
                    annotated_frame,
                    [points],
                    isClosed=False,
                    color=(230, 230, 230),
                    thickness=10,
                )

            cv2.imshow("YOLOv8 Tracking", annotated_frame)
        else:
            cv2.imshow("YOLOv8 Tracking", frame)

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
import torch
import torch.nn as nn
import torch.nn.functional as F

# einops is optional here: Attention below uses native view/permute instead
# of rearrange, but the import is kept (guarded) for code later in this file.
try:
    from einops import rearrange
except ImportError:  # pragma: no cover
    rearrange = None


class LSTMAutoencoder(nn.Module):
    """Keras-style stacked LSTM autoencoder (100 -> 50 -> 100 -> n_features).

    NOTE(review): repeat_vector pads the *feature* axis with ReplicationPad1d
    to width 50 + prediction_time - 1 so it matches the decoder's input_size;
    confirm this is the intended "RepeatVector" emulation.
    """

    def __init__(self, sequence_length, n_features, prediction_time):
        super().__init__()

        self.sequence_length = sequence_length
        self.n_features = n_features
        self.prediction_time = prediction_time

        # Encoder
        self.encoder = nn.LSTM(input_size=n_features, hidden_size=100, batch_first=True)
        self.encoder2 = nn.LSTM(input_size=100, hidden_size=50, batch_first=True)

        self.repeat_vector = nn.Sequential(
            nn.ReplicationPad1d(padding=(0, prediction_time - 1)),
            nn.ReplicationPad1d(padding=(0, 0)),
        )

        # Decoder
        self.decoder = nn.LSTM(
            input_size=50 + prediction_time - 1, hidden_size=100, batch_first=True
        )
        self.decoder2 = nn.LSTM(input_size=100, hidden_size=n_features, batch_first=True)

    def forward(self, x):
        x, (_, _) = self.encoder(x)
        x, (_, _) = self.encoder2(x)

        x = self.repeat_vector(x)

        x, (_, _) = self.decoder(x)
        x, (_, _) = self.decoder2(x)

        # Return only the last prediction_time steps.
        if self.prediction_time == 1:
            return x[:, -1, :].unsqueeze(dim=1)
        else:
            return x[:, -(self.prediction_time):, :]


class MILClassifier(nn.Module):
    """MLP scoring head mapping a feature vector to an anomaly score in [0, 1]."""

    def __init__(self, input_dim=710, drop_p=0.0):
        super().__init__()
        self.classifier = nn.Sequential(
            nn.Linear(input_dim, 512),
            nn.ReLU(),
            nn.Dropout(drop_p),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Dropout(drop_p),
            nn.Linear(512, 32),
            nn.ReLU(),
            nn.Dropout(drop_p),
            nn.Linear(32, 1),
            nn.Sigmoid(),
        )

        self.drop_p = drop_p
        self.weight_init()

    def weight_init(self):
        """Xavier-init Linear layers; unit-init BatchNorm (if ever added)."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        # Flatten any leading dims so every segment is scored independently.
        x = x.view(-1, x.size(-1))
        return self.classifier(x)


class NormalHead(nn.Module):
    """Conv1d tower producing per-snippet normality scores.

    forward(x) with x: (BN, C, T) returns a list of the three conv outputs;
    the last one is sigmoid-activated scores of shape (BN, 1, T).
    """

    def __init__(self, in_channel=512, ratios=[16, 32], kernel_sizes=[1, 1, 1]):
        super(NormalHead, self).__init__()
        self.ratios = ratios
        self.kernel_sizes = kernel_sizes
        self.build_layers(in_channel)

    def build_layers(self, in_channel):
        ratio_1, ratio_2 = self.ratios
        # stride 1 + padding kernel//2 preserves the temporal length T.
        self.conv1 = nn.Conv1d(
            in_channel,
            in_channel // ratio_1,
            self.kernel_sizes[0],
            1,
            self.kernel_sizes[0] // 2,
        )
        self.bn1 = nn.BatchNorm1d(in_channel // ratio_1)
        self.conv2 = nn.Conv1d(
            in_channel // ratio_1,
            in_channel // ratio_2,
            self.kernel_sizes[1],
            1,
            self.kernel_sizes[1] // 2,
        )
        self.bn2 = nn.BatchNorm1d(in_channel // ratio_2)
        self.conv3 = nn.Conv1d(
            in_channel // ratio_2, 1, self.kernel_sizes[2], 1, self.kernel_sizes[2] // 2
        )
        self.act = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.bns = [self.bn1, self.bn2]

    def forward(self, x):
        outputs = []
        x = self.conv1(x)
        outputs.append(x)
        x = self.conv2(self.act(self.bn1(x)))
        outputs.append(x)
        x = self.sigmoid(self.conv3(self.act(self.bn2(x))))
        outputs.append(x)
        return outputs


def pair(t):
    """Return t unchanged if it is a tuple, else (t, t)."""
    return t if isinstance(t, tuple) else (t, t)


class PreNorm(nn.Module):
    """LayerNorm applied before the wrapped function (pre-norm transformer)."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)


class FeedForward(nn.Module):
    """Standard transformer MLP block: Linear -> GELU -> Linear."""

    def __init__(self, dim, hidden_dim, dropout=0.0):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        )

    def forward(self, x):
        return self.net(x)


class Attention(nn.Module):
    """Dual attention: learned softmax attention plus a fixed distance-decay
    attention over the temporal axis; the two outputs are concatenated.
    """

    def __init__(self, dim, heads=8, dim_head=64, dropout=0.0):
        super().__init__()
        inner_dim = dim_head * heads
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head**-0.5

        self.attend = nn.Softmax(dim=-1)
        # Projects to q, k, v and t (the value stream for decay attention).
        self.to_qkv = nn.Linear(dim, inner_dim * 4, bias=False)

        # NOTE(review): when project_out is False the concatenated output has
        # 2 * inner_dim channels but passes through Identity — confirm that
        # configuration is never used.
        self.to_out = (
            nn.Sequential(nn.Linear(2 * inner_dim, dim), nn.Dropout(dropout))
            if project_out
            else nn.Identity()
        )

    def forward(self, x):
        b, n, d = x.size()
        qkvt = self.to_qkv(x).chunk(4, dim=-1)
        # (b, n, h*d) -> (b, h, n, d) without einops.
        q, k, v, t = (
            z.view(b, n, self.heads, -1).permute(0, 2, 1, 3) for z in qkvt
        )

        dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
        attn1 = self.attend(dots)

        # Distance-decay attention built on x.device — the original
        # hard-coded .cuda(), which crashed on CPU-only machines.
        pos = torch.linspace(1, n, n, device=x.device)
        tg_tmp = torch.abs(pos - pos.view(-1, 1))
        attn2 = torch.exp(-tg_tmp / torch.exp(torch.tensor(1.0, device=x.device)))
        attn2 = (
            (attn2 / attn2.sum(-1))
            .unsqueeze(0)
            .unsqueeze(1)
            .repeat(b, self.heads, 1, 1)
        )

        out = torch.cat([torch.matmul(attn1, v), torch.matmul(attn2, t)], dim=-1)
        out = out.permute(0, 2, 1, 3).reshape(b, n, -1)
        return self.to_out(out)


class Transformer(nn.Module):
    """Pre-norm transformer encoder with residual attention + MLP blocks."""

    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.0):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(
                nn.ModuleList(
                    [
                        PreNorm(
                            dim,
                            Attention(
                                dim, heads=heads, dim_head=dim_head, dropout=dropout
                            ),
                        ),
                        PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)),
                    ]
                )
            )

    def forward(self, x):
        for attn, ff in self.layers:
            x = attn(x) + x
            x = ff(x) + x
        return x


class Temporal(nn.Module):
    """Temporal 1D conv over snippet features.

    Input is (batch * n_crops, t snippets, d feature dim).  Permuting before
    Conv1d makes the kernel span 3 *adjacent time steps* over all features
    (rather than 3 features over all time steps), keeps t unchanged
    (kernel 3, stride 1, padding 1), and lets the filter count set the
    output feature dimension.
    """

    def __init__(self, input_size, out_size):
        super(Temporal, self).__init__()
        self.conv_1 = nn.Sequential(
            nn.Conv1d(
                in_channels=input_size,
                out_channels=out_size,
                kernel_size=3,
                stride=1,
                padding=1,
            ),
            nn.ReLU(),
        )

    def forward(self, x):
        x = x.permute(0, 2, 1)  # (B, d, t) for Conv1d
        x = self.conv_1(x)
        x = x.permute(0, 2, 1)  # back to (B, t, d_out)
        return x
ratios=ratios, kernel_sizes=kernel_sizes + ) + self.embedding = Temporal(input_size, 512) + self.selfatt = Transformer(512, 2, 4, 128, 512, dropout=0) + # embedding + selfatt은 논문의 feature enhancer + # embedding은 feature 차원을 permute + conv1d를 이용해 512로 변경 + # selfatt는 transformer계열 enhancer + self.step = 0 + + def get_normal_scores(self, x, ncrops=None): + # x는 (batch * n crops, segment 개수, feature 차원 = 512(논문)) + new_x = x.permute(0, 2, 1) + # conv1d에 넣기전에 (batch * n crops, feature 차원, segment 개수)로 변경 + + outputs = self.normal_head(new_x) + # normal_head는 conv1d - bn - relu - conv1d - bn - relu - conv1d - sig 3층 구조 + # outputs는 normal_head 안의 3개의 conv1d output을 담은 list (마지막 output은 conv1d + sig output) + normal_scores = outputs[-1] + xhs = outputs[:-1] + + if ncrops: + b = normal_scores.shape[0] // ncrops + normal_scores = normal_scores.view(b, ncrops, -1).mean(1) + # (batch_size, t snippets) + + return xhs, normal_scores + + def get_mahalanobis_distance(self, feats, anchor, var, ncrops=None): + # 첫번째는 feat는 (batch_size * n crops, 512 // 16, t snippets) + # 두번째는 (batch_size * n crops, 512 // 32, t snippets) + # BN은 각 feature(채널 축)별 batch*h*w개 평균, 분산 계산 + # => (b, c, h*w) -> (c) + # => None으로 unsqueeze해서 (1, c, 1)로 변경 + distance = torch.sqrt( + torch.sum((feats - anchor[None, :, None]) ** 2 / var[None, :, None], dim=1) + ) + # (x - m)^2/var -> torch.sum(dim=1)로 각배치 안의 각 토막(segment)별로 값 존재 (b, t) + # sqrt후에도 사이즈 그대로 (b, t) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # var가 전부 1이면 distance는 BN running mean vector와 각 토막의 feature vector 간의 차이 벡터의 L2 norm 길이가 된다 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + if ncrops: + bs = distance.shape[0] // ncrops + # b x t + distance = distance.view(bs, ncrops, -1).mean(dim=1) + # (batch_size, n crops, t snippets)을 dim=1로 평균 => 동일 영상 10개 crop들의 결과를 평균 + # => (batch_size, t snippets) + 
return distance + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # 배치 내의 각 영상의 각 토막 feature 벡터가 + # 데이터셋 분포 내 모든 영상의 모든 토막의 feature(512 // 16 또는 512 // 32 차원) 벡터들의 평균인 벡터(running_mean으로 추정)와 + # 얼마나 다른지 알려주는 mahalanobis 거리 계산 + # 데이터 분포내의 모든 토막 feature 벡터의 평균이고 정상토막의 비중이 이상토막의 비중보다 압도적으로 크기 때문에 + # 이 평균 벡터는 정상 토막의 기준처럼 사용 가능(anchor) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + def pos_neg_select(self, feats, distance, ncrops): + batch_select_ratio = self.ratio_batch + # 기본값 0.4 + sample_select_ratio = self.ratio_sample + # 기본값 0.2 + bs, c, t = feats.shape + # 첫번째는 (batch_size * n crops, 512 // 16, t snippets) + # 두번째는 (batch_size * n crops, 512 // 32, t snippets) + select_num_sample = int(t * sample_select_ratio) + # sample-level selection(SLS)은 20% + select_num_batch = int(bs // 2 * t * batch_select_ratio) + # 데이터는 torch.cat((정상영상, 이상영상), dim=0)으로 정상영상 배치 뒤에 이상영상 배치가 붙어있음 + # => bs // 2가 실제 batch_size * n crops 개수 + # => batch-level selection(BLS)은 (bs // 2) * t개 중 40% + # ==> 40 // 2 해서 사실상 SLS와 동일 비율로 배치 하나당 20% + + feats = feats.view(bs, ncrops, c, t).mean(dim=1) # b x c x t + # 동일 영상에서 나온 10개 crop들 결과 평균 + # => (batch_size, c features, t snippets) + nor_distance = distance[: bs // 2] # b x t + # distance는 10개 crop들을 이미 평균내고 (batch_size, t snippets) + # 그리고 배치 앞 절반은 정상영상 배치 => (n_batch_size = batch_size // 2, t snippets) + nor_feats = feats[: bs // 2].permute(0, 2, 1) # b x t x c + # 정상부분 앞 절반만 가져와 permute => (n_batch_size, t snippets, c features) + abn_distance = distance[bs // 2 :] # b x t + # 배치 뒤 절반은 이상영상 배치 (a_batch_size = batch_size // 2, t snippets) + abn_feats = feats[bs // 2 :].permute(0, 2, 1) # b x t x c + # (a_batch_size, t snippets, c features) + abn_distance_flatten = abn_distance.reshape(-1) + # (a_batch_size * t snippets) + abn_feats_flatten = abn_feats.reshape(-1, c) + # 
(a_batch_size * t snippets, c features) + + mask_select_abnormal_sample = torch.zeros_like(abn_distance, dtype=torch.bool) + # (a_batch_size, t snippets) + topk_abnormal_sample = torch.topk(abn_distance, select_num_sample, dim=-1)[1] + # torch.topk(abn_distance, select_num_sample, dim=-1)는 top k개 value와 그 value들 indices를 담고 있다 + # value와 indices 둘 다 (a_batch_size, top K = select_num_sample) 형태 + # => [1]로 indices만 가져오기 + mask_select_abnormal_sample.scatter_( + dim=1, + index=topk_abnormal_sample, + src=torch.full_like(topk_abnormal_sample, True, dtype=torch.bool), + ) + # (a_batch_size, t snippets) 형태이고 True는 a_batch_size * select_num_sample개이고 나머지는 False + # (top k에 속하는 index 자리만 True, 나머지는 False) + # scatter는 gather의 reverse operation + + mask_select_abnormal_batch = torch.zeros_like( + abn_distance_flatten, dtype=torch.bool + ) + # (a_batch_size * t snippets) + topk_abnormal_batch = torch.topk( + abn_distance_flatten, select_num_batch, dim=-1 + )[1] + # (a_batch_size * select_num_batch) + # top K = select_num_batch 개 indices + mask_select_abnormal_batch.scatter_( + dim=0, + index=topk_abnormal_batch, + src=torch.full_like(topk_abnormal_batch, True, dtype=torch.bool), + ) + # (a_batch_size * t snippets) + + mask_select_abnormal = ( + mask_select_abnormal_batch | mask_select_abnormal_sample.reshape(-1) + ) + # SLS와 BLS를 or 연산 | 으로 합쳐서 논문의 Sample-Batch Selection(SBS) + select_abn_feats = abn_feats_flatten[mask_select_abnormal] + # mask_select_abnormal는 (a_batch_size * t snippets)개 중 num_select_abnormal개만 True고 나머진 False + # abn_feats_flatten의 (a_batch_size * t snippets, c features)에서 mask_select_abnormal를 indices로 쓰면 + # (num_select_abnormal, c feature) 형태가 된다 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # select_abn_feats는 SLS와 BLS를 합쳐 SBS를 만드는 과정에서 상위 ~%에 들었다는 정보만 남고 distance 상위 몇번째인지 순서 정보가 날아간다 + # 
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + num_select_abnormal = torch.sum(mask_select_abnormal) + # SBS 추출 개수 + + k_nor = int(num_select_abnormal / (bs // 2)) + 1 + # 이상영상 배치에서 SBS로 선택한 개수 / 배치내 영상 개수 == 1 배치 당 평균 선택 개수 + # + 1을 해주어서 정상 영상에서 선택된 토막(snippets)개수가 이상 영상 선택 토막 개수보다 크게 설정 + topk_normal_sample = torch.topk(nor_distance, k_nor, dim=-1)[1] + # nor_distance는 (n_batch_size, t snippets) + # topk_normal_sample는 각 영상의 t개 토막 중 상위 k_nor개의 indices + # => (n_batch_size, k_nor) + select_nor_feats = torch.gather( + nor_feats, 1, topk_normal_sample[..., None].expand(-1, -1, c) + ) + # nor_feats는 (n_batch_size, t snippets, c features) + # gather의 index는 input과 차원수가 같아야하므로 None으로 (n_batch_size, k_nor, 1), expand로 (n_batch_size, k_nor, c) 형태로 변경 + # expand : Returns a new view of the self tensor with singleton dimensions expanded to a larger size. + # gather dimension이 1 => select_nor_feats[i][j][k] = nor_feats[i][topk_normal_sample[i][j][k]][k] + # select_nor_feats는 (n_batch_size, k_nor, c) 형태 (gather는 index와 output 형태가 동일) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # select_abn_feats와 다르게 select_nor_feats는 크기 순서를 지우지 않고 gather를 써서 dim=1 방향으로 nor_distance 값 내림차순 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + select_nor_feats = select_nor_feats.permute(1, 0, 2).reshape(-1, c) + # (k_nor, n_batch_size, c)로 바꾼 후 reshape로 (k_nor * n_batch_size, c) 형태 + select_nor_feats = select_nor_feats[:num_select_abnormal] + # k_nor * n_batch_size는 num_select_abnormal보다 크다 => out of index 에러 안 일어남 + # select_nor_feats는 최종적으로 (num_select_abnormal, c feature) 형태 + + return select_nor_feats, select_abn_feats + + def forward(self, x, flag="Eval"): + if len(x.size()) == 4: + b, n, t, d = x.size() + # 실험에 사용한 I3D UCF-Crime feature는 
하나의 영상을 중앙, 4코너 + 중앙, 4코너 거울상 = 10개 crop으로 증강해서 계산 + # => batch 개수, n crop 개수, t 토막(snippet, segment) 개수, d snippet당 feature 차원수 + + x = x.reshape(b * n, t, d) + else: + b, t, d = x.size() + n = 1 + x = self.embedding(x) + x = self.selfatt(x) + # feature enhancer를 지난 feature의 차원수 d == 512(논문) + + normal_feats, normal_scores = self.get_normal_scores(x, n) + # normal_head는 conv1d - bn - relu - conv1d - bn - relu - conv1d - sig 3층 구조 + # normal_feats는 [첫 conv1d output, 두번째 conv1d output] + # => (batch_size * n crops, 512 // 16, t snippets), (batch_size * n crops, 512 // 32, t snippets) 형태 + # normal_scores는 마지막 conv1d - sig output => (batch_size, t snippets) 형태 (n crops는 평균-> 1) + + anchors = [bn.running_mean for bn in self.normal_head.bns] + variances = [bn.running_var for bn in self.normal_head.bns] + # conv1d output 바로 뒤 bn은 conv1d output 전체 분포 추정 평균, 분산을 담고 있다 + # 두개의 bn => 첫 conv1d output, 두번째 conv1d output 추정 평균, 분산 + + distances = [ + self.get_mahalanobis_distance(normal_feat, anchor, var, ncrops=n) + for normal_feat, anchor, var in zip(normal_feats, anchors, variances) + ] + # list안의 각 distance는 (batch_size, t snippets) 형태 + + if flag == "Train": + + select_normals = [] + select_abnormals = [] + for feat, distance in zip(normal_feats, distances): + select_feat_normal, select_feat_abnormal = self.pos_neg_select( + feat, distance, n + ) + # select_feat_normal, select_feat_abnormal 둘다 (num_select_abnormal, c feature) 형태 + select_normals.append(select_feat_normal[..., None]) + select_abnormals.append(select_feat_abnormal[..., None]) + # 두 정상, 이상 리스트 모두 feature 두개씩 + # 첫번째는 (num_select_abnormal, 512 // 16 feature, 1) + # 두번째는 (num_select_abnormal, 512 // 32 feature, 1) + + bn_results = dict( + anchors=anchors, + variances=variances, + select_normals=select_normals, + select_abnormals=select_abnormals, + ) + # breakpoint() + distance_sum = sum(distances) + + return { + "pre_normal_scores": normal_scores[0 : b // 2], + # classifier 학습에 사용되는 normal loss 계산에는 label 
def MIL(y_pred, batch_size, feature_length, is_transformer=0):
    """MIL ranking loss (Sultani et al. style) with sparsity and smoothness.

    y_pred: flat scores that reshape to (batch_size, 2 * feature_length),
    laid out as [abnormal segments | normal segments] per row.
    Returns ranking loss + 0.00008 * (sparsity + temporal smoothness),
    averaged over the batch.

    FIX: accumulators are created on y_pred's device instead of hard-coded
    .cuda(), so the loss also runs on CPU; unused min/intra terms removed.
    """
    device = y_pred.device
    loss = torch.tensor(0.0, device=device)
    sparsity = torch.tensor(0.0, device=device)
    smooth = torch.tensor(0.0, device=device)

    if is_transformer == 0:
        y_pred = y_pred.view(batch_size, -1)  # e.g. (30*24, 1) -> (30, 24)
    else:
        y_pred = torch.sigmoid(y_pred)

    for i in range(batch_size):
        y_anomaly = y_pred[i, :feature_length]   # abnormal-half scores
        y_normal = y_pred[i, feature_length:]    # normal-half scores

        # Hinge on the max-scoring segment of each bag.
        loss += F.relu(1.0 - torch.max(y_anomaly) + torch.max(y_normal))

        sparsity += torch.sum(y_anomaly) * 0.00008
        smooth += (
            torch.sum(
                (y_pred[i, : feature_length - 1] - y_pred[i, 1:feature_length]) ** 2
            )
            * 0.00008
        )

    return (loss + sparsity + smooth) / batch_size
class NormalLoss(nn.Module):
    """Mean per-video L2 norm of normal-snippet scores (paper sec. 3.4).

    Shrinking this norm drives the predicted scores of normal-video snippets
    toward zero.
    """

    def __init__(self):
        super().__init__()

    def forward(self, normal_scores):
        """normal_scores: (n_batch, T) — snippets of normal videos only."""
        per_video = torch.norm(normal_scores, dim=1, p=2)
        return per_video.mean()


class MPPLoss(nn.Module):
    """Mahalanobis-distance triplet loss over BN statistics (paper sec. 3.2).

    One weighted triplet term per feature level; the BN running mean serves as
    the anchor and the running variance scales the distance.
    """

    def __init__(self):
        super().__init__()
        self.w_triplet = [5, 20]  # per-level weights

    def forward(self, anchors, variances, select_normals, select_abnormals):
        def mahalanobis_distance(mu, x, var):
            return torch.sqrt(torch.sum((x - mu) ** 2 / var, dim=-1))

        losses_triplet = []
        for anchor, var, pos, neg, wt in zip(
            anchors, variances, select_normals, select_abnormals, self.w_triplet
        ):
            criterion = nn.TripletMarginWithDistanceLoss(
                margin=1, distance_function=partial(mahalanobis_distance, var=var)
            )

            B, C, k = pos.shape
            # Flatten (B, C, k) -> (B*k, C) so anchor/pos/neg line up row-wise.
            flat_pos = pos.permute(0, 2, 1).reshape(B * k, -1)
            flat_neg = neg.permute(0, 2, 1).reshape(B * k, -1)
            tiled_anchor = anchor[None, ...].repeat(B * k, 1)
            losses_triplet.append(criterion(tiled_anchor, flat_pos, flat_neg) * wt)

        return sum(losses_triplet)
class LossComputer(nn.Module):
    """Weighted combination of NormalLoss and MPPLoss.

    Returns (total_loss, dict-of-parts) so callers can log each term.
    """

    def __init__(self, w_normal=1.0, w_mpp=1.0):
        super().__init__()
        self.w_normal = w_normal
        self.w_mpp = w_mpp
        self.mppLoss = MPPLoss()
        self.normalLoss = NormalLoss()

    def forward(self, result):
        loss = {}

        # Normal loss uses only normal-video snippet scores (paper sec. 3.4).
        pre_normal_scores = result["pre_normal_scores"]  # (n_batch, T)
        normal_loss = self.normalLoss(pre_normal_scores)
        loss["normal_loss"] = normal_loss

        bn_results = result["bn_results"]
        mpp_loss = self.mppLoss(
            bn_results["anchors"],
            bn_results["variances"],
            bn_results["select_normals"],
            bn_results["select_abnormals"],
        )
        loss["mpp_loss"] = mpp_loss

        loss["total_loss"] = self.w_normal * normal_loss + self.w_mpp * mpp_loss
        return loss["total_loss"], loss


class LSTMAutoencoder(nn.Module):
    """LSTM encoder/decoder ported from a Keras model.

    NOTE(review): forward passes each LSTM's final *hidden state*
    (shape (1, batch, H)) to the next stage rather than the output sequence,
    and repeat_vector pads that hidden state with ReplicationPad1d — confirm
    this matches the original Keras architecture before relying on it.
    """

    def __init__(self, sequence_length, n_features, prediction_time):
        super(LSTMAutoencoder, self).__init__()

        self.sequence_length = sequence_length
        self.n_features = n_features
        self.prediction_time = prediction_time

        # Encoder
        self.encoder = nn.LSTM(input_size=n_features, hidden_size=100, batch_first=True)
        self.encoder2 = nn.LSTM(input_size=100, hidden_size=50, batch_first=True)

        # Repeat vector for prediction_time (second pad is a no-op).
        self.repeat_vector = nn.Sequential(
            nn.ReplicationPad1d(padding=(0, prediction_time - 1)),
            nn.ReplicationPad1d(padding=(0, 0)),  # Adjusted padding
        )

        # Decoder
        self.decoder = nn.LSTM(input_size=50, hidden_size=100, batch_first=True)
        self.decoder2 = nn.LSTM(
            input_size=100, hidden_size=n_features, batch_first=True
        )

    def forward(self, x):
        _, (x, _) = self.encoder(x)
        _, (x, _) = self.encoder2(x)
        x = self.repeat_vector(x)
        _, (x, _) = self.decoder(x)
        _, (x, _) = self.decoder2(x)
        return x


# ---- one-off conversion script: Keras .h5 weights -> PyTorch state dict ----
sequence_length = 20  # Adjust as needed
prediction_time = 1  # Adjust as needed
n_features = 38  # Number of features to predict

# Smoke-test the module with a random input.
x2 = torch.rand((1, sequence_length, n_features))
model = LSTMAutoencoder(sequence_length, n_features, prediction_time)
output = model(x2)

import h5py
import torch
import torch.nn as nn

pytorch_model = LSTMAutoencoder(sequence_length, n_features, prediction_time)

# Load weights from Keras h5 file.
keras_weights_file = "model.h5"
keras_weights = {}


def extract_weights(name, obj):
    # h5py visititems callback: collect every dataset as a torch tensor.
    if isinstance(obj, h5py.Dataset):
        print(f"Dataset: {name}")
        keras_weights[name] = torch.tensor(obj[()])


with h5py.File(keras_weights_file, "r") as hf:
    hf.visititems(extract_weights)

# Copy any name-matched weights into the PyTorch model.
# NOTE(review): Keras dataset names rarely match PyTorch state-dict keys, so
# this copies nothing unless the names were aligned beforehand — verify.
state_dict = pytorch_model.state_dict()
for name, param in state_dict.items():
    if name in keras_weights:
        print(keras_weights[name])
        param.data.copy_(keras_weights[name])

# Save PyTorch model.
torch.save(pytorch_model.state_dict(), "pytorch_model.pth")
class NormalDataset(Dataset):
    """Sliding windows over pose-keypoint CSVs of normal videos.

    Each tracked ID contributes windows of `sequence_length` rows as input and
    the next `prediction_time` rows as target; IDs shorter than
    sequence_length + prediction_time are dropped.  range_table stores
    cumulative window counts so find_real_idx can translate a flat window
    index into a row offset via binary search.
    """

    def __init__(
        self,
        sequence_length=20,
        prediction_time=1,
        root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/csv/normal/val",
    ):
        super().__init__()
        self.sequence_length = sequence_length
        self.prediction_time = prediction_time

        file_list = os.listdir(root)
        df_list = []

        self.length = 0        # total number of windows
        self.range_table = []  # cumulative window counts per ID
        self.real_length = 0   # total number of raw rows
        self.real_idx_table = []

        for i, file_name in enumerate(file_list):
            dat = pd.read_csv(root + "/" + file_name)
            dat.drop(columns=["Frame"], inplace=True)  # Remove the 'Frame' column

            print(f"==>>{i}번째 dat.shape: {dat.shape}")

            # Filtering short IDs per CSV chunk (instead of once on the merged
            # frame) keeps dataset construction fast (~6s instead of ~1min).
            id_counter = pd.Series(dat["ID"]).value_counts(sort=False)
            for id_to_del in id_counter[
                id_counter < sequence_length + prediction_time
            ].index:
                dat.drop(dat[dat["ID"] == id_to_del].index, inplace=True)
            id_counter = pd.Series(dat["ID"]).value_counts(sort=False)

            print(f"==>>{i}번째 처리 후 dat.shape: {dat.shape}")
            assert (
                len(id_counter[id_counter < sequence_length + prediction_time].index)
                == 0
            )

            for count in id_counter:
                cur_id_length = count - sequence_length - prediction_time + 1
                self.range_table.append(self.length + cur_id_length)
                self.real_idx_table.append(self.real_length + count)
                self.length += cur_id_length
                self.real_length += count

            # Suffix IDs with the file index so IDs stay unique after concat.
            dat["ID"] = dat["ID"].astype("str") + f"_{i}"
            df_list.append(dat.copy())

        self.dat = pd.concat(df_list, ignore_index=True)

        id_counter = pd.Series(self.dat["ID"]).value_counts(sort=False)
        assert (
            len(id_counter[id_counter < sequence_length + prediction_time].index) == 0
        )

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        real_idx = self.find_real_idx(idx)

        sequence = self.dat[real_idx : real_idx + self.sequence_length].copy()
        sequence.drop(columns=["ID"], inplace=True)
        # Values come from YOLOv8 xywhn/xyn, i.e. already in [0, 1] — no extra
        # MinMax scaling applied.  Shape: (sequence_length, 38).
        sequence = np.array(sequence)

        target = self.dat[
            real_idx
            + self.sequence_length : real_idx
            + self.sequence_length
            + self.prediction_time
        ].copy()
        target.drop(columns=["ID"], inplace=True)
        target = np.array(target)  # (prediction_time, 38)

        # Normal videos: every target frame is labelled 0.
        label = torch.LongTensor([0 for i in range(self.prediction_time)])

        return (
            torch.from_numpy(sequence).float(),
            torch.from_numpy(target).float(),
            label,
        )

    def find_real_idx(self, idx):
        """Binary-search range_table: window index -> starting row index."""
        start = 0
        end = len(self.range_table) - 1
        while start <= end:
            mid = (start + end) // 2
            if self.range_table[mid] == idx:
                return idx + (
                    (mid + 1) * (self.sequence_length + self.prediction_time - 1)
                )
            if self.range_table[mid] > idx:
                end = mid - 1
            else:
                start = mid + 1

        return idx + (start * (self.sequence_length + self.prediction_time - 1))
class AbnormalDataset(Dataset):
    """Sliding windows over abnormal-video CSVs plus per-frame labels parsed
    from annotation JSONs whose `*_start` / `*_end` tracks mark abnormal
    intervals per video and track id.
    """

    def __init__(
        self,
        sequence_length=20,
        prediction_time=1,
        root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/csv/abnormal/val",
        label_root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/json/abnormal/val",
    ):
        super().__init__()
        self.sequence_length = sequence_length
        self.prediction_time = prediction_time

        file_list = os.listdir(root)
        df_list = []

        self.length = 0        # total number of windows
        self.range_table = []  # cumulative window counts per ID
        self.real_length = 0   # total number of raw rows
        self.real_idx_table = []

        for i, file_name in enumerate(file_list):
            dat = pd.read_csv(root + "/" + file_name)
            # 'Frame'/'Filename' are kept here — __getitem__ needs them for
            # label lookup and drops them from the returned features.

            print(f"==>>{i}번째 dat.shape: {dat.shape}")

            id_counter = pd.Series(dat["ID"]).value_counts(sort=False)
            for id_to_del in id_counter[
                id_counter < sequence_length + prediction_time
            ].index:
                dat.drop(dat[dat["ID"] == id_to_del].index, inplace=True)
            id_counter = pd.Series(dat["ID"]).value_counts(sort=False)

            print(f"==>>{i}번째 처리 후 dat.shape: {dat.shape}")
            assert (
                len(id_counter[id_counter < sequence_length + prediction_time].index)
                == 0
            )

            for count in id_counter:
                cur_id_length = count - sequence_length - prediction_time + 1
                self.range_table.append(self.length + cur_id_length)
                self.real_idx_table.append(self.real_length + count)
                self.length += cur_id_length
                self.real_length += count

            dat["ID"] = dat["ID"].astype("str") + f"_{i}"
            df_list.append(dat.copy())

        self.dat = pd.concat(df_list, ignore_index=True)

        id_counter = pd.Series(self.dat["ID"]).value_counts(sort=False)
        assert (
            len(id_counter[id_counter < sequence_length + prediction_time].index) == 0
        )

        # Ground-truth frame ranges: frame_label[video][track] = [start, end].
        # TODO: handle videos with several start/end pairs per track id.
        self.frame_label = dd(lambda: dd(lambda: [-1, -1]))

        folder_list = os.listdir(label_root)
        for folder in folder_list:
            json_list = os.listdir(label_root + "/" + folder)
            for js in json_list:
                with open(label_root + "/" + folder + "/" + js, "r") as j:
                    json_dict = json.load(j)

                for track in json_dict["annotations"]["track"]:
                    if track["@label"].endswith("_start"):
                        cur_id = track["@id"]
                        self.frame_label[js[:-5]][cur_id][0] = track["box"][0]["@frame"]
                    elif track["@label"].endswith("_end"):
                        cur_id = track["@id"]
                        self.frame_label[js[:-5]][cur_id][1] = track["box"][0]["@frame"]

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        real_idx = self.find_real_idx(idx)

        sequence = self.dat[real_idx : real_idx + self.sequence_length].copy()
        sequence.drop(columns=["ID"], inplace=True)
        sequence.drop(columns=["Frame"], inplace=True)
        sequence.drop(columns=["Filename"], inplace=True)
        sequence = np.array(sequence)  # (sequence_length, 38)

        target = self.dat[
            real_idx
            + self.sequence_length : real_idx
            + self.sequence_length
            + self.prediction_time
        ].copy()
        target_frames = target["Frame"].unique()
        target_filename = target["Filename"].unique()[0].split(".")[0]

        target.drop(columns=["ID"], inplace=True)
        target.drop(columns=["Frame"], inplace=True)
        target.drop(columns=["Filename"], inplace=True)
        target = np.array(target)  # (prediction_time, 38)

        # A target frame is abnormal (1) when it lies inside any [start, end]
        # interval recorded for this video.
        target_labels = []
        for target_frame in target_frames:
            temp = 0
            for cur_id in self.frame_label[target_filename].keys():
                if int(target_frame) >= int(
                    self.frame_label[target_filename][cur_id][0]
                ) and int(target_frame) <= int(
                    self.frame_label[target_filename][cur_id][1]
                ):
                    temp = 1
            target_labels.append(temp)

        target_labels = torch.LongTensor(target_labels)

        return (
            torch.from_numpy(sequence).float(),
            torch.from_numpy(target).float(),
            target_labels,
        )

    def find_real_idx(self, idx):
        """Binary-search range_table: window index -> starting row index."""
        start = 0
        end = len(self.range_table) - 1
        while start <= end:
            mid = (start + end) // 2
            if self.range_table[mid] == idx:
                return idx + (
                    (mid + 1) * (self.sequence_length + self.prediction_time - 1)
                )
            if self.range_table[mid] > idx:
                end = mid - 1
            else:
                start = mid + 1

        return idx + (start * (self.sequence_length + self.prediction_time - 1))
class NormalVMAE(Dataset):
    """VideoMAE features of normal clips, resampled to 12 segments of 710-d.

    model_size selects between "*_base" folders (base) and the rest (small);
    the train/val split is done externally via torch.utils.data.random_split.
    """

    def __init__(
        self,
        model_size="small",
        root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/npy/normal",
    ):
        super().__init__()

        self.path = root

        folder_list = os.listdir(self.path)
        folder_list.sort()

        self.data_list = []

        for folder_name in folder_list:
            if folder_name.endswith("_base") and model_size == "small":
                continue
            elif not folder_name.endswith("_base") and model_size != "small":
                continue
            print(f"==>> {folder_name} 폴더 데이터 로딩 시작")

            folder_path = folder_name + "/"
            data_list = os.listdir(self.path + "/" + folder_path)
            data_list.sort()
            data_list = [folder_path + name for name in data_list]
            self.data_list.extend(data_list)
            print(f"==>> {folder_name} 폴더 데이터 로딩 완료")

    @staticmethod
    def _segments_12(feature):
        """Pad rows to a multiple of 12, then mean-pool to (12, 710).

        Normal-clip features are e.g. (57, 710) or (38, 710); padding repeats
        the final feature row so group means are not diluted.
        """
        if feature.shape[0] % 12 == 0:
            feature_npy = feature
        else:
            count = (feature.shape[0] // 12) + 1
            feature_npy = np.zeros((count * 12, 710))
            feature_npy[: feature.shape[0]] = feature
            # FIX: pad with the last *real* feature row.  The previous code
            # read feature_npy[-1], which is still all-zero at this point, so
            # the tail was filled with zeros instead of the last feature.
            feature_npy[feature.shape[0] :] = feature[-1]
        # (12, chunk, 710) -> (12, 710): match the abnormal clips' layout.
        return np.mean(feature_npy.reshape(12, -1, 710), axis=1)

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        file_name = self.data_list[idx]

        feature = np.load(self.path + "/" + file_name)  # (N, 710)
        feature_npy = self._segments_12(feature)  # (12, 710)

        gts = np.zeros(11)  # normal clips: every segment label is 0

        return (
            torch.from_numpy(feature_npy[:-1, :]).float(),
            torch.from_numpy(gts).float(),
        )
class AbnormalVMAE(Dataset):
    """VideoMAE features of abnormal clips with frame-level ground truth.

    is_train = 1 <- train, 0 <- test.  Labels come from a JSON index that maps
    sample index -> {filename, frames_start, frames_end}.
    """

    def __init__(
        self,
        is_train=1,
        model_size="small",
        root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/npy/abnormal",
        label_root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/json/abnormal",
    ):
        print(f"==>> abnormal 데이터 로딩 시작")
        super().__init__()
        self.is_train = is_train

        if self.is_train == 1:
            self.path = root + "/train/"
            if model_size == "small":
                self.label_path = label_root + "/train/abnormal_train.json"
            else:
                self.label_path = label_root + "/train/abnormal_train_base.json"
        else:
            self.path = root + "/val/"
            if model_size == "small":
                self.label_path = label_root + "/val/abnormal_val.json"
            else:
                self.label_path = label_root + "/val/abnormal_val_base.json"

        with open(self.label_path, "r", encoding="utf-8") as j:
            self.data_list = json.load(j)
        print(f"==>> abnormal 데이터 로딩 완료")

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        file_info = self.data_list[str(idx)]

        feature_npy = np.load(self.path + file_info["filename"])  # (12, 710)

        # 176 = 11 * 16 frame labels; abnormal intervals are set to 1.
        gts = np.zeros(176)
        for start, end in zip(file_info["frames_start"], file_info["frames_end"]):
            gts[int(start) - 1 : min(int(end), 176)] = 1

        if self.is_train:
            # Collapse each 16-frame chunk to a single segment label via max
            # (any abnormal frame makes the segment abnormal); validation
            # keeps the frame-level (176,) vector.
            gts = gts.reshape(11, 16)
            gts = np.max(gts, axis=1)

        return (
            torch.from_numpy(feature_npy[:-1, :]).float(),
            torch.from_numpy(gts).float(),
        )
class NewNormalVMAE(Dataset):
    """UCF-Crime normal-clip VideoMAE features resampled to num_segments.

    is_train = 1 <- train, 0 <- test.  Training returns features only;
    validation also returns an all-zero ground-truth vector.
    """

    def __init__(
        self,
        is_train=1,
        model_size="small",
        root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/npy/UCFCrime/normal",
        num_segments=200,
        l2_norm=False,
    ):
        set_type = "학습" if is_train == 1 else "검증"
        print(f"==>> normal {set_type} 데이터 로딩 시작")
        super().__init__()
        self.is_train = is_train
        self.l2_norm = l2_norm

        if self.is_train == 1:
            self.path = root + "/train/"
        else:
            self.path = root + "/val/"
        self.num_segments = num_segments

        folder_list = os.listdir(self.path)
        folder_list.sort()

        self.data_list = []

        for folder_name in folder_list:
            if folder_name.endswith("_base") and model_size == "small":
                continue
            elif not folder_name.endswith("_base") and model_size != "small":
                continue
            print(f"==>> {folder_name} 폴더 데이터 로딩 시작")

            folder_path = folder_name + "/"
            data_list = os.listdir(self.path + "/" + folder_path)
            data_list.sort()
            data_list = [folder_path + name for name in data_list]
            self.data_list.extend(data_list)
            print(f"==>> {folder_name} 폴더 데이터 로딩 완료")

        print(f"==>> normal {set_type} 데이터 로딩 완료")

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        file_name = self.data_list[idx]

        feature = np.load(self.path + "/" + file_name).astype(np.float32)
        # feature: (video_frames // 16, 710)

        if self.l2_norm:
            feature = normalize(feature, norm="l2")

        feature_npy = np.zeros((self.num_segments, 710)).astype(np.float32)

        # Resample to num_segments boundaries.  If the clip is shorter, rows
        # repeat; if longer, consecutive rows are averaged per bucket.
        # NOTE(review): dtype=np.uint16 overflows for clips with more than
        # 65535 feature rows — confirm inputs stay below that.
        sample_index = np.linspace(
            0, feature.shape[0], self.num_segments + 1, dtype=np.uint16
        )

        for i in range(len(sample_index) - 1):
            if sample_index[i] == sample_index[i + 1]:
                feature_npy[i, :] = feature[sample_index[i], :]
            else:
                feature_npy[i, :] = feature[
                    sample_index[i] : sample_index[i + 1], :
                ].mean(0)

        if self.is_train != 1:
            gts = np.zeros(self.num_segments).astype(np.float32)
            # Normal clips: every segment label is 0.
            return torch.from_numpy(feature_npy), torch.from_numpy(gts)
        else:
            return torch.from_numpy(feature_npy)
54 55 55 55 56 56 56 57 57 57 57 58 58 58 59 + # 59 59 60 60 60 61 61 61 62] 꼴 + # ex2: feature.shape[0]이 62이고, self.num_segments이 11이면 + # sample_index == [ 0, 6, 12, 18, 24, 31, 37, 43, 49, 55, 62] + + for i in range(len(sample_index) - 1): + if sample_index[i] == sample_index[i + 1]: + feature_npy[i, :] = feature[sample_index[i], :] + else: + feature_npy[i, :] = feature[ + sample_index[i] : sample_index[i + 1], : + ].mean(0) + # ex2의 0과 6 => [0:6] => 0~5 feature 6개 평균 + # ex1의 0과 1 => [0:1] => 0~0 feature 1개 평균 => 0번 feature 그대로 + + # feature.shape[0]이 self.num_segments보다 짧으면 같은 feature 반복 + # feature.shape[0]이 self.num_segments보다 길면 평균 내서 self.num_segments개로 줄인다 + + if self.is_train != 1: + gts = np.zeros(self.num_segments).astype(np.float32) + # 정상영상은 전부 정답이 0 + + return torch.from_numpy(feature_npy), torch.from_numpy(gts) + else: + return torch.from_numpy(feature_npy) + + +class NewAbnormalVMAE(Dataset): + """ + is_train = 1 <- train, 0 <- test + """ + + def __init__( + self, + is_train=1, + model_size="small", + root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/npy/UCFCrime/abnormal", + label_root="/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/npy/UCFCrime/test_anomalyv2.txt", + num_segments=200, + gt_thr=0.25, + l2_norm=False, + ): + set_type = "학습" if is_train == 1 else "검증" + print(f"==>> abnormal {set_type} 데이터 로딩 시작") + super().__init__() + self.is_train = is_train + self.l2_norm = l2_norm + + if self.is_train == 1: + self.path = root + "/train/" + else: + self.path = root + "/val/" + self.label_dict = {} + with open(label_root, "r", encoding="utf-8") as f: + for line in f: + # line.split()은 ['Arrest/Arrest039_x264.mp4', '15836', '[7215, 10335]\n'] 이런 형태 + temp = line.split("|") + self.label_dict[temp[0].split("/")[1] + ".npy"] = { + "frame_counts": int(temp[1]), + "frames_gt": temp[2][1:-2].split(","), + } + + self.num_segments = num_segments + self.gt_thr = gt_thr + + folder_list = os.listdir(self.path) + 
folder_list.sort() + + self.data_list = [] + + for folder_name in folder_list: + if folder_name.endswith("_base") and model_size == "small": + continue + elif not folder_name.endswith("_base") and model_size != "small": + continue + print(f"==>> {folder_name} 폴더 데이터 로딩 시작") + + folder_path = folder_name + "/" + data_list = os.listdir(self.path + "/" + folder_path) + data_list.sort() + data_list = [folder_path + name for name in data_list] + self.data_list.extend(data_list) + print(f"==>> {folder_name} 폴더 데이터 로딩 완료") + + print(f"==>> abnormal {set_type} 데이터 로딩 완료") + + def __len__(self): + return len(self.data_list) + + def __getitem__(self, idx): + file_name = self.data_list[idx] + + feature = np.load(self.path + "/" + file_name).astype(np.float32) + # (원본영상 frame 수 // 16,710) + + if self.l2_norm: + feature = normalize(feature, norm="l2") + + feature_npy = np.zeros((self.num_segments, 710)).astype(np.float32) + + sample_index = np.linspace( + 0, feature.shape[0], self.num_segments + 1, dtype=np.uint16 + ) + # ex: feature.shape[0]이 62이고, self.num_segments이 11이면 + # sample_index == [ 0, 6, 12, 18, 24, 31, 37, 43, 49, 55, 62] + + for i in range(len(sample_index) - 1): + if sample_index[i] == sample_index[i + 1]: + feature_npy[i, :] = feature[sample_index[i], :] + else: + feature_npy[i, :] = feature[ + sample_index[i] : sample_index[i + 1], : + ].mean(0) + # ex의 0과 6 => [0:6] => 0~5 feature 6개 평균 + + # feature.shape[0]이 self.num_segments보다 짧으면 같은 feature 반복 + # feature.shape[0]이 self.num_segments보다 길면 평균 내서 self.num_segments개로 줄인다 + + if self.is_train != 1: + label_info = self.label_dict[file_name.split("/")[1]] + frame_counts = label_info["frame_counts"] + frames_gt = label_info["frames_gt"] + + # if frame_counts % 16 == 0: + # gts = np.zeros(frame_counts) + # else: + # gts = np.zeros(frame_counts + (16 - (frame_counts % 16))) + gts = np.zeros(feature.shape[0] * 16) + + # @@@@@@TODO 토막 단위 또는 프레임 단위로 gt 만들기 @@@@@@@@@@@@@@@@@@@@ + + gts[int(frames_gt[0]) - 1 : 
min(int(frames_gt[1]), frame_counts)] = 1 + + if len(frames_gt) == 4: + gts[int(frames_gt[2]) - 1 : min(int(frames_gt[3]), frame_counts)] = 1 + + # # for i in range(12): + # # gts[180 + i] = gts[179] + # # @@ feature extraction할때 마지막 조각에서 frame 개수가 16개가 안되면 마지막 frame을 복사해서 추가함 + + gts = gts.reshape(-1, 16) + + # assert feature.shape[0] == gts.shape[0] + + # gts = np.max(gts, axis=1) + # 16프레임중 1개라도 1이 있으면 True로 취급 + + gts = np.mean(gts, axis=1) + # 마지막에 self.gt_thr 넘는 값만 True로 취급 + # 기본값 0.25 + + gts_npy = np.zeros(self.num_segments).astype(np.float32) + + for i in range(len(sample_index) - 1): + if sample_index[i] == sample_index[i + 1]: + gts_npy[i] = gts[sample_index[i]] + else: + gts_npy[i] = gts[sample_index[i] : sample_index[i + 1]].mean(0) + # ex의 0과 6 => [0:6] => 0~5 gts 6개 평균 + + gts_npy = gts_npy > self.gt_thr + gts_npy = gts_npy.astype(np.float32) + + return torch.from_numpy(feature_npy), torch.from_numpy(gts_npy) + else: + return torch.from_numpy(feature_npy) diff --git a/model/train/train.sh b/model/train/train.sh new file mode 100755 index 0000000..40694e5 --- /dev/null +++ b/model/train/train.sh @@ -0,0 +1,2 @@ +python train_MIL.py --model_size="small" --num_workers=8 --drop_rate=0.3 --thr=0.4 --wandb_mode="online" --wandb_run_name="MIL_nl_onlyMIL_no_extra_small" --patience=1000 --use_extra +python train_MIL.py --model_size="small" --num_workers=8 --drop_rate=0.3 --thr=0.4 --wandb_mode="online" --wandb_run_name="MIL_nl_onlyMIL_in_t1loop_small" --patience=1000 diff --git a/model/train/train_BNWVAD.py b/model/train/train_BNWVAD.py new file mode 100644 index 0000000..5651abd --- /dev/null +++ b/model/train/train_BNWVAD.py @@ -0,0 +1,2465 @@ +import os +import os.path as osp +import random +from argparse import ArgumentParser +from datetime import datetime + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import sklearn.metrics +import torch +import torch.nn as nn +import torch.nn.functional as F +import wandb +from classifier 
import WSAD +from loss import MIL, LossComputer +from shop_dataset import AbnormalVMAE, NormalVMAE +from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve +from sklearn.preprocessing import MinMaxScaler +from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split +from tqdm import tqdm + + +def parse_args(): + parser = ArgumentParser() + + # Conventional args + parser.add_argument( + "--normal_root_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_NORMAL_NPY", + "../datapreprocess/npy/normal", + ), + ) + # 학습 데이터 경로 + parser.add_argument( + "--abnormal_root_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_NPY", + "../datapreprocess/npy/abnormal", + ), + ) + parser.add_argument( + "--json_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_JSON", + "../datapreprocess/json/abnormal", + ), + ) + # abnormal 검증셋 npy, json파일 경로 + parser.add_argument( + "--model_dir", type=str, default=os.environ.get("SM_MODEL_DIR", "../pths") + ) + # pth 파일 저장 경로 + + parser.add_argument("--model_name", type=str, default="BNWVAD") + # import_module로 불러올 model name + + parser.add_argument("--len_feature", type=int, default=710) + # npy파일 feature length + parser.add_argument("--num_segments", type=int, default=11) + # 영상 segment 개수 + + parser.add_argument("--resume_name", type=str, default="") + # resume 파일 이름 + + parser.add_argument("--model_size", type=str, default="small") + # VideoMAEv2 backbone 사이즈 = "small" or "base" + + parser.add_argument("--seed", type=int, default=666) + # random seed + + parser.add_argument( + "--device", default="cuda" if torch.cuda.is_available() else "cpu" + ) + parser.add_argument("--num_workers", type=int, default=0) + + parser.add_argument("--batch_size", type=int, default=30) + # parser.add_argument("--val_batch_size", type=int, default=1) + # parser.add_argument("--val_num_workers", type=int, default=0) + parser.add_argument("--learning_rate", type=float, default=0.0001) + 
parser.add_argument("--weight_decay", type=float, default=0.00005) + parser.add_argument("--max_epoch", type=int, default=1000) + + parser.add_argument("--save_interval", type=int, default=1) + parser.add_argument("--val_interval", type=int, default=1) + parser.add_argument("--w_normal", type=float, default=1.0) + parser.add_argument("--w_mpp", type=float, default=1.0) + parser.add_argument("--thr", type=float, default=0.25) + + parser.add_argument("--ratio_sample", type=float, default=0.2) + parser.add_argument("--ratio_batch", type=float, default=0.4) + + parser.add_argument("--ratios", type=int, nargs="+", default=[16, 32]) + parser.add_argument("--kernel_sizes", type=int, nargs="+", default=[1, 1, 1]) + + parser.add_argument("--patience", type=int, default=100) + + # parser.add_argument("--mp", action="store_false") + # https://stackoverflow.com/questions/60999816/argparse-not-parsing-boolean-arguments + # mixed precision 사용할 지 여부 + + parser.add_argument("--use_extra", action="store_false") + + # parser.add_argument("--wandb_mode", type=str, default="online") + parser.add_argument("--wandb_mode", type=str, default="disabled") + # wandb mode + parser.add_argument("--wandb_run_name", type=str, default="BNWVAD") + # wandb run name + + args = parser.parse_args() + + return args + + +def set_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if use multi-GPU + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + random.seed(seed) + + +def train( + normal_root_dir, + abnormal_root_dir, + json_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + weight_decay, + max_epoch, + val_interval, + save_interval, + w_normal, + w_mpp, + thr, + len_feature, + num_segments, + ratio_sample, + ratio_batch, + ratios, + kernel_sizes, + patience, + resume_name, + seed, + # mp, + 
use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size + + val_batch_size = 1 + val_num_workers = 0 + + # -- early stopping flag + patience = patience + counter = 0 + + # 데이터셋 + dataset = NormalVMAE( + model_size=model_size, + root=normal_root_dir, + ) + + valid_data_size = len(dataset) // 10 + + train_data_size = len(dataset) - valid_data_size + + train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + ) + + normal_train_loader = DataLoader( + dataset=train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + normal_valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=val_batch_size, + shuffle=False, + drop_last=True, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = AbnormalVMAE( + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + abnormal_valid_dataset = AbnormalVMAE( + is_train=0, + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + + abnormal_train_loader = DataLoader( + dataset=abnormal_train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the model + model = WSAD( + input_size=len_feature, + ratio_sample=ratio_sample, + ratio_batch=ratio_batch, + ratios=ratios, + kernel_sizes=kernel_sizes, + ) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, 
f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + # 1e-6 => 0.0010000000474974513 + optimizer = torch.optim.Adam( + model.parameters(), + lr=learning_rate, + betas=(0.9, 0.999), + weight_decay=weight_decay, + ) + # optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MPP_criterion = LossComputer(w_normal=w_normal, w_mpp=w_mpp) + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "MPP", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(abnormal_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_loss = 0 + epoch_n_corrects = 0 + epoch_n_MPP_loss = 0 + epoch_n_norm_loss = 0 + epoch_n_MPP_and_norm_loss = 0 + epoch_n_loss = 0 + epoch_n_n_corrects = 0 + + epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + norm_train_iter = iter(normal_train_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_train_loader), + total=total_batches, + ): + try: + normal_inputs = next(norm_train_iter) + + normal_input, normal_gt = normal_inputs + # (batch_size, 11, 710), (batch_size, 11) + 
abnormal_input, abnormal_gt = abnormal_inputs + # (batch_size, 11, 710), (batch_size, 11) + + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + inputs, gts = torch.cat( + (normal_input, abnormal_input), dim=1 + ), torch.cat((normal_gt, abnormal_gt), dim=1) + # @@@@ BN-WVAD는 정상 영상 먼저 @@@@ + # inputs는 (batch_size, 22, 710), gts는 (batch_size, 22) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + # batch_size = inputs.shape[0] + + inputs = inputs.to(device) + gts = gts.view(-1, 1).to(device) + # (batch_size * 22, 1) + + optimizer.zero_grad() + + pred_result = model(inputs, flag="Train") + # pred_result["pre_normal_scores"]: normal_scores[0 : b // 2], + # pred_result["bn_results"]: bn_results, + # pred_result["normal_scores"]: normal_scores, + # pred_result["scores"]: distance_sum * normal_scores, + + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred_result["normal_scores"]는 (batch_size, 22) + # => pred는 (batch_size * 22, 1) + + loss = criterion(pred_acc, gts) + MPP_and_norm_loss, loss_dict = MPP_criterion(pred_result) + # sum_loss = loss + MPP_and_norm_loss + sum_loss = MPP_and_norm_loss + sum_loss.backward() + + # loss.backward() + optimizer.step() + with torch.no_grad(): + pred_n = pred.view(batch_size, 2, abnormal_input.size(1))[:, 0, :] + pred_a = pred.view(batch_size, 2, abnormal_input.size(1))[:, 1, :] + + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_n_mean = torch.mean(pred_n) + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_n_loss += loss.item() + epoch_n_MPP_loss += loss_dict["mpp_loss"].item() + epoch_n_norm_loss += loss_dict["normal_loss"].item() + epoch_n_MPP_and_norm_loss += MPP_and_norm_loss.item() + + 
epoch_n_n_corrects += corrects / (abnormal_input.size(1) * 2) + + epoch_normal_max += pred_n_max.item() + epoch_normal_mean += pred_n_mean.item() + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + except StopIteration: + if not use_extra: + break + + abnormal_input, abnormal_gt = abnormal_inputs + # (batch_size, 11, 710), (batch_size, 11) + + inputs = abnormal_input.to(device) + # (batch_size, 11, 710) + gts = abnormal_gt.view(-1, 1).to(device) + # (batch_size * 11, 1) + + optimizer.zero_grad() + + # pred_result = model(inputs, flag="Train_extra") + pred_result = model(inputs, flag="Train_extra") + # pred_result는 (batch_size, 11) + + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred는(batch_size * 11, 1) + + loss = criterion(pred_acc, gts) + + loss.backward() + optimizer.step() + with torch.no_grad(): + # print(f"==>> pred.shape: {pred.shape}") + pred_a = pred.view(batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_loss += loss.item() + epoch_n_corrects += corrects / abnormal_input.size(1) + + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + epoch_n_mean_loss = epoch_n_loss / len(normal_train_loader) + epoch_n_mean_MPP_loss = epoch_n_MPP_loss / len(normal_train_loader) + epoch_n_mean_norm_loss = epoch_n_norm_loss / len(normal_train_loader) + epoch_n_mean_MPP_and_norm_loss = epoch_n_MPP_and_norm_loss / len( + normal_train_loader + ) + epoch_n_accuracy = epoch_n_n_corrects / ( + batch_size * (len(normal_train_loader)) + ) + + epoch_mean_normal_max = epoch_normal_max / len(normal_train_loader) + epoch_mean_normal_mean = epoch_normal_mean / len(normal_train_loader) + if use_extra: + epoch_mean_loss = 
(epoch_loss + epoch_n_loss) / total_batches + epoch_accuracy = (epoch_n_corrects + epoch_n_n_corrects) / ( + batch_size * total_batches + ) + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + else: + epoch_mean_loss = (epoch_loss + epoch_n_loss) / len(normal_train_loader) + epoch_accuracy = (epoch_n_corrects + epoch_n_n_corrects) / ( + batch_size * (len(normal_train_loader)) + ) + epoch_mean_abnormal_max = epoch_abnormal_max / len(normal_train_loader) + epoch_mean_abnormal_mean = epoch_abnormal_mean / len(normal_train_loader) + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print( + f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)} n_loss: {round(epoch_n_mean_loss,4)}" + ) + print( + f"MPP_loss: {round(epoch_n_mean_MPP_loss,4)} norm_loss: {round(epoch_n_mean_norm_loss,4)} MPP+norm_loss: {round(epoch_n_mean_MPP_and_norm_loss,4)}" + ) + print(f"accuracy: {epoch_accuracy:.2f} n_accuracy: {epoch_n_accuracy:.2f}") + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 loss를 출력하고 best model을 저장합니다. 
+ if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_n_loss = 0 + # total_n_MPP_loss = 0 + # total_n_norm_loss = 0 + # total_n_MPP_and_norm_loss = 0 + total_n_n_corrects = 0 + + total_n_fpr = 0 + total_n_tpr = 0 + total_n_bthr = 0 + total_n_auc = 0 + total_n_ap = 0 + + total_loss = 0 + total_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + error_n_count = 0 + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + try: + normal_inputs = next(norm_valid_iter) + + normal_input, normal_gt = normal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 11) + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 176) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 11) + + inputs = torch.cat((normal_input, abnormal_input), dim=1) + gts = torch.cat((normal_gt, abnormal_gt2), dim=1) + # inputs는 (val_batch_size, 22, 710), gts는 (val_batch_size, 22) + + inputs = inputs.to(device) + # (val_batch_size, 22, 710) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 22, 1) + + pred_result = model(inputs, flag="Eval_MPP") + # pred_result["pre_normal_scores"]: normal_scores[0 : b // 2], + # pred_result["bn_results"]: bn_results, + # pred_result["scores"]: distance_sum * normal_scores, + # breakpoint() + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred는(batch_size * 11, 1) + + val_loss = criterion(pred_acc, gts) + # if val_loss > 2: + # 
print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + # val_MPP_and_norm_loss, val_loss_dict = MPP_criterion(pred_result) + + pred_n = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 0, : + ] + pred_a = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 1, : + ] + + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_n_mean = torch.mean(pred_n) + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + + pred_normal_np = np.zeros(abnormal_gt.size(1)) + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_normal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[ + abnormal_input.size(1) + j + ] + + pred_np = np.concatenate( + (pred_normal_np, pred_abnormal_np), axis=0 + ) + + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + # normal_gt = np.zeros_like(abnormal_gt2) + normal_gt = np.zeros_like(abnormal_gt) + # gt_np = np.concatenate((abnormal_gt2, normal_gt), axis=0) + gt_np = np.concatenate((normal_gt, abnormal_gt), axis=0) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gt_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gt_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > thr + TP_and_FN = pred_positive[gt_np > 0.9] + FP_and_TN = 
pred_positive[gt_np < 0.1] + + total_n_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_n_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_n_bthr += best_thr if diff_idx != 0 else 1 + + total_n_auc += auc + total_n_ap += ap + total_n_n_corrects += corrects / ( + abnormal_input.size(1) * 2 + ) + total_n_loss += val_loss.item() + # total_n_MPP_loss += val_loss_dict["mpp_loss"].item() + # total_n_norm_loss += val_loss_dict["normal_loss"].item() + # total_n_MPP_and_norm_loss += val_MPP_and_norm_loss.item() + + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." + # ) + # total_auc += 0 + error_n_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + except StopIteration: + # if not use_extra: + # break + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 176) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 11) + + inputs = abnormal_input.to(device) + # (val_batch_size, 11, 710) + gts = abnormal_gt2.view(-1, 1).to(device) + # (val_batch_size * 11, 1) + + pred_result = model(inputs, flag="Eval_MPP") + # pred_result는 (batch_size, 11) + + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred = pred_result.view(-1, 1) + # pred는(batch_size * 11, 1) + + val_loss = criterion(pred_acc, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + pred_a = pred.view(val_batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + 
gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + + try: + # auc = roc_auc_score(y_true=abnormal_gt, y_score=pred_abnormal_np) + # auc = roc_auc_score(y_true=abnormal_gt2, y_score=pred) + + fpr, tpr, cut = roc_curve( + y_true=abnormal_gt, y_score=pred_abnormal_np + ) + precision, recall, cut2 = precision_recall_curve( + abnormal_gt, pred_abnormal_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_abnormal_np > thr + TP_and_FN = pred_positive[abnormal_gt > 0.9] + FP_and_TN = pred_positive[abnormal_gt < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / abnormal_input.size(1) + # normal + abnormal 24개와 다르게 abnormal 12개만 있음 -> /12 => 2/24 + total_loss += val_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + # total_auc += 0 + error_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + + val_n_mean_loss = total_n_loss / ( + len(normal_valid_loader) - error_n_count + ) + # val_n_mean_MPP_loss = total_n_MPP_loss / (len(normal_valid_loader) - error_n_count) + # val_n_mean_norm_loss = total_n_norm_loss / (len(normal_valid_loader) - error_n_count) + # val_n_mean_MPP_and_norm_loss = total_n_MPP_and_norm_loss / ( + # len(normal_valid_loader) - error_n_count + # ) + + val_n_fpr = total_n_fpr / ((len(normal_valid_loader) - error_n_count)) + val_n_tpr = total_n_tpr / ((len(normal_valid_loader) - error_n_count)) + val_n_bthr = total_n_bthr / ((len(normal_valid_loader) - error_n_count)) + val_n_auc = total_n_auc / (len(normal_valid_loader) - error_n_count) + val_n_ap = total_n_ap / (len(normal_valid_loader) - error_n_count) + + val_n_accuracy = total_n_n_corrects / ( + (len(normal_valid_loader) - error_n_count) + ) + + val_mean_loss = (total_loss + total_n_loss) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + val_fpr = (total_fpr + total_n_fpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_tpr = (total_tpr + total_n_tpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_bthr = (total_bthr + total_n_bthr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_auc = (total_auc + total_n_auc) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_ap = (total_ap + total_n_ap) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_accuracy = (total_n_corrects + total_n_n_corrects) / ( + (len(abnormal_valid_loader) - error_n_count - error_count) + ) + # for loop 한번에 abnormal 12, normal 12해서 24개 정답 확인 + + val_mean_normal_max = total_normal_max / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + 
len(abnormal_valid_loader) - error_n_count - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + if best_loss > val_mean_loss: + print( + f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + print( + f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_loss": epoch_mean_loss, + "train_accuracy": epoch_accuracy, + "train_n_loss": epoch_n_mean_loss, + "train_n_MPP_loss": epoch_n_mean_MPP_loss, + "train_n_norm_loss": epoch_n_mean_norm_loss, + "train_n_MPP+norm_loss": epoch_n_mean_MPP_and_norm_loss, + "train_n_accuracy": epoch_n_accuracy, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_n_loss": val_n_mean_loss, + # "valid_n_MPP_loss": val_n_mean_MPP_loss, + # "valid_n_norm_loss": val_n_mean_norm_loss, + # "valid_n_MPP+norm_loss": val_n_mean_MPP_and_norm_loss, + "valid_n_fpr": val_n_fpr, + "valid_n_tpr": val_n_tpr, + "valid_n_bthr": val_n_bthr, + "valid_n_auc": val_n_auc, + "valid_n_ap": val_n_ap, + "valid_n_accuracy": val_n_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)} valid_n_loss: {round(val_n_mean_loss,4)}" + ) + # print( + # f"valid_n_MPP_loss: {round(val_n_mean_MPP_loss,4)} valid_n_norm_loss: {round(val_n_mean_norm_loss,4)} valid_n_MPP+norm_loss: 
{round(val_n_mean_MPP_and_norm_loss,4)}" + # ) + print(f"valid_fpr: {val_fpr} valid_n_fpr: {val_n_fpr}") + print(f"valid_tpr: {val_tpr} valid_n_tpr: {val_n_tpr}") + print(f"valid_bthr: {val_bthr} valid_n_bthr: {val_n_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_n_auc: {val_n_auc:.4f}\nvalid_ap: {val_ap:.4f} valid_n_ap: {val_n_ap:.4f}\nvalid_accuracy: {val_accuracy:.2f} valid_n_accuracy: {val_n_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: {val_mean_abnormal_max} val_abnormal_mean: {val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(f"==>> total time: {total_time}") + + +# @@TODO: BNWVAD에 맞게 수정 필요 +def train2( + normal_root_dir, + abnormal_root_dir, + json_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + weight_decay, + max_epoch, + val_interval, + save_interval, + w_normal, + w_mpp, + thr, + len_feature, + num_segments, + ratio_sample, + ratio_batch, + ratios, + kernel_sizes, + patience, + resume_name, + seed, + # mp, + use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size + + val_batch_size = 1 + val_num_workers = 0 + + # -- early stopping flag + patience = patience + counter = 0 + + # 데이터셋 + dataset = NormalVMAE( + model_size=model_size, + root=normal_root_dir, + ) + + valid_data_size = len(dataset) // 10 + + train_data_size = len(dataset) - valid_data_size + + train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + 
) + + # normal_train_loader = DataLoader( + # dataset=train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=num_workers + # ) + + normal_valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=val_batch_size, + shuffle=False, + drop_last=True, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = AbnormalVMAE( + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + abnormal_valid_dataset = AbnormalVMAE( + is_train=0, + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + + # abnormal_train_loader = DataLoader( + # dataset=abnormal_train_dataset, + # batch_size=batch_size, + # shuffle=True, + # drop_last=True, + # num_workers=num_workers, + # ) + + concat_trainset = ConcatDataset([train_dataset, abnormal_train_dataset]) + + concat_train_loader = DataLoader( + dataset=concat_trainset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the LSTM autoencoder model + model = WSAD( + input_size=len_feature, + ratio_sample=ratio_sample, + ratio_batch=ratio_batch, + ratios=ratios, + kernel_sizes=kernel_sizes, + ) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + # 1e-6 => 0.0010000000474974513 + # optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + # 
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + optimizer = torch.optim.Adam( + model.parameters(), + lr=learning_rate, + betas=(0.9, 0.999), + weight_decay=weight_decay, + ) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MPP_criterion = LossComputer(w_normal=w_normal, w_mpp=w_mpp) + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "MPP", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(concat_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_loss = 0 + epoch_n_corrects = 0 + + epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + nan_count = 0 + + for step, inputs in tqdm( + enumerate(concat_train_loader), + total=total_batches, + ): + inp, gts = inputs + # (batch_size, 11, 710), (batch_size, 11) + + num_segs = inp.size(1) + + inp = inp.to(device) + # (batch_size, 11, 710) + gts = gts.view(-1, 1).to(device) + # (batch_size * 11, 1) + + optimizer.zero_grad() + + pred_result = model(inp, flag="Train_extra") + + pred_acc = pred_result["normal_scores"].view(-1, 1) + # pred = pred_result["scores"].view(-1, 1) + pred = pred_acc + # pred는 (batch_size * 11, 1) + + loss = criterion(pred_acc, gts) + + loss.backward() + optimizer.step() + with torch.no_grad(): + # print(f"==>> pred.shape: {pred.shape}") + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + 
pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_loss += loss.item() + epoch_n_corrects += corrects / (num_segs * batch_size) + + check = gts.view(batch_size, num_segs) != 0 + check = torch.sum(check, dim=1) + + check_a = check != 0 + check_n = check_a == False + + pred_reshape = pred.view(batch_size, num_segs) + + pred_a_max = torch.mean(torch.max(pred_reshape[check_a], dim=-1)[0]) + + pred_a_mean = torch.mean(pred_reshape[check_a]) + + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + if torch.sum(check_n) != 0: + pred_n_max = torch.mean(torch.max(pred_reshape[check_n], dim=-1)[0]) + + pred_n_mean = torch.mean(pred_reshape[check_n]) + + epoch_normal_max += pred_n_max.item() + epoch_normal_mean += pred_n_mean.item() + else: + nan_count += 1 + + epoch_mean_loss = epoch_loss / total_batches + epoch_accuracy = epoch_n_corrects / total_batches + + epoch_mean_normal_max = epoch_normal_max / (total_batches - nan_count) + epoch_mean_normal_mean = epoch_normal_mean / (total_batches - nan_count) + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print( + f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)}" + ) + print(f"accuracy: {epoch_accuracy:.2f}") + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + 
"scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 loss를 출력하고 best model을 저장합니다. + if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_n_loss = 0 + # total_n_MPP_loss = 0 + # total_n_norm_loss = 0 + # total_n_MPP_and_norm_loss = 0 + total_n_n_corrects = 0 + + total_n_fpr = 0 + total_n_tpr = 0 + total_n_bthr = 0 + total_n_auc = 0 + total_n_ap = 0 + + total_loss = 0 + total_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + error_n_count = 0 + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + try: + normal_inputs = next(norm_valid_iter) + + normal_input, normal_gt = normal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 11) + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 176) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 11) + + inputs = torch.cat((normal_input, abnormal_input), dim=1) + gts = torch.cat((normal_gt, abnormal_gt2), dim=1) + # inputs는 (val_batch_size, 22, 710), gts는 (val_batch_size, 22) + + inputs = inputs.to(device) + # (val_batch_size, 22, 710) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 22, 1) + + pred_result = model(inputs, flag="Eval_MPP") + # pred_result["pre_normal_scores"]: normal_scores[0 : b // 2], + # pred_result["bn_results"]: bn_results, + # pred_result["scores"]: distance_sum * normal_scores, + # breakpoint() + 
pred_acc = pred_result["normal_scores"].view(-1, 1) + # pred = pred_result["scores"].view(-1, 1) + pred = pred_acc + # pred는(batch_size * 11, 1) + + val_loss = criterion(pred_acc, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + # val_MPP_and_norm_loss, val_loss_dict = MPP_criterion(pred_result) + + pred_n = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 0, : + ] + pred_a = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 1, : + ] + + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_n_mean = torch.mean(pred_n) + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + + pred_normal_np = np.zeros(abnormal_gt.size(1)) + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_normal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[ + abnormal_input.size(1) + j + ] + + pred_np = np.concatenate( + (pred_normal_np, pred_abnormal_np), axis=0 + ) + + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + # normal_gt = np.zeros_like(abnormal_gt2) + normal_gt = np.zeros_like(abnormal_gt) + # gt_np = np.concatenate((abnormal_gt2, normal_gt), axis=0) + gt_np = np.concatenate((normal_gt, abnormal_gt), axis=0) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gt_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gt_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = 
sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > thr + TP_and_FN = pred_positive[gt_np > 0.9] + FP_and_TN = pred_positive[gt_np < 0.1] + + total_n_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_n_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_n_bthr += best_thr if diff_idx != 0 else 1 + + total_n_auc += auc + total_n_ap += ap + total_n_n_corrects += corrects / ( + abnormal_input.size(1) * 2 + ) + total_n_loss += val_loss.item() + # total_n_MPP_loss += val_loss_dict["mpp_loss"].item() + # total_n_norm_loss += val_loss_dict["normal_loss"].item() + # total_n_MPP_and_norm_loss += val_MPP_and_norm_loss.item() + + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + # total_auc += 0 + error_n_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + except StopIteration: + # if not use_extra: + # break + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 176) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 11) + + inputs = abnormal_input.to(device) + # (val_batch_size, 11, 710) + gts = abnormal_gt2.view(-1, 1).to(device) + # (val_batch_size * 11, 1) + + pred_result = model(inputs, flag="Eval_MPP") + # pred_result는 (batch_size, 11) + + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + pred = pred_acc + # pred = pred_result.view(-1, 1) + # pred는(batch_size * 11, 1) + + val_loss = criterion(pred_acc, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + pred_a = pred.view(val_batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + + try: + # auc = roc_auc_score(y_true=abnormal_gt, y_score=pred_abnormal_np) + # auc = roc_auc_score(y_true=abnormal_gt2, y_score=pred) + + fpr, tpr, cut = roc_curve( + y_true=abnormal_gt, y_score=pred_abnormal_np + ) + precision, recall, cut2 = precision_recall_curve( + 
abnormal_gt, pred_abnormal_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_abnormal_np > thr + TP_and_FN = pred_positive[abnormal_gt > 0.9] + FP_and_TN = pred_positive[abnormal_gt < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / abnormal_input.size(1) + # normal + abnormal 24개와 다르게 abnormal 12개만 있음 -> /12 => 2/24 + total_loss += val_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." + # ) + # total_auc += 0 + error_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + + val_n_mean_loss = total_n_loss / ( + len(normal_valid_loader) - error_n_count + ) + # val_n_mean_MPP_loss = total_n_MPP_loss / (len(normal_valid_loader) - error_n_count) + # val_n_mean_norm_loss = total_n_norm_loss / (len(normal_valid_loader) - error_n_count) + # val_n_mean_MPP_and_norm_loss = total_n_MPP_and_norm_loss / ( + # len(normal_valid_loader) - error_n_count + # ) + + val_n_fpr = total_n_fpr / ((len(normal_valid_loader) - error_n_count)) + val_n_tpr = total_n_tpr / ((len(normal_valid_loader) - error_n_count)) + val_n_bthr = total_n_bthr / ((len(normal_valid_loader) - error_n_count)) + val_n_auc = total_n_auc / (len(normal_valid_loader) - error_n_count) + val_n_ap = total_n_ap / (len(normal_valid_loader) - error_n_count) + + val_n_accuracy = total_n_n_corrects / ( + (len(normal_valid_loader) - error_n_count) + ) + + val_mean_loss = (total_loss + total_n_loss) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + val_fpr = (total_fpr + total_n_fpr) / ( + len(abnormal_valid_loader) - 
error_n_count - error_count + ) + val_tpr = (total_tpr + total_n_tpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_bthr = (total_bthr + total_n_bthr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_auc = (total_auc + total_n_auc) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_ap = (total_ap + total_n_ap) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_accuracy = (total_n_corrects + total_n_n_corrects) / ( + (len(abnormal_valid_loader) - error_n_count - error_count) + ) + # for loop 한번에 abnormal 12, normal 12해서 24개 정답 확인 + + val_mean_normal_max = total_normal_max / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + if best_loss > val_mean_loss: + print( + f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + print( + f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_loss": epoch_mean_loss, + "train_accuracy": epoch_accuracy, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_n_loss": val_n_mean_loss, + # "valid_n_MPP_loss": val_n_mean_MPP_loss, + # "valid_n_norm_loss": val_n_mean_norm_loss, + # "valid_n_MPP+norm_loss": val_n_mean_MPP_and_norm_loss, + "valid_n_fpr": val_n_fpr, + "valid_n_tpr": val_n_tpr, + "valid_n_bthr": val_n_bthr, + "valid_n_auc": val_n_auc, + "valid_n_ap": val_n_ap, + "valid_n_accuracy": val_n_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + wandb.log(new_wandb_metric_dict) + + 
scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)} valid_n_loss: {round(val_n_mean_loss,4)}" + ) + # print( + # f"valid_n_MPP_loss: {round(val_n_mean_MPP_loss,4)} valid_n_norm_loss: {round(val_n_mean_norm_loss,4)} valid_n_MPP+norm_loss: {round(val_n_mean_MPP_and_norm_loss,4)}" + # ) + print(f"valid_fpr: {val_fpr} valid_n_fpr: {val_n_fpr}") + print(f"valid_tpr: {val_tpr} valid_n_tpr: {val_n_tpr}") + print(f"valid_bthr: {val_bthr} valid_n_bthr: {val_n_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_n_auc: {val_n_auc:.4f}\nvalid_ap: {val_ap:.4f} valid_n_ap: {val_n_ap:.4f}\nvalid_accuracy: {val_accuracy:.2f} valid_n_accuracy: {val_n_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: {val_mean_abnormal_max} val_abnormal_mean: {val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(f"==>> total time: {total_time}") + + +def train3( + normal_root_dir, + abnormal_root_dir, + json_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + weight_decay, + max_epoch, + val_interval, + save_interval, + w_normal, + w_mpp, + thr, + len_feature, + num_segments, + ratio_sample, + ratio_batch, + ratios, + kernel_sizes, + patience, + resume_name, + seed, + # mp, + use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size + + 
val_batch_size = 1 + val_num_workers = 0 + + # -- early stopping flag + patience = patience + counter = 0 + + # 데이터셋 + dataset = NormalVMAE( + model_size=model_size, + root=normal_root_dir, + ) + + valid_data_size = len(dataset) // 10 + + train_data_size = len(dataset) - valid_data_size + + train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + ) + + normal_train_loader = DataLoader( + dataset=train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + normal_valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=val_batch_size, + shuffle=False, + drop_last=True, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = AbnormalVMAE( + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + abnormal_valid_dataset = AbnormalVMAE( + is_train=0, + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + + abnormal_train_loader = DataLoader( + dataset=abnormal_train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the model + model = WSAD( + input_size=len_feature, + ratio_sample=ratio_sample, + ratio_batch=ratio_batch, + ratios=ratios, + kernel_sizes=kernel_sizes, + ) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + # 1e-6 => 
0.0010000000474974513 + optimizer = torch.optim.Adam( + model.parameters(), + lr=learning_rate, + betas=(0.9, 0.999), + weight_decay=weight_decay, + ) + # optimizer = torch.optim.AdamW( + # model.parameters(), lr=learning_rate, betas=(0.9, 0.999), weight_decay=weight_decay + # ) + # optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MPP_criterion = LossComputer(w_normal=w_normal, w_mpp=w_mpp) + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "MPP", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(abnormal_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_loss = 0 + epoch_n_corrects = 0 + epoch_MPP_loss = 0 + epoch_norm_loss = 0 + epoch_MPP_and_norm_loss = 0 + + epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_train_loader), + total=total_batches, + ): + if step % len(normal_train_loader) == 0: + norm_train_iter = iter(normal_train_loader) + # 중복 추출하더라도 정상, 이상 영상 1대1 대응 loop 끝까지 유지 + + normal_inputs = next(norm_train_iter) + + normal_input, normal_gt = normal_inputs + # (batch_size, 11, 710), (batch_size, 11) + abnormal_input, abnormal_gt = abnormal_inputs + # (batch_size, 11, 710), (batch_size, 11) + + # 
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + inputs, gts = torch.cat((normal_input, abnormal_input), dim=1), torch.cat( + (normal_gt, abnormal_gt), dim=1 + ) + # @@@@ BN-WVAD는 정상 영상 먼저 @@@@ + # inputs는 (batch_size, 22, 710), gts는 (batch_size, 22) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + # batch_size = inputs.shape[0] + + inputs = inputs.to(device) + gts = gts.view(-1, 1).to(device) + # (batch_size * 22, 1) + + optimizer.zero_grad() + + pred_result = model(inputs, flag="Train") + # pred_result["pre_normal_scores"]: normal_scores[0 : b // 2], + # pred_result["bn_results"]: bn_results, + # pred_result["normal_scores"]: normal_scores, + # pred_result["scores"]: distance_sum * normal_scores, + + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred_result["normal_scores"]는 (batch_size, 22) + # => pred는 (batch_size * 22, 1) + + loss = criterion(pred_acc, gts) + MPP_and_norm_loss, loss_dict = MPP_criterion(pred_result) + # sum_loss = loss + MPP_and_norm_loss + sum_loss = MPP_and_norm_loss + sum_loss.backward() + + # loss.backward() + optimizer.step() + with torch.no_grad(): + pred_n = pred.view(batch_size, 2, abnormal_input.size(1))[:, 0, :] + pred_a = pred.view(batch_size, 2, abnormal_input.size(1))[:, 1, :] + + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_n_mean = torch.mean(pred_n) + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_loss += loss.item() + epoch_MPP_loss += loss_dict["mpp_loss"].item() + epoch_norm_loss += loss_dict["normal_loss"].item() + epoch_MPP_and_norm_loss += MPP_and_norm_loss.item() + + epoch_n_corrects += corrects / (abnormal_input.size(1) * 2) + + epoch_normal_max += pred_n_max.item() + 
epoch_normal_mean += pred_n_mean.item() + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + epoch_mean_loss = epoch_loss / total_batches + epoch_mean_MPP_loss = epoch_MPP_loss / total_batches + epoch_mean_norm_loss = epoch_norm_loss / total_batches + epoch_mean_MPP_and_norm_loss = epoch_MPP_and_norm_loss / total_batches + epoch_accuracy = epoch_n_corrects / (batch_size * total_batches) + + epoch_mean_normal_max = epoch_normal_max / total_batches + epoch_mean_normal_mean = epoch_normal_mean / total_batches + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print( + f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)}" + ) + print( + f"MPP_loss: {round(epoch_mean_MPP_loss,4)} norm_loss: {round(epoch_mean_norm_loss,4)} MPP+norm_loss: {round(epoch_mean_MPP_and_norm_loss,4)}" + ) + print(f"accuracy: {epoch_accuracy:.2f}") + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 loss를 출력하고 best model을 저장합니다. 
+ if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_n_loss = 0 + # total_n_MPP_loss = 0 + # total_n_norm_loss = 0 + # total_n_MPP_and_norm_loss = 0 + total_n_n_corrects = 0 + + total_n_fpr = 0 + total_n_tpr = 0 + total_n_bthr = 0 + total_n_auc = 0 + total_n_ap = 0 + + total_loss = 0 + total_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + error_n_count = 0 + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + try: + normal_inputs = next(norm_valid_iter) + + normal_input, normal_gt = normal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 11) + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 176) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 11) + + inputs = torch.cat((normal_input, abnormal_input), dim=1) + gts = torch.cat((normal_gt, abnormal_gt2), dim=1) + # inputs는 (val_batch_size, 22, 710), gts는 (val_batch_size, 22) + + inputs = inputs.to(device) + # (val_batch_size, 22, 710) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 22, 1) + + pred_result = model(inputs, flag="Eval_MPP") + # pred_result["pre_normal_scores"]: normal_scores[0 : b // 2], + # pred_result["bn_results"]: bn_results, + # pred_result["scores"]: distance_sum * normal_scores, + # breakpoint() + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred는(batch_size * 11, 1) + + val_loss = criterion(pred_acc, gts) + # if val_loss > 2: + # 
print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + # val_MPP_and_norm_loss, val_loss_dict = MPP_criterion(pred_result) + + pred_n = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 0, : + ] + pred_a = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 1, : + ] + + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_n_mean = torch.mean(pred_n) + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + + pred_normal_np = np.zeros(abnormal_gt.size(1)) + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_normal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[ + abnormal_input.size(1) + j + ] + + pred_np = np.concatenate( + (pred_normal_np, pred_abnormal_np), axis=0 + ) + + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + # normal_gt = np.zeros_like(abnormal_gt2) + normal_gt = np.zeros_like(abnormal_gt) + # gt_np = np.concatenate((abnormal_gt2, normal_gt), axis=0) + gt_np = np.concatenate((normal_gt, abnormal_gt), axis=0) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gt_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gt_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > thr + TP_and_FN = pred_positive[gt_np > 0.9] + FP_and_TN = 
pred_positive[gt_np < 0.1] + + total_n_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_n_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_n_bthr += best_thr if diff_idx != 0 else 1 + + total_n_auc += auc + total_n_ap += ap + total_n_n_corrects += corrects / ( + abnormal_input.size(1) * 2 + ) + total_n_loss += val_loss.item() + # total_n_MPP_loss += val_loss_dict["mpp_loss"].item() + # total_n_norm_loss += val_loss_dict["normal_loss"].item() + # total_n_MPP_and_norm_loss += val_MPP_and_norm_loss.item() + + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." + # ) + # total_auc += 0 + error_n_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + except StopIteration: + # if not use_extra: + # break + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 11, 710), (val_batch_size, 176) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 11) + + inputs = abnormal_input.to(device) + # (val_batch_size, 11, 710) + gts = abnormal_gt2.view(-1, 1).to(device) + # (val_batch_size * 11, 1) + + pred_result = model(inputs, flag="Eval_MPP") + # pred_result는 (batch_size, 11) + + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred = pred_result.view(-1, 1) + # pred는(batch_size * 11, 1) + + val_loss = criterion(pred_acc, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + pred_a = pred.view(val_batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred_acc > thr + 
gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + + try: + # auc = roc_auc_score(y_true=abnormal_gt, y_score=pred_abnormal_np) + # auc = roc_auc_score(y_true=abnormal_gt2, y_score=pred) + + fpr, tpr, cut = roc_curve( + y_true=abnormal_gt, y_score=pred_abnormal_np + ) + precision, recall, cut2 = precision_recall_curve( + abnormal_gt, pred_abnormal_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_abnormal_np > thr + TP_and_FN = pred_positive[abnormal_gt > 0.9] + FP_and_TN = pred_positive[abnormal_gt < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / abnormal_input.size(1) + # normal + abnormal 24개와 다르게 abnormal 12개만 있음 -> /12 => 2/24 + total_loss += val_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + # total_auc += 0 + error_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + + val_n_mean_loss = total_n_loss / ( + len(normal_valid_loader) - error_n_count + ) + # val_n_mean_MPP_loss = total_n_MPP_loss / (len(normal_valid_loader) - error_n_count) + # val_n_mean_norm_loss = total_n_norm_loss / (len(normal_valid_loader) - error_n_count) + # val_n_mean_MPP_and_norm_loss = total_n_MPP_and_norm_loss / ( + # len(normal_valid_loader) - error_n_count + # ) + + val_n_fpr = total_n_fpr / ((len(normal_valid_loader) - error_n_count)) + val_n_tpr = total_n_tpr / ((len(normal_valid_loader) - error_n_count)) + val_n_bthr = total_n_bthr / ((len(normal_valid_loader) - error_n_count)) + val_n_auc = total_n_auc / (len(normal_valid_loader) - error_n_count) + val_n_ap = total_n_ap / (len(normal_valid_loader) - error_n_count) + + val_n_accuracy = total_n_n_corrects / ( + (len(normal_valid_loader) - error_n_count) + ) + + val_mean_loss = (total_loss + total_n_loss) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + val_fpr = (total_fpr + total_n_fpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_tpr = (total_tpr + total_n_tpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_bthr = (total_bthr + total_n_bthr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_auc = (total_auc + total_n_auc) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_ap = (total_ap + total_n_ap) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_accuracy = (total_n_corrects + total_n_n_corrects) / ( + (len(abnormal_valid_loader) - error_n_count - error_count) + ) + # for loop 한번에 abnormal 12, normal 12해서 24개 정답 확인 + + val_mean_normal_max = total_normal_max / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + 
len(abnormal_valid_loader) - error_n_count - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + if best_loss > val_mean_loss: + print( + f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + print( + f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_loss": epoch_mean_loss, + "train_accuracy": epoch_accuracy, + "train_MPP_loss": epoch_mean_MPP_loss, + "train_norm_loss": epoch_mean_norm_loss, + "train_MPP+norm_loss": epoch_mean_MPP_and_norm_loss, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_n_loss": val_n_mean_loss, + # "valid_n_MPP_loss": val_n_mean_MPP_loss, + # "valid_n_norm_loss": val_n_mean_norm_loss, + # "valid_n_MPP+norm_loss": val_n_mean_MPP_and_norm_loss, + "valid_n_fpr": val_n_fpr, + "valid_n_tpr": val_n_tpr, + "valid_n_bthr": val_n_bthr, + "valid_n_auc": val_n_auc, + "valid_n_ap": val_n_ap, + "valid_n_accuracy": val_n_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)} valid_n_loss: {round(val_n_mean_loss,4)}" + ) + # print( + # f"valid_n_MPP_loss: {round(val_n_mean_MPP_loss,4)} valid_n_norm_loss: {round(val_n_mean_norm_loss,4)} valid_n_MPP+norm_loss: {round(val_n_mean_MPP_and_norm_loss,4)}" + # ) + print(f"valid_fpr: {val_fpr} valid_n_fpr: {val_n_fpr}") + 
print(f"valid_tpr: {val_tpr} valid_n_tpr: {val_n_tpr}") + print(f"valid_bthr: {val_bthr} valid_n_bthr: {val_n_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_n_auc: {val_n_auc:.4f}\nvalid_ap: {val_ap:.4f} valid_n_ap: {val_n_ap:.4f}\nvalid_accuracy: {val_accuracy:.2f} valid_n_accuracy: {val_n_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: {val_mean_abnormal_max} val_abnormal_mean: {val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(f"==>> total time: {total_time}") + + +def main(args): + train3(**args.__dict__) + + +if __name__ == "__main__": + args = parse_args() + + main(args) diff --git a/model/train/train_LSTM.py b/model/train/train_LSTM.py new file mode 100644 index 0000000..782f1f9 --- /dev/null +++ b/model/train/train_LSTM.py @@ -0,0 +1,486 @@ +import os +import os.path as osp +import random +from argparse import ArgumentParser +from datetime import datetime + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +import torch.nn.functional as F +import wandb +from classifier import LSTMAutoencoder +from shop_dataset import AbnormalDataset, NormalDataset +from sklearn.metrics import roc_auc_score +from sklearn.preprocessing import MinMaxScaler +from torch.utils.data import DataLoader, Dataset, random_split +from tqdm import tqdm + + +def parse_args(): + parser = ArgumentParser() + + # Conventional args + parser.add_argument( + "--root_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_TRAIN_CSV", + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/csv/normal/val", + ), + ) + # 학습 데이터 경로 + parser.add_argument( + "--abnormal_root_dir", + type=str, + 
default=os.environ.get( + "SM_CHANNEL_ABNORMAL_CSV", + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/csv/abnormal/val", + ), + ) + parser.add_argument( + "--json_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_VAL_JSON", + "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/datapreprocess/json/abnormal/val", + ), + ) + # abnormal 검증셋 csv, json파일 경로 + parser.add_argument( + "--model_dir", + type=str, + default=os.environ.get("SM_MODEL_DIR", "/data/ephemeral/home/pths"), + ) + # pth 파일 저장 경로 + + parser.add_argument("--model_name", type=str, default="LSTM") + # import_module로 불러올 model name + + parser.add_argument("--resume_name", type=str, default="") + # resume 파일 이름 + + parser.add_argument("--seed", type=int, default=666) + # random seed + + parser.add_argument( + "--device", default="cuda" if torch.cuda.is_available() else "cpu" + ) + parser.add_argument("--num_workers", type=int, default=8) + + parser.add_argument("--batch_size", type=int, default=64) + parser.add_argument("--val_batch_size", type=int, default=64) + parser.add_argument("--val_num_workers", type=int, default=8) + parser.add_argument("--learning_rate", type=float, default=0.001) + parser.add_argument("--max_epoch", type=int, default=50) + + parser.add_argument("--save_interval", type=int, default=1) + parser.add_argument("--val_interval", type=int, default=1) + parser.add_argument("--thr", type=float, default=0.02) + + parser.add_argument("--patience", type=int, default=10) + + # parser.add_argument("--mp", action="store_false") + # https://stackoverflow.com/questions/60999816/argparse-not-parsing-boolean-arguments + # mixed precision 사용할 지 여부 + + parser.add_argument("--wandb_mode", type=str, default="online") + # parser.add_argument("--wandb_mode", type=str, default="disabled") + # wandb mode + parser.add_argument("--wandb_run_name", type=str, default="LSTM") + # wandb run name + + args = parser.parse_args() + + return args + + +def set_seed(seed): + 
torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if use multi-GPU + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + random.seed(seed) + + +def train( + root_dir, + abnormal_root_dir, + json_dir, + model_dir, + model_name, + device, + num_workers, + batch_size, + val_num_workers, + val_batch_size, + learning_rate, + max_epoch, + val_interval, + save_interval, + thr, + patience, + resume_name, + seed, + # mp, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + # Define parameters + sequence_length = 20 # Adjust as needed + prediction_time = 1 # Adjust as needed + n_features = 38 # Number of features to predict + + batch_size = batch_size + val_batch_size = val_batch_size + + # -- early stopping flag + patience = patience + counter = 0 + + # 데이터셋 + dataset = NormalDataset( + root=root_dir, + ) + + valid_data_size = len(dataset) // 10 + + train_data_size = len(dataset) - valid_data_size + + train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + ) + + train_loader = DataLoader( + dataset=train_dataset, + batch_size=batch_size, + shuffle=True, + num_workers=num_workers, + ) + + valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + abnormal_dataset = AbnormalDataset( + root=abnormal_root_dir, + label_root=json_dir, + ) + + abnormal_loader = DataLoader( + dataset=abnormal_dataset, + batch_size=val_batch_size, + shuffle=True, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> data_load_time: {data_load_time}") + + # Initialize the LSTM autoencoder model + model = 
LSTMAutoencoder(sequence_length, n_features, prediction_time) + + # load_dict = None + + # if resume_name: + # load_dict = torch.load(osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu") + # model.load_state_dict(load_dict["model_state_dict"]) + + # model.load_state_dict( + # torch.load( + # "/data/ephemeral/home/level2-3-cv-finalproject-cv-06/app/models/pytorch_model.pth", + # map_location="cpu", + # ) + # ) + model.to(device) + + optimizer = torch.optim.AdamW( + model.parameters(), lr=learning_rate, weight_decay=1e-6 + ) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[15, 40], gamma=0.1 + ) + + # if resume_name: + # optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + # scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.MSELoss() + val_criterion = nn.MSELoss(reduction="none") + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "MSE", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + + total_batches = len(train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_loss = 0 + + for step, (x, y, _) in tqdm(enumerate(train_loader), total=total_batches): + + x, y = x.to(device), y.to(device) + + optimizer.zero_grad() + + pred = model(x) + + loss = criterion(pred, y) + + loss.backward() + optimizer.step() + + epoch_loss += loss + + epoch_mean_loss = (epoch_loss / total_batches).item() + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print( + f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = 
osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 loss를 출력하고 best model을 저장합니다. + if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_loss = 0 + total_abnormal_loss = 0 + total_n_corrects = 0 + total_abnormal_n_corrects = 0 + total_auc = 0 + total_abnormal_auc = 0 + error_count = 0 + error_count_abnormal = 0 + + for step, (x, y, label) in tqdm( + enumerate(valid_loader), total=len(valid_loader) + ): + x, y, label = x.to(device), y.to(device), label.to(device) + + pred = model(x) + + val_loss = val_criterion(pred, y) + val_loss_rdim = torch.mean(val_loss, dim=2) + pred_label = val_loss_rdim > thr + # pred_sig = F.sigmoid(val_loss_rdim-thr) + label = label.view(-1, 1) + + try: + auc = roc_auc_score(label.cpu(), pred_label.cpu()) + # auc = roc_auc_score(label.cpu(), pred_sig.cpu()) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # 정상상황인 경우 label이 항상 전부 0 + # => 무조건 "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 발생 + total_auc += auc + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + total_auc += 0 + error_count += 1 + + pred_correct = pred_label == label + corrects = torch.sum(pred_correct).item() + + total_n_corrects += corrects + + val_loss = torch.mean(val_loss) + + total_loss += val_loss + + val_mean_loss = (total_loss / len(valid_loader)).item() + if error_count < len(valid_loader): + val_auc = total_auc / (len(valid_loader) - error_count) + else: + # 정상영상은 roc_auc_score 함수 사용 불가 => error_count == len(valid_loader) + val_auc = 0 + # ==> vaild_auc는 항상 0 + val_accuracy = total_n_corrects / valid_data_size + + for step, (x, y, label) in tqdm( + enumerate(abnormal_loader), total=len(abnormal_loader) + ): + x, y, label = x.to(device), y.to(device), label.to(device) + + pred = model(x) + + val_loss = val_criterion(pred, y) + val_loss_rdim = torch.mean(val_loss, dim=2) + pred_label = val_loss_rdim > thr + # pred_sig = F.sigmoid(val_loss_rdim - thr) + label = label.view(-1, 1) + + try: + auc = roc_auc_score(label.cpu(), pred_label.cpu()) + # auc = roc_auc_score(label.cpu(), pred_sig.cpu()) + total_abnormal_auc += auc + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + total_abnormal_auc += 0 + error_count_abnormal += 1 + + pred_correct = pred_label == label + corrects = torch.sum(pred_correct).item() + + total_abnormal_n_corrects += corrects + + val_loss = torch.mean(val_loss) + + total_abnormal_loss += val_loss + + val_abnormal_mean_loss = ( + total_abnormal_loss / len(abnormal_loader) + ).item() + val_abnormal_auc = total_abnormal_auc / ( + len(abnormal_loader) - error_count_abnormal + ) + val_abnormal_accuracy = total_abnormal_n_corrects / len( + abnormal_dataset + ) + + val_total_auc = (total_auc + total_abnormal_auc) / ( + len(valid_loader) + + len(abnormal_loader) + - error_count + - error_count_abnormal + ) + val_total_accuracy = (total_n_corrects + total_abnormal_n_corrects) / ( + valid_data_size + len(abnormal_dataset) + ) + + if best_loss > val_mean_loss: + print( + f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_loss": epoch_mean_loss, + "valid_loss": val_mean_loss, + "valid_abnormal_loss": val_abnormal_mean_loss, + "valid_auc": val_auc, + "valid_abnormal_auc": val_abnormal_auc, + "valid_normal+abnormal_auc": val_total_auc, + "valid_accuracy": val_accuracy, + "valid_abnormal_accuracy": val_abnormal_accuracy, + "valid_normal+abnormal_accuracy": val_total_accuracy, + "learning_rate": scheduler.get_lr()[0], + } + + wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)}\nvalid_auc: {val_auc:.4f}\nvalid_accuracy: {val_accuracy:.2f}" + ) + print( + f"valid_abnormal_loss: {round(val_abnormal_mean_loss,4)}\nvalid_abnormal_auc: {val_abnormal_auc:.4f}\nvalid_abnormal_accuracy: {val_abnormal_accuracy:.2f}" + ) + print( + f"valid_normal+abnormal_auc: {val_total_auc:.4f}\nvalid_normal+abnormal_accuracy: {val_total_accuracy:.2f}" + ) + print(f"auc_roc_error_count: {error_count+error_count_abnormal}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(f"==>> total time: {total_time}") + + +def main(args): + train(**args.__dict__) + + +if __name__ == "__main__": + args = parse_args() + + main(args) diff --git a/model/train/train_MIL.py b/model/train/train_MIL.py new file mode 100644 index 0000000..2e1ccda --- /dev/null +++ b/model/train/train_MIL.py @@ -0,0 +1,2299 @@ +import os +import os.path as osp +import random +from argparse import ArgumentParser +from datetime import datetime + +import matplotlib.pyplot as plt +import 
numpy as np +import pandas as pd +import sklearn.metrics +import torch +import torch.nn as nn +import torch.nn.functional as F +import wandb +from classifier import MILClassifier +from loss import MIL +from shop_dataset import AbnormalVMAE, NormalVMAE +from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve +from sklearn.preprocessing import MinMaxScaler +from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split +from tqdm import tqdm + + +def parse_args(): + parser = ArgumentParser() + + # Conventional args + parser.add_argument( + "--normal_root_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_NORMAL_NPY", + "../datapreprocess/npy/normal", + ), + ) + # 학습 데이터 경로 + parser.add_argument( + "--abnormal_root_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_NPY", + "../datapreprocess/npy/abnormal", + ), + ) + parser.add_argument( + "--json_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_JSON", + "../datapreprocess/json/abnormal", + ), + ) + # abnormal 검증셋 npy, json파일 경로 + parser.add_argument( + "--model_dir", type=str, default=os.environ.get("SM_MODEL_DIR", "../pths") + ) + # pth 파일 저장 경로 + + parser.add_argument("--model_name", type=str, default="MIL") + # import_module로 불러올 model name + + parser.add_argument("--resume_name", type=str, default="") + # resume 파일 이름 + + parser.add_argument("--model_size", type=str, default="small") + # VideoMAEv2 backbone 사이즈 = "small" or "base" + + parser.add_argument("--seed", type=int, default=666) + # random seed + + parser.add_argument( + "--device", default="cuda" if torch.cuda.is_available() else "cpu" + ) + parser.add_argument("--num_workers", type=int, default=0) + + parser.add_argument("--batch_size", type=int, default=30) + # parser.add_argument("--val_batch_size", type=int, default=1) + # parser.add_argument("--val_num_workers", type=int, default=0) + parser.add_argument("--learning_rate", type=float, default=0.001) + 
parser.add_argument("--max_epoch", type=int, default=1000) + + parser.add_argument("--save_interval", type=int, default=1) + parser.add_argument("--val_interval", type=int, default=1) + parser.add_argument("--thr", type=float, default=0.25) + parser.add_argument("--drop_rate", type=float, default=0.3) + + parser.add_argument("--patience", type=int, default=100) + + # parser.add_argument("--mp", action="store_false") + # https://stackoverflow.com/questions/60999816/argparse-not-parsing-boolean-arguments + # whether to use mixed precision + + parser.add_argument("--use_extra", action="store_false") + + # parser.add_argument("--wandb_mode", type=str, default="online") + parser.add_argument("--wandb_mode", type=str, default="disabled") + # wandb mode + parser.add_argument("--wandb_run_name", type=str, default="MIL") + # wandb run name + + args = parser.parse_args() + + return args + + +def set_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if use multi-GPU + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + random.seed(seed) + + +def train( + normal_root_dir, + abnormal_root_dir, + json_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + max_epoch, + val_interval, + save_interval, + thr, + drop_rate, + patience, + resume_name, + seed, + # mp, + use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size  # no-op self-assignment, kept as-is + + val_batch_size = 1 + val_num_workers = 0 + + # -- early stopping flag + patience = patience + counter = 0 + + # datasets + dataset = NormalVMAE( + model_size=model_size, + root=normal_root_dir, + ) + + valid_data_size = len(dataset) // 10  # hold out 10% of normal clips for validation + + train_data_size = len(dataset) - valid_data_size + 
+ train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + ) + + normal_train_loader = DataLoader( + dataset=train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + normal_valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=val_batch_size, + shuffle=False, + drop_last=True, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = AbnormalVMAE( + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + abnormal_valid_dataset = AbnormalVMAE( + is_train=0, + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + + abnormal_train_loader = DataLoader( + dataset=abnormal_train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the model + model = MILClassifier(drop_p=drop_rate) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + # 1e-6 => 0.0010000000474974513 + optimizer = torch.optim.Adagrad( + model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513 + ) + # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + 
optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MIL_criterion = MIL + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "BCE+MIL", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(abnormal_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_loss = 0 + epoch_n_corrects = 0 + epoch_n_MIL_loss = 0 + epoch_n_loss = 0 + epoch_n_n_corrects = 0 + + epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + norm_train_iter = iter(normal_train_loader) + # the iterator must be re-created here each epoch so it restarts from the beginning + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_train_loader), + total=total_batches, + ): + try: + normal_inputs = next(norm_train_iter) + + abnormal_input, abnormal_gt = abnormal_inputs + # (batch_size, 12, 710), (batch_size, 12) + normal_input, normal_gt = normal_inputs + # (batch_size, 12, 710), (batch_size, 12) + + inputs, gts = torch.cat( + (abnormal_input, normal_input), dim=1 + ), torch.cat((abnormal_gt, normal_gt), dim=1) + # inputs is (batch_size, 24, 710), gts is (batch_size, 24) + + # batch_size = inputs.shape[0] + + # inputs = inputs.view(-1, inputs.size(-1)).to(device) + # (batch_size * 24, 710) + inputs = inputs.to(device) + gts = gts.view(-1, 1).to(device) + # (batch_size * 24, 1) + + optimizer.zero_grad() + + pred = model(inputs) + # pred is (batch_size * 24, 1) + + loss = criterion(pred, gts) + MIL_loss = MIL_criterion(pred, batch_size, abnormal_input.size(1)) + sum_loss = loss + MIL_loss + # sum_loss = MIL_loss + sum_loss.backward() + + 
# loss.backward() + optimizer.step() + with torch.no_grad(): + pred_a = pred.view(batch_size, 2, abnormal_input.size(1))[:, 0, :] + pred_n = pred.view(batch_size, 2, abnormal_input.size(1))[:, 1, :] + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + pred_n_mean = torch.mean(pred_n) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_n_loss += loss.item() + epoch_n_MIL_loss += MIL_loss.item() + + epoch_n_n_corrects += corrects / (abnormal_input.size(1) * 2) + + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + epoch_normal_max += pred_n_max.item() + epoch_normal_mean += pred_n_mean.item() + + except StopIteration: + if not use_extra: + + break + abnormal_input, abnormal_gt = abnormal_inputs + # (batch_size, 12, 710), (batch_size, 12) + + # inputs = abnormal_input.view(-1, inputs.size(-1)).to(device) + # (batch_size * 12, 710) + inputs = abnormal_input.to(device) + gts = abnormal_gt.view(-1, 1).to(device) + # (batch_size * 12, 1) + + optimizer.zero_grad() + + pred = model(inputs) + # pred는 (batch_size * 12, 1) + + loss = criterion(pred, gts) + + loss.backward() + optimizer.step() + with torch.no_grad(): + # print(f"==>> pred.shape: {pred.shape}") + pred_a = pred.view(batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_loss += loss.item() + epoch_n_corrects += corrects / abnormal_input.size(1) + + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + epoch_n_mean_loss = epoch_n_loss / len(normal_train_loader) + epoch_n_mean_MIL_loss = epoch_n_MIL_loss / 
len(normal_train_loader) + epoch_n_accuracy = epoch_n_n_corrects / ( + batch_size * (len(normal_train_loader)) + ) + + epoch_mean_normal_max = epoch_normal_max / len(normal_train_loader) + epoch_mean_normal_mean = epoch_normal_mean / len(normal_train_loader) + if use_extra: + epoch_mean_loss = (epoch_loss + epoch_n_loss) / total_batches + epoch_accuracy = (epoch_n_corrects + epoch_n_n_corrects) / ( + batch_size * (len(abnormal_train_loader)) + ) + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + else: + epoch_mean_loss = (epoch_loss + epoch_n_loss) / len(normal_train_loader) + epoch_accuracy = (epoch_n_corrects + epoch_n_n_corrects) / ( + batch_size * (len(normal_train_loader)) + ) + epoch_mean_abnormal_max = epoch_abnormal_max / len(normal_train_loader) + epoch_mean_abnormal_mean = epoch_abnormal_mean / len(normal_train_loader) + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print( + f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)} n_loss: {round(epoch_n_mean_loss,4)} MIL_loss: {round(epoch_n_mean_MIL_loss,4)}" + ) + print(f"accuracy: {epoch_accuracy:.2f} n_accuracy: {epoch_n_accuracy:.2f}") + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # save the model's state_dict + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # every val_interval epochs, report the validation loss and save the best model. 
+ if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_n_loss = 0 + total_n_MIL_loss = 0 + total_n_n_corrects = 0 + + total_n_fpr = 0 + total_n_tpr = 0 + total_n_bthr = 0 + total_n_auc = 0 + total_n_ap = 0 + + total_loss = 0 + total_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + error_n_count = 0 + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + try: + normal_inputs = next(norm_valid_iter) + + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 192) + normal_input, normal_gt = normal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 12) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 12) + + inputs = torch.cat((abnormal_input, normal_input), dim=1) + gts = torch.cat((abnormal_gt2, normal_gt), dim=1) + # inputs는 (val_batch_size, 24, 710), gts는 (val_batch_size, 24) + + # inputs = inputs.view(-1, inputs.size(-1)).to(device) + # (val_batch_size * 24, 710) + inputs = inputs.to(device) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 24, 1) + + pred = model(inputs) + # pred는 (val_batch_size * 24, 1) + + val_loss = criterion(pred, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + val_MIL_loss = MIL_criterion( + pred, val_batch_size, abnormal_input.size(1) + ) + + pred_a = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 0, : + ] + pred_n = pred.view(val_batch_size, 2, 
abnormal_input.size(1))[ + :, 1, : + ] + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + pred_n_mean = torch.mean(pred_n) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + pred_normal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + pred_normal_np[step[j] * 16 : step[j + 1] * 16] = pred[ + abnormal_input.size(1) + j + ] + + pred_np = np.concatenate( + (pred_abnormal_np, pred_normal_np), axis=0 + ) + + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + # normal_gt = np.zeros_like(abnormal_gt2) + normal_gt = np.zeros_like(abnormal_gt) + # gt_np = np.concatenate((abnormal_gt2, normal_gt), axis=0) + gt_np = np.concatenate((abnormal_gt, normal_gt), axis=0) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gt_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gt_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > thr + TP_and_FN = pred_positive[gt_np > 0.9] + FP_and_TN = pred_positive[gt_np < 0.1] + + total_n_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_n_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_n_bthr += best_thr if diff_idx != 0 else 1 + + total_n_auc += auc + total_n_ap += ap + total_n_n_corrects += corrects / ( + abnormal_input.size(1) * 2 + 
) + total_n_loss += val_loss.item() + total_n_MIL_loss += val_MIL_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." + # ) + # total_auc += 0 + error_n_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + except StopIteration: + # if not use_extra: + # break + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 192) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 12) + + # inputs = abnormal_input.view(-1, inputs.size(-1)).to(device) + # (val_batch_size * 12, 710) + inputs = abnormal_input.to(device) + gts = abnormal_gt2.view(-1, 1).to(device) + # (val_batch_size * 12, 1) + + pred = model(inputs) + # pred는 (val_batch_size * 12, 1) + + val_loss = criterion(pred, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + pred_a = pred.view(val_batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + + try: + # auc = 
roc_auc_score(y_true=abnormal_gt, y_score=pred_abnormal_np) + # auc = roc_auc_score(y_true=abnormal_gt2, y_score=pred) + + fpr, tpr, cut = roc_curve( + y_true=abnormal_gt, y_score=pred_abnormal_np + ) + precision, recall, cut2 = precision_recall_curve( + abnormal_gt, pred_abnormal_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_abnormal_np > thr + TP_and_FN = pred_positive[abnormal_gt > 0.9] + FP_and_TN = pred_positive[abnormal_gt < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / abnormal_input.size(1) + # normal + abnormal 24개와 다르게 abnormal 12개만 있음 -> /12 => 2/24 + total_loss += val_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + # total_auc += 0 + error_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + + val_n_mean_loss = total_n_loss / ( + len(normal_valid_loader) - error_n_count + ) + val_n_mean_MIL_loss = total_n_MIL_loss / ( + len(normal_valid_loader) - error_n_count + ) + + val_n_fpr = total_n_fpr / ((len(normal_valid_loader) - error_n_count)) + val_n_tpr = total_n_tpr / ((len(normal_valid_loader) - error_n_count)) + val_n_bthr = total_n_bthr / ((len(normal_valid_loader) - error_n_count)) + val_n_auc = total_n_auc / (len(normal_valid_loader) - error_n_count) + val_n_ap = total_n_ap / (len(normal_valid_loader) - error_n_count) + + val_n_accuracy = total_n_n_corrects / ( + (len(normal_valid_loader) - error_n_count) + ) + + val_mean_loss = (total_loss + total_n_loss) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + val_fpr = (total_fpr + total_n_fpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_tpr = (total_tpr + total_n_tpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_bthr = (total_bthr + total_n_bthr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_auc = (total_auc + total_n_auc) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_ap = (total_ap + total_n_ap) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_accuracy = (total_n_corrects + total_n_n_corrects) / ( + (len(abnormal_valid_loader) - error_n_count - error_count) + ) + # for loop 한번에 abnormal 12, normal 12해서 24개 정답 확인 + + val_mean_normal_max = total_normal_max / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + if best_loss > val_mean_loss: + print( + 
f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + print( + f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_loss": epoch_mean_loss, + "train_accuracy": epoch_accuracy, + "train_n_loss": epoch_n_mean_loss, + "train_n_MIL_loss": epoch_n_mean_MIL_loss, + "train_n_accuracy": epoch_n_accuracy, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_n_loss": val_n_mean_loss, + "valid_n_MIL_loss": val_n_mean_MIL_loss, + "valid_n_fpr": val_n_fpr, + "valid_n_tpr": val_n_tpr, + "valid_n_bthr": val_n_bthr, + "valid_n_auc": val_n_auc, + "valid_n_ap": val_n_ap, + "valid_n_accuracy": val_n_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)} valid_n_loss: {round(val_n_mean_loss,4)} valid_n_MIL_loss: {round(val_n_mean_MIL_loss,4)}" + ) + print(f"valid_fpr: {val_fpr} valid_n_fpr: {val_n_fpr}") + print(f"valid_tpr: {val_tpr} valid_n_tpr: {val_n_tpr}") + print(f"valid_bthr: {val_bthr} valid_n_bthr: {val_n_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_n_auc: {val_n_auc:.4f}\nvalid_ap: {val_ap:.4f} valid_n_ap: {val_n_ap:.4f}\nvalid_accuracy: {val_accuracy:.2f} 
valid_n_accuracy: {val_n_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: {val_mean_abnormal_max} val_abnormal_mean: {val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(f"==>> total time: {total_time}") + + +def train2( + normal_root_dir, + abnormal_root_dir, + json_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + max_epoch, + val_interval, + save_interval, + thr, + drop_rate, + patience, + resume_name, + seed, + # mp, + use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size + + val_batch_size = 1 + val_num_workers = 0 + + # -- early stopping flag + patience = patience + counter = 0 + + # 데이터셋 + dataset = NormalVMAE( + model_size=model_size, + root=normal_root_dir, + ) + + valid_data_size = len(dataset) // 10 + + train_data_size = len(dataset) - valid_data_size + + train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + ) + + # normal_train_loader = DataLoader( + # dataset=train_dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=num_workers + # ) + + normal_valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=val_batch_size, + shuffle=False, + drop_last=True, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = AbnormalVMAE( + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + abnormal_valid_dataset = AbnormalVMAE( + is_train=0, + model_size=model_size, + 
root=abnormal_root_dir, + label_root=json_dir, + ) + + # abnormal_train_loader = DataLoader( + # dataset=abnormal_train_dataset, + # batch_size=batch_size, + # shuffle=True, + # drop_last=True, + # num_workers=num_workers, + # ) + + concat_trainset = ConcatDataset([train_dataset, abnormal_train_dataset]) + + concat_train_loader = DataLoader( + dataset=concat_trainset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the LSTM autoencoder model + model = MILClassifier(drop_p=drop_rate) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + # 1e-6 => 0.0010000000474974513 + optimizer = torch.optim.Adagrad( + model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513 + ) + # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MIL_criterion = MIL + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": 
max_epoch, + "loss": "BCE", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(concat_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_loss = 0 + epoch_n_corrects = 0 + + epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + nan_count = 0 + + for step, inputs in tqdm( + enumerate(concat_train_loader), + total=total_batches, + ): + inp, gts = inputs + # (batch_size, 11, 710), (batch_size, 11) + + num_segs = inp.size(1) + + # inp = inp.view(-1, inp.size(-1)).to(device) + # (batch_size * 11, 710) + inp = inp.to(device) + gts = gts.view(-1, 1).to(device) + # (batch_size * 11, 1) + + optimizer.zero_grad() + + pred = model(inp) + # pred는 (batch_size * 11, 1) + + loss = criterion(pred, gts) + + loss.backward() + optimizer.step() + with torch.no_grad(): + # print(f"==>> pred.shape: {pred.shape}") + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_loss += loss.item() + epoch_n_corrects += corrects / (num_segs * batch_size) + + check = gts.view(batch_size, num_segs) != 0 + check = torch.sum(check, dim=1) + + check_a = check != 0 + check_n = check_a == False + + pred_reshape = pred.view(batch_size, num_segs) + + pred_a_max = torch.mean(torch.max(pred_reshape[check_a], dim=-1)[0]) + + pred_a_mean = torch.mean(pred_reshape[check_a]) + + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + if torch.sum(check_n) != 0: + pred_n_max = torch.mean(torch.max(pred_reshape[check_n], dim=-1)[0]) + + pred_n_mean = torch.mean(pred_reshape[check_n]) + + epoch_normal_max += pred_n_max.item() + epoch_normal_mean += pred_n_mean.item() + else: + nan_count += 1 + + epoch_mean_loss = epoch_loss / total_batches + epoch_accuracy = 
epoch_n_corrects / total_batches + + epoch_mean_normal_max = epoch_normal_max / (total_batches - nan_count) + epoch_mean_normal_mean = epoch_normal_mean / (total_batches - nan_count) + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print( + f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)}" + ) + print(f"accuracy: {epoch_accuracy:.2f}") + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 loss를 출력하고 best model을 저장합니다. 
+ if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_n_loss = 0 + total_n_MIL_loss = 0 + total_n_n_corrects = 0 + + total_n_fpr = 0 + total_n_tpr = 0 + total_n_bthr = 0 + total_n_auc = 0 + total_n_ap = 0 + + total_loss = 0 + total_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + error_n_count = 0 + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + try: + normal_inputs = next(norm_valid_iter) + + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 192) + normal_input, normal_gt = normal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 12) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 12) + + inputs = torch.cat((abnormal_input, normal_input), dim=1) + gts = torch.cat((abnormal_gt2, normal_gt), dim=1) + # inputs는 (val_batch_size, 24, 710), gts는 (val_batch_size, 24) + + # inputs = inputs.view(-1, inputs.size(-1)).to(device) + # (val_batch_size * 24, 710) + inputs = inputs.to(device) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 24, 1) + + pred = model(inputs) + # pred는 (val_batch_size * 24, 1) + + val_loss = criterion(pred, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + val_MIL_loss = MIL_criterion( + pred, val_batch_size, abnormal_input.size(1) + ) + + pred_a = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 0, : + ] + pred_n = pred.view(val_batch_size, 2, 
abnormal_input.size(1))[ + :, 1, : + ] + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + pred_n_mean = torch.mean(pred_n) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + pred_normal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + pred_normal_np[step[j] * 16 : step[j + 1] * 16] = pred[ + abnormal_input.size(1) + j + ] + + pred_np = np.concatenate( + (pred_abnormal_np, pred_normal_np), axis=0 + ) + + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + # normal_gt = np.zeros_like(abnormal_gt2) + normal_gt = np.zeros_like(abnormal_gt) + # gt_np = np.concatenate((abnormal_gt2, normal_gt), axis=0) + gt_np = np.concatenate((abnormal_gt, normal_gt), axis=0) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gt_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gt_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > thr + TP_and_FN = pred_positive[gt_np > 0.9] + FP_and_TN = pred_positive[gt_np < 0.1] + + total_n_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_n_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_n_bthr += best_thr if diff_idx != 0 else 1 + + total_n_auc += auc + total_n_ap += ap + total_n_n_corrects += corrects / ( + abnormal_input.size(1) * 2 + 
) + total_n_loss += val_loss.item() + total_n_MIL_loss += val_MIL_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." + # ) + # total_auc += 0 + error_n_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + except StopIteration: + # if not use_extra: + # break + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 192) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 12) + + # inputs = abnormal_input.view(-1, inputs.size(-1)).to(device) + # (val_batch_size * 12, 710) + inputs = abnormal_input.to(device) + gts = abnormal_gt2.view(-1, 1).to(device) + # (val_batch_size * 12, 1) + + pred = model(inputs) + # pred는 (val_batch_size * 12, 1) + + val_loss = criterion(pred, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + pred_a = pred.view(val_batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + + try: + # auc = 
roc_auc_score(y_true=abnormal_gt, y_score=pred_abnormal_np) + # auc = roc_auc_score(y_true=abnormal_gt2, y_score=pred) + + fpr, tpr, cut = roc_curve( + y_true=abnormal_gt, y_score=pred_abnormal_np + ) + precision, recall, cut2 = precision_recall_curve( + abnormal_gt, pred_abnormal_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_abnormal_np > thr + TP_and_FN = pred_positive[abnormal_gt > 0.9] + FP_and_TN = pred_positive[abnormal_gt < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / abnormal_input.size(1) + # normal + abnormal 24개와 다르게 abnormal 12개만 있음 -> /12 => 2/24 + total_loss += val_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + # total_auc += 0 + error_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + + val_n_mean_loss = total_n_loss / ( + len(normal_valid_loader) - error_n_count + ) + val_n_mean_MIL_loss = total_n_MIL_loss / ( + len(normal_valid_loader) - error_n_count + ) + + val_n_fpr = total_n_fpr / ((len(normal_valid_loader) - error_n_count)) + val_n_tpr = total_n_tpr / ((len(normal_valid_loader) - error_n_count)) + val_n_bthr = total_n_bthr / ((len(normal_valid_loader) - error_n_count)) + val_n_auc = total_n_auc / (len(normal_valid_loader) - error_n_count) + val_n_ap = total_n_ap / (len(normal_valid_loader) - error_n_count) + + val_n_accuracy = total_n_n_corrects / ( + (len(normal_valid_loader) - error_n_count) + ) + + val_mean_loss = (total_loss + total_n_loss) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + val_fpr = (total_fpr + total_n_fpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_tpr = (total_tpr + total_n_tpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_bthr = (total_bthr + total_n_bthr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_auc = (total_auc + total_n_auc) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_ap = (total_ap + total_n_ap) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_accuracy = (total_n_corrects + total_n_n_corrects) / ( + (len(abnormal_valid_loader) - error_n_count - error_count) + ) + # for loop 한번에 abnormal 12, normal 12해서 24개 정답 확인 + + val_mean_normal_max = total_normal_max / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + if best_loss > val_mean_loss: + print( + 
f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + print( + f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_loss": epoch_mean_loss, + "train_accuracy": epoch_accuracy, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_n_loss": val_n_mean_loss, + "valid_n_MIL_loss": val_n_mean_MIL_loss, + "valid_n_fpr": val_n_fpr, + "valid_n_tpr": val_n_tpr, + "valid_n_bthr": val_n_bthr, + "valid_n_auc": val_n_auc, + "valid_n_ap": val_n_ap, + "valid_n_accuracy": val_n_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)} valid_n_loss: {round(val_n_mean_loss,4)} valid_n_MIL_loss: {round(val_n_mean_MIL_loss,4)}" + ) + print(f"valid_fpr: {val_fpr} valid_n_fpr: {val_n_fpr}") + print(f"valid_tpr: {val_tpr} valid_n_tpr: {val_n_tpr}") + print(f"valid_bthr: {val_bthr} valid_n_bthr: {val_n_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_n_auc: {val_n_auc:.4f}\nvalid_ap: {val_ap:.4f} valid_n_ap: {val_n_ap:.4f}\nvalid_accuracy: {val_accuracy:.2f} valid_n_accuracy: {val_n_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: {val_mean_abnormal_max} val_abnormal_mean: 
{val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(f"==>> total time: {total_time}") + + +def train3( + normal_root_dir, + abnormal_root_dir, + json_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + max_epoch, + val_interval, + save_interval, + thr, + drop_rate, + patience, + resume_name, + seed, + # mp, + use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size + + val_batch_size = 1 + val_num_workers = 0 + + # -- early stopping flag + patience = patience + counter = 0 + + # 데이터셋 + dataset = NormalVMAE( + model_size=model_size, + root=normal_root_dir, + ) + + valid_data_size = len(dataset) // 10 + + train_data_size = len(dataset) - valid_data_size + + train_dataset, valid_dataset = random_split( + dataset, lengths=[train_data_size, valid_data_size] + ) + + normal_train_loader = DataLoader( + dataset=train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + normal_valid_loader = DataLoader( + dataset=valid_dataset, + batch_size=val_batch_size, + shuffle=False, + drop_last=True, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = AbnormalVMAE( + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + abnormal_valid_dataset = AbnormalVMAE( + is_train=0, + model_size=model_size, + root=abnormal_root_dir, + label_root=json_dir, + ) + + abnormal_train_loader = DataLoader( + dataset=abnormal_train_dataset, + 
batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the model + model = MILClassifier(drop_p=drop_rate) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + # 1e-6 => 0.0010000000474974513 + optimizer = torch.optim.Adagrad( + model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513 + ) + # optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MIL_criterion = MIL + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "BCE+MIL", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(abnormal_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_loss = 0 + epoch_n_corrects = 0 + epoch_MIL_loss = 0 + + 
epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_train_loader), + total=total_batches, + ): + if step % len(normal_train_loader) == 0: + norm_train_iter = iter(normal_train_loader) + # 중복 추출하더라도 정상, 이상 영상 1대1 대응 loop 끝까지 유지 + + normal_inputs = next(norm_train_iter) + + abnormal_input, abnormal_gt = abnormal_inputs + # (batch_size, 12, 710), (batch_size, 12) + normal_input, normal_gt = normal_inputs + # (batch_size, 12, 710), (batch_size, 12) + + inputs, gts = torch.cat((abnormal_input, normal_input), dim=1), torch.cat( + (abnormal_gt, normal_gt), dim=1 + ) + # inputs는 (batch_size, 24, 710), gts는 (batch_size, 24) + + # batch_size = inputs.shape[0] + + # inputs = inputs.view(-1, inputs.size(-1)).to(device) + # (batch_size * 24, 710) + inputs = inputs.to(device) + gts = gts.view(-1, 1).to(device) + # (batch_size * 24, 1) + + optimizer.zero_grad() + + pred = model(inputs) + # pred는 (batch_size * 24, 1) + + loss = criterion(pred, gts) + MIL_loss = MIL_criterion(pred, batch_size, abnormal_input.size(1)) + sum_loss = loss + MIL_loss + # sum_loss = MIL_loss + sum_loss.backward() + + # loss.backward() + optimizer.step() + with torch.no_grad(): + pred_a = pred.view(batch_size, 2, abnormal_input.size(1))[:, 0, :] + pred_n = pred.view(batch_size, 2, abnormal_input.size(1))[:, 1, :] + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + pred_n_mean = torch.mean(pred_n) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + epoch_loss += loss.item() + epoch_MIL_loss += MIL_loss.item() + + epoch_n_corrects += corrects / (abnormal_input.size(1) * 2) + + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + epoch_normal_max += pred_n_max.item() + 
epoch_normal_mean += pred_n_mean.item() + + epoch_mean_loss = epoch_loss / total_batches + epoch_mean_MIL_loss = epoch_MIL_loss / total_batches + epoch_accuracy = epoch_n_corrects / (batch_size * (total_batches)) + + epoch_mean_normal_max = epoch_normal_max / total_batches + epoch_mean_normal_mean = epoch_normal_mean / total_batches + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print( + f"==>> epoch {epoch+1} train_time: {train_time}\nloss: {round(epoch_mean_loss,4)} MIL_loss: {round(epoch_mean_MIL_loss,4)}" + ) + print(f"accuracy: {epoch_accuracy:.2f}") + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 loss를 출력하고 best model을 저장합니다. 
+ if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_n_loss = 0 + total_n_MIL_loss = 0 + total_n_n_corrects = 0 + + total_n_fpr = 0 + total_n_tpr = 0 + total_n_bthr = 0 + total_n_auc = 0 + total_n_ap = 0 + + total_loss = 0 + total_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + error_n_count = 0 + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + try: + normal_inputs = next(norm_valid_iter) + + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 192) + normal_input, normal_gt = normal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 12) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 12) + + inputs = torch.cat((abnormal_input, normal_input), dim=1) + gts = torch.cat((abnormal_gt2, normal_gt), dim=1) + # inputs는 (val_batch_size, 24, 710), gts는 (val_batch_size, 24) + + # inputs = inputs.view(-1, inputs.size(-1)).to(device) + # (val_batch_size * 24, 710) + inputs = inputs.to(device) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 24, 1) + + pred = model(inputs) + # pred는 (val_batch_size * 24, 1) + + val_loss = criterion(pred, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + val_MIL_loss = MIL_criterion( + pred, val_batch_size, abnormal_input.size(1) + ) + + pred_a = pred.view(val_batch_size, 2, abnormal_input.size(1))[ + :, 0, : + ] + pred_n = pred.view(val_batch_size, 2, 
abnormal_input.size(1))[ + :, 1, : + ] + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + pred_n_mean = torch.mean(pred_n) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + pred_normal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + pred_normal_np[step[j] * 16 : step[j + 1] * 16] = pred[ + abnormal_input.size(1) + j + ] + + pred_np = np.concatenate( + (pred_abnormal_np, pred_normal_np), axis=0 + ) + + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + # normal_gt = np.zeros_like(abnormal_gt2) + normal_gt = np.zeros_like(abnormal_gt) + # gt_np = np.concatenate((abnormal_gt2, normal_gt), axis=0) + gt_np = np.concatenate((abnormal_gt, normal_gt), axis=0) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gt_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gt_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > thr + TP_and_FN = pred_positive[gt_np > 0.9] + FP_and_TN = pred_positive[gt_np < 0.1] + + total_n_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_n_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_n_bthr += best_thr if diff_idx != 0 else 1 + + total_n_auc += auc + total_n_ap += ap + total_n_n_corrects += corrects / ( + abnormal_input.size(1) * 2 + 
) + total_n_loss += val_loss.item() + total_n_MIL_loss += val_MIL_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." + # ) + # total_auc += 0 + error_n_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + except StopIteration: + # if not use_extra: + # break + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, 12, 710), (val_batch_size, 192) + + abnormal_gt2 = torch.max( + abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2 + )[0] + # abnormal_gt2 = torch.mean(abnormal_gt.view(-1, abnormal_input.size(1), 16), dim=2) + # (val_batch_size, 12) + + # inputs = abnormal_input.view(-1, inputs.size(-1)).to(device) + # (val_batch_size * 12, 710) + inputs = abnormal_input.to(device) + gts = abnormal_gt2.view(-1, 1).to(device) + # (val_batch_size * 12, 1) + + pred = model(inputs) + # pred는 (val_batch_size * 12, 1) + + val_loss = criterion(pred, gts) + # if val_loss > 2: + # print(f"==>> pred: {pred}") + # print(f"==>> gts: {gts}") + # counter = patience + 1 + + pred_a = pred.view(val_batch_size, abnormal_input.size(1)) + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred > thr + gts_correct = gts > thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + + pred = (pred.squeeze()).detach().cpu().numpy() + pred_abnormal_np = np.zeros(abnormal_gt.size(1)) + + step = np.array([i for i in range(abnormal_input.size(1) + 1)]) + + for j in range(abnormal_input.size(1)): + pred_abnormal_np[step[j] * 16 : step[j + 1] * 16] = pred[j] + + # abnormal_gt2 = abnormal_gt2.squeeze().detach().cpu().numpy() + abnormal_gt = abnormal_gt.squeeze().detach().cpu().numpy() + + try: + # auc = 
roc_auc_score(y_true=abnormal_gt, y_score=pred_abnormal_np) + # auc = roc_auc_score(y_true=abnormal_gt2, y_score=pred) + + fpr, tpr, cut = roc_curve( + y_true=abnormal_gt, y_score=pred_abnormal_np + ) + precision, recall, cut2 = precision_recall_curve( + abnormal_gt, pred_abnormal_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_abnormal_np > thr + TP_and_FN = pred_positive[abnormal_gt > 0.9] + FP_and_TN = pred_positive[abnormal_gt < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / abnormal_input.size(1) + # normal + abnormal 24개와 다르게 abnormal 12개만 있음 -> /12 => 2/24 + total_loss += val_loss.item() + + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + # total_auc += 0 + error_count += 1 + # print("0~180 전부 0인 abnormal 영상 있음") + + val_n_mean_loss = total_n_loss / ( + len(normal_valid_loader) - error_n_count + ) + val_n_mean_MIL_loss = total_n_MIL_loss / ( + len(normal_valid_loader) - error_n_count + ) + + val_n_fpr = total_n_fpr / ((len(normal_valid_loader) - error_n_count)) + val_n_tpr = total_n_tpr / ((len(normal_valid_loader) - error_n_count)) + val_n_bthr = total_n_bthr / ((len(normal_valid_loader) - error_n_count)) + val_n_auc = total_n_auc / (len(normal_valid_loader) - error_n_count) + val_n_ap = total_n_ap / (len(normal_valid_loader) - error_n_count) + + val_n_accuracy = total_n_n_corrects / ( + (len(normal_valid_loader) - error_n_count) + ) + + val_mean_loss = (total_loss + total_n_loss) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + val_fpr = (total_fpr + total_n_fpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_tpr = (total_tpr + total_n_tpr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_bthr = (total_bthr + total_n_bthr) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_auc = (total_auc + total_n_auc) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_ap = (total_ap + total_n_ap) / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_accuracy = (total_n_corrects + total_n_n_corrects) / ( + (len(abnormal_valid_loader) - error_n_count - error_count) + ) + # for loop 한번에 abnormal 12, normal 12해서 24개 정답 확인 + + val_mean_normal_max = total_normal_max / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(normal_valid_loader) - error_n_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_n_count - error_count + ) + + if best_loss > val_mean_loss: + print( + 
f"Best performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + str_to_keep = f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + print(str_to_keep) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_loss": epoch_mean_loss, + "train_accuracy": epoch_accuracy, + "train_MIL_loss": epoch_mean_MIL_loss, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_n_loss": val_n_mean_loss, + "valid_n_MIL_loss": val_n_mean_MIL_loss, + "valid_n_fpr": val_n_fpr, + "valid_n_tpr": val_n_tpr, + "valid_n_bthr": val_n_bthr, + "valid_n_auc": val_n_auc, + "valid_n_ap": val_n_ap, + "valid_n_accuracy": val_n_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)} valid_n_loss: {round(val_n_mean_loss,4)} valid_n_MIL_loss: {round(val_n_mean_MIL_loss,4)}" + ) + print(f"valid_fpr: {val_fpr} valid_n_fpr: {val_n_fpr}") + print(f"valid_tpr: {val_tpr} valid_n_tpr: {val_n_tpr}") + print(f"valid_bthr: {val_bthr} valid_n_bthr: {val_n_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_n_auc: {val_n_auc:.4f}\nvalid_ap: {val_ap:.4f} valid_n_ap: {val_n_ap:.4f}\nvalid_accuracy: {val_accuracy:.2f} valid_n_accuracy: {val_n_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: 
{val_mean_abnormal_max} val_abnormal_mean: {val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(str_to_keep) + print(f"==>> total time: {total_time}") + + +def main(args): + if (args.wandb_run_name).startswith("MIL_nl_feat_enhancer_"): + print("train 1") + train(**args.__dict__) + elif (args.wandb_run_name).startswith("MIL_nl_BCEonly_feat_enhancer_"): + print("train 2") + train2(**args.__dict__) + else: + print("train 3") + train3(**args.__dict__) + + +if __name__ == "__main__": + args = parse_args() + + main(args) diff --git a/model/train/train_UCFC.py b/model/train/train_UCFC.py new file mode 100644 index 0000000..af14e89 --- /dev/null +++ b/model/train/train_UCFC.py @@ -0,0 +1,1335 @@ +import os +import os.path as osp +import random +from argparse import ArgumentParser +from datetime import datetime + +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import sklearn.metrics +import torch +import torch.nn as nn +import torch.nn.functional as F +import wandb +from classifier import WSAD, MILClassifier +from loss import MIL, LossComputer +from shop_dataset import NewAbnormalVMAE, NewNormalVMAE +from sklearn.metrics import precision_recall_curve, roc_auc_score, roc_curve +from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split +from tqdm import tqdm + +# from sklearn.preprocessing import MinMaxScaler + + + + + + + +def parse_args(): + parser = ArgumentParser() + + # Conventional args + parser.add_argument( + "--normal_root_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_NORMAL_NPY", + "../datapreprocess/npy/UCFCrime/normal", + ), + ) + # 학습 데이터 경로 + parser.add_argument( + "--abnormal_root_dir", + type=str, 
+ default=os.environ.get( + "SM_CHANNEL_ABNORMAL_NPY", + "../datapreprocess/npy/UCFCrime/abnormal", + ), + ) + parser.add_argument( + "--label_dir", + type=str, + default=os.environ.get( + "SM_CHANNEL_ABNORMAL_LABEL", + "../datapreprocess/npy/UCFCrime/test_anomalyv2.txt", + ), + ) + # abnormal 검증셋 npy, json파일 경로 + parser.add_argument( + "--model_dir", type=str, default=os.environ.get("SM_MODEL_DIR", "../pths") + ) + # pth 파일 저장 경로 + + parser.add_argument("--model_name", type=str, default="BNWVAD") + # import_module로 불러올 model name + + parser.add_argument("--len_feature", type=int, default=710) + # npy파일 feature length + parser.add_argument("--use_l2norm", action="store_true") + # npy feature l2 normalization 여부 + parser.add_argument("--num_segments", type=int, default=200) + # 영상 segment 개수 + + parser.add_argument("--resume_name", type=str, default="") + # resume 파일 이름 + + parser.add_argument("--model_size", type=str, default="small") + # VideoMAEv2 backbone 사이즈 = "small" or "base" + + parser.add_argument("--seed", type=int, default=666) + # random seed + + parser.add_argument( + "--device", default="cuda" if torch.cuda.is_available() else "cpu" + ) + parser.add_argument("--num_workers", type=int, default=0) + + parser.add_argument("--batch_size", type=int, default=30) + # parser.add_argument("--val_batch_size", type=int, default=1) + # parser.add_argument("--val_num_workers", type=int, default=0) + parser.add_argument("--learning_rate", type=float, default=0.0001) + parser.add_argument("--weight_decay", type=float, default=0.00005) + parser.add_argument("--max_epoch", type=int, default=1000) + + parser.add_argument("--save_interval", type=int, default=1) + parser.add_argument("--val_interval", type=int, default=1) + parser.add_argument("--w_normal", type=float, default=1.0) + parser.add_argument("--w_mpp", type=float, default=1.0) + parser.add_argument("--gt_thr", type=float, default=0.25) + parser.add_argument("--dist_thr", type=float, default=10) + + 
parser.add_argument("--ratio_sample", type=float, default=0.2) + parser.add_argument("--ratio_batch", type=float, default=0.4) + + parser.add_argument("--ratios", type=int, nargs="+", default=[16, 32]) + parser.add_argument("--kernel_sizes", type=int, nargs="+", default=[1, 1, 1]) + + parser.add_argument("--patience", type=int, default=100) + + # parser.add_argument("--mp", action="store_false") + # https://stackoverflow.com/questions/60999816/argparse-not-parsing-boolean-arguments + # mixed precision 사용할 지 여부 + + # parser.add_argument("--use_extra", action="store_false") + + # parser.add_argument("--wandb_mode", type=str, default="online") + parser.add_argument("--wandb_mode", type=str, default="disabled") + # wandb mode + parser.add_argument("--wandb_run_name", type=str, default="BNWVAD") + # wandb run name + + args = parser.parse_args() + + return args + + +def set_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # if use multi-GPU + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + np.random.seed(seed) + random.seed(seed) + + +def train_BNWVAD( + normal_root_dir, + abnormal_root_dir, + label_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + weight_decay, + max_epoch, + val_interval, + save_interval, + w_normal, + w_mpp, + gt_thr, + dist_thr, + len_feature, + use_l2norm, + num_segments, + ratio_sample, + ratio_batch, + ratios, + kernel_sizes, + patience, + resume_name, + seed, + # mp, + # use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size + + val_batch_size = 1 + val_num_workers = 0 + + # -- early stopping flag + patience = patience + counter = 0 + + # 데이터셋 + normal_train_dataset = 
NewNormalVMAE( + is_train=1, + model_size=model_size, + root=normal_root_dir, + num_segments=num_segments, + l2_norm=use_l2norm, + ) + # 800개 + normal_valid_dataset = NewNormalVMAE( + is_train=0, + model_size=model_size, + root=normal_root_dir, + num_segments=num_segments, + l2_norm=use_l2norm, + ) + # 149개 + + normal_train_loader = DataLoader( + dataset=normal_train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + normal_valid_loader = DataLoader( + dataset=normal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = NewAbnormalVMAE( + is_train=1, + model_size=model_size, + root=abnormal_root_dir, + label_root=label_dir, + num_segments=num_segments, + l2_norm=use_l2norm, + ) + # 809개 + abnormal_valid_dataset = NewAbnormalVMAE( + is_train=0, + model_size=model_size, + root=abnormal_root_dir, + label_root=label_dir, + num_segments=num_segments, + gt_thr=gt_thr, + l2_norm=use_l2norm, + ) + # 140개 + + abnormal_train_loader = DataLoader( + dataset=abnormal_train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the model + model = WSAD( + input_size=len_feature, + ratio_sample=ratio_sample, + ratio_batch=ratio_batch, + ratios=ratios, + kernel_sizes=kernel_sizes, + ) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), 
lr=learning_rate, weight_decay=0.0010000000474974513) + # 1e-6 => 0.0010000000474974513 + optimizer = torch.optim.Adam( + model.parameters(), + lr=learning_rate, + betas=(0.9, 0.999), + weight_decay=weight_decay, + ) + # optimizer = torch.optim.AdamW( + # model.parameters(), lr=learning_rate, betas=(0.9, 0.999), weight_decay=weight_decay + # ) + # optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MPP_criterion = LossComputer(w_normal=w_normal, w_mpp=w_mpp) + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "MPP", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(abnormal_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_MPP_loss = 0 + epoch_norm_loss = 0 + epoch_MPP_and_norm_loss = 0 + + epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + for step, abnormal_input in tqdm( + enumerate(abnormal_train_loader), + total=total_batches, + ): + if step % len(normal_train_loader) == 0: + norm_train_iter = iter(normal_train_loader) + # 중복 추출하더라도 정상, 이상 영상 1대1 대응 loop 끝까지 유지 + + normal_input = next(norm_train_iter) + + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + input = torch.cat((normal_input, abnormal_input), dim=1) + # @@@@ BN-WVAD는 정상 영상 먼저 @@@@ + # inputs는 (batch_size, 2 * 
num_segments, 710) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + # batch_size = input.shape[0] + + input = input.to(device) + + optimizer.zero_grad() + + pred_result = model(input, flag="Train") + # pred_result["pre_normal_scores"]: normal_scores[0 : b // 2], + # pred_result["bn_results"]: bn_results, + # pred_result["normal_scores"]: normal_scores, + # pred_result["scores"]: distance_sum * normal_scores, + + pred = pred_result["scores"].view(-1, 1) + # => pred는 (batch_size * 2 * num_segments, 1) + + MPP_and_norm_loss, loss_dict = MPP_criterion(pred_result) + + MPP_and_norm_loss.backward() + + # loss.backward() + optimizer.step() + with torch.no_grad(): + pred_n = pred.view(batch_size, 2, num_segments)[:, 0, :] + pred_a = pred.view(batch_size, 2, num_segments)[:, 1, :] + + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_n_mean = torch.mean(pred_n) + pred_a_mean = torch.mean(pred_a) + + epoch_MPP_loss += loss_dict["mpp_loss"].item() + epoch_norm_loss += loss_dict["normal_loss"].item() + epoch_MPP_and_norm_loss += MPP_and_norm_loss.item() + + epoch_normal_max += pred_n_max.item() + epoch_normal_mean += pred_n_mean.item() + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + epoch_mean_MPP_loss = epoch_MPP_loss / total_batches + epoch_mean_norm_loss = epoch_norm_loss / total_batches + epoch_mean_MPP_and_norm_loss = epoch_MPP_and_norm_loss / total_batches + + epoch_mean_normal_max = epoch_normal_max / total_batches + epoch_mean_normal_mean = epoch_normal_mean / total_batches + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print(f"==>> epoch {epoch+1} train_time: {train_time}") + print( + f"MPP_loss: 
{round(epoch_mean_MPP_loss,4)} norm_loss: {round(epoch_mean_norm_loss,4)} MPP+norm_loss: {round(epoch_mean_MPP_and_norm_loss,4)}" + ) + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 loss를 출력하고 best model을 저장합니다. + if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_loss = 0 + + total_n_corrects = 0 + + total_ab_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + total_ab_fpr = 0 + total_ab_tpr = 0 + total_ab_bthr = 0 + total_ab_auc = 0 + total_ab_ap = 0 + + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + normal_inputs = next(norm_valid_iter) + + normal_input, normal_gt = normal_inputs + # (val_batch_size, num_segments, 710), (val_batch_size, num_segments) + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, num_segments, 710), (val_batch_size, num_segments) + + inputs = torch.cat((normal_input, abnormal_input), dim=1) + gts = torch.cat((normal_gt, abnormal_gt), dim=1) + # inputs는 (val_batch_size, 2 * num_segments, 710), gts는 (val_batch_size, 2 
* num_segments) + + inputs = inputs.to(device) + # (val_batch_size, 2 * num_segments, 710) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 2 * num_segments, 1) + + pred_result = model(inputs, flag="Eval_MPP") + # pred_result["normal_scores"]: normal_scores, + # pred_result["scores"]: distance_sum * normal_scores, + # breakpoint() + pred_acc = pred_result["normal_scores"].view(-1, 1) + pred = pred_result["scores"].view(-1, 1) + # pred는(batch_size * 2 * num_segments, 1) + + val_loss = criterion(pred_acc, gts) + + pred_n = pred.view(val_batch_size, 2, num_segments)[:, 0, :] + pred_a = pred.view(val_batch_size, 2, num_segments)[:, 1, :] + + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + + pred_n_mean = torch.mean(pred_n) + pred_a_mean = torch.mean(pred_a) + + pred_correct = pred > dist_thr + gts_correct = gts # > gt_thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + ab_corrects = torch.sum(pred_correct[num_segments:]).item() + + pred_np = (pred.squeeze()).detach().cpu().numpy() + gts_np = (gts.squeeze()).detach().cpu().numpy() + # pred_np, gts_np 둘다 (batch_size * 2 * num_segments) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gts_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gts_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > dist_thr + TP_and_FN = pred_positive[gts_np > 0.9] + FP_and_TN = pred_positive[gts_np < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / (num_segments * 2) + + 
ab_fpr, ab_tpr, ab_cut = roc_curve( + y_true=gts_np[num_segments:], y_score=pred_np[num_segments:] + ) + ab_precision, ab_recall, ab_cut2 = precision_recall_curve( + gts_np[num_segments:], pred_np[num_segments:] + ) + + ab_auc = sklearn.metrics.auc(ab_fpr, ab_tpr) + ab_ap = sklearn.metrics.auc(ab_recall, ab_precision) + + ab_diff = ab_tpr - ab_fpr + ab_diff_idx = np.argmax(ab_diff) + ab_best_thr = ab_cut[ab_diff_idx] + + ab_pred_positive = pred_positive[num_segments:] + ab_TP_and_FN = ab_pred_positive[gts_np[num_segments:] > 0.9] + ab_FP_and_TN = ab_pred_positive[gts_np[num_segments:] < 0.1] + + total_ab_fpr += np.sum(ab_FP_and_TN) / len(ab_FP_and_TN) + total_ab_tpr += np.sum(ab_TP_and_FN) / len(ab_TP_and_FN) + total_ab_bthr += ab_best_thr if ab_diff_idx != 0 else 1 + + total_ab_auc += ab_auc + total_ab_ap += ab_ap + total_ab_n_corrects += ab_corrects / (num_segments) + + total_loss += val_loss.item() + + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." 
+ # ) + # total_auc += 0 + error_count += 1 + # print("gt가 전부 0인 abnormal 영상 있음") + + val_mean_loss = total_loss / (len(abnormal_valid_loader) - error_count) + + val_fpr = total_fpr / (len(abnormal_valid_loader) - error_count) + val_tpr = total_tpr / (len(abnormal_valid_loader) - error_count) + val_bthr = total_bthr / (len(abnormal_valid_loader) - error_count) + val_auc = total_auc / (len(abnormal_valid_loader) - error_count) + val_ap = total_ap / (len(abnormal_valid_loader) - error_count) + val_accuracy = total_n_corrects / ( + (len(abnormal_valid_loader) - error_count) + ) + + val_ab_fpr = total_ab_fpr / (len(abnormal_valid_loader) - error_count) + val_ab_tpr = total_ab_tpr / (len(abnormal_valid_loader) - error_count) + val_ab_bthr = total_ab_bthr / (len(abnormal_valid_loader) - error_count) + val_ab_auc = total_ab_auc / (len(abnormal_valid_loader) - error_count) + val_ab_ap = total_ab_ap / (len(abnormal_valid_loader) - error_count) + val_ab_accuracy = total_ab_n_corrects / ( + (len(abnormal_valid_loader) - error_count) + ) + + val_mean_normal_max = total_normal_max / ( + len(abnormal_valid_loader) - error_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(abnormal_valid_loader) - error_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + len(abnormal_valid_loader) - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_count + ) + + if best_loss > val_mean_loss: + print( + f"Best loss performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? 
+ } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + str_to_keep = f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + print(str_to_keep) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_MPP_loss": epoch_mean_MPP_loss, + "train_norm_loss": epoch_mean_norm_loss, + "train_MPP+norm_loss": epoch_mean_MPP_and_norm_loss, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_ab_fpr": val_ab_fpr, + "valid_ab_tpr": val_ab_tpr, + "valid_ab_bthr": val_ab_bthr, + "valid_ab_auc": val_ab_auc, + "valid_ab_ap": val_ab_ap, + "valid_ab_accuracy": val_ab_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + 
epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)}" + ) + # print( + # f"valid_n_MPP_loss: {round(val_n_mean_MPP_loss,4)} valid_n_norm_loss: {round(val_n_mean_norm_loss,4)} valid_n_MPP+norm_loss: {round(val_n_mean_MPP_and_norm_loss,4)}" + # ) + print(f"valid_fpr: {val_fpr} valid_tpr: {val_tpr} valid_bthr: {val_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_ap: {val_ap:.4f} valid_accuracy: {val_accuracy:.2f}" + ) + print( + f"valid_ab_fpr: {val_ab_fpr} valid_ab_tpr: {val_ab_tpr} valid_ab_bthr: {val_ab_bthr}" + ) + print( + f"valid_ab_auc: {val_ab_auc:.4f} valid_ab_ap: {val_ab_ap:.4f} valid_ab_accuracy: {val_ab_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: {val_mean_abnormal_max} val_abnormal_mean: {val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(str_to_keep) + print(f"==>> total time: {total_time}") + + +def train_MIL( + normal_root_dir, + abnormal_root_dir, + label_dir, + model_dir, + model_name, + model_size, + device, + num_workers, + batch_size, + # val_num_workers, + # val_batch_size, + learning_rate, + weight_decay, + max_epoch, + val_interval, + save_interval, + w_normal, + w_mpp, + gt_thr, + dist_thr, + len_feature, + use_l2norm, + num_segments, + ratio_sample, + ratio_batch, + ratios, + kernel_sizes, + patience, + resume_name, + seed, + # mp, + # use_extra, + wandb_mode, + wandb_run_name, +): + + time_start = datetime.now() + + train_start = time_start.strftime("%Y%m%d_%H%M%S") + + set_seed(seed) + + if not osp.exists(model_dir): + os.makedirs(model_dir) + + batch_size = batch_size + + val_batch_size = 1 + val_num_workers = 0 + + # -- early 
stopping flag + patience = patience + counter = 0 + + # 데이터셋 + normal_train_dataset = NewNormalVMAE( + is_train=1, + model_size=model_size, + root=normal_root_dir, + num_segments=num_segments, + l2_norm=use_l2norm, + ) + # 800개 + normal_valid_dataset = NewNormalVMAE( + is_train=0, + model_size=model_size, + root=normal_root_dir, + num_segments=num_segments, + l2_norm=use_l2norm, + ) + # 149개 + + normal_train_loader = DataLoader( + dataset=normal_train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + normal_valid_loader = DataLoader( + dataset=normal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + abnormal_train_dataset = NewAbnormalVMAE( + is_train=1, + model_size=model_size, + root=abnormal_root_dir, + label_root=label_dir, + num_segments=num_segments, + l2_norm=use_l2norm, + ) + # 809개 + abnormal_valid_dataset = NewAbnormalVMAE( + is_train=0, + model_size=model_size, + root=abnormal_root_dir, + label_root=label_dir, + num_segments=num_segments, + gt_thr=gt_thr, + l2_norm=use_l2norm, + ) + # 140개 + + abnormal_train_loader = DataLoader( + dataset=abnormal_train_dataset, + batch_size=batch_size, + shuffle=True, + drop_last=True, + num_workers=num_workers, + ) + + abnormal_valid_loader = DataLoader( + dataset=abnormal_valid_dataset, + batch_size=val_batch_size, + shuffle=False, + num_workers=val_num_workers, + ) + + data_load_end = datetime.now() + data_load_time = data_load_end - time_start + data_load_time = str(data_load_time).split(".")[0] + print(f"==>> {model_size} data_load_time: {data_load_time}") + + # Initialize the model + model = MILClassifier(drop_p=0.3) + + load_dict = None + + if resume_name: + load_dict = torch.load( + osp.join(model_dir, f"{resume_name}.pth"), map_location="cpu" + ) + model.load_state_dict(load_dict["model_state_dict"]) + + model.to(device) + + # optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, 
weight_decay=0.0010000000474974513) + # 1e-6 => 0.0010000000474974513 + optimizer = torch.optim.Adagrad( + model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513 + ) + # optimizer = torch.optim.AdamW( + # model.parameters(), lr=learning_rate, betas=(0.9, 0.999), weight_decay=weight_decay + # ) + # optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate, weight_decay=0.0010000000474974513) + + scheduler = torch.optim.lr_scheduler.MultiStepLR( + optimizer, milestones=[1000, 1500], gamma=0.5 + ) + + if resume_name: + optimizer.load_state_dict(load_dict["optimizer_state_dict"]) + scheduler.load_state_dict(load_dict["scheduler_state_dict"]) + # scaler.load_state_dict(load_dict["scaler_state_dict"]) + + criterion = nn.BCELoss() + MIL_criterion = MIL + + print(f"Start training..") + + wandb.init( + project="VAD", + entity="pao-kim-si-woong", + config={ + "lr": learning_rate, + "dataset": "무인매장", + "n_epochs": max_epoch, + "loss": "MIL", + "notes": "VAD 실험", + }, + name=wandb_run_name + "_" + train_start, + mode=wandb_mode, + ) + + wandb.watch((model,)) + + best_loss = np.inf + best_auc = 0 + + total_batches = len(abnormal_train_loader) + + for epoch in range(max_epoch): + model.train() + + epoch_start = datetime.now() + + epoch_MIL_loss = 0 + + epoch_abnormal_max = 0 + epoch_abnormal_mean = 0 + epoch_normal_max = 0 + epoch_normal_mean = 0 + + for step, abnormal_input in tqdm( + enumerate(abnormal_train_loader), + total=total_batches, + ): + if step % len(normal_train_loader) == 0: + norm_train_iter = iter(normal_train_loader) + # 중복 추출하더라도 정상, 이상 영상 1대1 대응 loop 끝까지 유지 + + normal_input = next(norm_train_iter) + + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + input = torch.cat((abnormal_input, normal_input), dim=1) + # @@@@ MIL은 이상 영상 먼저 @@@@ + # inputs는 (batch_size, 2 * num_segments, 710) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + # batch_size = 
input.shape[0] + + input = input.to(device) + + optimizer.zero_grad() + + pred = model(input) + # pred는 (batch_size * 2 * num_segments, 1) + + MIL_loss = MIL_criterion(pred, batch_size, num_segments) + + MIL_loss.backward() + + # loss.backward() + optimizer.step() + with torch.no_grad(): + pred_a = pred.view(batch_size, 2, num_segments)[:, 0, :] + pred_n = pred.view(batch_size, 2, num_segments)[:, 1, :] + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + pred_n_mean = torch.mean(pred_n) + + epoch_MIL_loss += MIL_loss.item() + + epoch_normal_max += pred_n_max.item() + epoch_normal_mean += pred_n_mean.item() + epoch_abnormal_max += pred_a_max.item() + epoch_abnormal_mean += pred_a_mean.item() + + epoch_mean_MIL_loss = epoch_MIL_loss / total_batches + + epoch_mean_normal_max = epoch_normal_max / total_batches + epoch_mean_normal_mean = epoch_normal_mean / total_batches + epoch_mean_abnormal_max = epoch_abnormal_max / total_batches + epoch_mean_abnormal_mean = epoch_abnormal_mean / total_batches + + train_end = datetime.now() + train_time = train_end - epoch_start + train_time = str(train_time).split(".")[0] + print(f"==>> epoch {epoch+1} train_time: {train_time}") + print(f"MIL_loss: {round(epoch_mean_MIL_loss,4)}") + print( + f"==>> abnormal_max_mean: {epoch_mean_abnormal_max} abnormal_mean: {epoch_mean_abnormal_mean}" + ) + print( + f"==>> normal_max_mean: {epoch_mean_normal_max} normal_mean: {epoch_mean_normal_mean}" + ) + + if (epoch + 1) % save_interval == 0: + + ckpt_fpath = osp.join(model_dir, f"{model_name}_{train_start}_latest.pth") + + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + "optimizer_state_dict": optimizer.state_dict(), + "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + } + + torch.save(states, ckpt_fpath) + + # validation 주기에 따라 
loss를 출력하고 best model을 저장합니다. + if (epoch + 1) % val_interval == 0: + + print(f"Start validation #{epoch+1:2d}") + model.eval() + + with torch.no_grad(): + total_loss = 0 + + total_n_corrects = 0 + + total_ab_n_corrects = 0 + + total_fpr = 0 + total_tpr = 0 + total_bthr = 0 + total_auc = 0 + total_ap = 0 + + total_ab_fpr = 0 + total_ab_tpr = 0 + total_ab_bthr = 0 + total_ab_auc = 0 + total_ab_ap = 0 + + error_count = 0 + + total_abnormal_max = 0 + total_abnormal_mean = 0 + total_normal_max = 0 + total_normal_mean = 0 + + norm_valid_iter = iter(normal_valid_loader) + # iterator를 여기서 매번 새로 할당해줘야 iterator가 다시 처음부터 작동 + + for step, abnormal_inputs in tqdm( + enumerate(abnormal_valid_loader), total=len(abnormal_valid_loader) + ): + normal_inputs = next(norm_valid_iter) + + normal_input, normal_gt = normal_inputs + # (val_batch_size, num_segments, 710), (val_batch_size, num_segments) + abnormal_input, abnormal_gt = abnormal_inputs + # (val_batch_size, num_segments, 710), (val_batch_size, num_segments) + + inputs = torch.cat((abnormal_input, normal_input), dim=1) + gts = torch.cat((abnormal_gt, normal_gt), dim=1) + # @@@@ MIL은 이상 영상 먼저 @@@@ + # inputs는 (val_batch_size, 2 * num_segments, 710), gts는 (val_batch_size, 2 * num_segments) + + inputs = inputs.to(device) + # (val_batch_size, 2 * num_segments, 710) + gts = gts.view(-1, 1).to(device) + # (val_batch_size * 2 * num_segments, 1) + + pred = model(inputs) + # pred는 (val_batch_size * 2 * num_segments, 1) + + val_loss = criterion(pred, gts) + + pred_a = pred.view(val_batch_size, 2, num_segments)[:, 0, :] + pred_n = pred.view(val_batch_size, 2, num_segments)[:, 1, :] + + pred_a_max = torch.mean(torch.max(pred_a, dim=-1)[0]) + pred_n_max = torch.mean(torch.max(pred_n, dim=-1)[0]) + + pred_a_mean = torch.mean(pred_a) + pred_n_mean = torch.mean(pred_n) + + pred_correct = pred > dist_thr + gts_correct = gts # > gt_thr + + pred_correct = pred_correct == gts_correct + corrects = torch.sum(pred_correct).item() + ab_corrects = 
torch.sum(pred_correct[:num_segments]).item() + # @@@@ MIL은 이상 영상 먼저 @@@@ + + pred_np = (pred.squeeze()).detach().cpu().numpy() + gts_np = (gts.squeeze()).detach().cpu().numpy() + # pred_np, gts_np 둘다 (batch_size * 2 * num_segments) + + try: + # auc = roc_auc_score(y_true=gt_np, y_score=pred_np) + # auc = roc_auc_score(y_true=gt_np, y_score=pred) + + fpr, tpr, cut = roc_curve(y_true=gts_np, y_score=pred_np) + precision, recall, cut2 = precision_recall_curve( + gts_np, pred_np + ) + + auc = sklearn.metrics.auc(fpr, tpr) + ap = sklearn.metrics.auc(recall, precision) + + diff = tpr - fpr + diff_idx = np.argmax(diff) + best_thr = cut[diff_idx] + + pred_positive = pred_np > dist_thr + TP_and_FN = pred_positive[gts_np > 0.9] + FP_and_TN = pred_positive[gts_np < 0.1] + + total_fpr += np.sum(FP_and_TN) / len(FP_and_TN) + total_tpr += np.sum(TP_and_FN) / len(TP_and_FN) + total_bthr += best_thr if diff_idx != 0 else 1 + + total_auc += auc + total_ap += ap + total_n_corrects += corrects / (num_segments * 2) + + ab_fpr, ab_tpr, ab_cut = roc_curve( + y_true=gts_np[:num_segments], y_score=pred_np[:num_segments] + ) + # @@@@ MIL은 이상 영상 먼저 @@@@ + ab_precision, ab_recall, ab_cut2 = precision_recall_curve( + gts_np[:num_segments], pred_np[:num_segments] + ) + # @@@@ MIL은 이상 영상 먼저 @@@@ + + ab_auc = sklearn.metrics.auc(ab_fpr, ab_tpr) + ab_ap = sklearn.metrics.auc(ab_recall, ab_precision) + + ab_diff = ab_tpr - ab_fpr + ab_diff_idx = np.argmax(ab_diff) + ab_best_thr = ab_cut[ab_diff_idx] + + ab_pred_positive = pred_positive[:num_segments] + ab_TP_and_FN = ab_pred_positive[gts_np[:num_segments] > 0.9] + ab_FP_and_TN = ab_pred_positive[gts_np[:num_segments] < 0.1] + # @@@@ MIL은 이상 영상 먼저 @@@@ + + total_ab_fpr += np.sum(ab_FP_and_TN) / len(ab_FP_and_TN) + total_ab_tpr += np.sum(ab_TP_and_FN) / len(ab_TP_and_FN) + total_ab_bthr += ab_best_thr if ab_diff_idx != 0 else 1 + + total_ab_auc += ab_auc + total_ab_ap += ab_ap + total_ab_n_corrects += ab_corrects / (num_segments) + + total_loss += 
val_loss.item() + + total_normal_max += pred_n_max.item() + total_normal_mean += pred_n_mean.item() + total_abnormal_max += pred_a_max.item() + total_abnormal_mean += pred_a_mean.item() + + except ValueError: + # print( + # "ValueError: Only one class present in y_true. ROC AUC score is not defined in that case." + # ) + # total_auc += 0 + error_count += 1 + # print("gt가 전부 0인 abnormal 영상 있음") + + val_mean_loss = total_loss / (len(abnormal_valid_loader) - error_count) + + val_fpr = total_fpr / (len(abnormal_valid_loader) - error_count) + val_tpr = total_tpr / (len(abnormal_valid_loader) - error_count) + val_bthr = total_bthr / (len(abnormal_valid_loader) - error_count) + val_auc = total_auc / (len(abnormal_valid_loader) - error_count) + val_ap = total_ap / (len(abnormal_valid_loader) - error_count) + val_accuracy = total_n_corrects / ( + (len(abnormal_valid_loader) - error_count) + ) + + val_ab_fpr = total_ab_fpr / (len(abnormal_valid_loader) - error_count) + val_ab_tpr = total_ab_tpr / (len(abnormal_valid_loader) - error_count) + val_ab_bthr = total_ab_bthr / (len(abnormal_valid_loader) - error_count) + val_ab_auc = total_ab_auc / (len(abnormal_valid_loader) - error_count) + val_ab_ap = total_ab_ap / (len(abnormal_valid_loader) - error_count) + val_ab_accuracy = total_ab_n_corrects / ( + (len(abnormal_valid_loader) - error_count) + ) + + val_mean_normal_max = total_normal_max / ( + len(abnormal_valid_loader) - error_count + ) + val_mean_normal_mean = total_normal_mean / ( + len(abnormal_valid_loader) - error_count + ) + val_mean_abnormal_max = total_abnormal_max / ( + len(abnormal_valid_loader) - error_count + ) + val_mean_abnormal_mean = total_abnormal_mean / ( + len(abnormal_valid_loader) - error_count + ) + + if best_loss > val_mean_loss: + print( + f"Best loss performance at epoch: {epoch + 1}, {best_loss:.4f} -> {val_mean_loss:.4f}" + ) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": 
model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best.pth" + ) + torch.save(states, best_ckpt_fpath) + best_loss = val_mean_loss + # counter = 0 + # else: + # counter += 1 + + if best_auc < val_auc: + str_to_keep = f"Best auc performance at epoch: {epoch + 1}, {best_auc:.4f} -> {val_auc:.4f}" + print(str_to_keep) + print(f"Save model in {model_dir}") + states = { + "epoch": epoch, + "model_name": model_name, + "model_state_dict": model.state_dict(), # 모델의 state_dict 저장 + # "optimizer_state_dict": optimizer.state_dict(), + # "scheduler_state_dict": scheduler.state_dict(), + # "scaler_state_dict": scaler.state_dict(), + # best.pth는 inference에서만 쓰기? + } + + best_ckpt_fpath = osp.join( + model_dir, f"{model_name}_{train_start}_best_auc.pth" + ) + torch.save(states, best_ckpt_fpath) + best_auc = val_auc + counter = 0 + else: + counter += 1 + + new_wandb_metric_dict = { + "train_MIL_loss": epoch_mean_MIL_loss, + "valid_loss": val_mean_loss, + "valid_fpr": val_fpr, + "valid_tpr": val_tpr, + "valid_bthr": val_bthr, + "valid_auc": val_auc, + "valid_ap": val_ap, + "valid_accuracy": val_accuracy, + "valid_ab_fpr": val_ab_fpr, + "valid_ab_tpr": val_ab_tpr, + "valid_ab_bthr": val_ab_bthr, + "valid_ab_auc": val_ab_auc, + "valid_ab_ap": val_ab_ap, + "valid_ab_accuracy": val_ab_accuracy, + "learning_rate": scheduler.get_last_lr()[0], + "train_abnormal_max_mean": epoch_mean_abnormal_max, + "train_abnormal_mean": epoch_mean_abnormal_mean, + "train_normal_max_mean": epoch_mean_normal_max, + "train_normal_mean": epoch_mean_normal_mean, + "valid_abnormal_max_mean": val_mean_abnormal_max, + "valid_abnormal_mean": val_mean_abnormal_mean, + "valid_normal_max_mean": val_mean_normal_max, + "valid_normal_mean": val_mean_normal_mean, + } + + 
wandb.log(new_wandb_metric_dict) + + scheduler.step() + + epoch_end = datetime.now() + epoch_time = epoch_end - epoch_start + epoch_time = str(epoch_time).split(".")[0] + print( + f"==>> epoch {epoch+1} time: {epoch_time}\nvalid_loss: {round(val_mean_loss,4)}" + ) + # print( + # f"valid_n_MPP_loss: {round(val_n_mean_MPP_loss,4)} valid_n_norm_loss: {round(val_n_mean_norm_loss,4)} valid_n_MPP+norm_loss: {round(val_n_mean_MPP_and_norm_loss,4)}" + # ) + print(f"valid_fpr: {val_fpr} valid_tpr: {val_tpr} valid_bthr: {val_bthr}") + print( + f"valid_auc: {val_auc:.4f} valid_ap: {val_ap:.4f} valid_accuracy: {val_accuracy:.2f}" + ) + print( + f"valid_ab_fpr: {val_ab_fpr} valid_ab_tpr: {val_ab_tpr} valid_ab_bthr: {val_ab_bthr}" + ) + print( + f"valid_ab_auc: {val_ab_auc:.4f} valid_ab_ap: {val_ab_ap:.4f} valid_ab_accuracy: {val_ab_accuracy:.2f}" + ) + print( + f"==>> val_abnormal_max_mean: {val_mean_abnormal_max} val_abnormal_mean: {val_mean_abnormal_mean}" + ) + print( + f"==>> val_normal_max_mean: {val_mean_normal_max} val_normal_mean: {val_mean_normal_mean}" + ) + print(f"==>> error_count: {error_count}") + + if counter > patience: + print("Early Stopping...") + break + + time_end = datetime.now() + total_time = time_end - time_start + total_time = str(total_time).split(".")[0] + print(str_to_keep) + print(f"==>> total time: {total_time}") + + +def main(args): + if (args.wandb_run_name).split("_")[0] == "BNWVAD": + train_BNWVAD(**args.__dict__) + else: + train_MIL(**args.__dict__) + + +if __name__ == "__main__": + args = parse_args() + + main(args) diff --git a/model/vmae/__init__.py b/model/vmae/__init__.py new file mode 100644 index 0000000..fa5812e --- /dev/null +++ b/model/vmae/__init__.py @@ -0,0 +1,29 @@ +from .classifier import WSAD, MILClassifier +from .modeling_finetune import ( + vit_base_patch16_224, + vit_giant_patch14_224, + vit_huge_patch16_224, + vit_large_patch16_224, + vit_small_patch16_224, +) +from .modeling_pretrain import ( + 
pretrain_videomae_base_patch16_224, + pretrain_videomae_giant_patch14_224, + pretrain_videomae_huge_patch16_224, + pretrain_videomae_large_patch16_224, + pretrain_videomae_small_patch16_224, +) + +__all__ = [ + "pretrain_videomae_small_patch16_224", + "pretrain_videomae_base_patch16_224", + "pretrain_videomae_large_patch16_224", + "pretrain_videomae_huge_patch16_224", + "pretrain_videomae_giant_patch14_224", + "vit_small_patch16_224", + "vit_base_patch16_224", + "vit_large_patch16_224", + "vit_huge_patch16_224", + "vit_giant_patch14_224", + "MILClassifier", +] diff --git a/model/vmae/classifier.py b/model/vmae/classifier.py new file mode 100644 index 0000000..a626184 --- /dev/null +++ b/model/vmae/classifier.py @@ -0,0 +1,529 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange + + +class MILClassifier(nn.Module): + def __init__(self, input_dim=710, drop_p=0.0): + super().__init__() + # self.embedding = Temporal(input_dim, 512) + # self.selfatt = Transformer(512, 2, 4, 128, 512, dropout=0) + self.classifier = nn.Sequential( + nn.Linear(input_dim, 512), + # nn.Linear(512, 512), + # nn.BatchNorm1d(512), + nn.ReLU(), + nn.Dropout(drop_p), + nn.Linear(512, 512), + # nn.BatchNorm1d(1024), + nn.ReLU(), + nn.Dropout(drop_p), + # nn.Linear(1024, 512), + # # nn.BatchNorm1d(512), + # nn.ReLU(), + # nn.Dropout(drop_p), + nn.Linear(512, 32), + # nn.BatchNorm1d(32), + nn.ReLU(), + nn.Dropout(drop_p), + nn.Linear(32, 1), + nn.Sigmoid(), + ) + + self.drop_p = drop_p + self.weight_init() + + def weight_init(self): + # for layer in self.classifier: + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_normal_(m.weight) + elif isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def forward(self, x): + + # x = self.embedding(x) + # x = self.selfatt(x) + + x = x.view(-1, x.size(-1)) + + x = self.classifier(x) + + return x + + +class NormalHead(nn.Module): + def 
__init__(self, in_channel=512, ratios=[16, 32], kernel_sizes=[1, 1, 1]): + super(NormalHead, self).__init__() + self.ratios = ratios + # 기본값 [16, 32] + self.kernel_sizes = kernel_sizes + # 기본값 [1, 1, 1] + + self.build_layers(in_channel) + + def build_layers(self, in_channel): + ratio_1, ratio_2 = self.ratios + self.conv1 = nn.Conv1d( + in_channel, + in_channel // ratio_1, + self.kernel_sizes[0], + 1, + self.kernel_sizes[0] // 2, + ) + # stride는 1, padding은 kernel_size // 2로 두면 + # (input_length - kernel_size + 2 * (kernel_size // 2)) + 1 == input_length + # => 길이 유지 + self.bn1 = nn.BatchNorm1d(in_channel // ratio_1) + self.conv2 = nn.Conv1d( + in_channel // ratio_1, + in_channel // ratio_2, + self.kernel_sizes[1], + 1, + self.kernel_sizes[1] // 2, + ) + self.bn2 = nn.BatchNorm1d(in_channel // ratio_2) + self.conv3 = nn.Conv1d( + in_channel // ratio_2, 1, self.kernel_sizes[2], 1, self.kernel_sizes[2] // 2 + ) + self.act = nn.ReLU() + self.sigmoid = nn.Sigmoid() + + self.bns = [self.bn1, self.bn2] + + def forward(self, x): + """ + x: BN * C * T + return BN * C // 64 * T and BN * 1 * T + """ + outputs = [] + x = self.conv1(x) + outputs.append(x) + x = self.conv2(self.act(self.bn1(x))) + outputs.append(x) + x = self.sigmoid(self.conv3(self.act(self.bn2(x)))) + outputs.append(x) + return outputs + + +def pair(t): + return t if isinstance(t, tuple) else (t, t) + + +class PreNorm(nn.Module): + def __init__(self, dim, fn): + super().__init__() + self.norm = nn.LayerNorm(dim) + self.fn = fn + + def forward(self, x, **kwargs): + return self.fn(self.norm(x), **kwargs) + + +class FeedForward(nn.Module): + def __init__(self, dim, hidden_dim, dropout=0.0): + super().__init__() + self.net = nn.Sequential( + nn.Linear(dim, hidden_dim), + nn.GELU(), + nn.Dropout(dropout), + nn.Linear(hidden_dim, dim), + nn.Dropout(dropout), + ) + + def forward(self, x): + return self.net(x) + + +class Attention(nn.Module): + def __init__(self, dim, heads=8, dim_head=64, dropout=0.0): + 
super().__init__() + inner_dim = dim_head * heads + project_out = not (heads == 1 and dim_head == dim) + + self.heads = heads + self.scale = dim_head**-0.5 + + self.attend = nn.Softmax(dim=-1) + self.to_qkv = nn.Linear(dim, inner_dim * 4, bias=False) + + self.to_out = ( + nn.Sequential(nn.Linear(2 * inner_dim, dim), nn.Dropout(dropout)) + if project_out + else nn.Identity() + ) + + def forward(self, x): + b, n, d = x.size() + qkvt = self.to_qkv(x).chunk(4, dim=-1) + q, k, v, t = map( + lambda t: rearrange(t, "b n (h d) -> b h n d", h=self.heads), qkvt + ) + + dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale + + attn1 = self.attend(dots) + + tmp_ones = torch.ones(n).cuda() + tmp_n = torch.linspace(1, n, n).cuda() + tg_tmp = torch.abs(tmp_n * tmp_ones - tmp_n.view(-1, 1)) + attn2 = torch.exp(-tg_tmp / torch.exp(torch.tensor(1.0))) + attn2 = ( + (attn2 / attn2.sum(-1)) + .unsqueeze(0) + .unsqueeze(1) + .repeat(b, self.heads, 1, 1) + ) + + out = torch.cat([torch.matmul(attn1, v), torch.matmul(attn2, t)], dim=-1) + out = rearrange(out, "b h n d -> b n (h d)") + return self.to_out(out) + + +class Transformer(nn.Module): + def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.0): + super().__init__() + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList( + [ + PreNorm( + dim, + Attention( + dim, heads=heads, dim_head=dim_head, dropout=dropout + ), + ), + PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)), + ] + ) + ) + + def forward(self, x): + for attn, ff in self.layers: + x = attn(x) + x + x = ff(x) + x + return x + + +class Temporal(nn.Module): + # Temporal convolutional network + def __init__(self, input_size, out_size): + super(Temporal, self).__init__() + self.conv_1 = nn.Sequential( + nn.Conv1d( + in_channels=input_size, + out_channels=out_size, + kernel_size=3, + stride=1, + padding=1, + ), + nn.ReLU(), + ) + + def forward(self, x): + # x는 (batch * n crops, t snippets, d feature dim) + # 영상 1개를 
t개의 snippet(토막)으로 나누고 각 snippet은 d 차원 feature 벡터 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # permute를 하지 않고 conv1d를 하면 t가 채널축 + # => conv 특성상 1 entry 계산할 때 (t, 3)사이즈 필터를 곱해서 계산 + # => 이 entry는 각 snippet의 feature는 3개만 보지만 시간축(t)으로는 영상 전체를 보게 된다 + # => 마치 과거, 현재, 미래 정보를 다보고 계산하는 것과 마찬가지 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + x = x.permute(0, 2, 1) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # permute를 하게 되면 (batch * n crops, d feature dim, t snippets) + # 이제 conv1d를 하면 (d, 3) 사이즈 필터를 곱해서 계산한다 + # => 바로 전, 현재, 바로 다음(또는 전전, 전, 현재) 시간의 영상 snippet 3개만 보고 각 snippet의 feature들은 전부 보게 된다 + # +@ 영상을 나누는 snippet(segment) 개수 유지(kernel_size=3, stride=1, padding=1) + # +@ 이제 각 snippet의 feature dimension수를 조절 가능(필터 개수로 조절) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + x = self.conv_1(x) + x = x.permute(0, 2, 1) + return x + + +class WSAD(nn.Module): + def __init__( + self, + input_size, + ratio_sample=0.2, + ratio_batch=0.4, + ratios=[16, 32], + kernel_sizes=[1, 1, 1], + ): + super().__init__() + # self.flag = flag + + self.ratio_sample = ratio_sample + # 기본값 0.2 + self.ratio_batch = ratio_batch + # 기본값 0.4 + + self.ratios = ratios + # 기본값 [16, 32] + self.kernel_sizes = kernel_sizes + # 기본값 [1, 1, 1] + + self.normal_head = NormalHead( + in_channel=512, ratios=ratios, kernel_sizes=kernel_sizes + ) + self.embedding = Temporal(input_size, 512) + self.selfatt = Transformer(512, 2, 4, 128, 512, dropout=0) + # embedding + selfatt은 논문의 feature enhancer + # embedding은 feature 차원을 permute + conv1d를 이용해 512로 변경 + # selfatt는 transformer계열 enhancer + self.step = 0 + + def get_normal_scores(self, x, ncrops=None): + # x는 (batch * n crops, segment 개수, feature 차원 = 512(논문)) + new_x = x.permute(0, 2, 1) + # conv1d에 넣기전에 (batch * n 
crops, feature 차원, segment 개수)로 변경 + + outputs = self.normal_head(new_x) + # normal_head는 conv1d - bn - relu - conv1d - bn - relu - conv1d - sig 3층 구조 + # outputs는 normal_head 안의 3개의 conv1d output을 담은 list (마지막 output은 conv1d + sig output) + normal_scores = outputs[-1] + xhs = outputs[:-1] + + if ncrops: + b = normal_scores.shape[0] // ncrops + normal_scores = normal_scores.view(b, ncrops, -1).mean(1) + # (batch_size, t snippets) + + return xhs, normal_scores + + def get_mahalanobis_distance(self, feats, anchor, var, ncrops=None): + # 첫번째는 feat는 (batch_size * n crops, 512 // 16, t snippets) + # 두번째는 (batch_size * n crops, 512 // 32, t snippets) + # BN은 각 feature(채널 축)별 batch*h*w개 평균, 분산 계산 + # => (b, c, h*w) -> (c) + # => None으로 unsqueeze해서 (1, c, 1)로 변경 + distance = torch.sqrt( + torch.sum((feats - anchor[None, :, None]) ** 2 / var[None, :, None], dim=1) + ) + # (x - m)^2/var -> torch.sum(dim=1)로 각배치 안의 각 토막(segment)별로 값 존재 (b, t) + # sqrt후에도 사이즈 그대로 (b, t) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # var가 전부 1이면 distance는 BN running mean vector와 각 토막의 feature vector 간의 차이 벡터의 L2 norm 길이가 된다 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + if ncrops: + bs = distance.shape[0] // ncrops + # b x t + distance = distance.view(bs, ncrops, -1).mean(dim=1) + # (batch_size, n crops, t snippets)을 dim=1로 평균 => 동일 영상 10개 crop들의 결과를 평균 + # => (batch_size, t snippets) + return distance + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # 배치 내의 각 영상의 각 토막 feature 벡터가 + # 데이터셋 분포 내 모든 영상의 모든 토막의 feature(512 // 16 또는 512 // 32 차원) 벡터들의 평균인 벡터(running_mean으로 추정)와 + # 얼마나 다른지 알려주는 mahalanobis 거리 계산 + # 데이터 분포내의 모든 토막 feature 벡터의 평균이고 정상토막의 비중이 이상토막의 비중보다 압도적으로 크기 때문에 + # 이 평균 벡터는 정상 토막의 기준처럼 사용 가능(anchor) + # 
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + def pos_neg_select(self, feats, distance, ncrops): + batch_select_ratio = self.ratio_batch + # 기본값 0.4 + sample_select_ratio = self.ratio_sample + # 기본값 0.2 + bs, c, t = feats.shape + # 첫번째는 (batch_size * n crops, 512 // 16, t snippets) + # 두번째는 (batch_size * n crops, 512 // 32, t snippets) + select_num_sample = int(t * sample_select_ratio) + # sample-level selection(SLS)은 20% + select_num_batch = int(bs // 2 * t * batch_select_ratio) + # 데이터는 torch.cat((정상영상, 이상영상), dim=0)으로 정상영상 배치 뒤에 이상영상 배치가 붙어있음 + # => bs // 2가 실제 batch_size * n crops 개수 + # => batch-level selection(BLS)은 (bs // 2) * t개 중 40% + # ==> 40 // 2 해서 사실상 SLS와 동일 비율로 배치 하나당 20% + + feats = feats.view(bs, ncrops, c, t).mean(dim=1) # b x c x t + # 동일 영상에서 나온 10개 crop들 결과 평균 + # => (batch_size, c features, t snippets) + nor_distance = distance[: bs // 2] # b x t + # distance는 10개 crop들을 이미 평균내고 (batch_size, t snippets) + # 그리고 배치 앞 절반은 정상영상 배치 => (n_batch_size = batch_size // 2, t snippets) + nor_feats = feats[: bs // 2].permute(0, 2, 1) # b x t x c + # 정상부분 앞 절반만 가져와 permute => (n_batch_size, t snippets, c features) + abn_distance = distance[bs // 2 :] # b x t + # 배치 뒤 절반은 이상영상 배치 (a_batch_size = batch_size // 2, t snippets) + abn_feats = feats[bs // 2 :].permute(0, 2, 1) # b x t x c + # (a_batch_size, t snippets, c features) + abn_distance_flatten = abn_distance.reshape(-1) + # (a_batch_size * t snippets) + abn_feats_flatten = abn_feats.reshape(-1, c) + # (a_batch_size * t snippets, c features) + + mask_select_abnormal_sample = torch.zeros_like(abn_distance, dtype=torch.bool) + # (a_batch_size, t snippets) + topk_abnormal_sample = torch.topk(abn_distance, select_num_sample, dim=-1)[1] + # torch.topk(abn_distance, select_num_sample, dim=-1)는 top k개 value와 그 value들 indices를 담고 있다 + # value와 indices 둘 다 (a_batch_size, top K = select_num_sample) 형태 + # => [1]로 indices만 가져오기 + 
mask_select_abnormal_sample.scatter_( + dim=1, + index=topk_abnormal_sample, + src=torch.full_like(topk_abnormal_sample, True, dtype=torch.bool), + ) + # (a_batch_size, t snippets) 형태이고 True는 a_batch_size * select_num_sample개이고 나머지는 False + # (top k에 속하는 index 자리만 True, 나머지는 False) + # scatter는 gather의 reverse operation + + mask_select_abnormal_batch = torch.zeros_like( + abn_distance_flatten, dtype=torch.bool + ) + # (a_batch_size * t snippets) + topk_abnormal_batch = torch.topk( + abn_distance_flatten, select_num_batch, dim=-1 + )[1] + # (a_batch_size * select_num_batch) + # top K = select_num_batch 개 indices + mask_select_abnormal_batch.scatter_( + dim=0, + index=topk_abnormal_batch, + src=torch.full_like(topk_abnormal_batch, True, dtype=torch.bool), + ) + # (a_batch_size * t snippets) + + mask_select_abnormal = ( + mask_select_abnormal_batch | mask_select_abnormal_sample.reshape(-1) + ) + # SLS와 BLS를 or 연산 | 으로 합쳐서 논문의 Sample-Batch Selection(SBS) + select_abn_feats = abn_feats_flatten[mask_select_abnormal] + # mask_select_abnormal는 (a_batch_size * t snippets)개 중 num_select_abnormal개만 True고 나머진 False + # abn_feats_flatten의 (a_batch_size * t snippets, c features)에서 mask_select_abnormal를 indices로 쓰면 + # (num_select_abnormal, c feature) 형태가 된다 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # select_abn_feats는 SLS와 BLS를 합쳐 SBS를 만드는 과정에서 상위 ~%에 들었다는 정보만 남고 distance 상위 몇번째인지 순서 정보가 날아간다 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + + num_select_abnormal = torch.sum(mask_select_abnormal) + # SBS 추출 개수 + + k_nor = int(num_select_abnormal / (bs // 2)) + 1 + # 이상영상 배치에서 SBS로 선택한 개수 / 배치내 영상 개수 == 1 배치 당 평균 선택 개수 + # + 1을 해주어서 정상 영상에서 선택된 토막(snippets)개수가 이상 영상 선택 토막 개수보다 크게 설정 + topk_normal_sample = torch.topk(nor_distance, k_nor, dim=-1)[1] + # nor_distance는 (n_batch_size, t snippets) + # 
topk_normal_sample는 각 영상의 t개 토막 중 상위 k_nor개의 indices + # => (n_batch_size, k_nor) + select_nor_feats = torch.gather( + nor_feats, 1, topk_normal_sample[..., None].expand(-1, -1, c) + ) + # nor_feats는 (n_batch_size, t snippets, c features) + # gather의 index는 input과 차원수가 같아야하므로 None으로 (n_batch_size, k_nor, 1), expand로 (n_batch_size, k_nor, c) 형태로 변경 + # expand : Returns a new view of the self tensor with singleton dimensions expanded to a larger size. + # gather dimension이 1 => select_nor_feats[i][j][k] = nor_feats[i][topk_normal_sample[i][j][k]][k] + # select_nor_feats는 (n_batch_size, k_nor, c) 형태 (gather는 index와 output 형태가 동일) + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + # select_abn_feats와 다르게 select_nor_feats는 크기 순서를 지우지 않고 gather를 써서 dim=1 방향으로 nor_distance 값 내림차순 + # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ + select_nor_feats = select_nor_feats.permute(1, 0, 2).reshape(-1, c) + # (k_nor, n_batch_size, c)로 바꾼 후 reshape로 (k_nor * n_batch_size, c) 형태 + select_nor_feats = select_nor_feats[:num_select_abnormal] + # k_nor * n_batch_size는 num_select_abnormal보다 크다 => out of index 에러 안 일어남 + # select_nor_feats는 최종적으로 (num_select_abnormal, c feature) 형태 + + return select_nor_feats, select_abn_feats + + def forward(self, x, flag="Eval"): + if len(x.size()) == 4: + b, n, t, d = x.size() + # 실험에 사용한 I3D UCF-Crime feature는 하나의 영상을 중앙, 4코너 + 중앙, 4코너 거울상 = 10개 crop으로 증강해서 계산 + # => batch 개수, n crop 개수, t 토막(snippet, segment) 개수, d snippet당 feature 차원수 + + x = x.reshape(b * n, t, d) + else: + b, t, d = x.size() + n = 1 + x = self.embedding(x) + x = self.selfatt(x) + # feature enhancer를 지난 feature의 차원수 d == 512(논문) + + normal_feats, normal_scores = self.get_normal_scores(x, n) + # normal_head는 conv1d - bn - relu - conv1d - bn - relu - conv1d - sig 3층 구조 + # normal_feats는 [첫 conv1d output, 두번째 conv1d output] + 
# => (batch_size * n crops, 512 // 16, t snippets), (batch_size * n crops, 512 // 32, t snippets) 형태 + # normal_scores는 마지막 conv1d - sig output => (batch_size, t snippets) 형태 (n crops는 평균-> 1) + + anchors = [bn.running_mean for bn in self.normal_head.bns] + variances = [bn.running_var for bn in self.normal_head.bns] + # conv1d output 바로 뒤 bn은 conv1d output 전체 분포 추정 평균, 분산을 담고 있다 + # 두개의 bn => 첫 conv1d output, 두번째 conv1d output 추정 평균, 분산 + + distances = [ + self.get_mahalanobis_distance(normal_feat, anchor, var, ncrops=n) + for normal_feat, anchor, var in zip(normal_feats, anchors, variances) + ] + # list안의 각 distance는 (batch_size, t snippets) 형태 + + if flag == "Train": + + select_normals = [] + select_abnormals = [] + for feat, distance in zip(normal_feats, distances): + select_feat_normal, select_feat_abnormal = self.pos_neg_select( + feat, distance, n + ) + # select_feat_normal, select_feat_abnormal 둘다 (num_select_abnormal, c feature) 형태 + select_normals.append(select_feat_normal[..., None]) + select_abnormals.append(select_feat_abnormal[..., None]) + # 두 정상, 이상 리스트 모두 feature 두개씩 + # 첫번째는 (num_select_abnormal, 512 // 16 feature, 1) + # 두번째는 (num_select_abnormal, 512 // 32 feature, 1) + + bn_results = dict( + anchors=anchors, + variances=variances, + select_normals=select_normals, + select_abnormals=select_abnormals, + ) + # breakpoint() + distance_sum = sum(distances) + + return { + "pre_normal_scores": normal_scores[0 : b // 2], + # classifier 학습에 사용되는 normal loss 계산에는 label 노이즈가 없는 normal 영상만 사용 + # (label noise: MIL은 비디오 단위 라벨링만 있음 + # => 이상 영상안의 normal snippet을 abnormal snippet으로 판단 하는 등의 noise 발생 가능) + # 정상 영상의 snippet들은 무조건 정상 => 정상 영상 하나의 t snippets의 scores => t 차원 score 벡터 + # ==> 이 t 차원 score 벡터의 L2 norm 값 * n_batch_size 개 정상 영상 == normal loss + # ==> L2 norm인 normal loss가 작아지기 위해서 정상 영상 snippet들의 예측 score가 작아지는 방향으로 학습 + # 논문 3.4 확인 + "bn_results": bn_results, + # mpp loss 계산에 사용 + # 논문 3.2 확인 + # @@@@@@@@@@@@@@@@@@@@@@@@@ + # bce loss를 위해 추가 + 
"normal_scores": normal_scores, + "scores": distance_sum * normal_scores, + } + elif flag == "Train_extra": + distance_sum = sum(distances) + # (batch_size, t snippets) 형태인 distance들 sum + + return { + "normal_scores": normal_scores, + "scores": distance_sum * normal_scores, + } + elif flag == "Eval_MPP": + + distance_sum = sum(distances) + # (batch_size, t snippets) 형태인 distance들 sum + + return { + "normal_scores": normal_scores, + "scores": distance_sum * normal_scores, + } + else: + + distance_sum = sum(distances) + # (batch_size, t snippets) 형태인 distance들 sum + + return distance_sum * normal_scores + # normal_scores도 (batch_size, t snippets) 형태 diff --git a/model/vmae/modeling_finetune.py b/model/vmae/modeling_finetune.py new file mode 100644 index 0000000..01d824e --- /dev/null +++ b/model/vmae/modeling_finetune.py @@ -0,0 +1,574 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +from functools import partial + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from timm.models.layers import drop_path, to_2tuple, trunc_normal_ +from timm.models.registry import register_model + + +def _cfg(url="", **kwargs): + return { + "url": url, + "num_classes": 400, + "input_size": (3, 224, 224), + "pool_size": None, + "crop_pct": 0.9, + "interpolation": "bicubic", + "mean": (0.5, 0.5, 0.5), + "std": (0.5, 0.5, 0.5), + **kwargs, + } + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob=None): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + + 
def forward(self, x): + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class Mlp(nn.Module): + + def __init__( + self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.0, + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + # x = self.drop(x) + # commit this for the orignal BERT implement + x = self.fc2(x) + x = self.drop(x) + return x + + +class CosAttention(nn.Module): + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + attn_head_dim=None, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + # self.scale = qk_scale or head_dim**-0.5 + # DO NOT RENAME [self.scale] (for no weight decay) + if qk_scale is None: + self.scale = nn.Parameter( + torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True + ) + else: + self.scale = qk_scale + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + ( + self.q_bias, + torch.zeros_like(self.v_bias, requires_grad=False), + self.v_bias, + ) + ) + qkv = F.linear(input=x, weight=self.qkv.weight, 
bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1) + + # torch.log(torch.tensor(1. / 0.01)) = 4.6052 + logit_scale = torch.clamp(self.scale, max=4.6052).exp() + + attn = attn * logit_scale + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Attention(nn.Module): + + def __init__( + self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0.0, + proj_drop=0.0, + attn_head_dim=None, + ): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + ( + self.q_bias, + torch.zeros_like(self.v_bias, requires_grad=False), + self.v_bias, + ) + ) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = ( + qkv[0], + qkv[1], + qkv[2], + ) # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + + x = self.proj(x) + x = 
self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__( + self, + dim, + num_heads, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop=0.0, + attn_drop=0.0, + drop_path=0.0, + init_values=None, + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + attn_head_dim=None, + cos_attn=False, + ): + super().__init__() + self.norm1 = norm_layer(dim) + if cos_attn: + self.attn = CosAttention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + attn_head_dim=attn_head_dim, + ) + else: + self.attn = Attention( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + attn_head_dim=attn_head_dim, + ) + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop, + ) + + if init_values > 0: + self.gamma_1 = nn.Parameter( + init_values * torch.ones((dim)), requires_grad=True + ) + self.gamma_2 = nn.Parameter( + init_values * torch.ones((dim)), requires_grad=True + ) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x): + if self.gamma_1 is None: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding""" + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + embed_dim=768, + num_frames=16, + tubelet_size=2, + ): + super().__init__() + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + num_spatial_patches = (img_size[0] // patch_size[0]) * ( + 
img_size[1] // patch_size[1] + ) + num_patches = num_spatial_patches * (num_frames // tubelet_size) + + self.img_size = img_size + self.tubelet_size = tubelet_size + self.patch_size = patch_size + self.num_patches = num_patches + self.proj = nn.Conv3d( + in_channels=in_chans, + out_channels=embed_dim, + kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]), + stride=(self.tubelet_size, patch_size[0], patch_size[1]), + ) + + def forward(self, x, **kwargs): + B, C, T, H, W = x.shape + assert ( + H == self.img_size[0] and W == self.img_size[1] + ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." + # b, c, l -> b, l, c + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +# sin-cos position encoding +# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 +def get_sinusoid_encoding_table(n_position, d_hid): + """Sinusoid position encoding table""" + + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (hid_j // 2) / d_hid) + for hid_j in range(d_hid) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos_i) for pos_i in range(n_position)] + ) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.tensor( + sinusoid_table, dtype=torch.float, requires_grad=False + ).unsqueeze(0) + + +class VisionTransformer(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=1000, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + head_drop_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=0.0, + use_learnable_pos_emb=False, + init_scale=0.0, + all_frames=16, + 
tubelet_size=2, + use_mean_pooling=True, + with_cp=False, + cos_attn=False, + ): + super().__init__() + self.num_classes = num_classes + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + self.tubelet_size = tubelet_size + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + num_frames=all_frames, + tubelet_size=tubelet_size, + ) + num_patches = self.patch_embed.num_patches + self.with_cp = with_cp + + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim)) + else: + # sine-cosine positional embeddings is on the way + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + cos_attn=cos_attn, + ) + for i in range(depth) + ] + ) + self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) + self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None + self.head_dropout = nn.Dropout(head_drop_rate) + self.head = ( + nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + self.head.weight.data.mul_(init_scale) + self.head.bias.data.mul_(init_scale) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def 
get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def forward_features(self, x): + B = x.size(0) + + x = self.patch_embed(x) + + if self.pos_embed is not None: + x = ( + x + + self.pos_embed.expand(B, -1, -1) + .type_as(x) + .to(x.device) + .clone() + .detach() + ) + x = self.pos_drop(x) + + for blk in self.blocks: + if self.with_cp: + x = cp.checkpoint(blk, x) + else: + x = blk(x) + + if self.fc_norm is not None: + return self.fc_norm(x.mean(1)) + else: + return self.norm(x[:, 0]) + + def forward(self, x): + x = self.forward_features(x) + x = self.head_dropout(x) + x = self.head(x) + return x + + +@register_model +def vit_small_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=384, + depth=12, + num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_base_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_large_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1024, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_huge_patch16_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=16, + embed_dim=1280, + 
depth=32, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model + + +@register_model +def vit_giant_patch14_224(pretrained=False, **kwargs): + model = VisionTransformer( + patch_size=14, + embed_dim=1408, + depth=40, + num_heads=16, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + return model diff --git a/model/vmae/modeling_pretrain.py b/model/vmae/modeling_pretrain.py new file mode 100644 index 0000000..d5c3539 --- /dev/null +++ b/model/vmae/modeling_pretrain.py @@ -0,0 +1,493 @@ +# -------------------------------------------------------- +# Based on BEiT, timm, DINO and DeiT code bases +# https://github.com/microsoft/unilm/tree/master/beit +# https://github.com/rwightman/pytorch-image-models/tree/master/timm +# https://github.com/facebookresearch/deit +# https://github.com/facebookresearch/dino +# --------------------------------------------------------' +from functools import partial + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from timm.models.layers import trunc_normal_ as __call_trunc_normal_ +from timm.models.registry import register_model + +from .modeling_finetune import Block, PatchEmbed, _cfg, get_sinusoid_encoding_table + + +def trunc_normal_(tensor, mean=0.0, std=1.0): + __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) + + +class PretrainVisionTransformerEncoder(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + img_size=224, + patch_size=16, + in_chans=3, + num_classes=0, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=None, + tubelet_size=2, + use_learnable_pos_emb=False, + with_cp=False, + all_frames=16, + 
cos_attn=False, + ): + super().__init__() + self.num_classes = num_classes + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + num_frames=all_frames, + tubelet_size=tubelet_size, + ) + num_patches = self.patch_embed.num_patches + self.with_cp = with_cp + + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) + else: + # sine-cosine positional embeddings + self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + cos_attn=cos_attn, + ) + for i in range(depth) + ] + ) + self.norm = norm_layer(embed_dim) + self.head = ( + nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + if use_learnable_pos_emb: + trunc_normal_(self.pos_embed, std=0.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def 
forward_features(self, x, mask): + x = self.patch_embed(x) + + x = x + self.pos_embed.type_as(x).to(x.device).clone().detach() + + B, _, C = x.shape + x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible + + for blk in self.blocks: + if self.with_cp: + x_vis = cp.checkpoint(blk, x_vis) + else: + x_vis = blk(x_vis) + + x_vis = self.norm(x_vis) + return x_vis + + def forward(self, x, mask): + x = self.forward_features(x, mask) + x = self.head(x) + return x + + +class PretrainVisionTransformerDecoder(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + patch_size=16, + num_classes=768, + embed_dim=768, + depth=12, + num_heads=12, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=None, + num_patches=196, + tubelet_size=2, + with_cp=False, + cos_attn=False, + ): + super().__init__() + self.num_classes = num_classes + assert num_classes == 3 * tubelet_size * patch_size**2 + # num_features for consistency with other models + self.num_features = self.embed_dim = embed_dim + self.patch_size = patch_size + self.with_cp = with_cp + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, depth) + ] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + init_values=init_values, + cos_attn=cos_attn, + ) + for i in range(depth) + ] + ) + self.norm = norm_layer(embed_dim) + self.head = ( + nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif 
isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token"} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=""): + self.num_classes = num_classes + self.head = ( + nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() + ) + + def forward(self, x, return_token_num): + for blk in self.blocks: + if self.with_cp: + x = cp.checkpoint(blk, x) + else: + x = blk(x) + + if return_token_num > 0: + # only return the mask tokens predict pixels + x = self.head(self.norm(x[:, -return_token_num:])) + else: + # [B, N, 3*16^2] + x = self.head(self.norm(x)) + return x + + +class PretrainVisionTransformer(nn.Module): + """Vision Transformer with support for patch or hybrid CNN input stage""" + + def __init__( + self, + img_size=224, + patch_size=16, + encoder_in_chans=3, + encoder_num_classes=0, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + decoder_num_classes=1536, # decoder_num_classes=768 + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=8, + mlp_ratio=4.0, + qkv_bias=False, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.0, + norm_layer=nn.LayerNorm, + init_values=0.0, + use_learnable_pos_emb=False, + tubelet_size=2, + num_classes=0, # avoid the error from create_fn in timm + in_chans=0, # avoid the error from create_fn in timm + with_cp=False, + all_frames=16, + cos_attn=False, + ): + super().__init__() + self.encoder = PretrainVisionTransformerEncoder( + img_size=img_size, + patch_size=patch_size, + in_chans=encoder_in_chans, + num_classes=encoder_num_classes, + embed_dim=encoder_embed_dim, + depth=encoder_depth, + num_heads=encoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + 
attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size, + use_learnable_pos_emb=use_learnable_pos_emb, + with_cp=with_cp, + all_frames=all_frames, + cos_attn=cos_attn, + ) + + self.decoder = PretrainVisionTransformerDecoder( + patch_size=patch_size, + num_patches=self.encoder.patch_embed.num_patches, + num_classes=decoder_num_classes, + embed_dim=decoder_embed_dim, + depth=decoder_depth, + num_heads=decoder_num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_layer=norm_layer, + init_values=init_values, + tubelet_size=tubelet_size, + with_cp=with_cp, + cos_attn=cos_attn, + ) + + self.encoder_to_decoder = nn.Linear( + encoder_embed_dim, decoder_embed_dim, bias=False + ) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) + + self.pos_embed = get_sinusoid_encoding_table( + self.encoder.patch_embed.num_patches, decoder_embed_dim + ) + + trunc_normal_(self.mask_token, std=0.02) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def get_num_layers(self): + return len(self.blocks) + + @torch.jit.ignore + def no_weight_decay(self): + return {"pos_embed", "cls_token", "mask_token"} + + def forward(self, x, mask, decode_mask=None): + decode_vis = mask if decode_mask is None else ~decode_mask + + x_vis = self.encoder(x, mask) # [B, N_vis, C_e] + x_vis = self.encoder_to_decoder(x_vis) # [B, N_vis, C_d] + B, N_vis, C = x_vis.shape + + # we don't unshuffle the correct visible token order, + # but shuffle the pos embedding accorddingly. 
+ expand_pos_embed = ( + self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() + ) + pos_emd_vis = expand_pos_embed[~mask].reshape(B, -1, C) + pos_emd_mask = expand_pos_embed[decode_vis].reshape(B, -1, C) + + # [B, N, C_d] + x_full = torch.cat([x_vis + pos_emd_vis, self.mask_token + pos_emd_mask], dim=1) + # NOTE: if N_mask==0, the shape of x is [B, N_mask, 3 * 16 * 16] + x = self.decoder(x_full, pos_emd_mask.shape[1]) + + return x + + +@register_model +def pretrain_videomae_small_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=384, + encoder_depth=12, + encoder_num_heads=6, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + decoder_embed_dim=192, + decoder_num_heads=3, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_base_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + decoder_embed_dim=384, + decoder_num_heads=6, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_large_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + 
decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_huge_patch16_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=16, + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1536, # 16 * 16 * 3 * 2 + decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=4, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model + + +@register_model +def pretrain_videomae_giant_patch14_224(pretrained=False, **kwargs): + model = PretrainVisionTransformer( + img_size=224, + patch_size=14, + encoder_embed_dim=1408, + encoder_depth=40, + encoder_num_heads=16, + encoder_num_classes=0, + decoder_num_classes=1176, # 14 * 14 * 3 * 2, + decoder_embed_dim=512, + decoder_num_heads=8, + mlp_ratio=48 / 11, + qkv_bias=True, + norm_layer=partial(nn.LayerNorm, eps=1e-6), + **kwargs, + ) + model.default_cfg = _cfg() + if pretrained: + checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu") + model.load_state_dict(checkpoint["model"]) + return model diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..ba814e1 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,5050 @@ +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. + +[[package]] +name = "absl-py" +version = "2.1.0" +description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "absl-py-2.1.0.tar.gz", hash = "sha256:7820790efbb316739cde8b4e19357243fc3608a152024288513dd968d7d959ff"}, + {file = "absl_py-2.1.0-py3-none-any.whl", hash = "sha256:526a04eadab8b4ee719ce68f204172ead1027549089702d99b9059f129ff1308"}, +] + +[[package]] +name = "albumentations" +version = "1.4.1" +description = "Fast image augmentation library and easy to use wrapper around other libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "albumentations-1.4.1-py3-none-any.whl", hash = "sha256:7933aea451a5923bd1d6d16521ce87a9e03a500db3b19ac0ed3a9b3117a41862"}, + {file = "albumentations-1.4.1.tar.gz", hash = "sha256:556c53a958bd26ce484f545058d755af55db13a80b8aef6946b87adcb38a5545"}, +] + +[package.dependencies] +numpy = ">=1.24.4" +opencv-python-headless = ">=4.9.0" +PyYAML = "*" +scikit-image = ">=0.21.0" +scikit-learn = ">=1.3.2" +scipy = ">=1.10.0" +typing-extensions = ">=4.9.0" + +[[package]] +name = "annotated-types" +version = "0.6.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +files = [ + {file = "annotated_types-0.6.0-py3-none-any.whl", hash = "sha256:0641064de18ba7a25dee8f96403ebc39113d0cb953a01429249d5c7564666a43"}, + {file = "annotated_types-0.6.0.tar.gz", hash = "sha256:563339e807e53ffd9c267e99fc6d9ea23eb8443c08f112651963e24e22f84a5d"}, +] + +[[package]] +name = "anyio" +version = "3.7.1" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.7" +files = [ + {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, + {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx", "packaging", 
"sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] +test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (<0.22)"] + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + +[[package]] +name = "appnope" +version = "0.1.4" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = ">=3.6" +files = [ + {file = "appnope-0.1.4-py2.py3-none-any.whl", hash = "sha256:502575ee11cd7a28c0205f379b525beefebab9d161b7c964670864014ed7213c"}, + {file = "appnope-0.1.4.tar.gz", hash = "sha256:1de3860566df9caf38f01f86f65e0e13e379af54f9e4bee1e66b48f2efffd1ee"}, +] + +[[package]] +name = "asttokens" +version = "2.4.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = "*" +files = [ + {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"}, + {file = "asttokens-2.4.1.tar.gz", hash = "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0"}, +] + +[package.dependencies] +six = ">=1.12.0" + +[package.extras] +astroid = ["astroid (>=1,<2)", "astroid (>=2,<4)"] +test = ["astroid (>=1,<2)", "astroid (>=2,<4)", "pytest"] + +[[package]] +name = "astunparse" +version = "1.6.3" +description = "An AST unparser for Python" +optional = false +python-versions = "*" +files = [ + {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = 
"sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, + {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, +] + +[package.dependencies] +six = ">=1.6.1,<2.0" +wheel = ">=0.23.0,<1.0" + +[[package]] +name = "autoflake" +version = "2.3.0" +description = "Removes unused imports and unused variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "autoflake-2.3.0-py3-none-any.whl", hash = "sha256:79a51eb8c0744759d2efe052455ab20aa6a314763510c3fd897499a402126327"}, + {file = "autoflake-2.3.0.tar.gz", hash = "sha256:8c2011fa34701b9d7dcf05b9873bc4859d4fce4e62dfea90dffefd1576f5f01d"}, +] + +[package.dependencies] +pyflakes = ">=3.0.0" + +[[package]] +name = "black" +version = "24.2.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.8" +files = [ + {file = "black-24.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29"}, + {file = "black-24.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430"}, + {file = "black-24.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f"}, + {file = "black-24.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a"}, + {file = "black-24.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd"}, + {file = "black-24.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2"}, + {file = "black-24.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92"}, + {file = "black-24.2.0-cp311-cp311-win_amd64.whl", 
hash = "sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23"}, + {file = "black-24.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b"}, + {file = "black-24.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9"}, + {file = "black-24.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693"}, + {file = "black-24.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982"}, + {file = "black-24.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4"}, + {file = "black-24.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218"}, + {file = "black-24.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0"}, + {file = "black-24.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d"}, + {file = "black-24.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8"}, + {file = "black-24.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8"}, + {file = "black-24.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540"}, + {file = "black-24.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31"}, + {file = "black-24.2.0-py3-none-any.whl", hash = "sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6"}, + {file = "black-24.2.0.tar.gz", 
hash = "sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "boto3" +version = "1.34.53" +description = "The AWS SDK for Python" +optional = false +python-versions = ">= 3.8" +files = [ + {file = "boto3-1.34.53-py3-none-any.whl", hash = "sha256:340c73f57fcca6f503403e2e13a0a4ad44bec218feee2e0896be612324394afd"}, + {file = "boto3-1.34.53.tar.gz", hash = "sha256:cd30261a782824ce543a628ae524480abb4ca6ab4e4a2631477e48baed43b5f2"}, +] + +[package.dependencies] +botocore = ">=1.34.53,<1.35.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.10.0,<0.11.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.34.53" +description = "Low-level, data-driven core of boto 3." 
+optional = false +python-versions = ">= 3.8" +files = [ + {file = "botocore-1.34.53-py3-none-any.whl", hash = "sha256:cbbcaddc35738d32df55d26ed5561cf3fa32751a6b22e7e342be87b5e3f55eec"}, + {file = "botocore-1.34.53.tar.gz", hash = "sha256:3d243781e994dfc5b20036d9fb92672bfaef4dbe388eaa79dae6440ea56c53eb"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""} + +[package.extras] +crt = ["awscrt (==0.19.19)"] + +[[package]] +name = "brotli" +version = "1.1.0" +description = "Python bindings for the Brotli compression library" +optional = false +python-versions = "*" +files = [ + {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752"}, + {file = "Brotli-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ae56aca0402a0f9a3431cddda62ad71666ca9d4dc3a10a142b9dce2e3c0cda3"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43ce1b9935bfa1ede40028054d7f48b5469cd02733a365eec8a329ffd342915d"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7c4855522edb2e6ae7fdb58e07c3ba9111e7621a8956f481c68d5d979c93032e"}, + {file = "Brotli-1.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:38025d9f30cf4634f8309c6874ef871b841eb3c347e90b0851f63d1ded5212da"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e6a904cb26bfefc2f0a6f240bdf5233be78cd2488900a2f846f3c3ac8489ab80"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0"}, + {file = "Brotli-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e"}, + {file = "Brotli-1.1.0-cp310-cp310-win32.whl", hash = "sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2"}, + {file = "Brotli-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128"}, + {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc"}, + {file = "Brotli-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8146669223164fc87a7e3de9f81e9423c67a79d6b3447994dfb9c95da16e2d6"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30924eb4c57903d5a7526b08ef4a584acc22ab1ffa085faceb521521d2de32dd"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ceb64bbc6eac5a140ca649003756940f8d6a7c444a68af170b3187623b43bebf"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a469274ad18dc0e4d316eefa616d1d0c2ff9da369af19fa6f3daa4f09671fd61"}, + {file = "Brotli-1.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:524f35912131cc2cabb00edfd8d573b07f2d9f21fa824bd3fb19725a9cf06327"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5b3cc074004d968722f51e550b41a27be656ec48f8afaeeb45ebf65b561481dd"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = 
"sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265"}, + {file = "Brotli-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8"}, + {file = "Brotli-1.1.0-cp311-cp311-win32.whl", hash = "sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50"}, + {file = "Brotli-1.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409"}, + {file = "Brotli-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408"}, + {file = "Brotli-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248"}, + {file = "Brotli-1.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966"}, + {file = "Brotli-1.1.0-cp312-cp312-win32.whl", hash = "sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0"}, + {file = "Brotli-1.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951"}, + {file = "Brotli-1.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4d4a848d1837973bf0f4b5e54e3bec977d99be36a7895c61abb659301b02c112"}, + {file = "Brotli-1.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fdc3ff3bfccdc6b9cc7c342c03aa2400683f0cb891d46e94b64a197910dc4064"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:5eeb539606f18a0b232d4ba45adccde4125592f3f636a6182b4a8a436548b914"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354"}, + {file = "Brotli-1.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2"}, + {file = "Brotli-1.1.0-cp36-cp36m-win32.whl", hash = "sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460"}, + {file = "Brotli-1.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579"}, + {file = 
"Brotli-1.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f733d788519c7e3e71f0855c96618720f5d3d60c3cb829d8bbb722dddce37985"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:929811df5462e182b13920da56c6e0284af407d1de637d8e536c5cd00a7daf60"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b63b949ff929fbc2d6d3ce0e924c9b93c9785d877a21a1b678877ffbbc4423a"}, + {file = "Brotli-1.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d192f0f30804e55db0d0e0a35d83a9fead0e9a359a9ed0285dbacea60cc10a84"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f296c40e23065d0d6650c4aefe7470d2a25fffda489bcc3eb66083f3ac9f6643"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b"}, + {file = "Brotli-1.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438"}, + {file = "Brotli-1.1.0-cp37-cp37m-win32.whl", hash = "sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95"}, + {file = "Brotli-1.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68"}, + {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3"}, + {file = "Brotli-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:03d20af184290887bdea3f0f78c4f737d126c74dc2f3ccadf07e54ceca3bf208"}, + {file = 
"Brotli-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6172447e1b368dcbc458925e5ddaf9113477b0ed542df258d84fa28fc45ceea7"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a743e5a28af5f70f9c080380a5f908d4d21d40e8f0e0c8901604d15cfa9ba751"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0541e747cce78e24ea12d69176f6a7ddb690e62c425e01d31cc065e69ce55b48"}, + {file = "Brotli-1.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cdbc1fc1bc0bff1cef838eafe581b55bfbffaed4ed0318b724d0b71d4d377619"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:890b5a14ce214389b2cc36ce82f3093f96f4cc730c1cffdbefff77a7c71f2a97"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088"}, + {file = "Brotli-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596"}, + {file = "Brotli-1.1.0-cp38-cp38-win32.whl", hash = "sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b"}, + {file = "Brotli-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0"}, + {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a"}, + {file = "Brotli-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7905193081db9bfa73b1219140b3d315831cbff0d8941f22da695832f0dd188f"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a77def80806c421b4b0af06f45d65a136e7ac0bdca3c09d9e2ea4e515367c7e9"}, + {file = 
"Brotli-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8dadd1314583ec0bf2d1379f7008ad627cd6336625d6679cf2f8e67081b83acf"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:901032ff242d479a0efa956d853d16875d42157f98951c0230f69e69f9c09bac"}, + {file = "Brotli-1.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:22fc2a8549ffe699bfba2256ab2ed0421a7b8fadff114a3d201794e45a9ff578"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ae15b066e5ad21366600ebec29a7ccbc86812ed267e4b28e860b8ca16a2bc474"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d"}, + {file = "Brotli-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59"}, + {file = "Brotli-1.1.0-cp39-cp39-win32.whl", hash = "sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64"}, + {file = "Brotli-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467"}, + {file = "Brotli-1.1.0.tar.gz", hash = "sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724"}, +] + +[[package]] +name = "brotlicffi" +version = "1.1.0.0" +description = "Python CFFI bindings to the Brotli library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "brotlicffi-1.1.0.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9b7ae6bd1a3f0df532b6d67ff674099a96d22bc0948955cb338488c31bfb8851"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19ffc919fa4fc6ace69286e0a23b3789b4219058313cf9b45625016bf7ff996b"}, + {file = 
"brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9feb210d932ffe7798ee62e6145d3a757eb6233aa9a4e7db78dd3690d7755814"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84763dbdef5dd5c24b75597a77e1b30c66604725707565188ba54bab4f114820"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-win32.whl", hash = "sha256:1b12b50e07c3911e1efa3a8971543e7648100713d4e0971b13631cce22c587eb"}, + {file = "brotlicffi-1.1.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:994a4f0681bb6c6c3b0925530a1926b7a189d878e6e5e38fae8efa47c5d9c613"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e4aeb0bd2540cb91b069dbdd54d458da8c4334ceaf2d25df2f4af576d6766ca"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4b7b0033b0d37bb33009fb2fef73310e432e76f688af76c156b3594389d81391"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54a07bb2374a1eba8ebb52b6fafffa2afd3c4df85ddd38fcc0511f2bb387c2a8"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7901a7dc4b88f1c1475de59ae9be59799db1007b7d059817948d8e4f12e24e35"}, + {file = "brotlicffi-1.1.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce01c7316aebc7fce59da734286148b1d1b9455f89cf2c8a4dfce7d41db55c2d"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:246f1d1a90279bb6069de3de8d75a8856e073b8ff0b09dcca18ccc14cec85979"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc4bc5d82bc56ebd8b514fb8350cfac4627d6b0743382e46d033976a5f80fab6"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:37c26ecb14386a44b118ce36e546ce307f4810bc9598a6e6cb4f7fca725ae7e6"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca72968ae4eaf6470498d5c2887073f7efe3b1e7d7ec8be11a06a79cc810e990"}, + {file = "brotlicffi-1.1.0.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:add0de5b9ad9e9aa293c3aa4e9deb2b61e99ad6c1634e01d01d98c03e6a354cc"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9b6068e0f3769992d6b622a1cd2e7835eae3cf8d9da123d7f51ca9c1e9c333e5"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8557a8559509b61e65083f8782329188a250102372576093c88930c875a69838"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a7ae37e5d79c5bdfb5b4b99f2715a6035e6c5bf538c3746abc8e26694f92f33"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:391151ec86bb1c683835980f4816272a87eaddc46bb91cbf44f62228b84d8cca"}, + {file = "brotlicffi-1.1.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2f3711be9290f0453de8eed5275d93d286abe26b08ab4a35d7452caa1fef532f"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1a807d760763e398bbf2c6394ae9da5815901aa93ee0a37bca5efe78d4ee3171"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa8ca0623b26c94fccc3a1fdd895be1743b838f3917300506d04aa3346fd2a14"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3de0cf28a53a3238b252aca9fed1593e9d36c1d116748013339f0949bfc84112"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6be5ec0e88a4925c91f3dea2bb0013b3a2accda6f77238f76a34a1ea532a1cb0"}, + {file = "brotlicffi-1.1.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d9eb71bb1085d996244439154387266fd23d6ad37161f6f52f1cd41dd95a3808"}, + {file = "brotlicffi-1.1.0.0.tar.gz", hash = "sha256:b77827a689905143f87915310b93b273ab17888fd43ef350d4832c4a71083c13"}, +] + +[package.dependencies] +cffi = ">=1.0.0" + +[[package]] +name = "cap_from_youtube" +version = "0.0.9" +description = "Get an OpenCV video capture from an YouTube video URL" +optional = false +python-versions = "*" +files = [] +develop = false + +[package.dependencies] +numpy = "*" +opencv-python = "*" +yt_dlp = "*" + +[package.source] +type = "git" +url = "https://github.com/ibaiGorordo/cap_from_youtube" +reference = "HEAD" +resolved_reference = "d34d7d9f3ce3853d3c31ac393a091e4a723a03d1" + +[[package]] +name = "certifi" +version = "2024.2.2" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.2.2-py3-none-any.whl", hash = "sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1"}, + {file = "certifi-2024.2.2.tar.gz", hash = "sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = 
"cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = 
"sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash 
= "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + 
{file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "click" +version = "8.1.7" 
+description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "cmake" +version = "3.28.3" +description = "CMake is an open-source, cross-platform family of tools designed to build, test and package software" +optional = false +python-versions = "*" +files = [ + {file = "cmake-3.28.3-py2.py3-none-macosx_10_10_universal2.macosx_10_10_x86_64.macosx_11_0_arm64.macosx_11_0_universal2.whl", hash = "sha256:f27187ae016b089d1c1fca6a24b3af58f9d79471097eaa3b7a7a7623ad12ea89"}, + {file = "cmake-3.28.3-py2.py3-none-manylinux2010_i686.manylinux_2_12_i686.whl", hash = "sha256:f5573c453f7a6c213c82741c173d174b5c6b576eea5cc00e2a8a5a30c40244b3"}, + {file = "cmake-3.28.3-py2.py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:35b14086257dc7ce8e83c19d2d20f7953d584fa3c9d1904211d8498fe1134ecc"}, + {file = "cmake-3.28.3-py2.py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:795c4c7f0ad16cc6553085502a76aa7fcf36fd2f4c8420542d1c7f3be6f9de1e"}, + {file = "cmake-3.28.3-py2.py3-none-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:2b811a7c97b2b31a56397baeb5ca93119fa4d215846851059748427c67f14a58"}, + {file = "cmake-3.28.3-py2.py3-none-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8415ed1a9335eb30b0e435c38bcaeb8fd9ae900a9594fe500f3bcba744be1dc7"}, + {file = "cmake-3.28.3-py2.py3-none-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:2745d4362ac23f2f979e71d44759af740c3890429cb8a7e2fd449a30a901632f"}, + {file = "cmake-3.28.3-py2.py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:d3bc42bf54ea3d64e5d81eb31275076817507cf4a6aa07a49ffc01985cae1f09"}, + {file = "cmake-3.28.3-py2.py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:de10be2f470c41a3628e27157168f017ade2f14065588497e00f4582bc5eec07"}, + {file = "cmake-3.28.3-py2.py3-none-musllinux_1_1_i686.whl", hash = "sha256:5e4972e455fc24509561873cb06c9d9394852d77adde1cf970b859ad14a2a66f"}, + {file = "cmake-3.28.3-py2.py3-none-musllinux_1_1_ppc64le.whl", hash = "sha256:ea338ae68e0c5626f7c21f89b765eb0e81f7b497e977503a3bcce569984dc8a7"}, + {file = "cmake-3.28.3-py2.py3-none-musllinux_1_1_s390x.whl", hash = "sha256:c6415d382933854d2b5508c4d2218cfb1a8cb90f5f78b4e97183f80089868eea"}, + {file = "cmake-3.28.3-py2.py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:cc67c5e5df8db0be57d25b81f7dc76e0ec79215f914e585a8045589a380bcd3c"}, + {file = "cmake-3.28.3-py2.py3-none-win32.whl", hash = "sha256:29d127e5ef256d389feac0884e918612b89eb3a8febff1acf83bb27bc65042ab"}, + {file = "cmake-3.28.3-py2.py3-none-win_amd64.whl", hash = "sha256:f6fc9755979d17970ca6d9688fb5cdd3702c9eaa7ac1ee97074e3d39d3400970"}, + {file = "cmake-3.28.3-py2.py3-none-win_arm64.whl", hash = "sha256:4b1b413cf7683d54ec2a0f3b17a4d7c6979eb469270439c0e7a082256c78ab96"}, + {file = "cmake-3.28.3.tar.gz", hash = "sha256:a8092815c739da7d6775c26ec30c2645f0fca9527a29e36a682faec7d39cde89"}, +] + +[package.extras] +test = ["coverage (>=4.2)", "importlib-metadata (>=2.0)", "pytest (>=3.0.3)", "pytest-cov (>=2.4.0)"] + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "coloredlogs" +version = "15.0.1" +description = "Colored terminal output for Python's logging module" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, + {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, +] + +[package.dependencies] +humanfriendly = ">=9.1" + +[package.extras] +cron = ["capturer (>=2.4)"] + +[[package]] +name = "comm" +version = "0.2.1" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "comm-0.2.1-py3-none-any.whl", hash = "sha256:87928485c0dfc0e7976fd89fc1e187023cf587e7c353e4a9b417555b44adf021"}, + {file = "comm-0.2.1.tar.gz", hash = "sha256:0bc91edae1344d39d3661dcbc36937181fdaddb304790458f8b044dbc064b89a"}, +] + +[package.dependencies] +traitlets = ">=4" + +[package.extras] +test = ["pytest"] + +[[package]] +name = "contourpy" +version = "1.2.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.9" +files = [ + {file = "contourpy-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0274c1cb63625972c0c007ab14dd9ba9e199c36ae1a231ce45d725cbcbfd10a8"}, + {file = "contourpy-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ab459a1cbbf18e8698399c595a01f6dcc5c138220ca3ea9e7e6126232d102bb4"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fdd887f17c2f4572ce548461e4f96396681212d858cae7bd52ba3310bc6f00f"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d16edfc3fc09968e09ddffada434b3bf989bf4911535e04eada58469873e28e"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c203f617abc0dde5792beb586f827021069fb6d403d7f4d5c2b543d87edceb9"}, + {file = "contourpy-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b69303ceb2e4d4f146bf82fda78891ef7bcd80c41bf16bfca3d0d7eb545448aa"}, + {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:884c3f9d42d7218304bc74a8a7693d172685c84bd7ab2bab1ee567b769696df9"}, + {file = "contourpy-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4a1b1208102be6e851f20066bf0e7a96b7d48a07c9b0cfe6d0d4545c2f6cadab"}, + {file = "contourpy-1.2.0-cp310-cp310-win32.whl", hash = "sha256:34b9071c040d6fe45d9826cbbe3727d20d83f1b6110d219b83eb0e2a01d79488"}, + {file = 
"contourpy-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:bd2f1ae63998da104f16a8b788f685e55d65760cd1929518fd94cd682bf03e41"}, + {file = "contourpy-1.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:dd10c26b4eadae44783c45ad6655220426f971c61d9b239e6f7b16d5cdaaa727"}, + {file = "contourpy-1.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5c6b28956b7b232ae801406e529ad7b350d3f09a4fde958dfdf3c0520cdde0dd"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebeac59e9e1eb4b84940d076d9f9a6cec0064e241818bcb6e32124cc5c3e377a"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:139d8d2e1c1dd52d78682f505e980f592ba53c9f73bd6be102233e358b401063"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e9dc350fb4c58adc64df3e0703ab076f60aac06e67d48b3848c23647ae4310e"}, + {file = "contourpy-1.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18fc2b4ed8e4a8fe849d18dce4bd3c7ea637758c6343a1f2bae1e9bd4c9f4686"}, + {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:16a7380e943a6d52472096cb7ad5264ecee36ed60888e2a3d3814991a0107286"}, + {file = "contourpy-1.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8d8faf05be5ec8e02a4d86f616fc2a0322ff4a4ce26c0f09d9f7fb5330a35c95"}, + {file = "contourpy-1.2.0-cp311-cp311-win32.whl", hash = "sha256:67b7f17679fa62ec82b7e3e611c43a016b887bd64fb933b3ae8638583006c6d6"}, + {file = "contourpy-1.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:99ad97258985328b4f207a5e777c1b44a83bfe7cf1f87b99f9c11d4ee477c4de"}, + {file = "contourpy-1.2.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:575bcaf957a25d1194903a10bc9f316c136c19f24e0985a2b9b5608bdf5dbfe0"}, + {file = "contourpy-1.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9e6c93b5b2dbcedad20a2f18ec22cae47da0d705d454308063421a3b290d9ea4"}, + {file = 
"contourpy-1.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:464b423bc2a009088f19bdf1f232299e8b6917963e2b7e1d277da5041f33a779"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68ce4788b7d93e47f84edd3f1f95acdcd142ae60bc0e5493bfd120683d2d4316"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7d1f8871998cdff5d2ff6a087e5e1780139abe2838e85b0b46b7ae6cc25399"}, + {file = "contourpy-1.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e739530c662a8d6d42c37c2ed52a6f0932c2d4a3e8c1f90692ad0ce1274abe0"}, + {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:247b9d16535acaa766d03037d8e8fb20866d054d3c7fbf6fd1f993f11fc60ca0"}, + {file = "contourpy-1.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:461e3ae84cd90b30f8d533f07d87c00379644205b1d33a5ea03381edc4b69431"}, + {file = "contourpy-1.2.0-cp312-cp312-win32.whl", hash = "sha256:1c2559d6cffc94890b0529ea7eeecc20d6fadc1539273aa27faf503eb4656d8f"}, + {file = "contourpy-1.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:491b1917afdd8638a05b611a56d46587d5a632cabead889a5440f7c638bc6ed9"}, + {file = "contourpy-1.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5fd1810973a375ca0e097dee059c407913ba35723b111df75671a1976efa04bc"}, + {file = "contourpy-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:999c71939aad2780f003979b25ac5b8f2df651dac7b38fb8ce6c46ba5abe6ae9"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7caf9b241464c404613512d5594a6e2ff0cc9cb5615c9475cc1d9b514218ae8"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:266270c6f6608340f6c9836a0fb9b367be61dde0c9a9a18d5ece97774105ff3e"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:dbd50d0a0539ae2e96e537553aff6d02c10ed165ef40c65b0e27e744a0f10af8"}, + {file = "contourpy-1.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11f8d2554e52f459918f7b8e6aa20ec2a3bce35ce95c1f0ef4ba36fbda306df5"}, + {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ce96dd400486e80ac7d195b2d800b03e3e6a787e2a522bfb83755938465a819e"}, + {file = "contourpy-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6d3364b999c62f539cd403f8123ae426da946e142312a514162adb2addd8d808"}, + {file = "contourpy-1.2.0-cp39-cp39-win32.whl", hash = "sha256:1c88dfb9e0c77612febebb6ac69d44a8d81e3dc60f993215425b62c1161353f4"}, + {file = "contourpy-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:78e6ad33cf2e2e80c5dfaaa0beec3d61face0fb650557100ee36db808bfa6843"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:be16975d94c320432657ad2402f6760990cb640c161ae6da1363051805fa8108"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b95a225d4948b26a28c08307a60ac00fb8671b14f2047fc5476613252a129776"}, + {file = "contourpy-1.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:0d7e03c0f9a4f90dc18d4e77e9ef4ec7b7bbb437f7f675be8e530d65ae6ef956"}, + {file = "contourpy-1.2.0.tar.gz", hash = "sha256:171f311cb758de7da13fc53af221ae47a5877be5a0843a9fe150818c51ed276a"}, +] + +[package.dependencies] +numpy = ">=1.20,<2.0" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.6.1)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-xdist", "wurlitzer"] + +[[package]] +name = "cycler" +version = "0.12.1" +description = "Composable style cycles" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cycler-0.12.1-py3-none-any.whl", hash = 
"sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, + {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, +] + +[package.extras] +docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] +tests = ["pytest", "pytest-cov", "pytest-xdist"] + +[[package]] +name = "debugpy" +version = "1.8.1" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "debugpy-1.8.1-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:3bda0f1e943d386cc7a0e71bfa59f4137909e2ed947fb3946c506e113000f741"}, + {file = "debugpy-1.8.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dda73bf69ea479c8577a0448f8c707691152e6c4de7f0c4dec5a4bc11dee516e"}, + {file = "debugpy-1.8.1-cp310-cp310-win32.whl", hash = "sha256:3a79c6f62adef994b2dbe9fc2cc9cc3864a23575b6e387339ab739873bea53d0"}, + {file = "debugpy-1.8.1-cp310-cp310-win_amd64.whl", hash = "sha256:7eb7bd2b56ea3bedb009616d9e2f64aab8fc7000d481faec3cd26c98a964bcdd"}, + {file = "debugpy-1.8.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:016a9fcfc2c6b57f939673c874310d8581d51a0fe0858e7fac4e240c5eb743cb"}, + {file = "debugpy-1.8.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd97ed11a4c7f6d042d320ce03d83b20c3fb40da892f994bc041bbc415d7a099"}, + {file = "debugpy-1.8.1-cp311-cp311-win32.whl", hash = "sha256:0de56aba8249c28a300bdb0672a9b94785074eb82eb672db66c8144fff673146"}, + {file = "debugpy-1.8.1-cp311-cp311-win_amd64.whl", hash = "sha256:1a9fe0829c2b854757b4fd0a338d93bc17249a3bf69ecf765c61d4c522bb92a8"}, + {file = "debugpy-1.8.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3ebb70ba1a6524d19fa7bb122f44b74170c447d5746a503e36adc244a20ac539"}, + {file = "debugpy-1.8.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a2e658a9630f27534e63922ebf655a6ab60c370f4d2fc5c02a5b19baf4410ace"}, + {file = "debugpy-1.8.1-cp312-cp312-win32.whl", hash = "sha256:caad2846e21188797a1f17fc09c31b84c7c3c23baf2516fed5b40b378515bbf0"}, + {file = "debugpy-1.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:edcc9f58ec0fd121a25bc950d4578df47428d72e1a0d66c07403b04eb93bcf98"}, + {file = "debugpy-1.8.1-cp38-cp38-macosx_11_0_x86_64.whl", hash = "sha256:7a3afa222f6fd3d9dfecd52729bc2e12c93e22a7491405a0ecbf9e1d32d45b39"}, + {file = "debugpy-1.8.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d915a18f0597ef685e88bb35e5d7ab968964b7befefe1aaea1eb5b2640b586c7"}, + {file = "debugpy-1.8.1-cp38-cp38-win32.whl", hash = "sha256:92116039b5500633cc8d44ecc187abe2dfa9b90f7a82bbf81d079fcdd506bae9"}, + {file = "debugpy-1.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:e38beb7992b5afd9d5244e96ad5fa9135e94993b0c551ceebf3fe1a5d9beb234"}, + {file = "debugpy-1.8.1-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:bfb20cb57486c8e4793d41996652e5a6a885b4d9175dd369045dad59eaacea42"}, + {file = "debugpy-1.8.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efd3fdd3f67a7e576dd869c184c5dd71d9aaa36ded271939da352880c012e703"}, + {file = "debugpy-1.8.1-cp39-cp39-win32.whl", hash = "sha256:58911e8521ca0c785ac7a0539f1e77e0ce2df753f786188f382229278b4cdf23"}, + {file = "debugpy-1.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:6df9aa9599eb05ca179fb0b810282255202a66835c6efb1d112d21ecb830ddd3"}, + {file = "debugpy-1.8.1-py2.py3-none-any.whl", hash = "sha256:28acbe2241222b87e255260c76741e1fbf04fdc3b6d094fcf57b6c6f75ce1242"}, + {file = "debugpy-1.8.1.zip", hash = "sha256:f696d6be15be87aef621917585f9bb94b1dc9e8aced570db1b8a6fc14e8f9b42"}, +] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = 
"sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "distlib" +version = "0.3.8" +description = "Distribution utilities" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, + {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, +] + +[[package]] +name = "dm-tree" +version = "0.1.8" +description = "Tree is a library for working with nested data structures." +optional = false +python-versions = "*" +files = [ + {file = "dm-tree-0.1.8.tar.gz", hash = "sha256:0fcaabbb14e7980377439e7140bd05552739ca5e515ecb3119f234acee4b9430"}, + {file = "dm_tree-0.1.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:35cc164a79336bfcfafb47e5f297898359123bbd3330c1967f0c4994f9cf9f60"}, + {file = "dm_tree-0.1.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39070ba268c0491af9fe7a58644d99e8b4f2cde6e5884ba3380bddc84ed43d5f"}, + {file = "dm_tree-0.1.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2869228d9c619074de501a3c10dc7f07c75422f8fab36ecdcb859b6f1b1ec3ef"}, + {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d20f2faa3672b52e5013f4077117bfb99c4cfc0b445d3bde1584c34032b57436"}, + {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5483dca4d7eb1a0d65fe86d3b6a53ae717face83c1f17e0887b1a4a64ae5c410"}, + {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d7c26e431fc93cc7e0cba867eb000db6a05f6f2b25af11ac4e9dada88fc5bca"}, + {file = "dm_tree-0.1.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4d714371bb08839e4e5e29024fc95832d9affe129825ef38836b143028bd144"}, 
+ {file = "dm_tree-0.1.8-cp310-cp310-win_amd64.whl", hash = "sha256:d40fa4106ca6edc66760246a08f500ec0c85ef55c762fb4a363f6ee739ba02ee"}, + {file = "dm_tree-0.1.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad16ceba90a56ec47cf45b21856d14962ac314787975ef786efb5e6e9ca75ec7"}, + {file = "dm_tree-0.1.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:803bfc53b4659f447ac694dbd04235f94a73ef7c1fd1e0df7c84ac41e0bc963b"}, + {file = "dm_tree-0.1.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:378cc8ad93c5fe3590f405a309980721f021c790ca1bdf9b15bb1d59daec57f5"}, + {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1607ce49aa42f010d1e5e616d92ce899d66835d4d8bea49679582435285515de"}, + {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343a4a4ebaa127451ff971254a4be4084eb4bdc0b2513c32b46f6f728fd03f9e"}, + {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa42a605d099ee7d41ba2b5fb75e21423951fd26e5d50583a00471238fb3021d"}, + {file = "dm_tree-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83b7764de0d855338abefc6e3ee9fe40d301668310aa3baea3f778ff051f4393"}, + {file = "dm_tree-0.1.8-cp311-cp311-win_amd64.whl", hash = "sha256:a5d819c38c03f0bb5b3b3703c60e4b170355a0fc6b5819325bf3d4ceb3ae7e80"}, + {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ea9e59e0451e7d29aece402d9f908f2e2a80922bcde2ebfd5dcb07750fcbfee8"}, + {file = "dm_tree-0.1.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:94d3f0826311f45ee19b75f5b48c99466e4218a0489e81c0f0167bda50cacf22"}, + {file = "dm_tree-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:435227cf3c5dc63f4de054cf3d00183790bd9ead4c3623138c74dde7f67f521b"}, + {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09964470f76a5201aff2e8f9b26842976de7889300676f927930f6285e256760"}, + {file 
= "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:75c5d528bb992981c20793b6b453e91560784215dffb8a5440ba999753c14ceb"}, + {file = "dm_tree-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a94aba18a35457a1b5cd716fd7b46c5dafdc4cf7869b4bae665b91c4682a8e"}, + {file = "dm_tree-0.1.8-cp312-cp312-win_amd64.whl", hash = "sha256:96a548a406a6fb15fe58f6a30a57ff2f2aafbf25f05afab00c8f5e5977b6c715"}, + {file = "dm_tree-0.1.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8c60a7eadab64c2278861f56bca320b2720f163dca9d7558103c3b77f2416571"}, + {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af4b3d372f2477dcd89a6e717e4a575ca35ccc20cc4454a8a4b6f8838a00672d"}, + {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de287fabc464b8734be251e46e06aa9aa1001f34198da2b6ce07bd197172b9cb"}, + {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:054b461f8176f4bce7a21f7b1870f873a1ced3bdbe1282c816c550bb43c71fa6"}, + {file = "dm_tree-0.1.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f7915660f59c09068e428613c480150180df1060561fd0d1470684ae7007bd1"}, + {file = "dm_tree-0.1.8-cp37-cp37m-win_amd64.whl", hash = "sha256:b9f89a454e98806b44fe9d40ec9eee61f848388f7e79ac2371a55679bd5a3ac6"}, + {file = "dm_tree-0.1.8-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0e9620ccf06393eb6b613b5e366469304622d4ea96ae6540b28a33840e6c89cf"}, + {file = "dm_tree-0.1.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b095ba4f8ca1ba19350fd53cf1f8f3eb0bd406aa28af64a6dfc86707b32a810a"}, + {file = "dm_tree-0.1.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b9bd9b9ccb59409d33d51d84b7668010c04c2af7d4a371632874c1ca356cff3d"}, + {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:0d3172394079a86c3a759179c65f64c48d1a42b89495fcf38976d11cc3bb952c"}, + {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1612fcaecd79023dbc6a6ae48d51a80beb5c385d6f3f6d71688e57bc8d07de8"}, + {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c5c8c12e3fda754ef6af94161bacdaeda816d941995fac415d6855c6c386af68"}, + {file = "dm_tree-0.1.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:694c3654cfd2a81552c08ec66bb5c4a3d48fa292b9a181880fb081c36c5b9134"}, + {file = "dm_tree-0.1.8-cp38-cp38-win_amd64.whl", hash = "sha256:bb2d109f42190225112da899b9f3d46d0d5f26aef501c61e43529fe9322530b5"}, + {file = "dm_tree-0.1.8-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d16e1f2a073604cfcc09f7131ae8d534674f43c3aef4c25742eae295bc60d04f"}, + {file = "dm_tree-0.1.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:250b692fb75f45f02e2f58fbef9ab338904ef334b90557565621fa251df267cf"}, + {file = "dm_tree-0.1.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81fce77f22a302d7a5968aebdf4efafef4def7ce96528719a354e6990dcd49c7"}, + {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7ac31b9aecccb2c6e1ab29706f6ded3eba0c2c69c770322c9c685929c3d6afb"}, + {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fe962015b2fe1282892b28ebe962faed53c7f98d942da9a4625cbf27baef913"}, + {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c52cbf4f8b3dbd0beaedf44f69fa85eec5e9dede612e08035e06ada6ec9426"}, + {file = "dm_tree-0.1.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:181c35521d480d0365f39300542cb6cd7fd2b77351bb43d7acfda15aef63b317"}, + {file = "dm_tree-0.1.8-cp39-cp39-win_amd64.whl", hash = "sha256:8ed3564abed97c806db122c2d3e1a2b64c74a63debe9903aad795167cc301368"}, +] + +[[package]] +name = "dnspython" +version = "2.6.1" 
+description = "DNS toolkit" +optional = false +python-versions = ">=3.8" +files = [ + {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, + {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, +] + +[package.extras] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=41)"] +doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] +doq = ["aioquic (>=0.9.25)"] +idna = ["idna (>=3.6)"] +trio = ["trio (>=0.23)"] +wmi = ["wmi (>=1.5.1)"] + +[[package]] +name = "docker-pycreds" +version = "0.4.0" +description = "Python bindings for the docker credentials store API" +optional = false +python-versions = "*" +files = [ + {file = "docker-pycreds-0.4.0.tar.gz", hash = "sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4"}, + {file = "docker_pycreds-0.4.0-py2.py3-none-any.whl", hash = "sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49"}, +] + +[package.dependencies] +six = ">=1.4.0" + +[[package]] +name = "ecdsa" +version = "0.18.0" +description = "ECDSA cryptographic signature library (pure python)" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "ecdsa-0.18.0-py2.py3-none-any.whl", hash = "sha256:80600258e7ed2f16b9aa1d7c295bd70194109ad5a30fdee0eaeefef1d4c559dd"}, + {file = "ecdsa-0.18.0.tar.gz", hash = "sha256:190348041559e21b22a1d65cee485282ca11a6f81d503fddb84d5017e9ed1e49"}, +] + +[package.dependencies] +six = ">=1.9.0" + +[package.extras] +gmpy = ["gmpy"] +gmpy2 = ["gmpy2"] + +[[package]] +name = "einops" +version = "0.7.0" +description = "A new flavour of deep learning operations" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"einops-0.7.0-py3-none-any.whl", hash = "sha256:0f3096f26b914f465f6ff3c66f5478f9a5e380bb367ffc6493a68143fbbf1fd1"}, + {file = "einops-0.7.0.tar.gz", hash = "sha256:b2b04ad6081a3b227080c9bf5e3ace7160357ff03043cd66cc5b2319eb7031d1"}, +] + +[[package]] +name = "email-validator" +version = "2.1.1" +description = "A robust email address syntax and deliverability validation library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "email_validator-2.1.1-py3-none-any.whl", hash = "sha256:97d882d174e2a65732fb43bfce81a3a834cbc1bde8bf419e30ef5ea976370a05"}, + {file = "email_validator-2.1.1.tar.gz", hash = "sha256:200a70680ba08904be6d1eef729205cc0d687634399a5924d842533efb824b84"}, +] + +[package.dependencies] +dnspython = ">=2.0.0" +idna = ">=2.0.0" + +[[package]] +name = "executing" +version = "2.0.1" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.5" +files = [ + {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"}, + {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] + +[[package]] +name = "fastapi" +version = "0.105.0" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fastapi-0.105.0-py3-none-any.whl", hash = "sha256:f19ebf6fdc82a3281d10f2cb4774bdfa90238e3b40af3525a0c09fd08ad1c480"}, + {file = "fastapi-0.105.0.tar.gz", hash = "sha256:4d12838819aa52af244580675825e750ad67c9df4614f557a769606af902cf22"}, +] + +[package.dependencies] +anyio = ">=3.7.1,<4.0.0" +email-validator = {version = ">=2.0.0", optional = true, markers = "extra == \"all\""} +httpx = {version = ">=0.23.0", 
optional = true, markers = "extra == \"all\""} +itsdangerous = {version = ">=1.1.0", optional = true, markers = "extra == \"all\""} +jinja2 = {version = ">=2.11.2", optional = true, markers = "extra == \"all\""} +orjson = {version = ">=3.2.1", optional = true, markers = "extra == \"all\""} +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +pydantic-extra-types = {version = ">=2.0.0", optional = true, markers = "extra == \"all\""} +pydantic-settings = {version = ">=2.0.0", optional = true, markers = "extra == \"all\""} +python-multipart = {version = ">=0.0.5", optional = true, markers = "extra == \"all\""} +pyyaml = {version = ">=5.3.1", optional = true, markers = "extra == \"all\""} +starlette = ">=0.27.0,<0.28.0" +typing-extensions = ">=4.8.0" +ujson = {version = ">=4.0.1,<4.0.2 || >4.0.2,<4.1.0 || >4.1.0,<4.2.0 || >4.2.0,<4.3.0 || >4.3.0,<5.0.0 || >5.0.0,<5.1.0 || >5.1.0", optional = true, markers = "extra == \"all\""} +uvicorn = {version = ">=0.12.0", extras = ["standard"], optional = true, markers = "extra == \"all\""} + +[package.extras] +all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "filelock" +version = "3.13.1" +description = "A platform independent file lock." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"}, + {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +typing = ["typing-extensions (>=4.8)"] + +[[package]] +name = "fire" +version = "0.5.0" +description = "A library for automatically generating command line interfaces." +optional = false +python-versions = "*" +files = [ + {file = "fire-0.5.0.tar.gz", hash = "sha256:a6b0d49e98c8963910021f92bba66f65ab440da2982b78eb1bbf95a0a34aacc6"}, +] + +[package.dependencies] +six = "*" +termcolor = "*" + +[[package]] +name = "flake8" +version = "7.0.0" +description = "the modular source code checker: pep8 pyflakes and co" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "flake8-7.0.0-py2.py3-none-any.whl", hash = "sha256:a6dfbb75e03252917f2473ea9653f7cd799c3064e54d4c8140044c5c065f53c3"}, + {file = "flake8-7.0.0.tar.gz", hash = "sha256:33f96621059e65eec474169085dc92bf26e7b2d47366b70be2f67ab80dc25132"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.11.0,<2.12.0" +pyflakes = ">=3.2.0,<3.3.0" + +[[package]] +name = "flatbuffers" +version = "23.5.26" +description = "The FlatBuffers serialization format for Python" +optional = false +python-versions = "*" +files = [ + {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, + {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, +] + +[[package]] +name = "fonttools" 
+version = "4.49.0" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.49.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d970ecca0aac90d399e458f0b7a8a597e08f95de021f17785fb68e2dc0b99717"}, + {file = "fonttools-4.49.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac9a745b7609f489faa65e1dc842168c18530874a5f5b742ac3dd79e26bca8bc"}, + {file = "fonttools-4.49.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ba0e00620ca28d4ca11fc700806fd69144b463aa3275e1b36e56c7c09915559"}, + {file = "fonttools-4.49.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdee3ab220283057e7840d5fb768ad4c2ebe65bdba6f75d5d7bf47f4e0ed7d29"}, + {file = "fonttools-4.49.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ce7033cb61f2bb65d8849658d3786188afd80f53dad8366a7232654804529532"}, + {file = "fonttools-4.49.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:07bc5ea02bb7bc3aa40a1eb0481ce20e8d9b9642a9536cde0218290dd6085828"}, + {file = "fonttools-4.49.0-cp310-cp310-win32.whl", hash = "sha256:86eef6aab7fd7c6c8545f3ebd00fd1d6729ca1f63b0cb4d621bccb7d1d1c852b"}, + {file = "fonttools-4.49.0-cp310-cp310-win_amd64.whl", hash = "sha256:1fac1b7eebfce75ea663e860e7c5b4a8831b858c17acd68263bc156125201abf"}, + {file = "fonttools-4.49.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:edc0cce355984bb3c1d1e89d6a661934d39586bb32191ebff98c600f8957c63e"}, + {file = "fonttools-4.49.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:83a0d9336de2cba86d886507dd6e0153df333ac787377325a39a2797ec529814"}, + {file = "fonttools-4.49.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36c8865bdb5cfeec88f5028e7e592370a0657b676c6f1d84a2108e0564f90e22"}, + {file = "fonttools-4.49.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33037d9e56e2562c710c8954d0f20d25b8386b397250d65581e544edc9d6b942"}, + {file 
= "fonttools-4.49.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8fb022d799b96df3eaa27263e9eea306bd3d437cc9aa981820850281a02b6c9a"}, + {file = "fonttools-4.49.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33c584c0ef7dc54f5dd4f84082eabd8d09d1871a3d8ca2986b0c0c98165f8e86"}, + {file = "fonttools-4.49.0-cp311-cp311-win32.whl", hash = "sha256:cbe61b158deb09cffdd8540dc4a948d6e8f4d5b4f3bf5cd7db09bd6a61fee64e"}, + {file = "fonttools-4.49.0-cp311-cp311-win_amd64.whl", hash = "sha256:fc11e5114f3f978d0cea7e9853627935b30d451742eeb4239a81a677bdee6bf6"}, + {file = "fonttools-4.49.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d647a0e697e5daa98c87993726da8281c7233d9d4ffe410812a4896c7c57c075"}, + {file = "fonttools-4.49.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f3bbe672df03563d1f3a691ae531f2e31f84061724c319652039e5a70927167e"}, + {file = "fonttools-4.49.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bebd91041dda0d511b0d303180ed36e31f4f54b106b1259b69fade68413aa7ff"}, + {file = "fonttools-4.49.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4145f91531fd43c50f9eb893faa08399816bb0b13c425667c48475c9f3a2b9b5"}, + {file = "fonttools-4.49.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ea329dafb9670ffbdf4dbc3b0e5c264104abcd8441d56de77f06967f032943cb"}, + {file = "fonttools-4.49.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:c076a9e548521ecc13d944b1d261ff3d7825048c338722a4bd126d22316087b7"}, + {file = "fonttools-4.49.0-cp312-cp312-win32.whl", hash = "sha256:b607ea1e96768d13be26d2b400d10d3ebd1456343eb5eaddd2f47d1c4bd00880"}, + {file = "fonttools-4.49.0-cp312-cp312-win_amd64.whl", hash = "sha256:a974c49a981e187381b9cc2c07c6b902d0079b88ff01aed34695ec5360767034"}, + {file = "fonttools-4.49.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b85ec0bdd7bdaa5c1946398cbb541e90a6dfc51df76dfa88e0aaa41b335940cb"}, + {file = 
"fonttools-4.49.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:af20acbe198a8a790618ee42db192eb128afcdcc4e96d99993aca0b60d1faeb4"}, + {file = "fonttools-4.49.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d418b1fee41a1d14931f7ab4b92dc0bc323b490e41d7a333eec82c9f1780c75"}, + {file = "fonttools-4.49.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b44a52b8e6244b6548851b03b2b377a9702b88ddc21dcaf56a15a0393d425cb9"}, + {file = "fonttools-4.49.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7c7125068e04a70739dad11857a4d47626f2b0bd54de39e8622e89701836eabd"}, + {file = "fonttools-4.49.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29e89d0e1a7f18bc30f197cfadcbef5a13d99806447c7e245f5667579a808036"}, + {file = "fonttools-4.49.0-cp38-cp38-win32.whl", hash = "sha256:9d95fa0d22bf4f12d2fb7b07a46070cdfc19ef5a7b1c98bc172bfab5bf0d6844"}, + {file = "fonttools-4.49.0-cp38-cp38-win_amd64.whl", hash = "sha256:768947008b4dc552d02772e5ebd49e71430a466e2373008ce905f953afea755a"}, + {file = "fonttools-4.49.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:08877e355d3dde1c11973bb58d4acad1981e6d1140711230a4bfb40b2b937ccc"}, + {file = "fonttools-4.49.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fdb54b076f25d6b0f0298dc706acee5052de20c83530fa165b60d1f2e9cbe3cb"}, + {file = "fonttools-4.49.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0af65c720520710cc01c293f9c70bd69684365c6015cc3671db2b7d807fe51f2"}, + {file = "fonttools-4.49.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f255ce8ed7556658f6d23f6afd22a6d9bbc3edb9b96c96682124dc487e1bf42"}, + {file = "fonttools-4.49.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d00af0884c0e65f60dfaf9340e26658836b935052fdd0439952ae42e44fdd2be"}, + {file = "fonttools-4.49.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:263832fae27481d48dfafcc43174644b6706639661e242902ceb30553557e16c"}, + {file = 
"fonttools-4.49.0-cp39-cp39-win32.whl", hash = "sha256:0404faea044577a01bb82d47a8fa4bc7a54067fa7e324785dd65d200d6dd1133"}, + {file = "fonttools-4.49.0-cp39-cp39-win_amd64.whl", hash = "sha256:b050d362df50fc6e38ae3954d8c29bf2da52be384649ee8245fdb5186b620836"}, + {file = "fonttools-4.49.0-py3-none-any.whl", hash = "sha256:af281525e5dd7fa0b39fb1667b8d5ca0e2a9079967e14c4bfe90fd1cd13e0f18"}, + {file = "fonttools-4.49.0.tar.gz", hash = "sha256:ebf46e7f01b7af7861310417d7c49591a85d99146fc23a5ba82fdb28af156321"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "pycairo", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.1.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "pycairo", "scipy"] +lxml = ["lxml (>=4.0)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.1.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "gast" +version = "0.5.4" +description = "Python AST that abstracts the underlying Python version" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "gast-0.5.4-py3-none-any.whl", hash = "sha256:6fc4fa5fa10b72fb8aab4ae58bcb023058386e67b6fa2e3e34cec5c769360316"}, + {file = "gast-0.5.4.tar.gz", hash = "sha256:9c270fe5f4b130969b54174de7db4e764b09b4f7f67ccfc32480e29f78348d97"}, +] + +[[package]] +name = "gitdb" +version = "4.0.11" +description = "Git Object Database" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"}, + {file = "gitdb-4.0.11.tar.gz", hash = 
"sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.42" +description = "GitPython is a Python library used to interact with Git repositories" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.42-py3-none-any.whl", hash = "sha256:1bf9cd7c9e7255f77778ea54359e54ac22a72a5b51288c457c881057b7bb9ecd"}, + {file = "GitPython-3.1.42.tar.gz", hash = "sha256:2d99869e0fef71a73cbd242528105af1d6c1b108c60dfabd994bf292f76c3ceb"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[package.extras] +test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest (>=7.3.1)", "pytest-cov", "pytest-instafail", "pytest-mock", "pytest-sugar"] + +[[package]] +name = "google-pasta" +version = "0.2.0" +description = "pasta is an AST-based Python refactoring library" +optional = false +python-versions = "*" +files = [ + {file = "google-pasta-0.2.0.tar.gz", hash = "sha256:c9f2c8dfc8f96d0d5808299920721be30c9eec37f2389f28904f454565c8a16e"}, + {file = "google_pasta-0.2.0-py2-none-any.whl", hash = "sha256:4612951da876b1a10fe3960d7226f0c7682cf901e16ac06e473b267a5afa8954"}, + {file = "google_pasta-0.2.0-py3-none-any.whl", hash = "sha256:b32482794a366b5366a32c92a9a9201b107821889935a02b3e51f6b432ea84ed"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "greenlet" +version = "3.0.3" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, + {file = 
"greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, + {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, + {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, + {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, + {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, + {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, + {file = 
"greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, + {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, + {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, + {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, + {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, + {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, + {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = 
"sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, + {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, + {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, + {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, + {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, + {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, + {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, + {file = 
"greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, + {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, + {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, + {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, + {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, + {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, + {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, + {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, + {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, + {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, + {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil"] + +[[package]] +name = "grpcio" +version = "1.62.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.62.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:136ffd79791b1eddda8d827b607a6285474ff8a1a5735c4947b58c481e5e4271"}, + {file = "grpcio-1.62.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:d6a56ba703be6b6267bf19423d888600c3f574ac7c2cc5e6220af90662a4d6b0"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:4cd356211579043fce9f52acc861e519316fff93980a212c8109cca8f47366b6"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e803e9b58d8f9b4ff0ea991611a8d51b31c68d2e24572cd1fe85e99e8cc1b4f8"}, + {file = "grpcio-1.62.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4c04fe33039b35b97c02d2901a164bbbb2f21fb9c4e2a45a959f0b044c3512c"}, + {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:95370c71b8c9062f9ea033a0867c4c73d6f0ff35113ebd2618171ec1f1e903e0"}, + {file = "grpcio-1.62.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c912688acc05e4ff012c8891803659d6a8a8b5106f0f66e0aed3fb7e77898fa6"}, + {file = "grpcio-1.62.0-cp310-cp310-win32.whl", hash = "sha256:821a44bd63d0f04e33cf4ddf33c14cae176346486b0df08b41a6132b976de5fc"}, + {file = "grpcio-1.62.0-cp310-cp310-win_amd64.whl", hash = "sha256:81531632f93fece32b2762247c4c169021177e58e725494f9a746ca62c83acaa"}, + {file = "grpcio-1.62.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:3fa15850a6aba230eed06b236287c50d65a98f05054a0f01ccedf8e1cc89d57f"}, + {file = "grpcio-1.62.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:36df33080cd7897623feff57831eb83c98b84640b016ce443305977fac7566fb"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:7a195531828b46ea9c4623c47e1dc45650fc7206f8a71825898dd4c9004b0928"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab140a3542bbcea37162bdfc12ce0d47a3cda3f2d91b752a124cc9fe6776a9e2"}, + {file = "grpcio-1.62.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f9d6c3223914abb51ac564dc9c3782d23ca445d2864321b9059d62d47144021"}, + {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:fbe0c20ce9a1cff75cfb828b21f08d0a1ca527b67f2443174af6626798a754a4"}, + {file = "grpcio-1.62.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:38f69de9c28c1e7a8fd24e4af4264726637b72f27c2099eaea6e513e7142b47e"}, + {file = "grpcio-1.62.0-cp311-cp311-win32.whl", hash = "sha256:ce1aafdf8d3f58cb67664f42a617af0e34555fe955450d42c19e4a6ad41c84bd"}, + {file = "grpcio-1.62.0-cp311-cp311-win_amd64.whl", hash = "sha256:eef1d16ac26c5325e7d39f5452ea98d6988c700c427c52cbc7ce3201e6d93334"}, + {file = "grpcio-1.62.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8aab8f90b2a41208c0a071ec39a6e5dbba16fd827455aaa070fec241624ccef8"}, + {file = 
"grpcio-1.62.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:62aa1659d8b6aad7329ede5d5b077e3d71bf488d85795db517118c390358d5f6"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:0d7ae7fc7dbbf2d78d6323641ded767d9ec6d121aaf931ec4a5c50797b886532"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f359d635ee9428f0294bea062bb60c478a8ddc44b0b6f8e1f42997e5dc12e2ee"}, + {file = "grpcio-1.62.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d48e5b1f8f4204889f1acf30bb57c30378e17c8d20df5acbe8029e985f735c"}, + {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:662d3df5314ecde3184cf87ddd2c3a66095b3acbb2d57a8cada571747af03873"}, + {file = "grpcio-1.62.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92cdb616be44c8ac23a57cce0243af0137a10aa82234f23cd46e69e115071388"}, + {file = "grpcio-1.62.0-cp312-cp312-win32.whl", hash = "sha256:0b9179478b09ee22f4a36b40ca87ad43376acdccc816ce7c2193a9061bf35701"}, + {file = "grpcio-1.62.0-cp312-cp312-win_amd64.whl", hash = "sha256:614c3ed234208e76991992342bab725f379cc81c7dd5035ee1de2f7e3f7a9842"}, + {file = "grpcio-1.62.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:7e1f51e2a460b7394670fdb615e26d31d3260015154ea4f1501a45047abe06c9"}, + {file = "grpcio-1.62.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:bcff647e7fe25495e7719f779cc219bbb90b9e79fbd1ce5bda6aae2567f469f2"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:56ca7ba0b51ed0de1646f1735154143dcbdf9ec2dbe8cc6645def299bb527ca1"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e84bfb2a734e4a234b116be208d6f0214e68dcf7804306f97962f93c22a1839"}, + {file = "grpcio-1.62.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c1488b31a521fbba50ae86423f5306668d6f3a46d124f7819c603979fc538c4"}, + {file = 
"grpcio-1.62.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:98d8f4eb91f1ce0735bf0b67c3b2a4fea68b52b2fd13dc4318583181f9219b4b"}, + {file = "grpcio-1.62.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b3d3d755cfa331d6090e13aac276d4a3fb828bf935449dc16c3d554bf366136b"}, + {file = "grpcio-1.62.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a33f2bfd8a58a02aab93f94f6c61279be0f48f99fcca20ebaee67576cd57307b"}, + {file = "grpcio-1.62.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:5e709f7c8028ce0443bddc290fb9c967c1e0e9159ef7a030e8c21cac1feabd35"}, + {file = "grpcio-1.62.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:2f3d9a4d0abb57e5f49ed5039d3ed375826c2635751ab89dcc25932ff683bbb6"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:62ccb92f594d3d9fcd00064b149a0187c246b11e46ff1b7935191f169227f04c"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:921148f57c2e4b076af59a815467d399b7447f6e0ee10ef6d2601eb1e9c7f402"}, + {file = "grpcio-1.62.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f897b16190b46bc4d4aaf0a32a4b819d559a37a756d7c6b571e9562c360eed72"}, + {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1bc8449084fe395575ed24809752e1dc4592bb70900a03ca42bf236ed5bf008f"}, + {file = "grpcio-1.62.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81d444e5e182be4c7856cd33a610154fe9ea1726bd071d07e7ba13fafd202e38"}, + {file = "grpcio-1.62.0-cp38-cp38-win32.whl", hash = "sha256:88f41f33da3840b4a9bbec68079096d4caf629e2c6ed3a72112159d570d98ebe"}, + {file = "grpcio-1.62.0-cp38-cp38-win_amd64.whl", hash = "sha256:fc2836cb829895ee190813446dce63df67e6ed7b9bf76060262c55fcd097d270"}, + {file = "grpcio-1.62.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:fcc98cff4084467839d0a20d16abc2a76005f3d1b38062464d088c07f500d170"}, + {file = "grpcio-1.62.0-cp39-cp39-macosx_10_10_universal2.whl", hash = 
"sha256:0d3dee701e48ee76b7d6fbbba18ba8bc142e5b231ef7d3d97065204702224e0e"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:b7a6be562dd18e5d5bec146ae9537f20ae1253beb971c0164f1e8a2f5a27e829"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29cb592c4ce64a023712875368bcae13938c7f03e99f080407e20ffe0a9aa33b"}, + {file = "grpcio-1.62.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1eda79574aec8ec4d00768dcb07daba60ed08ef32583b62b90bbf274b3c279f7"}, + {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7eea57444a354ee217fda23f4b479a4cdfea35fb918ca0d8a0e73c271e52c09c"}, + {file = "grpcio-1.62.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0e97f37a3b7c89f9125b92d22e9c8323f4e76e7993ba7049b9f4ccbe8bae958a"}, + {file = "grpcio-1.62.0-cp39-cp39-win32.whl", hash = "sha256:39cd45bd82a2e510e591ca2ddbe22352e8413378852ae814549c162cf3992a93"}, + {file = "grpcio-1.62.0-cp39-cp39-win_amd64.whl", hash = "sha256:b71c65427bf0ec6a8b48c68c17356cb9fbfc96b1130d20a07cb462f4e4dcdcd5"}, + {file = "grpcio-1.62.0.tar.gz", hash = "sha256:748496af9238ac78dcd98cce65421f1adce28c3979393e3609683fcd7f3880d7"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.62.0)"] + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "h5py" +version = "3.10.0" +description = "Read and write HDF5 files from Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "h5py-3.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:b963fb772964fc1d1563c57e4e2e874022ce11f75ddc6df1a626f42bd49ab99f"}, + {file = "h5py-3.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:012ab448590e3c4f5a8dd0f3533255bc57f80629bf7c5054cf4c87b30085063c"}, + {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:781a24263c1270a62cd67be59f293e62b76acfcc207afa6384961762bb88ea03"}, + {file = "h5py-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42e6c30698b520f0295d70157c4e202a9e402406f50dc08f5a7bc416b24e52d"}, + {file = "h5py-3.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:93dd840bd675787fc0b016f7a05fc6efe37312a08849d9dd4053fd0377b1357f"}, + {file = "h5py-3.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2381e98af081b6df7f6db300cd88f88e740649d77736e4b53db522d8874bf2dc"}, + {file = "h5py-3.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:667fe23ab33d5a8a6b77970b229e14ae3bb84e4ea3382cc08567a02e1499eedd"}, + {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90286b79abd085e4e65e07c1bd7ee65a0f15818ea107f44b175d2dfe1a4674b7"}, + {file = "h5py-3.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c013d2e79c00f28ffd0cc24e68665ea03ae9069e167087b2adb5727d2736a52"}, + {file = "h5py-3.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:92273ce69ae4983dadb898fd4d3bea5eb90820df953b401282ee69ad648df684"}, + {file = "h5py-3.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c97d03f87f215e7759a354460fb4b0d0f27001450b18b23e556e7856a0b21c3"}, + {file = "h5py-3.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86df4c2de68257b8539a18646ceccdcf2c1ce6b1768ada16c8dcfb489eafae20"}, + {file = "h5py-3.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba9ab36be991119a3ff32d0c7cbe5faf9b8d2375b5278b2aea64effbeba66039"}, + {file = "h5py-3.10.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:2c8e4fda19eb769e9a678592e67eaec3a2f069f7570c82d2da909c077aa94339"}, + {file = "h5py-3.10.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:492305a074327e8d2513011fa9fffeb54ecb28a04ca4c4227d7e1e9616d35641"}, + {file = "h5py-3.10.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9450464b458cca2c86252b624279115dcaa7260a40d3cb1594bf2b410a2bd1a3"}, + {file = "h5py-3.10.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd6f6d1384a9f491732cee233b99cd4bfd6e838a8815cc86722f9d2ee64032af"}, + {file = "h5py-3.10.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3074ec45d3dc6e178c6f96834cf8108bf4a60ccb5ab044e16909580352010a97"}, + {file = "h5py-3.10.0-cp38-cp38-win_amd64.whl", hash = "sha256:212bb997a91e6a895ce5e2f365ba764debeaef5d2dca5c6fb7098d66607adf99"}, + {file = "h5py-3.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5dfc65ac21fa2f630323c92453cadbe8d4f504726ec42f6a56cf80c2f90d6c52"}, + {file = "h5py-3.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d4682b94fd36ab217352be438abd44c8f357c5449b8995e63886b431d260f3d3"}, + {file = "h5py-3.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aece0e2e1ed2aab076c41802e50a0c3e5ef8816d60ece39107d68717d4559824"}, + {file = "h5py-3.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43a61b2c2ad65b1fabc28802d133eed34debcc2c8b420cb213d3d4ef4d3e2229"}, + {file = "h5py-3.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:ae2f0201c950059676455daf92700eeb57dcf5caaf71b9e1328e6e6593601770"}, + {file = "h5py-3.10.0.tar.gz", hash = "sha256:d93adc48ceeb33347eb24a634fb787efc7ae4644e6ea4ba733d099605045c049"}, +] + +[package.dependencies] +numpy = ">=1.17.3" + +[[package]] +name = "httpcore" +version = "1.0.4" +description = "A minimal low-level HTTP client." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "httpcore-1.0.4-py3-none-any.whl", hash = "sha256:ac418c1db41bade2ad53ae2f3834a3a0f5ae76b56cf5aa497d2d033384fc7d73"}, + {file = "httpcore-1.0.4.tar.gz", hash = "sha256:cb2839ccfcba0d2d3c1131d3c3e26dfc327326fbe7a5dc0dbfe9f6c9151bb022"}, +] + +[package.dependencies] +certifi = "*" +h11 = ">=0.13,<0.15" + +[package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<0.25.0)"] + +[[package]] +name = "httptools" +version = "0.6.1" +description = "A collection of framework independent HTTP protocol utils." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"}, + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"}, + {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"}, + {file = 
"httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"}, + {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"}, + {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8e216a038d2d52ea13fdd9b9c9c7459fb80d78302b257828285eca1c773b99b3"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3e802e0b2378ade99cd666b5bffb8b2a7cc8f3d28988685dc300469ea8dd86cb"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd3e488b447046e386a30f07af05f9b38d3d368d1f7b4d8f7e10af85393db97"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe467eb086d80217b7584e61313ebadc8d187a4d95bb62031b7bab4b205c3ba3"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3c3b214ce057c54675b00108ac42bacf2ab8f85c58e3f324a4e963bbc46424f4"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ae5b97f690badd2ca27cbf668494ee1b6d34cf1c464271ef7bfa9ca6b83ffaf"}, + {file = "httptools-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:405784577ba6540fa7d6ff49e37daf104e04f4b4ff2d1ac0469eaa6a20fde084"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:95fb92dd3649f9cb139e9c56604cc2d7c7bf0fc2e7c8d7fbd58f96e35eddd2a3"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dcbab042cc3ef272adc11220517278519adf8f53fd3056d0e68f0a6f891ba94e"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf2372e98406efb42e93bfe10f2948e467edfd792b015f1b4ecd897903d3e8d"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678fcbae74477a17d103b7cae78b74800d795d702083867ce160fc202104d0da"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = 
"sha256:e0b281cf5a125c35f7f6722b65d8542d2e57331be573e9e88bc8b0115c4a7a81"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:95658c342529bba4e1d3d2b1a874db16c7cca435e8827422154c9da76ac4e13a"}, + {file = "httptools-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ebaec1bf683e4bf5e9fbb49b8cc36da482033596a415b3e4ebab5a4c0d7ec5e"}, + {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"}, +] + +[package.extras] +test = ["Cython (>=0.29.24,<0.30.0)"] + +[[package]] +name = "httpx" +version = "0.27.0" +description = "The next generation HTTP client." +optional = false +python-versions = ">=3.8" +files = [ + {file = "httpx-0.27.0-py3-none-any.whl", hash = "sha256:71d5465162c13681bff01ad59b2cc68dd838ea1f10e51574bac27103f00c91a5"}, + {file = "httpx-0.27.0.tar.gz", hash = "sha256:a0cb88a46f32dc874e04ee956e4c2764aba2aa228f650b06788ba6bda2962ab5"}, +] + +[package.dependencies] +anyio = "*" +certifi = "*" +httpcore = "==1.*" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] + +[[package]] +name = "humanfriendly" +version = "10.0" +description = "Human friendly output for text interfaces using Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, + {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, +] + +[package.dependencies] +pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} + +[[package]] +name = "identify" +version = "2.5.35" +description = "File identification library for Python" +optional = false +python-versions = ">=3.8" +files = [ 
+ {file = "identify-2.5.35-py2.py3-none-any.whl", hash = "sha256:c4de0081837b211594f8e877a6b4fad7ca32bbfc1a9307fdd61c28bfe923f13e"}, + {file = "identify-2.5.35.tar.gz", hash = "sha256:10a7ca245cfcd756a554a7288159f72ff105ad233c7c4b9c6f0f4d108f5f6791"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.6" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"}, + {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"}, +] + +[[package]] +name = "imageio" +version = "2.34.0" +description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats." +optional = false +python-versions = ">=3.8" +files = [ + {file = "imageio-2.34.0-py3-none-any.whl", hash = "sha256:08082bf47ccb54843d9c73fe9fc8f3a88c72452ab676b58aca74f36167e8ccba"}, + {file = "imageio-2.34.0.tar.gz", hash = "sha256:ae9732e10acf807a22c389aef193f42215718e16bd06eed0c5bb57e1034a4d53"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=8.3.2" + +[package.extras] +all-plugins = ["astropy", "av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] +all-plugins-pypy = ["av", "imageio-ffmpeg", "pillow-heif", "psutil", "tifffile"] +build = ["wheel"] +dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"] +docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"] +ffmpeg = ["imageio-ffmpeg", "psutil"] +fits = ["astropy"] +full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpydoc", "pillow-heif", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "sphinx (<6)", "tifffile", "wheel"] +gdal = ["gdal"] +itk = ["itk"] +linting = ["black", "flake8"] +pillow-heif = ["pillow-heif"] +pyav = ["av"] +test = ["fsspec[github]", 
"pytest", "pytest-cov"] +tifffile = ["tifffile"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "ipykernel" +version = "6.29.3" +description = "IPython Kernel for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipykernel-6.29.3-py3-none-any.whl", hash = "sha256:5aa086a4175b0229d4eca211e181fb473ea78ffd9869af36ba7694c947302a21"}, + {file = "ipykernel-6.29.3.tar.gz", hash = "sha256:e14c250d1f9ea3989490225cc1a542781b095a18a19447fcf2b5eaf7d0ac5bd2"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" +packaging = "*" +psutil = "*" +pyzmq = ">=24" +tornado = ">=6.1" +traitlets = ">=5.4.0" + +[package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.23.5)", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "ipython" +version = "8.22.2" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.10" +files = [ + {file = "ipython-8.22.2-py3-none-any.whl", hash = "sha256:3c86f284c8f3d8f2b6c662f885c4889a91df7cd52056fd02b7d8d6195d7f56e9"}, + {file = "ipython-8.22.2.tar.gz", hash = 
"sha256:2dcaad9049f9056f1fef63514f176c7d41f930daa78d05b82a176202818f2c14"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\""} +prompt-toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5.13.0" + +[package.extras] +all = ["ipython[black,doc,kernel,nbconvert,nbformat,notebook,parallel,qtconsole,terminal]", "ipython[test,test-extra]"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "ipykernel", "ipython[test]", "matplotlib", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "sphinxcontrib-jquery", "stack-data", "typing-extensions"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["pickleshare", "pytest (<8)", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "ipython[test]", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.23)", "pandas", "trio"] + +[[package]] +name = "isort" +version = "5.13.2" +description = "A Python utility / library to sort Python imports." +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, + {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, +] + +[package.extras] +colors = ["colorama (>=0.4.6)"] + +[[package]] +name = "itsdangerous" +version = "2.1.2" +description = "Safely pass data to untrusted environments and back." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"}, + {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"}, +] + +[[package]] +name = "jedi" +version = "0.19.1" +description = "An autocompletion tool for Python that can be used for text editors." +optional = false +python-versions = ">=3.6" +files = [ + {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, + {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, +] + +[package.dependencies] +parso = ">=0.8.3,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.3" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.3-py3-none-any.whl", hash = "sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa"}, + {file = "Jinja2-3.1.3.tar.gz", hash = "sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "joblib" +version = "1.3.2" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"}, + {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"}, +] + +[[package]] +name = "jupyter-client" +version = "8.6.0" +description = "Jupyter protocol implementation and client libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_client-8.6.0-py3-none-any.whl", hash = "sha256:909c474dbe62582ae62b758bca86d6518c85234bdee2d908c778db6d72f39d99"}, + {file = "jupyter_client-8.6.0.tar.gz", hash = "sha256:0642244bb83b4764ae60d07e010e15f0e2d275ec4e918a8f7b80fbbef3ca60c7"}, +] + +[package.dependencies] +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +python-dateutil = ">=2.8.2" +pyzmq = ">=23.0" +tornado = ">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] 
+test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-core" +version = "5.7.1" +description = "Jupyter core package. A base package on which Jupyter projects rely." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.7.1-py3-none-any.whl", hash = "sha256:c65c82126453a723a2804aa52409930434598fd9d35091d63dfb919d2b765bb7"}, + {file = "jupyter_core-5.7.1.tar.gz", hash = "sha256:de61a9d7fc71240f688b2fb5ab659fbb56979458dc66a71decd098e03c79e218"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "keras" +version = "3.0.5" +description = "Multi-backend Keras." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "keras-3.0.5-py3-none-any.whl", hash = "sha256:4a022f2e97ea5a3db12ed809ffcb7ce1ef8d34feaeac52315ec8553ded2dcf97"}, + {file = "keras-3.0.5.tar.gz", hash = "sha256:df3d3795e12c3f6035e811c43c13f1eb41e37241796a0fea120ede4ebe1c4496"}, +] + +[package.dependencies] +absl-py = "*" +dm-tree = "*" +h5py = "*" +ml-dtypes = "*" +namex = "*" +numpy = "*" +rich = "*" + +[[package]] +name = "keras2onnx" +version = "1.7.0" +description = "Converts Machine Learning models to ONNX for use in Windows ML" +optional = false +python-versions = "*" +files = [ + {file = "keras2onnx-1.7.0-py3-none-any.whl", hash = "sha256:341159ae4b8b2ae06d876e71475e87a364ee2160b49981474a53f1d62b9626e6"}, +] + +[package.dependencies] +fire = "*" +numpy = "*" +onnx = "*" +onnxconverter-common = ">=1.7.0" +protobuf = "*" +requests = "*" + +[[package]] +name = "kiwisolver" +version = "1.4.5" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, + {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, + {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, + {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, + {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, + {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, + {file = 
"kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, + {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, + {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, + {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, + {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, + {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, + {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, + {file = 
"kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, + {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, + {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, + {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, + {file = 
"kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, + {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, + {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, + {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, + 
{file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, + {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, + {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, + {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, + {file = 
"kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, + {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, + {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, + {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, +] + +[[package]] +name = "lazy-loader" +version = "0.3" +description = "lazy_loader" +optional = false +python-versions = ">=3.7" +files = [ + {file = "lazy_loader-0.3-py3-none-any.whl", hash = "sha256:1e9e76ee8631e264c62ce10006718e80b2cfc74340d17d1031e0f84af7478554"}, + {file = "lazy_loader-0.3.tar.gz", hash = 
"sha256:3b68898e34f5b2a29daaaac172c6555512d0f32074f147e2254e4a6d9d838f37"}, +] + +[package.extras] +lint = ["pre-commit (>=3.3)"] +test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"] + +[[package]] +name = "libclang" +version = "16.0.6" +description = "Clang Python Bindings, mirrored from the official LLVM repo: https://github.com/llvm/llvm-project/tree/main/clang/bindings/python, to make the installation process easier." +optional = false +python-versions = "*" +files = [ + {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:88bc7e7b393c32e41e03ba77ef02fdd647da1f764c2cd028e69e0837080b79f6"}, + {file = "libclang-16.0.6-1-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:d80ed5827736ed5ec2bcedf536720476fd9d4fa4c79ef0cb24aea4c59332f361"}, + {file = "libclang-16.0.6-py2.py3-none-macosx_10_9_x86_64.whl", hash = "sha256:da9e47ebc3f0a6d90fb169ef25f9fbcd29b4a4ef97a8b0e3e3a17800af1423f4"}, + {file = "libclang-16.0.6-py2.py3-none-macosx_11_0_arm64.whl", hash = "sha256:e1a5ad1e895e5443e205568c85c04b4608e4e973dae42f4dfd9cb46c81d1486b"}, + {file = "libclang-16.0.6-py2.py3-none-manylinux2010_x86_64.whl", hash = "sha256:9dcdc730939788b8b69ffd6d5d75fe5366e3ee007f1e36a99799ec0b0c001492"}, + {file = "libclang-16.0.6-py2.py3-none-manylinux2014_aarch64.whl", hash = "sha256:8130482120500476a027171f8f3c8dfc2536b591716eea71fc5da22cae13131b"}, + {file = "libclang-16.0.6-py2.py3-none-manylinux2014_armv7l.whl", hash = "sha256:1e940048f51d0b0999099a9b78629ab8a64b62af5e9ff1b2b062439c21ee244d"}, + {file = "libclang-16.0.6-py2.py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f04e3060ae1f207f234d0608900c99c50edcb743e5e18276d78da2ddd727d39f"}, + {file = "libclang-16.0.6-py2.py3-none-win_amd64.whl", hash = "sha256:daab4a11dae228f1efa9efa3fe638b493b14d8d52c71fb3c7019e2f1df4514c2"}, + {file = "libclang-16.0.6-py2.py3-none-win_arm64.whl", hash = "sha256:4a9acbfd9c135a72f80d5dbff7588dfb0c81458244a89b9e83526e8595880e0a"}, + {file = "libclang-16.0.6.tar.gz", hash = 
"sha256:4acdde39dfe410c877b4ccc0d4b57eb952100e4ee26bbdf6cfdb88e2033a7d31"}, +] + +[[package]] +name = "lit" +version = "17.0.6" +description = "A Software Testing Tool" +optional = false +python-versions = "*" +files = [ + {file = "lit-17.0.6.tar.gz", hash = "sha256:dfa9af9b55fc4509a56be7bf2346f079d7f4a242d583b9f2e0b078fd0abae31b"}, +] + +[[package]] +name = "loguru" +version = "0.7.2" +description = "Python logging made (stupidly) simple" +optional = false +python-versions = ">=3.5" +files = [ + {file = "loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb"}, + {file = "loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} +win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} + +[package.extras] +dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"] + +[[package]] +name = "markdown" +version = "3.5.2" +description = "Python implementation of John Gruber's Markdown." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "Markdown-3.5.2-py3-none-any.whl", hash = "sha256:d43323865d89fc0cb9b20c75fc8ad313af307cc087e84b657d9eec768eddeadd"}, + {file = "Markdown-3.5.2.tar.gz", hash = "sha256:e1ac7b3dc550ee80e602e71c1d168002f062e49f1b11e26a36264dafd4df2ef8"}, +] + +[package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.5)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" +optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "markupsafe" +version = "2.1.5" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, + {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, + {file = 
"MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, + {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, + {file = 
"MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, + {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, + {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, + {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, + {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, + {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, +] + +[[package]] +name = "matplotlib" +version = "3.8.3" +description = "Python plotting package" +optional = false +python-versions = ">=3.9" +files = [ + {file = "matplotlib-3.8.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:cf60138ccc8004f117ab2a2bad513cc4d122e55864b4fe7adf4db20ca68a078f"}, + {file = "matplotlib-3.8.3-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:5f557156f7116be3340cdeef7f128fa99b0d5d287d5f41a16e169819dcf22357"}, + {file = "matplotlib-3.8.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f386cf162b059809ecfac3bcc491a9ea17da69fa35c8ded8ad154cd4b933d5ec"}, + {file = "matplotlib-3.8.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b3c5f96f57b0369c288bf6f9b5274ba45787f7e0589a34d24bdbaf6d3344632f"}, + {file = "matplotlib-3.8.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:83e0f72e2c116ca7e571c57aa29b0fe697d4c6425c4e87c6e994159e0c008635"}, + {file = "matplotlib-3.8.3-cp310-cp310-win_amd64.whl", hash = "sha256:1c5c8290074ba31a41db1dc332dc2b62def469ff33766cbe325d32a3ee291aea"}, + {file = "matplotlib-3.8.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5184e07c7e1d6d1481862ee361905b7059f7fe065fc837f7c3dc11eeb3f2f900"}, + {file = "matplotlib-3.8.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d7e7e0993d0758933b1a241a432b42c2db22dfa37d4108342ab4afb9557cbe3e"}, + {file = "matplotlib-3.8.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04b36ad07eac9740fc76c2aa16edf94e50b297d6eb4c081e3add863de4bb19a7"}, + {file = "matplotlib-3.8.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c42dae72a62f14982f1474f7e5c9959fc4bc70c9de11cc5244c6e766200ba65"}, + {file = "matplotlib-3.8.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf5932eee0d428192c40b7eac1399d608f5d995f975cdb9d1e6b48539a5ad8d0"}, + {file = "matplotlib-3.8.3-cp311-cp311-win_amd64.whl", hash = "sha256:40321634e3a05ed02abf7c7b47a50be50b53ef3eaa3a573847431a545585b407"}, + {file = "matplotlib-3.8.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:09074f8057917d17ab52c242fdf4916f30e99959c1908958b1fc6032e2d0f6d4"}, + {file = "matplotlib-3.8.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5745f6d0fb5acfabbb2790318db03809a253096e98c91b9a31969df28ee604aa"}, + {file = 
"matplotlib-3.8.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97653d869a71721b639714b42d87cda4cfee0ee74b47c569e4874c7590c55c5"}, + {file = "matplotlib-3.8.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:242489efdb75b690c9c2e70bb5c6550727058c8a614e4c7716f363c27e10bba1"}, + {file = "matplotlib-3.8.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:83c0653c64b73926730bd9ea14aa0f50f202ba187c307a881673bad4985967b7"}, + {file = "matplotlib-3.8.3-cp312-cp312-win_amd64.whl", hash = "sha256:ef6c1025a570354297d6c15f7d0f296d95f88bd3850066b7f1e7b4f2f4c13a39"}, + {file = "matplotlib-3.8.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c4af3f7317f8a1009bbb2d0bf23dfaba859eb7dd4ccbd604eba146dccaaaf0a4"}, + {file = "matplotlib-3.8.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4c6e00a65d017d26009bac6808f637b75ceade3e1ff91a138576f6b3065eeeba"}, + {file = "matplotlib-3.8.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7b49ab49a3bea17802df6872f8d44f664ba8f9be0632a60c99b20b6db2165b7"}, + {file = "matplotlib-3.8.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6728dde0a3997396b053602dbd907a9bd64ec7d5cf99e728b404083698d3ca01"}, + {file = "matplotlib-3.8.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:813925d08fb86aba139f2d31864928d67511f64e5945ca909ad5bc09a96189bb"}, + {file = "matplotlib-3.8.3-cp39-cp39-win_amd64.whl", hash = "sha256:cd3a0c2be76f4e7be03d34a14d49ded6acf22ef61f88da600a18a5cd8b3c5f3c"}, + {file = "matplotlib-3.8.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fa93695d5c08544f4a0dfd0965f378e7afc410d8672816aff1e81be1f45dbf2e"}, + {file = "matplotlib-3.8.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9764df0e8778f06414b9d281a75235c1e85071f64bb5d71564b97c1306a2afc"}, + {file = "matplotlib-3.8.3-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:5e431a09e6fab4012b01fc155db0ce6dccacdbabe8198197f523a4ef4805eb26"}, + {file = "matplotlib-3.8.3.tar.gz", hash = "sha256:7b416239e9ae38be54b028abbf9048aff5054a9aba5416bef0bd17f9162ce161"}, +] + +[package.dependencies] +contourpy = ">=1.0.1" +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.3.1" +numpy = ">=1.21,<2" +packaging = ">=20.0" +pillow = ">=8" +pyparsing = ">=2.3.1" +python-dateutil = ">=2.7" + +[[package]] +name = "matplotlib-inline" +version = "0.1.6" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.5" +files = [ + {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, + {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, +] + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "ml-dtypes" +version = "0.3.2" +description = "" +optional = false +python-versions = ">=3.9" +files = [ + {file = "ml_dtypes-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7afde548890a92b41c0fed3a6c525f1200a5727205f73dc21181a2726571bb53"}, + {file = 
"ml_dtypes-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a746fe5fb9cd974a91070174258f0be129c592b93f9ce7df6cc336416c3fbd"}, + {file = "ml_dtypes-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:961134ea44c7b8ca63eda902a44b58cd8bd670e21d62e255c81fba0a8e70d9b7"}, + {file = "ml_dtypes-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:6b35c4e8ca957c877ac35c79ffa77724ecc3702a1e4b18b08306c03feae597bb"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:763697ab8a88d47443997a7cdf3aac7340049aed45f7521f6b0ec8a0594821fe"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b89b194e9501a92d289c1ffd411380baf5daafb9818109a4f49b0a1b6dce4462"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c34f2ba9660b21fe1034b608308a01be82bbef2a92fb8199f24dc6bad0d5226"}, + {file = "ml_dtypes-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:6604877d567a29bfe7cc02969ae0f2425260e5335505cf5e7fefc3e5465f5655"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:93b78f53431c93953f7850bb1b925a17f0ab5d97527e38a7e865b5b4bc5cfc18"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a17ef2322e60858d93584e9c52a5be7dd6236b056b7fa1ec57f1bb6ba043e33"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8505946df1665db01332d885c2020b4cb9e84a8b1241eb4ba69d59591f65855"}, + {file = "ml_dtypes-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:f47619d978ab1ae7dfdc4052ea97c636c6263e1f19bd1be0e42c346b98d15ff4"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c7b3fb3d4f6b39bcd4f6c4b98f406291f0d681a895490ee29a0f95bab850d53c"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7a4c3fcbf86fa52d0204f07cfd23947ef05b4ad743a1a988e163caa34a201e5e"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91f8783fd1f2c23fd3b9ee5ad66b785dafa58ba3cdb050c4458021fa4d1eb226"}, + {file = "ml_dtypes-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:7ba8e1fafc7fff3e643f453bffa7d082df1678a73286ce8187d3e825e776eb94"}, + {file = "ml_dtypes-0.3.2.tar.gz", hash = "sha256:533059bc5f1764fac071ef54598db358c167c51a718f68f5bb55e3dee79d2967"}, +] + +[package.dependencies] +numpy = {version = ">=1.23.3", markers = "python_version >= \"3.11\""} + +[package.extras] +dev = ["absl-py", "pyink", "pylint (>=2.6.0)", "pytest", "pytest-xdist"] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + +[[package]] +name = "mutagen" +version = "1.47.0" +description = "read and write audio tags for many formats" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mutagen-1.47.0-py3-none-any.whl", hash = "sha256:edd96f50c5907a9539d8e5bba7245f62c9f520aef333d13392a79a4f70aca719"}, + {file = "mutagen-1.47.0.tar.gz", hash = "sha256:719fadef0a978c31b4cf3c956261b3c58b6948b32023078a2117b1de09f0fc99"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + +[[package]] +name = "namex" +version = "0.0.7" +description = "A simple utility to separate the implementation of your Python package and its public API surface." +optional = false +python-versions = "*" +files = [ + {file = "namex-0.0.7-py3-none-any.whl", hash = "sha256:8a4f062945f405d77cb66b907f16aa2fd83681945e998be840eb6c4154d40108"}, + {file = "namex-0.0.7.tar.gz", hash = "sha256:84ba65bc4d22bd909e3d26bf2ffb4b9529b608cb3f9a4336f776b04204ced69b"}, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, + {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, +] + +[[package]] +name = "networkx" +version = "3.2.1" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.9" +files = [ + {file = "networkx-3.2.1-py3-none-any.whl", hash = "sha256:f18c69adc97877c42332c170849c96cefa91881c99a7cb3e95b7c659ebdc1ec2"}, + {file = "networkx-3.2.1.tar.gz", hash = "sha256:9f1bb5cf3409bf324e0a722c20bdb4c20ee39bf1c30ce8ae499c8502b0b5e0c6"}, +] + +[package.extras] +default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"] +developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"] +doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", 
"sphinx-gallery (>=0.14)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"] +test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"] + +[[package]] +name = "nodeenv" +version = "1.8.0" +description = "Node.js virtual environment builder" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"}, + {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.26.4" +description = "Fundamental package for array computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, + {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, + {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, + {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, + {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, + {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = 
"sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, + {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, + {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, + {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, + {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, + {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, + {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, + {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, + {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, + {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, + {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, + {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, + {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, + {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, + {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, + {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, + {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, + {file = 
"numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, + {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +] + +[[package]] +name = "onnx" +version = "1.15.0" +description = "Open Neural Network Exchange" +optional = false +python-versions = ">=3.8" +files = [ + {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:51cacb6aafba308aaf462252ced562111f6991cdc7bc57a6c554c3519453a8ff"}, + {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:0aee26b6f7f7da7e840de75ad9195a77a147d0662c94eaa6483be13ba468ffc1"}, + {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baf6ef6c93b3b843edb97a8d5b3d229a1301984f3f8dee859c29634d2083e6f9"}, + {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ed899fe6000edc05bb2828863d3841cfddd5a7cf04c1a771f112e94de75d9f"}, + {file = "onnx-1.15.0-cp310-cp310-win32.whl", hash = "sha256:f1ad3d77fc2f4b4296f0ac2c8cadd8c1dcf765fc586b737462d3a0fe8f7c696a"}, + {file = "onnx-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:ca4ebc4f47109bfb12c8c9e83dd99ec5c9f07d2e5f05976356c6ccdce3552010"}, + {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:233ffdb5ca8cc2d960b10965a763910c0830b64b450376da59207f454701f343"}, + {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:51fa79c9ea9af033638ec51f9177b8e76c55fad65bb83ea96ee88fafade18ee7"}, + {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f277d4861729f5253a51fa41ce91bfec1c4574ee41b5637056b43500917295ce"}, + {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8a7c94d2ebead8f739fdb70d1ce5a71726f4e17b3e5b8ad64455ea1b2801a85"}, + {file = "onnx-1.15.0-cp311-cp311-win32.whl", hash = 
"sha256:17dcfb86a8c6bdc3971443c29b023dd9c90ff1d15d8baecee0747a6b7f74e650"}, + {file = "onnx-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:60a3e28747e305cd2e766e6a53a0a6d952cf9e72005ec6023ce5e07666676a4e"}, + {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6b5c798d9e0907eaf319e3d3e7c89a2ed9a854bcb83da5fefb6d4c12d5e90721"}, + {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:a4f774ff50092fe19bd8f46b2c9b27b1d30fbd700c22abde48a478142d464322"}, + {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2b0e7f3938f2d994c34616bfb8b4b1cebbc4a0398483344fe5e9f2fe95175e6"}, + {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49cebebd0020a4b12c1dd0909d426631212ef28606d7e4d49463d36abe7639ad"}, + {file = "onnx-1.15.0-cp38-cp38-win32.whl", hash = "sha256:1fdf8a3ff75abc2b32c83bf27fb7c18d6b976c9c537263fadd82b9560fe186fa"}, + {file = "onnx-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:763e55c26e8de3a2dce008d55ae81b27fa8fb4acbb01a29b9f3c01f200c4d676"}, + {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:b2d5e802837629fc9c86f19448d19dd04d206578328bce202aeb3d4bedab43c4"}, + {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9a9cfbb5e5d5d88f89d0dfc9df5fb858899db874e1d5ed21e76c481f3cafc90d"}, + {file = "onnx-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f472bbe5cb670a0a4a4db08f41fde69b187a009d0cb628f964840d3f83524e9"}, + {file = "onnx-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf2de9bef64792e5b8080c678023ac7d2b9e05d79a3e17e92cf6a4a624831d2"}, + {file = "onnx-1.15.0-cp39-cp39-win32.whl", hash = "sha256:ef4d9eb44b111e69e4534f3233fc2c13d1e26920d24ae4359d513bd54694bc6d"}, + {file = "onnx-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:95d7a3e2d79d371e272e39ae3f7547e0b116d0c7f774a4004e97febe6c93507f"}, + {file = 
"onnx-1.15.0.tar.gz", hash = "sha256:b18461a7d38f286618ca2a6e78062a2a9c634ce498e631e708a8041b00094825"}, +] + +[package.dependencies] +numpy = "*" +protobuf = ">=3.20.2" + +[package.extras] +reference = ["Pillow", "google-re2"] + +[[package]] +name = "onnx2pytorch" +version = "0.4.1" +description = "Library to transform onnx model to pytorch." +optional = false +python-versions = ">=3.6" +files = [ + {file = "onnx2pytorch-0.4.1-py3-none-any.whl", hash = "sha256:0cde7a4c45bb16217a75e15f83c0cf838e7beffa4a192208f0f9308f90d7e5a2"}, + {file = "onnx2pytorch-0.4.1.tar.gz", hash = "sha256:f935f6b162fc2db4111b8e2948dca23f5e698da0a8a50a1771299c08754becf3"}, +] + +[package.dependencies] +onnx = ">=1.6.0" +torch = ">=1.4.0" +torchvision = ">=0.9.0" + +[[package]] +name = "onnxconverter-common" +version = "1.13.0" +description = "ONNX Converter and Optimization Tools" +optional = false +python-versions = "*" +files = [ + {file = "onnxconverter-common-1.13.0.tar.gz", hash = "sha256:03db8a6033a3d6590f22df3f64234079caa826375d1fcb0b37b8123c06bf598c"}, + {file = "onnxconverter_common-1.13.0-py2.py3-none-any.whl", hash = "sha256:5ee1c025ef6c3b4abaede8425bc6b393248941a6cf8c21563d0d0e3f04634a0a"}, +] + +[package.dependencies] +numpy = "*" +onnx = "*" +packaging = "*" +protobuf = "*" + +[[package]] +name = "onnxruntime" +version = "1.17.1" +description = "ONNX Runtime is a runtime accelerator for Machine Learning models" +optional = false +python-versions = "*" +files = [ + {file = "onnxruntime-1.17.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:d43ac17ac4fa3c9096ad3c0e5255bb41fd134560212dc124e7f52c3159af5d21"}, + {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55b5e92a4c76a23981c998078b9bf6145e4fb0b016321a8274b1607bd3c6bd35"}, + {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ebbcd2bc3a066cf54e6f18c75708eb4d309ef42be54606d22e5bdd78afc5b0d7"}, + {file = 
"onnxruntime-1.17.1-cp310-cp310-win32.whl", hash = "sha256:5e3716b5eec9092e29a8d17aab55e737480487deabfca7eac3cd3ed952b6ada9"}, + {file = "onnxruntime-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:fbb98cced6782ae1bb799cc74ddcbbeeae8819f3ad1d942a74d88e72b6511337"}, + {file = "onnxruntime-1.17.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:36fd6f87a1ecad87e9c652e42407a50fb305374f9a31d71293eb231caae18784"}, + {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99a8bddeb538edabc524d468edb60ad4722cff8a49d66f4e280c39eace70500b"}, + {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd7fddb4311deb5a7d3390cd8e9b3912d4d963efbe4dfe075edbaf18d01c024e"}, + {file = "onnxruntime-1.17.1-cp311-cp311-win32.whl", hash = "sha256:606a7cbfb6680202b0e4f1890881041ffc3ac6e41760a25763bd9fe146f0b335"}, + {file = "onnxruntime-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:53e4e06c0a541696ebdf96085fd9390304b7b04b748a19e02cf3b35c869a1e76"}, + {file = "onnxruntime-1.17.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:40f08e378e0f85929712a2b2c9b9a9cc400a90c8a8ca741d1d92c00abec60843"}, + {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac79da6d3e1bb4590f1dad4bb3c2979d7228555f92bb39820889af8b8e6bd472"}, + {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ae9ba47dc099004e3781f2d0814ad710a13c868c739ab086fc697524061695ea"}, + {file = "onnxruntime-1.17.1-cp312-cp312-win32.whl", hash = "sha256:2dff1a24354220ac30e4a4ce2fb1df38cb1ea59f7dac2c116238d63fe7f4c5ff"}, + {file = "onnxruntime-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:6226a5201ab8cafb15e12e72ff2a4fc8f50654e8fa5737c6f0bd57c5ff66827e"}, + {file = "onnxruntime-1.17.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:cd0c07c0d1dfb8629e820b05fda5739e4835b3b82faf43753d2998edf2cf00aa"}, + {file = 
"onnxruntime-1.17.1-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:617ebdf49184efa1ba6e4467e602fbfa029ed52c92f13ce3c9f417d303006381"}, + {file = "onnxruntime-1.17.1-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9dae9071e3facdf2920769dceee03b71c684b6439021defa45b830d05e148924"}, + {file = "onnxruntime-1.17.1-cp38-cp38-win32.whl", hash = "sha256:835d38fa1064841679433b1aa8138b5e1218ddf0cfa7a3ae0d056d8fd9cec713"}, + {file = "onnxruntime-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:96621e0c555c2453bf607606d08af3f70fbf6f315230c28ddea91754e17ad4e6"}, + {file = "onnxruntime-1.17.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:7a9539935fb2d78ebf2cf2693cad02d9930b0fb23cdd5cf37a7df813e977674d"}, + {file = "onnxruntime-1.17.1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:45c6a384e9d9a29c78afff62032a46a993c477b280247a7e335df09372aedbe9"}, + {file = "onnxruntime-1.17.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4e19f966450f16863a1d6182a685ca33ae04d7772a76132303852d05b95411ea"}, + {file = "onnxruntime-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e2ae712d64a42aac29ed7a40a426cb1e624a08cfe9273dcfe681614aa65b07dc"}, + {file = "onnxruntime-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:f7e9f7fb049825cdddf4a923cfc7c649d84d63c0134315f8e0aa9e0c3004672c"}, +] + +[package.dependencies] +coloredlogs = "*" +flatbuffers = "*" +numpy = ">=1.21.6" +packaging = "*" +protobuf = "*" +sympy = "*" + +[[package]] +name = "opencv-python" +version = "4.9.0.80" +description = "Wrapper package for OpenCV python bindings." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "opencv-python-4.9.0.80.tar.gz", hash = "sha256:1a9f0e6267de3a1a1db0c54213d022c7c8b5b9ca4b580e80bdc58516c922c9e1"}, + {file = "opencv_python-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:7e5f7aa4486651a6ebfa8ed4b594b65bd2d2f41beeb4241a3e4b1b85acbbbadb"}, + {file = "opencv_python-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:71dfb9555ccccdd77305fc3dcca5897fbf0cf28b297c51ee55e079c065d812a3"}, + {file = "opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b34a52e9da36dda8c151c6394aed602e4b17fa041df0b9f5b93ae10b0fcca2a"}, + {file = "opencv_python-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4088cab82b66a3b37ffc452976b14a3c599269c247895ae9ceb4066d8188a57"}, + {file = "opencv_python-4.9.0.80-cp37-abi3-win32.whl", hash = "sha256:dcf000c36dd1651118a2462257e3a9e76db789a78432e1f303c7bac54f63ef6c"}, + {file = "opencv_python-4.9.0.80-cp37-abi3-win_amd64.whl", hash = "sha256:3f16f08e02b2a2da44259c7cc712e779eff1dd8b55fdb0323e8cab09548086c0"}, +] + +[package.dependencies] +numpy = {version = ">=1.23.5", markers = "python_version >= \"3.11\""} + +[[package]] +name = "opencv-python-headless" +version = "4.9.0.80" +description = "Wrapper package for OpenCV python bindings." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "opencv-python-headless-4.9.0.80.tar.gz", hash = "sha256:71a4cd8cf7c37122901d8e81295db7fb188730e33a0e40039a4e59c1030b0958"}, + {file = "opencv_python_headless-4.9.0.80-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:2ea8a2edc4db87841991b2fbab55fc07b97ecb602e0f47d5d485bd75cee17c1a"}, + {file = "opencv_python_headless-4.9.0.80-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:e0ee54e27be493e8f7850847edae3128e18b540dac1d7b2e4001b8944e11e1c6"}, + {file = "opencv_python_headless-4.9.0.80-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57ce2865e8fec431c6f97a81e9faaf23fa5be61011d0a75ccf47a3c0d65fa73d"}, + {file = "opencv_python_headless-4.9.0.80-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:976656362d68d9f40a5c66f83901430538002465f7db59142784f3893918f3df"}, + {file = "opencv_python_headless-4.9.0.80-cp37-abi3-win32.whl", hash = "sha256:11e3849d83e6651d4e7699aadda9ec7ed7c38957cbbcb99db074f2a2d2de9670"}, + {file = "opencv_python_headless-4.9.0.80-cp37-abi3-win_amd64.whl", hash = "sha256:a8056c2cb37cd65dfcdf4153ca16f7362afcf3a50d600d6bb69c660fc61ee29c"}, +] + +[package.dependencies] +numpy = {version = ">=1.23.5", markers = "python_version >= \"3.11\""} + +[[package]] +name = "opt-einsum" +version = "3.3.0" +description = "Optimizing numpys einsum function" +optional = false +python-versions = ">=3.5" +files = [ + {file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"}, + {file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"}, +] + +[package.dependencies] +numpy = ">=1.7" + +[package.extras] +docs = ["numpydoc", "sphinx (==1.2.3)", "sphinx-rtd-theme", "sphinxcontrib-napoleon"] +tests = ["pytest", "pytest-cov", "pytest-pep8"] + +[[package]] +name = "orjson" +version = "3.9.15" +description = "Fast, correct Python JSON 
library supporting dataclasses, datetimes, and numpy" +optional = false +python-versions = ">=3.8" +files = [ + {file = "orjson-3.9.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d61f7ce4727a9fa7680cd6f3986b0e2c732639f46a5e0156e550e35258aa313a"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4feeb41882e8aa17634b589533baafdceb387e01e117b1ec65534ec724023d04"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fbbeb3c9b2edb5fd044b2a070f127a0ac456ffd079cb82746fc84af01ef021a4"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b66bcc5670e8a6b78f0313bcb74774c8291f6f8aeef10fe70e910b8040f3ab75"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2973474811db7b35c30248d1129c64fd2bdf40d57d84beed2a9a379a6f57d0ab"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fe41b6f72f52d3da4db524c8653e46243c8c92df826ab5ffaece2dba9cccd58"}, + {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4228aace81781cc9d05a3ec3a6d2673a1ad0d8725b4e915f1089803e9efd2b99"}, + {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f7b65bfaf69493c73423ce9db66cfe9138b2f9ef62897486417a8fcb0a92bfe"}, + {file = "orjson-3.9.15-cp310-none-win32.whl", hash = "sha256:2d99e3c4c13a7b0fb3792cc04c2829c9db07838fb6973e578b85c1745e7d0ce7"}, + {file = "orjson-3.9.15-cp310-none-win_amd64.whl", hash = "sha256:b725da33e6e58e4a5d27958568484aa766e825e93aa20c26c91168be58e08cbb"}, + {file = "orjson-3.9.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c8e8fe01e435005d4421f183038fc70ca85d2c1e490f51fb972db92af6e047c2"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:87f1097acb569dde17f246faa268759a71a2cb8c96dd392cd25c668b104cad2f"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff0f9913d82e1d1fadbd976424c316fbc4d9c525c81d047bbdd16bd27dd98cfc"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8055ec598605b0077e29652ccfe9372247474375e0e3f5775c91d9434e12d6b1"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6768a327ea1ba44c9114dba5fdda4a214bdb70129065cd0807eb5f010bfcbb5"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12365576039b1a5a47df01aadb353b68223da413e2e7f98c02403061aad34bde"}, + {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:71c6b009d431b3839d7c14c3af86788b3cfac41e969e3e1c22f8a6ea13139404"}, + {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e18668f1bd39e69b7fed19fa7cd1cd110a121ec25439328b5c89934e6d30d357"}, + {file = "orjson-3.9.15-cp311-none-win32.whl", hash = "sha256:62482873e0289cf7313461009bf62ac8b2e54bc6f00c6fabcde785709231a5d7"}, + {file = "orjson-3.9.15-cp311-none-win_amd64.whl", hash = "sha256:b3d336ed75d17c7b1af233a6561cf421dee41d9204aa3cfcc6c9c65cd5bb69a8"}, + {file = "orjson-3.9.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:82425dd5c7bd3adfe4e94c78e27e2fa02971750c2b7ffba648b0f5d5cc016a73"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c51378d4a8255b2e7c1e5cc430644f0939539deddfa77f6fac7b56a9784160a"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6ae4e06be04dc00618247c4ae3f7c3e561d5bc19ab6941427f6d3722a0875ef7"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:bcef128f970bb63ecf9a65f7beafd9b55e3aaf0efc271a4154050fc15cdb386e"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b72758f3ffc36ca566ba98a8e7f4f373b6c17c646ff8ad9b21ad10c29186f00d"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c57bc7b946cf2efa67ac55766e41764b66d40cbd9489041e637c1304400494"}, + {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:946c3a1ef25338e78107fba746f299f926db408d34553b4754e90a7de1d44068"}, + {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2f256d03957075fcb5923410058982aea85455d035607486ccb847f095442bda"}, + {file = "orjson-3.9.15-cp312-none-win_amd64.whl", hash = "sha256:5bb399e1b49db120653a31463b4a7b27cf2fbfe60469546baf681d1b39f4edf2"}, + {file = "orjson-3.9.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b17f0f14a9c0ba55ff6279a922d1932e24b13fc218a3e968ecdbf791b3682b25"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f6cbd8e6e446fb7e4ed5bac4661a29e43f38aeecbf60c4b900b825a353276a1"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76bc6356d07c1d9f4b782813094d0caf1703b729d876ab6a676f3aaa9a47e37c"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdfa97090e2d6f73dced247a2f2d8004ac6449df6568f30e7fa1a045767c69a6"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7413070a3e927e4207d00bd65f42d1b780fb0d32d7b1d951f6dc6ade318e1b5a"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cf1596680ac1f01839dba32d496136bdd5d8ffb858c280fa82bbfeb173bdd40"}, + {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:809d653c155e2cc4fd39ad69c08fdff7f4016c355ae4b88905219d3579e31eb7"}, + {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:920fa5a0c5175ab14b9c78f6f820b75804fb4984423ee4c4f1e6d748f8b22bc1"}, + {file = "orjson-3.9.15-cp38-none-win32.whl", hash = "sha256:2b5c0f532905e60cf22a511120e3719b85d9c25d0e1c2a8abb20c4dede3b05a5"}, + {file = "orjson-3.9.15-cp38-none-win_amd64.whl", hash = "sha256:67384f588f7f8daf040114337d34a5188346e3fae6c38b6a19a2fe8c663a2f9b"}, + {file = "orjson-3.9.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6fc2fe4647927070df3d93f561d7e588a38865ea0040027662e3e541d592811e"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34cbcd216e7af5270f2ffa63a963346845eb71e174ea530867b7443892d77180"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f541587f5c558abd93cb0de491ce99a9ef8d1ae29dd6ab4dbb5a13281ae04cbd"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92255879280ef9c3c0bcb327c5a1b8ed694c290d61a6a532458264f887f052cb"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:05a1f57fb601c426635fcae9ddbe90dfc1ed42245eb4c75e4960440cac667262"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ede0bde16cc6e9b96633df1631fbcd66491d1063667f260a4f2386a098393790"}, + {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e88b97ef13910e5f87bcbc4dd7979a7de9ba8702b54d3204ac587e83639c0c2b"}, + {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57d5d8cf9c27f7ef6bc56a5925c7fbc76b61288ab674eb352c26ac780caa5b10"}, + {file = "orjson-3.9.15-cp39-none-win32.whl", hash = "sha256:001f4eb0ecd8e9ebd295722d0cbedf0748680fb9998d3993abaed2f40587257a"}, + {file = "orjson-3.9.15-cp39-none-win_amd64.whl", hash = 
"sha256:ea0b183a5fe6b2b45f3b854b0d19c4e932d6f5934ae1f723b07cf9560edd4ec7"}, + {file = "orjson-3.9.15.tar.gz", hash = "sha256:95cae920959d772f30ab36d3b25f83bb0f3be671e986c72ce22f8fa700dae061"}, +] + +[[package]] +name = "packaging" +version = "23.2" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, +] + +[[package]] +name = "pafy" +version = "0.5.5" +description = "Retrieve YouTube content and metadata" +optional = false +python-versions = "*" +files = [ + {file = "pafy-0.5.5-py2.py3-none-any.whl", hash = "sha256:769e35aa6988686d47fa2ab235d15c9952c7873c470f6a6b05cf6bcd93e62515"}, + {file = "pafy-0.5.5.tar.gz", hash = "sha256:364f1d1312c89582d97dc7225cf6858cde27cb11dfd64a9c2bab1a2f32133b1e"}, +] + +[package.extras] +youtube-dl-backend = ["youtube-dl"] + +[[package]] +name = "pandas" +version = "2.2.1" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.9" +files = [ + {file = "pandas-2.2.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8df8612be9cd1c7797c93e1c5df861b2ddda0b48b08f2c3eaa0702cf88fb5f88"}, + {file = "pandas-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0f573ab277252ed9aaf38240f3b54cfc90fff8e5cab70411ee1d03f5d51f3944"}, + {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f02a3a6c83df4026e55b63c1f06476c9aa3ed6af3d89b4f04ea656ccdaaaa359"}, + {file = "pandas-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c38ce92cb22a4bea4e3929429aa1067a454dcc9c335799af93ba9be21b6beb51"}, + {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:c2ce852e1cf2509a69e98358e8458775f89599566ac3775e70419b98615f4b06"}, + {file = "pandas-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53680dc9b2519cbf609c62db3ed7c0b499077c7fefda564e330286e619ff0dd9"}, + {file = "pandas-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:94e714a1cca63e4f5939cdce5f29ba8d415d85166be3441165edd427dc9f6bc0"}, + {file = "pandas-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f821213d48f4ab353d20ebc24e4faf94ba40d76680642fb7ce2ea31a3ad94f9b"}, + {file = "pandas-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c70e00c2d894cb230e5c15e4b1e1e6b2b478e09cf27cc593a11ef955b9ecc81a"}, + {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e97fbb5387c69209f134893abc788a6486dbf2f9e511070ca05eed4b930b1b02"}, + {file = "pandas-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101d0eb9c5361aa0146f500773395a03839a5e6ecde4d4b6ced88b7e5a1a6403"}, + {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7d2ed41c319c9fb4fd454fe25372028dfa417aacb9790f68171b2e3f06eae8cd"}, + {file = "pandas-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:af5d3c00557d657c8773ef9ee702c61dd13b9d7426794c9dfeb1dc4a0bf0ebc7"}, + {file = "pandas-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:06cf591dbaefb6da9de8472535b185cba556d0ce2e6ed28e21d919704fef1a9e"}, + {file = "pandas-2.2.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:88ecb5c01bb9ca927ebc4098136038519aa5d66b44671861ffab754cae75102c"}, + {file = "pandas-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:04f6ec3baec203c13e3f8b139fb0f9f86cd8c0b94603ae3ae8ce9a422e9f5bee"}, + {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a935a90a76c44fe170d01e90a3594beef9e9a6220021acfb26053d01426f7dc2"}, + {file = "pandas-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:c391f594aae2fd9f679d419e9a4d5ba4bce5bb13f6a989195656e7dc4b95c8f0"}, + {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9d1265545f579edf3f8f0cb6f89f234f5e44ba725a34d86535b1a1d38decbccc"}, + {file = "pandas-2.2.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:11940e9e3056576ac3244baef2fedade891977bcc1cb7e5cc8f8cc7d603edc89"}, + {file = "pandas-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4acf681325ee1c7f950d058b05a820441075b0dd9a2adf5c4835b9bc056bf4fb"}, + {file = "pandas-2.2.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9bd8a40f47080825af4317d0340c656744f2bfdb6819f818e6ba3cd24c0e1397"}, + {file = "pandas-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:df0c37ebd19e11d089ceba66eba59a168242fc6b7155cba4ffffa6eccdfb8f16"}, + {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:739cc70eaf17d57608639e74d63387b0d8594ce02f69e7a0b046f117974b3019"}, + {file = "pandas-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d3558d263073ed95e46f4650becff0c5e1ffe0fc3a015de3c79283dfbdb3df"}, + {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4aa1d8707812a658debf03824016bf5ea0d516afdea29b7dc14cf687bc4d4ec6"}, + {file = "pandas-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:76f27a809cda87e07f192f001d11adc2b930e93a2b0c4a236fde5429527423be"}, + {file = "pandas-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:1ba21b1d5c0e43416218db63037dbe1a01fc101dc6e6024bcad08123e48004ab"}, + {file = "pandas-2.2.1.tar.gz", hash = "sha256:0ab90f87093c13f3e8fa45b48ba9f39181046e8f3317d3aadb2fffbb1b978572"}, +] + +[package.dependencies] +numpy = {version = ">=1.23.2,<2", markers = "python_version == \"3.11\""} +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.7" + +[package.extras] +all = ["PyQt5 (>=5.15.9)", "SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)", "beautifulsoup4 
(>=4.11.2)", "bottleneck (>=1.3.6)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=2022.12.0)", "fsspec (>=2022.11.0)", "gcsfs (>=2022.11.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.9.2)", "matplotlib (>=3.6.3)", "numba (>=0.56.4)", "numexpr (>=2.8.4)", "odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "pandas-gbq (>=0.19.0)", "psycopg2 (>=2.9.6)", "pyarrow (>=10.0.1)", "pymysql (>=1.0.2)", "pyreadstat (>=1.2.0)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "qtpy (>=2.3.0)", "s3fs (>=2022.11.0)", "scipy (>=1.10.0)", "tables (>=3.8.0)", "tabulate (>=0.9.0)", "xarray (>=2022.12.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)", "zstandard (>=0.19.0)"] +aws = ["s3fs (>=2022.11.0)"] +clipboard = ["PyQt5 (>=5.15.9)", "qtpy (>=2.3.0)"] +compression = ["zstandard (>=0.19.0)"] +computation = ["scipy (>=1.10.0)", "xarray (>=2022.12.0)"] +consortium-standard = ["dataframe-api-compat (>=0.1.7)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.1.0)", "python-calamine (>=0.1.7)", "pyxlsb (>=1.0.10)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.5)"] +feather = ["pyarrow (>=10.0.1)"] +fss = ["fsspec (>=2022.11.0)"] +gcp = ["gcsfs (>=2022.11.0)", "pandas-gbq (>=0.19.0)"] +hdf5 = ["tables (>=3.8.0)"] +html = ["beautifulsoup4 (>=4.11.2)", "html5lib (>=1.1)", "lxml (>=4.9.2)"] +mysql = ["SQLAlchemy (>=2.0.0)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.9.0)"] +parquet = ["pyarrow (>=10.0.1)"] +performance = ["bottleneck (>=1.3.6)", "numba (>=0.56.4)", "numexpr (>=2.8.4)"] +plot = ["matplotlib (>=3.6.3)"] +postgresql = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "psycopg2 (>=2.9.6)"] +pyarrow = ["pyarrow (>=10.0.1)"] +spss = ["pyreadstat (>=1.2.0)"] +sql-other = ["SQLAlchemy (>=2.0.0)", "adbc-driver-postgresql (>=0.8.0)", "adbc-driver-sqlite (>=0.8.0)"] +test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.9.2)"] + 
+[[package]] +name = "parso" +version = "0.8.3" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, + {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, +] + +[package.extras] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["docopt", "pytest (<6.0.0)"] + +[[package]] +name = "passlib" +version = "1.7.4" +description = "comprehensive password hashing framework supporting over 30 schemes" +optional = false +python-versions = "*" +files = [ + {file = "passlib-1.7.4-py2.py3-none-any.whl", hash = "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1"}, + {file = "passlib-1.7.4.tar.gz", hash = "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04"}, +] + +[package.extras] +argon2 = ["argon2-cffi (>=18.2.0)"] +bcrypt = ["bcrypt (>=3.1.0)"] +build-docs = ["cloud-sptheme (>=1.10.1)", "sphinx (>=1.6)", "sphinxcontrib-fulltoc (>=1.2.0)"] +totp = ["cryptography"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pillow" +version = "10.2.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pillow-10.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:7823bdd049099efa16e4246bdf15e5a13dbb18a51b68fa06d6c1d4d8b99a796e"}, + {file = "pillow-10.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:83b2021f2ade7d1ed556bc50a399127d7fb245e725aa0113ebd05cfe88aaf588"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fad5ff2f13d69b7e74ce5b4ecd12cc0ec530fcee76356cac6742785ff71c452"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da2b52b37dad6d9ec64e653637a096905b258d2fc2b984c41ae7d08b938a67e4"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:47c0995fc4e7f79b5cfcab1fc437ff2890b770440f7696a3ba065ee0fd496563"}, + {file = "pillow-10.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:322bdf3c9b556e9ffb18f93462e5f749d3444ce081290352c6070d014c93feb2"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:51f1a1bffc50e2e9492e87d8e09a17c5eea8409cda8d3f277eb6edc82813c17c"}, + {file = "pillow-10.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:69ffdd6120a4737710a9eee73e1d2e37db89b620f702754b8f6e62594471dee0"}, + {file = "pillow-10.2.0-cp310-cp310-win32.whl", hash = "sha256:c6dafac9e0f2b3c78df97e79af707cdc5ef8e88208d686a4847bab8266870023"}, + {file = "pillow-10.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:aebb6044806f2e16ecc07b2a2637ee1ef67a11840a66752751714a0d924adf72"}, + 
{file = "pillow-10.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:7049e301399273a0136ff39b84c3678e314f2158f50f517bc50285fb5ec847ad"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:35bb52c37f256f662abdfa49d2dfa6ce5d93281d323a9af377a120e89a9eafb5"}, + {file = "pillow-10.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c23f307202661071d94b5e384e1e1dc7dfb972a28a2310e4ee16103e66ddb67"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:773efe0603db30c281521a7c0214cad7836c03b8ccff897beae9b47c0b657d61"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11fa2e5984b949b0dd6d7a94d967743d87c577ff0b83392f17cb3990d0d2fd6e"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:716d30ed977be8b37d3ef185fecb9e5a1d62d110dfbdcd1e2a122ab46fddb03f"}, + {file = "pillow-10.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a086c2af425c5f62a65e12fbf385f7c9fcb8f107d0849dba5839461a129cf311"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c8de2789052ed501dd829e9cae8d3dcce7acb4777ea4a479c14521c942d395b1"}, + {file = "pillow-10.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:609448742444d9290fd687940ac0b57fb35e6fd92bdb65386e08e99af60bf757"}, + {file = "pillow-10.2.0-cp311-cp311-win32.whl", hash = "sha256:823ef7a27cf86df6597fa0671066c1b596f69eba53efa3d1e1cb8b30f3533068"}, + {file = "pillow-10.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:1da3b2703afd040cf65ec97efea81cfba59cdbed9c11d8efc5ab09df9509fc56"}, + {file = "pillow-10.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:edca80cbfb2b68d7b56930b84a0e45ae1694aeba0541f798e908a49d66b837f1"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:1b5e1b74d1bd1b78bc3477528919414874748dd363e6272efd5abf7654e68bef"}, + {file = "pillow-10.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:0eae2073305f451d8ecacb5474997c08569fb4eb4ac231ffa4ad7d342fdc25ac"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b7c2286c23cd350b80d2fc9d424fc797575fb16f854b831d16fd47ceec078f2c"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e23412b5c41e58cec602f1135c57dfcf15482013ce6e5f093a86db69646a5aa"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:52a50aa3fb3acb9cf7213573ef55d31d6eca37f5709c69e6858fe3bc04a5c2a2"}, + {file = "pillow-10.2.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:127cee571038f252a552760076407f9cff79761c3d436a12af6000cd182a9d04"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8d12251f02d69d8310b046e82572ed486685c38f02176bd08baf216746eb947f"}, + {file = "pillow-10.2.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:54f1852cd531aa981bc0965b7d609f5f6cc8ce8c41b1139f6ed6b3c54ab82bfb"}, + {file = "pillow-10.2.0-cp312-cp312-win32.whl", hash = "sha256:257d8788df5ca62c980314053197f4d46eefedf4e6175bc9412f14412ec4ea2f"}, + {file = "pillow-10.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:154e939c5f0053a383de4fd3d3da48d9427a7e985f58af8e94d0b3c9fcfcf4f9"}, + {file = "pillow-10.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:f379abd2f1e3dddb2b61bc67977a6b5a0a3f7485538bcc6f39ec76163891ee48"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:8373c6c251f7ef8bda6675dd6d2b3a0fcc31edf1201266b5cf608b62a37407f9"}, + {file = "pillow-10.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:870ea1ada0899fd0b79643990809323b389d4d1d46c192f97342eeb6ee0b8483"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4b6b1e20608493548b1f32bce8cca185bf0480983890403d3b8753e44077129"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:3031709084b6e7852d00479fd1d310b07d0ba82765f973b543c8af5061cf990e"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:3ff074fc97dd4e80543a3e91f69d58889baf2002b6be64347ea8cf5533188213"}, + {file = "pillow-10.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:cb4c38abeef13c61d6916f264d4845fab99d7b711be96c326b84df9e3e0ff62d"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b1b3020d90c2d8e1dae29cf3ce54f8094f7938460fb5ce8bc5c01450b01fbaf6"}, + {file = "pillow-10.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:170aeb00224ab3dc54230c797f8404507240dd868cf52066f66a41b33169bdbe"}, + {file = "pillow-10.2.0-cp38-cp38-win32.whl", hash = "sha256:c4225f5220f46b2fde568c74fca27ae9771536c2e29d7c04f4fb62c83275ac4e"}, + {file = "pillow-10.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:0689b5a8c5288bc0504d9fcee48f61a6a586b9b98514d7d29b840143d6734f39"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b792a349405fbc0163190fde0dc7b3fef3c9268292586cf5645598b48e63dc67"}, + {file = "pillow-10.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c570f24be1e468e3f0ce7ef56a89a60f0e05b30a3669a459e419c6eac2c35364"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8ecd059fdaf60c1963c58ceb8997b32e9dc1b911f5da5307aab614f1ce5c2fb"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c365fd1703040de1ec284b176d6af5abe21b427cb3a5ff68e0759e1e313a5e7e"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:70c61d4c475835a19b3a5aa42492409878bbca7438554a1f89d20d58a7c75c01"}, + {file = "pillow-10.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b6f491cdf80ae540738859d9766783e3b3c8e5bd37f5dfa0b76abdecc5081f13"}, + {file = "pillow-10.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9d189550615b4948f45252d7f005e53c2040cea1af5b60d6f79491a6e147eef7"}, + {file = 
"pillow-10.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:49d9ba1ed0ef3e061088cd1e7538a0759aab559e2e0a80a36f9fd9d8c0c21591"}, + {file = "pillow-10.2.0-cp39-cp39-win32.whl", hash = "sha256:babf5acfede515f176833ed6028754cbcd0d206f7f614ea3447d67c33be12516"}, + {file = "pillow-10.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:0304004f8067386b477d20a518b50f3fa658a28d44e4116970abfcd94fac34a8"}, + {file = "pillow-10.2.0-cp39-cp39-win_arm64.whl", hash = "sha256:0fb3e7fc88a14eacd303e90481ad983fd5b69c761e9e6ef94c983f91025da869"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:322209c642aabdd6207517e9739c704dc9f9db943015535783239022002f054a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3eedd52442c0a5ff4f887fab0c1c0bb164d8635b32c894bc1faf4c618dd89df2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb28c753fd5eb3dd859b4ee95de66cc62af91bcff5db5f2571d32a520baf1f04"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:33870dc4653c5017bf4c8873e5488d8f8d5f8935e2f1fb9a2208c47cdd66efd2"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3c31822339516fb3c82d03f30e22b1d038da87ef27b6a78c9549888f8ceda39a"}, + {file = "pillow-10.2.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a2b56ba36e05f973d450582fb015594aaa78834fefe8dfb8fcd79b93e64ba4c6"}, + {file = "pillow-10.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d8e6aeb9201e655354b3ad049cb77d19813ad4ece0df1249d3c793de3774f8c7"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2247178effb34a77c11c0e8ac355c7a741ceca0a732b27bf11e747bbc950722f"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15587643b9e5eb26c48e49a7b33659790d28f190fc514a322d55da2fb5c2950e"}, + {file = 
"pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753cd8f2086b2b80180d9b3010dd4ed147efc167c90d3bf593fe2af21265e5a5"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7c8f97e8e7a9009bcacbe3766a36175056c12f9a44e6e6f2d5caad06dcfbf03b"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d1b35bcd6c5543b9cb547dee3150c93008f8dd0f1fef78fc0cd2b141c5baf58a"}, + {file = "pillow-10.2.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe4c15f6c9285dc54ce6553a3ce908ed37c8f3825b5a51a15c91442bb955b868"}, + {file = "pillow-10.2.0.tar.gz", hash = "sha256:e87f0b2c78157e12d7686b27d63c070fd65d994e8ddae6f328e0dcf4a0cd007e"}, +] + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] +fpx = ["olefile"] +mic = ["olefile"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] +typing = ["typing-extensions"] +xmp = ["defusedxml"] + +[[package]] +name = "platformdirs" +version = "4.2.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.2.0-py3-none-any.whl", hash = "sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068"}, + {file = "platformdirs-4.2.0.tar.gz", hash = "sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768"}, +] + +[package.extras] +docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] + +[[package]] +name = "pluggy" +version = "1.4.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pluggy-1.4.0-py3-none-any.whl", hash = "sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981"}, + {file = "pluggy-1.4.0.tar.gz", hash = "sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pre-commit" +version = "3.6.2" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "pre_commit-3.6.2-py2.py3-none-any.whl", hash = "sha256:ba637c2d7a670c10daedc059f5c49b5bd0aadbccfcd7ec15592cf9665117532c"}, + {file = "pre_commit-3.6.2.tar.gz", hash = "sha256:c3ef34f463045c88658c5b99f38c1e297abdcc0ff13f98d3370055fbbfabc67e"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "prompt-toolkit" +version = "3.0.43" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.43-py3-none-any.whl", hash = "sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6"}, + {file = "prompt_toolkit-3.0.43.tar.gz", hash = "sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "protobuf" +version = "3.20.3" +description = "Protocol Buffers" +optional = false +python-versions = ">=3.7" +files = [ + {file = "protobuf-3.20.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99"}, + {file = "protobuf-3.20.3-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e"}, + {file = "protobuf-3.20.3-cp310-cp310-win32.whl", hash = "sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c"}, + {file = "protobuf-3.20.3-cp310-cp310-win_amd64.whl", hash = "sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7"}, + {file = "protobuf-3.20.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469"}, + {file = "protobuf-3.20.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4"}, 
+ {file = "protobuf-3.20.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4"}, + {file = "protobuf-3.20.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454"}, + {file = "protobuf-3.20.3-cp37-cp37m-win32.whl", hash = "sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905"}, + {file = "protobuf-3.20.3-cp37-cp37m-win_amd64.whl", hash = "sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c"}, + {file = "protobuf-3.20.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee"}, + {file = "protobuf-3.20.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050"}, + {file = "protobuf-3.20.3-cp38-cp38-win32.whl", hash = "sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86"}, + {file = "protobuf-3.20.3-cp38-cp38-win_amd64.whl", hash = "sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9"}, + {file = "protobuf-3.20.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b"}, + {file = "protobuf-3.20.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402"}, + {file = "protobuf-3.20.3-cp39-cp39-win32.whl", hash = "sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480"}, + {file = "protobuf-3.20.3-cp39-cp39-win_amd64.whl", hash = 
"sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7"}, + {file = "protobuf-3.20.3-py2.py3-none-any.whl", hash = "sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db"}, + {file = "protobuf-3.20.3.tar.gz", hash = "sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2"}, +] + +[[package]] +name = "psutil" +version = "5.9.8" +description = "Cross-platform lib for process and system monitoring in Python." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "psutil-5.9.8-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:26bd09967ae00920df88e0352a91cff1a78f8d69b3ecabbfe733610c0af486c8"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:05806de88103b25903dff19bb6692bd2e714ccf9e668d050d144012055cbca73"}, + {file = "psutil-5.9.8-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:611052c4bc70432ec770d5d54f64206aa7203a101ec273a0cd82418c86503bb7"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:50187900d73c1381ba1454cf40308c2bf6f34268518b3f36a9b663ca87e65e36"}, + {file = "psutil-5.9.8-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:02615ed8c5ea222323408ceba16c60e99c3f91639b07da6373fb7e6539abc56d"}, + {file = "psutil-5.9.8-cp27-none-win32.whl", hash = "sha256:36f435891adb138ed3c9e58c6af3e2e6ca9ac2f365efe1f9cfef2794e6c93b4e"}, + {file = "psutil-5.9.8-cp27-none-win_amd64.whl", hash = "sha256:bd1184ceb3f87651a67b2708d4c3338e9b10c5df903f2e3776b62303b26cb631"}, + {file = "psutil-5.9.8-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:aee678c8720623dc456fa20659af736241f575d79429a0e5e9cf88ae0605cc81"}, + {file = "psutil-5.9.8-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cb6403ce6d8e047495a701dc7c5bd788add903f8986d523e3e20b98b733e421"}, + {file = 
"psutil-5.9.8-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d06016f7f8625a1825ba3732081d77c94589dca78b7a3fc072194851e88461a4"}, + {file = "psutil-5.9.8-cp36-cp36m-win32.whl", hash = "sha256:7d79560ad97af658a0f6adfef8b834b53f64746d45b403f225b85c5c2c140eee"}, + {file = "psutil-5.9.8-cp36-cp36m-win_amd64.whl", hash = "sha256:27cc40c3493bb10de1be4b3f07cae4c010ce715290a5be22b98493509c6299e2"}, + {file = "psutil-5.9.8-cp37-abi3-win32.whl", hash = "sha256:bc56c2a1b0d15aa3eaa5a60c9f3f8e3e565303b465dbf57a1b730e7a2b9844e0"}, + {file = "psutil-5.9.8-cp37-abi3-win_amd64.whl", hash = "sha256:8db4c1b57507eef143a15a6884ca10f7c73876cdf5d51e713151c1236a0e68cf"}, + {file = "psutil-5.9.8-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:d16bbddf0693323b8c6123dd804100241da461e41d6e332fb0ba6058f630f8c8"}, + {file = "psutil-5.9.8.tar.gz", hash = "sha256:6be126e3225486dff286a8fb9a06246a5253f4c7c53b475ea5f5ac934e64194c"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.2" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +files = [ + {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, + {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "py-cpuinfo" +version = "9.0.0" 
+description = "Get CPU info with pure Python" +optional = false +python-versions = "*" +files = [ + {file = "py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690"}, + {file = "py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5"}, +] + +[[package]] +name = "pyasn1" +version = "0.5.1" +description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +files = [ + {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"}, + {file = "pyasn1-0.5.1.tar.gz", hash = "sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c"}, +] + +[[package]] +name = "pycodestyle" +version = "2.11.1" +description = "Python style guide checker" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, + {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pycryptodomex" +version = "3.20.0" +description = "Cryptographic library for Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "pycryptodomex-3.20.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = 
"sha256:645bd4ca6f543685d643dadf6a856cc382b654cc923460e3a10a49c1b3832aeb"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ff5c9a67f8a4fba4aed887216e32cbc48f2a6fb2673bb10a99e43be463e15913"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:8ee606964553c1a0bc74057dd8782a37d1c2bc0f01b83193b6f8bb14523b877b"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7805830e0c56d88f4d491fa5ac640dfc894c5ec570d1ece6ed1546e9df2e98d6"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-musllinux_1_1_aarch64.whl", hash = "sha256:bc3ee1b4d97081260d92ae813a83de4d2653206967c4a0a017580f8b9548ddbc"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-win32.whl", hash = "sha256:8af1a451ff9e123d0d8bd5d5e60f8e3315c3a64f3cdd6bc853e26090e195cdc8"}, + {file = "pycryptodomex-3.20.0-cp27-cp27m-win_amd64.whl", hash = "sha256:cbe71b6712429650e3883dc81286edb94c328ffcd24849accac0a4dbcc76958a"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:76bd15bb65c14900d98835fcd10f59e5e0435077431d3a394b60b15864fddd64"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:653b29b0819605fe0898829c8ad6400a6ccde096146730c2da54eede9b7b8baa"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62a5ec91388984909bb5398ea49ee61b68ecb579123694bffa172c3b0a107079"}, + {file = "pycryptodomex-3.20.0-cp27-cp27mu-musllinux_1_1_aarch64.whl", hash = "sha256:108e5f1c1cd70ffce0b68739c75734437c919d2eaec8e85bffc2c8b4d2794305"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-macosx_10_9_universal2.whl", hash = "sha256:59af01efb011b0e8b686ba7758d59cf4a8263f9ad35911bfe3f416cee4f5c08c"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:82ee7696ed8eb9a82c7037f32ba9b7c59e51dda6f105b39f043b6ef293989cb3"}, + {file = 
"pycryptodomex-3.20.0-cp35-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91852d4480a4537d169c29a9d104dda44094c78f1f5b67bca76c29a91042b623"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca649483d5ed251d06daf25957f802e44e6bb6df2e8f218ae71968ff8f8edc4"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e186342cfcc3aafaad565cbd496060e5a614b441cacc3995ef0091115c1f6c5"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:25cd61e846aaab76d5791d006497134602a9e451e954833018161befc3b5b9ed"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_i686.whl", hash = "sha256:9c682436c359b5ada67e882fec34689726a09c461efd75b6ea77b2403d5665b7"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:7a7a8f33a1f1fb762ede6cc9cbab8f2a9ba13b196bfaf7bc6f0b39d2ba315a43"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-win32.whl", hash = "sha256:c39778fd0548d78917b61f03c1fa8bfda6cfcf98c767decf360945fe6f97461e"}, + {file = "pycryptodomex-3.20.0-cp35-abi3-win_amd64.whl", hash = "sha256:2a47bcc478741b71273b917232f521fd5704ab4b25d301669879e7273d3586cc"}, + {file = "pycryptodomex-3.20.0-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:1be97461c439a6af4fe1cf8bf6ca5936d3db252737d2f379cc6b2e394e12a458"}, + {file = "pycryptodomex-3.20.0-pp27-pypy_73-win32.whl", hash = "sha256:19764605feea0df966445d46533729b645033f134baeb3ea26ad518c9fdf212c"}, + {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f2e497413560e03421484189a6b65e33fe800d3bd75590e6d78d4dfdb7accf3b"}, + {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48217c7901edd95f9f097feaa0388da215ed14ce2ece803d3f300b4e694abea"}, + {file = 
"pycryptodomex-3.20.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d00fe8596e1cc46b44bf3907354e9377aa030ec4cd04afbbf6e899fc1e2a7781"}, + {file = "pycryptodomex-3.20.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88afd7a3af7ddddd42c2deda43d53d3dfc016c11327d0915f90ca34ebda91499"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d3584623e68a5064a04748fb6d76117a21a7cb5eaba20608a41c7d0c61721794"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0daad007b685db36d977f9de73f61f8da2a7104e20aca3effd30752fd56f73e1"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dcac11031a71348faaed1f403a0debd56bf5404232284cf8c761ff918886ebc"}, + {file = "pycryptodomex-3.20.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:69138068268127cd605e03438312d8f271135a33140e2742b417d027a0539427"}, + {file = "pycryptodomex-3.20.0.tar.gz", hash = "sha256:7a710b79baddd65b806402e14766c721aee8fb83381769c27920f26476276c1e"}, +] + +[[package]] +name = "pydantic" +version = "2.6.3" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic-2.6.3-py3-none-any.whl", hash = "sha256:72c6034df47f46ccdf81869fddb81aade68056003900a8724a4f160700016a2a"}, + {file = "pydantic-2.6.3.tar.gz", hash = "sha256:e07805c4c7f5c6826e33a1d4c9d47950d7eaf34868e2690f8594d2e30241f11f"}, +] + +[package.dependencies] +annotated-types = ">=0.4.0" +pydantic-core = "2.16.3" +typing-extensions = ">=4.6.1" + +[package.extras] +email = ["email-validator (>=2.0.0)"] + +[[package]] +name = "pydantic-core" +version = "2.16.3" +description = "" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:75b81e678d1c1ede0785c7f46690621e4c6e63ccd9192af1f0bd9d504bbb6bf4"}, + {file = "pydantic_core-2.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9c865a7ee6f93783bd5d781af5a4c43dadc37053a5b42f7d18dc019f8c9d2bd1"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:162e498303d2b1c036b957a1278fa0899d02b2842f1ff901b6395104c5554a45"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f583bd01bbfbff4eaee0868e6fc607efdfcc2b03c1c766b06a707abbc856187"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b926dd38db1519ed3043a4de50214e0d600d404099c3392f098a7f9d75029ff8"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:716b542728d4c742353448765aa7cdaa519a7b82f9564130e2b3f6766018c9ec"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4ad7f7ee1a13d9cb49d8198cd7d7e3aa93e425f371a68235f784e99741561f"}, + {file = "pydantic_core-2.16.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bd87f48924f360e5d1c5f770d6155ce0e7d83f7b4e10c2f9ec001c73cf475c99"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0df446663464884297c793874573549229f9eca73b59360878f382a0fc085979"}, + {file = "pydantic_core-2.16.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4df8a199d9f6afc5ae9a65f8f95ee52cae389a8c6b20163762bde0426275b7db"}, + {file = "pydantic_core-2.16.3-cp310-none-win32.whl", hash = "sha256:456855f57b413f077dff513a5a28ed838dbbb15082ba00f80750377eed23d132"}, + {file = "pydantic_core-2.16.3-cp310-none-win_amd64.whl", hash = "sha256:732da3243e1b8d3eab8c6ae23ae6a58548849d2e4a4e03a1924c8ddf71a387cb"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:519ae0312616026bf4cedc0fe459e982734f3ca82ee8c7246c19b650b60a5ee4"}, + {file = "pydantic_core-2.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b3992a322a5617ded0a9f23fd06dbc1e4bd7cf39bc4ccf344b10f80af58beacd"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d62da299c6ecb04df729e4b5c52dc0d53f4f8430b4492b93aa8de1f541c4aac"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2acca2be4bb2f2147ada8cac612f8a98fc09f41c89f87add7256ad27332c2fda"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1b662180108c55dfbf1280d865b2d116633d436cfc0bba82323554873967b340"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e7c6ed0dc9d8e65f24f5824291550139fe6f37fac03788d4580da0d33bc00c97"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6b1bb0827f56654b4437955555dc3aeeebeddc47c2d7ed575477f082622c49e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e56f8186d6210ac7ece503193ec84104da7ceb98f68ce18c07282fcc2452e76f"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:936e5db01dd49476fa8f4383c259b8b1303d5dd5fb34c97de194560698cc2c5e"}, + {file = "pydantic_core-2.16.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33809aebac276089b78db106ee692bdc9044710e26f24a9a2eaa35a0f9fa70ba"}, + {file = "pydantic_core-2.16.3-cp311-none-win32.whl", hash = "sha256:ded1c35f15c9dea16ead9bffcde9bb5c7c031bff076355dc58dcb1cb436c4721"}, + {file = "pydantic_core-2.16.3-cp311-none-win_amd64.whl", hash = "sha256:d89ca19cdd0dd5f31606a9329e309d4fcbb3df860960acec32630297d61820df"}, + {file = "pydantic_core-2.16.3-cp311-none-win_arm64.whl", hash = 
"sha256:6162f8d2dc27ba21027f261e4fa26f8bcb3cf9784b7f9499466a311ac284b5b9"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:0f56ae86b60ea987ae8bcd6654a887238fd53d1384f9b222ac457070b7ac4cff"}, + {file = "pydantic_core-2.16.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9bd22a2a639e26171068f8ebb5400ce2c1bc7d17959f60a3b753ae13c632975"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4204e773b4b408062960e65468d5346bdfe139247ee5f1ca2a378983e11388a2"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f651dd19363c632f4abe3480a7c87a9773be27cfe1341aef06e8759599454120"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aaf09e615a0bf98d406657e0008e4a8701b11481840be7d31755dc9f97c44053"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8e47755d8152c1ab5b55928ab422a76e2e7b22b5ed8e90a7d584268dd49e9c6b"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:500960cb3a0543a724a81ba859da816e8cf01b0e6aaeedf2c3775d12ee49cade"}, + {file = "pydantic_core-2.16.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf6204fe865da605285c34cf1172879d0314ff267b1c35ff59de7154f35fdc2e"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d33dd21f572545649f90c38c227cc8631268ba25c460b5569abebdd0ec5974ca"}, + {file = "pydantic_core-2.16.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49d5d58abd4b83fb8ce763be7794d09b2f50f10aa65c0f0c1696c677edeb7cbf"}, + {file = "pydantic_core-2.16.3-cp312-none-win32.whl", hash = "sha256:f53aace168a2a10582e570b7736cc5bef12cae9cf21775e3eafac597e8551fbe"}, + {file = "pydantic_core-2.16.3-cp312-none-win_amd64.whl", hash = 
"sha256:0d32576b1de5a30d9a97f300cc6a3f4694c428d956adbc7e6e2f9cad279e45ed"}, + {file = "pydantic_core-2.16.3-cp312-none-win_arm64.whl", hash = "sha256:ec08be75bb268473677edb83ba71e7e74b43c008e4a7b1907c6d57e940bf34b6"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:b1f6f5938d63c6139860f044e2538baeee6f0b251a1816e7adb6cbce106a1f01"}, + {file = "pydantic_core-2.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a1ef6a36fdbf71538142ed604ad19b82f67b05749512e47f247a6ddd06afdc7"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704d35ecc7e9c31d48926150afada60401c55efa3b46cd1ded5a01bdffaf1d48"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d937653a696465677ed583124b94a4b2d79f5e30b2c46115a68e482c6a591c8a"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9803edf8e29bd825f43481f19c37f50d2b01899448273b3a7758441b512acf8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72282ad4892a9fb2da25defeac8c2e84352c108705c972db82ab121d15f14e6d"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f752826b5b8361193df55afcdf8ca6a57d0232653494ba473630a83ba50d8c9"}, + {file = "pydantic_core-2.16.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4384a8f68ddb31a0b0c3deae88765f5868a1b9148939c3f4121233314ad5532c"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4b2bf78342c40b3dc830880106f54328928ff03e357935ad26c7128bbd66ce8"}, + {file = "pydantic_core-2.16.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:13dcc4802961b5f843a9385fc821a0b0135e8c07fc3d9949fd49627c1a5e6ae5"}, + {file = "pydantic_core-2.16.3-cp38-none-win32.whl", hash = "sha256:e3e70c94a0c3841e6aa831edab1619ad5c511199be94d0c11ba75fe06efe107a"}, + 
{file = "pydantic_core-2.16.3-cp38-none-win_amd64.whl", hash = "sha256:ecdf6bf5f578615f2e985a5e1f6572e23aa632c4bd1dc67f8f406d445ac115ed"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bda1ee3e08252b8d41fa5537413ffdddd58fa73107171a126d3b9ff001b9b820"}, + {file = "pydantic_core-2.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:21b888c973e4f26b7a96491c0965a8a312e13be108022ee510248fe379a5fa23"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be0ec334369316fa73448cc8c982c01e5d2a81c95969d58b8f6e272884df0074"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b5b6079cc452a7c53dd378c6f881ac528246b3ac9aae0f8eef98498a75657805"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ee8d5f878dccb6d499ba4d30d757111847b6849ae07acdd1205fffa1fc1253c"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7233d65d9d651242a68801159763d09e9ec96e8a158dbf118dc090cd77a104c9"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6119dc90483a5cb50a1306adb8d52c66e447da88ea44f323e0ae1a5fcb14256"}, + {file = "pydantic_core-2.16.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:578114bc803a4c1ff9946d977c221e4376620a46cf78da267d946397dc9514a8"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d8f99b147ff3fcf6b3cc60cb0c39ea443884d5559a30b1481e92495f2310ff2b"}, + {file = "pydantic_core-2.16.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ac6b4ce1e7283d715c4b729d8f9dab9627586dafce81d9eaa009dd7f25dd972"}, + {file = "pydantic_core-2.16.3-cp39-none-win32.whl", hash = "sha256:e7774b570e61cb998490c5235740d475413a1f6de823169b4cf94e2fe9e9f6b2"}, + {file = "pydantic_core-2.16.3-cp39-none-win_amd64.whl", hash = 
"sha256:9091632a25b8b87b9a605ec0e61f241c456e9248bfdcf7abdf344fdb169c81cf"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:36fa178aacbc277bc6b62a2c3da95226520da4f4e9e206fdf076484363895d2c"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:dcca5d2bf65c6fb591fff92da03f94cd4f315972f97c21975398bd4bd046854a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a72fb9963cba4cd5793854fd12f4cfee731e86df140f59ff52a49b3552db241"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b60cc1a081f80a2105a59385b92d82278b15d80ebb3adb200542ae165cd7d183"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cbcc558401de90a746d02ef330c528f2e668c83350f045833543cd57ecead1ad"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:fee427241c2d9fb7192b658190f9f5fd6dfe41e02f3c1489d2ec1e6a5ab1e04a"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f4cb85f693044e0f71f394ff76c98ddc1bc0953e48c061725e540396d5c8a2e1"}, + {file = "pydantic_core-2.16.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:b29eeb887aa931c2fcef5aa515d9d176d25006794610c264ddc114c053bf96fe"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a425479ee40ff021f8216c9d07a6a3b54b31c8267c6e17aa88b70d7ebd0e5e5b"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5c5cbc703168d1b7a838668998308018a2718c2130595e8e190220238addc96f"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99b6add4c0b39a513d323d3b93bc173dac663c27b99860dd5bf491b240d26137"}, + {file = 
"pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f76ee558751746d6a38f89d60b6228fa174e5172d143886af0f85aa306fd89"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:00ee1c97b5364b84cb0bd82e9bbf645d5e2871fb8c58059d158412fee2d33d8a"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:287073c66748f624be4cef893ef9174e3eb88fe0b8a78dc22e88eca4bc357ca6"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ed25e1835c00a332cb10c683cd39da96a719ab1dfc08427d476bce41b92531fc"}, + {file = "pydantic_core-2.16.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:86b3d0033580bd6bbe07590152007275bd7af95f98eaa5bd36f3da219dcd93da"}, + {file = "pydantic_core-2.16.3.tar.gz", hash = "sha256:1cac689f80a3abab2d3c0048b29eea5751114054f032a941a32de4c852c59cad"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pydantic-extra-types" +version = "2.5.0" +description = "Extra Pydantic types." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_extra_types-2.5.0-py3-none-any.whl", hash = "sha256:7346873019cac32061b471adf2cdac711664ddb7a6ede04219bed2da34888c4d"}, + {file = "pydantic_extra_types-2.5.0.tar.gz", hash = "sha256:46b85240093dc63ad4a8f3cab49e03d76ae0577e4f99e2bbff7d32f99d009bf9"}, +] + +[package.dependencies] +pydantic = ">=2.5.2" + +[package.extras] +all = ["pendulum (>=3.0.0,<4.0.0)", "phonenumbers (>=8,<9)", "pycountry (>=23,<24)", "python-ulid (>=1,<2)"] + +[[package]] +name = "pydantic-settings" +version = "2.2.1" +description = "Settings management using Pydantic" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pydantic_settings-2.2.1-py3-none-any.whl", hash = "sha256:0235391d26db4d2190cb9b31051c4b46882d28a51533f97440867f012d4da091"}, + {file = "pydantic_settings-2.2.1.tar.gz", hash = "sha256:00b9f6a5e95553590434c0fa01ead0b216c3e10bc54ae02e37f359948643c5ed"}, +] + +[package.dependencies] +pydantic = ">=2.3.0" +python-dotenv = ">=0.21.0" + +[package.extras] +toml = ["tomli (>=2.0.1)"] +yaml = ["pyyaml (>=6.0.1)"] + +[[package]] +name = "pydot" +version = "2.0.0" +description = "Python interface to Graphviz's Dot" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydot-2.0.0-py3-none-any.whl", hash = "sha256:408a47913ea7bd5d2d34b274144880c1310c4aee901f353cf21fe2e526a4ea28"}, + {file = "pydot-2.0.0.tar.gz", hash = "sha256:60246af215123fa062f21cd791be67dda23a6f280df09f68919e637a1e4f3235"}, +] + +[package.dependencies] +pyparsing = ">=3" + +[package.extras] +dev = ["black", "chardet"] +release = ["zest.releaser[recommended]"] +tests = ["black", "chardet", "tox"] + +[[package]] +name = "pyflakes" +version = "3.2.0" +description = "passive checker of Python programs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"}, + {file = "pyflakes-3.2.0.tar.gz", 
hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"}, +] + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pymysql" +version = "1.1.0" +description = "Pure Python MySQL Driver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "PyMySQL-1.1.0-py3-none-any.whl", hash = "sha256:8969ec6d763c856f7073c4c64662882675702efcb114b4bcbb955aea3a069fa7"}, + {file = "PyMySQL-1.1.0.tar.gz", hash = "sha256:4f13a7df8bf36a51e81dd9f3605fede45a4878fe02f9236349fd82a3f0612f96"}, +] + +[package.extras] +ed25519 = ["PyNaCl (>=1.4.0)"] +rsa = ["cryptography"] + +[[package]] +name = "pyparsing" +version = "3.1.2" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.2-py3-none-any.whl", hash = "sha256:f9db75911801ed778fe61bb643079ff86601aca99fcae6345aa67292038fb742"}, + {file = "pyparsing-3.1.2.tar.gz", hash = "sha256:a1bac0ce561155ecc3ed78ca94d3c9378656ad4c94c1270de543f621420f94ad"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pyreadline3" +version = "3.4.1" +description = "A python implementation of GNU readline." 
+optional = false +python-versions = "*" +files = [ + {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, + {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, +] + +[[package]] +name = "pytest" +version = "8.0.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-8.0.2-py3-none-any.whl", hash = "sha256:edfaaef32ce5172d5466b5127b42e0d6d35ebbe4453f0e3505d96afd93f6b096"}, + {file = "pytest-8.0.2.tar.gz", hash = "sha256:d4051d623a2e0b7e51960ba963193b09ce6daeb9759a451844a21e4ddedfc1bd"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.3.0,<2.0" + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "python-dateutil" +version = "2.9.0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.9.0.tar.gz", hash = "sha256:78e73e19c63f5b20ffa567001531680d939dc042bf7850431877645523c66709"}, + {file = "python_dateutil-2.9.0-py2.py3-none-any.whl", hash = "sha256:cbf2f1da5e6083ac2fbfd4da39a25f34312230110440f424a14c7558bb85d82e"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-dotenv" +version = "1.0.1" +description = "Read key-value pairs from a .env file and set them as environment variables" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, + {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = 
"sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, +] + +[package.extras] +cli = ["click (>=5.0)"] + +[[package]] +name = "python-jose" +version = "3.3.0" +description = "JOSE implementation in Python" +optional = false +python-versions = "*" +files = [ + {file = "python-jose-3.3.0.tar.gz", hash = "sha256:55779b5e6ad599c6336191246e95eb2293a9ddebd555f796a65f838f07e5d78a"}, + {file = "python_jose-3.3.0-py2.py3-none-any.whl", hash = "sha256:9b1376b023f8b298536eedd47ae1089bcdb848f1535ab30555cd92002d78923a"}, +] + +[package.dependencies] +ecdsa = "!=0.15" +pyasn1 = "*" +rsa = "*" + +[package.extras] +cryptography = ["cryptography (>=3.4.0)"] +pycrypto = ["pyasn1", "pycrypto (>=2.6.0,<2.7.0)"] +pycryptodome = ["pyasn1", "pycryptodome (>=3.3.1,<4.0.0)"] + +[[package]] +name = "python-multipart" +version = "0.0.9" +description = "A streaming multipart parser for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, + {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, +] + +[package.extras] +dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] + +[[package]] +name = "pytz" +version = "2024.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + +[[package]] +name = "pywin32" +version = 
"306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pyyaml" +version = "6.0.1" 
+description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, + {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, + {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, + {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, + {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, + {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, + {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, + {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, + {file = 
"PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, + {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, + {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, + {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, + {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, + {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, + {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, + {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, + {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, + {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, + {file = 
"PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, + {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, + {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, + {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, + {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, + {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, + {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, + {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, + {file = 
"PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, + {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, + {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, + {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, + {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, + {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, + {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, + {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, + {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, + {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, +] + +[[package]] +name = "pyzmq" +version = "25.1.2" +description = "Python bindings for 0MQ" +optional = false +python-versions = 
">=3.6" +files = [ + {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:e624c789359f1a16f83f35e2c705d07663ff2b4d4479bad35621178d8f0f6ea4"}, + {file = "pyzmq-25.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49151b0efece79f6a79d41a461d78535356136ee70084a1c22532fc6383f4ad0"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9a5f194cf730f2b24d6af1f833c14c10f41023da46a7f736f48b6d35061e76e"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:faf79a302f834d9e8304fafdc11d0d042266667ac45209afa57e5efc998e3872"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f51a7b4ead28d3fca8dda53216314a553b0f7a91ee8fc46a72b402a78c3e43d"}, + {file = "pyzmq-25.1.2-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0ddd6d71d4ef17ba5a87becf7ddf01b371eaba553c603477679ae817a8d84d75"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:246747b88917e4867e2367b005fc8eefbb4a54b7db363d6c92f89d69abfff4b6"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:00c48ae2fd81e2a50c3485de1b9d5c7c57cd85dc8ec55683eac16846e57ac979"}, + {file = "pyzmq-25.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5a68d491fc20762b630e5db2191dd07ff89834086740f70e978bb2ef2668be08"}, + {file = "pyzmq-25.1.2-cp310-cp310-win32.whl", hash = "sha256:09dfe949e83087da88c4a76767df04b22304a682d6154de2c572625c62ad6886"}, + {file = "pyzmq-25.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:fa99973d2ed20417744fca0073390ad65ce225b546febb0580358e36aa90dba6"}, + {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:82544e0e2d0c1811482d37eef297020a040c32e0687c1f6fc23a75b75db8062c"}, + {file = "pyzmq-25.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:01171fc48542348cd1a360a4b6c3e7d8f46cdcf53a8d40f84db6707a6768acc1"}, + {file = 
"pyzmq-25.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc69c96735ab501419c432110016329bf0dea8898ce16fab97c6d9106dc0b348"}, + {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3e124e6b1dd3dfbeb695435dff0e383256655bb18082e094a8dd1f6293114642"}, + {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7598d2ba821caa37a0f9d54c25164a4fa351ce019d64d0b44b45540950458840"}, + {file = "pyzmq-25.1.2-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d1299d7e964c13607efd148ca1f07dcbf27c3ab9e125d1d0ae1d580a1682399d"}, + {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4e6f689880d5ad87918430957297c975203a082d9a036cc426648fcbedae769b"}, + {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cc69949484171cc961e6ecd4a8911b9ce7a0d1f738fcae717177c231bf77437b"}, + {file = "pyzmq-25.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9880078f683466b7f567b8624bfc16cad65077be046b6e8abb53bed4eeb82dd3"}, + {file = "pyzmq-25.1.2-cp311-cp311-win32.whl", hash = "sha256:4e5837af3e5aaa99a091302df5ee001149baff06ad22b722d34e30df5f0d9097"}, + {file = "pyzmq-25.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:25c2dbb97d38b5ac9fd15586e048ec5eb1e38f3d47fe7d92167b0c77bb3584e9"}, + {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_15_universal2.whl", hash = "sha256:11e70516688190e9c2db14fcf93c04192b02d457b582a1f6190b154691b4c93a"}, + {file = "pyzmq-25.1.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:313c3794d650d1fccaaab2df942af9f2c01d6217c846177cfcbc693c7410839e"}, + {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b3cbba2f47062b85fe0ef9de5b987612140a9ba3a9c6d2543c6dec9f7c2ab27"}, + {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc31baa0c32a2ca660784d5af3b9487e13b61b3032cb01a115fce6588e1bed30"}, + {file = 
"pyzmq-25.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02c9087b109070c5ab0b383079fa1b5f797f8d43e9a66c07a4b8b8bdecfd88ee"}, + {file = "pyzmq-25.1.2-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f8429b17cbb746c3e043cb986328da023657e79d5ed258b711c06a70c2ea7537"}, + {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5074adeacede5f810b7ef39607ee59d94e948b4fd954495bdb072f8c54558181"}, + {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7ae8f354b895cbd85212da245f1a5ad8159e7840e37d78b476bb4f4c3f32a9fe"}, + {file = "pyzmq-25.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b264bf2cc96b5bc43ce0e852be995e400376bd87ceb363822e2cb1964fcdc737"}, + {file = "pyzmq-25.1.2-cp312-cp312-win32.whl", hash = "sha256:02bbc1a87b76e04fd780b45e7f695471ae6de747769e540da909173d50ff8e2d"}, + {file = "pyzmq-25.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:ced111c2e81506abd1dc142e6cd7b68dd53747b3b7ae5edbea4578c5eeff96b7"}, + {file = "pyzmq-25.1.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:7b6d09a8962a91151f0976008eb7b29b433a560fde056ec7a3db9ec8f1075438"}, + {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:967668420f36878a3c9ecb5ab33c9d0ff8d054f9c0233d995a6d25b0e95e1b6b"}, + {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5edac3f57c7ddaacdb4d40f6ef2f9e299471fc38d112f4bc6d60ab9365445fb0"}, + {file = "pyzmq-25.1.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0dabfb10ef897f3b7e101cacba1437bd3a5032ee667b7ead32bbcdd1a8422fe7"}, + {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:2c6441e0398c2baacfe5ba30c937d274cfc2dc5b55e82e3749e333aabffde561"}, + {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:16b726c1f6c2e7625706549f9dbe9b06004dfbec30dbed4bf50cbdfc73e5b32a"}, + {file = "pyzmq-25.1.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = 
"sha256:a86c2dd76ef71a773e70551a07318b8e52379f58dafa7ae1e0a4be78efd1ff16"}, + {file = "pyzmq-25.1.2-cp36-cp36m-win32.whl", hash = "sha256:359f7f74b5d3c65dae137f33eb2bcfa7ad9ebefd1cab85c935f063f1dbb245cc"}, + {file = "pyzmq-25.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:55875492f820d0eb3417b51d96fea549cde77893ae3790fd25491c5754ea2f68"}, + {file = "pyzmq-25.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8c8a419dfb02e91b453615c69568442e897aaf77561ee0064d789705ff37a92"}, + {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8807c87fa893527ae8a524c15fc505d9950d5e856f03dae5921b5e9aa3b8783b"}, + {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5e319ed7d6b8f5fad9b76daa0a68497bc6f129858ad956331a5835785761e003"}, + {file = "pyzmq-25.1.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3c53687dde4d9d473c587ae80cc328e5b102b517447456184b485587ebd18b62"}, + {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:9add2e5b33d2cd765ad96d5eb734a5e795a0755f7fc49aa04f76d7ddda73fd70"}, + {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e690145a8c0c273c28d3b89d6fb32c45e0d9605b2293c10e650265bf5c11cfec"}, + {file = "pyzmq-25.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:00a06faa7165634f0cac1abb27e54d7a0b3b44eb9994530b8ec73cf52e15353b"}, + {file = "pyzmq-25.1.2-cp37-cp37m-win32.whl", hash = "sha256:0f97bc2f1f13cb16905a5f3e1fbdf100e712d841482b2237484360f8bc4cb3d7"}, + {file = "pyzmq-25.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6cc0020b74b2e410287e5942e1e10886ff81ac77789eb20bec13f7ae681f0fdd"}, + {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:bef02cfcbded83473bdd86dd8d3729cd82b2e569b75844fb4ea08fee3c26ae41"}, + {file = "pyzmq-25.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e10a4b5a4b1192d74853cc71a5e9fd022594573926c2a3a4802020360aa719d8"}, + {file = 
"pyzmq-25.1.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8c5f80e578427d4695adac6fdf4370c14a2feafdc8cb35549c219b90652536ae"}, + {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5dde6751e857910c1339890f3524de74007958557593b9e7e8c5f01cd919f8a7"}, + {file = "pyzmq-25.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea1608dd169da230a0ad602d5b1ebd39807ac96cae1845c3ceed39af08a5c6df"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0f513130c4c361201da9bc69df25a086487250e16b5571ead521b31ff6b02220"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:019744b99da30330798bb37df33549d59d380c78e516e3bab9c9b84f87a9592f"}, + {file = "pyzmq-25.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2e2713ef44be5d52dd8b8e2023d706bf66cb22072e97fc71b168e01d25192755"}, + {file = "pyzmq-25.1.2-cp38-cp38-win32.whl", hash = "sha256:07cd61a20a535524906595e09344505a9bd46f1da7a07e504b315d41cd42eb07"}, + {file = "pyzmq-25.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb7e49a17fb8c77d3119d41a4523e432eb0c6932187c37deb6fbb00cc3028088"}, + {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:94504ff66f278ab4b7e03e4cba7e7e400cb73bfa9d3d71f58d8972a8dc67e7a6"}, + {file = "pyzmq-25.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6dd0d50bbf9dca1d0bdea219ae6b40f713a3fb477c06ca3714f208fd69e16fd8"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:004ff469d21e86f0ef0369717351073e0e577428e514c47c8480770d5e24a565"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c0b5ca88a8928147b7b1e2dfa09f3b6c256bc1135a1338536cbc9ea13d3b7add"}, + {file = "pyzmq-25.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c9a79f1d2495b167119d02be7448bfba57fad2a4207c4f68abc0bab4b92925b"}, + {file = 
"pyzmq-25.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:518efd91c3d8ac9f9b4f7dd0e2b7b8bf1a4fe82a308009016b07eaa48681af82"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1ec23bd7b3a893ae676d0e54ad47d18064e6c5ae1fadc2f195143fb27373f7f6"}, + {file = "pyzmq-25.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db36c27baed588a5a8346b971477b718fdc66cf5b80cbfbd914b4d6d355e44e2"}, + {file = "pyzmq-25.1.2-cp39-cp39-win32.whl", hash = "sha256:39b1067f13aba39d794a24761e385e2eddc26295826530a8c7b6c6c341584289"}, + {file = "pyzmq-25.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:8e9f3fabc445d0ce320ea2c59a75fe3ea591fdbdeebec5db6de530dd4b09412e"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8c1d566344aee826b74e472e16edae0a02e2a044f14f7c24e123002dcff1c05"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:759cfd391a0996345ba94b6a5110fca9c557ad4166d86a6e81ea526c376a01e8"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c61e346ac34b74028ede1c6b4bcecf649d69b707b3ff9dc0fab453821b04d1e"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb8fc1f8d69b411b8ec0b5f1ffbcaf14c1db95b6bccea21d83610987435f1a4"}, + {file = "pyzmq-25.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:3c00c9b7d1ca8165c610437ca0c92e7b5607b2f9076f4eb4b095c85d6e680a1d"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:df0c7a16ebb94452d2909b9a7b3337940e9a87a824c4fc1c7c36bb4404cb0cde"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:45999e7f7ed5c390f2e87ece7f6c56bf979fb213550229e711e45ecc7d42ccb8"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ac170e9e048b40c605358667aca3d94e98f604a18c44bdb4c102e67070f3ac9b"}, + {file = 
"pyzmq-25.1.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1b604734bec94f05f81b360a272fc824334267426ae9905ff32dc2be433ab96"}, + {file = "pyzmq-25.1.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a793ac733e3d895d96f865f1806f160696422554e46d30105807fdc9841b9f7d"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0806175f2ae5ad4b835ecd87f5f85583316b69f17e97786f7443baaf54b9bb98"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ef12e259e7bc317c7597d4f6ef59b97b913e162d83b421dd0db3d6410f17a244"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ea253b368eb41116011add00f8d5726762320b1bda892f744c91997b65754d73"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b9b1f2ad6498445a941d9a4fee096d387fee436e45cc660e72e768d3d8ee611"}, + {file = "pyzmq-25.1.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:8b14c75979ce932c53b79976a395cb2a8cd3aaf14aef75e8c2cb55a330b9b49d"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:889370d5174a741a62566c003ee8ddba4b04c3f09a97b8000092b7ca83ec9c49"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a18fff090441a40ffda8a7f4f18f03dc56ae73f148f1832e109f9bffa85df15"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99a6b36f95c98839ad98f8c553d8507644c880cf1e0a57fe5e3a3f3969040882"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4345c9a27f4310afbb9c01750e9461ff33d6fb74cd2456b107525bbeebcb5be3"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3516e0b6224cf6e43e341d56da15fd33bdc37fa0c06af4f029f7d7dfceceabbc"}, + {file = "pyzmq-25.1.2-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:146b9b1f29ead41255387fb07be56dc29639262c0f7344f570eecdcd8d683314"}, + {file = "pyzmq-25.1.2.tar.gz", hash = "sha256:93f1aa311e8bb912e34f004cf186407a4e90eec4f0ecc0efd26056bf7eda0226"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "13.7.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "rich-13.7.1-py3-none-any.whl", hash = "sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222"}, + {file = "rich-13.7.1.tar.gz", hash = "sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "rsa" +version = "4.9" +description = "Pure-Python RSA implementation" +optional = false +python-versions = ">=3.6,<4" +files = [ + {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, + {file = "rsa-4.9.tar.gz", hash = "sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21"}, +] + +[package.dependencies] +pyasn1 = ">=0.1.3" + +[[package]] +name = 
"s3transfer" +version = "0.10.0" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">= 3.8" +files = [ + {file = "s3transfer-0.10.0-py3-none-any.whl", hash = "sha256:3cdb40f5cfa6966e812209d0994f2a4709b561c88e90cf00c2696d2df4e56b2e"}, + {file = "s3transfer-0.10.0.tar.gz", hash = "sha256:d0c8bbf672d5eebbe4e57945e23b972d963f07d82f661cabf678a5c88831595b"}, +] + +[package.dependencies] +botocore = ">=1.33.2,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] + +[[package]] +name = "scikit-image" +version = "0.22.0" +description = "Image processing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scikit_image-0.22.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:74ec5c1d4693506842cc7c9487c89d8fc32aed064e9363def7af08b8f8cbb31d"}, + {file = "scikit_image-0.22.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:a05ae4fe03d802587ed8974e900b943275548cde6a6807b785039d63e9a7a5ff"}, + {file = "scikit_image-0.22.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a92dca3d95b1301442af055e196a54b5a5128c6768b79fc0a4098f1d662dee6"}, + {file = "scikit_image-0.22.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3663d063d8bf2fb9bdfb0ca967b9ee3b6593139c860c7abc2d2351a8a8863938"}, + {file = "scikit_image-0.22.0-cp310-cp310-win_amd64.whl", hash = "sha256:ebdbdc901bae14dab637f8d5c99f6d5cc7aaf4a3b6f4003194e003e9f688a6fc"}, + {file = "scikit_image-0.22.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95d6da2d8a44a36ae04437c76d32deb4e3c993ffc846b394b9949fd8ded73cb2"}, + {file = "scikit_image-0.22.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:2c6ef454a85f569659b813ac2a93948022b0298516b757c9c6c904132be327e2"}, + {file = "scikit_image-0.22.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e87872f067444ee90a00dd49ca897208308645382e8a24bd3e76f301af2352cd"}, + {file = 
"scikit_image-0.22.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5c378db54e61b491b9edeefff87e49fcf7fdf729bb93c777d7a5f15d36f743e"}, + {file = "scikit_image-0.22.0-cp311-cp311-win_amd64.whl", hash = "sha256:2bcb74adb0634258a67f66c2bb29978c9a3e222463e003b67ba12056c003971b"}, + {file = "scikit_image-0.22.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:003ca2274ac0fac252280e7179ff986ff783407001459ddea443fe7916e38cff"}, + {file = "scikit_image-0.22.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:cf3c0c15b60ae3e557a0c7575fbd352f0c3ce0afca562febfe3ab80efbeec0e9"}, + {file = "scikit_image-0.22.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5b23908dd4d120e6aecb1ed0277563e8cbc8d6c0565bdc4c4c6475d53608452"}, + {file = "scikit_image-0.22.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be79d7493f320a964f8fcf603121595ba82f84720de999db0fcca002266a549a"}, + {file = "scikit_image-0.22.0-cp312-cp312-win_amd64.whl", hash = "sha256:722b970aa5da725dca55252c373b18bbea7858c1cdb406e19f9b01a4a73b30b2"}, + {file = "scikit_image-0.22.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22318b35044cfeeb63ee60c56fc62450e5fe516228138f1d06c7a26378248a86"}, + {file = "scikit_image-0.22.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:9e801c44a814afdadeabf4dffdffc23733e393767958b82319706f5fa3e1eaa9"}, + {file = "scikit_image-0.22.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c472a1fb3665ec5c00423684590631d95f9afcbc97f01407d348b821880b2cb3"}, + {file = "scikit_image-0.22.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b7a6c89e8d6252332121b58f50e1625c35f7d6a85489c0b6b7ee4f5155d547a"}, + {file = "scikit_image-0.22.0-cp39-cp39-win_amd64.whl", hash = "sha256:5071b8f6341bfb0737ab05c8ab4ac0261f9e25dbcc7b5d31e5ed230fd24a7929"}, + {file = "scikit_image-0.22.0.tar.gz", hash = 
"sha256:018d734df1d2da2719087d15f679d19285fce97cd37695103deadfaef2873236"}, +] + +[package.dependencies] +imageio = ">=2.27" +lazy_loader = ">=0.3" +networkx = ">=2.8" +numpy = ">=1.22" +packaging = ">=21" +pillow = ">=9.0.1" +scipy = ">=1.8" +tifffile = ">=2022.8.12" + +[package.extras] +build = ["Cython (>=0.29.32)", "build", "meson-python (>=0.14)", "ninja", "numpy (>=1.22)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.6)", "wheel"] +data = ["pooch (>=1.6.0)"] +developer = ["pre-commit", "tomli"] +docs = ["PyWavelets (>=1.1.1)", "dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.5)", "myst-parser", "numpydoc (>=1.6)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.14.1)", "pytest-runner", "scikit-learn (>=1.1)", "seaborn (>=0.11)", "sphinx (>=7.2)", "sphinx-copybutton", "sphinx-gallery (>=0.14)", "sphinx_design (>=0.5)", "tifffile (>=2022.8.12)"] +optional = ["PyWavelets (>=1.1.1)", "SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=1.1)"] +test = ["asv", "matplotlib (>=3.5)", "numpydoc (>=1.5)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-faulthandler", "pytest-localserver"] + +[[package]] +name = "scikit-learn" +version = "1.4.1.post1" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scikit-learn-1.4.1.post1.tar.gz", hash = "sha256:93d3d496ff1965470f9977d05e5ec3376fb1e63b10e4fda5e39d23c2d8969a30"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c540aaf44729ab5cd4bd5e394f2b375e65ceaea9cdd8c195788e70433d91bbc5"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4310bff71aa98b45b46cd26fa641309deb73a5d1c0461d181587ad4f30ea3c36"}, + {file = 
"scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f43dd527dabff5521af2786a2f8de5ba381e182ec7292663508901cf6ceaf6e"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c02e27d65b0c7dc32f2c5eb601aaf5530b7a02bfbe92438188624524878336f2"}, + {file = "scikit_learn-1.4.1.post1-cp310-cp310-win_amd64.whl", hash = "sha256:629e09f772ad42f657ca60a1a52342eef786218dd20cf1369a3b8d085e55ef8f"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6145dfd9605b0b50ae72cdf72b61a2acd87501369a763b0d73d004710ebb76b5"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1afed6951bc9d2053c6ee9a518a466cbc9b07c6a3f9d43bfe734192b6125d508"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce03506ccf5f96b7e9030fea7eb148999b254c44c10182ac55857bc9b5d4815f"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ba516fcdc73d60e7f48cbb0bccb9acbdb21807de3651531208aac73c758e3ab"}, + {file = "scikit_learn-1.4.1.post1-cp311-cp311-win_amd64.whl", hash = "sha256:78cd27b4669513b50db4f683ef41ea35b5dddc797bd2bbd990d49897fd1c8a46"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a1e289f33f613cefe6707dead50db31930530dc386b6ccff176c786335a7b01c"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0df87de9ce1c0140f2818beef310fb2e2afdc1e66fc9ad587965577f17733649"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:712c1c69c45b58ef21635360b3d0a680ff7d83ac95b6f9b82cf9294070cda710"}, + {file = "scikit_learn-1.4.1.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1754b0c2409d6ed5a3380512d0adcf182a01363c669033a2b55cca429ed86a81"}, + {file 
= "scikit_learn-1.4.1.post1-cp312-cp312-win_amd64.whl", hash = "sha256:1d491ef66e37f4e812db7e6c8286520c2c3fc61b34bf5e59b67b4ce528de93af"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aa0029b78ef59af22cfbd833e8ace8526e4df90212db7ceccbea582ebb5d6794"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:14e4c88436ac96bf69eb6d746ac76a574c314a23c6961b7d344b38877f20fee1"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7cd3a77c32879311f2aa93466d3c288c955ef71d191503cf0677c3340ae8ae0"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a3ee19211ded1a52ee37b0a7b373a8bfc66f95353af058a210b692bd4cda0dd"}, + {file = "scikit_learn-1.4.1.post1-cp39-cp39-win_amd64.whl", hash = "sha256:234b6bda70fdcae9e4abbbe028582ce99c280458665a155eed0b820599377d25"}, +] + +[package.dependencies] +joblib = ">=1.2.0" +numpy = ">=1.19.5,<2.0" +scipy = ">=1.6.0" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "pandas (>=1.1.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.3.4)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.15.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.3.4)", "pandas (>=1.1.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.17.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=23.3.0)", "matplotlib (>=3.3.4)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.1.5)", "polars (>=0.19.12)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pyarrow (>=12.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.17.2)"] + +[[package]] +name = "scipy" +version 
= "1.12.0" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "scipy-1.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:78e4402e140879387187f7f25d91cc592b3501a2e51dfb320f48dfb73565f10b"}, + {file = "scipy-1.12.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:f5f00ebaf8de24d14b8449981a2842d404152774c1a1d880c901bf454cb8e2a1"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e53958531a7c695ff66c2e7bb7b79560ffdc562e2051644c5576c39ff8efb563"}, + {file = "scipy-1.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e32847e08da8d895ce09d108a494d9eb78974cf6de23063f93306a3e419960c"}, + {file = "scipy-1.12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c1020cad92772bf44b8e4cdabc1df5d87376cb219742549ef69fc9fd86282dd"}, + {file = "scipy-1.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:75ea2a144096b5e39402e2ff53a36fecfd3b960d786b7efd3c180e29c39e53f2"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:408c68423f9de16cb9e602528be4ce0d6312b05001f3de61fe9ec8b1263cad08"}, + {file = "scipy-1.12.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5adfad5dbf0163397beb4aca679187d24aec085343755fcdbdeb32b3679f254c"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3003652496f6e7c387b1cf63f4bb720951cfa18907e998ea551e6de51a04467"}, + {file = "scipy-1.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b8066bce124ee5531d12a74b617d9ac0ea59245246410e19bca549656d9a40a"}, + {file = "scipy-1.12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8bee4993817e204d761dba10dbab0774ba5a8612e57e81319ea04d84945375ba"}, + {file = "scipy-1.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a24024d45ce9a675c1fb8494e8e5244efea1c7a09c60beb1eeb80373d0fecc70"}, + {file = 
"scipy-1.12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e7e76cc48638228212c747ada851ef355c2bb5e7f939e10952bc504c11f4e372"}, + {file = "scipy-1.12.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f7ce148dffcd64ade37b2df9315541f9adad6efcaa86866ee7dd5db0c8f041c3"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c39f92041f490422924dfdb782527a4abddf4707616e07b021de33467f917bc"}, + {file = "scipy-1.12.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7ebda398f86e56178c2fa94cad15bf457a218a54a35c2a7b4490b9f9cb2676c"}, + {file = "scipy-1.12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:95e5c750d55cf518c398a8240571b0e0782c2d5a703250872f36eaf737751338"}, + {file = "scipy-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:e646d8571804a304e1da01040d21577685ce8e2db08ac58e543eaca063453e1c"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:913d6e7956c3a671de3b05ccb66b11bc293f56bfdef040583a7221d9e22a2e35"}, + {file = "scipy-1.12.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:bba1b0c7256ad75401c73e4b3cf09d1f176e9bd4248f0d3112170fb2ec4db067"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:730badef9b827b368f351eacae2e82da414e13cf8bd5051b4bdfd720271a5371"}, + {file = "scipy-1.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6546dc2c11a9df6926afcbdd8a3edec28566e4e785b915e849348c6dd9f3f490"}, + {file = "scipy-1.12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:196ebad3a4882081f62a5bf4aeb7326aa34b110e533aab23e4374fcccb0890dc"}, + {file = "scipy-1.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:b360f1b6b2f742781299514e99ff560d1fe9bd1bff2712894b52abe528d1fd1e"}, + {file = "scipy-1.12.0.tar.gz", hash = "sha256:4bf5abab8a36d20193c698b0f1fc282c1d083c94723902c447e5d2f1780936a3"}, +] + +[package.dependencies] +numpy = ">=1.22.4,<1.29.0" + +[package.extras] +dev = ["click", 
"cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] +doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "hypothesis", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "seaborn" +version = "0.13.2" +description = "Statistical data visualization" +optional = false +python-versions = ">=3.8" +files = [ + {file = "seaborn-0.13.2-py3-none-any.whl", hash = "sha256:636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987"}, + {file = "seaborn-0.13.2.tar.gz", hash = "sha256:93e60a40988f4d65e9f4885df477e2fdaff6b73a9ded434c1ab356dd57eefff7"}, +] + +[package.dependencies] +matplotlib = ">=3.4,<3.6.1 || >3.6.1" +numpy = ">=1.20,<1.24.0 || >1.24.0" +pandas = ">=1.2" + +[package.extras] +dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"] +docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx (<6.0.0)", "sphinx-copybutton", "sphinx-design", "sphinx-issues"] +stats = ["scipy (>=1.7)", "statsmodels (>=0.12)"] + +[[package]] +name = "sentry-sdk" +version = "1.43.0" +description = "Python client for Sentry (https://sentry.io)" +optional = false +python-versions = "*" +files = [ + {file = "sentry-sdk-1.43.0.tar.gz", hash = "sha256:41df73af89d22921d8733714fb0fc5586c3461907e06688e6537d01a27e0e0f6"}, + {file = "sentry_sdk-1.43.0-py2.py3-none-any.whl", hash = "sha256:8d768724839ca18d7b4c7463ef7528c40b7aa2bfbf7fe554d5f9a7c044acfd36"}, +] + +[package.dependencies] +certifi = "*" +urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""} + +[package.extras] +aiohttp = ["aiohttp (>=3.5)"] +arq = ["arq (>=0.23)"] +asyncpg = ["asyncpg (>=0.23)"] +beam = ["apache-beam (>=2.12)"] 
+bottle = ["bottle (>=0.12.13)"] +celery = ["celery (>=3)"] +celery-redbeat = ["celery-redbeat (>=2)"] +chalice = ["chalice (>=1.16.0)"] +clickhouse-driver = ["clickhouse-driver (>=0.2.0)"] +django = ["django (>=1.8)"] +falcon = ["falcon (>=1.4)"] +fastapi = ["fastapi (>=0.79.0)"] +flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] +grpcio = ["grpcio (>=1.21.1)"] +httpx = ["httpx (>=0.16.0)"] +huey = ["huey (>=2)"] +loguru = ["loguru (>=0.5)"] +openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] +opentelemetry = ["opentelemetry-distro (>=0.35b0)"] +opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"] +pure-eval = ["asttokens", "executing", "pure-eval"] +pymongo = ["pymongo (>=3.1)"] +pyspark = ["pyspark (>=2.4.4)"] +quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] +rq = ["rq (>=0.6)"] +sanic = ["sanic (>=0.8)"] +sqlalchemy = ["sqlalchemy (>=1.2)"] +starlette = ["starlette (>=0.19.1)"] +starlite = ["starlite (>=1.48)"] +tornado = ["tornado (>=5)"] + +[[package]] +name = "setproctitle" +version = "1.3.3" +description = "A Python module to customize the process title" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754"}, + {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452"}, + {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3"}, + {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74"}, + {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f"}, + {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39"}, + {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85"}, + {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0"}, + {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5"}, + {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d"}, + {file = "setproctitle-1.3.3-cp310-cp310-win32.whl", hash = "sha256:a1fcac43918b836ace25f69b1dca8c9395253ad8152b625064415b1d2f9be4fb"}, + {file = "setproctitle-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:200620c3b15388d7f3f97e0ae26599c0c378fdf07ae9ac5a13616e933cbd2086"}, + {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:334f7ed39895d692f753a443102dd5fed180c571eb6a48b2a5b7f5b3564908c8"}, + {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:950f6476d56ff7817a8fed4ab207727fc5260af83481b2a4b125f32844df513a"}, + {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:195c961f54a09eb2acabbfc90c413955cf16c6e2f8caa2adbf2237d1019c7dd8"}, + {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f05e66746bf9fe6a3397ec246fe481096664a9c97eb3fea6004735a4daf867fd"}, + {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5901a31012a40ec913265b64e48c2a4059278d9f4e6be628441482dd13fb8b5"}, + {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64286f8a995f2cd934082b398fc63fca7d5ffe31f0e27e75b3ca6b4efda4e353"}, + {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:184239903bbc6b813b1a8fc86394dc6ca7d20e2ebe6f69f716bec301e4b0199d"}, + {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:664698ae0013f986118064b6676d7dcd28fefd0d7d5a5ae9497cbc10cba48fa5"}, + {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e5119a211c2e98ff18b9908ba62a3bd0e3fabb02a29277a7232a6fb4b2560aa0"}, + {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:417de6b2e214e837827067048f61841f5d7fc27926f2e43954567094051aff18"}, + {file = "setproctitle-1.3.3-cp311-cp311-win32.whl", hash = "sha256:6a143b31d758296dc2f440175f6c8e0b5301ced3b0f477b84ca43cdcf7f2f476"}, + {file = "setproctitle-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a680d62c399fa4b44899094027ec9a1bdaf6f31c650e44183b50d4c4d0ccc085"}, + {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d4460795a8a7a391e3567b902ec5bdf6c60a47d791c3b1d27080fc203d11c9dc"}, + {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bdfd7254745bb737ca1384dee57e6523651892f0ea2a7344490e9caefcc35e64"}, + {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:477d3da48e216d7fc04bddab67b0dcde633e19f484a146fd2a34bb0e9dbb4a1e"}, + {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab2900d111e93aff5df9fddc64cf51ca4ef2c9f98702ce26524f1acc5a786ae7"}, + {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088b9efc62d5aa5d6edf6cba1cf0c81f4488b5ce1c0342a8b67ae39d64001120"}, + {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6d50252377db62d6a0bb82cc898089916457f2db2041e1d03ce7fadd4a07381"}, + {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:87e668f9561fd3a457ba189edfc9e37709261287b52293c115ae3487a24b92f6"}, + {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:287490eb90e7a0ddd22e74c89a92cc922389daa95babc833c08cf80c84c4df0a"}, + {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:4fe1c49486109f72d502f8be569972e27f385fe632bd8895f4730df3c87d5ac8"}, + {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4a6ba2494a6449b1f477bd3e67935c2b7b0274f2f6dcd0f7c6aceae10c6c6ba3"}, + {file = "setproctitle-1.3.3-cp312-cp312-win32.whl", hash = "sha256:2df2b67e4b1d7498632e18c56722851ba4db5d6a0c91aaf0fd395111e51cdcf4"}, + {file = "setproctitle-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:f38d48abc121263f3b62943f84cbaede05749047e428409c2c199664feb6abc7"}, + {file = "setproctitle-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:816330675e3504ae4d9a2185c46b573105d2310c20b19ea2b4596a9460a4f674"}, + {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f960bc22d8d8e4ac886d1e2e21ccbd283adcf3c43136161c1ba0fa509088e0"}, + {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:00e6e7adff74796ef12753ff399491b8827f84f6c77659d71bd0b35870a17d8f"}, + {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53bc0d2358507596c22b02db079618451f3bd720755d88e3cccd840bafb4c41c"}, + {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6d20f9541f5f6ac63df553b6d7a04f313947f550eab6a61aa758b45f0d5657"}, + {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c1c84beab776b0becaa368254801e57692ed749d935469ac10e2b9b825dbdd8e"}, + {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:507e8dc2891021350eaea40a44ddd887c9f006e6b599af8d64a505c0f718f170"}, + {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b1067647ac7aba0b44b591936118a22847bda3c507b0a42d74272256a7a798e9"}, + {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2e71f6365744bf53714e8bd2522b3c9c1d83f52ffa6324bd7cbb4da707312cd8"}, + {file = "setproctitle-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:7f1d36a1e15a46e8ede4e953abb104fdbc0845a266ec0e99cc0492a4364f8c44"}, + {file = "setproctitle-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9a402881ec269d0cc9c354b149fc29f9ec1a1939a777f1c858cdb09c7a261df"}, + {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ff814dea1e5c492a4980e3e7d094286077054e7ea116cbeda138819db194b2cd"}, + {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:accb66d7b3ccb00d5cd11d8c6e07055a4568a24c95cf86109894dcc0c134cc89"}, + {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554eae5a5b28f02705b83a230e9d163d645c9a08914c0ad921df363a07cf39b1"}, + {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:a911b26264dbe9e8066c7531c0591cfab27b464459c74385b276fe487ca91c12"}, + {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2982efe7640c4835f7355fdb4da313ad37fb3b40f5c69069912f8048f77b28c8"}, + {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df3f4274b80709d8bcab2f9a862973d453b308b97a0b423a501bcd93582852e3"}, + {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:af2c67ae4c795d1674a8d3ac1988676fa306bcfa1e23fddb5e0bd5f5635309ca"}, + {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:af4061f67fd7ec01624c5e3c21f6b7af2ef0e6bab7fbb43f209e6506c9ce0092"}, + {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:37a62cbe16d4c6294e84670b59cf7adcc73faafe6af07f8cb9adaf1f0e775b19"}, + {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a83ca086fbb017f0d87f240a8f9bbcf0809f3b754ee01cec928fff926542c450"}, + {file = "setproctitle-1.3.3-cp38-cp38-win32.whl", hash = "sha256:059f4ce86f8cc92e5860abfc43a1dceb21137b26a02373618d88f6b4b86ba9b2"}, + {file = "setproctitle-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:ab92e51cd4a218208efee4c6d37db7368fdf182f6e7ff148fb295ecddf264287"}, + {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c7951820b77abe03d88b114b998867c0f99da03859e5ab2623d94690848d3e45"}, + {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc94cf128676e8fac6503b37763adb378e2b6be1249d207630f83fc325d9b11"}, + {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f5d9027eeda64d353cf21a3ceb74bb1760bd534526c9214e19f052424b37e42"}, + {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:2e4a8104db15d3462e29d9946f26bed817a5b1d7a47eabca2d9dc2b995991503"}, + {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c32c41ace41f344d317399efff4cffb133e709cec2ef09c99e7a13e9f3b9483c"}, + {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbf16381c7bf7f963b58fb4daaa65684e10966ee14d26f5cc90f07049bfd8c1e"}, + {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e18b7bd0898398cc97ce2dfc83bb192a13a087ef6b2d5a8a36460311cb09e775"}, + {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69d565d20efe527bd8a9b92e7f299ae5e73b6c0470f3719bd66f3cd821e0d5bd"}, + {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ddedd300cd690a3b06e7eac90ed4452348b1348635777ce23d460d913b5b63c3"}, + {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:415bfcfd01d1fbf5cbd75004599ef167a533395955305f42220a585f64036081"}, + {file = "setproctitle-1.3.3-cp39-cp39-win32.whl", hash = "sha256:21112fcd2195d48f25760f0eafa7a76510871bbb3b750219310cf88b04456ae3"}, + {file = "setproctitle-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:5a740f05d0968a5a17da3d676ce6afefebeeeb5ce137510901bf6306ba8ee002"}, + {file = "setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0"}, + {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9"}, + {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5"}, + {file = 
"setproctitle-1.3.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:200ede6fd11233085ba9b764eb055a2a191fb4ffb950c68675ac53c874c22e20"}, + {file = "setproctitle-1.3.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0d3a953c50776751e80fe755a380a64cb14d61e8762bd43041ab3f8cc436092f"}, + {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e08e232b78ba3ac6bc0d23ce9e2bee8fad2be391b7e2da834fc9a45129eb87"}, + {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1da82c3e11284da4fcbf54957dafbf0655d2389cd3d54e4eaba636faf6d117a"}, + {file = "setproctitle-1.3.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:aeaa71fb9568ebe9b911ddb490c644fbd2006e8c940f21cb9a1e9425bd709574"}, + {file = "setproctitle-1.3.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:59335d000c6250c35989394661eb6287187854e94ac79ea22315469ee4f4c244"}, + {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3ba57029c9c50ecaf0c92bb127224cc2ea9fda057b5d99d3f348c9ec2855ad3"}, + {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d876d355c53d975c2ef9c4f2487c8f83dad6aeaaee1b6571453cb0ee992f55f6"}, + {file = "setproctitle-1.3.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:224602f0939e6fb9d5dd881be1229d485f3257b540f8a900d4271a2c2aa4e5f4"}, + {file = "setproctitle-1.3.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d7f27e0268af2d7503386e0e6be87fb9b6657afd96f5726b733837121146750d"}, + {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5e7266498cd31a4572378c61920af9f6b4676a73c299fce8ba93afd694f8ae7"}, + {file = 
"setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33c5609ad51cd99d388e55651b19148ea99727516132fb44680e1f28dd0d1de9"}, + {file = "setproctitle-1.3.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:eae8988e78192fd1a3245a6f4f382390b61bce6cfcc93f3809726e4c885fa68d"}, + {file = "setproctitle-1.3.3.tar.gz", hash = "sha256:c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae"}, +] + +[package.extras] +test = ["pytest"] + +[[package]] +name = "setuptools" +version = "69.1.1" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "setuptools-69.1.1-py3-none-any.whl", hash = "sha256:02fa291a0471b3a18b2b2481ed902af520c69e8ae0919c13da936542754b4c56"}, + {file = "setuptools-69.1.1.tar.gz", hash = "sha256:5c0806c7d9af348e6dd3777b4f4dbb42c7ad85b190104837488eab9a7c945cf8"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pip (>=19.1)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff (>=0.2.1)", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.2)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", 
"wheel"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.1" +description = "A pure Python implementation of a sliding window memory map manager" +optional = false +python-versions = ">=3.7" +files = [ + {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"}, + {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"}, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.23" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:638c2c0b6b4661a4fd264f6fb804eccd392745c5887f9317feb64bb7cb03b3ea"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e3b5036aa326dc2df50cba3c958e29b291a80f604b1afa4c8ce73e78e1c9f01d"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:787af80107fb691934a01889ca8f82a44adedbf5ef3d6ad7d0f0b9ac557e0c34"}, + {file = 
"SQLAlchemy-2.0.23-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c14eba45983d2f48f7546bb32b47937ee2cafae353646295f0e99f35b14286ab"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0666031df46b9badba9bed00092a1ffa3aa063a5e68fa244acd9f08070e936d3"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89a01238fcb9a8af118eaad3ffcc5dedaacbd429dc6fdc43fe430d3a941ff965"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-win32.whl", hash = "sha256:cabafc7837b6cec61c0e1e5c6d14ef250b675fa9c3060ed8a7e38653bd732ff8"}, + {file = "SQLAlchemy-2.0.23-cp310-cp310-win_amd64.whl", hash = "sha256:87a3d6b53c39cd173990de2f5f4b83431d534a74f0e2f88bd16eabb5667e65c6"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d5578e6863eeb998980c212a39106ea139bdc0b3f73291b96e27c929c90cd8e1"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:62d9e964870ea5ade4bc870ac4004c456efe75fb50404c03c5fd61f8bc669a72"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c80c38bd2ea35b97cbf7c21aeb129dcbebbf344ee01a7141016ab7b851464f8e"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75eefe09e98043cff2fb8af9796e20747ae870c903dc61d41b0c2e55128f958d"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd45a5b6c68357578263d74daab6ff9439517f87da63442d244f9f23df56138d"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a86cb7063e2c9fb8e774f77fbf8475516d270a3e989da55fa05d08089d77f8c4"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-win32.whl", hash = "sha256:b41f5d65b54cdf4934ecede2f41b9c60c9f785620416e8e6c48349ab18643855"}, + {file = "SQLAlchemy-2.0.23-cp311-cp311-win_amd64.whl", hash = "sha256:9ca922f305d67605668e93991aaf2c12239c78207bca3b891cd51a4515c72e22"}, + {file = 
"SQLAlchemy-2.0.23-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d0f7fb0c7527c41fa6fcae2be537ac137f636a41b4c5a4c58914541e2f436b45"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c424983ab447dab126c39d3ce3be5bee95700783204a72549c3dceffe0fc8f4"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f508ba8f89e0a5ecdfd3761f82dda2a3d7b678a626967608f4273e0dba8f07ac"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6463aa765cf02b9247e38b35853923edbf2f6fd1963df88706bc1d02410a5577"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e599a51acf3cc4d31d1a0cf248d8f8d863b6386d2b6782c5074427ebb7803bda"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fd54601ef9cc455a0c61e5245f690c8a3ad67ddb03d3b91c361d076def0b4c60"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-win32.whl", hash = "sha256:42d0b0290a8fb0165ea2c2781ae66e95cca6e27a2fbe1016ff8db3112ac1e846"}, + {file = "SQLAlchemy-2.0.23-cp312-cp312-win_amd64.whl", hash = "sha256:227135ef1e48165f37590b8bfc44ed7ff4c074bf04dc8d6f8e7f1c14a94aa6ca"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:14aebfe28b99f24f8a4c1346c48bc3d63705b1f919a24c27471136d2f219f02d"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e983fa42164577d073778d06d2cc5d020322425a509a08119bdcee70ad856bf"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e0dc9031baa46ad0dd5a269cb7a92a73284d1309228be1d5935dac8fb3cae24"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5f94aeb99f43729960638e7468d4688f6efccb837a858b34574e01143cf11f89"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:63bfc3acc970776036f6d1d0e65faa7473be9f3135d37a463c5eba5efcdb24c8"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-win32.whl", hash = "sha256:f48ed89dd11c3c586f45e9eec1e437b355b3b6f6884ea4a4c3111a3358fd0c18"}, + {file = "SQLAlchemy-2.0.23-cp37-cp37m-win_amd64.whl", hash = "sha256:1e018aba8363adb0599e745af245306cb8c46b9ad0a6fc0a86745b6ff7d940fc"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:64ac935a90bc479fee77f9463f298943b0e60005fe5de2aa654d9cdef46c54df"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c4722f3bc3c1c2fcc3702dbe0016ba31148dd6efcd2a2fd33c1b4897c6a19693"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4af79c06825e2836de21439cb2a6ce22b2ca129bad74f359bddd173f39582bf5"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:683ef58ca8eea4747737a1c35c11372ffeb84578d3aab8f3e10b1d13d66f2bc4"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d4041ad05b35f1f4da481f6b811b4af2f29e83af253bf37c3c4582b2c68934ab"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aeb397de65a0a62f14c257f36a726945a7f7bb60253462e8602d9b97b5cbe204"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-win32.whl", hash = "sha256:42ede90148b73fe4ab4a089f3126b2cfae8cfefc955c8174d697bb46210c8306"}, + {file = "SQLAlchemy-2.0.23-cp38-cp38-win_amd64.whl", hash = "sha256:964971b52daab357d2c0875825e36584d58f536e920f2968df8d581054eada4b"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:616fe7bcff0a05098f64b4478b78ec2dfa03225c23734d83d6c169eb41a93e55"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0e680527245895aba86afbd5bef6c316831c02aa988d1aad83c47ffe92655e74"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9585b646ffb048c0250acc7dad92536591ffe35dba624bb8fd9b471e25212a35"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4895a63e2c271ffc7a81ea424b94060f7b3b03b4ea0cd58ab5bb676ed02f4221"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:cc1d21576f958c42d9aec68eba5c1a7d715e5fc07825a629015fe8e3b0657fb0"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:967c0b71156f793e6662dd839da54f884631755275ed71f1539c95bbada9aaab"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-win32.whl", hash = "sha256:0a8c6aa506893e25a04233bc721c6b6cf844bafd7250535abb56cb6cc1368884"}, + {file = "SQLAlchemy-2.0.23-cp39-cp39-win_amd64.whl", hash = "sha256:f3420d00d2cb42432c1d0e44540ae83185ccbbc67a6054dcc8ab5387add6620b"}, + {file = "SQLAlchemy-2.0.23-py3-none-any.whl", hash = "sha256:31952bbc527d633b9479f5f81e8b9dfada00b91d6baba021a869095f1a97006d"}, + {file = "SQLAlchemy-2.0.23.tar.gz", hash = "sha256:c1bda93cbbe4aa2aa0aa8655c5aeda505cd219ff3e8da91d1d329e143e4aff69"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.2.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aioodbc = ["aioodbc", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx-oracle (>=8)"] 
+oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3-binary"] + +[[package]] +name = "sqlmodel" +version = "0.0.14" +description = "SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness." +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "sqlmodel-0.0.14-py3-none-any.whl", hash = "sha256:accea3ff5d878e41ac439b11e78613ed61ce300cfcb860e87a2d73d4884cbee4"}, + {file = "sqlmodel-0.0.14.tar.gz", hash = "sha256:0bff8fc94af86b44925aa813f56cf6aabdd7f156b73259f2f60692c6a64ac90e"}, +] + +[package.dependencies] +pydantic = ">=1.10.13,<3.0.0" +SQLAlchemy = ">=2.0.0,<2.1.0" + +[[package]] +name = "sse-starlette" +version = "2.0.0" +description = "SSE plugin for Starlette" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sse_starlette-2.0.0-py3-none-any.whl", hash = "sha256:c4dd134302cb9708d47cae23c365fe0a089aa2a875d2f887ac80f235a9ee5744"}, + {file = "sse_starlette-2.0.0.tar.gz", hash = "sha256:0c43cc43aca4884c88c8416b65777c4de874cc4773e6458d3579c0a353dc2fb7"}, +] + +[package.dependencies] +anyio = "*" +starlette = "*" +uvicorn = "*" + +[package.extras] +examples = ["fastapi"] + +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + 
+[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "starlette" +version = "0.27.0" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.7" +files = [ + {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, + {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, +] + +[package.dependencies] +anyio = ">=3.4.0,<5" + +[package.extras] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] + +[[package]] +name = "sympy" +version = "1.12" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, + {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, +] + +[package.dependencies] +mpmath = ">=0.19" + +[[package]] +name = "tensorboard" +version = "2.16.2" +description = "TensorBoard lets you watch Tensors Flow" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tensorboard-2.16.2-py3-none-any.whl", hash = "sha256:9f2b4e7dad86667615c0e5cd072f1ea8403fc032a299f0072d6f74855775cc45"}, +] + +[package.dependencies] +absl-py = ">=0.4" +grpcio = ">=1.48.2" +markdown = ">=2.6.8" +numpy = ">=1.12.0" +protobuf = ">=3.19.6,<4.24.0 || >4.24.0" +setuptools = ">=41.0.0" +six = ">1.9" +tensorboard-data-server = ">=0.7.0,<0.8.0" +werkzeug = ">=1.0.1" + +[[package]] +name = "tensorboard-data-server" +version = "0.7.2" +description = "Fast data loading for TensorBoard" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tensorboard_data_server-0.7.2-py3-none-any.whl", hash = 
"sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb"}, + {file = "tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60"}, + {file = "tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530"}, +] + +[[package]] +name = "tensorflow" +version = "2.16.0rc0" +description = "TensorFlow is an open source machine learning framework for everyone." +optional = false +python-versions = ">=3.9" +files = [ + {file = "tensorflow-2.16.0rc0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:0f25499d5e4162a7959ce5f936866e22cf58d65a5ccdf5d88aca30ba2af8304b"}, + {file = "tensorflow-2.16.0rc0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:db5f84041f3f92afb586011263f0cd47d141fb0f6a05856cccc71c5846d82b81"}, + {file = "tensorflow-2.16.0rc0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09f5f36f4608fca4565c9e5f73f938915eca508603ecbebb50bd913411ef5695"}, + {file = "tensorflow-2.16.0rc0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:def35d0f9226c5ffa57aa81318d0e4bbb11af53c04011c9ec935851b2c0836d1"}, + {file = "tensorflow-2.16.0rc0-cp310-cp310-win_amd64.whl", hash = "sha256:7e4e04e46ce9c656f0277fd862392d7c9889ff029cb86c8bed36c84eaff02550"}, + {file = "tensorflow-2.16.0rc0-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:b94d833acdd08e7146436e7469520b886d4bfc49a315448567962e21e45b1ec1"}, + {file = "tensorflow-2.16.0rc0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1db8b092d17d161352035b7e8c0a2acaad28f95ebc808dfddc402f22199336d4"}, + {file = "tensorflow-2.16.0rc0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70d81e05dd51d07f1d6f5644581e0834cca281ccd4073bda97207e5f3327a552"}, + {file = "tensorflow-2.16.0rc0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1f3bd46b779b3ed6b3d9e93a27f6dde3b858ac8f023d8eb589397b843ce618a0"}, + {file = "tensorflow-2.16.0rc0-cp311-cp311-win_amd64.whl", hash = "sha256:7c8b52394ffd50b1fa6fdfd52cefdcfa7540a2414d799215b0d49f69a7393fc0"}, + {file = "tensorflow-2.16.0rc0-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:50c9a04caf03144c2744580a894f5b065501ac00e140c2cc4227b0f810ed9a5d"}, + {file = "tensorflow-2.16.0rc0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:f9175ca870a74f306a14f471f325d2f6871d22a54104f3e13f219ca9da8dac19"}, + {file = "tensorflow-2.16.0rc0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b94180581c559ee3907e9f9de4a8154fbc07dd35a4ba68c26a660f6d412b4066"}, + {file = "tensorflow-2.16.0rc0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f422d044eb7f8da3d8afc2bcf6073d7a4436cad3b31427a6b4ae890e1675d72"}, + {file = "tensorflow-2.16.0rc0-cp312-cp312-win_amd64.whl", hash = "sha256:b3cdd0f6d1445c8acd75ff657587331299235c4eeaa1a93f02f0a0ea2e4b8a81"}, + {file = "tensorflow-2.16.0rc0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:9ea82ee343c18434d3e76331ef60f1bbcb3d66ca133045b443b08ceceaae19a4"}, + {file = "tensorflow-2.16.0rc0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c24e7a672fd6f4c2dc2ffe68f2287077e2342e437cbafe808ab1cc8f17658336"}, + {file = "tensorflow-2.16.0rc0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5bd629f12bf24876c3f79f4f9654e8272e1caf9e3a23909e7708403d5af6cfb"}, + {file = "tensorflow-2.16.0rc0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a563b2ba95a1b1da77a0158bb3449ea526c30cd8f56bc9101f501751be0331a1"}, + {file = "tensorflow-2.16.0rc0-cp39-cp39-win_amd64.whl", hash = "sha256:82ccf10d3d79d33799395bc35ad87a2cd42509ef04884b372f81151212083bdd"}, +] + +[package.dependencies] +absl-py = ">=1.0.0" +astunparse = ">=1.6.0" +flatbuffers = ">=23.5.26" +gast = ">=0.2.1,<0.5.0 || >0.5.0,<0.5.1 || >0.5.1,<0.5.2 || >0.5.2" +google-pasta = 
">=0.1.1" +grpcio = ">=1.24.3,<2.0" +h5py = ">=3.10.0" +keras = ">=3.0.0" +libclang = ">=13.0.0" +ml-dtypes = ">=0.3.1,<0.4.0" +numpy = {version = ">=1.23.5,<2.0.0", markers = "python_version <= \"3.11\""} +opt-einsum = ">=2.3.2" +packaging = "*" +protobuf = ">=3.20.3,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0dev" +requests = ">=2.21.0,<3" +setuptools = "*" +six = ">=1.12.0" +tensorboard = ">=2.16,<2.17" +tensorflow-io-gcs-filesystem = {version = ">=0.23.1", markers = "python_version < \"3.12\""} +termcolor = ">=1.1.0" +typing-extensions = ">=3.6.6" +wrapt = ">=1.11.0" + +[package.extras] +and-cuda = ["nvidia-cublas-cu12 (==12.3.4.1)", "nvidia-cuda-cupti-cu12 (==12.3.101)", "nvidia-cuda-nvcc-cu12 (==12.3.107)", "nvidia-cuda-nvrtc-cu12 (==12.3.107)", "nvidia-cuda-runtime-cu12 (==12.3.101)", "nvidia-cudnn-cu12 (==8.9.7.29)", "nvidia-cufft-cu12 (==11.0.12.1)", "nvidia-curand-cu12 (==10.3.4.107)", "nvidia-cusolver-cu12 (==11.5.4.101)", "nvidia-cusparse-cu12 (==12.2.0.103)", "nvidia-nccl-cu12 (==2.19.3)", "nvidia-nvjitlink-cu12 (==12.3.101)"] + +[[package]] +name = "tensorflow-io-gcs-filesystem" +version = "0.36.0" +description = "TensorFlow IO" +optional = false +python-versions = ">=3.7, <3.12" +files = [ + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:702c6df62b38095ff613c433546d9424d4f33902a5ab26b00fd26457e27a99fa"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e9b8aaca2789af356c42afda0f52380f82e5abb2f3c0b85087833fcfe03875d8"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c477aed96864ceae77d7051c3b687f28813aba7320fc5dd552164fad6ec8d1a1"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:be1ff92559dfa23048b01179a1827081947583f5c6f9986ccac471df8a29322a"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:72c3ca4b8c0d8dbdd970699d05a100107cf200317ad8e6a8373e2c37225cd552"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:848e8e89a0f49258c7782189c938d8d1162d989da1a80c79f95c7af3ef6006c8"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d72db1ab03edb65fa1e98d06e504ccbc64282d38ab3589afb6db66dc448d1c1"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd4d946b5fa23220daa473a80e511a5fb27493d7e49d17dff0bb43bb0a31f32"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa346fd1dd9f57848b73874007440504f060fadd689fa1cc29cc49817d0eeaf3"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0a4437824424a4423cf86162cb8b21b1bec24698194332748b50bb952e62ab9f"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:31806bd7ac2db789161bc720747de22947063265561a4c17be54698fd9780b03"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc0e57976c1aa035af6281f0330cfb8dd50eee2f63412ecc84d60ff5075d29b7"}, + {file = "tensorflow_io_gcs_filesystem-0.36.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e97ff5c280eb10f699098ae21057be2b146d39e8a906cd5db91f2ea6c34e47d0"}, +] + +[package.extras] +tensorflow = ["tensorflow (>=2.15.0,<2.16.0)"] +tensorflow-aarch64 = ["tensorflow-aarch64 (>=2.15.0,<2.16.0)"] +tensorflow-cpu = ["tensorflow-cpu (>=2.15.0,<2.16.0)"] +tensorflow-gpu = ["tensorflow-gpu (>=2.15.0,<2.16.0)"] +tensorflow-rocm = ["tensorflow-rocm 
(>=2.15.0,<2.16.0)"] + +[[package]] +name = "termcolor" +version = "2.4.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.8" +files = [ + {file = "termcolor-2.4.0-py3-none-any.whl", hash = "sha256:9297c0df9c99445c2412e832e882a7884038a25617c60cea2ad69488d4040d63"}, + {file = "termcolor-2.4.0.tar.gz", hash = "sha256:aab9e56047c8ac41ed798fa36d892a37aca6b3e9159f3e0c24bc64a9b3ac7b7a"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "tf2onnx" +version = "1.16.1" +description = "Tensorflow to ONNX converter" +optional = false +python-versions = "*" +files = [] +develop = false + +[package.dependencies] +flatbuffers = ">=1.12" +numpy = ">=1.14.1" +onnx = ">=1.4.1" +protobuf = ">=3.20,<4.0" +requests = "*" +six = "*" + +[package.source] +type = "git" +url = "https://github.com/onnx/tensorflow-onnx" +reference = "HEAD" +resolved_reference = "9538da86d8e932d0eea8f0999672ea1a6a203b57" + +[[package]] +name = "thop" +version = "0.1.1.post2209072238" +description = "A tool to count the FLOPs of PyTorch model." 
+optional = false +python-versions = "*" +files = [ + {file = "thop-0.1.1.post2209072238-py3-none-any.whl", hash = "sha256:01473c225231927d2ad718351f78ebf7cffe6af3bed464c4f1ba1ef0f7cdda27"}, +] + +[package.dependencies] +torch = "*" + +[[package]] +name = "threadpoolctl" +version = "3.3.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.8" +files = [ + {file = "threadpoolctl-3.3.0-py3-none-any.whl", hash = "sha256:6155be1f4a39f31a18ea70f94a77e0ccd57dced08122ea61109e7da89883781e"}, + {file = "threadpoolctl-3.3.0.tar.gz", hash = "sha256:5dac632b4fa2d43f42130267929af3ba01399ef4bd1882918e92dbc30365d30c"}, +] + +[[package]] +name = "tifffile" +version = "2024.2.12" +description = "Read and write TIFF files" +optional = false +python-versions = ">=3.9" +files = [ + {file = "tifffile-2024.2.12-py3-none-any.whl", hash = "sha256:870998f82fbc94ff7c3528884c1b0ae54863504ff51dbebea431ac3fa8fb7c21"}, + {file = "tifffile-2024.2.12.tar.gz", hash = "sha256:4920a3ec8e8e003e673d3c6531863c99eedd570d1b8b7e141c072ed78ff8030d"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"] + +[[package]] +name = "timm" +version = "0.4.12" +description = "(Unofficial) PyTorch Image Models" +optional = false +python-versions = ">=3.6" +files = [ + {file = "timm-0.4.12-py3-none-any.whl", hash = "sha256:dba6b1702b7d24bf9f0f1c2fc394b4ee28f93cde5404f1dc732d63ccd00533b6"}, + {file = "timm-0.4.12.tar.gz", hash = "sha256:b14be70dbd4528b5ca8657cf5bc2672c7918c3d9ebfbffe80f4785b54e884b1e"}, +] + +[package.dependencies] +torch = ">=1.4" +torchvision = "*" + +[[package]] +name = "torch" +version = "2.0.1+cu118" +description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "torch-2.0.1+cu118-cp311-cp311-linux_x86_64.whl", hash = 
"sha256:143b6c658c17d43376e2dfbaa2c106d35639d615e5e8dec4429cf1e510dd8d61"}, +] + +[package.dependencies] +filelock = "*" +jinja2 = "*" +networkx = "*" +sympy = "*" +triton = {version = "2.0.0", markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\""} +typing-extensions = "*" + +[package.extras] +opt-einsum = ["opt-einsum (>=3.3)"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu118/torch-2.0.1%2Bcu118-cp311-cp311-linux_x86_64.whl" + +[[package]] +name = "torchinfo" +version = "1.8.0" +description = "Model summary in PyTorch, based off of the original torchsummary." +optional = false +python-versions = ">=3.7" +files = [ + {file = "torchinfo-1.8.0-py3-none-any.whl", hash = "sha256:2e911c2918603f945c26ff21a3a838d12709223dc4ccf243407bce8b6e897b46"}, + {file = "torchinfo-1.8.0.tar.gz", hash = "sha256:72e94b0e9a3e64dc583a8e5b7940b8938a1ac0f033f795457f27e6f4e7afa2e9"}, +] + +[[package]] +name = "torchvision" +version = "0.15.2+cu118" +description = "image and video datasets and models for torch deep learning" +optional = false +python-versions = ">=3.8" +files = [ + {file = "torchvision-0.15.2+cu118-cp311-cp311-linux_x86_64.whl", hash = "sha256:def9af47ebc2cad55c5aa2dad1230dcf4261833ed6df8a73e839bc233764f09e"}, +] + +[package.dependencies] +numpy = "*" +pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" +requests = "*" +torch = "2.0.1" + +[package.extras] +scipy = ["scipy"] + +[package.source] +type = "url" +url = "https://download.pytorch.org/whl/cu118/torchvision-0.15.2%2Bcu118-cp311-cp311-linux_x86_64.whl" + +[[package]] +name = "tornado" +version = "6.4" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+optional = false +python-versions = ">= 3.8" +files = [ + {file = "tornado-6.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:02ccefc7d8211e5a7f9e8bc3f9e5b0ad6262ba2fbb683a6443ecc804e5224ce0"}, + {file = "tornado-6.4-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:27787de946a9cffd63ce5814c33f734c627a87072ec7eed71f7fc4417bb16263"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f7894c581ecdcf91666a0912f18ce5e757213999e183ebfc2c3fdbf4d5bd764e"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e43bc2e5370a6a8e413e1e1cd0c91bedc5bd62a74a532371042a18ef19e10579"}, + {file = "tornado-6.4-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0251554cdd50b4b44362f73ad5ba7126fc5b2c2895cc62b14a1c2d7ea32f212"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fd03192e287fbd0899dd8f81c6fb9cbbc69194d2074b38f384cb6fa72b80e9c2"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:88b84956273fbd73420e6d4b8d5ccbe913c65d31351b4c004ae362eba06e1f78"}, + {file = "tornado-6.4-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:71ddfc23a0e03ef2df1c1397d859868d158c8276a0603b96cf86892bff58149f"}, + {file = "tornado-6.4-cp38-abi3-win32.whl", hash = "sha256:6f8a6c77900f5ae93d8b4ae1196472d0ccc2775cc1dfdc9e7727889145c45052"}, + {file = "tornado-6.4-cp38-abi3-win_amd64.whl", hash = "sha256:10aeaa8006333433da48dec9fe417877f8bcc21f48dda8d661ae79da357b2a63"}, + {file = "tornado-6.4.tar.gz", hash = "sha256:72291fa6e6bc84e626589f1c29d90a5a6d593ef5ae68052ee2ef000dfd273dee"}, +] + +[[package]] +name = "tqdm" +version = "4.66.2" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.66.2-py3-none-any.whl", hash = "sha256:1ee4f8a893eb9bef51c6e35730cebf234d5d0b6bd112b0271e10ed7c24a02bd9"}, + 
{file = "tqdm-4.66.2.tar.gz", hash = "sha256:6cd52cdf0fef0e0f543299cfc96fec90d7b8a7e88745f411ec33eb44d5ed3531"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "traitlets" +version = "5.14.1" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +files = [ + {file = "traitlets-5.14.1-py3-none-any.whl", hash = "sha256:2e5a030e6eff91737c643231bfcf04a65b0132078dad75e4936700b213652e74"}, + {file = "traitlets-5.14.1.tar.gz", hash = "sha256:8585105b371a04b8316a43d5ce29c098575c2e477850b62b848b964f1444527e"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"] + +[[package]] +name = "triton" +version = "2.0.0" +description = "A language and compiler for custom Deep Learning operations" +optional = false +python-versions = "*" +files = [ + {file = "triton-2.0.0-1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38806ee9663f4b0f7cd64790e96c579374089e58f49aac4a6608121aa55e2505"}, + {file = "triton-2.0.0-1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:226941c7b8595219ddef59a1fdb821e8c744289a132415ddd584facedeb475b1"}, + {file = "triton-2.0.0-1-cp36-cp36m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4c9fc8c89874bc48eb7e7b2107a9b8d2c0bf139778637be5bfccb09191685cfd"}, + {file = "triton-2.0.0-1-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d2684b6a60b9f174f447f36f933e9a45f31db96cb723723ecd2dcfd1c57b778b"}, + {file = "triton-2.0.0-1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:9d4978298b74fcf59a75fe71e535c092b023088933b2f1df933ec32615e4beef"}, + {file = "triton-2.0.0-1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:74f118c12b437fb2ca25e1a04759173b517582fcf4c7be11913316c764213656"}, + {file = "triton-2.0.0-1-pp37-pypy37_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9618815a8da1d9157514f08f855d9e9ff92e329cd81c0305003eb9ec25cc5add"}, + {file = "triton-2.0.0-1-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1aca3303629cd3136375b82cb9921727f804e47ebee27b2677fef23005c3851a"}, + {file = "triton-2.0.0-1-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e3e13aa8b527c9b642e3a9defcc0fbd8ffbe1c80d8ac8c15a01692478dc64d8a"}, + {file = "triton-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f05a7e64e4ca0565535e3d5d3405d7e49f9d308505bb7773d21fb26a4c008c2"}, + {file = "triton-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb4b99ca3c6844066e516658541d876c28a5f6e3a852286bbc97ad57134827fd"}, + {file = "triton-2.0.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47b4d70dc92fb40af553b4460492c31dc7d3a114a979ffb7a5cdedb7eb546c08"}, + {file = "triton-2.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fedce6a381901b1547e0e7e1f2546e4f65dca6d91e2d8a7305a2d1f5551895be"}, + {file = "triton-2.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75834f27926eab6c7f00ce73aaf1ab5bfb9bec6eb57ab7c0bfc0a23fac803b4c"}, + {file = "triton-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0117722f8c2b579cd429e0bee80f7731ae05f63fe8e9414acd9a679885fcbf42"}, + {file = "triton-2.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcd9be5d0c2e45d2b7e6ddc6da20112b6862d69741576f9c3dbaf941d745ecae"}, + {file = 
"triton-2.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42a0d2c3fc2eab4ba71384f2e785fbfd47aa41ae05fa58bf12cb31dcbd0aeceb"}, + {file = "triton-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52c47b72c72693198163ece9d90a721299e4fb3b8e24fd13141e384ad952724f"}, +] + +[package.dependencies] +cmake = "*" +filelock = "*" +lit = "*" +torch = "*" + +[package.extras] +tests = ["autopep8", "flake8", "isort", "numpy", "pytest", "scipy (>=1.7.1)"] +tutorials = ["matplotlib", "pandas", "tabulate"] + +[[package]] +name = "typing-extensions" +version = "4.10.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.10.0-py3-none-any.whl", hash = "sha256:69b1a937c3a517342112fb4c6df7e72fc39a38e7891a5730ed4985b5214b5475"}, + {file = "typing_extensions-4.10.0.tar.gz", hash = "sha256:b0abd7c89e8fb96f98db18d86106ff1d90ab692004eb746cf6eda2682f91b3cb"}, +] + +[[package]] +name = "tzdata" +version = "2024.1" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files = [ + {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, + {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, +] + +[[package]] +name = "ujson" +version = "5.9.0" +description = "Ultra fast JSON encoder and decoder for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ujson-5.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ab71bf27b002eaf7d047c54a68e60230fbd5cd9da60de7ca0aa87d0bccead8fa"}, + {file = "ujson-5.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a365eac66f5aa7a7fdf57e5066ada6226700884fc7dce2ba5483538bc16c8c5"}, + {file = "ujson-5.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:e015122b337858dba5a3dc3533af2a8fc0410ee9e2374092f6a5b88b182e9fcc"}, + {file = "ujson-5.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:779a2a88c53039bebfbccca934430dabb5c62cc179e09a9c27a322023f363e0d"}, + {file = "ujson-5.9.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10ca3c41e80509fd9805f7c149068fa8dbee18872bbdc03d7cca928926a358d5"}, + {file = "ujson-5.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4a566e465cb2fcfdf040c2447b7dd9718799d0d90134b37a20dff1e27c0e9096"}, + {file = "ujson-5.9.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f833c529e922577226a05bc25b6a8b3eb6c4fb155b72dd88d33de99d53113124"}, + {file = "ujson-5.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b68a0caab33f359b4cbbc10065c88e3758c9f73a11a65a91f024b2e7a1257106"}, + {file = "ujson-5.9.0-cp310-cp310-win32.whl", hash = "sha256:7cc7e605d2aa6ae6b7321c3ae250d2e050f06082e71ab1a4200b4ae64d25863c"}, + {file = "ujson-5.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:a6d3f10eb8ccba4316a6b5465b705ed70a06011c6f82418b59278fbc919bef6f"}, + {file = "ujson-5.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b23bbb46334ce51ddb5dded60c662fbf7bb74a37b8f87221c5b0fec1ec6454b"}, + {file = "ujson-5.9.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6974b3a7c17bbf829e6c3bfdc5823c67922e44ff169851a755eab79a3dd31ec0"}, + {file = "ujson-5.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5964ea916edfe24af1f4cc68488448fbb1ec27a3ddcddc2b236da575c12c8ae"}, + {file = "ujson-5.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ba7cac47dd65ff88571eceeff48bf30ed5eb9c67b34b88cb22869b7aa19600d"}, + {file = "ujson-5.9.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6bbd91a151a8f3358c29355a491e915eb203f607267a25e6ab10531b3b157c5e"}, + {file = 
"ujson-5.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:829a69d451a49c0de14a9fecb2a2d544a9b2c884c2b542adb243b683a6f15908"}, + {file = "ujson-5.9.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a807ae73c46ad5db161a7e883eec0fbe1bebc6a54890152ccc63072c4884823b"}, + {file = "ujson-5.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8fc2aa18b13d97b3c8ccecdf1a3c405f411a6e96adeee94233058c44ff92617d"}, + {file = "ujson-5.9.0-cp311-cp311-win32.whl", hash = "sha256:70e06849dfeb2548be48fdd3ceb53300640bc8100c379d6e19d78045e9c26120"}, + {file = "ujson-5.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:7309d063cd392811acc49b5016728a5e1b46ab9907d321ebbe1c2156bc3c0b99"}, + {file = "ujson-5.9.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:20509a8c9f775b3a511e308bbe0b72897ba6b800767a7c90c5cca59d20d7c42c"}, + {file = "ujson-5.9.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b28407cfe315bd1b34f1ebe65d3bd735d6b36d409b334100be8cdffae2177b2f"}, + {file = "ujson-5.9.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d302bd17989b6bd90d49bade66943c78f9e3670407dbc53ebcf61271cadc399"}, + {file = "ujson-5.9.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f21315f51e0db8ee245e33a649dd2d9dce0594522de6f278d62f15f998e050e"}, + {file = "ujson-5.9.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5635b78b636a54a86fdbf6f027e461aa6c6b948363bdf8d4fbb56a42b7388320"}, + {file = "ujson-5.9.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:82b5a56609f1235d72835ee109163c7041b30920d70fe7dac9176c64df87c164"}, + {file = "ujson-5.9.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5ca35f484622fd208f55041b042d9d94f3b2c9c5add4e9af5ee9946d2d30db01"}, + {file = "ujson-5.9.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:829b824953ebad76d46e4ae709e940bb229e8999e40881338b3cc94c771b876c"}, + {file = "ujson-5.9.0-cp312-cp312-win32.whl", hash = 
"sha256:25fa46e4ff0a2deecbcf7100af3a5d70090b461906f2299506485ff31d9ec437"}, + {file = "ujson-5.9.0-cp312-cp312-win_amd64.whl", hash = "sha256:60718f1720a61560618eff3b56fd517d107518d3c0160ca7a5a66ac949c6cf1c"}, + {file = "ujson-5.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d581db9db9e41d8ea0b2705c90518ba623cbdc74f8d644d7eb0d107be0d85d9c"}, + {file = "ujson-5.9.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ff741a5b4be2d08fceaab681c9d4bc89abf3c9db600ab435e20b9b6d4dfef12e"}, + {file = "ujson-5.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdcb02cabcb1e44381221840a7af04433c1dc3297af76fde924a50c3054c708c"}, + {file = "ujson-5.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e208d3bf02c6963e6ef7324dadf1d73239fb7008491fdf523208f60be6437402"}, + {file = "ujson-5.9.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4b3917296630a075e04d3d07601ce2a176479c23af838b6cf90a2d6b39b0d95"}, + {file = "ujson-5.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0c4d6adb2c7bb9eb7c71ad6f6f612e13b264942e841f8cc3314a21a289a76c4e"}, + {file = "ujson-5.9.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0b159efece9ab5c01f70b9d10bbb77241ce111a45bc8d21a44c219a2aec8ddfd"}, + {file = "ujson-5.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0cb4a7814940ddd6619bdce6be637a4b37a8c4760de9373bac54bb7b229698b"}, + {file = "ujson-5.9.0-cp38-cp38-win32.whl", hash = "sha256:dc80f0f5abf33bd7099f7ac94ab1206730a3c0a2d17549911ed2cb6b7aa36d2d"}, + {file = "ujson-5.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:506a45e5fcbb2d46f1a51fead991c39529fc3737c0f5d47c9b4a1d762578fc30"}, + {file = "ujson-5.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0fd2eba664a22447102062814bd13e63c6130540222c0aa620701dd01f4be81"}, + {file = "ujson-5.9.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bdf7fc21a03bafe4ba208dafa84ae38e04e5d36c0e1c746726edf5392e9f9f36"}, + {file = 
"ujson-5.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2f909bc08ce01f122fd9c24bc6f9876aa087188dfaf3c4116fe6e4daf7e194f"}, + {file = "ujson-5.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd4ea86c2afd41429751d22a3ccd03311c067bd6aeee2d054f83f97e41e11d8f"}, + {file = "ujson-5.9.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63fb2e6599d96fdffdb553af0ed3f76b85fda63281063f1cb5b1141a6fcd0617"}, + {file = "ujson-5.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:32bba5870c8fa2a97f4a68f6401038d3f1922e66c34280d710af00b14a3ca562"}, + {file = "ujson-5.9.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:37ef92e42535a81bf72179d0e252c9af42a4ed966dc6be6967ebfb929a87bc60"}, + {file = "ujson-5.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f69f16b8f1c69da00e38dc5f2d08a86b0e781d0ad3e4cc6a13ea033a439c4844"}, + {file = "ujson-5.9.0-cp39-cp39-win32.whl", hash = "sha256:3382a3ce0ccc0558b1c1668950008cece9bf463ebb17463ebf6a8bfc060dae34"}, + {file = "ujson-5.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:6adef377ed583477cf005b58c3025051b5faa6b8cc25876e594afbb772578f21"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ffdfebd819f492e48e4f31c97cb593b9c1a8251933d8f8972e81697f00326ff1"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4eec2ddc046360d087cf35659c7ba0cbd101f32035e19047013162274e71fcf"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbb90aa5c23cb3d4b803c12aa220d26778c31b6e4b7a13a1f49971f6c7d088e"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0823cb70866f0d6a4ad48d998dd338dce7314598721bc1b7986d054d782dfd"}, + {file = "ujson-5.9.0-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:4e35d7885ed612feb6b3dd1b7de28e89baaba4011ecdf995e88be9ac614765e9"}, + {file = "ujson-5.9.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b048aa93eace8571eedbd67b3766623e7f0acbf08ee291bef7d8106210432427"}, + {file = "ujson-5.9.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:323279e68c195110ef85cbe5edce885219e3d4a48705448720ad925d88c9f851"}, + {file = "ujson-5.9.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ac92d86ff34296f881e12aa955f7014d276895e0e4e868ba7fddebbde38e378"}, + {file = "ujson-5.9.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6eecbd09b316cea1fd929b1e25f70382917542ab11b692cb46ec9b0a26c7427f"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:473fb8dff1d58f49912323d7cb0859df5585cfc932e4b9c053bf8cf7f2d7c5c4"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f91719c6abafe429c1a144cfe27883eace9fb1c09a9c5ef1bcb3ae80a3076a4e"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1c0991c4fe256f5fdb19758f7eac7f47caac29a6c57d0de16a19048eb86bad"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a8ea0f55a1396708e564595aaa6696c0d8af532340f477162ff6927ecc46e21"}, + {file = "ujson-5.9.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:07e0cfdde5fd91f54cd2d7ffb3482c8ff1bf558abf32a8b953a5d169575ae1cd"}, + {file = "ujson-5.9.0.tar.gz", hash = "sha256:89cc92e73d5501b8a7f48575eeb14ad27156ad092c2e9fc7e3cf949f07e75532"}, +] + +[[package]] +name = "ultralytics" +version = "8.1.24" +description = "Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "ultralytics-8.1.24-py3-none-any.whl", hash = "sha256:e22729a3e0338362e6e90969f1f77433bd8aedc5c3af2b6a682eb788605a7118"}, + {file = "ultralytics-8.1.24.tar.gz", hash = "sha256:15071ce158a16bb11a393da9548f18ad45b960ac06383a52d22bd74b093c1a77"}, +] + +[package.dependencies] +matplotlib = ">=3.3.0" +opencv-python = ">=4.6.0" +pandas = ">=1.1.4" +pillow = ">=7.1.2" +psutil = "*" +py-cpuinfo = "*" +pyyaml = ">=5.3.1" +requests = ">=2.23.0" +scipy = ">=1.4.1" +seaborn = ">=0.11.0" +thop = ">=0.1.1" +torch = ">=1.8.0" +torchvision = ">=0.9.0" +tqdm = ">=4.64.0" + +[package.extras] +dev = ["check-manifest", "coverage[toml]", "ipython", "mkdocs-jupyter", "mkdocs-material (>=9.5.9)", "mkdocs-redirects", "mkdocs-ultralytics-plugin (>=0.0.44)", "mkdocstrings[python]", "pre-commit", "pytest", "pytest-cov"] +explorer = ["duckdb (<=0.9.2)", "lancedb", "streamlit"] +export = ["coremltools (>=7.0)", "onnx (>=1.12.0)", "openvino (>=2023.3)", "tensorflow (<=2.13.1)", "tensorflowjs (>=3.9.0)"] +extra = ["albumentations (>=1.0.3)", "hub-sdk (>=0.0.2)", "ipython", "pycocotools (>=2.0.7)"] +logging = ["comet", "dvclive (>=2.12.0)", "tensorboard (>=2.13.0)"] + +[[package]] +name = "urllib3" +version = "2.0.7" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, + {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "uvicorn" +version = "0.27.1" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.8" +files = [ + {file = "uvicorn-0.27.1-py3-none-any.whl", hash = "sha256:5c89da2f3895767472a35556e539fd59f7edbe9b1e9c0e1c99eebeadc61838e4"}, + {file = "uvicorn-0.27.1.tar.gz", hash = "sha256:3d9a267296243532db80c83a959a3400502165ade2c1338dea4e67915fd4745a"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} +h11 = ">=0.8" +httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} + +[package.extras] +standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", 
"websockets (>=10.4)"] + +[[package]] +name = "uvloop" +version = "0.19.0" +description = "Fast implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de4313d7f575474c8f5a12e163f6d89c0a878bc49219641d49e6f1444369a90e"}, + {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5588bd21cf1fcf06bded085f37e43ce0e00424197e7c10e77afd4bbefffef428"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b1fd71c3843327f3bbc3237bedcdb6504fd50368ab3e04d0410e52ec293f5b8"}, + {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a05128d315e2912791de6088c34136bfcdd0c7cbc1cf85fd6fd1bb321b7c849"}, + {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cd81bdc2b8219cb4b2556eea39d2e36bfa375a2dd021404f90a62e44efaaf957"}, + {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f17766fb6da94135526273080f3455a112f82570b2ee5daa64d682387fe0dcd"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4ce6b0af8f2729a02a5d1575feacb2a94fc7b2e983868b009d51c9a9d2149bef"}, + {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:31e672bb38b45abc4f26e273be83b72a0d28d074d5b370fc4dcf4c4eb15417d2"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:570fc0ed613883d8d30ee40397b79207eedd2624891692471808a95069a007c1"}, + {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5138821e40b0c3e6c9478643b4660bd44372ae1e16a322b8fc07478f92684e24"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:91ab01c6cd00e39cde50173ba4ec68a1e578fee9279ba64f5221810a9e786533"}, + {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:47bf3e9312f63684efe283f7342afb414eea4d3011542155c7e625cd799c3b12"}, + {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:da8435a3bd498419ee8c13c34b89b5005130a476bda1d6ca8cfdde3de35cd650"}, + {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:02506dc23a5d90e04d4f65c7791e65cf44bd91b37f24cfc3ef6cf2aff05dc7ec"}, + {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2693049be9d36fef81741fddb3f441673ba12a34a704e7b4361efb75cf30befc"}, + {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7010271303961c6f0fe37731004335401eb9075a12680738731e9c92ddd96ad6"}, + {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5daa304d2161d2918fa9a17d5635099a2f78ae5b5960e742b2fcfbb7aefaa593"}, + {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7207272c9520203fea9b93843bb775d03e1cf88a80a936ce760f60bb5add92f3"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:78ab247f0b5671cc887c31d33f9b3abfb88d2614b84e4303f1a63b46c046c8bd"}, + {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:472d61143059c84947aa8bb74eabbace30d577a03a1805b77933d6bd13ddebbd"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45bf4c24c19fb8a50902ae37c5de50da81de4922af65baf760f7c0c42e1088be"}, + {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271718e26b3e17906b28b67314c45d19106112067205119dddbd834c2b7ce797"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:34175c9fd2a4bc3adc1380e1261f60306344e3407c20a4d684fd5f3be010fa3d"}, + {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e27f100e1ff17f6feeb1f33968bc185bf8ce41ca557deee9d9bbbffeb72030b7"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:13dfdf492af0aa0a0edf66807d2b465607d11c4fa48f4a1fd41cbea5b18e8e8b"}, + {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e3d4e85ac060e2342ff85e90d0c04157acb210b9ce508e784a944f852a40e67"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ca4956c9ab567d87d59d49fa3704cf29e37109ad348f2d5223c9bf761a332e7"}, + {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f467a5fd23b4fc43ed86342641f3936a68ded707f4627622fa3f82a120e18256"}, + {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:492e2c32c2af3f971473bc22f086513cedfc66a130756145a931a90c3958cb17"}, + {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2df95fca285a9f5bfe730e51945ffe2fa71ccbfdde3b0da5772b4ee4f2e770d5"}, + {file = "uvloop-0.19.0.tar.gz", hash = "sha256:0246f4fd1bf2bf702e06b0d45ee91677ee5c31242f39aab4ea6fe0c51aedd0fd"}, +] + +[package.extras] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "virtualenv" +version = "20.25.1" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.7" +files = [ + {file = "virtualenv-20.25.1-py3-none-any.whl", hash = "sha256:961c026ac520bac5f69acb8ea063e8a4f071bcc9457b9c1f28f6b085c511583a"}, + {file = "virtualenv-20.25.1.tar.gz", hash = "sha256:e08e13ecdca7a0bd53798f356d5831434afa5b07b93f0abdf0797b7a06ffe197"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier 
(>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] + +[[package]] +name = "wandb" +version = "0.16.4" +description = "A CLI and library for interacting with the Weights & Biases API." +optional = false +python-versions = ">=3.7" +files = [ + {file = "wandb-0.16.4-py3-none-any.whl", hash = "sha256:bb9eb5aa2c2c85e11c76040c4271366f54d4975167aa6320ba86c3f2d97fe5fa"}, + {file = "wandb-0.16.4.tar.gz", hash = "sha256:8752c67d1347a4c29777e64dc1e1a742a66c5ecde03aebadf2b0d62183fa307c"}, +] + +[package.dependencies] +appdirs = ">=1.4.3" +Click = ">=7.1,<8.0.0 || >8.0.0" +docker-pycreds = ">=0.4.0" +GitPython = ">=1.0.0,<3.1.29 || >3.1.29" +protobuf = {version = ">=3.19.0,<4.21.0 || >4.21.0,<5", markers = "python_version > \"3.9\" or sys_platform != \"linux\""} +psutil = ">=5.0.0" +PyYAML = "*" +requests = ">=2.0.0,<3" +sentry-sdk = ">=1.0.0" +setproctitle = "*" +setuptools = "*" + +[package.extras] +async = ["httpx (>=0.23.0)"] +aws = ["boto3"] +azure = ["azure-identity", "azure-storage-blob"] +gcp = ["google-cloud-storage"] +importers = ["filelock", "mlflow", "polars", "rich", "tenacity"] +kubeflow = ["google-cloud-storage", "kubernetes", "minio", "sh"] +launch = ["PyYAML (>=6.0.0)", "awscli", "azure-containerregistry", "azure-identity", "azure-storage-blob", "boto3", "botocore", "chardet", "google-auth", "google-cloud-aiplatform", "google-cloud-artifact-registry", "google-cloud-compute", "google-cloud-storage", "iso8601", "kubernetes", "kubernetes-asyncio", "nbconvert", "nbformat", "optuna", "pydantic", "tomli", "typing-extensions"] +media = ["bokeh", "moviepy", "numpy", "pillow", "plotly (>=5.18.0)", "rdkit-pypi", "soundfile"] +models = ["cloudpickle"] +perf = ["orjson"] +reports = ["pydantic 
(>=2.0.0)"] +sweeps = ["sweeps (>=0.2.0)"] + +[[package]] +name = "watchfiles" +version = "0.21.0" +description = "Simple, modern and high performance file watching and code reload in python." +optional = false +python-versions = ">=3.8" +files = [ + {file = "watchfiles-0.21.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:27b4035013f1ea49c6c0b42d983133b136637a527e48c132d368eb19bf1ac6aa"}, + {file = "watchfiles-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c81818595eff6e92535ff32825f31c116f867f64ff8cdf6562cd1d6b2e1e8f3e"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c107ea3cf2bd07199d66f156e3ea756d1b84dfd43b542b2d870b77868c98c03"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d9ac347653ebd95839a7c607608703b20bc07e577e870d824fa4801bc1cb124"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5eb86c6acb498208e7663ca22dbe68ca2cf42ab5bf1c776670a50919a56e64ab"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f564bf68404144ea6b87a78a3f910cc8de216c6b12a4cf0b27718bf4ec38d303"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d0f32ebfaa9c6011f8454994f86108c2eb9c79b8b7de00b36d558cadcedaa3d"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d45d9b699ecbac6c7bd8e0a2609767491540403610962968d258fd6405c17c"}, + {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:aff06b2cac3ef4616e26ba17a9c250c1fe9dd8a5d907d0193f84c499b1b6e6a9"}, + {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d9792dff410f266051025ecfaa927078b94cc7478954b06796a9756ccc7e14a9"}, + {file = "watchfiles-0.21.0-cp310-none-win32.whl", hash = 
"sha256:214cee7f9e09150d4fb42e24919a1e74d8c9b8a9306ed1474ecaddcd5479c293"}, + {file = "watchfiles-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:1ad7247d79f9f55bb25ab1778fd47f32d70cf36053941f07de0b7c4e96b5d235"}, + {file = "watchfiles-0.21.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:668c265d90de8ae914f860d3eeb164534ba2e836811f91fecc7050416ee70aa7"}, + {file = "watchfiles-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a23092a992e61c3a6a70f350a56db7197242f3490da9c87b500f389b2d01eef"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e7941bbcfdded9c26b0bf720cb7e6fd803d95a55d2c14b4bd1f6a2772230c586"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11cd0c3100e2233e9c53106265da31d574355c288e15259c0d40a4405cbae317"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78f30cbe8b2ce770160d3c08cff01b2ae9306fe66ce899b73f0409dc1846c1b"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6674b00b9756b0af620aa2a3346b01f8e2a3dc729d25617e1b89cf6af4a54eb1"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd7ac678b92b29ba630d8c842d8ad6c555abda1b9ef044d6cc092dacbfc9719d"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c873345680c1b87f1e09e0eaf8cf6c891b9851d8b4d3645e7efe2ec20a20cc7"}, + {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:49f56e6ecc2503e7dbe233fa328b2be1a7797d31548e7a193237dcdf1ad0eee0"}, + {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:02d91cbac553a3ad141db016e3350b03184deaafeba09b9d6439826ee594b365"}, + {file = "watchfiles-0.21.0-cp311-none-win32.whl", hash = "sha256:ebe684d7d26239e23d102a2bad2a358dedf18e462e8808778703427d1f584400"}, + {file = 
"watchfiles-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:4566006aa44cb0d21b8ab53baf4b9c667a0ed23efe4aaad8c227bfba0bf15cbe"}, + {file = "watchfiles-0.21.0-cp311-none-win_arm64.whl", hash = "sha256:c550a56bf209a3d987d5a975cdf2063b3389a5d16caf29db4bdddeae49f22078"}, + {file = "watchfiles-0.21.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:51ddac60b96a42c15d24fbdc7a4bfcd02b5a29c047b7f8bf63d3f6f5a860949a"}, + {file = "watchfiles-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:511f0b034120cd1989932bf1e9081aa9fb00f1f949fbd2d9cab6264916ae89b1"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cfb92d49dbb95ec7a07511bc9efb0faff8fe24ef3805662b8d6808ba8409a71a"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f92944efc564867bbf841c823c8b71bb0be75e06b8ce45c084b46411475a915"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:642d66b75eda909fd1112d35c53816d59789a4b38c141a96d62f50a3ef9b3360"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d23bcd6c8eaa6324fe109d8cac01b41fe9a54b8c498af9ce464c1aeeb99903d6"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18d5b4da8cf3e41895b34e8c37d13c9ed294954907929aacd95153508d5d89d7"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b8d1eae0f65441963d805f766c7e9cd092f91e0c600c820c764a4ff71a0764c"}, + {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1fd9a5205139f3c6bb60d11f6072e0552f0a20b712c85f43d42342d162be1235"}, + {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a1e3014a625bcf107fbf38eece0e47fa0190e52e45dc6eee5a8265ddc6dc5ea7"}, + {file = "watchfiles-0.21.0-cp312-none-win32.whl", hash = 
"sha256:9d09869f2c5a6f2d9df50ce3064b3391d3ecb6dced708ad64467b9e4f2c9bef3"}, + {file = "watchfiles-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:18722b50783b5e30a18a8a5db3006bab146d2b705c92eb9a94f78c72beb94094"}, + {file = "watchfiles-0.21.0-cp312-none-win_arm64.whl", hash = "sha256:a3b9bec9579a15fb3ca2d9878deae789df72f2b0fdaf90ad49ee389cad5edab6"}, + {file = "watchfiles-0.21.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:4ea10a29aa5de67de02256a28d1bf53d21322295cb00bd2d57fcd19b850ebd99"}, + {file = "watchfiles-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:40bca549fdc929b470dd1dbfcb47b3295cb46a6d2c90e50588b0a1b3bd98f429"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9b37a7ba223b2f26122c148bb8d09a9ff312afca998c48c725ff5a0a632145f7"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec8c8900dc5c83650a63dd48c4d1d245343f904c4b64b48798c67a3767d7e165"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ad3fe0a3567c2f0f629d800409cd528cb6251da12e81a1f765e5c5345fd0137"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d353c4cfda586db2a176ce42c88f2fc31ec25e50212650c89fdd0f560ee507b"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83a696da8922314ff2aec02987eefb03784f473281d740bf9170181829133765"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a03651352fc20975ee2a707cd2d74a386cd303cc688f407296064ad1e6d1562"}, + {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ad692bc7792be8c32918c699638b660c0de078a6cbe464c46e1340dadb94c19"}, + {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06247538e8253975bdb328e7683f8515ff5ff041f43be6c40bff62d989b7d0b0"}, + {file = 
"watchfiles-0.21.0-cp38-none-win32.whl", hash = "sha256:9a0aa47f94ea9a0b39dd30850b0adf2e1cd32a8b4f9c7aa443d852aacf9ca214"}, + {file = "watchfiles-0.21.0-cp38-none-win_amd64.whl", hash = "sha256:8d5f400326840934e3507701f9f7269247f7c026d1b6cfd49477d2be0933cfca"}, + {file = "watchfiles-0.21.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7f762a1a85a12cc3484f77eee7be87b10f8c50b0b787bb02f4e357403cad0c0e"}, + {file = "watchfiles-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e9be3ef84e2bb9710f3f777accce25556f4a71e15d2b73223788d528fcc2052"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4c48a10d17571d1275701e14a601e36959ffada3add8cdbc9e5061a6e3579a5d"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c889025f59884423428c261f212e04d438de865beda0b1e1babab85ef4c0f01"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:66fac0c238ab9a2e72d026b5fb91cb902c146202bbd29a9a1a44e8db7b710b6f"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a21f71885aa2744719459951819e7bf5a906a6448a6b2bbce8e9cc9f2c8128"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c9198c989f47898b2c22201756f73249de3748e0fc9de44adaf54a8b259cc0c"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f57c4461cd24fda22493109c45b3980863c58a25b8bec885ca8bea6b8d4b28"}, + {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:853853cbf7bf9408b404754b92512ebe3e3a83587503d766d23e6bf83d092ee6"}, + {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d5b1dc0e708fad9f92c296ab2f948af403bf201db8fb2eb4c8179db143732e49"}, + {file = "watchfiles-0.21.0-cp39-none-win32.whl", hash = 
"sha256:59137c0c6826bd56c710d1d2bda81553b5e6b7c84d5a676747d80caf0409ad94"}, + {file = "watchfiles-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:6cb8fdc044909e2078c248986f2fc76f911f72b51ea4a4fbbf472e01d14faa58"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab03a90b305d2588e8352168e8c5a1520b721d2d367f31e9332c4235b30b8994"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:927c589500f9f41e370b0125c12ac9e7d3a2fd166b89e9ee2828b3dda20bfe6f"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd467213195e76f838caf2c28cd65e58302d0254e636e7c0fca81efa4a2e62c"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02b73130687bc3f6bb79d8a170959042eb56eb3a42df3671c79b428cd73f17cc"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:08dca260e85ffae975448e344834d765983237ad6dc308231aa16e7933db763e"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ccceb50c611c433145502735e0370877cced72a6c70fd2410238bcbc7fe51d8"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57d430f5fb63fea141ab71ca9c064e80de3a20b427ca2febcbfcef70ff0ce895"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd5fad9b9c0dd89904bbdea978ce89a2b692a7ee8a0ce19b940e538c88a809c"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:be6dd5d52b73018b21adc1c5d28ac0c68184a64769052dfeb0c5d9998e7f56a2"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b3cab0e06143768499384a8a5efb9c4dc53e19382952859e4802f294214f36ec"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8c6ed10c2497e5fedadf61e465b3ca12a19f96004c15dcffe4bd442ebadc2d85"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43babacef21c519bc6631c5fce2a61eccdfc011b4bcb9047255e9620732c8097"}, + {file = "watchfiles-0.21.0.tar.gz", hash = "sha256:c76c635fabf542bb78524905718c39f736a98e5ab25b23ec6d4abede1a85a6a3"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + +[[package]] +name = "websockets" +version = "12.0" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.8" +files = [ + {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"}, + {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"}, + {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"}, + {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"}, + {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"}, + {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"}, + {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"}, + {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"}, + {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"}, + {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"}, + {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"}, + {file = 
"websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"}, + {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"}, + {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"}, + {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"}, + {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"}, + {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"}, + {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"}, + {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"}, + {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"}, + {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"}, + {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"}, + {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"}, + {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"}, + {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"}, + {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"}, + {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"}, + {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"}, + {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"}, + {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"}, + {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"}, + {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"}, + {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"}, + {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"}, + {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"}, + {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"}, + {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"}, + {file = 
"websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"}, + {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"}, + {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"}, + {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"}, + {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"}, + {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"}, + {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"}, + {file = 
"websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"}, + {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"}, + {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"}, + {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"}, + {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, +] + +[[package]] +name = "werkzeug" +version = "3.0.1" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.8" +files = [ + {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"}, + {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wheel" +version = "0.42.0" +description = "A built-package format for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "wheel-0.42.0-py3-none-any.whl", hash = "sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d"}, + {file = "wheel-0.42.0.tar.gz", hash = "sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8"}, +] + +[package.extras] +test = ["pytest (>=6.0.0)", "setuptools (>=65)"] + +[[package]] +name = "win32-setctime" +version = "1.1.0" +description = "A small Python utility to set file creation time on Windows" +optional = false +python-versions = ">=3.5" +files = [ + 
{file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"}, + {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"}, +] + +[package.extras] +dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] + +[[package]] +name = "wrapt" +version = "1.16.0" +description = "Module for decorators, wrappers and monkey patching." +optional = false +python-versions = ">=3.6" +files = [ + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = 
"wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = 
"wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = 
"wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = "sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = 
"wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, +] + +[[package]] +name = "xmltodict" +version = "0.13.0" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.4" +files = [ + {file = "xmltodict-0.13.0-py2.py3-none-any.whl", hash = "sha256:aa89e8fd76320154a40d19a0df04a4695fb9dc5ba977cbb68ab3e4eb225e7852"}, + {file = "xmltodict-0.13.0.tar.gz", hash = "sha256:341595a488e3e01a85a9d8911d8912fd922ede5fecc4dce437eb4b6c8d037e56"}, +] + +[[package]] +name = "youtube-dl" +version = 
"2021.12.17" +description = "YouTube video downloader" +optional = false +python-versions = "*" +files = [ + {file = "youtube_dl-2021.12.17-py2.py3-none-any.whl", hash = "sha256:f1336d5de68647e0364a47b3c0712578e59ec76f02048ff5c50ef1c69d79cd55"}, + {file = "youtube_dl-2021.12.17.tar.gz", hash = "sha256:bc59e86c5d15d887ac590454511f08ce2c47698d5a82c27bfe27b5d814bbaed2"}, +] + +[[package]] +name = "yt-dlp" +version = "2024.3.10" +description = "A youtube-dl fork with additional features and patches" +optional = false +python-versions = ">=3.8" +files = [ + {file = "yt_dlp-2024.3.10-py3-none-any.whl", hash = "sha256:bbe66b9a3aa23b6378ccca3ea20f5aadf385fa21a993513a105d73c827a86ed4"}, + {file = "yt_dlp-2024.3.10.tar.gz", hash = "sha256:6e74cb14a69dbeb872c8ef4e0b8bbed2ee846ec633513cf3124a74c1faedc07b"}, +] + +[package.dependencies] +brotli = {version = "*", markers = "implementation_name == \"cpython\""} +brotlicffi = {version = "*", markers = "implementation_name != \"cpython\""} +certifi = "*" +mutagen = "*" +pycryptodomex = "*" +requests = ">=2.31.0,<3" +urllib3 = ">=1.26.17,<3" +websockets = ">=12.0" + +[package.extras] +build = ["build", "hatchling", "pip", "wheel"] +dev = ["flake8", "isort", "pytest"] +py2exe = ["py2exe (>=0.12)"] +pyinstaller = ["pyinstaller (>=6.3)"] +secretstorage = ["cffi", "secretstorage"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.11,<3.12" +content-hash = "d1b8398f1344ee687a7420d8db8b768e92fff14408d64a99f5cc550bd987f3d7" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..f23c512 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,91 @@ +[tool.poetry] +name = "level2-3-cv-finalproject-cv-06" +version = "0.1.0" +description = "" +authors = ["백광현_T6074 "] +readme = "README.md" + +[tool.poetry.dependencies] +python = ">=3.11,<3.12" +torch = {url = "https://download.pytorch.org/whl/cu118/torch-2.0.1%2Bcu118-cp311-cp311-linux_x86_64.whl"} +torchvision = {url = 
"https://download.pytorch.org/whl/cu118/torchvision-0.15.2%2Bcu118-cp311-cp311-linux_x86_64.whl"} +fastapi = {extras = ["all"], version = "^0.105.0"} +uvicorn = "^0.27.1" +loguru = "^0.7.2" +jinja2 = "^3.1.3" +sqlmodel = "^0.0.14" +pydantic-settings = "^2.1.0" +sqlalchemy = "2.0.23" +pymysql = "^1.1.0" +python-multipart = "^0.0.9" +passlib = "^1.7.4" +python-jose = "^3.3.0" +boto3 = "^1.34.53" +opencv-python = "^4.9.0.80" +numpy = "^1.26.4" +matplotlib = "^3.8.3" +pillow = "^10.2.0" +scikit-learn = "^1.4.1.post1" +onnx = "^1.15.0" +onnx2pytorch = "^0.4.1" +ultralytics = "^8.1.24" +h5py = "^3.10.0" +ipykernel = "^6.29.3" +tensorflow = "^2.15.0.post1" +tf2onnx = {git = "https://github.com/onnx/tensorflow-onnx"} +onnxruntime = "^1.17.1" +keras2onnx = "^1.7.0" +keras = "^3.0.5" +pydot = "^2.0.0" +websockets = "^12.0" +sse-starlette = "^2.0.0" +timm = "0.4.12" +albumentations = "^1.4.1" +wandb = "^0.16.4" +xmltodict = "^0.13.0" +tqdm = "^4.66.2" +einops = "^0.7.0" +torchinfo = "^1.8.0" +pandas = "^2.2.1" +pafy = "^0.5.5" +youtube-dl = "^2021.12.17" +cap-from-youtube = {git = "https://github.com/ibaiGorordo/cap_from_youtube"} + +[tool.poetry.group.dev.dependencies] +pre-commit = "^3.6.2" +black = "^24.2.0" +autoflake = "^2.3.0" +isort = "^5.13.2" +flake8 = "^7.0.0" + +[tool.poetry.group.test.dependencies] +pytest = "^8.0.2" + +[tool.black] +line-length = 88 +target-version = ['py311'] +include = '\.pyi?$' +exclude = ''' +/( + \.git + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist +)/ +''' + +[tool.isort] +profile = "black" + +[tool.flake8] +ignore = "E203, E501, W503" +max-line-length = 88 +exclude = ".git,__pycache__,docs/,old/,build/,dist/" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e358bd6 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,178 @@ +absl-py==2.1.0 ; python_version >= "3.11" and python_version < 
"3.12" +albumentations==1.4.1 ; python_version >= "3.11" and python_version < "3.12" +annotated-types==0.6.0 ; python_version >= "3.11" and python_version < "3.12" +anyio==3.7.1 ; python_version >= "3.11" and python_version < "3.12" +appdirs==1.4.4 ; python_version >= "3.11" and python_version < "3.12" +appnope==0.1.4 ; python_version >= "3.11" and python_version < "3.12" and platform_system == "Darwin" +asttokens==2.4.1 ; python_version >= "3.11" and python_version < "3.12" +astunparse==1.6.3 ; python_version >= "3.11" and python_version < "3.12" +boto3==1.34.53 ; python_version >= "3.11" and python_version < "3.12" +botocore==1.34.53 ; python_version >= "3.11" and python_version < "3.12" +brotli==1.1.0 ; python_version >= "3.11" and python_version < "3.12" and implementation_name == "cpython" +brotlicffi==1.1.0.0 ; python_version >= "3.11" and python_version < "3.12" and implementation_name != "cpython" +cap-from-youtube @ git+https://github.com/ibaiGorordo/cap_from_youtube@d34d7d9f3ce3853d3c31ac393a091e4a723a03d1 ; python_version >= "3.11" and python_version < "3.12" +certifi==2024.2.2 ; python_version >= "3.11" and python_version < "3.12" +cffi==1.16.0 ; python_version >= "3.11" and python_version < "3.12" and implementation_name != "cpython" +charset-normalizer==3.3.2 ; python_version >= "3.11" and python_version < "3.12" +click==8.1.7 ; python_version >= "3.11" and python_version < "3.12" +cmake==3.28.3 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.11" and python_version < "3.12" +colorama==0.4.6 ; python_version >= "3.11" and python_version < "3.12" and (sys_platform == "win32" or platform_system == "Windows") +coloredlogs==15.0.1 ; python_version >= "3.11" and python_version < "3.12" +comm==0.2.1 ; python_version >= "3.11" and python_version < "3.12" +contourpy==1.2.0 ; python_version >= "3.11" and python_version < "3.12" +cycler==0.12.1 ; python_version >= "3.11" and python_version < "3.12" +debugpy==1.8.1 ; 
python_version >= "3.11" and python_version < "3.12" +decorator==5.1.1 ; python_version >= "3.11" and python_version < "3.12" +dm-tree==0.1.8 ; python_version >= "3.11" and python_version < "3.12" +dnspython==2.6.1 ; python_version >= "3.11" and python_version < "3.12" +docker-pycreds==0.4.0 ; python_version >= "3.11" and python_version < "3.12" +ecdsa==0.18.0 ; python_version >= "3.11" and python_version < "3.12" +einops==0.7.0 ; python_version >= "3.11" and python_version < "3.12" +email-validator==2.1.1 ; python_version >= "3.11" and python_version < "3.12" +executing==2.0.1 ; python_version >= "3.11" and python_version < "3.12" +fastapi[all]==0.105.0 ; python_version >= "3.11" and python_version < "3.12" +filelock==3.13.1 ; python_version >= "3.11" and python_version < "3.12" +fire==0.5.0 ; python_version >= "3.11" and python_version < "3.12" +flatbuffers==23.5.26 ; python_version >= "3.11" and python_version < "3.12" +fonttools==4.49.0 ; python_version >= "3.11" and python_version < "3.12" +gast==0.5.4 ; python_version >= "3.11" and python_version < "3.12" +gitdb==4.0.11 ; python_version >= "3.11" and python_version < "3.12" +gitpython==3.1.42 ; python_version >= "3.11" and python_version < "3.12" +google-pasta==0.2.0 ; python_version >= "3.11" and python_version < "3.12" +greenlet==3.0.3 ; python_version >= "3.11" and python_version < "3.12" and (platform_machine == "aarch64" or platform_machine == "ppc64le" or platform_machine == "x86_64" or platform_machine == "amd64" or platform_machine == "AMD64" or platform_machine == "win32" or platform_machine == "WIN32") +grpcio==1.62.0 ; python_version >= "3.11" and python_version < "3.12" +h11==0.14.0 ; python_version >= "3.11" and python_version < "3.12" +h5py==3.10.0 ; python_version >= "3.11" and python_version < "3.12" +httpcore==1.0.4 ; python_version >= "3.11" and python_version < "3.12" +httptools==0.6.1 ; python_version >= "3.11" and python_version < "3.12" +httpx==0.27.0 ; python_version >= "3.11" and 
python_version < "3.12" +humanfriendly==10.0 ; python_version >= "3.11" and python_version < "3.12" +idna==3.6 ; python_version >= "3.11" and python_version < "3.12" +imageio==2.34.0 ; python_version >= "3.11" and python_version < "3.12" +ipykernel==6.29.3 ; python_version >= "3.11" and python_version < "3.12" +ipython==8.22.2 ; python_version >= "3.11" and python_version < "3.12" +itsdangerous==2.1.2 ; python_version >= "3.11" and python_version < "3.12" +jedi==0.19.1 ; python_version >= "3.11" and python_version < "3.12" +jinja2==3.1.3 ; python_version >= "3.11" and python_version < "3.12" +jmespath==1.0.1 ; python_version >= "3.11" and python_version < "3.12" +joblib==1.3.2 ; python_version >= "3.11" and python_version < "3.12" +jupyter-client==8.6.0 ; python_version >= "3.11" and python_version < "3.12" +jupyter-core==5.7.1 ; python_version >= "3.11" and python_version < "3.12" +keras2onnx==1.7.0 ; python_version >= "3.11" and python_version < "3.12" +keras==3.0.5 ; python_version >= "3.11" and python_version < "3.12" +kiwisolver==1.4.5 ; python_version >= "3.11" and python_version < "3.12" +lazy-loader==0.3 ; python_version >= "3.11" and python_version < "3.12" +libclang==16.0.6 ; python_version >= "3.11" and python_version < "3.12" +lit==17.0.6 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.11" and python_version < "3.12" +loguru==0.7.2 ; python_version >= "3.11" and python_version < "3.12" +markdown-it-py==3.0.0 ; python_version >= "3.11" and python_version < "3.12" +markdown==3.5.2 ; python_version >= "3.11" and python_version < "3.12" +markupsafe==2.1.5 ; python_version >= "3.11" and python_version < "3.12" +matplotlib-inline==0.1.6 ; python_version >= "3.11" and python_version < "3.12" +matplotlib==3.8.3 ; python_version >= "3.11" and python_version < "3.12" +mdurl==0.1.2 ; python_version >= "3.11" and python_version < "3.12" +ml-dtypes==0.3.2 ; python_version >= "3.11" and python_version < "3.12" +mpmath==1.3.0 ; 
python_version >= "3.11" and python_version < "3.12" +mutagen==1.47.0 ; python_version >= "3.11" and python_version < "3.12" +namex==0.0.7 ; python_version >= "3.11" and python_version < "3.12" +nest-asyncio==1.6.0 ; python_version >= "3.11" and python_version < "3.12" +networkx==3.2.1 ; python_version >= "3.11" and python_version < "3.12" +numpy==1.26.4 ; python_version >= "3.11" and python_version < "3.12" +onnx2pytorch==0.4.1 ; python_version >= "3.11" and python_version < "3.12" +onnx==1.15.0 ; python_version >= "3.11" and python_version < "3.12" +onnxconverter-common==1.13.0 ; python_version >= "3.11" and python_version < "3.12" +onnxruntime==1.17.1 ; python_version >= "3.11" and python_version < "3.12" +opencv-python-headless==4.9.0.80 ; python_version >= "3.11" and python_version < "3.12" +opencv-python==4.9.0.80 ; python_version >= "3.11" and python_version < "3.12" +opt-einsum==3.3.0 ; python_version >= "3.11" and python_version < "3.12" +orjson==3.9.15 ; python_version >= "3.11" and python_version < "3.12" +packaging==23.2 ; python_version >= "3.11" and python_version < "3.12" +pafy==0.5.5 ; python_version >= "3.11" and python_version < "3.12" +pandas==2.2.1 ; python_version >= "3.11" and python_version < "3.12" +parso==0.8.3 ; python_version >= "3.11" and python_version < "3.12" +passlib==1.7.4 ; python_version >= "3.11" and python_version < "3.12" +pexpect==4.9.0 ; python_version >= "3.11" and python_version < "3.12" and (sys_platform != "win32" and sys_platform != "emscripten") +pillow==10.2.0 ; python_version >= "3.11" and python_version < "3.12" +platformdirs==4.2.0 ; python_version >= "3.11" and python_version < "3.12" +prompt-toolkit==3.0.43 ; python_version >= "3.11" and python_version < "3.12" +protobuf==3.20.3 ; python_version >= "3.11" and python_version < "3.12" +psutil==5.9.8 ; python_version >= "3.11" and python_version < "3.12" +ptyprocess==0.7.0 ; python_version >= "3.11" and python_version < "3.12" and (sys_platform != "win32" and 
sys_platform != "emscripten") +pure-eval==0.2.2 ; python_version >= "3.11" and python_version < "3.12" +py-cpuinfo==9.0.0 ; python_version >= "3.11" and python_version < "3.12" +pyasn1==0.5.1 ; python_version >= "3.11" and python_version < "3.12" +pycparser==2.21 ; python_version >= "3.11" and python_version < "3.12" and implementation_name != "cpython" +pycryptodomex==3.20.0 ; python_version >= "3.11" and python_version < "3.12" +pydantic-core==2.16.3 ; python_version >= "3.11" and python_version < "3.12" +pydantic-extra-types==2.5.0 ; python_version >= "3.11" and python_version < "3.12" +pydantic-settings==2.2.1 ; python_version >= "3.11" and python_version < "3.12" +pydantic==2.6.3 ; python_version >= "3.11" and python_version < "3.12" +pydot==2.0.0 ; python_version >= "3.11" and python_version < "3.12" +pygments==2.17.2 ; python_version >= "3.11" and python_version < "3.12" +pymysql==1.1.0 ; python_version >= "3.11" and python_version < "3.12" +pyparsing==3.1.2 ; python_version >= "3.11" and python_version < "3.12" +pyreadline3==3.4.1 ; sys_platform == "win32" and python_version >= "3.11" and python_version < "3.12" +python-dateutil==2.9.0 ; python_version >= "3.11" and python_version < "3.12" +python-dotenv==1.0.1 ; python_version >= "3.11" and python_version < "3.12" +python-jose==3.3.0 ; python_version >= "3.11" and python_version < "3.12" +python-multipart==0.0.9 ; python_version >= "3.11" and python_version < "3.12" +pytz==2024.1 ; python_version >= "3.11" and python_version < "3.12" +pywin32==306 ; sys_platform == "win32" and platform_python_implementation != "PyPy" and python_version >= "3.11" and python_version < "3.12" +pyyaml==6.0.1 ; python_version >= "3.11" and python_version < "3.12" +pyzmq==25.1.2 ; python_version >= "3.11" and python_version < "3.12" +requests==2.31.0 ; python_version >= "3.11" and python_version < "3.12" +rich==13.7.1 ; python_version >= "3.11" and python_version < "3.12" +rsa==4.9 ; python_version >= "3.11" and python_version < 
"3.12" +s3transfer==0.10.0 ; python_version >= "3.11" and python_version < "3.12" +scikit-image==0.22.0 ; python_version >= "3.11" and python_version < "3.12" +scikit-learn==1.4.1.post1 ; python_version >= "3.11" and python_version < "3.12" +scipy==1.12.0 ; python_version >= "3.11" and python_version < "3.12" +seaborn==0.13.2 ; python_version >= "3.11" and python_version < "3.12" +sentry-sdk==1.43.0 ; python_version >= "3.11" and python_version < "3.12" +setproctitle==1.3.3 ; python_version >= "3.11" and python_version < "3.12" +setuptools==69.1.1 ; python_version >= "3.11" and python_version < "3.12" +six==1.16.0 ; python_version >= "3.11" and python_version < "3.12" +smmap==5.0.1 ; python_version >= "3.11" and python_version < "3.12" +sniffio==1.3.1 ; python_version >= "3.11" and python_version < "3.12" +sqlalchemy==2.0.23 ; python_version >= "3.11" and python_version < "3.12" +sqlmodel==0.0.14 ; python_version >= "3.11" and python_version < "3.12" +sse-starlette==2.0.0 ; python_version >= "3.11" and python_version < "3.12" +stack-data==0.6.3 ; python_version >= "3.11" and python_version < "3.12" +starlette==0.27.0 ; python_version >= "3.11" and python_version < "3.12" +sympy==1.12 ; python_version >= "3.11" and python_version < "3.12" +tensorboard-data-server==0.7.2 ; python_version >= "3.11" and python_version < "3.12" +tensorboard==2.16.2 ; python_version >= "3.11" and python_version < "3.12" +tensorflow-io-gcs-filesystem==0.36.0 ; python_version >= "3.11" and python_version < "3.12" +tensorflow==2.16.0rc0 ; python_version >= "3.11" and python_version < "3.12" +termcolor==2.4.0 ; python_version >= "3.11" and python_version < "3.12" +tf2onnx @ git+https://github.com/onnx/tensorflow-onnx@9538da86d8e932d0eea8f0999672ea1a6a203b57 ; python_version >= "3.11" and python_version < "3.12" +thop==0.1.1.post2209072238 ; python_version >= "3.11" and python_version < "3.12" +threadpoolctl==3.3.0 ; python_version >= "3.11" and python_version < "3.12" +tifffile==2024.2.12 ; 
python_version >= "3.11" and python_version < "3.12" +timm==0.4.12 ; python_version >= "3.11" and python_version < "3.12" +torch @ https://download.pytorch.org/whl/cu118/torch-2.0.1%2Bcu118-cp311-cp311-linux_x86_64.whl ; python_version >= "3.11" and python_version < "3.12" +torchinfo==1.8.0 ; python_version >= "3.11" and python_version < "3.12" +torchvision @ https://download.pytorch.org/whl/cu118/torchvision-0.15.2%2Bcu118-cp311-cp311-linux_x86_64.whl ; python_version >= "3.11" and python_version < "3.12" +tornado==6.4 ; python_version >= "3.11" and python_version < "3.12" +tqdm==4.66.2 ; python_version >= "3.11" and python_version < "3.12" +traitlets==5.14.1 ; python_version >= "3.11" and python_version < "3.12" +triton==2.0.0 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.11" and python_version < "3.12" +typing-extensions==4.10.0 ; python_version >= "3.11" and python_version < "3.12" +tzdata==2024.1 ; python_version >= "3.11" and python_version < "3.12" +ujson==5.9.0 ; python_version >= "3.11" and python_version < "3.12" +ultralytics==8.1.24 ; python_version >= "3.11" and python_version < "3.12" +urllib3==2.0.7 ; python_version >= "3.11" and python_version < "3.12" +uvicorn==0.27.1 ; python_version >= "3.11" and python_version < "3.12" +uvicorn[standard]==0.27.1 ; python_version >= "3.11" and python_version < "3.12" +uvloop==0.19.0 ; (sys_platform != "win32" and sys_platform != "cygwin") and platform_python_implementation != "PyPy" and python_version >= "3.11" and python_version < "3.12" +wandb==0.16.4 ; python_version >= "3.11" and python_version < "3.12" +watchfiles==0.21.0 ; python_version >= "3.11" and python_version < "3.12" +wcwidth==0.2.13 ; python_version >= "3.11" and python_version < "3.12" +websockets==12.0 ; python_version >= "3.11" and python_version < "3.12" +werkzeug==3.0.1 ; python_version >= "3.11" and python_version < "3.12" +wheel==0.42.0 ; python_version >= "3.11" and python_version < "3.12" 
+win32-setctime==1.1.0 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "win32" +wrapt==1.16.0 ; python_version >= "3.11" and python_version < "3.12" +xmltodict==0.13.0 ; python_version >= "3.11" and python_version < "3.12" +youtube-dl==2021.12.17 ; python_version >= "3.11" and python_version < "3.12" +yt-dlp==2024.3.10 ; python_version >= "3.11" and python_version < "3.12" diff --git a/server_setting.sh b/server_setting.sh new file mode 100644 index 0000000..f03b8ed --- /dev/null +++ b/server_setting.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +echo "--------------------------" +echo "install packages for build" +echo "--------------------------" +apt-get upgrade -y +apt-get update -y +apt-get install git -y +apt-get install curl -y +apt-get install gcc make -y +apt-get install -y net-tools tree vim telnet netcat +apt-get install -y make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev liblzma-dev wget llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev +apt-get install -y mysql-server + +echo "-------------------------------" +echo "install pyenv and bashrc update" +echo "-------------------------------" +git clone https://github.com/pyenv/pyenv.git ~/.pyenv +if !(grep -qc "PYENV_ROOT" ~/.bashrc); then + echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc + echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc + echo 'eval "$(pyenv init -)"' >> ~/.bashrc +fi + +sleep 5 +. ~/.bashrc + +echo "--------------------" +echo "install python3.11.4" +echo "--------------------" +pyenv install 3.11.4 +pyenv global 3.11.4 + +echo "--------------------------------" +echo "install poetry and bashrc update" +echo "--------------------------------" +curl -sSL https://install.python-poetry.org | python3 - +if !(grep -qc "$HOME/.local/bin" ~/.bashrc); then + echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.bashrc +fi + +echo "Executing ~/.bashrc" >> ~/.bashrc +. 
~/.bashrc + +poetry config virtualenvs.in-project true \ No newline at end of file