diff --git a/.gitignore b/.gitignore
index 388a597..db72a2e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 __pycache__/
+.cache/
 .vscode/
 .bash_history
 *.generated
@@ -6,6 +7,4 @@ docker/openvino_base_environment.txt
 docker/Dockerfile.env
 docker/final.env
 models
-tests/results/**/*
-samples/ava_ai_extension/tests/results/**/*
 samples/edgex_bridge/edgex/**/*
diff --git a/README.md b/README.md
index fb0aa40..e0a8ff9 100644
--- a/README.md
+++ b/README.md
@@ -116,7 +116,9 @@ VA Serving includes a sample client [vaclient](./vaclient/README.md) that can co
 Before running a pipeline, we need to know what pipelines are available. We do this using vaclient's `list-pipelines` command. In a new shell, run the following command:
 
 ```bash
-$ ./vaclient/vaclient.sh list-pipelines
+./vaclient/vaclient.sh list-pipelines
+```
+```
 - object_detection/person_vehicle_bike
 - object_classification/vehicle_attributes
 - audio_detection/environment
@@ -125,12 +127,15 @@ $ ./vaclient/vaclient.sh list-pipelines
 > **Note:** The pipelines you will see may differ slightly.
 
 Pipelines are displayed as a name/version tuple. The name reflects the action and the version supplies more detail about that action. Let's go with `object_detection/person_vehicle_bike`. Now we need to choose a media source. We recommend the [IoT Devkit sample videos](https://github.com/intel-iot-devkit/sample-videos) to get started. As the pipeline version indicates support for detecting people, person-bicycle-car-detection.mp4 would be a good choice.
+> **Note:** Make sure to include the `raw=true` parameter in the GitHub URL as shown in our examples. Failure to do so will result in a pipeline execution error.
 
 vaclient offers a `run` command that takes two additional arguments: the `pipeline` and the `uri` for the media source. The `run` command displays inference results until either the media is exhausted or `CTRL+C` is pressed. Inference result bounding boxes are displayed in the format `label (confidence) [top left width height] {meta-data}` provided applicable data is present. At the end of the pipeline run, the average fps is shown.
 ```
-$ ./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+```
+```
 Timestamp 48583333333
 - vehicle (0.95) [0.00, 0.12, 0.15, 0.36]
 Timestamp 48666666666
@@ -159,20 +164,25 @@ All being well it will go into `QUEUED` then `RUNNING` state. We can interrogate
 > **NOTE:** The pipeline instance value depends on the number of pipelines started while the server is running, so it may differ from the value shown in the following examples.
 ```
-$ ./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+```
+```
-Starting pipeline...
-Pipeline running: object_detection/person_vehicle_bike, instance = 2
+Starting pipeline object_detection/person_vehicle_bike, instance = 2
 ```
 You will need both the pipeline tuple and the `instance` id for the status command.
 This command will display the pipeline state:
 ```
-$ ./vaclient/vaclient.sh status object_detection/person_vehicle_bike 2
+./vaclient/vaclient.sh status object_detection/person_vehicle_bike 2
+```
+```
 RUNNING
 ```
 Then wait a minute or so and try again. The pipeline will have completed.
 ```
-$ ./vaclient/vaclient.sh status object_detection/person_vehicle_bike 2
+./vaclient/vaclient.sh status object_detection/person_vehicle_bike 2
+```
+```
 COMPLETED
 ```
@@ -180,34 +190,50 @@ COMPLETED
 If a pipeline is stopped, rather than allowed to complete, it goes into the ABORTED state. Start the pipeline again; this time we'll stop it.
 ```
-$ ./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
+```
+```
-Starting pipeline...
-Pipeline running: object_detection/person_vehicle_bike, instance = 3
-$ ./vaclient/vaclient.sh status object_detection/person_vehicle_bike 3
+Starting pipeline object_detection/person_vehicle_bike, instance = 3
+```
+```
+./vaclient/vaclient.sh status object_detection/person_vehicle_bike 3
+```
+```
 RUNNING
-$ ./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 3
+```
+```
+./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 3
+```
+```
 Stopping Pipeline...
 Pipeline stopped
 avg_fps: 24.33
-$ ./vaclient/vaclient.sh status object_detection/person_vehicle_bike 3
+```
+```
+./vaclient/vaclient.sh status object_detection/person_vehicle_bike 3
+```
+```
 ABORTED
 ```
 ### Error
 The error state covers a number of outcomes, such as a request that could not be satisfied, a missing pipeline dependency, or an initialization problem. We can create an error condition by supplying a valid but unreachable uri.
 ```
-$ ./vaclient/vaclient.sh start object_detection/person_vehicle_bike http://bad-uri
+./vaclient/vaclient.sh start object_detection/person_vehicle_bike http://bad-uri
+```
+```
-Starting pipeline...
-Pipeline running: object_detection/person_vehicle_bike, instance = 4
+Starting pipeline object_detection/person_vehicle_bike, instance = 4
 ```
 Note that VA Serving does not report an error at this stage as it goes into the `QUEUED` state before it realizes that the source is not providing media. Checking the state a few seconds later will show the error.
 ```
-$ ./vaclient/vaclient.sh status object_detection/person_vehicle_bike 4
+./vaclient/vaclient.sh status object_detection/person_vehicle_bike 4
+```
+```
 ERROR
 ```
@@ -217,16 +243,15 @@ RTSP allows you to connect to a server and display a video stream. VA Serving in
 First, start VA Serving with RTSP enabled. By default, the RTSP stream will use port 8554.
 ```
-$ docker/run.sh --enable-rtsp -v /tmp:/tmp
+docker/run.sh --enable-rtsp -v /tmp:/tmp
 ```
 Then start a pipeline, specifying the RTSP server endpoint path `vaserving`. In this case the RTSP endpoint would be `rtsp://localhost:8554/vaserving`.
 ```
-$ ./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --rtsp-path vaserving
+./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --rtsp-path vaserving
 ```
 If you see the error
 ```
-Starting pipeline...
-Pipeline running: object_detection/person_vehicle_bike, instance = 1
+Starting pipeline object_detection/person_vehicle_bike, instance = 1
 Error in pipeline, please check vaserving log messages
 ```
 You probably forgot to enable RTSP in the server.
@@ -240,9 +265,11 @@ Now start `vlc` and from the `Media` menu select `Open Network Stream`. For URL
 ## Change Pipeline and Source Media
 With vaclient it is easy to customize service requests. Here we will use a vehicle classification pipeline `object_classification/vehicle_attributes` with the IoT Devkit video `car-detection.mp4`. Note how vaclient now displays classification metadata including the type and color of the vehicle.
 ```
- $ ./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true
-Starting pipeline...
-Pipeline running: object_classification/vehicle_attributes, instance = 1
+./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true
+```
+```
+Starting pipeline object_classification/vehicle_attributes, instance = 1
+Pipeline running
 Timestamp 18080000000
 - vehicle (1.00) [0.41, 0.00, 0.57, 0.33] {'color': 'red', 'type': 'car'}
@@ -271,9 +298,10 @@ If you look at the video you can see that there are some errors in classification -
 Inference accelerator devices can be easily selected using the device parameter. Here we run the car classification pipeline again, but this time use the integrated GPU for detection inference by setting the `detection-device` parameter.
 ```
-$ ./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --parameter detection-model-instance-id person_vehicle_bike_detection_gpu
-Starting pipeline...
-Pipeline running: object_classification/vehicle_attributes, instance = 2
+./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --parameter detection-model-instance-id person_vehicle_bike_detection_gpu
+```
+```
+Starting pipeline object_classification/vehicle_attributes, instance = 2
 ```
 > **Note:** The GPU inference plug-in dynamically builds OpenCL kernels when it is first loaded, resulting in a ~30s delay before inference results are produced.
@@ -286,7 +314,9 @@ As the previous example has shown, the vaclient application works by converting
 The `--show-request` option displays the REST verb, uri and body in the request. Let's repeat the previous GPU inference example, adding RTSP output, and show the underlying request.
 ```
-$ ./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --rtsp-path vaserving --show-request
+./vaclient/vaclient.sh run object_classification/vehicle_attributes https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --parameter detection-device GPU --rtsp-path vaserving --show-request
+```
+```
 POST http://localhost:8080/pipelines/object_classification/vehicle_attributes
 Body:{'source': {'uri': 'https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true', 'type': 'uri'}, 'destination': {'metadata': {'type': 'file', 'path': '/tmp/results.jsonl', 'format': 'json-lines'}, 'frame': {'type': 'rtsp', 'path': 'vaserving'}}, 'parameters': {'detection-device': 'GPU'}}
@@ -332,11 +362,11 @@ They are easier to understand when the json is pretty-printed
 The `--show-request` output can be easily converted into a curl command.
 ```
-$ curl -X -H "Content-Type: application/json' -d
+curl -X POST -H 'Content-Type: application/json' -d
 ```
 So the above request would be as shown below. Note the pipeline instance `1` returned by the request.
 ```bash
-$ curl localhost:8080/pipelines/object_classification/vehicle_attributes -X POST -H \
+curl localhost:8080/pipelines/object_classification/vehicle_attributes -X POST -H \
 'Content-Type: application/json' -d \
 '{
 "source": {
@@ -358,6 +388,8 @@ $ curl localhost:8080/pipelines/object_classification/vehicle_attributes -X POST
 "detection-device": "GPU"
 }
 }'
+```
+```
 1
 ```
 # Changing Pipeline Model
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 1a60e10..09598d0 100755
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -45,6 +45,15 @@ RUN if [[ ${VA_SERVING_BASE} == *"openvino/ubuntu20_data_runtime:2021.2" ]]; the
     rm -rf /var/lib/apt/lists/* ;\
     fi
+# Install boost library required for HDDL plugin
+RUN if [[ ${VA_SERVING_BASE} == *"openvino/ubuntu20_data_runtime"* ]]; then \
+    DEBIAN_FRONTEND=noninteractive apt-get update && \
+    apt-get install -y -q --no-install-recommends \
+    libboost-program-options1.71.0 && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* ;\
+    fi
+
 RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
     apt-get upgrade -y -q && \
     apt-get dist-upgrade -y -q && \
@@ -70,6 +79,9 @@ COPY ./vaserving /home/video-analytics-serving/vaserving
 COPY ./vaclient /home/video-analytics-serving/vaclient
 COPY --chown=vaserving ./tools /home/video-analytics-serving/tools
+# Copy GVA Python extensions
+COPY ./extensions /home/video-analytics-serving/extensions
+
 # Media Analytics Framework set via environment variable
 ENV FRAMEWORK=${FRAMEWORK}
 WORKDIR /home/video-analytics-serving
@@ -107,9 +119,6 @@ ONBUILD ARG PIPELINES_PATH
 ONBUILD ENV PIPELINES_PATH=${PIPELINES_PATH}
 ONBUILD COPY ${PIPELINES_PATH} /home/video-analytics-serving/pipelines
-# Copy GVA Python extensions
-ONBUILD COPY ./extensions /home/video-analytics-serving/extensions
-
 # Stage that is used is controlled via PIPELINES_COMMAND build argument
 FROM ${PIPELINES_COMMAND} as video-analytics-serving-with-models-and-pipelines
 ########################################################
diff --git a/docker/build.sh b/docker/build.sh
index e04cdab..331e65d 100755
--- a/docker/build.sh
+++ b/docker/build.sh
@@ -10,7 +10,7 @@ DOCKERFILE_DIR=$(dirname "$(readlink -f "$0")")
 SOURCE_DIR=$(dirname "$DOCKERFILE_DIR")
 BASE_IMAGE_FFMPEG="openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg:20.10"
-BASE_IMAGE_GSTREAMER="openvino/ubuntu20_data_runtime:2021.4.1"
+BASE_IMAGE_GSTREAMER="openvino/ubuntu20_data_runtime:2021.4.2"
 BASE_IMAGE=${BASE_IMAGE:-""}
 BASE_BUILD_CONTEXT=
@@ -36,7 +36,7 @@ BASE_BUILD_OPTIONS="--network=host "
 SUPPORTED_IMAGES=($BASE_IMAGE_GSTREAMER $BASE_IMAGE_FFMPEG)
 OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-"openvino/ubuntu20_data_dev"}
-OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4.1"}
+OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4.2"}
 FORCE_MODEL_DOWNLOAD=
 DEFAULT_GSTREAMER_BASE_BUILD_TAG="video-analytics-serving-gstreamer-base"
@@ -419,7 +419,7 @@ cp -f $DOCKERFILE_DIR/Dockerfile $DOCKERFILE_DIR/Dockerfile.env
 ENVIRONMENT_FILE_LIST=
 if [[ "$BASE_IMAGE" == *"openvino/"* ]]; then
-    $RUN_PREFIX docker run -t --rm $DOCKER_RUN_ENVIRONMENT --entrypoint /bin/bash -e HOSTNAME=BASE $BASE_IMAGE "-i" "-c" "env" > $DOCKERFILE_DIR/openvino_base_environment.txt
+    $RUN_PREFIX docker run -t --rm --entrypoint /bin/bash -e HOSTNAME=BASE $BASE_IMAGE "-i" "-c" "env" > $DOCKERFILE_DIR/openvino_base_environment.txt
     ENVIRONMENT_FILE_LIST+="$DOCKERFILE_DIR/openvino_base_environment.txt "
 fi
@@ -430,7 +430,7 @@ for ENVIRONMENT_FILE in ${ENVIRONMENT_FILES[@]}; do
 done
 if [ ! -z "$ENVIRONMENT_FILE_LIST" ]; then
-    cat $ENVIRONMENT_FILE_LIST | grep -E '=' | tr '\n' ' ' | tr '\r' ' ' > $DOCKERFILE_DIR/final.env
+    cat $ENVIRONMENT_FILE_LIST | grep -E '=' | sed -e 's/,\s\+/,/g' | tr '\n' ' ' | tr '\r' ' ' > $DOCKERFILE_DIR/final.env
     echo " HOME=/home/video-analytics-serving " >> $DOCKERFILE_DIR/final.env
     echo "ENV " | cat - $DOCKERFILE_DIR/final.env | tr -d '\n' >> $DOCKERFILE_DIR/Dockerfile.env
     printf "\nENV PYTHONPATH=\$PYTHONPATH:/home/video-analytics-serving\nENV GST_PLUGIN_PATH=\$GST_PLUGIN_PATH:/usr/lib/x86_64-linux-gnu/gstreamer-1.0/" >> $DOCKERFILE_DIR/Dockerfile.env
diff --git a/docker/run.sh b/docker/run.sh
index 2cf3f2b..a1c5499 100755
--- a/docker/run.sh
+++ b/docker/run.sh
@@ -83,7 +83,11 @@ enable_hardware_access() {
     if ls /dev/dri/render* 1> /dev/null 2>&1; then
         echo "Found /dev/dri/render entry - enabling for GPU"
         DEVICES+='--device /dev/dri '
-        USER_GROUPS+="--group-add $(stat -c '%g' /dev/dri/render*) "
+        RENDER_GROUPS=$(stat -c '%g' /dev/dri/render*)
+        for group in $RENDER_GROUPS
+        do
+            USER_GROUPS+="--group-add $group "
+        done
     fi
 
     # Intel(R) NCS2
@@ -94,10 +98,21 @@ enable_hardware_access() {
     fi
 
     # HDDL
-    if [ -e /dev/ion ]; then
-        echo "Found /dev/ion - enabling for HDDL-R"
-        DEVICES+="--device /dev/ion "
-        VOLUME_MOUNT+="-v /var/tmp:/var/tmp "
+    if compgen -G /dev/myriad* > /dev/null ; then
+        echo "Found /dev/myriad devices - enabling for HDDL-R"
+        VOLUME_MOUNT+="-v /var/tmp:/var/tmp -v /dev/shm:/dev/shm "
+    fi
+
+    # Webcam
+    for device in $(ls /dev | grep video); do
+        echo "Found /dev/$device - enabling webcam"
+        DEVICES+="--device /dev/$device "
+    done
+
+    # Microphone
+    if [ -e /dev/snd ]; then
+        echo "Found /dev/snd - enabling microphone"
+        DEVICES+="--device /dev/snd "
     fi
 }
@@ -298,16 +313,19 @@ if [ "${MODE}" == "DEV" ]; then
         PIPELINES=$SOURCE_DIR/pipelines/$FRAMEWORK
     fi
     PRIVILEGED="--privileged "
+elif [ ! -z "$ENTRYPOINT" ]; then
+    MODE=CUSTOM_ENTRYPOINT
 elif [ "${MODE}" == "SERVICE" ]; then
     if [ -z "$PORTS" ]; then
         PORTS+="-p 8080:8080 "
     fi
-    enable_hardware_access
 else
     echo "Invalid Mode"
     show_help
 fi
+enable_hardware_access
+
 if [ !
-z "$ENABLE_RTSP" ]; then ENVIRONMENT+="-e ENABLE_RTSP=true -e RTSP_PORT=$RTSP_PORT " PORTS+="-p $RTSP_PORT:$RTSP_PORT " diff --git a/docker/video-analytics-serving-third-party-programs.txt b/docker/video-analytics-serving-third-party-programs.txt deleted file mode 100644 index 634c6fc..0000000 --- a/docker/video-analytics-serving-third-party-programs.txt +++ /dev/null @@ -1,609 +0,0 @@ -This file contains the list of third party software ("third party programs") contained in the Intel software and their required notices and/or license terms. This third party software, even if included with the distribution of the Intel software, may be governed by separate license terms, including without limitation, third party license terms, other Intel software license terms, and open source software license terms. These separate license terms govern your use of the third party programs as set forth in the "third-party-programs.txt" or other similarly-named text file. - - -Third party programs and their corresponding required notices and/or license terms are listed below. - -------------------------------------------------------------- - - -1. setuptools - Copyright (C) 2016 Jason R Coombs - - pip3 - Copyright (c) 2008-2020 The pip developers (see AUTHORS.txt file) - - jsonschema - Copyright (c) 2013 Julian Berman - - rfc3986-validator - Copyright (c) 2019, Nicolas Aimetti - - rfc3339-validator - Copyright (c) 2019, Nicolas Aimetti - - pyyaml - Copyright (c) 2017-2020 Ingy döt Net - Copyright (c) 2006-2016 Kirill Simonov - - Live Video Analytics - https://azure.microsoft.com/en-us/services/media-services/live-video-analytics/ - Copyright 2020 Microsoft Corporation - Modifications Copyright 2020 Intel Corporation - - VAAPI driver for the Intel GEN8+ Graphics family - Copyright (c) 2017-2021, Intel Corporation - - Intel OpenCL - Copyright (C) 2018-2021 Intel Corporation - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -------------------------------------------------------------- - - -2. grpcio - Copyright 2014 gRPC authors. 
- - aclnet - https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/aclnet - Copyright (c) 2020 Intel Corporation - - person-vehicle-bike-detection-crossroad-0078 - vehicle-attributes-recognition-barrier-0039 - vehicle-detection-0202 - person-detection-retail-0013 - action-recognition-0001-decoder - action-recognition-0001-encoder - driver-action-recognition-adas-0002-decoder - driver-action-recognition-adas-0002-encoder - https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel - Copyright (c) 2021 Intel Corporation - - Requests - Copyright 2019 Kenneth Reitz - - swagger-ui-bundle - Copyright 2020 SmartBear Software Inc. - - Tornado Web Server - Copyright: 2009-2011 Facebook - - Zalando Connexion - Copyright 2015 Zalando SE - -Apache 2.0 License - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -------------------------------------------------------------- - -3. Python 3 - Copyright © 2001-2020 Python Software Foundation; All Rights - Reserved - - 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and - the Individual or Organization ("Licensee") accessing and otherwise using Python - 3.9.0 software in source or binary form and its associated documentation. - - 2. Subject to the terms and conditions of this License Agreement, PSF hereby - grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, - analyze, test, perform and/or display publicly, prepare derivative works, - distribute, and otherwise use Python 3.9.0 alone or in any derivative - version, provided, however, that PSF's License Agreement and PSF's notice of - copyright, i.e., "Copyright © 2001-2020 Python Software Foundation; All Rights - Reserved" are retained in Python 3.9.0 alone or in any derivative version - prepared by Licensee. - - 3. In the event Licensee prepares a derivative work that is based on or - incorporates Python 3.9.0 or any part thereof, and wants to make the - derivative work available to others as provided herein, then Licensee hereby - agrees to include in any such work a brief summary of the changes made to Python - 3.9.0. - - 4. PSF is making Python 3.9.0 available to Licensee on an "AS IS" basis. - PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF - EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR - WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE - USE OF PYTHON 3.9.0 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. - - 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.9.0 - FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF - MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.9.0, OR ANY DERIVATIVE - THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - - 6. This License Agreement will automatically terminate upon a material breach of - its terms and conditions. - - 7. Nothing in this License Agreement shall be deemed to create any relationship - of agency, partnership, or joint venture between PSF and Licensee. 
This License - Agreement does not grant permission to use PSF trademarks or trade name in a - trademark sense to endorse or promote products or services of Licensee, or any - third party. - - 8. By copying, installing or otherwise using Python 3.9.0, Licensee agrees - to be bound by the terms and conditions of this License Agreement. - -------------------------------------------------------------- - -4. Numpy (BSD 3-clause Numpy Customized) - Copyright (c) 2005-2020, NumPy Developers. - All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------- - -5. protobuf (BSD 3-clause Google Customized) - Copyright 2008 Google Inc. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - Code generated by the Protocol Buffer compiler is owned by the owner - of the input file used when generating it. This code is not - standalone and requires a support library to be linked with it. This - support library is itself covered by the above license. - -------------------------------------------------------------- - -6. python-psutil - Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola' - All rights reserved. - -BSD 3-Clause License - -Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola' -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of the psutil authors nor the names of its contributors - may be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------- - -7. jemalloc - -Unless otherwise specified, files in the jemalloc source distribution are -subject to the following license: - -Copyright (C) 2002-present Jason Evans . -All rights reserved. -Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-present Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice(s), - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice(s), - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS -OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------- - -8. Python Paho MQTT Client - -Eclipse Distribution License - v 1.0 -Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. - -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------- - -9. python-dateutil - -Copyright 2017- Paul Ganssle -Copyright 2017- dateutil contributors (see AUTHORS file) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -The above license applies to all contributions after 2017-12-01, as well as -all contributions that have been re-licensed (see AUTHORS file for the list of -contributors who have re-licensed their code). -------------------------- -dateutil - Extensions to the standard Python datetime module. 
- -Copyright (c) 2003-2011 - Gustavo Niemeyer -Copyright (c) 2012-2014 - Tomi Pieviläinen -Copyright (c) 2014-2016 - Yaron de Leeuw -Copyright (c) 2015- - Paul Ganssle -Copyright (c) 2015- - dateutil contributors (see AUTHORS file) - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -The above BSD License Applies to all code, even that also covered by Apache 2.0. - -------------------------------------------------------------- -10. Media-Driver - Copyright (c) 2007-2017 Intel Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -Copyright (c) 2010, The WebM Project authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
- -Neither the name of Google, nor the WebM Project, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. Copyright (c) 2008 Red Hat Inc. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA Copyright (c) 2007-2008 Intel Corporation Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. Copyright © 2014 NVIDIA Corporation Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright © 2007 Red Hat Inc. Copyright © 2007-2012 Intel Corporation Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. Sun Apr 18 09:35:45 1999 by faith@precisioninsight.com Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -Copyright 2008, Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------
-
-* Docker Base Images
-
-  OpenVINO Runtime Base Image
-  https://hub.docker.com/r/openvino/ubuntu18_runtime
-  https://hub.docker.com/r/openvino/ubuntu20_runtime
-  https://github.com/openvinotoolkit/docker_ci
-  Copyright (C) 2019-2021 Intel Corporation
-  All rights reserved.
-
-  OpenVINO Data Runtime Base Image
-  https://hub.docker.com/r/openvino/ubuntu18_data_runtime
-  https://hub.docker.com/r/openvino/ubuntu20_data_runtime
-  https://github.com/openvinotoolkit/docker_ci
-  Copyright (C) 2019-2021 Intel Corporation
-  All rights reserved.
-
-  OpenVisualCloud Docker Base Image
-  https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-gst
-  https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg
-  Copyright (c) 2018,Intel Corporation
-  All rights reserved.
-
-This docker installs third party components licensed under various open source licenses. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses.
-
--------------------------------------------------------------
-Other names and brands may be claimed as the property of others.
-
diff --git a/docs/build_script_reference.md b/docs/build_script_reference.md
index 8c55271..83a75c7 100644
--- a/docs/build_script_reference.md
+++ b/docs/build_script_reference.md
@@ -4,7 +4,9 @@ The `build.sh` script passes common options to the underlying `docker build` com
 Use the --help option to see how to use the script. All arguments are optional.
 ```
-$ docker/build.sh --help
+docker/build.sh --help
+```
+```
 usage: build.sh
 [--base base image]
 [--framework ffmpeg || gstreamer]
diff --git a/docs/building_video_analytics_serving.md b/docs/building_video_analytics_serving.md
index cb55587..5b270e6 100644
--- a/docs/building_video_analytics_serving.md
+++ b/docs/building_video_analytics_serving.md
@@ -26,7 +26,7 @@ can be customized to meet an application's requirements.
 | Command | Media Analytics Base Image | Image Name | Description |
 | --- | --- | --- | ---- |
-| `./docker/build.sh`| **ubuntu20_data_runtime:2021.4.1** docker [image](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) |`video-analytics-serving-gstreamer` | DL Streamer based microservice with default pipeline definitions and deep learning models. |
+| `./docker/build.sh`| **ubuntu20_data_runtime:2021.4.2** docker [image](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) |`video-analytics-serving-gstreamer` | DL Streamer based microservice with default pipeline definitions and deep learning models. |
 | `./docker/build.sh --framework ffmpeg --open-model-zoo...`| **xeone3-ubuntu1804-analytics-ffmpeg:20.10** docker [image](https://hub.docker.com/r/openvisualcloud/xeon-ubuntu1804-analytics-ffmpeg) |`video-analytics-serving-ffmpeg`| FFmpeg Video Analytics based microservice with default pipeline definitions and deep learning models. |
 ### Building with OpenVINO, Ubuntu 20.04 and DL Streamer Support
 **Example:**
@@ -70,7 +70,7 @@ All validation is done in docker environment. Host built (aka "bare metal") conf
 | **Base Image** | **Framework** | **Openvino Version** | **Link** | **Default** |
 |---------------------|---------------|---------------|------------------------|-------------|
-| OpenVINO 2021.4.1 ubuntu20_data_runtime | GStreamer | 2021.4.1 | [Docker Hub](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) | Y |
+| OpenVINO 2021.4.2 ubuntu20_data_runtime | GStreamer | 2021.4.2 | [Docker Hub](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) | Y |
 | Open Visual Cloud 20.10 xeone3-ubuntu1804-analytics-ffmpeg | FFmpeg | 2021.1 | [Docker Hub](https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg) | Y |
 ---
diff --git a/docs/changing_object_detection_models.md b/docs/changing_object_detection_models.md
index 6e997ae..9fa2050 100644
--- a/docs/changing_object_detection_models.md
+++ b/docs/changing_object_detection_models.md
@@ -48,25 +48,28 @@ Build and run the sample microservice with the following commands:
 ```
 ### List Models
-Use [vaclient](/vaclient/README.md) to list the models. Check that `object_detection/person_vehicle_bike` is present and that that `yolo-v2-tiny-tf` is not. Also count the number of models. In this example there are 8.
+Use [vaclient](/vaclient/README.md) to list the models. Check that `object_detection/person_vehicle_bike` is present and that `yolo-v2-tiny-tf` is not. Also count the number of models. In this example there are 7.
+```
+./vaclient/vaclient.sh list-models
+```
 ```
-$ ./vaclient/vaclient.sh list-models
- - emotion_recognition/1
- - object_detection/person_vehicle_bike
- - object_classification/vehicle_attributes
 - audio_detection/environment
+ - face_detection_retail/1
+ - object_classification/vehicle_attributes
 - action_recognition/encoder
 - action_recognition/decoder
- - face_detection_retail/1
+ - object_detection/person_vehicle_bike
+ - emotion_recognition/1
 ```
 ### Detect Objects on Sample Video
 In a second terminal window, use [vaclient](/vaclient/README.md) to run the pipeline. Expected output is abbreviated.
 ```bash
-./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/raw/master/bottle-detection.mp4
+./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/raw/master/bottle-detection.mp4?raw=true
+```
+```
-Starting pipeline...
-Pipeline running: object_detection/person_vehicle_bike, instance = 1 +Starting pipeline object_detection/person_vehicle_bike, instance = 1 Timestamp 33519553 - vehicle (0.53) [0.79, 0.71, 0.89, 0.88] Timestamp 67039106 @@ -139,7 +142,9 @@ Copied model_proc to: /output/models/object_detection/yolo-v2-tiny-tf/yolo-v2-ti The model will now be in `models` folder in the root of the project: > **Note:** If you see an entry for `keras-YOLOv3-model-set` you can ignore it ``` -$ tree models +tree models +``` +``` models └── object_detection └── yolo-v2-tiny-tf @@ -161,11 +166,9 @@ models Make a copy of the `object_detection` version `person_vehicle_bike` pipeline definition file and change the version to `yolo-v2-tiny-tf`. ``` -$ cp -r pipelines/gstreamer/object_detection/person_vehicle_bike pipelines/gstreamer/object_detection/yolo-v2-tiny-tf +cp -r pipelines/gstreamer/object_detection/person_vehicle_bike pipelines/gstreamer/object_detection/yolo-v2-tiny-tf ``` -> **Note:** You can also update the existing version `1` to point to the new model instead of creating a new version. - #### Edit the Pipeline Template Video Analytics Serving pipeline definition files contain a template @@ -209,9 +212,11 @@ pipelines to make testing local changes easier. ``` Once started you can verify that the new model and pipeline have been loaded. -The `list-models` command now shows 9 models, including `object_detection/yolo-v2-tiny-tf` +The `list-models` command now shows 8 models, including `object_detection/yolo-v2-tiny-tf` ```bash -$ ./vaclient/vaclient.sh list-models +./vaclient/vaclient.sh list-models +``` +``` - emotion_recognition/1 - object_detection/yolo-v2-tiny-tf - object_detection/person_vehicle_bike @@ -223,7 +228,9 @@ $ ./vaclient/vaclient.sh list-models ``` The `list-pipelines` command shows `object_detection/yolo-v2-tiny-tf` ```bash -$ ./vaclient/vaclient.sh list-pipelines +./vaclient/vaclient.sh list-pipelines +``` +``` - object_detection/app_src_dst - object_detection/yolo-v2-tiny-tf - object_detection/object_zone_count @@ -242,7 +249,9 @@ $ ./vaclient/vaclient.sh list-pipelines Now use vaclient to run the `object_detection/yolo-v2-tiny-tf` pipeline with the new model. You can see the `yolo-v2-tiny-tf` model in action as objects are now correctly detected as bottles. ```bash -$ ./vaclient/vaclient.sh run object_detection/yolo-v2-tiny-tf https://github.com/intel-iot-devkit/sample-videos/raw/master/bottle-detection.mp4 +./vaclient/vaclient.sh run object_detection/yolo-v2-tiny-tf https://github.com/intel-iot-devkit/sample-videos/raw/master/bottle-detection.mp4?raw=true +``` +``` Pipeline running: object_detection/yolo-v2-tiny-tf, instance = 1 Timestamp 972067039 - bottle (0.51) [0.09, 0.36, 0.18, 0.62] @@ -270,7 +279,9 @@ rm -r models ``` Once started you can verify that the new model has been loaded. ```bash -$ ./vaclient/vaclient.sh list-models +./vaclient/vaclient.sh list-models +``` +``` - emotion_recognition/1 - object_detection/yolo-v2-tiny-tf - object_detection/person_vehicle_bike diff --git a/docs/creating_extensions.md b/docs/creating_extensions.md index 5341015..c0fcbac 100644 --- a/docs/creating_extensions.md +++ b/docs/creating_extensions.md @@ -18,7 +18,7 @@ This section will outline an example to show how inference results of object det ### Extension `process_frame` is the default function invoked by GVA Python. -> Note: The `process_frame` function needs to `return True` in order for the rest of the pipeline to function. 
In the absence of this statement, the extension runs and exits without executing the subsequent parts of the pipeline which might be useful for extension debug.
+> Note: The `process_frame` function needs to `return True` in order for the rest of the pipeline to function. Without `return True` or with `return False`, the extension runs but drops frames. This can be useful to short-circuit processing, i.e. to intentionally drop certain frames.

In the example below, in `process_frame`, the number of objects in the frame is obtained by counting the number of detected regions. A statement is printed if the number of objects exceeds a threshold value.
diff --git a/docs/customizing_pipeline_requests.md b/docs/customizing_pipeline_requests.md
index 6a4ad5c..6790574 100644
--- a/docs/customizing_pipeline_requests.md
+++ b/docs/customizing_pipeline_requests.md
@@ -5,7 +5,7 @@ Pipeline requests are initiated to exercise the Video Analytics Serving REST API

## Request Format

-> Note: This document shows curl requests. Requests can also be sent via vaclient, see [VA Client Command Options](../vaclient/README.md#command-options)
+> Note: This document shows curl requests. Requests can also be sent via vaclient using the `--request-file` option; see [VA Client Command Options](../vaclient/README.md#command-options)

Pipeline requests sent to Video Analytics Serving REST API are JSON documents that have the following attributes:

@@ -43,6 +43,8 @@ curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \
   "threshold": 0.90
  }
 }'
+```
+```
2
```

The number returned on the console is the pipeline instance id (e.g. 2).
As the video is being analyzed and as objects are detected, results are added to the `destination` file which can be viewed using:

```bash
-$ tail -f /tmp/results.jsonl
+tail -f /tmp/results.jsonl
+```
+```
{"objects":[{"detection":{"bounding_box":{"x_max":0.7503407597541809,"x_min":0.6836109757423401,"y_max":0.9968345165252686,"y_min":0.7712376117706299},"confidence":0.93408203125,"label":"person","label_id":1},"h":97,"roi_type":"person","w":51,"x":525,"y":333}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":1916666666}
{"objects":[{"detection":{"bounding_box":{"x_max":0.7554543018341064,"x_min":0.6827328205108643,"y_max":0.9928492903709412,"y_min":0.7551988959312439},"confidence":0.92578125,"label":"person","label_id":1},"h":103,"roi_type":"person","w":56,"x":524,"y":326}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":2000000000}

@@ -64,6 +68,8 @@ Some of the common video sources are:
* IP Camera (RTSP Source)
* Web Camera

+> Note: See [Source Abstraction](./defining_pipelines.md#source-abstraction) to learn how GStreamer source elements are set per request.
+
### File Source
The following example shows a media `source` from a video file in GitHub:

@@ -122,26 +128,15 @@ The request `source` object would be updated to:
```
### Web Camera Source
-Web cameras accessible through the `Video4Linux` api and device drivers can be referenced using the `v4l2` uri scheme. `v4l2` uris have the format: `v4l2:///dev/<device>` where `<device>` is the path of the `v4l2` device, typically `video<N>`.
+Web cameras accessible through the `Video4Linux` API and device drivers are supported via `type=webcam`. `device` is the path of the `v4l2` device, typically `video<N>`.
-Depending on the default output of the `v4l2` device, the pipeline may need additional elements to convert the output to a format that gvadetect can process.
-
-Following is an example of a pipeline with videoconvert to handle format conversion:
-
-```json
-"template": ["uridecodebin name=source ! videoconvert",
- " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection",
- " ! gvametaconvert name=metaconvert ! gvametapublish name=destination",
- " ! appsink name=appsink"
- ],
-```

```bash
curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \
'Content-Type: application/json' -d \
'{
 "source": {
-  "uri": "v4l2:///dev/video0",
-  "type": "uri"
+  "device": "/dev/video0",
+  "type": "webcam"
 },
 "destination": {
  "metadata": {
@@ -153,6 +148,40 @@ curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \
}'
```

+## Setting source properties
+For any of the sources mentioned above, it is possible to set properties on the source element via the request.
+
+### Setting a property on source bin element
+For example, to set the property `buffer-size` on urisourcebin, the source section can be set as follows:
+```json
+{
+ "source": {
+  "uri": "file:///tmp/person-bicycle-car-detection.mp4",
+  "type": "uri",
+  "properties": {
+   "buffer-size": 4096
+  }
+ }
+}
+```
+
+### Setting a property on underlying element
+For example, you can set the `ntp-sync` property of the `rtspsrc` element to synchronize timestamps across RTSP sources.
+
+> Note: This feature, enabled via the GStreamer `source-setup` callback signal, is only supported for the `urisourcebin` element.
+
+```json
+{
+ "source": {
+  "uri": "rtsp://<host>:<port>/<path>",
+  "type": "uri",
+  "properties": {
+   "ntp-sync": true
+  }
+ }
+}
+```
+
## Destination
Pipelines can be configured to output `frames`, `metadata` or both. The destination object within the request contains sections to configure each.

@@ -196,11 +225,12 @@ The following are available properties:
- host (required) expects a format of host:port
- topic (required) MQTT topic on which broker messages are sent
- timeout (optional) Broker timeout
+- mqtt-client-id (optional) Unique identifier for the MQTT client

Steps to run MQTT:
1. Start the MQTT broker; here we use [Eclipse Mosquitto](https://hub.docker.com/_/eclipse-mosquitto/), an open source message broker.
   ```bash
-   docker run --network=host -d eclipse-mosquitto:1.6
+   docker run --network=host eclipse-mosquitto:1.6
   ```
2. Start VA Serving with host network enabled
   ```bash
@@ -219,14 +249,15 @@ Steps to run MQTT:
      "metadata": {
       "type": "mqtt",
       "host": "localhost:1883",
-       "topic": "vaserving"
+       "topic": "vaserving",
+       "mqtt-client-id": "gva-meta-publish"
      }
     }
   }'
   ```
4. Connect to the MQTT broker to view inference results
   ```bash
-   docker run -it --network=host --entrypoint mosquitto_sub eclipse-mosquitto:1.6 --topic vaserving
+   docker run -it --network=host --entrypoint mosquitto_sub eclipse-mosquitto:1.6 --topic vaserving --id mosquitto-sub
   ```

   ```bash
@@ -234,6 +265,93 @@ Steps to run MQTT:
   {"objects":[{"detection":{"bounding_box":{"x_max":0.3472719192504883,"x_min":0.12164716422557831,"y_max":1.0,"y_min":0.839308500289917},"confidence":0.6197869777679443,"label":"vehicle","label_id":2},"h":69,"roi_type":"vehicle","w":173,"x":93,"y":363}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":14333333333}
   {"objects":[{"detection":{"bounding_box":{"x_max":0.3529694750905037,"x_min":0.12145502120256424,"y_max":1.0,"y_min":0.8094810247421265},"confidence":0.7172137498855591,"label":"vehicle","label_id":2},"h":82,"roi_type":"vehicle","w":178,"x":93,"y":350}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":14416666666}
   ```
+5. In the MQTT broker terminal, you should see the connection from the client with the specified `mqtt-client-id`:
+   ```
+
+   1632949258: New connection from 127.0.0.1 on port 1883.
+   1632949258: New client connected from 127.0.0.1 as gva-meta-publish (p2, c1, k20).
+   1632949271: New connection from 127.0.0.1 on port 1883.
+   1632949271: New client connected from 127.0.0.1 as mosquitto-sub (p2, c1, k60).
+   1632949274: Client gva-meta-publish disconnected.
+   ```
+
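Putting the optional fields together, a metadata destination that sets every documented MQTT property might look like the following sketch (the `timeout` value is illustrative):

```json
{
 "destination": {
  "metadata": {
   "type": "mqtt",
   "host": "localhost:1883",
   "topic": "vaserving",
   "timeout": 1000,
   "mqtt-client-id": "gva-meta-publish"
  }
 }
}
```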
+#### Kafka
+The following are available properties:
+- type : "kafka"
+- host (required) expects a format of host:port
+- topic (required) Kafka topic on which broker messages are sent
+
+Steps to run Kafka:
+1. Prepare to run a Kafka broker. Since Kafka relies on ZooKeeper for management, let's create `docker-compose-kafka.yml` with the following content:
+
+   ```yaml
+   version: "2"
+   services:
+     zookeeper:
+       image: docker.io/bitnami/zookeeper:3.7
+       ports:
+         - "2181:2181"
+       volumes:
+         - "zookeeper_data:/bitnami"
+       environment:
+         - ALLOW_ANONYMOUS_LOGIN=yes
+     kafka:
+       image: docker.io/bitnami/kafka:2
+       ports:
+         - "9092:9092"
+       volumes:
+         - "kafka_data:/bitnami"
+       environment:
+         - KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE=true
+         - KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
+         - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://localhost:9092
+         - ALLOW_PLAINTEXT_LISTENER=yes
+       depends_on:
+         - zookeeper
+   volumes:
+     zookeeper_data:
+       driver: local
+     kafka_data:
+       driver: local
+   ```
+
+2. Run the following command to launch the Kafka broker as a detached service:
+   ```bash
+   docker-compose -p vaserving -f docker-compose-kafka.yml up -d
+   ```
+
+3. Start VA Serving with host network enabled:
+   ```bash
+   docker/run.sh -v /tmp:/tmp --network host
+   ```
+
+4. Launch the pipeline with parameters to emit on the Kafka topic we are listening for:
+   ```
+   ./vaclient/vaclient.sh start object_detection/person_vehicle_bike \
+   https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true \
+   --destination type kafka \
+   --destination host localhost \
+   --destination port 9092 \
+   --destination topic vaserving.person_vehicle_bike
+   ```
+
+5. Connect to the Kafka broker to view inference results:
+   ```bash
+   docker exec -it vaserving_kafka_1 /opt/bitnami/kafka/bin/kafka-console-consumer.sh \
+   --bootstrap-server localhost:9092 --topic vaserving.person_vehicle_bike
+   ```
+
+   ```bash
+   {"objects":[{"detection":{"bounding_box":{"x_max":0.7448995113372803,"x_min":0.6734093427658081,"y_max":0.9991495609283447,"y_min":0.8781012296676636},"confidence":0.5402464866638184,"label":"person","label_id":1},"h":52,"roi_type":"person","w":55,"x":517,"y":379}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":1500000000}
+   {"objects":[{"detection":{"bounding_box":{"x_max":0.7442193031311035,"x_min":0.6763269901275635,"y_max":1.0,"y_min":0.8277983069419861},"confidence":0.5505848526954651,"label":"person","label_id":1},"h":74,"roi_type":"person","w":52,"x":519,"y":358}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":1666666666}
+   {"objects":[{"detection":{"bounding_box":{"x_max":0.7465137243270874,"x_min":0.6821863651275635,"y_max":1.0,"y_min":0.810469388961792},"confidence":0.6447391510009766,"label":"person","label_id":1},"h":82,"roi_type":"person","w":49,"x":524,"y":350}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":1750000000}
+   {"objects":[{"detection":{"bounding_box":{"x_max":0.7481285929679871,"x_min":0.6836653351783752,"y_max":0.9999656677246094,"y_min":0.7867168188095093},"confidence":0.8825281858444214,"label":"person","label_id":1},"h":92,"roi_type":"person","w":50,"x":525,"y":340}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":1833333333}
+   ```
+
+6. When finished, remember to stop the running containers:
+   ```bash
+   docker-compose -p vaserving -f docker-compose-kafka.yml down
+   ```
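The same destination can also be supplied directly over REST. The sketch below mirrors the vaclient flags from step 4 using the curl request shape shown earlier; per the property list, `host` combines the host and port values:

```bash
# Sketch: Kafka metadata destination expressed as a raw REST request,
# mirroring the vaclient flags used in step 4 above.
curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \
'Content-Type: application/json' -d \
'{
 "source": {
  "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true",
  "type": "uri"
 },
 "destination": {
  "metadata": {
   "type": "kafka",
   "host": "localhost:9092",
   "topic": "vaserving.person_vehicle_bike"
  }
 }
}'
```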
### Frame
Frame is another aspect of the destination; it can be set to RTSP.
@@ -242,6 +360,10 @@ Frame is another aspect of destination and it can be set to RTSP.
#### RTSP
RTSP is one supported type of frame destination. The following are available properties:
- type : "rtsp"
- path (required): custom string to uniquely identify the stream
+- cache-length (default 30): number of frames to buffer in the RTSP pipeline.
+- encoding-quality (default 85): JPEG encoding quality (0 - 100). Lower values increase compression but sacrifice quality.
+- sync-with-source (default True): rate-limit the processing pipeline to the encoded frame rate (e.g. 30 fps).
+- sync-with-destination (default True): block the processing pipeline if the RTSP pipeline is blocked.

A combined `frame` section using these properties is shown below.
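For instance, a `frame` section that buffers more frames and compresses more aggressively than the defaults might look like the following sketch (values are illustrative; the `path` reuses the `person-detection` example from the RTSP re-streaming section):

```json
{
 "destination": {
  "frame": {
   "type": "rtsp",
   "path": "person-detection",
   "cache-length": 60,
   "encoding-quality": 70,
   "sync-with-source": true,
   "sync-with-destination": true
  }
 }
}
```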
For more information, see [RTSP re-streaming](running_video_analytics_serving.md#real-time-streaming-protocol-rtsp-re-streaming)

diff --git a/docs/defining_pipelines.md b/docs/defining_pipelines.md
index 64058e6..9c69622 100644
--- a/docs/defining_pipelines.md
+++ b/docs/defining_pipelines.md
@@ -1,5 +1,5 @@
# Defining Media Analytics Pipelines
-| [Pipeline Definition Files](#pipeline-definition-files) | [Pipeline Discovery](#how-pipeline-definition-files-are-discovered-and-loaded) | [Pipeline Templates](#pipeline-templates) | [Pipeline Parameters](#pipeline-parameters) | [Deep Learning Models](#deep-learning-models) |
+| [Pipeline Definition Files](#pipeline-definition-files) | [Pipeline Discovery](#how-pipeline-definition-files-are-discovered-and-loaded) | [Pipeline Templates](#pipeline-templates) | [Source Abstraction](#source-abstraction) | [Pipeline Parameters](#pipeline-parameters) | [Deep Learning Models](#deep-learning-models) |

Media analytics pipelines are directed graphs of audio/video
processing, computer vision, and deep learning inference
@@ -95,6 +95,132 @@ the calling application.
 ]
```
+#### Source Abstraction
+`{auto_source}` is a virtual source that is updated with the appropriate GStreamer element and properties at request time.
+The GStreamer element is chosen based on the `type` specified in the source section of the request (shown below), making pipelines flexible as they can be reused for source media of different types.
+
+**Sample video pipeline**
+```
+"template": ["{auto_source}",
+ " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection",
+ " ! gvametaconvert name=metaconvert ! gvametapublish name=destination",
+ " ! appsink name=appsink"
+ ]
+```
+
+**Sample audio pipeline**
+```
+"template": ["{auto_source} ! audioresample ! audioconvert",
+ " ! audio/x-raw, channels=1,format=S16LE,rate=16000 ! audiomixer name=audiomixer",
+ " ! level name=level",
+ " ! gvaaudiodetect model={models[audio_detection][environment][network]} name=detection",
+ " ! gvametaconvert name=metaconvert ! gvametapublish name=destination",
+ " ! appsink name=appsink"
+ ],
+```
+
+| Source | GStreamer Element | Source section of curl request | Source pipeline snippet |
+| ------ | ----------------- | ------------------------------ | ----------------------- |
+| Application | `appsrc` | N/A | N/A |
+| File | `urisourcebin` | `"source": { "uri": "file://path", "type": "uri" }` | `urisourcebin uri=file://path name=source` |
+| RTSP | `urisourcebin` | `"source": { "uri": "rtsp://url", "type": "uri" }` | `urisourcebin uri=rtsp://url name=source` |
+| URL | `urisourcebin` | `"source": { "uri": "https://url", "type": "uri" }` | `urisourcebin uri=https://url name=source` |
+| Web camera | `v4l2src` | `"source": { "device": "/dev/video0", "type": "webcam" }` | `v4l2src device=/dev/video0 name=source ! capsfilter caps="image/jpeg"` |
+| Custom GStreamer Element | Value specified in the `element` field of the source request | `"source": { "element": "<element name>", "type": "gst" }` — e.g. a microphone for an audio pipeline: `"source": { "element": "alsasrc", "type": "gst", "properties": { "device": "hw:1,0" } }` | `alsasrc device=hw:1,0 name=source` |
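As an end-to-end illustration, the microphone row above could be exercised with a request like the following. This is a sketch only: it assumes the default `audio_detection/environment` pipeline and the file destination shape used in earlier examples.

```bash
# Sketch: a "gst" source (alsasrc microphone) driving the audio pipeline.
# Pipeline name and destination shape are assumed from earlier examples.
curl localhost:8080/pipelines/audio_detection/environment -X POST -H \
'Content-Type: application/json' -d \
'{
 "source": {
  "element": "alsasrc",
  "type": "gst",
  "properties": {
   "device": "hw:1,0"
  }
 },
 "destination": {
  "metadata": {
   "type": "file",
   "path": "/tmp/results.jsonl",
   "format": "json-lines"
  }
 }
}'
```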
+
+> Note: For a request of `type=gst`, the container must support the corresponding element.
+
+The source request accepts the following optional fields:
+- `capsfilter`: if set, it is applied right after the source element, as shown in the example below.
+  The default capsfilter for a webcam is `image/jpeg`, but it can be set via the request to another valid format.
+  ```json
+  "source": {
+   "device": "/dev/video0",
+   "type": "webcam",
+   "capsfilter": "video/x-h264"
+  }
+  ```
+  The source pipeline resolves to:
+  ```
+  v4l2src device=/dev/video0 name=source ! capsfilter caps=video/x-h264 ! ..
+  ```
+- `postproc`: if set, it is applied _after_ the source and the capsfilter element (if specified).
+  Below is an example using both `capsfilter` and `postproc`:
+  ```json
+  "source": {
+   "element": "videotestsrc",
+   "type": "gst",
+   "capsfilter": "video/x-raw,format=GRAY8",
+   "postproc": "rawvideoparse",
+   "properties": {
+    "pattern": "snow"
+   }
+  }
+  ```
+  The source pipeline resolves to:
+  ```
+  videotestsrc name=source ! capsfilter caps=video/x-raw,format=GRAY8 ! rawvideoparse ! ..
+  ```
+
#### Element Names

Each element in a GStreamer pipeline has a name that is either
@@ -362,6 +488,41 @@ The JSON schema for a GStreamer pipeline parameter can include an
 }
 }
```
+
+1. **Object** with dictionary of properties.
A dictionary specifying properties that apply to a pipeline element by name. + + **Example:** + + The following snippet defines `detection-properties` which can be used to pass + GStreamer element properties for the `detection` element without explicitly defining each one. + > **Note:** The property names are expected to match the GStreamer properties for the corresponding element. + + ```json + "parameters": { + "type": "object", + "detection-properties" : { + "element": { + "name": "detection", + "property": "properties" + } + } + } + ``` + + Pipeline Request + ```json + "source": { + "uri":"file:///temp.mp4", + "type": "uri" + }, + "parameters" : { + "detection-properties": { + "threshold": 0.1, + "device": "CPU" + } + } + ``` + #### Parameters and FFmpeg Filters Parameters in FFmpeg pipeline definitions can include information on @@ -431,9 +592,6 @@ The JSON schema for a FFmpeg pipeline parameter can include a } ``` - - - ### Parameter Resolution in Pipeline Templates Parameters passed in through a request are resolved in a pipeline @@ -524,7 +682,10 @@ Pipeline Parameters: "properties": { "scale_method": { "type": "string", - "element": { "name": "videoscale", "property": "method" }, + "element": { + "name": "videoscale", + "property": "method" + }, "enum": ["nearest-neighbour","bilinear"], "default": "bilinear" } @@ -556,7 +717,6 @@ Parameter Resolution: " ! video/x-raw,height=300,width=300" \ " ! appsink name=appsink" ``` - ### Reserved Parameters The following parameters have built-in handling within the Video diff --git a/docs/run_script_reference.md b/docs/run_script_reference.md index 17f21a3..5e6d1e4 100644 --- a/docs/run_script_reference.md +++ b/docs/run_script_reference.md @@ -4,7 +4,9 @@ The `run.sh` script passes common options to the underlying `docker run` command Use the --help option to see how to use the script. All arguments are optional. ``` -$ docker/run.sh --help +docker/run.sh --help +``` +``` usage: run.sh [--image image] [--framework ffmpeg || gstreamer] @@ -82,6 +84,6 @@ The following parameters simply map to docker run arguments: [--network additional network] [--user to pass to docker run] [--group-add to pass to docker run] - [--name to pass to docker run] - [--device to pass to docker run] + [--name to pass to docker run] + [--device to pass to docker run] ``` diff --git a/docs/running_video_analytics_serving.md b/docs/running_video_analytics_serving.md index 67ba9c7..17e103d 100644 --- a/docs/running_video_analytics_serving.md +++ b/docs/running_video_analytics_serving.md @@ -64,14 +64,14 @@ provided utility script. ### DL Streamer based Microservice **Example:** ```bash -$ docker/run.sh -v /tmp:/tmp +docker/run.sh -v /tmp:/tmp ``` ### FFmpeg Video Analytics based Microservice **Example:** ```bash -$ docker/run.sh --framework ffmpeg -v /tmp:/tmp +docker/run.sh --framework ffmpeg -v /tmp:/tmp ``` ## Issuing Requests @@ -83,7 +83,9 @@ microservice. 
**Example:**
> **Note:** In this example we assume you are running the FFmpeg Video Analytics based Microservice
```bash
-$ curl localhost:8080/pipelines
+curl localhost:8080/pipelines
+```
+```
[
 {
  "description": "Object Detection Pipeline",
@@ -121,7 +123,11 @@ curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \
  }
 }
}'
-$ tail -f /tmp/results.txt
+```
+```
+tail -f /tmp/results.txt
+```
+```
{"objects":[{"detection":{"bounding_box":{"x_max":0.0503933560103178,"x_min":0.0,"y_max":0.34233352541923523,"y_min":0.14351698756217957},"confidence":0.6430817246437073,"label":"vehicle","label_id":2},"h":86,"roi_type":"vehicle","w":39,"x":0,"y":62}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":49250000000}
```

@@ -137,13 +143,13 @@ kill` commands with the name of the running container.

**Example:**
```bash
-$ docker stop video-analytics-serving-gstreamer
+docker stop video-analytics-serving-gstreamer
```

### FFmpeg Video Analytics based Microservice
**Example:**
```bash
-$ docker stop video-analytics-serving-ffmpeg
+docker stop video-analytics-serving-ffmpeg
```

# Real Time Streaming Protocol (RTSP) Re-streaming
@@ -153,7 +159,7 @@ VA Serving contains an [RTSP](https://en.wikipedia.org/wiki/Real_Time_Streaming_

### Enable RTSP in service
```bash
-$ docker/run.sh --enable-rtsp
+docker/run.sh --enable-rtsp
```
> **Note:** RTSP server starts at service start-up for all pipelines. It uses port 8554 and has been tested with [VLC](https://www.videolan.org/vlc/index.html).

@@ -186,10 +192,19 @@ curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \
* Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/person-detection`.

### RTSP destination params.
+
+> **Note:** If the RTSP stream playback is choppy, this may be due to
+> network bandwidth. Decreasing the encoding-quality or increasing the
+> cache-length can help.
+
```bash
 "frame": {
  "type": "rtsp",
-  "path" : (required. When path already exists, throws error)
+  "path" : (required. When path already exists, throws error),
+  "cache-length": (default 30) number of frames to buffer in the RTSP pipeline.
+  "encoding-quality": (default 85) JPEG encoding quality (0 - 100). Lower values increase compression but sacrifice quality.
+  "sync-with-source": (default True) process media at the encoded frame rate (e.g. 30 fps)
+  "sync-with-destination": (default True) block the processing pipeline if the RTSP pipeline is blocked.
 }
```

@@ -210,7 +225,7 @@ appropriate directories when starting the container.

**Example:**
```bash
-$ ./docker/run.sh --framework gstreamer --pipelines /path/to/my-pipelines --models /path/to/my-models
+./docker/run.sh --framework gstreamer --pipelines /path/to/my-pipelines --models /path/to/my-models
```
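The mounted directories are expected to mirror the layout of the built-in `pipelines` and `models` folders, with a definition at `<name>/<version>/pipeline.json` (as in the earlier `cp -r pipelines/gstreamer/object_detection/...` example). A sketch with illustrative names:

```
my-pipelines/
└── object_detection/
    └── my_version/
        └── pipeline.json
```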
# Enabling Hardware Accelerators
@@ -232,28 +247,33 @@ The following table shows docker configuration and inference device name for
> **Note:** Open Visual Cloud base images only support the GPU accelerator.
> OpenVINO base images support all accelerators.

-|Accelerator|Docker Device|Volume Mount |CGroup Rule|Inference Device|
-|-----------|-------------|--------------|-----------|----------------|
-| GPU | /dev/dri | | | GPU |
-| NCS2 | | /dev/bus/usb |c 189:* rmw| MYRIAD |
-| HDDL-R | /dev/ion | /var/tmp | | HDDL |
+|Accelerator| Device | Volume Mount(s) |CGroup Rule|Inference Device|
+|-----------|-------------|------------------- |-----------|----------------|
+| GPU | /dev/dri | | | GPU |
+| NCS2 | | /dev/bus/usb |c 189:* rmw| MYRIAD |
+| HDDL-R | | /var/tmp, /dev/shm | | HDDL |
+
+> **Note:** NCS2 and HDDL-R accelerators are incompatible and cannot be used on the same system.

-## Specific Instructions for NCS2
+## GPU
+The first time inference is run on a GPU there will be a 30s delay while OpenCL kernels are built for the specific device. To prevent the same delay from occurring on subsequent runs, a [model instance id](docs/defining_pipelines.md#model-persistance-in-openvino-gstreamer-elements) can be specified in the request.

-### User Permissions
-NCS2 accelerators require users to have special permissions for hardware access. To configure your system please follow the steps outlined in the OpenVINO [documentation](https://docs.openvinotoolkit.org/latest/openvino_docs_install_guides_installing_openvino_linux.html#additional-NCS-steps)
+On Ubuntu20 and later hosts [extra configuration](https://github.com/openvinotoolkit/docker_ci/blob/master/configure_gpu_ubuntu20.md), not shown in the above table, is necessary to allow access to the GPU. The [docker/run.sh](../docker/run.sh) script takes care of this for you, but other deployments will have to be updated accordingly.
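As a sketch, a request could target the GPU and pin a model instance id via vaclient. The `--parameter` flag and the parameter names below (`detection-device`, `detection-model-instance-id`) are assumptions for illustration; the names actually accepted depend on the pipeline definition, so consult it and the vaclient README.

```bash
# Sketch: GPU inference with a model instance id so OpenCL kernels are
# built once and reused. Parameter names are illustrative and must match
# the pipeline definition's parameters.
./vaclient/vaclient.sh run object_detection/person_vehicle_bike \
  https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true \
  --parameter detection-device GPU \
  --parameter detection-model-instance-id person_vehicle_bike_gpu
```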
+
+## NCS2
+
+Configure your host by following the steps outlined in the OpenVINO [documentation](https://docs.openvinotoolkit.org/latest/openvino_docs_install_guides_installing_openvino_linux.html#additional-NCS-steps)

> **Note:** These steps require the file `97-myriad-usbboot.rules` which can be extracted from the Video Analytics Serving docker container using the following command:
->
-> ```bash
-> ./docker/run.sh -v ${PWD}:/tmp --entrypoint cp --entrypoint-args "/opt/intel/openvino_2021/inference_engine/external/97-myriad-usbboot.rules /tmp"
-> ```
->
+```bash
+./docker/run.sh -v ${PWD}:/tmp --entrypoint cp --entrypoint-args "/opt/intel/openvino/inference_engine/external/97-myriad-usbboot.rules /tmp"
+```
> Once extracted, the file will be in the current directory. Follow the instructions given in the OpenVINO documentation to copy it to the correct location.

-### Limitations
-DL Streamer pipelines can only target a single neural network model to each NCS2 accelerator in a system. For pipelines that contain multiple models
-(for example, [object_classification](/pipelines/gstreamer/object_classification/vehicle_attributes/pipeline.json)), only a single element can have its device property set to MYRIAD. Other elements in the pipeline must target other accelerators (for example, CPU, GPU). In the case the system has `N` NCS2 accelerators available then up to `N` elements can have their device property set to MYRIAD.
+## HDDL-R
+Configure your host by downloading the [HDDL driver package](https://storage.openvinotoolkit.org/drivers/vpu/hddl/2021.4.2/hddl_ubuntu20_1886.tgz), then installing dependencies and running the hddldaemon on the host as per the [HDDL install guide](https://github.com/openvinotoolkit/docker_ci/blob/releases/2021/4/install_guide_vpu_hddl.md).
+
+> The HDDL plug-in in the container communicates with the daemon on the host, so the daemon must be started before running the container.

# Developer Mode
@@ -280,7 +300,9 @@ Developer mode:

**Example:**
```bash
-$ docker/run.sh --dev
+docker/run.sh --dev
+```
+```
vaserving@my-host:~$ python3 -m vaserving
```

diff --git a/extensions/gva_event_meta/gva_event_convert.py b/extensions/gva_event_meta/gva_event_convert.py
index 0c93b90..adf9cdc 100644
--- a/extensions/gva_event_meta/gva_event_convert.py
+++ b/extensions/gva_event_meta/gva_event_convert.py
@@ -14,8 +14,8 @@ def process_frame(frame):
 try:
 add_events_message(frame)
 except Exception as error:
- logger.error(error)
- return False
+ logger.error(error)
+ return False
 return True

def add_events_message(frame):
diff --git a/extensions/gva_event_meta/schema.py b/extensions/gva_event_meta/schema.py
index 61cf2be..0ce2054 100644
--- a/extensions/gva_event_meta/schema.py
+++ b/extensions/gva_event_meta/schema.py
@@ -1,23 +1,24 @@
-{
- "$schema": "https://json-schema.org/draft/2019-09/schema",
- "type": "array",
- "items": {
- "properties": {
- "event-type": {
- "description": "Event type, known by caller",
- "type": "string"
+gva_event_schema = {
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "type": "array",
+ "items": {
+ "properties": {
+ "event-type": {
+ "description": "Event type, known by caller",
+ "type": "string"
+ },
+ "related-objects": {
+ "description": ("Array of related detections, each refers "
+ "to index of associated detected object"),
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
 },
- "related-objects": {
- "description": "Array of related detections, each entry refers to index of associated detected object",
- "type": "array",
- "items": {
- "type": "integer"
- }
- },
- },
- "required": [
- "event-type"
- ],
- "additionalProperties": True
+ "required": [
+ "event-type"
+ ],
+ "additionalProperties": True
+ }
}
-}
diff --git a/extensions/spatial_analytics/object_line_crossing.md b/extensions/spatial_analytics/object_line_crossing.md
index fb5a677..4f0f349 100644
--- a/extensions/spatial_analytics/object_line_crossing.md
+++ b/extensions/spatial_analytics/object_line_crossing.md
@@ -63,3 +63,49 @@ JSON example is shown below

## Line Crossing Algorithm
The algorithm to calculate line crossing is based on the following article:
https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
+
+## Example Run
+VA Serving comes with an [example configuration](../../vaclient/parameter_files/object-line-crossing.json) for object-line-crossing.
+
+1. [Build](../../README.md#building-the-microservice) & [Run](../../README.md#running-the-microservice) VA Serving
+
+2. Run the object-line-crossing pipeline with vaclient using the example parameter file:
+   ```
+   vaclient/vaclient.sh run object_tracking/object_line_crossing https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=true --parameter-file vaclient/parameter_files/object-line-crossing.json
+   ```
+   You will see events among the detections in vaclient output:
+   ```
+   Timestamp 43916666666
+   - person (1.00) [0.38, 0.47, 0.60, 0.91] {'id': 7}
+   Timestamp 44000000000
+   - person (1.00) [0.36, 0.38, 0.58, 0.81] {'id': 7}
+   Event: event-type: object-line-crossing, line-name: hallway_bottom, related-objects: [0], directions: ['counterclockwise'], clockwise-total: 1, counterclockwise-total: 8, total: 9
+   Timestamp 44083333333
+   - person (0.92) [0.38, 0.29, 0.58, 0.98] {'id': 7}
+   Event: event-type: object-line-crossing, line-name: hallway_bottom, related-objects: [0], directions: ['clockwise'], clockwise-total: 2, counterclockwise-total: 8, total: 10
+   Timestamp 44166666666
+   - person (0.99) [0.38, 0.31, 0.57, 0.98] {'id': 7}
+   ```
+
+## Watermark Example
+1. Open the [example configuration](../../vaclient/parameter_files/object-line-crossing.json) and add `enable_watermark` as follows:
+   ```
+   "object-line-crossing-config": {
+      "lines": [
+
+      ],
+      "enable_watermark": true
+   }
+   ```
+2. Rebuild and Run VA Serving with the additional flag `--enable-rtsp`:
+   ```
+   ./docker/run.sh -v /tmp:/tmp --enable-rtsp
+   ```
+
+3. Run the object-line-crossing pipeline with vaclient using the example parameter file and the additional parameter `rtsp-path`. Note that `rtsp-path` is set to `vaserving`; this path is used to view the RTSP stream:
+   ```
+   vaclient/vaclient.sh run object_tracking/object_line_crossing https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=true --parameter-file vaclient/parameter_files/object-line-crossing.json --rtsp-path vaserving
+   ```
+
+4. Open a media player with network stream viewing (VLC for example) and connect to `rtsp://<host>:8554/vaserving`. The stream is real time, so you may need to rerun the pipeline request to see the stream. You will see people-detection.mp4 with an overlay of points. Each line `x` has a start point (`x_Start`) and an end point (`x_End`). At the midpoint between start and end, a count displays how many objects have crossed the line.
+ + ![object_line_crossing_watermark](object_line_crossing_watermark.png) \ No newline at end of file diff --git a/extensions/spatial_analytics/object_line_crossing.py b/extensions/spatial_analytics/object_line_crossing.py index 41f8d01..090e038 100644 --- a/extensions/spatial_analytics/object_line_crossing.py +++ b/extensions/spatial_analytics/object_line_crossing.py @@ -24,14 +24,16 @@ class ObjectLineCrossing: # pylint: disable=too-few-public-methods - def __init__(self, lines=[], enable_watermark=False, log_level="INFO"): + def __init__(self, lines=None, enable_watermark=False, log_level="INFO"): self._detected_objects = {} self._lines = [] self._enable_watermark = enable_watermark logger.log_level = log_level if self._enable_watermark and os.getenv("ENABLE_RTSP") != "true": logger.warning("RTSP output is not enabled by environment variable.") - + if not lines: + logger.warning("No line configuration was supplied to ObjectLineCrossing.") + return for line in lines: try: self._lines.append(SpatialAnalysisCrossingLine(line)) @@ -39,7 +41,7 @@ def __init__(self, lines=[], enable_watermark=False, log_level="INFO"): logger.error(error) logger.error("Exception creating SpatialAnalysisCrossingLine: {}".format(line)) if not self._lines: - logger.warn("Empty line configuration. No lines to check against.") + raise Exception('Empty line configuration. No lines to check against.') def process_frame(self, frame): try: @@ -88,7 +90,7 @@ def _add_point(self, frame, point, label): tensor.set_name("watermark_region") def _add_watermark(self, frame): - for index in range(0, len(self._lines)): + for index, _ in enumerate(self._lines): self._add_point(frame, self._lines[index].line_segment.start_point, "{}_Start".format(index)) self._add_point(frame, self._lines[index].line_segment.end_point, "{}_End".format(index)) self._add_point(frame, self._lines[index].get_segment_midpoint(), @@ -127,8 +129,8 @@ def __init__(self, line): if 'focus' in line: try: self._focus_point = self.FocusPoint[line['focus']] - except: - raise ValueError('Got invalid focus point: {}'.format(line['focus'])) + except Exception as exception: + raise ValueError('Got invalid focus point: {}'.format(line['focus'])) from exception def detect_line_crossing(self, previous_position, current_position): previous_position_point = self._get_focus_point(previous_position) diff --git a/extensions/spatial_analytics/object_line_crossing_watermark.png b/extensions/spatial_analytics/object_line_crossing_watermark.png new file mode 100644 index 0000000..ae1b548 Binary files /dev/null and b/extensions/spatial_analytics/object_line_crossing_watermark.png differ diff --git a/extensions/spatial_analytics/object_zone_count.md b/extensions/spatial_analytics/object_zone_count.md index 6af5085..34ac6cd 100644 --- a/extensions/spatial_analytics/object_zone_count.md +++ b/extensions/spatial_analytics/object_zone_count.md @@ -55,3 +55,45 @@ If a tracked object crosses any of the lines, an event of type `object-zone-coun "zone-count" : 3 } ``` +## Example Run +VA Serving comes with an [example configuration](../../vaclient/parameter_files/object-zone-count.json) for object-zone-count + +1. [Build](../../README.md#building-the-microservice) & [Run](../../README.md#running-the-microservice) VA Serving + +2. 
Run the object-zone-count pipeline with vaclient using the example parameter file:
+   ```
+   vaclient/vaclient.sh run object_detection/object_zone_count https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=true --parameter-file vaclient/parameter_files/object-zone-count.json
+   ```
+   You will see events among the detections in vaclient output:
+   ```
+   Timestamp 45000000000
+   - person (0.76) [0.28, 0.15, 0.42, 0.72]
+   Event: event-type: object-zone-count, zone-name: Zone2, related-objects: [0], status: ['intersects'], zone-count: 1
+   Event: event-type: object-zone-count, zone-name: Zone3, related-objects: [0], status: ['intersects'], zone-count: 1
+   Timestamp 45166666666
+   - person (0.57) [0.24, 0.16, 0.35, 0.69]
+   Event: event-type: object-zone-count, zone-name: Zone2, related-objects: [0], status: ['intersects'], zone-count: 1
+   ```
+
+## Watermark Example
+1. Open the [example configuration](../../vaclient/parameter_files/object-zone-count.json) and add `enable_watermark` as follows:
+   ```
+   "object-zone-count-config": {
+      "zones": [
+
+      ],
+      "enable_watermark": true
+   }
+   ```
+2. Rebuild and Run VA Serving with the additional flag `--enable-rtsp`:
+   ```
+   ./docker/run.sh -v /tmp:/tmp --enable-rtsp
+   ```
+
+3. Run the object-zone-count pipeline with vaclient using the example parameter file and the additional parameter `rtsp-path`. Note that `rtsp-path` is set to `vaserving`; this path is used to view the RTSP stream:
+   ```
+   vaclient/vaclient.sh run object_detection/object_zone_count https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=true --parameter-file vaclient/parameter_files/object-zone-count.json --rtsp-path vaserving
+   ```
+
+4. Open a media player with network stream viewing (VLC for example) and connect to `rtsp://<host>:8554/vaserving`. The stream is real time, so you might want to set up your media player ahead of time. You will see people-detection.mp4 with an overlay of points. Each zone has a start point labeled with the zone name. Other points of the zone are not labeled. If an object `intersects` or is `within` a zone, the label is updated to reflect that.
+
+   ![object_zone_count_watermark](object_zone_count_watermark.png)
\ No newline at end of file
diff --git a/extensions/spatial_analytics/object_zone_count.py b/extensions/spatial_analytics/object_zone_count.py
index 06bc54e..addc2d2 100644
--- a/extensions/spatial_analytics/object_zone_count.py
+++ b/extensions/spatial_analytics/object_zone_count.py
@@ -20,14 +20,17 @@ class ObjectZoneCount:
 DEFAULT_DETECTION_CONFIDENCE_THRESHOLD = 0.0

 # Caller supplies one or more zones via request parameter
- def __init__(self, zones=[], enable_watermark=False, log_level="INFO"):
+ def __init__(self, zones=None, enable_watermark=False, log_level="INFO"):
 self._zones = []
 self._logger = logger
 self._logger.log_level = log_level
 self._enable_watermark = enable_watermark
+ if not zones:
+ logger.warning("No zone configuration was supplied to ObjectZoneCount.")
+ return
 self._zones = self._assign_defaults(zones)
 if not self._zones:
- logger.warn("Empty zone configuration. No zones to check against.")
+ raise Exception('Empty zone configuration. No zones to check against.')

 # Note that the pipeline already applies a pipeline-specific threshold value, but
 # this method serves as an example for handling optional zone-specific parameters.
diff --git a/extensions/spatial_analytics/object_zone_count_watermark.png b/extensions/spatial_analytics/object_zone_count_watermark.png new file mode 100644 index 0000000..e1d5650 Binary files /dev/null and b/extensions/spatial_analytics/object_zone_count_watermark.png differ diff --git a/pipelines/gstreamer/action_recognition/general/README.md b/pipelines/gstreamer/action_recognition/general/README.md index d0fb2e7..c4f9b8c 100644 --- a/pipelines/gstreamer/action_recognition/general/README.md +++ b/pipelines/gstreamer/action_recognition/general/README.md @@ -14,7 +14,7 @@ A detailed description can be found [here](https://github.com/openvinotoolkit/op A composite model is used, consisting of: -- [action-recognition-0001-encoder](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/action-recognition-0001/action-recognition-0001-encoder) +- [action-recognition-0001-encoder](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/action-recognition-0001/action-recognition-0001-encoder) - [action-recognition-0001-decoder](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/action-recognition-0001/action-recognition-0001-decoder) These are based on (400 actions) models for [Kinetics-400 dataset](https://deepmind.com/research/open-source/kinetics). @@ -89,8 +89,7 @@ The corresponding vaclient output resembles: For example: ```code -Starting pipeline... -Pipeline running: action_recognition/general, instance = 1 +Starting pipeline action_recognition/general, instance = 1 Timestamp 0 - surfing crowd (0.01) Timestamp 83333333 diff --git a/pipelines/gstreamer/action_recognition/general/pipeline.json b/pipelines/gstreamer/action_recognition/general/pipeline.json index 976ca84..98be7dd 100644 --- a/pipelines/gstreamer/action_recognition/general/pipeline.json +++ b/pipelines/gstreamer/action_recognition/general/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["uridecodebin name=source ! videoconvert ! video/x-raw,format=BGRx", + "template": ["{auto_source} ! decodebin ! videoconvert ! video/x-raw,format=BGRx", " ! gvaactionrecognitionbin enc-model={models[action_recognition][encoder][network]} dec-model={models[action_recognition][decoder][network]} model-proc={models[action_recognition][decoder][proc]} name=action_recognition", " ! gvametaconvert add-tensor-data=true name=metaconvert", " ! gvametapublish name=destination", diff --git a/pipelines/gstreamer/audio_detection/environment/pipeline.json b/pipelines/gstreamer/audio_detection/environment/pipeline.json index 1732c11..5dd6076 100755 --- a/pipelines/gstreamer/audio_detection/environment/pipeline.json +++ b/pipelines/gstreamer/audio_detection/environment/pipeline.json @@ -1,7 +1,7 @@ { "name": "audio_detection", "type": "GStreamer", - "template": ["uridecodebin name=source ! audioresample ! audioconvert", + "template": ["{auto_source} ! decodebin ! audioresample ! audioconvert", " ! audio/x-raw, channels=1,format=S16LE,rate=16000 ! audiomixer name=audiomixer", " ! level name=level", " ! 
gvaaudiodetect model={models[audio_detection][environment][network]} name=detection", diff --git a/pipelines/gstreamer/object_classification/vehicle_attributes/pipeline.json b/pipelines/gstreamer/object_classification/vehicle_attributes/pipeline.json index c87906e..f6e2b7b 100755 --- a/pipelines/gstreamer/object_classification/vehicle_attributes/pipeline.json +++ b/pipelines/gstreamer/object_classification/vehicle_attributes/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["uridecodebin name=source", + "template": ["{auto_source} ! decodebin", " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", " ! gvaclassify model={models[object_classification][vehicle_attributes][network]} name=classification", " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", diff --git a/pipelines/gstreamer/object_detection/app_src_dst/pipeline.json b/pipelines/gstreamer/object_detection/app_src_dst/pipeline.json index 1a182d9..d8e2c48 100644 --- a/pipelines/gstreamer/object_detection/app_src_dst/pipeline.json +++ b/pipelines/gstreamer/object_detection/app_src_dst/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["appsrc name=source", + "template": ["{auto_source} ! decodebin", " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", " ! appsink name=destination"], "description": "Person Vehicle Bike Detection based on person-vehicle-bike-detection-crossroad-0078", diff --git a/pipelines/gstreamer/object_detection/object_zone_count/pipeline.json b/pipelines/gstreamer/object_detection/object_zone_count/pipeline.json index 5757997..23ed3de 100644 --- a/pipelines/gstreamer/object_detection/object_zone_count/pipeline.json +++ b/pipelines/gstreamer/object_detection/object_zone_count/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["uridecodebin name=source", + "template": ["{auto_source} ! decodebin", " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", " ! gvapython name=object-zone-count class=ObjectZoneCount module=/home/video-analytics-serving/extensions/spatial_analytics/object_zone_count.py", " ! gvametaconvert name=metaconvert", diff --git a/pipelines/gstreamer/object_detection/person_vehicle_bike/pipeline.json b/pipelines/gstreamer/object_detection/person_vehicle_bike/pipeline.json index 9e11a87..b6acd6e 100755 --- a/pipelines/gstreamer/object_detection/person_vehicle_bike/pipeline.json +++ b/pipelines/gstreamer/object_detection/person_vehicle_bike/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["uridecodebin name=source", + "template": ["{auto_source} ! decodebin", " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", " ! appsink name=appsink" diff --git a/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json b/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json index 39f6a4a..9931c3e 100644 --- a/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json +++ b/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["uridecodebin name=source", + "template": ["{auto_source} ! decodebin", " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", " ! gvatrack name=tracking", " ! 
gvaclassify model={models[object_classification][vehicle_attributes][network]} name=classification", diff --git a/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json b/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json index b0881a8..9f4eaf7 100755 --- a/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json +++ b/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["uridecodebin name=source", + "template": ["{auto_source} ! decodebin", " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", " ! gvatrack name=tracking", " ! gvaclassify model={models[object_classification][vehicle_attributes][network]} name=classification", diff --git a/pipelines/gstreamer/video_decode/app_dst/pipeline.json b/pipelines/gstreamer/video_decode/app_dst/pipeline.json index 2a97174..137b265 100644 --- a/pipelines/gstreamer/video_decode/app_dst/pipeline.json +++ b/pipelines/gstreamer/video_decode/app_dst/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["uridecodebin name=source", + "template": ["{auto_source} ! decodebin", " ! appsink name=destination"], "description": "Decode Pipeline" diff --git a/requirements.service.txt b/requirements.service.txt index 1673a06..da33af9 100644 --- a/requirements.service.txt +++ b/requirements.service.txt @@ -1,8 +1,8 @@ -connexion == 2.6.0 +connexion == 2.8.0 swagger-ui-bundle == 0.0.5 python_dateutil == 2.8.0 setuptools >= 41.2.0 rfc3986-validator == 0.1.1 # uri uri-reference rfc3339-validator == 0.1.2 # date-time time tornado == 6.1 -paho-mqtt == 1.5.1 \ No newline at end of file +paho-mqtt == 1.5.1 diff --git a/samples/app_source_destination/README.md b/samples/app_source_destination/README.md index 0939de5..e2250cc 100644 --- a/samples/app_source_destination/README.md +++ b/samples/app_source_destination/README.md @@ -65,11 +65,15 @@ whose contents are displayed. To run, do the following: ``` -$ docker/build.sh - -$ docker/run.sh --dev - +docker/build.sh +``` +``` +docker/run.sh --dev +``` +``` openvino@host:~$ python3 samples/app_source_destination/app_source_destination.py +``` +``` {"levelname": "INFO", "asctime": "2021-04-09 05:24:43,626", "message": "Creating Instance of Pipeline object_detection/app_src_dst", "module": "pipeline_manager"} {"levelname": "INFO", "asctime": "2021-04-09 05:24:43,628", "message": "Creating Instance of Pipeline video_decode/app_dst", "module": "pipeline_manager"} {"levelname": "INFO", "asctime": "2021-04-09 05:24:43,908", "message": "Setting Pipeline 2 State to RUNNING", "module": "gstreamer_pipeline"} diff --git a/samples/ava_ai_extension/README.md b/samples/ava_ai_extension/README.md deleted file mode 100644 index 5489479..0000000 --- a/samples/ava_ai_extension/README.md +++ /dev/null @@ -1,524 +0,0 @@ -# OpenVINO™ DL Streamer – Edge AI Extension Module - -| [Getting Started](#getting-started) | [Edge AI Extension Module Options](#edge-ai-extension-module-options) | [Additional Examples](#additional-standalone-edge-ai-extension-examples) | [Spatial Analytics](#spatial-analytics-pipelines)| [Test Client](#test-client) | -[Changing Models](#updating-or-changing-detection-and-classification-models) - -The OpenVINO™ DL Streamer - Edge AI Extension module is a microservice based on [Video Analytics Serving](/README.md) that provides video analytics pipelines built with OpenVINO™ DL Streamer. 
Developers can send decoded video frames to the AI Extension module which performs detection, classification, or tracking and returns the results. The AI Extension module exposes [gRPC APIs](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/grpc-extension-protocol) that are compatible with [Azure Video Analyzer](https://azure.microsoft.com/en-us/products/video-analyzer/) (AVA). Powered by OpenVINO™ toolkit, the AI Extension module enables developers to build, optimize and deploy deep learning inference workloads for maximum performance across Intel® architectures. - -## Highlights - -- Spatial analytics features: [Object Line Crossing](#object-line-crossing) and [Object Zone Count](#object-zone-count) similar to [Azure Video Analyzer Spatial Analysis](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/computer-vision-for-spatial-analysis?tabs=azure-stack-edge) -- Scalable, high-performance solution for serving video analytics pipelines on Intel® architectures -- gRPC API enabling fast data transfer rate and low latency -- Supported Configuration: Pre-built Ubuntu Linux container for CPU and iGPU -- Pre-loaded Object Detection, Object Classification, Object Tracking and Action Recognition pipelines to get started quickly -- Pre-loaded models - see table below. - -| Name | Version | Model | -| -----|-----------| ------| -| person_vehicle_bike_detection| 1 |[person-vehicle-bike-detection-crossroad-0078](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-vehicle-bike-detection-crossroad-0078/README.md)| -| object_detection|person|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-detection-retail-0013/README.md)| -| object_detection|vehicle|[vehicle-detection-0202](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/vehicle-detection-0202/README.md)| -| vehicle_attributes_recognition|1|[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/vehicle-attributes-recognition-barrier-0039/README.md)| -| action_recognition|decoder|[action-recognition-0001-decoder](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/action-recognition-0001/README.md)| -| action_recognition|encoder|[action-recognition-0001-encoder](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/action-recognition-0001/README.md)| - - -## What's New - -- Action Recognition pipeline (preview feature). -- Deployment manifest, topology and operations file are now provided by the [Intel OpenVINO™ DL Streamer – Edge AI Extension Tutorial](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/use-intel-grpc-video-analytics-serving-tutorial). - - -# Getting Started - -The OpenVINO™ DL Streamer - Edge AI Extension module can run as a standalone microservice or as a module within an Azure Video Analyzer graph. For more information on deploying the module as part of a Azure Video Analyzer graph please see [Configuring the AI Extension Module for Azure Video Analyzer](#configuring-the-ai-extension-module-for-live-video-analytics) and refer to the [Azure Video Analyzer documentation](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/overview). The following instructions demonstrate building and running the microservice and test client outside of Azure Video Analyzer. 
- -## Building the Edge AI Extension Module Image - -### Prerequisites -Building the image requires a modern Linux distro with the following packages installed: - -| | | -|---------------------------------------------|------------------| -| **Docker** | Video Analytics Serving requires Docker for it's build, development, and runtime environments. Please install the latest for your platform. [Docker](https://docs.docker.com/install). | -| **bash** | Video Analytics Serving's build and run scripts require bash and have been tested on systems using versions greater than or equal to: `GNU bash, version 4.3.48(1)-release (x86_64-pc-linux-gnu)`. Most users shouldn't need to update their version but if you run into issues please install the latest for your platform. Instructions for macOS®* users [here](/docs/installing_bash_macos.md). | - -### Building the Image - -Run the docker image build script. -``` -$ ./docker/build.sh -``` -Resulting image name is `video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension` - -## Running the Edge AI Extension Module - -To run the module as a standalone microservice with an `object_detection` pipeline use the `run_server.sh` script with default options. For examples of additional options see [Additional Standalone Edge AI Extension Examples](#additional-standalone-edge-ai-extension-examples). - -```bash -$ ./docker/run_server.sh - -{"levelname": "INFO", "asctime": "2021-01-22 15:27:00,009", "message": "Starting DL Streamer Edge AI Extension on port: 5001", "module": "__main__"} -``` - -## Sending a Test Frame for Object Detection - -To send a test frame to the microservice and receive `object_detection` results use the `run_client.sh` script. - -```bash -$ ./docker/run_client.sh -[AIXC] [2021-01-22 15:28:06,956] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: Options for __main__.py -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: grpc_server_address == localhost:5001 -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: grpc_server_ip == localhost -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: grpc_server_port == 5001 -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: sample_file == /home/video-analytics-serving/samples/ava_ai_extension/sampleframes/sample01.png -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: loop_count == 0 -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: fps_interval == 2 -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: frame_rate == -1 -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: frame_queue_size == 200 -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,958] [MainThread ] [INFO]: use_shared_memory == False -[AIXC] [2021-01-22 15:28:06,958] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,958] [MainThread ] [INFO]: 
-[AIXC] [2021-01-22 15:28:06,958] [MainThread ] [INFO]: output_file == /tmp/result.jsonl
-[AIXC] [2021-01-22 15:28:06,958] [MainThread ] [INFO]: =======================
-[AIXC] [2021-01-22 15:28:07,341] [Thread-2 ] [INFO]: MediaStreamDescriptor request #1
-[AIXC] [2021-01-22 15:28:07,364] [Thread-2 ] [INFO]: MediaSample request #2
-[AIXC] [2021-01-22 15:28:07,365] [MainThread ] [INFO]: [Received] AckNum: 1
-[AIXC] [2021-01-22 15:28:07,371] [Thread-2 ] [INFO]: MediaSample request #3
-[AIXC] [2021-01-22 15:28:07,940] [Thread-3 ] [INFO]: [Received] AckNum: 2
-[AIXC] [2021-01-22 15:28:07,940] [MainThread ] [INFO]: Inference result 2
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (1.00) [0.30, 0.47, 0.09, 0.39] []
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (0.97) [0.36, 0.40, 0.05, 0.24] []
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (0.94) [0.44, 0.42, 0.08, 0.43] []
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (0.92) [0.57, 0.38, 0.05, 0.25] []
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (0.91) [0.69, 0.56, 0.12, 0.43] []
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (0.90) [0.68, 0.42, 0.04, 0.24] []
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (0.82) [0.64, 0.36, 0.05, 0.27] []
-[AIXC] [2021-01-22 15:28:07,941] [MainThread ] [INFO]: - person (0.60) [0.84, 0.44, 0.05, 0.29] []
-[AIXC] [2021-01-22 15:28:07,943] [MainThread ] [INFO]: Start Time: 1611347287.3661082 End Time: 1611347287.9434469 Frames Received: 1 FPS: 1.7320855292554225
-[AIXC] [2021-01-22 15:28:07,943] [MainThread ] [INFO]: Client finished execution
-```
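The bounding boxes in the output above are reported as normalized `[left, top, width, height]` values relative to the frame dimensions. A minimal sketch of converting one to pixel coordinates (the 1920x1080 frame size here is an assumption for illustration):

```python
# Convert a normalized [left, top, width, height] box, as printed by the
# client above, into pixel coordinates. The frame size is hypothetical.
def to_pixels(box, frame_width, frame_height):
    left, top, width, height = box
    return (int(left * frame_width), int(top * frame_height),
            int(width * frame_width), int(height * frame_height))

# First detection from the sample output: person (1.00) [0.30, 0.47, 0.09, 0.39]
print(to_pixels([0.30, 0.47, 0.09, 0.39], 1920, 1080))  # -> (576, 507, 172, 421)
```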
-
-# Edge AI Extension Module Options
-
-The module can be configured using command line options or environment variables (command line options take precedence).
-
-| Setting | Command line option | Environment variable | Default value |
-|---------------------|-----------------------|----------------------|------------------|
-| gRPC port | -p | PORT | 5001 |
-| RTSP Re-Streaming | --enable-rtsp | ENABLE_RTSP | false |
-| Pipeline name | --pipeline-name | PIPELINE_NAME | object_detection |
-| Pipeline version | --pipeline-version | PIPELINE_VERSION | person_vehicle_bike_detection |
-| Use debug pipeline | --debug | DEBUG_PIPELINE | |
-
-## Video Analytics Pipelines
-
-The following pipelines are included in the AI Extension:
-
-| Name | Version | Definition | Diagram |
-| ------------- | ------------- | --------------- | ------- |
-| object_detection | person_vehicle_bike_detection | [definition](/samples/ava_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json)|![diagram](pipeline_diagrams/object-detection.png)|
-| object_detection | object_zone_count | [definition](/samples/ava_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json)|![diagram](pipeline_diagrams/zone-detection.png)|
-| object_classification | vehicle_attributes_recognition | [definition](/samples/ava_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json)|![diagram](pipeline_diagrams/object-classification.png)|
-| object_tracking | person_vehicle_bike_tracking | [definition](/samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json)|![diagram](pipeline_diagrams/object-tracking.png)|
-| object_tracking | object_line_crossing | [definition](/samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json)|![diagram](pipeline_diagrams/line-crossing.png)|
-| action_recognition | general | [definition](/samples/ava_ai_extension/pipelines/action_recognition/general/pipeline.json)|![diagram](pipeline_diagrams/action-recognition.png)|
-
-There are three versions of the object zone count pipeline. They are all based on the same pipeline design but use different detection models.
-
-| Pipeline Version | Model |
-| ---------------- |-------|
-| object_zone_count| [person-vehicle-bike-detection-crossroad-0078](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-vehicle-bike-detection-crossroad-0078/README.md)|
-| object_zone_count_person| [person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-detection-retail-0013/README.md)|
-| object_zone_count_vehicle| [vehicle-detection-0202](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/vehicle-detection-0202/README.md)|
-
-
-## Extension Configuration
-
-The Azure Video Analyzer (AVA) Server supports the extension_configuration field in the [MediaStreamDescriptor message](https://raw.githubusercontent.com/Azure/video-analyzer/main/contracts/grpc/extension.proto#L69). This field contains a JSON string that must match the extension configuration schema. See the example below. Note that the pipeline name and version fields are required, but parameters and frame-destination are optional.
-```
-{
-    "pipeline": {
-        "name": "object_detection",
-        "version": "person_vehicle_bike_detection",
-        "parameters": {},
-        "frame-destination": {}
-    }
-}
-```
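A configuration can be sanity-checked against the schema before it is sent. A minimal sketch using the `jsonschema` package, with the schema copied from `samples/ava_ai_extension/common/extension_schema.py` (minus its `$schema` key):

```python
import json

import jsonschema

# Mirrors the extension configuration schema shipped with the sample
# (samples/ava_ai_extension/common/extension_schema.py).
EXTENSION_CONFIG_SCHEMA = {
    "type": "object",
    "properties": {
        "pipeline": {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "version": {"type": "string"},
                "parameters": {"type": "object"},
                "frame-destination": {"type": "object"},
                "extensions": {
                    "type": "object",
                    "additionalProperties": {"type": "string"},
                },
            },
            "required": ["name", "version"],
            "additionalProperties": False,
        }
    },
    "additionalProperties": False,
}

config = json.loads("""
{
  "pipeline": {
    "name": "object_detection",
    "version": "person_vehicle_bike_detection",
    "parameters": {},
    "frame-destination": {}
  }
}
""")

# Raises jsonschema.exceptions.ValidationError when the config is malformed.
jsonschema.Draft4Validator(schema=EXTENSION_CONFIG_SCHEMA).validate(config)
print("extension configuration is valid")
```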
-## Inference Accelerators
-
-Pipelines can be configured to perform inference using a range of accelerators.
-This is a two-step process:
-1. Give docker access to the accelerator's resources
-2. Set the inference accelerator device name when starting the pipeline
-
-See [Enabling Hardware Accelerators](/docs/running_video_analytics_serving.md#enabling-hardware-accelerators)
-for details on docker resources and inference device name for supported accelerators.
-This will allow you to customize the deployment manifest for a given accelerator.
-
-The run server script will automatically detect installed accelerators and provide access to their resources.
-
-Pipelines will define a default accelerator in their .json files. To run a pipeline on a different accelerator, modify the pipeline json or send a gRPC request with an extension_configuration. The Azure Video Analyzer (AVA) client generates this gRPC request with the extension configuration.
-
-Example extension_configuration:
-```
-{
-    "pipeline": {
-        "name": "object_detection",
-        "version": "person_vehicle_bike_detection",
-        "parameters": { "detection-device": "GPU"}
-    }
-}
-```
-## Configuring the AI Extension Module for Azure Video Analyzer
-
-Please refer to the [Analyze live video with Intel OpenVINO™ DL Streamer – Edge AI Extension](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/use-intel-grpc-video-analytics-serving-tutorial) tutorial for deployment manifests, topologies or operations files and other details.
-
-
-# Additional Standalone Edge AI Extension Examples
-
-### Specifying VA Serving parameters for AVA Server
-
-The AVA Server application will filter command line arguments between the AVA layer and VA Serving layer.
-Command line arguments are first handled by run_server.sh; if not specifically handled by run_server.sh, the argument
-is passed into the AVA Server application.
-Command line arguments that are not recognized by AVA Server are then passed to VA Serving; if VA Serving does not recognize
-the arguments, an error will be reported.
-
-```bash
-./docker/run_server.sh --log_level DEBUG
-```
-
-### Real Time Streaming Protocol (RTSP) Re-streaming
-
-Pipelines can be configured to connect and visualize input video with superimposed bounding boxes.
-
-* Enable RTSP at Server start
-```
-$ ./docker/run_server.sh --enable-rtsp
-```
-* Run the client with the frame destination set. For demonstration, the path is set to `person-detection` in the example request below.
-```
-$ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version person_vehicle_bike_detection --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=true --frame-destination '{\"type\":\"rtsp\",\"path\":\"person-detection\"}'
-```
-* Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/person-detection`.
-
-* Example extension_configuration for the re-streaming pipeline:
-```
-{
-    "pipeline": {
-        "name": "object_detection",
-        "version": "person_vehicle_bike_detection",
-        "frame-destination": { "type":"rtsp", "path":"person-detection"}
-    }
-}
-```
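If VLC is not available, any RTSP-capable client can display the stream. A minimal OpenCV sketch (an assumption for illustration: `opencv-python` built with FFmpeg or GStreamer support, and a host with a display for `imshow`):

```python
import cv2

# View the re-streamed RTSP output instead of using VLC. Assumes the server
# was started with --enable-rtsp and the client set the frame-destination
# path to "person-detection".
capture = cv2.VideoCapture("rtsp://localhost:8554/person-detection")
while True:
    ret, frame = capture.read()
    if not ret:
        break
    cv2.imshow("person-detection", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
capture.release()
cv2.destroyAllWindows()
```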
-
-### Logging
-Run the following command to monitor the logs from the docker container:
-```bash
-$ docker logs video-analytics-serving_0.6.1-dlstreamer-edge-ai-extension -f
-```
-
-### Developer Mode
-The server run script includes a `--dev` flag which starts the container in "developer" mode.
-This mode runs with files from the host, not the container, which is useful for quick iteration and development.
-```bash
-$ ./docker/run_server.sh --dev
-```
-
-### Selecting Pipelines
->**Note:** These features are deprecated and will be removed in a future release. Please use extension configuration instead.
-
-Specify the default pipeline via the command line and run the server:
-
-```bash
-$ ./docker/run_server.sh --pipeline-name object_classification --pipeline-version vehicle_attributes_recognition
-```
-
-Specify the default pipeline via environment variables and run the server:
-```
-$ export PIPELINE_NAME=object_classification
-$ export PIPELINE_VERSION=vehicle_attributes_recognition
-$ ./docker/run_server.sh
-```
-
-Notes:
-* If selecting a pipeline, both name and version must be specified
-* The `--debug` option selects debug pipelines that watermark inference results and save images in `/tmp/vaserving/{--pipeline-version}/{timestamp}/`; it can also be set using the environment variable DEBUG_PIPELINE
-
-### Debug Mode
->**Note:** This feature is deprecated and will be removed in a future release. Please use RTSP re-streaming instead.
-
-Debug pipelines can be selected using the `--debug` command line parameter or by setting the `DEBUG_PIPELINE` environment variable. Debug pipelines save watermarked frames to `/tmp/vaserving/{--pipeline-version}/{timestamp}/` as JPEG images.
-
-Run the default pipeline in debug mode:
-```bash
-$ ./docker/run_server.sh --debug
-```
-
-# Spatial Analytics Pipelines
-## Object Zone Count
-The [object_detection/object_zone_count](./pipelines/object_detection/object_zone_count/pipeline.json) pipeline generates events containing objects detected in zones defined by the AVA extension configuration. For more information on the underlying zone event operation, see the object_zone_count [README](../../extensions/spatial_analytics/object_zone_count.md).
-
-### Build and Run
-
-1. Build and run the AVA server as normal
-
-2. Run the client with the example extension configuration. The `object_zone_count.json` extension configuration contains zone definitions to generate `object-zone-count` events for a media stream.
Look for the below events in the client output:
-
-   ```
-   $ ./docker/run_client.sh \
-   --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_zone_count.json
-   ```
-   ```
-
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (1.00) [0.30, 0.47, 0.09, 0.39] ['inferenceId: 4ea7a39d41eb4befae87894a48e1ea6a', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.97) [0.36, 0.40, 0.05, 0.24] ['inferenceId: 287b569a93fb4d4386af3cb0871b52ca', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.94) [0.44, 0.42, 0.08, 0.43] ['inferenceId: 4e82d111fccc4649a650fe205f70d079', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.92) [0.57, 0.38, 0.05, 0.25] ['inferenceId: cdc5e1dfa20a41b69bb05d3289e773d5', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.91) [0.69, 0.56, 0.12, 0.43] ['inferenceId: d873d43a9e274e5b8693b1df87764e30', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.90) [0.68, 0.42, 0.04, 0.24] ['inferenceId: ab759106752a45279007bae98eabd032', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.82) [0.64, 0.36, 0.05, 0.27] ['inferenceId: 908960e242334549a52bafb33f6a29a0', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.60) [0.84, 0.44, 0.05, 0.29] ['inferenceId: 1a74f84445cf49cbb517ff2ea83f74c3', 'subtype: objectDetection']
-   [AIXC] [2021-09-09 19:50:45,608] [MainThread ] [INFO]: EVENT - Zone2: ['inferenceId: fe65126e0db64e659b6414345d52a96c', 'subtype: object-zone-count', "relatedInferences: ['4ea7a39d41eb4befae87894a48e1ea6a']", "status: ['intersects']", 'zone-count: 1']
-   [AIXC] [2021-09-09 19:50:45,608] [MainThread ] [INFO]: EVENT - Zone3: ['inferenceId: 2b1685ebe9914805b962615e19116b87', 'subtype: object-zone-count', "relatedInferences: ['287b569a93fb4d4386af3cb0871b52ca', '4e82d111fccc4649a650fe205f70d079', 'cdc5e1dfa20a41b69bb05d3289e773d5', 'd873d43a9e274e5b8693b1df87764e30', 'ab759106752a45279007bae98eabd032', '908960e242334549a52bafb33f6a29a0', '1a74f84445cf49cbb517ff2ea83f74c3']", "status: ['intersects', 'intersects', 'within', 'intersects', 'within', 'within', 'intersects']", 'zone-count: 7']
-   ```
-
-### Enabling RTSP Output
-
-To get a visual of the `object_zone_count` extension, run with the `object_zone_count_rendered.json` extension configuration, which sets the `enable_watermark` and `frame-destination` parameters for RTSP re-streaming.
-
-> gvawatermark draws markers/dots showing the boundary of the defined polygon regions rather than the polygon lines, so the viewer must currently "connect the dots" themselves.
-
-1. Build and run the AVA server as normal but with the `--enable-rtsp` flag
-
-2. Run the client with the example extension configuration, with rendering support:
-
-   ```
-   $ ./docker/run_client.sh \
-   --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_zone_count_rendered.json \
-   --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true
-   ```
-3. Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/zone-events`.
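The `within`/`intersects` statuses in the events above come from testing each detection box against the configured zone polygons. A minimal sketch of that kind of test using `shapely` (an illustration only; the actual implementation is in the object_zone_count extension referenced above):

```python
from shapely.geometry import Polygon, box

# Zone3 from the object_zone_count.json example configuration.
zone = Polygon([(0.40, 0.30), (0.50, 0.83), (0.85, 0.83), (0.57, 0.30)])

# A detection in normalized [left, top, width, height] form, e.g.
# person (0.94) [0.44, 0.42, 0.08, 0.43] from the output above.
left, top, width, height = 0.44, 0.42, 0.08, 0.43
detection = box(left, top, left + width, top + height)

if zone.contains(detection):
    print("status: within")
elif zone.intersects(detection):
    print("status: intersects")
else:
    print("not in zone")
```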
-
-## Object Line Crossing
-The [object_tracking/object_line_crossing](./pipelines/object_tracking/object_line_crossing/pipeline.json) pipeline generates events containing objects which crossed lines defined by the AVA extension configuration. For more information on the underlying line crossing operation, see the object_line_crossing [README](../../extensions/spatial_analytics/object_line_crossing.md).
-
-### Build and Run
-
-1. Build and run the AVA server as normal
-
-2. Run the client with the example extension configuration. The `object_line_crossing.json` extension configuration contains example line definitions needed to generate `object_line_crossing` events for a media stream. Look for the below events in the client output:
-
-   ```
-   $ ./docker/run_client.sh \
-   --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_line_crossing.json \
-   --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=True
-   ```
-   ```
-
-   [AIXC] [2021-05-12 18:57:01,315] [MainThread ] [INFO]: ENTITY - person (1.00) [0.40, 0.27, 0.12, 0.62] ['inferenceId: d47a4192ca4b4933a6c6c588220f59de', 'subtype: objectDetection', 'id: 1']
-   [AIXC] [2021-05-12 18:57:01,315] [MainThread ] [INFO]: EVENT - hallway_bottom: ['inferenceId: 520d7506e5c94f3b9aeb1d157af6311c', 'subtype: lineCrossing', "relatedInferences: ['d47a4192ca4b4933a6c6c588220f59de']", 'counterclockwiseTotal: 1', 'total: 1', 'clockwiseTotal: 0', 'direction: counterclockwise']
-   ```
-
-### Enabling RTSP Output
-
-Adding a configuration parameter to specify the frame-destination enables a secondary workflow, with VA Serving rendering a visualization of lines and entity detections/events (shown below).
-
-The `object_line_crossing_rendered.json` extension configuration sets the `enable_watermark` and `frame-destination` parameters for RTSP re-streaming so the caller may visualize the output. Following the same instructions above but swapping in that extension configuration will dynamically produce the scene using rudimentary markers/dots showing the start and end points of the defined lines. This allows the DL Streamer `gvawatermark` element (used in the frame-destination) to handle rendering.
-
-> gvawatermark does not draw the lines, so the viewer must currently "connect the dots" themselves.
-
-1. Build and run the AVA server as normal but with the `--enable-rtsp` flag
-
-2. Run the client with the example extension configuration, with rendering support:
-
-   ```
-   $ ./docker/run_client.sh \
-   --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_line_crossing_rendered.json \
-   --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=True
-   ```
-
-3. Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/vaserving`.
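The `direction` field in the event above is derived from the tracked object's movement relative to the configured line. A minimal sketch of such a segment-crossing and orientation test (illustrative only; the sign-to-direction mapping is an assumption, and the actual implementation is in the object_line_crossing extension referenced above):

```python
def cross(o, a, b):
    """2D cross product of vectors OA and OB; the sign gives orientation."""
    return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

def segments_intersect(p1, p2, q1, q2):
    """True if segment p1-p2 properly crosses segment q1-q2 (collinear cases ignored)."""
    d1 = cross(q1, q2, p1)
    d2 = cross(q1, q2, p2)
    d3 = cross(p1, p2, q1)
    d4 = cross(p1, p2, q2)
    return (d1 * d2 < 0) and (d3 * d4 < 0)

# hallway_bottom from the object_line_crossing.json example configuration.
line_start, line_end = (0.1, 0.9), (0.8, 0.9)

# Hypothetical previous and current positions of a tracked object (normalized).
previous, current = (0.45, 0.95), (0.45, 0.85)

if segments_intersect(line_start, line_end, previous, current):
    # Which side of the line the object ended up on determines the direction;
    # the clockwise/counterclockwise naming here is an assumed convention.
    side = cross(line_start, line_end, current)
    print("crossed hallway_bottom, direction:",
          "clockwise" if side > 0 else "counterclockwise")
```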
-
-# Test Client
-A test client is provided to demonstrate the capabilities of the Edge AI Extension module.
-The test client script `run_client.sh` sends frame(s) to the extension module and prints inference results.
-Use the --help option to see how to use the script. All arguments are optional.
-
-```
-$ ./docker/run_client.sh
-All arguments are optional, usage is as follows
-  [ -s : gRPC server address, defaults to None]
-  [ --server-ip : Specify the server ip to connect to ] (defaults to 127.0.0.1)
-  [ --server-port : Specify the server port to connect to ] (defaults to 5001)
-  [ --sample-file-path : Specify the sample file path to run] (defaults to samples/ava_ai_extension/sampleframes/sample01.png)
-  [ --loop-count : How many times to loop the source after it finishes ]
-  [ --number-of-streams : Specify number of streams (one client process per stream)]
-  [ --fps-interval FPS_INTERVAL] (how often to report FPS, every N seconds; defaults to 2)
-  [ --frame-rate FRAME_RATE] (send frames at given fps, default is no limit)
-  [ --frame-queue-size : Max number of frames to buffer in client, defaults to 200]
-  [ --shared-memory : Enables and uses shared memory between client and server ] (defaults to off)
-  [ --output-file-path : Specify the output file path to save inference results in jsonl format] (defaults to /tmp/results.jsonl)
-  [ --extension-config : JSON string or file containing extension configuration]
-  [ --pipeline-name : Name of the pipeline to run]
-  [ --pipeline-version : Name of the pipeline version to run]
-  [ --pipeline-parameters : Pipeline parameters]
-  [ --pipeline-extensions : JSON string containing tags to be added to extensions field in results]
-  [ --frame-destination : Frame destination for rtsp restreaming]
-  [ --dev : Mount local source code] (use for development)
-```
-Notes:
-* If using `--extension-config`, you must not set any of the following options
-  * --pipeline-name
-  * --pipeline-version
-  * --pipeline-parameters
-  * --pipeline-extensions
-  * --frame-destination
-* Media or log file must be inside the container or in a volume-mounted path
-* Either png or mp4 media files are supported
-* If not using shared memory, decoded image frames must be less than 4MB (the maximum gRPC message size)
-* If you are behind a firewall, ensure `no_proxy` contains `127.0.0.1` in docker config and system settings.
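Since the client writes one JSON document per frame to the output file, results are easy to post-process. A minimal sketch that tallies detected labels (field names assume the MediaSample JSON layout the client writes; the path is the default noted above):

```python
import json
from collections import Counter

# Summarize detections from the client's jsonl output: one media sample
# document per line, each with an "inferences" list.
labels = Counter()
with open("/tmp/results.jsonl") as results:
    for line in results:
        sample = json.loads(line)
        for inference in sample.get("inferences", []):
            if inference.get("type") == "ENTITY":
                labels[inference["entity"]["tag"]["value"]] += 1

print(labels.most_common())
```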
-
-# Updating or Changing Detection and Classification Models
-Before updating the models used by a pipeline, please see the format of
-[pipeline definition files](/docs/defining_pipelines.md) and read the
-tutorial on [changing object detection models](/docs/changing_object_detection_models.md).
-
-Most of the steps to change models used by the AVA extension are the same as in the above tutorial, but the tutorial assumes you are working with the REST service and not the AI
-Extension module. The AVA-specific steps are called out in the following sections.
-
-## Run Existing Object Detection Pipeline
-Get baseline results for the existing object_detection model `person-vehicle-bike-detection-crossroad-0078`:
-
-```
-$ ./docker/run_server.sh
-
-/object_classification/vehicle_attributes_recognition/pipeline.json", "module": "pipeline_manager"}
-{"levelname": "INFO", "asctime": "2021-01-21 12:10:10,288", "message": "===========================", "module": "pipeline_manager"}
-{"levelname": "INFO", "asctime": "2021-01-21 12:10:10,288", "message": "Completed Loading Pipelines", "module": "pipeline_manager"}
-{"levelname": "INFO", "asctime": "2021-01-21 12:10:10,289", "message": "===========================", "module": "pipeline_manager"}
-{"levelname": "INFO", "asctime": "2021-01-21 12:10:10,292", "message": "Starting DL Streamer Edge AI Extension on port: 5001", "module": "__main__"}
-```
-
-In a separate terminal:
-
-```
-$ ./docker/run_client.sh
-
-[AIXC] [2020-11-20 23:29:11,417] [MainThread ] [INFO]: - person (1.00) [0.30, 0.47, 0.09, 0.39]
-[AIXC] [2020-11-20 23:29:11,417] [MainThread ] [INFO]: - person (0.97) [0.36, 0.40, 0.05, 0.24]
-[AIXC] [2020-11-20 23:29:11,417] [MainThread ] [INFO]: - person (0.94) [0.44, 0.42, 0.08, 0.43]
-[AIXC] [2020-11-20 23:29:11,418] [MainThread ] [INFO]: - person (0.92) [0.57, 0.38, 0.05, 0.25]
-[AIXC] [2020-11-20 23:29:11,418] [MainThread ] [INFO]: - person (0.91) [0.69, 0.56, 0.12, 0.43]
-[AIXC] [2020-11-20 23:29:11,418] [MainThread ] [INFO]: - person (0.90) [0.68, 0.42, 0.04, 0.24]
-[AIXC] [2020-11-20 23:29:11,418] [MainThread ] [INFO]: - person (0.82) [0.64, 0.36, 0.05, 0.27]
-[AIXC] [2020-11-20 23:29:11,418] [MainThread ] [INFO]: - person (0.60) [0.84, 0.44, 0.05, 0.29]
-
-```
-
-## Send a request to the server to run a different pipeline
-```
-$ ./docker/run_client.sh --pipeline-name object_classification --pipeline-version vehicle_attributes_recognition
-```
-
-## Send a request to the server to run a different pipeline on the GPU
-```
-$ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version person_vehicle_bike_detection --pipeline-parameters '{\"detection-device\":\"GPU\"}'
-```
-
-## Add New Model to Models List
-
-Copy the existing model list `models/models.list.yml` to `models/yolo-models.list.yml`, then add the following entry:
-
-```yml
-- model: yolo-v2-tiny-tf
-  alias: yolo
-  version: 1
-  precision: [FP16,FP32]
-```
-
-## Update Pipeline Definition File to Use New Model
-
-Copy, rename and update the existing object detection pipeline to reference the `yolo-v2-tiny-tf` model:
-
-```bash
-$ cp -r pipelines/object_detection/person_vehicle_bike_detection pipelines/object_detection/yolo
-$ sed -i -e s/person_vehicle_bike_detection/yolo/g pipelines/object_detection/yolo/pipeline.json
-```
-
-## Rebuild Edge AI Extension with new Model and Pipeline
-
-```
-$ ./docker/build.sh --models models/yolo-models.list.yml
-```
-
-The model will now be in the `models` folder in the root of the project:
-
-```
-models
-└── yolo
-    └── 1
-        ├── FP16
-        │   ├── yolo-v2-tiny-tf.bin
-        │   ├── yolo-v2-tiny-tf.mapping
-        │   └── yolo-v2-tiny-tf.xml
-        ├── FP32
-        │   ├── yolo-v2-tiny-tf.bin
-        │   ├── yolo-v2-tiny-tf.mapping
-        │   └── yolo-v2-tiny-tf.xml
-        └── yolo-v2-tiny-tf.json
-```
-
-Check that the expected model and pipeline are present in the built image:
-
-```bash
-$ docker run -it --entrypoint /bin/bash video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension
-vaserving@82dd59743ca3:~$ ls models
-person_vehicle_bike_detection vehicle_attributes_recognition yolo
-vaserving@82dd59743ca3:~$ ls pipelines/object_detection/
-debug_person_vehicle_bike_detection person_vehicle_bike_detection yolo
-```
-
-## Run Edge AI Extension with new Model and Pipeline
-
-### Restart service
-Restart the service to ensure we are using the image with the yolo-v2-tiny-tf model:
-```
-$ docker stop video-analytics-serving_0.6.1-dlstreamer-edge-ai-extension
-$ docker/run_server.sh --pipeline-name object_detection --pipeline-version yolo
-```
-### Run the client
-Note the different results due to the different model:
-```
-$ docker/run_client.sh
-
-[AIXC] [2021-01-07 06:51:13,081] [MainThread ] [INFO]: - person (0.82) [0.63, 0.36, 0.06, 0.24] []
-[AIXC] [2021-01-07 06:51:13,081] [MainThread ] [INFO]: - person (0.78) [0.56, 0.37, 0.06, 0.23] []
-[AIXC] [2021-01-07 06:51:13,081] [MainThread ] [INFO]: - person (0.63) [0.44, 0.43, 0.11, 0.43] []
-[AIXC] [2021-01-07 06:51:13,081] [MainThread ] [INFO]: - person (0.63) [0.31, 0.45, 0.09, 0.23] []
-[AIXC] [2021-01-07 06:51:13,081] [MainThread ] [INFO]: - person (0.62) [0.69, 0.38, 0.06, 0.23] []
-[AIXC] [2021-01-07 06:51:13,081] [MainThread ] [INFO]: - person (0.60) [0.40, 0.44, 0.07, 0.27] []
-[AIXC] [2021-01-07 06:51:13,081] [MainThread ] [INFO]: - person (0.59) [0.45, 0.43, 0.08, 0.29] []
-[AIXC] [2021-01-07 06:51:13,082] [MainThread ] [INFO]: - person (0.57) [0.33, 0.40, 0.07, 0.20] []
-[AIXC] [2021-01-07 06:51:13,082] [MainThread ] [INFO]: - person (0.57) [0.76, 0.46, 0.13, 0.23] []
-[AIXC] [2021-01-07 06:51:13,082] [MainThread ] [INFO]: - person (0.55) [0.41, 0.44, 0.03, 0.10] []
-
-```
diff --git a/samples/ava_ai_extension/client/__main__.py b/samples/ava_ai_extension/client/__main__.py
deleted file mode 100644
index 0e15b85..0000000
--- a/samples/ava_ai_extension/client/__main__.py
+++ /dev/null
@@ -1,326 +0,0 @@
-"""
-* Copyright (C) 2019-2020 Intel Corporation.
-*
-* SPDX-License-Identifier: MIT License
-*
-*****
-*
-* MIT License
-*
-* Copyright (c) Microsoft Corporation.
-*
-* Permission is hereby granted, free of charge, to any person obtaining a copy
-* of this software and associated documentation files (the "Software"), to deal
-* in the Software without restriction, including without limitation the rights
-* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-* copies of the Software, and to permit persons to whom the Software is
-* furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in all
-* copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -""" -import logging -import os -import sys -import queue -import json -import time -import cv2 -import jsonschema - -from google.protobuf.json_format import MessageToDict -import samples.ava_ai_extension.common.grpc_autogen.inferencing_pb2 as inferencing_pb2 -from samples.ava_ai_extension.common.exception_handler import log_exception -import samples.ava_ai_extension.common.extension_schema as extension_schema -from arguments import parse_args -from media_stream_processor import MediaStreamProcessor - - -class VideoSource: - def __init__(self, filename, loop_count, scale_factor = 1.0): - self._loop_count = loop_count - self._filename = filename - self._scale_factor = scale_factor - self._open_video_source() - - def _open_video_source(self): - self._vid_cap = cv2.VideoCapture(self._filename, cv2.CAP_GSTREAMER) - if self._vid_cap is None or not self._vid_cap.isOpened(): - raise Exception("Error opening video source: {}".format(self._filename)) - - def dimensions(self): - width = int(self._vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH) * self._scale_factor) - height = int(self._vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * self._scale_factor) - return width, height - - def get_frame(self): - ret, frame = self._vid_cap.read() - if ret: - width = int(self._vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH) * self._scale_factor) - height = int(self._vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * self._scale_factor) - dsize = (width, height) - frame = cv2.resize(frame, dsize) - return frame.tobytes() - self._loop_count -= 1 - if self._loop_count > 0: - self._open_video_source() - ret, frame = self._vid_cap.read() - if ret: - width = int(self._vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH) * self._scale_factor) - height = int(self._vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT) * self._scale_factor) - dsize = (width, height) - frame = cv2.resize(frame, dsize) - return frame.tobytes() - return None - - def close(self): - self._vid_cap.release() - - -def _log_options(args): - heading = "Options for {}".format(os.path.basename(__file__)) - banner = "=" * len(heading) - logging.info(banner) - logging.info(heading) - logging.info(banner) - for arg in vars(args): - logging.info("{} == {}".format(arg, getattr(args, arg))) - logging.info(banner) - -def _log_entity(inference): - tag = inference.entity.tag - box = inference.entity.box - attributes = [] - if inference.inference_id: - attribute_string = "{}: {}".format('inferenceId', inference.inference_id) - attributes.append(attribute_string) - if inference.subtype: - attribute_string = "{}: {}".format('subtype', inference.subtype) - attributes.append(attribute_string) - if inference.entity.id: - attribute_string = "{}: {}".format('id', inference.entity.id) - attributes.append(attribute_string) - for attribute in inference.entity.attributes: - attribute_string = "{}: {}".format(attribute.name, attribute.value) - attributes.append(attribute_string) - logging.info( - "ENTITY - {} ({:.2f}) [{:.2f}, {:.2f}, {:.2f}, {:.2f}] {}".format( - tag.value, tag.confidence, box.l, box.t, box.w, box.h, attributes - ) - ) - -def _log_event(inference): - name = inference.event.name - attributes = [] - if inference.inference_id: - attribute_string = "{}: {}".format('inferenceId', inference.inference_id) - attributes.append(attribute_string) - if 
inference.subtype:
-        attribute_string = "{}: {}".format('subtype', inference.subtype)
-        attributes.append(attribute_string)
-    if inference.related_inferences:
-        attribute_string = "{}: {}".format('relatedInferences', inference.related_inferences)
-        attributes.append(attribute_string)
-    for attribute in inference.event.properties:
-        attribute_string = "{}: {}".format(attribute, inference.event.properties[attribute])
-        attributes.append(attribute_string)
-    logging.info(
-        "EVENT - {}: {}".format(name, attributes)
-    )
-
-def _log_classification(inference):
-    tag = inference.classification.tag
-    logging.info("CLASSIFICATION - {} ({:.2f})".format(tag.value, tag.confidence))
-
-def _log_result(response, output, log_result=True):
-    if not log_result:
-        return
-    if not response:
-        return
-    logging.debug("Inference result {}".format(response.ack_sequence_number))
-    for inference in response.media_sample.inferences:
-        if inference.type == inferencing_pb2.Inference.InferenceType.ENTITY:  # pylint: disable=no-member
-            _log_entity(inference)
-
-        if inference.type == inferencing_pb2.Inference.InferenceType.EVENT:  # pylint: disable=no-member
-            _log_event(inference)
-
-        if inference.type == inferencing_pb2.Inference.InferenceType.CLASSIFICATION:  # pylint: disable=no-member
-            _log_classification(inference)
-
-    # default value field is used to avoid not including values set to 0,
-    # but it also causes empty lists to be included
-    returned_dict = MessageToDict(
-        response.media_sample, including_default_value_fields=True
-    )
-    output.write("{}\n".format(json.dumps(returned_dict)))
-
-def _log_fps(start_time, frames_received, prev_fps_delta, fps_interval):
-    delta = int(time.time() - start_time)
-    if (fps_interval > 0) and (delta != prev_fps_delta) and (delta % fps_interval == 0):
-        logging.info(
-            "FPS: {} Frames Received: {}".format(
-                (frames_received / delta), frames_received
-            )
-        )
-        return delta
-    return prev_fps_delta
-
-def validate_extension_config(extension_config):
-    try:
-        validator = jsonschema.Draft4Validator(schema=extension_schema.extension_config,
-                                               format_checker=jsonschema.draft4_format_checker)
-        validator.validate(extension_config)
-    except jsonschema.exceptions.ValidationError as err:
-        raise Exception("Error validating pipeline request: {}, error: {}".format(extension_config, err.message))
-
-def create_extension_config(args):
-    extension_config = {}
-    pipeline_config = {}
-    if args.pipeline_name:
-        pipeline_config["name"] = args.pipeline_name
-    if args.pipeline_version:
-        pipeline_config["version"] = args.pipeline_version
-    if args.pipeline_parameters:
-        try:
-            pipeline_config["parameters"] = json.loads(args.pipeline_parameters)
-        except ValueError:
-            raise Exception("Issue loading pipeline parameters: {}".format(args.pipeline_parameters))
-    if args.frame_destination:
-        try:
-            pipeline_config["frame-destination"] = json.loads(args.frame_destination)
-        except ValueError:
-            raise Exception("Issue loading frame destination: {}".format(args.frame_destination))
-    if args.pipeline_extensions:
-        try:
-            pipeline_config["pipeline_extensions"] = json.loads(args.pipeline_extensions)
-        except ValueError:
-            raise Exception("Issue loading pipeline extensions: {}".format(args.pipeline_extensions))
-
-    if len(pipeline_config) > 0:
-        extension_config.setdefault("pipeline", pipeline_config)
-
-    return extension_config
-
-def main():
-    msp = None
-    frame_source = None
-    args = parse_args()
-    _log_options(args)
-    try:
-        frame_delay = 1 / args.frame_rate if args.frame_rate > 0 else 0
-        frame_queue = 
queue.Queue(args.frame_queue_size) - result_queue = queue.Queue() - frames_sent = 0 - frames_received = 0 - prev_fps_delta = 0 - start_time = None - frame_source = VideoSource(args.sample_file, args.loop_count, args.scale_factor) - width, height = frame_source.dimensions() - image = frame_source.get_frame() - - if not image: - raise Exception("Error getting frame from video source: {}".format(args.sample_file)) - - extension_config = {} - if args.extension_config: - if args.extension_config.endswith(".json"): - with open(args.extension_config, "r") as config: - extension_config = json.loads(config.read()) - else: - extension_config = json.loads(args.extension_config) - else: - extension_config = create_extension_config(args) - - validate_extension_config(extension_config) - logging.info("Extension Configuration: {}".format(extension_config)) - - msp = MediaStreamProcessor( - args.grpc_server_address, - args.use_shared_memory, - args.frame_queue_size, - len(image), - ) - - msp.start(width, height, frame_queue, result_queue, json.dumps(extension_config)) - - with open(args.output_file, "w") as output: - start_time = time.time() - result = True - while image and result and frames_sent < args.max_frames: - frame_queue.put(image) - while not result_queue.empty(): - result = result_queue.get() - if isinstance(result, Exception): - raise result - frames_received += 1 - prev_fps_delta = _log_fps( - start_time, frames_received, prev_fps_delta, args.fps_interval - ) - _log_result(result, output) - image = frame_source.get_frame() - time.sleep(frame_delay) - frames_sent += 1 - - if result: - frame_queue.put(None) - result = result_queue.get() - while result: - if isinstance(result, Exception): - raise result - frames_received += 1 - prev_fps_delta = _log_fps( - start_time, frames_received, prev_fps_delta, args.fps_interval - ) - _log_result(result, output) - result = result_queue.get() - - delta = time.time() - start_time - logging.info( - "Start Time: {} End Time: {} Frames: Tx {} Rx {} FPS: {}".format( - start_time, - start_time + delta, - frames_sent, - frames_received, - (frames_received / delta) if delta > 0 else None, - ) - ) - - if frames_sent != frames_received: - raise Exception("Sent {} requests, received {} responses".format( - frames_sent, frames_received)) - - return True - - except (KeyboardInterrupt, SystemExit, Exception): - log_exception() - return False - finally: - if msp: - msp.stop() - if frame_source: - frame_source.close() - -if __name__ == "__main__": - # Set logging parameters - logging.basicConfig( - level=logging.INFO, - format="[AIXC] [%(asctime)-15s] [%(threadName)-12.12s] [%(levelname)s]: %(message)s", - handlers=[ - # logging.FileHandler(LOG_FILE_NAME), # write in a log file - logging.StreamHandler(sys.stdout) # write in stdout - ], - ) - - # Call Main logic - if not main(): - sys.exit(1) - logging.info("Client finished execution") diff --git a/samples/ava_ai_extension/client/arguments.py b/samples/ava_ai_extension/client/arguments.py deleted file mode 100644 index 1c4b3cf..0000000 --- a/samples/ava_ai_extension/client/arguments.py +++ /dev/null @@ -1,194 +0,0 @@ -''' -* Copyright (C) 2019-2020 Intel Corporation. -* -* SPDX-License-Identifier: MIT License -* -***** -* -* MIT License -* -* Copyright (c) Microsoft Corporation. 
-*
-* Permission is hereby granted, free of charge, to any person obtaining a copy
-* of this software and associated documentation files (the "Software"), to deal
-* in the Software without restriction, including without limitation the rights
-* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-* copies of the Software, and to permit persons to whom the Software is
-* furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in all
-* copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE
-'''
-
-import sys
-import argparse
-
-def parse_args(args=None, program_name="DL Streamer Edge AI Extension Client"):
-    parser = argparse.ArgumentParser(
-        prog=program_name,
-        fromfile_prefix_chars="@",
-        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
-    )
-    parser.add_argument(
-        "-s",
-        metavar=("grpc_server_address"),
-        dest="grpc_server_address",
-        help="gRPC server address.",
-        default=None,
-    )
-    parser.add_argument(
-        "--server-ip",
-        dest="grpc_server_ip",
-        help="gRPC server ip.",
-        default="localhost",
-    )
-
-    parser.add_argument(
-        "--server-port",
-        dest="grpc_server_port",
-        help="gRPC server port.",
-        type=int,
-        default=5001,
-    )
-
-    parser.add_argument(
-        "-f",
-        "--sample-file-path",
-        metavar=("sample_file"),
-        dest="sample_file",
-        help="Name of the sample video frame.",
-        default="/home/video-analytics-serving/samples/ava_ai_extension/sampleframes/sample01.png",
-    )
-    parser.add_argument(
-        "--max-frames",
-        metavar=("max_frames"),
-        dest="max_frames",
-        help="How many frames to send from video.",
-        type=int,
-        default=sys.maxsize,
-    )
-    parser.add_argument(
-        "-l",
-        "--loop-count",
-        metavar=("loop_count"),
-        dest="loop_count",
-        help="How many times to loop the source after it finishes.",
-        type=int,
-        default=0,
-    )
-    parser.add_argument(
-        "--fps-interval",
-        dest="fps_interval",
-        help="How often to report FPS (every N seconds)",
-        type=int,
-        default=2,
-    )
-    parser.add_argument(
-        "--frame-rate",
-        dest="frame_rate",
-        help="How many frames to send per second (-1 is no limit)",
-        type=int,
-        default=-1,
-    )
-    parser.add_argument(
-        "--frame-queue-size",
-        dest="frame_queue_size",
-        help="Max number of frames to buffer in client (0 is no limit)",
-        type=int,
-        default=200,
-    )
-    parser.add_argument(
-        "-m",
-        "--shared-memory",
-        action="store_const",
-        dest="use_shared_memory",
-        const=True,
-        default=False,
-        help="set to use shared memory",
-    )
-    # nosec skips pybandit hits
-    parser.add_argument(
-        "-o",
-        "--output-file-path",
-        metavar=("output_file"),
-        dest="output_file",
-        help="Output file path",
-        default="/tmp/results.jsonl",
-    ) # nosec
-
-    parser.add_argument(
-        "--pipeline-name",
-        action="store",
-        dest="pipeline_name",
-        help="name of the pipeline to run",
-        type=str,
-        default="",
-    )
-
-    parser.add_argument(
-        "--pipeline-version",
-        action="store",
-        dest="pipeline_version",
-        help="version of the pipeline to run",
-        type=str,
-        default="",
-    )
-
-    parser.add_argument(
"--pipeline-parameters", - action="store", - dest="pipeline_parameters", - type=str, - default="", - ) - - parser.add_argument( - "--pipeline-extensions", - action="store", - dest="pipeline_extensions", - type=str, - default="", - ) - - parser.add_argument( - "--frame-destination", - action="store", - dest="frame_destination", - type=str, - default="", - ) - - parser.add_argument( - "--scale-factor", - action="store", - dest="scale_factor", - help="scale factor for decoded images", - type=float, - default=1.0, - ) - - parser.add_argument( - "--extension-config", - action="store", - dest="extension_config", - help="extension config in .json file path or as string", - default="", - ) # nosec - - parser.add_argument("--version", action="version", version="%(prog)s 1.0") - if isinstance(args, dict): - args = ["--{}={}".format(key, value) for key, value in args.items() if value] - result = parser.parse_args(args) - if not result.grpc_server_address: - result.grpc_server_address = "{}:{}".format( - result.grpc_server_ip, result.grpc_server_port - ) - return result diff --git a/samples/ava_ai_extension/client/extension-config/object_line_crossing.json b/samples/ava_ai_extension/client/extension-config/object_line_crossing.json deleted file mode 100644 index 9c1dda3..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_line_crossing.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "pipeline": { - "name": "object_tracking", - "version": "object_line_crossing", - "parameters": { - "object-line-crossing-config": { - "lines": [ - { - "name": "hallway_right", - "line": [[0.9,0.8],[0.8,0.45]] - }, - { - "name": "hallway_left", - "line": [[0.15,0.45],[0.05,0.75]] - }, - { - "name": "hallway_bottom", - "line": [[0.1,0.9],[0.8,0.9]] - } - ] - } - } - } - } diff --git a/samples/ava_ai_extension/client/extension-config/object_line_crossing_rendered.json b/samples/ava_ai_extension/client/extension-config/object_line_crossing_rendered.json deleted file mode 100644 index 210c247..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_line_crossing_rendered.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "pipeline": { - "name": "object_tracking", - "version": "object_line_crossing", - "frame-destination": { "type":"rtsp", "path":"vaserving"}, - "parameters": { - "object-line-crossing-config": { - "lines": [ - { - "name": "hallway_right", - "line": [[0.9,0.8],[0.8,0.45]] - }, - { - "name": "hallway_left", - "line": [[0.15,0.45],[0.05,0.75]] - }, - { - "name": "hallway_bottom", - "line": [[0.1,0.9],[0.8,0.9]] - } - ], - "enable_watermark":true, - "log_level":"DEBUG" - } - } - } - } diff --git a/samples/ava_ai_extension/client/extension-config/object_zone_count.json b/samples/ava_ai_extension/client/extension-config/object_zone_count.json deleted file mode 100644 index e159894..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_zone_count.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "pipeline": { - "name": "object_detection", - "version": "object_zone_count", - "parameters": { - "object-zone-count-config": { - "zones": [ - { - "name": "Zone1", - "polygon": [[0.01,0.10],[0.005,0.53],[0.11,0.53],[0.095,0.10]] - }, - { - "name": "Zone2", - "polygon": [[0.14,0.20],[0.18,0.67],[0.35,0.67],[0.26,0.20]] - }, - { - "name": "Zone3", - "polygon": [[0.40,0.30],[0.50,0.83],[0.85,0.83],[0.57,0.30]] - } - ] - } - } - } -} diff --git a/samples/ava_ai_extension/client/extension-config/object_zone_count_person.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_person.json 
deleted file mode 100644 index 8138dc9..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_zone_count_person.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "pipeline": { - "name": "object_detection", - "version": "object_zone_count_person", - "parameters": { - "detection-device": "CPU", - "threshold":0.7, - "object-zone-count-config": { - "zones": [ - { - "name": "Zone", - "polygon": [[0.25,0.25],[0.25,0.75],[0.75,0.75],[0.75,0.25]] - } - ] - } - } - } -} diff --git a/samples/ava_ai_extension/client/extension-config/object_zone_count_person_rendered.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_person_rendered.json deleted file mode 100644 index 765fbc7..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_zone_count_person_rendered.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "pipeline": { - "name": "object_detection", - "version": "object_zone_count_person", - "frame-destination": {"type":"rtsp","path":"vaserving"}, - "parameters": { - "detection-device": "CPU", - "threshold":0.7, - "object-zone-count-config": { - "zones": [ - { - "name": "Zone", - "polygon": [[0.25,0.25],[0.25,0.75],[0.75,0.75],[0.75,0.25]] - } - ], - "enable_watermark":true, - "log_level":"DEBUG" - } - } - } -} diff --git a/samples/ava_ai_extension/client/extension-config/object_zone_count_rendered.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_rendered.json deleted file mode 100644 index b45313a..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_zone_count_rendered.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "pipeline": { - "name": "object_detection", - "version": "object_zone_count", - "frame-destination": {"type":"rtsp","path":"zone-events"}, - "parameters": { - "detection-device": "CPU", - "threshold":0.75, - "object-zone-count-config": { - "zones": [ - { - "name": "Zone1", - "polygon": [[0.01,0.10],[0.005,0.53],[0.11,0.53],[0.095,0.10]] - }, - { - "name": "Zone2", - "polygon": [[0.14,0.20],[0.18,0.67],[0.35,0.67],[0.26,0.20]] - }, - { - "name": "Zone3", - "polygon": [[0.40,0.30],[0.50,0.83],[0.85,0.83],[0.57,0.30]] - } - ], - "enable_watermark":true, - "log_level":"DEBUG" - } - } - } -} diff --git a/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle.json deleted file mode 100644 index 63feccb..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "pipeline": { - "name": "object_detection", - "version": "object_zone_count_vehicle", - "parameters": { - "object-zone-count-config": { - "zones": [ - { - "name": "Zone", - "polygon": [[0.25,0.25],[0.25,0.75],[0.75,0.75],[0.75,0.25]] - } - ] - } - } - } -} diff --git a/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle_rendered.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle_rendered.json deleted file mode 100644 index 62b5453..0000000 --- a/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle_rendered.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "pipeline": { - "name": "object_detection", - "version": "object_zone_count_vehicle", - "frame-destination": { - "type":"rtsp", - "path":"zone-event" - }, - "parameters": { - "object-zone-count-config": { - "zones": [ - { - "name": "Zone", - "polygon": [[0.01,0.6],[0.25,0.125],[0.5,0.125],[0.6,0.95]] - } - ], - "enable_watermark":true, - "log_level":"DEBUG" - } - } - } -} diff 
--git a/samples/ava_ai_extension/client/media_stream_processor.py b/samples/ava_ai_extension/client/media_stream_processor.py deleted file mode 100644 index fec46cc..0000000 --- a/samples/ava_ai_extension/client/media_stream_processor.py +++ /dev/null @@ -1,228 +0,0 @@ -''' -* Copyright (C) 2019-2020 Intel Corporation. -* -* SPDX-License-Identifier: MIT License -* -***** -* -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' -import logging -import os -import time -import threading -import grpc -from samples.ava_ai_extension.common.exception_handler import log_exception -from samples.ava_ai_extension.common.shared_memory import SharedMemoryManager -import samples.ava_ai_extension.common.grpc_autogen.media_pb2 as media_pb2 -import samples.ava_ai_extension.common.grpc_autogen.extension_pb2 as extension_pb2 -import samples.ava_ai_extension.common.grpc_autogen.extension_pb2_grpc as extension_pb2_grpc - - -class MediaStreamProcessor: - class RequestGenerator: - def __init__(self, descriptor, shared_memory_manager, queue): - try: - self._request_seq_num = 1 - self._descriptor = descriptor - self._shared_memory_manager = shared_memory_manager - self._queue = queue - except: - log_exception() - raise - - def __iter__(self): - return self - - def __next__(self): - if self._request_seq_num == 1: - logging.info( - "MediaStreamDescriptor request #{}".format(self._request_seq_num) - ) - request = extension_pb2.MediaStreamMessage( - sequence_number=self._request_seq_num, - ack_sequence_number=0, - media_stream_descriptor=self._descriptor, - ) - else: - logging.debug("MediaSample request #{}".format(self._request_seq_num)) - image = self._queue.get() - if image is None: - raise StopIteration - media_sample = extension_pb2.MediaSample( - timestamp=0, content_bytes=media_pb2.ContentBytes(bytes=image) - ) - if self._shared_memory_manager: - media_sample = self.get_shared_memory_request(media_sample) - request = extension_pb2.MediaStreamMessage( - sequence_number=self._request_seq_num, - ack_sequence_number=0, - media_sample=media_sample, - ) - - self._request_seq_num += 1 - return request - - def get_memory_slot(self, sequence_number, content_bytes): - try: - memory_slot = self._shared_memory_manager.get_empty_slot( - sequence_number, len(content_bytes) - ) - if memory_slot is None: - return None - - self._shared_memory_manager.write_bytes(memory_slot[0], content_bytes) - - except Exception: - log_exception() - raise - return memory_slot - - 
def get_shared_memory_request(self, media_sample): - memory_slot = self.get_memory_slot( - self._request_seq_num, media_sample.content_bytes.bytes - ) - while memory_slot is None: - logging.info("***************** Shared Memory Full *****************") - time.sleep(1) - memory_slot = self.get_memory_slot( - self._request_seq_num, media_sample.content_bytes.bytes - ) - - memory_slot_offset = memory_slot[0] - memory_slot_length = (memory_slot[1] - memory_slot[0]) + 1 - - content_reference = media_pb2.ContentReference( - address_offset=memory_slot_offset, length_bytes=memory_slot_length - ) - media_sample = extension_pb2.MediaSample( - timestamp=0, content_reference=content_reference - ) - return media_sample - - def __init__( - self, grpc_server_address, use_shared_memory, frame_queue_size, frame_size - ): - try: - # Full address including port number i.e. "localhost:44001" - self._grpc_server_address = grpc_server_address - self._shared_memory_manager = None - if use_shared_memory: - shared_memory_size = ( - frame_queue_size * frame_size - if frame_queue_size - else 100 * frame_size - ) - self._shared_memory_manager = SharedMemoryManager( - os.O_RDWR | os.O_SYNC | os.O_CREAT, - name=None, - size=shared_memory_size, - ) - self._grpc_channel = grpc.insecure_channel(self._grpc_server_address) - self._grpc_stub = extension_pb2_grpc.MediaGraphExtensionStub( - self._grpc_channel - ) - self._stop = False - self._thread = None - - except Exception: - log_exception() - raise - - def get_media_stream_descriptor(self, width, height, extension_config): - try: - smbtp = None - if self._shared_memory_manager: - smbtp = extension_pb2.SharedMemoryBufferTransferProperties( - handle_name=self._shared_memory_manager.shm_file_name, - length_bytes=self._shared_memory_manager.shm_file_size, - ) - media_stream_descriptor = extension_pb2.MediaStreamDescriptor( - graph_identifier=extension_pb2.GraphIdentifier( - media_services_arm_id="", - graph_instance_name="SampleGraph1", - graph_node_name="SampleGraph1", - ), - extension_configuration=extension_config, - media_descriptor=media_pb2.MediaDescriptor( - timescale=90000, - # pylint: disable=no-member - # E1101: Class 'VideoFrameSampleFormat' has no 'Encoding' member (no-member) - # E1101: Class 'VideoFrameSampleFormat' has no 'PixelFormat' member (no-member) - video_frame_sample_format=media_pb2.VideoFrameSampleFormat( - encoding=media_pb2.VideoFrameSampleFormat.Encoding.Value("RAW"), - pixel_format=media_pb2.VideoFrameSampleFormat.PixelFormat.Value( - "BGR24" - ), - dimensions=media_pb2.Dimensions( - width=width, - height=height, - ), - ), - ), - shared_memory_buffer_transfer_properties=smbtp, - ) - except Exception: - log_exception() - raise - - return media_stream_descriptor - - def start(self, width, height, frame_queue, result_queue, extension_config): - descriptor = self.get_media_stream_descriptor(width, height, extension_config) - request_generator = self.RequestGenerator( - descriptor, self._shared_memory_manager, frame_queue - ) - # Use "wait_for_ready" (still in grpc preview...) 
- # to handle failure in case server not ready yet - sequence_iterator = self._grpc_stub.ProcessMediaStream( - request_generator, wait_for_ready=True - ) - response = next(sequence_iterator) - ack_seq_no = response.ack_sequence_number - logging.info("[Received] AckNum: {0}".format(ack_seq_no)) - self._thread = threading.Thread( - target=self.run, args=(sequence_iterator, result_queue) - ) - self._thread.start() - - def stop(self): - self._stop = True - if self._thread: - self._thread.join() - - def run(self, sequence_iterator, result_queue): - try: - for response in sequence_iterator: - if self._stop: - break - ack_seq_no = response.ack_sequence_number - logging.debug("[Received] AckNum: {0}".format(ack_seq_no)) - result_queue.put(response) - if self._shared_memory_manager: - self._shared_memory_manager.delete_slot(ack_seq_no) - except Exception as error: - result_queue.put(error) - - # Signal end of stream - result_queue.put(None) diff --git a/samples/ava_ai_extension/common/__init__.py b/samples/ava_ai_extension/common/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/samples/ava_ai_extension/common/exception_handler.py b/samples/ava_ai_extension/common/exception_handler.py deleted file mode 100644 index b9c896d..0000000 --- a/samples/ava_ai_extension/common/exception_handler.py +++ /dev/null @@ -1,47 +0,0 @@ -''' -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' - -import linecache -import sys -import logging - -def log_exception(logger=None): - ex_type, ex_value, ex_traceback = sys.exc_info() - - tb_frame = ex_traceback.tb_frame - line_no = ex_traceback.tb_lineno - file_name = tb_frame.f_code.co_filename - - linecache.checkcache(file_name) - line = linecache.getline(file_name, line_no, tb_frame.f_globals) - - ex_message = 'Exception:\n\tFile name: {0}\n\tLine number: {1}\n\tLine: {2}\n\tValue: {3}'\ - .format(file_name, line_no, line.strip(), ex_value) - - if (logger): - logger.info(ex_message) - else: - logging.info(ex_message) - - return ex_type, ex_value, ex_traceback diff --git a/samples/ava_ai_extension/common/extension_schema.py b/samples/ava_ai_extension/common/extension_schema.py deleted file mode 100644 index eae552c..0000000 --- a/samples/ava_ai_extension/common/extension_schema.py +++ /dev/null @@ -1,42 +0,0 @@ -''' -* Copyright (C) 2019-2020 Intel Corporation. 
-* -* SPDX-License-Identifier: BSD-3-Clause -''' -#pylint: disable=R0801 - -extension_config = { - "$schema":"https://json-schema.org/draft/2019-09/schema", - "type":"object", - "properties":{ - "pipeline":{ - "type":"object", - "properties":{ - "name":{ - "type":"string" - }, - "version":{ - "type":"string" - }, - "parameters":{ - "type":"object" - }, - "frame-destination":{ - "type":"object" - }, - "extensions":{ - "type":"object", - "additionalProperties": { - "type": "string" - } - } - }, - "required":[ - "name", - "version" - ], - "additionalProperties":False - } - }, - "additionalProperties":False -} diff --git a/samples/ava_ai_extension/common/grpc_autogen/__init__.py b/samples/ava_ai_extension/common/grpc_autogen/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/samples/ava_ai_extension/common/grpc_autogen/extension_pb2.py b/samples/ava_ai_extension/common/grpc_autogen/extension_pb2.py deleted file mode 100644 index 52a25f9..0000000 --- a/samples/ava_ai_extension/common/grpc_autogen/extension_pb2.py +++ /dev/null @@ -1,385 +0,0 @@ -''' -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' - -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: extension.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -import inferencing_pb2 as inferencing__pb2 -import media_pb2 as media__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='extension.proto', - package='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1', - syntax='proto3', - serialized_pb=_b('\n\x0f\x65xtension.proto\x12@microsoft.azure.media.live_video_analytics.extensibility.grpc.v1\x1a\x11inferencing.proto\x1a\x0bmedia.proto\"\xb8\x02\n\x12MediaStreamMessage\x12\x17\n\x0fsequence_number\x18\x01 \x01(\x04\x12\x1b\n\x13\x61\x63k_sequence_number\x18\x02 \x01(\x04\x12z\n\x17media_stream_descriptor\x18\x05 \x01(\x0b\x32W.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptorH\x00\x12\x65\n\x0cmedia_sample\x18\x06 \x01(\x0b\x32M.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSampleH\x00\x42\t\n\x07payload\"\x82\x04\n\x15MediaStreamDescriptor\x12m\n\x10graph_identifier\x18\x01 \x01(\x0b\x32Q.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.GraphIdentifierH\x00\x12\x1e\n\x14\x65xtension_identifier\x18\x02 \x01(\tH\x00\x12\x1f\n\x17\x65xtension_configuration\x18\x03 \x01(\t\x12k\n\x10media_descriptor\x18\x05 \x01(\x0b\x32Q.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaDescriptor\x12\x9a\x01\n(shared_memory_buffer_transfer_properties\x18\n \x01(\x0b\x32\x66.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.SharedMemoryBufferTransferPropertiesH\x01\x42\x13\n\x11stream_identifierB\x1a\n\x18\x64\x61ta_transfer_properties\"\xe6\x02\n\x0bMediaSample\x12\x11\n\ttimestamp\x18\x01 \x01(\x04\x12g\n\rcontent_bytes\x18\x05 \x01(\x0b\x32N.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentBytesH\x00\x12o\n\x11\x63ontent_reference\x18\x06 \x01(\x0b\x32R.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentReferenceH\x00\x12_\n\ninferences\x18\n \x03(\x0b\x32K.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.InferenceB\t\n\x07\x63ontent\"f\n\x0fGraphIdentifier\x12\x1d\n\x15media_services_arm_id\x18\x01 \x01(\t\x12\x1b\n\x13graph_instance_name\x18\x02 \x01(\t\x12\x17\n\x0fgraph_node_name\x18\x03 \x01(\t\"Q\n$SharedMemoryBufferTransferProperties\x12\x13\n\x0bhandle_name\x18\x01 \x01(\t\x12\x14\n\x0clength_bytes\x18\x02 \x01(\x04\x32\xdc\x01\n\x13MediaGraphExtension\x12\xc4\x01\n\x12ProcessMediaStream\x12T.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage\x1aT.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage(\x01\x30\x01\x62\x06proto3') - , - dependencies=[inferencing__pb2.DESCRIPTOR,media__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_MEDIASTREAMMESSAGE = _descriptor.Descriptor( - name='MediaStreamMessage', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='sequence_number', 
full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage.sequence_number', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ack_sequence_number', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage.ack_sequence_number', index=1, - number=2, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='media_stream_descriptor', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage.media_stream_descriptor', index=2, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='media_sample', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage.media_sample', index=3, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='payload', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage.payload', - index=0, containing_type=None, fields=[]), - ], - serialized_start=118, - serialized_end=430, -) - - -_MEDIASTREAMDESCRIPTOR = _descriptor.Descriptor( - name='MediaStreamDescriptor', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='graph_identifier', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor.graph_identifier', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='extension_identifier', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor.extension_identifier', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='extension_configuration', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor.extension_configuration', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='media_descriptor', 
full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor.media_descriptor', index=3, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shared_memory_buffer_transfer_properties', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor.shared_memory_buffer_transfer_properties', index=4, - number=10, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='stream_identifier', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor.stream_identifier', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='data_transfer_properties', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor.data_transfer_properties', - index=1, containing_type=None, fields=[]), - ], - serialized_start=433, - serialized_end=947, -) - - -_MEDIASAMPLE = _descriptor.Descriptor( - name='MediaSample', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSample', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='timestamp', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSample.timestamp', index=0, - number=1, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='content_bytes', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSample.content_bytes', index=1, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='content_reference', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSample.content_reference', index=2, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inferences', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSample.inferences', index=3, - number=10, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='content', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSample.content', - index=0, containing_type=None, fields=[]), - ], - serialized_start=950, - 
serialized_end=1308, -) - - -_GRAPHIDENTIFIER = _descriptor.Descriptor( - name='GraphIdentifier', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.GraphIdentifier', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='media_services_arm_id', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.GraphIdentifier.media_services_arm_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='graph_instance_name', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.GraphIdentifier.graph_instance_name', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='graph_node_name', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.GraphIdentifier.graph_node_name', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1310, - serialized_end=1412, -) - - -_SHAREDMEMORYBUFFERTRANSFERPROPERTIES = _descriptor.Descriptor( - name='SharedMemoryBufferTransferProperties', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.SharedMemoryBufferTransferProperties', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='handle_name', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.SharedMemoryBufferTransferProperties.handle_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='length_bytes', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.SharedMemoryBufferTransferProperties.length_bytes', index=1, - number=2, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1414, - serialized_end=1495, -) - -_MEDIASTREAMMESSAGE.fields_by_name['media_stream_descriptor'].message_type = _MEDIASTREAMDESCRIPTOR -_MEDIASTREAMMESSAGE.fields_by_name['media_sample'].message_type = _MEDIASAMPLE -_MEDIASTREAMMESSAGE.oneofs_by_name['payload'].fields.append( - _MEDIASTREAMMESSAGE.fields_by_name['media_stream_descriptor']) -_MEDIASTREAMMESSAGE.fields_by_name['media_stream_descriptor'].containing_oneof = _MEDIASTREAMMESSAGE.oneofs_by_name['payload'] -_MEDIASTREAMMESSAGE.oneofs_by_name['payload'].fields.append( - 
_MEDIASTREAMMESSAGE.fields_by_name['media_sample']) -_MEDIASTREAMMESSAGE.fields_by_name['media_sample'].containing_oneof = _MEDIASTREAMMESSAGE.oneofs_by_name['payload'] -_MEDIASTREAMDESCRIPTOR.fields_by_name['graph_identifier'].message_type = _GRAPHIDENTIFIER -_MEDIASTREAMDESCRIPTOR.fields_by_name['media_descriptor'].message_type = media__pb2._MEDIADESCRIPTOR -_MEDIASTREAMDESCRIPTOR.fields_by_name['shared_memory_buffer_transfer_properties'].message_type = _SHAREDMEMORYBUFFERTRANSFERPROPERTIES -_MEDIASTREAMDESCRIPTOR.oneofs_by_name['stream_identifier'].fields.append( - _MEDIASTREAMDESCRIPTOR.fields_by_name['graph_identifier']) -_MEDIASTREAMDESCRIPTOR.fields_by_name['graph_identifier'].containing_oneof = _MEDIASTREAMDESCRIPTOR.oneofs_by_name['stream_identifier'] -_MEDIASTREAMDESCRIPTOR.oneofs_by_name['stream_identifier'].fields.append( - _MEDIASTREAMDESCRIPTOR.fields_by_name['extension_identifier']) -_MEDIASTREAMDESCRIPTOR.fields_by_name['extension_identifier'].containing_oneof = _MEDIASTREAMDESCRIPTOR.oneofs_by_name['stream_identifier'] -_MEDIASTREAMDESCRIPTOR.oneofs_by_name['data_transfer_properties'].fields.append( - _MEDIASTREAMDESCRIPTOR.fields_by_name['shared_memory_buffer_transfer_properties']) -_MEDIASTREAMDESCRIPTOR.fields_by_name['shared_memory_buffer_transfer_properties'].containing_oneof = _MEDIASTREAMDESCRIPTOR.oneofs_by_name['data_transfer_properties'] -_MEDIASAMPLE.fields_by_name['content_bytes'].message_type = media__pb2._CONTENTBYTES -_MEDIASAMPLE.fields_by_name['content_reference'].message_type = media__pb2._CONTENTREFERENCE -_MEDIASAMPLE.fields_by_name['inferences'].message_type = inferencing__pb2._INFERENCE -_MEDIASAMPLE.oneofs_by_name['content'].fields.append( - _MEDIASAMPLE.fields_by_name['content_bytes']) -_MEDIASAMPLE.fields_by_name['content_bytes'].containing_oneof = _MEDIASAMPLE.oneofs_by_name['content'] -_MEDIASAMPLE.oneofs_by_name['content'].fields.append( - _MEDIASAMPLE.fields_by_name['content_reference']) -_MEDIASAMPLE.fields_by_name['content_reference'].containing_oneof = _MEDIASAMPLE.oneofs_by_name['content'] -DESCRIPTOR.message_types_by_name['MediaStreamMessage'] = _MEDIASTREAMMESSAGE -DESCRIPTOR.message_types_by_name['MediaStreamDescriptor'] = _MEDIASTREAMDESCRIPTOR -DESCRIPTOR.message_types_by_name['MediaSample'] = _MEDIASAMPLE -DESCRIPTOR.message_types_by_name['GraphIdentifier'] = _GRAPHIDENTIFIER -DESCRIPTOR.message_types_by_name['SharedMemoryBufferTransferProperties'] = _SHAREDMEMORYBUFFERTRANSFERPROPERTIES - -MediaStreamMessage = _reflection.GeneratedProtocolMessageType('MediaStreamMessage', (_message.Message,), dict( - DESCRIPTOR = _MEDIASTREAMMESSAGE, - __module__ = 'extension_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamMessage) - )) -_sym_db.RegisterMessage(MediaStreamMessage) - -MediaStreamDescriptor = _reflection.GeneratedProtocolMessageType('MediaStreamDescriptor', (_message.Message,), dict( - DESCRIPTOR = _MEDIASTREAMDESCRIPTOR, - __module__ = 'extension_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaStreamDescriptor) - )) -_sym_db.RegisterMessage(MediaStreamDescriptor) - -MediaSample = _reflection.GeneratedProtocolMessageType('MediaSample', (_message.Message,), dict( - DESCRIPTOR = _MEDIASAMPLE, - __module__ = 'extension_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaSample) - )) -_sym_db.RegisterMessage(MediaSample) - 
-GraphIdentifier = _reflection.GeneratedProtocolMessageType('GraphIdentifier', (_message.Message,), dict( - DESCRIPTOR = _GRAPHIDENTIFIER, - __module__ = 'extension_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.GraphIdentifier) - )) -_sym_db.RegisterMessage(GraphIdentifier) - -SharedMemoryBufferTransferProperties = _reflection.GeneratedProtocolMessageType('SharedMemoryBufferTransferProperties', (_message.Message,), dict( - DESCRIPTOR = _SHAREDMEMORYBUFFERTRANSFERPROPERTIES, - __module__ = 'extension_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.SharedMemoryBufferTransferProperties) - )) -_sym_db.RegisterMessage(SharedMemoryBufferTransferProperties) - - -# @@protoc_insertion_point(module_scope) diff --git a/samples/ava_ai_extension/common/grpc_autogen/extension_pb2_grpc.py b/samples/ava_ai_extension/common/grpc_autogen/extension_pb2_grpc.py deleted file mode 100644 index a68ec2d..0000000 --- a/samples/ava_ai_extension/common/grpc_autogen/extension_pb2_grpc.py +++ /dev/null @@ -1,116 +0,0 @@ -''' -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' - -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -import extension_pb2 as extension__pb2 - - -class MediaGraphExtensionStub(object): - """ - Media Graph Extension Service - - Media graph extension service definition allows graphs to be extended through a - gRPC server implementation of a graph processor node. - - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.ProcessMediaStream = channel.stream_stream( - '/microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaGraphExtension/ProcessMediaStream', - request_serializer=extension__pb2.MediaStreamMessage.SerializeToString, - response_deserializer=extension__pb2.MediaStreamMessage.FromString, - ) - - -class MediaGraphExtensionServicer(object): - """ - Media Graph Extension Service - - Media graph extension service definition allows graphs to be extended through a - gRPC server implementation of a graph processor node. 
- - """ - - def ProcessMediaStream(self, request_iterator, context): - """ - Continuously process a single media stream (audio/video) - - Note to Implementers: - Client authentication can be achieved through an authentication token set on the "x-ms-authentication" - request metadata key. The token format follows standard HTTP Basic auth scheme. Implementers of this - service are responsible for validating this token. This token may be set through the REST API. - - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_MediaGraphExtensionServicer_to_server(servicer, server): - rpc_method_handlers = { - 'ProcessMediaStream': grpc.stream_stream_rpc_method_handler( - servicer.ProcessMediaStream, - request_deserializer=extension__pb2.MediaStreamMessage.FromString, - response_serializer=extension__pb2.MediaStreamMessage.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaGraphExtension', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - - # This class is part of an EXPERIMENTAL API. -class MediaGraphExtension(object): - """ - Media Graph Extension Service - - Media graph extension service definition allows graphs to be extended through a - gRPC server implementation of a graph processor node. - - """ - - @staticmethod - def ProcessMediaStream(request_iterator, - target, - options=(), - channel_credentials=None, - call_credentials=None, - insecure=False, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None): - return grpc.experimental.stream_stream(request_iterator, target, '/microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaGraphExtension/ProcessMediaStream', - extension__pb2.MediaStreamMessage.SerializeToString, - extension__pb2.MediaStreamMessage.FromString, - options, channel_credentials, - insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/samples/ava_ai_extension/common/grpc_autogen/inferencing_pb2.py b/samples/ava_ai_extension/common/grpc_autogen/inferencing_pb2.py deleted file mode 100644 index 3835a33..0000000 --- a/samples/ava_ai_extension/common/grpc_autogen/inferencing_pb2.py +++ /dev/null @@ -1,796 +0,0 @@ -''' -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' - -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: inferencing.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='inferencing.proto', - package='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1', - syntax='proto3', - serialized_pb=_b('\n\x11inferencing.proto\x12@microsoft.azure.media.live_video_analytics.extensibility.grpc.v1\"\x84\x08\n\tInference\x12g\n\x04type\x18\x01 \x01(\x0e\x32Y.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.InferenceType\x12\x0f\n\x07subtype\x18\x02 \x01(\t\x12\x14\n\x0cinference_id\x18\x03 \x01(\t\x12\x1a\n\x12related_inferences\x18\x04 \x03(\t\x12j\n\x0e\x63lassification\x18\x05 \x01(\x0b\x32P.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ClassificationH\x00\x12Z\n\x06motion\x18\x06 \x01(\x0b\x32H.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MotionH\x00\x12Z\n\x06\x65ntity\x18\x07 \x01(\x0b\x32H.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.EntityH\x00\x12V\n\x04text\x18\x08 \x01(\x0b\x32\x46.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.TextH\x00\x12X\n\x05\x65vent\x18\t \x01(\x0b\x32G.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.EventH\x00\x12\x61\n\x05other\x18\r \x01(\x0b\x32P.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.InferenceOtherH\x00\x12o\n\nextensions\x18\x0f \x03(\x0b\x32[.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.ExtensionsEntry\x1a\x31\n\x0f\x45xtensionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"e\n\rInferenceType\x12\x08\n\x04\x41UTO\x10\x00\x12\x12\n\x0e\x43LASSIFICATION\x10\x01\x12\n\n\x06MOTION\x10\x02\x12\n\n\x06\x45NTITY\x10\x03\x12\x08\n\x04TEXT\x10\x04\x12\t\n\x05\x45VENT\x10\x05\x12\t\n\x05OTHER\x10\x0f\x42\x07\n\x05value\"\xc5\x01\n\x0e\x43lassification\x12R\n\x03tag\x18\x01 \x01(\x0b\x32\x45.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Tag\x12_\n\nattributes\x18\x02 \x03(\x0b\x32K.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Attribute\"b\n\x06Motion\x12X\n\x03\x62ox\x18\x01 \x01(\x0b\x32K.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle\"\xa3\x02\n\x06\x45ntity\x12R\n\x03tag\x18\x01 \x01(\x0b\x32\x45.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Tag\x12_\n\nattributes\x18\x02 \x03(\x0b\x32K.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Attribute\x12X\n\x03\x62ox\x18\x03 \x01(\x0b\x32K.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle\x12\n\n\x02id\x18\x04 \x01(\t\"W\n\x04Text\x12\r\n\x05value\x18\x01 \x01(\t\x12\x10\n\x08language\x18\x02 \x01(\t\x12\x17\n\x0fstart_timestamp\x18\x05 \x01(\x04\x12\x15\n\rend_timestamp\x18\x06 
\x01(\x04\"\xb5\x01\n\x05\x45vent\x12\x0c\n\x04name\x18\x01 \x01(\t\x12k\n\nproperties\x18\x07 \x03(\x0b\x32W.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event.PropertiesEntry\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"=\n\x0eInferenceOther\x12\x14\n\x0c\x63ontent_type\x18\x01 \x01(\t\x12\x15\n\rcontent_bytes\x18\x02 \x01(\x0c\"<\n\tAttribute\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x12\n\nconfidence\x18\x03 \x01(\x02\"(\n\x03Tag\x12\r\n\x05value\x18\x02 \x01(\t\x12\x12\n\nconfidence\x18\x03 \x01(\x02\"7\n\tRectangle\x12\t\n\x01l\x18\x01 \x01(\x02\x12\t\n\x01t\x18\x02 \x01(\x02\x12\t\n\x01w\x18\x03 \x01(\x02\x12\t\n\x01h\x18\x04 \x01(\x02\x62\x06proto3') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_INFERENCE_INFERENCETYPE = _descriptor.EnumDescriptor( - name='InferenceType', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.InferenceType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='AUTO', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CLASSIFICATION', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MOTION', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ENTITY', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TEXT', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='EVENT', index=5, number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='OTHER', index=6, number=15, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1006, - serialized_end=1107, -) -_sym_db.RegisterEnumDescriptor(_INFERENCE_INFERENCETYPE) - - -_INFERENCE_EXTENSIONSENTRY = _descriptor.Descriptor( - name='ExtensionsEntry', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.ExtensionsEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.ExtensionsEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.ExtensionsEntry.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=955, - serialized_end=1004, -) - -_INFERENCE = _descriptor.Descriptor( - name='Inference', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', 
full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.type', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='subtype', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.subtype', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inference_id', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.inference_id', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='related_inferences', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.related_inferences', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='classification', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.classification', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='motion', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.motion', index=5, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='entity', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.entity', index=6, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='text', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.text', index=7, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='event', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.event', index=8, - number=9, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='other', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.other', index=9, - number=13, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='extensions', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.extensions', index=10, - number=15, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_INFERENCE_EXTENSIONSENTRY, ], - enum_types=[ - _INFERENCE_INFERENCETYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='value', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.value', - index=0, containing_type=None, fields=[]), - ], - serialized_start=88, - serialized_end=1116, -) - - -_CLASSIFICATION = _descriptor.Descriptor( - name='Classification', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Classification', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tag', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Classification.tag', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='attributes', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Classification.attributes', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1119, - serialized_end=1316, -) - - -_MOTION = _descriptor.Descriptor( - name='Motion', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Motion', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='box', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Motion.box', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1318, - serialized_end=1416, -) - - -_ENTITY = _descriptor.Descriptor( - name='Entity', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Entity', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tag', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Entity.tag', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='attributes', 
full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Entity.attributes', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='box', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Entity.box', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='id', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Entity.id', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1419, - serialized_end=1710, -) - - -_TEXT = _descriptor.Descriptor( - name='Text', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Text', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='value', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Text.value', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='language', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Text.language', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_timestamp', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Text.start_timestamp', index=2, - number=5, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_timestamp', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Text.end_timestamp', index=3, - number=6, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1712, - serialized_end=1799, -) - - -_EVENT_PROPERTIESENTRY = _descriptor.Descriptor( - name='PropertiesEntry', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event.PropertiesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event.PropertiesEntry.key', 
index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event.PropertiesEntry.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1934, - serialized_end=1983, -) - -_EVENT = _descriptor.Descriptor( - name='Event', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='properties', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event.properties', index=1, - number=7, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_EVENT_PROPERTIESENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1802, - serialized_end=1983, -) - - -_INFERENCEOTHER = _descriptor.Descriptor( - name='InferenceOther', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.InferenceOther', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='content_type', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.InferenceOther.content_type', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='content_bytes', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.InferenceOther.content_bytes', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1985, - serialized_end=2046, -) - - -_ATTRIBUTE = _descriptor.Descriptor( - name='Attribute', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Attribute', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='name', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Attribute.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Attribute.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='confidence', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Attribute.confidence', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2048, - serialized_end=2108, -) - - -_TAG = _descriptor.Descriptor( - name='Tag', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Tag', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='value', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Tag.value', index=0, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='confidence', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Tag.confidence', index=1, - number=3, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2110, - serialized_end=2150, -) - - -_RECTANGLE = _descriptor.Descriptor( - name='Rectangle', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='l', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle.l', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='t', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle.t', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='w', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle.w', 
index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='h', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle.h', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2152, - serialized_end=2207, -) - -_INFERENCE_EXTENSIONSENTRY.containing_type = _INFERENCE -_INFERENCE.fields_by_name['type'].enum_type = _INFERENCE_INFERENCETYPE -_INFERENCE.fields_by_name['classification'].message_type = _CLASSIFICATION -_INFERENCE.fields_by_name['motion'].message_type = _MOTION -_INFERENCE.fields_by_name['entity'].message_type = _ENTITY -_INFERENCE.fields_by_name['text'].message_type = _TEXT -_INFERENCE.fields_by_name['event'].message_type = _EVENT -_INFERENCE.fields_by_name['other'].message_type = _INFERENCEOTHER -_INFERENCE.fields_by_name['extensions'].message_type = _INFERENCE_EXTENSIONSENTRY -_INFERENCE_INFERENCETYPE.containing_type = _INFERENCE -_INFERENCE.oneofs_by_name['value'].fields.append( - _INFERENCE.fields_by_name['classification']) -_INFERENCE.fields_by_name['classification'].containing_oneof = _INFERENCE.oneofs_by_name['value'] -_INFERENCE.oneofs_by_name['value'].fields.append( - _INFERENCE.fields_by_name['motion']) -_INFERENCE.fields_by_name['motion'].containing_oneof = _INFERENCE.oneofs_by_name['value'] -_INFERENCE.oneofs_by_name['value'].fields.append( - _INFERENCE.fields_by_name['entity']) -_INFERENCE.fields_by_name['entity'].containing_oneof = _INFERENCE.oneofs_by_name['value'] -_INFERENCE.oneofs_by_name['value'].fields.append( - _INFERENCE.fields_by_name['text']) -_INFERENCE.fields_by_name['text'].containing_oneof = _INFERENCE.oneofs_by_name['value'] -_INFERENCE.oneofs_by_name['value'].fields.append( - _INFERENCE.fields_by_name['event']) -_INFERENCE.fields_by_name['event'].containing_oneof = _INFERENCE.oneofs_by_name['value'] -_INFERENCE.oneofs_by_name['value'].fields.append( - _INFERENCE.fields_by_name['other']) -_INFERENCE.fields_by_name['other'].containing_oneof = _INFERENCE.oneofs_by_name['value'] -_CLASSIFICATION.fields_by_name['tag'].message_type = _TAG -_CLASSIFICATION.fields_by_name['attributes'].message_type = _ATTRIBUTE -_MOTION.fields_by_name['box'].message_type = _RECTANGLE -_ENTITY.fields_by_name['tag'].message_type = _TAG -_ENTITY.fields_by_name['attributes'].message_type = _ATTRIBUTE -_ENTITY.fields_by_name['box'].message_type = _RECTANGLE -_EVENT_PROPERTIESENTRY.containing_type = _EVENT -_EVENT.fields_by_name['properties'].message_type = _EVENT_PROPERTIESENTRY -DESCRIPTOR.message_types_by_name['Inference'] = _INFERENCE -DESCRIPTOR.message_types_by_name['Classification'] = _CLASSIFICATION -DESCRIPTOR.message_types_by_name['Motion'] = _MOTION -DESCRIPTOR.message_types_by_name['Entity'] = _ENTITY -DESCRIPTOR.message_types_by_name['Text'] = _TEXT -DESCRIPTOR.message_types_by_name['Event'] = _EVENT -DESCRIPTOR.message_types_by_name['InferenceOther'] = _INFERENCEOTHER -DESCRIPTOR.message_types_by_name['Attribute'] = _ATTRIBUTE -DESCRIPTOR.message_types_by_name['Tag'] = _TAG 
-DESCRIPTOR.message_types_by_name['Rectangle'] = _RECTANGLE - -Inference = _reflection.GeneratedProtocolMessageType('Inference', (_message.Message,), dict( - - ExtensionsEntry = _reflection.GeneratedProtocolMessageType('ExtensionsEntry', (_message.Message,), dict( - DESCRIPTOR = _INFERENCE_EXTENSIONSENTRY, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference.ExtensionsEntry) - )) - , - DESCRIPTOR = _INFERENCE, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Inference) - )) -_sym_db.RegisterMessage(Inference) -_sym_db.RegisterMessage(Inference.ExtensionsEntry) - -Classification = _reflection.GeneratedProtocolMessageType('Classification', (_message.Message,), dict( - DESCRIPTOR = _CLASSIFICATION, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Classification) - )) -_sym_db.RegisterMessage(Classification) - -Motion = _reflection.GeneratedProtocolMessageType('Motion', (_message.Message,), dict( - DESCRIPTOR = _MOTION, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Motion) - )) -_sym_db.RegisterMessage(Motion) - -Entity = _reflection.GeneratedProtocolMessageType('Entity', (_message.Message,), dict( - DESCRIPTOR = _ENTITY, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Entity) - )) -_sym_db.RegisterMessage(Entity) - -Text = _reflection.GeneratedProtocolMessageType('Text', (_message.Message,), dict( - DESCRIPTOR = _TEXT, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Text) - )) -_sym_db.RegisterMessage(Text) - -Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict( - - PropertiesEntry = _reflection.GeneratedProtocolMessageType('PropertiesEntry', (_message.Message,), dict( - DESCRIPTOR = _EVENT_PROPERTIESENTRY, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event.PropertiesEntry) - )) - , - DESCRIPTOR = _EVENT, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Event) - )) -_sym_db.RegisterMessage(Event) -_sym_db.RegisterMessage(Event.PropertiesEntry) - -InferenceOther = _reflection.GeneratedProtocolMessageType('InferenceOther', (_message.Message,), dict( - DESCRIPTOR = _INFERENCEOTHER, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.InferenceOther) - )) -_sym_db.RegisterMessage(InferenceOther) - -Attribute = _reflection.GeneratedProtocolMessageType('Attribute', (_message.Message,), dict( - DESCRIPTOR = _ATTRIBUTE, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Attribute) - )) -_sym_db.RegisterMessage(Attribute) - -Tag = _reflection.GeneratedProtocolMessageType('Tag', (_message.Message,), dict( - DESCRIPTOR = _TAG, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Tag) - )) 
-_sym_db.RegisterMessage(Tag) - -Rectangle = _reflection.GeneratedProtocolMessageType('Rectangle', (_message.Message,), dict( - DESCRIPTOR = _RECTANGLE, - __module__ = 'inferencing_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Rectangle) - )) -_sym_db.RegisterMessage(Rectangle) - - -_INFERENCE_EXTENSIONSENTRY.has_options = True -_INFERENCE_EXTENSIONSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -_EVENT_PROPERTIESENTRY.has_options = True -_EVENT_PROPERTIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) -# @@protoc_insertion_point(module_scope) diff --git a/samples/ava_ai_extension/common/grpc_autogen/media_pb2.py b/samples/ava_ai_extension/common/grpc_autogen/media_pb2.py deleted file mode 100644 index 85231ff..0000000 --- a/samples/ava_ai_extension/common/grpc_autogen/media_pb2.py +++ /dev/null @@ -1,398 +0,0 @@ -''' -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' - -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: media.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='media.proto', - package='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1', - syntax='proto3', - serialized_pb=_b('\n\x0bmedia.proto\x12@microsoft.azure.media.live_video_analytics.extensibility.grpc.v1\"\xba\x01\n\x0fMediaDescriptor\x12\x11\n\ttimescale\x18\x01 \x01(\r\x12}\n\x19video_frame_sample_format\x18\x05 \x01(\x0b\x32X.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormatH\x00\x42\x15\n\x13media_sample_format\"\xce\x04\n\x16VideoFrameSampleFormat\x12s\n\x08\x65ncoding\x18\x01 \x01(\x0e\x32\x61.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.Encoding\x12z\n\x0cpixel_format\x18\x02 \x01(\x0e\x32\x64.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.PixelFormat\x12`\n\ndimensions\x18\x03 \x01(\x0b\x32L.microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Dimensions\x12\x14\n\x0cstride_bytes\x18\x04 \x01(\r\".\n\x08\x45ncoding\x12\x07\n\x03\x42MP\x10\x00\x12\x07\n\x03JPG\x10\x01\x12\x07\n\x03PNG\x10\x02\x12\x07\n\x03RAW\x10\x03\"\x9a\x01\n\x0bPixelFormat\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07YUV420P\x10\x14\x12\x0c\n\x08RGB565BE\x10(\x12\x0c\n\x08RGB565LE\x10)\x12\x0c\n\x08RGB555BE\x10*\x12\x0c\n\x08RGB555LE\x10+\x12\t\n\x05RGB24\x10<\x12\t\n\x05\x42GR24\x10=\x12\x08\n\x04\x41RGB\x10P\x12\x08\n\x04RGBA\x10Q\x12\x08\n\x04\x41\x42GR\x10R\x12\x08\n\x04\x42GRA\x10S\"+\n\nDimensions\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\"\x1d\n\x0c\x43ontentBytes\x12\r\n\x05\x62ytes\x18\x01 \x01(\x0c\"@\n\x10\x43ontentReference\x12\x16\n\x0e\x61\x64\x64ress_offset\x18\x01 \x01(\x04\x12\x14\n\x0clength_bytes\x18\x02 \x01(\x04\x62\x06proto3') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_VIDEOFRAMESAMPLEFORMAT_ENCODING = _descriptor.EnumDescriptor( - name='Encoding', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.Encoding', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='BMP', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='JPG', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PNG', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RAW', index=3, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=658, - serialized_end=704, -) -_sym_db.RegisterEnumDescriptor(_VIDEOFRAMESAMPLEFORMAT_ENCODING) - -_VIDEOFRAMESAMPLEFORMAT_PIXELFORMAT = _descriptor.EnumDescriptor( - name='PixelFormat', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.PixelFormat', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='NONE', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='YUV420P', index=1, number=20, - options=None, - type=None), - 
_descriptor.EnumValueDescriptor( - name='RGB565BE', index=2, number=40, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RGB565LE', index=3, number=41, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RGB555BE', index=4, number=42, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RGB555LE', index=5, number=43, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RGB24', index=6, number=60, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BGR24', index=7, number=61, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ARGB', index=8, number=80, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RGBA', index=9, number=81, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ABGR', index=10, number=82, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BGRA', index=11, number=83, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=707, - serialized_end=861, -) -_sym_db.RegisterEnumDescriptor(_VIDEOFRAMESAMPLEFORMAT_PIXELFORMAT) - - -_MEDIADESCRIPTOR = _descriptor.Descriptor( - name='MediaDescriptor', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaDescriptor', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='timescale', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaDescriptor.timescale', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='video_frame_sample_format', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaDescriptor.video_frame_sample_format', index=1, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='media_sample_format', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaDescriptor.media_sample_format', - index=0, containing_type=None, fields=[]), - ], - serialized_start=82, - serialized_end=268, -) - - -_VIDEOFRAMESAMPLEFORMAT = _descriptor.Descriptor( - name='VideoFrameSampleFormat', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='encoding', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.encoding', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pixel_format', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.pixel_format', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dimensions', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.dimensions', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride_bytes', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat.stride_bytes', index=3, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _VIDEOFRAMESAMPLEFORMAT_ENCODING, - _VIDEOFRAMESAMPLEFORMAT_PIXELFORMAT, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=271, - serialized_end=861, -) - - -_DIMENSIONS = _descriptor.Descriptor( - name='Dimensions', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Dimensions', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='width', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Dimensions.width', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Dimensions.height', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=863, - serialized_end=906, -) - - -_CONTENTBYTES = _descriptor.Descriptor( - name='ContentBytes', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentBytes', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='bytes', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentBytes.bytes', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=908, - serialized_end=937, -) - - -_CONTENTREFERENCE = _descriptor.Descriptor( - name='ContentReference', - full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentReference', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='address_offset', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentReference.address_offset', index=0, - number=1, type=4, 
cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='length_bytes', full_name='microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentReference.length_bytes', index=1, - number=2, type=4, cpp_type=4, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=939, - serialized_end=1003, -) - -_MEDIADESCRIPTOR.fields_by_name['video_frame_sample_format'].message_type = _VIDEOFRAMESAMPLEFORMAT -_MEDIADESCRIPTOR.oneofs_by_name['media_sample_format'].fields.append( - _MEDIADESCRIPTOR.fields_by_name['video_frame_sample_format']) -_MEDIADESCRIPTOR.fields_by_name['video_frame_sample_format'].containing_oneof = _MEDIADESCRIPTOR.oneofs_by_name['media_sample_format'] -_VIDEOFRAMESAMPLEFORMAT.fields_by_name['encoding'].enum_type = _VIDEOFRAMESAMPLEFORMAT_ENCODING -_VIDEOFRAMESAMPLEFORMAT.fields_by_name['pixel_format'].enum_type = _VIDEOFRAMESAMPLEFORMAT_PIXELFORMAT -_VIDEOFRAMESAMPLEFORMAT.fields_by_name['dimensions'].message_type = _DIMENSIONS -_VIDEOFRAMESAMPLEFORMAT_ENCODING.containing_type = _VIDEOFRAMESAMPLEFORMAT -_VIDEOFRAMESAMPLEFORMAT_PIXELFORMAT.containing_type = _VIDEOFRAMESAMPLEFORMAT -DESCRIPTOR.message_types_by_name['MediaDescriptor'] = _MEDIADESCRIPTOR -DESCRIPTOR.message_types_by_name['VideoFrameSampleFormat'] = _VIDEOFRAMESAMPLEFORMAT -DESCRIPTOR.message_types_by_name['Dimensions'] = _DIMENSIONS -DESCRIPTOR.message_types_by_name['ContentBytes'] = _CONTENTBYTES -DESCRIPTOR.message_types_by_name['ContentReference'] = _CONTENTREFERENCE - -MediaDescriptor = _reflection.GeneratedProtocolMessageType('MediaDescriptor', (_message.Message,), dict( - DESCRIPTOR = _MEDIADESCRIPTOR, - __module__ = 'media_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.MediaDescriptor) - )) -_sym_db.RegisterMessage(MediaDescriptor) - -VideoFrameSampleFormat = _reflection.GeneratedProtocolMessageType('VideoFrameSampleFormat', (_message.Message,), dict( - DESCRIPTOR = _VIDEOFRAMESAMPLEFORMAT, - __module__ = 'media_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.VideoFrameSampleFormat) - )) -_sym_db.RegisterMessage(VideoFrameSampleFormat) - -Dimensions = _reflection.GeneratedProtocolMessageType('Dimensions', (_message.Message,), dict( - DESCRIPTOR = _DIMENSIONS, - __module__ = 'media_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.Dimensions) - )) -_sym_db.RegisterMessage(Dimensions) - -ContentBytes = _reflection.GeneratedProtocolMessageType('ContentBytes', (_message.Message,), dict( - DESCRIPTOR = _CONTENTBYTES, - __module__ = 'media_pb2' - # @@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentBytes) - )) -_sym_db.RegisterMessage(ContentBytes) - -ContentReference = _reflection.GeneratedProtocolMessageType('ContentReference', (_message.Message,), dict( - DESCRIPTOR = _CONTENTREFERENCE, - __module__ = 'media_pb2' - # 
@@protoc_insertion_point(class_scope:microsoft.azure.media.live_video_analytics.extensibility.grpc.v1.ContentReference)
-  ))
-_sym_db.RegisterMessage(ContentReference)
-
-
-# @@protoc_insertion_point(module_scope)
diff --git a/samples/ava_ai_extension/common/shared_memory.py b/samples/ava_ai_extension/common/shared_memory.py
deleted file mode 100644
index f02df09..0000000
--- a/samples/ava_ai_extension/common/shared_memory.py
+++ /dev/null
@@ -1,151 +0,0 @@
-'''
-* MIT License
-*
-* Copyright (c) Microsoft Corporation.
-*
-* Permission is hereby granted, free of charge, to any person obtaining a copy
-* of this software and associated documentation files (the "Software"), to deal
-* in the Software without restriction, including without limitation the rights
-* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-* copies of the Software, and to permit persons to whom the Software is
-* furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in all
-* copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-* SOFTWARE
-'''
-
-import tempfile
-import mmap
-import os
-import logging
-from .exception_handler import log_exception
-
-# ***********************************************************************************
-# Shared memory management
-#
-class SharedMemoryManager:
-    def __init__(self, shm_flags=None, name=None, size=None):
-        try:
-            #nosec skips bandit hits
-            self.shm_file_path = '/dev/shm' # nosec
-            self.shm_file_name = name
-            if self.shm_file_name is None:
-                self.shm_file_name = next(tempfile._get_candidate_names())
-
-            self.shm_file_size = size
-            if self.shm_file_size is None:
-                self.shm_file_size = 1024 * 1024 * 10 # Bytes (10MB)
-
-            self._shm_file_full_path = os.path.join(self.shm_file_path,
-                                                    self.shm_file_name)
-            self._shm_flags = shm_flags
-
-            # See the NOTE section here: https://docs.python.org/2/library/os.html#os.open
-            # for details on shmFlags
-            if self._shm_flags is None:
-                self._shm_file = open(self._shm_file_full_path, 'r+b')
-                self._shm = mmap.mmap(self._shm_file.fileno(), self.shm_file_size)
-            else:
-                self._shm_file = os.open(self._shm_file_full_path, self._shm_flags)
-                os.ftruncate(self._shm_file, self.shm_file_size)
-                self._shm = mmap.mmap(self._shm_file,
-                                      self.shm_file_size,
-                                      mmap.MAP_SHARED,
-                                      mmap.PROT_WRITE | mmap.PROT_READ)
-
-            # Dictionary to host reserved mem blocks
-            # self._mem_slots[sequenceNo] = [Begin, End] (closed interval)
-            self._mem_slots = dict()
-
-            logging.info('Shared memory name: {0}'.format(self._shm_file_full_path))
-        except:
-            log_exception()
-            raise
-
-    def read_bytes(self, memory_slot_offset, memory_slot_length):
-        try:
-            # This is Non-Zero Copy operation
-            # self._shm.seek(memorySlotOffset, os.SEEK_SET)
-            # bytesRead = self._shm.read(memorySlotLength)
-            # return bytesRead
-
-            #Zero-copy version
-            return memoryview(self._shm)[memory_slot_offset:memory_slot_offset+memory_slot_length]
-
-        except:
-            log_exception()
-            raise
-
-    def write_bytes(self, memory_slot_offset,
-                    bytes_to_write):
-        try:
-            #Zero-copy version
-            memoryview(self._shm)[memory_slot_offset:
-                                  memory_slot_offset+
-                                  len(bytes_to_write)] = bytes_to_write
-
-        except:
-            log_exception()
-            raise
-
-    # Returns None if no availability
-    # Returns closed interval [Begin, End] address with available slot
-    def get_empty_slot(self, seq_no, size_needed):
-        address = None
-
-        if size_needed < 1:
-            return address
-
-        # Empty memory
-        if len(self._mem_slots) < 1:
-            if self.shm_file_size >= size_needed:
-                self._mem_slots[seq_no] = (0, size_needed - 1)
-                address = (0, size_needed - 1)
-            else:
-                address = None
-        else:
-            self._mem_slots = {k: v for k, v in sorted(
-                self._mem_slots.items(), key=lambda item: item[1])}
-
-            # find an available memory gap = sizeNeeded
-            prev_slot_end = 0
-            for _, memory_slot in self._mem_slots.items():
-                if (memory_slot[0] - prev_slot_end - 1) >= size_needed:
-                    address = (prev_slot_end + 1, prev_slot_end + size_needed)
-                    self._mem_slots[seq_no] = (address[0], address[1])
-                    break
-                prev_slot_end = memory_slot[1]
-
-            # no gap in between, check last possible gap
-            if address is None:
-                if (self.shm_file_size - prev_slot_end + 1) >= size_needed:
-                    address = (prev_slot_end, prev_slot_end + size_needed)
-                    self._mem_slots[seq_no] = (address[0], address[1])
-
-        # interval [Begin, End]
-        return address
-
-    def delete_slot(self, seq_no):
-        try:
-            del self._mem_slots[seq_no]
-            return True
-        except KeyError:
-            return False
-
-    def __del__(self):
-        try:
-            if self._shm_flags is None:
-                self._shm_file.close()
-            else:
-                os.close(self._shm_file)
-            os.remove(self._shm_file_full_path)
-        except:
-            log_exception()
-            raise
diff --git a/samples/ava_ai_extension/contracts/extension.proto b/samples/ava_ai_extension/contracts/extension.proto
deleted file mode 100644
index 471f72c..0000000
--- a/samples/ava_ai_extension/contracts/extension.proto
+++ /dev/null
@@ -1,116 +0,0 @@
-// -----------------------------------------------------------------------
-//
-// Copyright (C) Microsoft Corporation. All rights reserved.
-//
-// -----------------------------------------------------------------------
-
-syntax = "proto3";
-
-package microsoft.azure.media.live_video_analytics.extensibility.grpc.v1;
-
-import "inferencing.proto";
-import "media.proto";
-
-//
-// Media Graph Extension Service
-//
-// Media graph extension service definition allows graphs to be extended through a
-// gRPC server implementation of a graph processor node.
-//
-service MediaGraphExtension {
-    //
-    // Continuously process a single media stream (audio/video)
-    //
-    // Note to Implementers:
-    // Client authentication can be achieved through an authentication token set on the "x-ms-authentication"
-    // request metadata key. The token format follows standard HTTP Basic auth scheme. Implementers of this
-    // service are responsible for validating this token. This token may be set through the REST API.
-    //
-    rpc ProcessMediaStream(stream MediaStreamMessage) returns (stream MediaStreamMessage);
-}
-
-//
-// Media Stream Message
-//
-// Encapsulates different kinds of payloads that can be exchanged by the client and server.
-//
-// Key aspects:
-//
-// 1) MediaStreamDescriptor MUST be the first message exchanged from both parties.
-// 2) Messages containing shared memory reference MUST be acknowledged
-//    a) This ensures release of shared memory when processing sample references.
-//    b) Acknowledgments can be sent as part of a payload, thus reducing the traffic by half.
-// 3) It is encouraged that servers process messages asynchronously and possibly in parallel,
-//    in order to leverage pipelining and achieve best performance.
-//
-message MediaStreamMessage {
-
-    uint64 sequence_number = 1;     // Monotonically increasing directional message identifier starting from 1 when the gRPC connection is created
-    uint64 ack_sequence_number = 2; // 0 if this message is not referencing any sent message.
-
-    // Possible payloads are strongly defined by the contract below
-    oneof payload {
-        MediaStreamDescriptor media_stream_descriptor = 5;
-        MediaSample media_sample = 6;
-    }
-}
-
-//
-// Media Stream Descriptor
-//
-// Describes the media type that will flow in a single direction of the stream.
-//
-message MediaStreamDescriptor {
-
-    oneof stream_identifier {
-        GraphIdentifier graph_identifier = 1; // Media Stream graph identifier
-        string extension_identifier = 2;      // Media Stream extension identifier
-    }
-    string extension_configuration = 3;       // Optional extension configuration string provided by the user
-
-    MediaDescriptor media_descriptor = 5;     // Session media information.
-
-    // Additional data transfer properties. If none is set, it is assumed
-    // that all content will be transferred through messages (embedded transfer).
-    oneof data_transfer_properties {
-        SharedMemoryBufferTransferProperties shared_memory_buffer_transfer_properties = 10;
-    }
-}
-
-//
-// Media Sample
-//
-// Encapsulates a media sample or chunk transfer.
-//
-message MediaSample {
-
-    // Sample timestamp (PTS) (first sample timestamp in case of chunked samples)
-    uint64 timestamp = 1;
-
-    // Sample (can be none when sending inferences only)
-    oneof content {
-        ContentBytes content_bytes = 5;
-        ContentReference content_reference = 6;
-    }
-
-    // Inferences associated with the sample/timestamp
-    repeated Inference inferences = 10;
-}
-
-//
-// Identifies the graph node to the extension AI
-//
-message GraphIdentifier {
-    string media_services_arm_id = 1;
-    string graph_instance_name = 2;
-    string graph_node_name = 3;
-}
-
-//
-// Media sample transfer through single shared buffer allocation
-// of shared memory.
-//
-message SharedMemoryBufferTransferProperties {
-    string handle_name = 1;
-    uint64 length_bytes = 2;
-}
diff --git a/samples/ava_ai_extension/contracts/inferencing.proto b/samples/ava_ai_extension/contracts/inferencing.proto
deleted file mode 100644
index 4d6c70c..0000000
--- a/samples/ava_ai_extension/contracts/inferencing.proto
+++ /dev/null
@@ -1,128 +0,0 @@
-// -----------------------------------------------------------------------
-//
-// Copyright (C) Microsoft Corporation. All rights reserved.
-//
-// -----------------------------------------------------------------------
-
-syntax = "proto3";
-
-package microsoft.azure.media.live_video_analytics.extensibility.grpc.v1;
-
-//
-// Wrapper for different inference result types
-//
-message Inference {
-    enum InferenceType {
-        AUTO = 0;           // Automatically set by the graph based on content
-        CLASSIFICATION = 1; // Image tagging/classification
-        MOTION = 2;         // Motion detection
-        ENTITY = 3;         // Entity detection & identification
-        TEXT = 4;           // Timed text
-        EVENT = 5;          // A generic event with key value pairs
-        OTHER = 15;         // Not a match
-    }
-
-    InferenceType type = 1;
-    string subtype = 2;                     // Free form subtype id
-    string inference_id = 3;                // Optional identifier for this inference event
-    repeated string related_inferences = 4; // Ids of inferences which are related to this inference
-
-    oneof value { // Must match type
-        Classification classification = 5;
-        Motion motion = 6;
-        Entity entity = 7;
-        Text text = 8;
-        Event event = 9;
-        InferenceOther other = 13;
-    }
-
-    // Complementary data that can be used to augment the original inference. These
-    // are transmitted opaquely through the pipeline and are meant for application
-    // consumption only.
-    map<string, string> extensions = 15;
-}
-
-//
-// Classification
-//
-message Classification {
-    Tag tag = 1;                       // Class tag. Examples: daylight, moonlight, etc.
-    repeated Attribute attributes = 2; // Additional entity attributes. Examples: isBlackWhite=false
-}
-
-//
-// Motion Detection
-//
-message Motion {
-    Rectangle box = 1; // Motion bounding box
-}
-
-//
-// Entity Detection & Identification
-//
-message Entity {
-    Tag tag = 1;                       // Entity tag. Examples: person, bicycle, car, ...
-    repeated Attribute attributes = 2; // Additional entity attributes. Examples: color=red, body=sedan, etc.
-    Rectangle box = 3;                 // Entity bounding box
-    string id = 4;                     // Optional entity id for identification and/or tracking
-}
-
-//
-// OCR and Captions
-//
-message Text {
-    string value = 1;           // Inferred text
-    string language = 2;        // Optional BCP47 Language Code (https://tools.ietf.org/html/bcp47)
-    uint64 start_timestamp = 5; // Optional start PTS
-    uint64 end_timestamp = 6;   // Optional end PTS
-}
-
-//
-// Generic Events
-//
-message Event {
-    string name = 1;                    // Event name
-    map<string, string> properties = 7; // Event properties
-}
-
-//
-// Generic content to be returned as inference results.
-//
-message InferenceOther {
-    string content_type = 1;  // Content type (IANA Media Type identifier: https://www.iana.org/assignments/media-types/media-types.xhtml)
-    bytes content_bytes = 2;  // Content Bytes. For textual formats which do not specify an encoding, UTF-8 should be used.
-}
-
-//
-// Generic attributes. Attributes are used to augment an entity.
-//
-message Attribute {
-    string name = 1;      // Attribute name: color, make, model, etc.
-    string value = 2;     // Attribute value: red, honda, civic, etc.
-    float confidence = 3; // Confidence (normalized between 0.0 and 1.0)
-}
-
-//
-// Generic tags.
-//
-message Tag {
-    string value = 2;     // Tag value
-    float confidence = 3; // Confidence (normalized between 0.0 and 1.0)
-}
-
-//
-// Generic rectangle for region bounding boxes
-//
-// - Values are normalized between 0.0 and 1.0 as fraction of the input image.
-// - Extensions which are receiving padded images (pillarbox or letterbox) should return the bounding boxes
-//   based on the padded image. The Live Video Analytics pipeline will adjust the bounding boxes based on
-//   the padding before publishing the inferences. For example, if the image has .2 padding on the left and
-//   right sides of the image, the rectangle (0.2, 0.0, 0.6, 1.0) will be adjusted to (0.0, 0.0, 1.0, 1.0)
-//   by Live Video Analytics.
-//
-message Rectangle {
-    float l = 1; // Left: distance from the image's left edge to the rectangle's left edge
-    float t = 2; // Top: distance from the image's top edge to the rectangle's top edge
-    float w = 3; // Width: rectangle width
-    float h = 4; // Height: rectangle height
-}
\ No newline at end of file
diff --git a/samples/ava_ai_extension/contracts/media.proto b/samples/ava_ai_extension/contracts/media.proto
deleted file mode 100644
index e28e8ea..0000000
--- a/samples/ava_ai_extension/contracts/media.proto
+++ /dev/null
@@ -1,96 +0,0 @@
-// -----------------------------------------------------------------------
-//
-// Copyright (C) Microsoft Corporation. All rights reserved.
-//
-// -----------------------------------------------------------------------
-
-syntax = "proto3";
-
-package microsoft.azure.media.live_video_analytics.extensibility.grpc.v1;
-
-//
-// Media information
-//
-message MediaDescriptor {
-    uint32 timescale = 1; // Example: 90000 when the media clock tick is 90KHz
-
-    // None indicates that the sample contains only inferences
-    oneof media_sample_format {
-        VideoFrameSampleFormat video_frame_sample_format = 5; // Sample is a video frame
-    }
-};
-
-//
-// Video Frame Format information
-//
-message VideoFrameSampleFormat {
-
-    // Encoding Type
-    enum Encoding {
-        BMP = 0;
-        JPG = 1;
-        PNG = 2;
-        RAW = 3;
-    };
-    Encoding encoding = 1;
-
-    // Pixel Format when transferring RAW samples
-    enum PixelFormat {
-        NONE = 0;
-
-        // 12 bpp (native)
-        YUV420P = 20;
-
-        // 16 bpp
-        RGB565BE = 40;
-        RGB565LE = 41;
-        RGB555BE = 42;
-        RGB555LE = 43;
-
-        // 24 bpp
-        RGB24 = 60;
-        BGR24 = 61;
-
-        // 32 bpp
-        ARGB = 80;
-        RGBA = 81;
-        ABGR = 82;
-        BGRA = 83;
-    };
-    PixelFormat pixel_format = 2;
-
-    // Frame Dimensions in pixels
-    Dimensions dimensions = 3;
-
-    //
-    // The number of bytes from one row of pixels in memory to the next row of pixels in memory.
-    // If padding bytes are present, then the stride is wider than the width of the image.
-    // Only set when transferring RAW samples
-    //
-    uint32 stride_bytes = 4;
-}
-
-//
-// Generic Dimensions message
-//
-message Dimensions {
-    uint32 width = 1;
-    uint32 height = 2;
-};
-
-//
-// Media sample with embedded content
-//
-message ContentBytes {
-    bytes bytes = 1;
-};
-
-//
-// Media sample with memory address reference within a pre-established shared
-// memory
-//
-message ContentReference {
-    uint64 address_offset = 1; // Relative to the beginning of the shared memory file
-    uint64 length_bytes = 2;
-}
-
diff --git a/samples/ava_ai_extension/docker/Dockerfile b/samples/ava_ai_extension/docker/Dockerfile
deleted file mode 100644
index 0fa7c46..0000000
--- a/samples/ava_ai_extension/docker/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-ARG BASE=video-analytics-serving-gstreamer
-FROM $BASE
-
-USER root
-
-# Dependencies installed via pip
-COPY ./requirements.txt /
-RUN pip3 install --no-cache-dir -r /requirements.txt
-RUN rm -f /requirements.txt
-
-RUN mkdir -p /home/video-analytics-serving/samples
-COPY ./client /home/video-analytics-serving/samples/ava_ai_extension/client
-COPY ./common /home/video-analytics-serving/samples/ava_ai_extension/common
-COPY ./models /home/video-analytics-serving/samples/ava_ai_extension/models
-COPY ./pipelines /home/video-analytics-serving/samples/ava_ai_extension/pipelines
-COPY ./sampleframes /home/video-analytics-serving/samples/ava_ai_extension/sampleframes
-COPY ./server /home/video-analytics-serving/samples/ava_ai_extension/server
-
-ENV PYTHONPATH=$PYTHONPATH:/home/video-analytics-serving
-ENV PYTHONPATH=$PYTHONPATH:/home/video-analytics-serving/samples/ava_ai_extension/common/grpc_autogen
-
-USER vaserving
-
-EXPOSE 5001
-
-#ENTRYPOINT [ "/bin/bash" ]
-ENTRYPOINT [ "python3", "/home/video-analytics-serving/samples/ava_ai_extension/server"]
diff --git a/samples/ava_ai_extension/docker/build.sh b/samples/ava_ai_extension/docker/build.sh
deleted file mode 100755
index 0f44a66..0000000
--- a/samples/ava_ai_extension/docker/build.sh
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash -e
-
-WORK_DIR=$(dirname $(readlink -f "$0"))
-SAMPLE_DIR=$(dirname $WORK_DIR)
-SAMPLE_BUILD_ARGS=$(env | cut -f1 -d= | grep -E '_(proxy|REPO|VER)$' | sed 's/^/--build-arg / ' | tr '\n' ' ')
-MODELS="models/models.list.yml"
-TAG="video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension"
-
-#Get options passed into script
-function get_options {
-    while :; do
-        case $1 in
-            -h | -\? | --help)
-                show_help
-                exit
-                ;;
-            --models)
-                if [ "$2" ]; then
-                    MODELS=$2
-                    shift
-                else
-                    error 'ERROR: "--models" requires an argument.'
-                fi
-                ;;
-            *)
-                break
-                ;;
-        esac
-
-    done
-}
-
-function show_help {
-    echo "usage: ./build.sh"
-    echo "  [ --models : Model list, must be a relative path ] "
-}
-
-function launch { echo $@
-    $@
-    local exit_code=$?
-    if [ $exit_code -ne 0 ]; then
-        echo "ERROR: error with $1" >&2
-        exit $exit_code
-    fi
-    return $exit_code
-}
-
-get_options "$@"
-
-# Build VA Serving
-launch "$SAMPLE_DIR/../../docker/build.sh --framework gstreamer --create-service false --pipelines samples/ava_ai_extension/pipelines --models $SAMPLE_DIR/$MODELS"
-
-# Build AI Extension
-echo $SAMPLE_DIR/..
-launch "docker build -f $WORK_DIR/Dockerfile $SAMPLE_BUILD_ARGS -t $TAG $SAMPLE_DIR" diff --git a/samples/ava_ai_extension/docker/dlstreamer-edge-ai-extension-third-party-programs.txt b/samples/ava_ai_extension/docker/dlstreamer-edge-ai-extension-third-party-programs.txt deleted file mode 100644 index 634c6fc..0000000 --- a/samples/ava_ai_extension/docker/dlstreamer-edge-ai-extension-third-party-programs.txt +++ /dev/null @@ -1,609 +0,0 @@ -This file contains the list of third party software ("third party programs") contained in the Intel software and their required notices and/or license terms. This third party software, even if included with the distribution of the Intel software, may be governed by separate license terms, including without limitation, third party license terms, other Intel software license terms, and open source software license terms. These separate license terms govern your use of the third party programs as set forth in the "third-party-programs.txt" or other similarly-named text file. - - -Third party programs and their corresponding required notices and/or license terms are listed below. - -------------------------------------------------------------- - - -1. setuptools - Copyright (C) 2016 Jason R Coombs - - pip3 - Copyright (c) 2008-2020 The pip developers (see AUTHORS.txt file) - - jsonschema - Copyright (c) 2013 Julian Berman - - rfc3986-validator - Copyright (c) 2019, Nicolas Aimetti - - rfc3339-validator - Copyright (c) 2019, Nicolas Aimetti - - pyyaml - Copyright (c) 2017-2020 Ingy döt Net - Copyright (c) 2006-2016 Kirill Simonov - - Live Video Analytics - https://azure.microsoft.com/en-us/services/media-services/live-video-analytics/ - Copyright 2020 Microsoft Corporation - Modifications Copyright 2020 Intel Corporation - - VAAPI driver for the Intel GEN8+ Graphics family - Copyright (c) 2017-2021, Intel Corporation - - Intel OpenCL - Copyright (C) 2018-2021 Intel Corporation - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -------------------------------------------------------------- - - -2. grpcio - Copyright 2014 gRPC authors. 
- - aclnet - https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/aclnet - Copyright (c) 2020 Intel Corporation - - person-vehicle-bike-detection-crossroad-0078 - vehicle-attributes-recognition-barrier-0039 - vehicle-detection-0202 - person-detection-retail-0013 - action-recognition-0001-decoder - action-recognition-0001-encoder - driver-action-recognition-adas-0002-decoder - driver-action-recognition-adas-0002-encoder - https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel - Copyright (c) 2021 Intel Corporation - - Requests - Copyright 2019 Kenneth Reitz - - swagger-ui-bundle - Copyright 2020 SmartBear Software Inc. - - Tornado Web Server - Copyright: 2009-2011 Facebook - - Zalando Connexion - Copyright 2015 Zalando SE - -Apache 2.0 License - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -------------------------------------------------------------- - -3. Python 3 - Copyright © 2001-2020 Python Software Foundation; All Rights - Reserved - - 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and - the Individual or Organization ("Licensee") accessing and otherwise using Python - 3.9.0 software in source or binary form and its associated documentation. - - 2. Subject to the terms and conditions of this License Agreement, PSF hereby - grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, - analyze, test, perform and/or display publicly, prepare derivative works, - distribute, and otherwise use Python 3.9.0 alone or in any derivative - version, provided, however, that PSF's License Agreement and PSF's notice of - copyright, i.e., "Copyright © 2001-2020 Python Software Foundation; All Rights - Reserved" are retained in Python 3.9.0 alone or in any derivative version - prepared by Licensee. - - 3. In the event Licensee prepares a derivative work that is based on or - incorporates Python 3.9.0 or any part thereof, and wants to make the - derivative work available to others as provided herein, then Licensee hereby - agrees to include in any such work a brief summary of the changes made to Python - 3.9.0. - - 4. PSF is making Python 3.9.0 available to Licensee on an "AS IS" basis. - PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF - EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR - WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE - USE OF PYTHON 3.9.0 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. - - 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.9.0 - FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF - MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.9.0, OR ANY DERIVATIVE - THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - - 6. This License Agreement will automatically terminate upon a material breach of - its terms and conditions. - - 7. Nothing in this License Agreement shall be deemed to create any relationship - of agency, partnership, or joint venture between PSF and Licensee. 
This License
- Agreement does not grant permission to use PSF trademarks or trade name in a
- trademark sense to endorse or promote products or services of Licensee, or any
- third party.
-
- 8. By copying, installing or otherwise using Python 3.9.0, Licensee agrees
- to be bound by the terms and conditions of this License Agreement.
-
--------------------------------------------------------------
-
-4. Numpy (BSD 3-clause Numpy Customized)
- Copyright (c) 2005-2020, NumPy Developers.
- All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
- * Neither the name of the NumPy Developers nor the names of any
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------
-
-5. protobuf (BSD 3-clause Google Customized)
- Copyright 2008 Google Inc. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- Code generated by the Protocol Buffer compiler is owned by the owner
- of the input file used when generating it. This code is not
- standalone and requires a support library to be linked with it. This
- support library is itself covered by the above license.
-
--------------------------------------------------------------
-
-6. python-psutil
- Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola'
- All rights reserved.
-
-BSD 3-Clause License
-
-Copyright (c) 2009, Jay Loden, Dave Daeschler, Giampaolo Rodola'
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- * Neither the name of the psutil authors nor the names of its contributors
- may be used to endorse or promote products derived from this software without
- specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------
-
-7. jemalloc
-
-Unless otherwise specified, files in the jemalloc source distribution are
-subject to the following license:
-
-Copyright (C) 2002-present Jason Evans .
-All rights reserved.
-Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-present Facebook, Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice(s),
- this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice(s),
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
-INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------
-
-8. Python Paho MQTT Client
-
-Eclipse Distribution License - v 1.0
-Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors.
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------
-
-9. python-dateutil
-
-Copyright 2017- Paul Ganssle
-Copyright 2017- dateutil contributors (see AUTHORS file)
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-The above license applies to all contributions after 2017-12-01, as well as
-all contributions that have been re-licensed (see AUTHORS file for the list of
-contributors who have re-licensed their code).
-------------------------
-dateutil - Extensions to the standard Python datetime module.
-
-Copyright (c) 2003-2011 - Gustavo Niemeyer
-Copyright (c) 2012-2014 - Tomi Pieviläinen
-Copyright (c) 2014-2016 - Yaron de Leeuw
-Copyright (c) 2015- - Paul Ganssle
-Copyright (c) 2015- - dateutil contributors (see AUTHORS file)
-
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-The above BSD License Applies to all code, even that also covered by Apache 2.0.
-
--------------------------------------------------------------
-10. Media-Driver
- Copyright (c) 2007-2017 Intel Corporation
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-Copyright (c) 2010, The WebM Project authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-Neither the name of Google, nor the WebM Project, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. Copyright (c) 2008 Red Hat Inc. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA Copyright (c) 2007-2008 Intel Corporation Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. Copyright © 2014 NVIDIA Corporation Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright © 2007 Red Hat Inc. Copyright © 2007-2012 Intel Corporation Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. Sun Apr 18 09:35:45 1999 by faith@precisioninsight.com Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-Copyright 2008, Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-* Neither the name of Google Inc. nor the names of its contributors
-may be used to endorse or promote products derived from this software
-without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------
-
-* Docker Base Images
-
- OpenVINO Runtime Base Image
- https://hub.docker.com/r/openvino/ubuntu18_runtime
- https://hub.docker.com/r/openvino/ubuntu20_runtime
- https://github.com/openvinotoolkit/docker_ci
- Copyright (C) 2019-2021 Intel Corporation
- All rights reserved.
-
- OpenVINO Data Runtime Base Image
- https://hub.docker.com/r/openvino/ubuntu18_data_runtime
- https://hub.docker.com/r/openvino/ubuntu20_data_runtime
- https://github.com/openvinotoolkit/docker_ci
- Copyright (C) 2019-2021 Intel Corporation
- All rights reserved.
-
- OpenVisualCloud Docker Base Image
- https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-gst
- https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg
- Copyright (c) 2018,Intel Corporation
- All rights reserved.
-
-This docker installs third party components licensed under various open source licenses. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses.
-
--------------------------------------------------------------
-Other names and brands may be claimed as the property of others.
-
diff --git a/samples/ava_ai_extension/docker/run_client.sh b/samples/ava_ai_extension/docker/run_client.sh
deleted file mode 100755
index 2111013..0000000
--- a/samples/ava_ai_extension/docker/run_client.sh
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-
-SERVER_IP=127.0.0.1
-SERVER_PORT=5001
-AVA_ROOT=/home/video-analytics-serving/samples/ava_ai_extension
-OUTPUT_FILE_PATH=/tmp/result
-INTERACTIVE=
-IMAGE=video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension
-NAME=${IMAGE//[\:]/_}"_client"
-NUMBER_OF_STREAMS=1
-SCRIPT_DIR=$(dirname $(readlink -f "$0"))
-SAMPLE_DIR=$(dirname $SCRIPT_DIR)
-ROOT_DIR=$(readlink -f "$SCRIPT_DIR/../../..")
-MODE=
-VOLUME_MOUNT=
-ENTRYPOINT_ARGS=
-
-function show_help {
-    echo ""
-    echo "**Run Script**"
-    echo ""
-    echo "usage: ./run_client.sh"
-    echo "  [ -it : Run client in interactive mode ] "
-    echo "  [ --output-file-path : Specify the output file path without extension to save inference results to (file must be inside container or in volume mounted path)] "
-    echo "  [ --number-of-streams : Specify number of streams (one client process per stream)] "
-    echo "  [ --dev : Mount local source code] "
-    echo ""
-    echo "**Application**"
-    echo ""
-    if [ "${MODE}" == "DEV" ]; then
-        VOLUME_MOUNT+="-v $SAMPLE_DIR:$AVA_ROOT "
-    fi
-    RUN_COMMAND="'python3 ${AVA_ROOT}/client --help'"
-    "$ROOT_DIR/docker/run.sh" --name $NAME --image $IMAGE $VOLUME_MOUNT --entrypoint "/bin/bash" --entrypoint-args "-c" --entrypoint-args "$RUN_COMMAND"
-}
-
-function error {
-    printf '%s\n' "$1" >&2
-    exit
-}
-
-while [[ "$#" -gt 0 ]]; do
-    case $1 in
-        -h | -\? | --help)
-            show_help
-            exit
-            ;;
-        -it)
-            INTERACTIVE="-it"
-            ;;
-        --output-file-path)
-            if [ "$2" ]; then
-                OUTPUT_FILE_PATH=$2
-                shift
-            else
-                error "--output-file-path expects a value"
-            fi
-            ;;
-        --number-of-streams)
-            if [ "$2" ]; then
-                NUMBER_OF_STREAMS=$2
-                shift
-            else
-                error "--number-of-streams expects a value"
-            fi
-            ;;
-        --dev)
-            MODE=DEV
-            ;;
-        *)
-            ENTRYPOINT_ARGS+=" '$1' "
-            ;;
-    esac
-
-    shift
-done
-
-VOLUME_MOUNT+="-v /tmp:/tmp "
-VOLUME_MOUNT+="-v /dev/shm:/dev/shm "
-PIDS=
-CONTAINERS=
-
-if [ "${MODE}" == "DEV" ]; then
-    VOLUME_MOUNT+="-v $SAMPLE_DIR:$AVA_ROOT "
-fi
-
-function clean_up {
-    kill -9 $PIDS
-    docker kill $CONTAINERS
-    exit
-}
-
-if [ "$NUMBER_OF_STREAMS" -gt "1" ]; then
-    trap clean_up SIGHUP SIGINT SIGTERM
-    for i in $(seq "$NUMBER_OF_STREAMS")
-    do
-        echo "Starting Client $i Results to ${OUTPUT_FILE_PATH}_client_$i.jsonl, Output to: client_${i}.stdout.txt"
-        RUN_COMMAND='"'" python3 $AVA_ROOT/client $ENTRYPOINT_ARGS -o ${OUTPUT_FILE_PATH}_client_$i.jsonl "'"'
-        "$ROOT_DIR/docker/run.sh" --non-interactive --name "${NAME}_${i}" --network host --image $IMAGE $VOLUME_MOUNT --entrypoint "/bin/bash" --entrypoint-args "-c" --entrypoint-args "$RUN_COMMAND" >client_${i}.stdout.txt 2>&1 &
-        PIDS+=" $!"
-        CONTAINERS+=" ${NAME}_${i}"
-        sleep 1
-    done
-    echo "waiting for clients to finish"
-    wait
-else
-    RUN_COMMAND='"'" python3 $AVA_ROOT/client $ENTRYPOINT_ARGS -o ${OUTPUT_FILE_PATH}.jsonl "'"'
-    "$ROOT_DIR/docker/run.sh" --name $NAME --network host --image $IMAGE $VOLUME_MOUNT --entrypoint "/bin/bash" --entrypoint-args "-c" --entrypoint-args "$RUN_COMMAND"
-fi
diff --git a/samples/ava_ai_extension/docker/run_server.sh b/samples/ava_ai_extension/docker/run_server.sh
deleted file mode 100755
index f64e17f..0000000
--- a/samples/ava_ai_extension/docker/run_server.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-
-CURRENT_DIR=$(dirname $(readlink -f "$0"))
-ROOT_DIR=$(readlink -f "$CURRENT_DIR/../../..")
-AVA_DIR=$(dirname $CURRENT_DIR)
-AVA_ROOT=/home/video-analytics-serving/samples/ava_ai_extension
-IMAGE=video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension
-VASERVING_ROOT=/home/video-analytics-serving
-NAME=${IMAGE//[\:]/_}
-PORT=5001
-PIPELINES=
-ENTRYPOINT_ARGS=
-MODE=
-VOLUME_MOUNT=
-RTSP_ARGS=
-
-function show_help {
-    echo ""
-    echo "**Run Script**"
-    echo ""
-    echo "usage: ./run_server.sh"
-    echo "  [ -p : Specify the port to use ] "
-    echo "  [ --dev : Mount local source code] "
-    echo "  [ --enable-rtsp : To enable rtsp re-streaming ] "
-    echo ""
-    echo "**Application**"
-    echo ""
-    if [ "${MODE}" == "DEV" ]; then
-        VOLUME_MOUNT+="-v $AVA_DIR:$AVA_ROOT "
-        VOLUME_MOUNT+="-v $ROOT_DIR:$VASERVING_ROOT "
-        PIPELINES="--pipelines $AVA_DIR/pipelines "
-    fi
-    ENTRYPOINT_ARGS+="--entrypoint-args --help "
-    "$ROOT_DIR/docker/run.sh" -p $PORT:$PORT --image $IMAGE $VOLUME_MOUNT $ENTRYPOINT_ARGS $PIPELINES
-}
-
-function error {
-    printf '%s\n' "$1" >&2
-    exit
-}
-
-while [[ "$#" -gt 0 ]]; do
-    case $1 in
-        -h | -\? | --help)
-            show_help
-            exit
-            ;;
-        -p)
-            if [ "$2" ]; then
-                PORT=$2
-                shift
-            else
-                error "-p expects a value"
-            fi
-            ;;
-        --dev)
-            PIPELINES="--pipelines $AVA_DIR/pipelines "
-            MODE="DEV"
-            ;;
-        --enable-rtsp)
-            RTSP_ARGS="--enable-rtsp"
-            ;;
-        *)
-            ENTRYPOINT_ARGS+="--entrypoint-args '$1' "
-            ;;
-    esac
-
-    shift
-done
-
-ENV=
-
-if [ ! -z "$PIPELINE_NAME" ]; then
-    ENV+="-e PIPELINE_NAME=$PIPELINE_NAME "
-fi
-
-if [ ! -z "$PIPELINE_VERSION" ]; then
-    ENV+="-e PIPELINE_VERSION=$PIPELINE_VERSION "
-fi
-
-if [ ! -z "$DEBUG_PIPELINE" ]; then
-    ENV+="-e DEBUG_PIPELINE=$DEBUG_PIPELINE "
-fi
-
-if [ ! 
-z "$GST_DEBUG" ]; then - ENV+="-e GST_DEBUG=$GST_DEBUG " -fi - -VOLUME_MOUNT+="-v /tmp:/tmp " -VOLUME_MOUNT+="-v /dev/shm:/dev/shm " - -if [ "${MODE}" == "DEV" ]; then - VOLUME_MOUNT+="-v $AVA_DIR:$AVA_ROOT " - VOLUME_MOUNT+="-v $ROOT_DIR:$VASERVING_ROOT " -fi - -"$ROOT_DIR/docker/run.sh" --image $IMAGE $VOLUME_MOUNT -p $PORT:$PORT $RTSP_ARGS $ENTRYPOINT_ARGS $PIPELINES $ENV diff --git a/samples/ava_ai_extension/models/action-recognition-0001.json b/samples/ava_ai_extension/models/action-recognition-0001.json deleted file mode 100644 index 8fdac5d..0000000 --- a/samples/ava_ai_extension/models/action-recognition-0001.json +++ /dev/null @@ -1,424 +0,0 @@ -{ - "json_schema_version": "2.0.0", - "input_preproc": [ - { - "format": "image", - "layer_name": "0", - "params": { - "color_space": "BGR", - "resize": "aspect-ratio", - "crop": "central" - } - } - ], - "output_postproc": [ - { - "attribute_name": "action", - "layer_name": "data", - "converter": "tensor_to_label", - "method": "softmax", - "labels": [ - "abseiling", - "air drumming", - "answering questions", - "applauding", - "applying cream", - "archery", - "arm wrestling", - "arranging flowers", - "assembling computer", - "auctioning", - "baby waking up", - "baking cookies", - "balloon blowing", - "bandaging", - "barbequing", - "bartending", - "beatboxing", - "bee keeping", - "belly dancing", - "bench pressing", - "bending back", - "bending metal", - "biking through snow", - "blasting sand", - "blowing glass", - "blowing leaves", - "blowing nose", - "blowing out candles", - "bobsledding", - "bookbinding", - "bouncing on trampoline", - "bowling", - "braiding hair", - "breading or breadcrumbing", - "breakdancing", - "brush painting", - "brushing hair", - "brushing teeth", - "building cabinet", - "building shed", - "bungee jumping", - "busking", - "canoeing or kayaking", - "capoeira", - "carrying baby", - "cartwheeling", - "carving pumpkin", - "catching fish", - "catching or throwing baseball", - "catching or throwing frisbee", - "catching or throwing softball", - "celebrating", - "changing oil", - "changing wheel", - "checking tires", - "cheerleading", - "chopping wood", - "clapping", - "clay pottery making", - "clean and jerk", - "cleaning floor", - "cleaning gutters", - "cleaning pool", - "cleaning shoes", - "cleaning toilet", - "cleaning windows", - "climbing a rope", - "climbing ladder", - "climbing tree", - "contact juggling", - "cooking chicken", - "cooking egg", - "cooking on campfire", - "cooking sausages", - "counting money", - "country line dancing", - "cracking neck", - "crawling baby", - "crossing river", - "crying", - "curling hair", - "cutting nails", - "cutting pineapple", - "cutting watermelon", - "dancing ballet", - "dancing charleston", - "dancing gangnam style", - "dancing macarena", - "deadlifting", - "decorating the christmas tree", - "digging", - "dining", - "disc golfing", - "diving cliff", - "dodgeball", - "doing aerobics", - "doing laundry", - "doing nails", - "drawing", - "dribbling basketball", - "drinking", - "drinking beer", - "drinking shots", - "driving car", - "driving tractor", - "drop kicking", - "drumming fingers", - "dunking basketball", - "dying hair", - "eating burger", - "eating cake", - "eating carrots", - "eating chips", - "eating doughnuts", - "eating hotdog", - "eating ice cream", - "eating spaghetti", - "eating watermelon", - "egg hunting", - "exercising arm", - "exercising with an exercise ball", - "extinguishing fire", - "faceplanting", - "feeding birds", - "feeding fish", - "feeding goats", - 
"filling eyebrows", - "finger snapping", - "fixing hair", - "flipping pancake", - "flying kite", - "folding clothes", - "folding napkins", - "folding paper", - "front raises", - "frying vegetables", - "garbage collecting", - "gargling", - "getting a haircut", - "getting a tattoo", - "giving or receiving award", - "golf chipping", - "golf driving", - "golf putting", - "grinding meat", - "grooming dog", - "grooming horse", - "gymnastics tumbling", - "hammer throw", - "headbanging", - "headbutting", - "high jump", - "high kick", - "hitting baseball", - "hockey stop", - "holding snake", - "hopscotch", - "hoverboarding", - "hugging", - "hula hooping", - "hurdling", - "hurling (sport)", - "ice climbing", - "ice fishing", - "ice skating", - "ironing", - "javelin throw", - "jetskiing", - "jogging", - "juggling balls", - "juggling fire", - "juggling soccer ball", - "jumping into pool", - "jumpstyle dancing", - "kicking field goal", - "kicking soccer ball", - "kissing", - "kitesurfing", - "knitting", - "krumping", - "laughing", - "laying bricks", - "long jump", - "lunge", - "making a cake", - "making a sandwich", - "making bed", - "making jewelry", - "making pizza", - "making snowman", - "making sushi", - "making tea", - "marching", - "massaging back", - "massaging feet", - "massaging legs", - "massaging person's head", - "milking cow", - "mopping floor", - "motorcycling", - "moving furniture", - "mowing lawn", - "news anchoring", - "opening bottle", - "opening present", - "paragliding", - "parasailing", - "parkour", - "passing American football (in game)", - "passing American football (not in game)", - "peeling apples", - "peeling potatoes", - "petting animal (not cat)", - "petting cat", - "picking fruit", - "planting trees", - "plastering", - "playing accordion", - "playing badminton", - "playing bagpipes", - "playing basketball", - "playing bass guitar", - "playing cards", - "playing cello", - "playing chess", - "playing clarinet", - "playing controller", - "playing cricket", - "playing cymbals", - "playing didgeridoo", - "playing drums", - "playing flute", - "playing guitar", - "playing harmonica", - "playing harp", - "playing ice hockey", - "playing keyboard", - "playing kickball", - "playing monopoly", - "playing organ", - "playing paintball", - "playing piano", - "playing poker", - "playing recorder", - "playing saxophone", - "playing squash or racquetball", - "playing tennis", - "playing trombone", - "playing trumpet", - "playing ukulele", - "playing violin", - "playing volleyball", - "playing xylophone", - "pole vault", - "presenting weather forecast", - "pull ups", - "pumping fist", - "pumping gas", - "punching bag", - "punching person (boxing)", - "push up", - "pushing car", - "pushing cart", - "pushing wheelchair", - "reading book", - "reading newspaper", - "recording music", - "riding a bike", - "riding camel", - "riding elephant", - "riding mechanical bull", - "riding mountain bike", - "riding mule", - "riding or walking with horse", - "riding scooter", - "riding unicycle", - "ripping paper", - "robot dancing", - "rock climbing", - "rock scissors paper", - "roller skating", - "running on treadmill", - "sailing", - "salsa dancing", - "sanding floor", - "scrambling eggs", - "scuba diving", - "setting table", - "shaking hands", - "shaking head", - "sharpening knives", - "sharpening pencil", - "shaving head", - "shaving legs", - "shearing sheep", - "shining shoes", - "shooting basketball", - "shooting goal (soccer)", - "shot put", - "shoveling snow", - "shredding paper", - "shuffling 
cards", - "side kick", - "sign language interpreting", - "singing", - "situp", - "skateboarding", - "ski jumping", - "skiing (not slalom or crosscountry)", - "skiing crosscountry", - "skiing slalom", - "skipping rope", - "skydiving", - "slacklining", - "slapping", - "sled dog racing", - "smoking", - "smoking hookah", - "snatch weight lifting", - "sneezing", - "sniffing", - "snorkeling", - "snowboarding", - "snowkiting", - "snowmobiling", - "somersaulting", - "spinning poi", - "spray painting", - "spraying", - "springboard diving", - "squat", - "sticking tongue out", - "stomping grapes", - "stretching arm", - "stretching leg", - "strumming guitar", - "surfing crowd", - "surfing water", - "sweeping floor", - "swimming backstroke", - "swimming breast stroke", - "swimming butterfly stroke", - "swing dancing", - "swinging legs", - "swinging on something", - "sword fighting", - "tai chi", - "taking a shower", - "tango dancing", - "tap dancing", - "tapping guitar", - "tapping pen", - "tasting beer", - "tasting food", - "testifying", - "texting", - "throwing axe", - "throwing ball", - "throwing discus", - "tickling", - "tobogganing", - "tossing coin", - "tossing salad", - "training dog", - "trapezing", - "trimming or shaving beard", - "trimming trees", - "triple jump", - "tying bow tie", - "tying knot (not on a tie)", - "tying tie", - "unboxing", - "unloading truck", - "using computer", - "using remote controller (not gaming)", - "using segway", - "vault", - "waiting in line", - "walking the dog", - "washing dishes", - "washing feet", - "washing hair", - "washing hands", - "water skiing", - "water sliding", - "watering plants", - "waxing back", - "waxing chest", - "waxing eyebrows", - "waxing legs", - "weaving basket", - "welding", - "whistling", - "windsurfing", - "wrapping present", - "wrestling", - "writing", - "yawning", - "yoga", - "zumba" - ] - } - ] -} diff --git a/samples/ava_ai_extension/models/models.list.yml b/samples/ava_ai_extension/models/models.list.yml deleted file mode 100644 index a6928f1..0000000 --- a/samples/ava_ai_extension/models/models.list.yml +++ /dev/null @@ -1,28 +0,0 @@ -- model: person-vehicle-bike-detection-crossroad-0078 - alias: person_vehicle_bike_detection - version: 1 - precision: [FP16,FP32] -- model: vehicle-attributes-recognition-barrier-0039 - alias: vehicle_attributes_recognition - version: 1 - precision: [FP16,FP32] -- model: person-detection-retail-0013 - alias: object_detection - version: person - precision: [FP16,FP32] - model-proc: person-detection-retail-0013.json -- model: vehicle-detection-0202 - alias: object_detection - version: vehicle - precision: [FP16,FP32] - model-proc: vehicle-detection-0202.json -- model: action-recognition-0001-decoder - alias: action_recognition - version: decoder - precision: [FP16,FP32] - model-proc: action-recognition-0001.json -- model: action-recognition-0001-encoder - alias: action_recognition - version: encoder - precision: [FP16,FP32] - diff --git a/samples/ava_ai_extension/models/person-detection-retail-0013.json b/samples/ava_ai_extension/models/person-detection-retail-0013.json deleted file mode 100644 index c2ce13e..0000000 --- a/samples/ava_ai_extension/models/person-detection-retail-0013.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "json_schema_version": "2.0.0", - "input_preproc": [], - "output_postproc": [ - { - "labels": [ - "", - "person" - ], - "converter": "tensor_to_bbox_ssd" - } - ] -} diff --git a/samples/ava_ai_extension/models/vehicle-detection-0202.json 
b/samples/ava_ai_extension/models/vehicle-detection-0202.json deleted file mode 100644 index 0090b89..0000000 --- a/samples/ava_ai_extension/models/vehicle-detection-0202.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "json_schema_version": "2.0.0", - "input_preproc": [], - "output_postproc": [ - { - "labels": [ - "vehicle" - ] - } - ] -} diff --git a/samples/ava_ai_extension/pipeline_diagrams/action-recognition.png b/samples/ava_ai_extension/pipeline_diagrams/action-recognition.png deleted file mode 100644 index 8321651..0000000 Binary files a/samples/ava_ai_extension/pipeline_diagrams/action-recognition.png and /dev/null differ diff --git a/samples/ava_ai_extension/pipeline_diagrams/line-crossing.png b/samples/ava_ai_extension/pipeline_diagrams/line-crossing.png deleted file mode 100644 index 6943fc7..0000000 Binary files a/samples/ava_ai_extension/pipeline_diagrams/line-crossing.png and /dev/null differ diff --git a/samples/ava_ai_extension/pipeline_diagrams/object-classification.png b/samples/ava_ai_extension/pipeline_diagrams/object-classification.png deleted file mode 100644 index 19d821c..0000000 Binary files a/samples/ava_ai_extension/pipeline_diagrams/object-classification.png and /dev/null differ diff --git a/samples/ava_ai_extension/pipeline_diagrams/object-detection.png b/samples/ava_ai_extension/pipeline_diagrams/object-detection.png deleted file mode 100644 index 72b716a..0000000 Binary files a/samples/ava_ai_extension/pipeline_diagrams/object-detection.png and /dev/null differ diff --git a/samples/ava_ai_extension/pipeline_diagrams/object-tracking.png b/samples/ava_ai_extension/pipeline_diagrams/object-tracking.png deleted file mode 100644 index 0bcd0f5..0000000 Binary files a/samples/ava_ai_extension/pipeline_diagrams/object-tracking.png and /dev/null differ diff --git a/samples/ava_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid b/samples/ava_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid deleted file mode 100644 index b2ef2aa..0000000 --- a/samples/ava_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid +++ /dev/null @@ -1,72 +0,0 @@ -## AVA Pipeline Diagrams -### Object Detection -```mermaid -graph LR - frame-->appsrc - subgraph "Object Detection" - appsrc-->gvadetect - gvadetect-->appsink - end - appsink-->meta_data -``` - -### Object Classification -```mermaid -graph LR - frame-->appsrc - subgraph "Object Classification" - appsrc-->gvadetect - gvadetect-->gvaclassify - gvaclassify-->appsink - end - appsink-->meta_data -``` - -### Object Tracking -```mermaid -graph LR - frame-->appsrc - subgraph "Object Tracking" - appsrc-->gvadetect - gvadetect-->gvatrack - gvatrack-->gvaclassify - gvaclassify-->appsink - end - appsink-->meta_data -``` - -### Action Recognition -```mermaid -graph LR - frame-->appsrc - subgraph "Action Recognition" - appsrc-->gvaactionrecognitionbin - gvaactionrecognitionbin-->appsink - end - appsink-->meta_data -``` - -### Line Crossing -```mermaid -graph LR - frame-->appsrc - subgraph "Line Crossing (gvapython element generates line crossing events)" - appsrc-->gvadetect - gvadetect-->gvatrack - gvatrack-->gvaclassify - gvaclassify-->gvapython - gvapython-->appsink - end - appsink-->meta_data``` - -### Zone Event -```mermaid -graph LR - frame-->appsrc - subgraph "Zone Event (gvapython element generates 'in zone' detection events)" - appsrc-->gvadetect - gvadetect-->gvapython - gvapython-->appsink - end - appsink-->meta_data -``` diff --git a/samples/ava_ai_extension/pipeline_diagrams/zone-detection.png 
b/samples/ava_ai_extension/pipeline_diagrams/zone-detection.png deleted file mode 100644 index 8de0c77..0000000 Binary files a/samples/ava_ai_extension/pipeline_diagrams/zone-detection.png and /dev/null differ diff --git a/samples/ava_ai_extension/pipelines/action_recognition/general/pipeline.json b/samples/ava_ai_extension/pipelines/action_recognition/general/pipeline.json deleted file mode 100644 index 33b2f88..0000000 --- a/samples/ava_ai_extension/pipelines/action_recognition/general/pipeline.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source ! videoconvert ! video/x-raw,format=BGRx", - " ! gvaactionrecognitionbin enc-model={models[action_recognition][encoder][network]} dec-model={models[action_recognition][decoder][network]} model-proc={models[action_recognition][decoder][proc]} name=action_recognition", - " ! appsink name=destination"], - "description": "General action recognition based on action-recognition-0001", - "parameters": { - "type": "object", - "properties": { - "dec-device": { - "element": "action_recognition", - "description": "Decoder inference device: [CPU, GPU]", - "type": "string", - "default": "CPU" - }, - "enc-device": { - "element": "action_recognition", - "description": "Encoder inference device: [CPU, GPU]", - "type": "string", - "default": "CPU" - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_classification/debug_vehicle_attributes_recognition/pipeline.json b/samples/ava_ai_extension/pipelines/object_classification/debug_vehicle_attributes_recognition/pipeline.json deleted file mode 100755 index 9ffd57b..0000000 --- a/samples/ava_ai_extension/pipelines/object_classification/debug_vehicle_attributes_recognition/pipeline.json +++ /dev/null @@ -1,171 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! gvaclassify model={models[vehicle_attributes_recognition][1][network]} name=classification", - " ! tee name = t ! queue ! gvawatermark ! videoconvert ! jpegenc", - " ! multifilesink name=filesink t. ! queue", - " ! 
appsink name=destination"], - "description": "Object Classification using gstreamer gvadetect & gvaclassify elements, check more about gvadetect at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvadetect and gvaclassify at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvaclassify", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "classification-device": { - "element": { - "name": "classification", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "batch-size": { - "element": - [ {"name":"detection", - "property":"batch-size"}, - {"name":"classification", - "property":"batch-size"} - ], - "type": "integer" - }, - "inference-interval": { - "element": - [ {"name":"detection", - "property":"inference-interval"}, - {"name":"classification", - "property":"inference-interval"} - ], - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - }, - "device-extensions": { - "element": - [ {"name":"detection", - "property":"device-extensions"}, - {"name":"classification", - "property":"device-extensions"} - ], - "type": "string" - }, - "cpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"cpu-throughput-streams"}, - {"name":"classification", - "property":"cpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "gpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"gpu-throughput-streams"}, - {"name":"classification", - "property":"gpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "ie-config": { - "element": - [ {"name":"detection", - "property":"ie-config"}, - {"name":"classification", - "property":"ie-config"} - ], - "type": "string" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "classification-model-instance-id": { - "element": { - "name": "classification", - "property": "model-instance-id" - }, - "type": "string" - }, - "pre-process-backend": { - "element": - [ {"name":"detection", - "property":"pre-process-backend"}, - {"name":"classification", - "property":"pre-process-backend"} - ], - "type": "string" - }, - "nireq": { - "element": - [ {"name":"detection", - "property":"nireq"}, - {"name":"classification", - "property":"nireq"} - ], - "type": "integer", - "minimum": 1, - "maximum": 64 - }, - "reshape": { - "element": - [ {"name":"detection", - "property":"reshape"}, - {"name":"classification", - "property":"reshape"} - ], - "type": "boolean" - }, - "reshape-height": { - "element": - [ {"name":"detection", - "property":"reshape-height"}, - {"name":"classification", - "property":"reshape-height"} - ], - "type": "integer" - }, - "reshape-width": { - "element": - [ {"name":"detection", - "property":"reshape-width"}, - {"name":"classification", - "property":"reshape-width"} - ], - "type": "integer" - }, - "object-class": { - "element": "classification", - "type": "string", - "default": "vehicle" - }, - "reclassify-interval": { - "element": "classification", - "type": "integer" - }, - "location": { - "element": "filesink", - "type":"string", - "default":"/tmp/frame_%07d.jpeg" - }, - "max-files": { - "element": "filesink", - "type":"integer", - "default": 10 - } - } - } -} diff --git 
a/samples/ava_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json b/samples/ava_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json deleted file mode 100755 index 38b0956..0000000 --- a/samples/ava_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json +++ /dev/null @@ -1,159 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! gvaclassify model={models[vehicle_attributes_recognition][1][network]} name=classification", - " ! appsink name=destination"], - "description": "Object Classification using gstreamer gvadetect & gvaclassify elements, check more about gvadetect at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvadetect and gvaclassify at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvaclassify", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "classification-device": { - "element": { - "name": "classification", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "batch-size": { - "element": - [ {"name":"detection", - "property":"batch-size"}, - {"name":"classification", - "property":"batch-size"} - ], - "type": "integer" - }, - "inference-interval": { - "element": - [ {"name":"detection", - "property":"inference-interval"}, - {"name":"classification", - "property":"inference-interval"} - ], - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - }, - "device-extensions": { - "element": - [ {"name":"detection", - "property":"device-extensions"}, - {"name":"classification", - "property":"device-extensions"} - ], - "type": "string" - }, - "cpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"cpu-throughput-streams"}, - {"name":"classification", - "property":"cpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "gpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"gpu-throughput-streams"}, - {"name":"classification", - "property":"gpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "ie-config": { - "element": - [ {"name":"detection", - "property":"ie-config"}, - {"name":"classification", - "property":"ie-config"} - ], - "type": "string" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "classification-model-instance-id": { - "element": { - "name": "classification", - "property": "model-instance-id" - }, - "type": "string" - }, - "pre-process-backend": { - "element": - [ {"name":"detection", - "property":"pre-process-backend"}, - {"name":"classification", - "property":"pre-process-backend"} - ], - "type": "string" - }, - "nireq": { - "element": - [ {"name":"detection", - "property":"nireq"}, - {"name":"classification", - "property":"nireq"} - ], - "type": "integer", - "minimum": 1, - "maximum": 64 - }, - "reshape": { - "element": - [ {"name":"detection", - "property":"reshape"}, - {"name":"classification", - "property":"reshape"} - ], - "type": "boolean" - }, - "reshape-height": { - "element": - [ {"name":"detection", - "property":"reshape-height"}, - {"name":"classification", - "property":"reshape-height"} - ], - "type": "integer" - }, - 
"reshape-width": { - "element": - [ {"name":"detection", - "property":"reshape-width"}, - {"name":"classification", - "property":"reshape-width"} - ], - "type": "integer" - }, - "object-class": { - "element": "classification", - "type": "string", - "default": "vehicle" - }, - "reclassify-interval": { - "element": "classification", - "type": "integer" - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_detection/debug_person_vehicle_bike_detection/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/debug_person_vehicle_bike_detection/pipeline.json deleted file mode 100755 index a73a9f9..0000000 --- a/samples/ava_ai_extension/pipelines/object_detection/debug_person_vehicle_bike_detection/pipeline.json +++ /dev/null @@ -1,95 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! tee name = t ! queue ! gvawatermark ! videoconvert ! jpegenc", - " ! multifilesink name=filesink t. ! queue", - " ! appsink name=destination"], - "description": "Object Detection using gstreamer gvadetect element, check more about gvadetect at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvadetect", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "batch-size": { - "element": "detection", - "type": "integer" - }, - "inference-interval": { - "element": "detection", - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - }, - "device-extensions": { - "element": "detection", - "type": "string" - }, - "cpu-throughput-streams": { - "element": "detection", - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "gpu-throughput-streams": { - "element": "detection", - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "ie-config": { - "element": "detection", - "type": "string" - }, - "pre-process-backend": { - "element": "detection", - "type": "string" - }, - "nireq": { - "element": "detection", - "type": "integer", - "minimum": 1, - "maximum": 64 - }, - "reshape": { - "element": "detection", - "type": "boolean" - }, - "reshape-height": { - "element": "detection", - "type": "integer" - }, - "reshape-width": { - "element": "detection", - "type": "integer" - }, - "threshold": { - "element": "detection", - "type": "number" - }, - "location": { - "element": "filesink", - "type":"string", - "default":"/tmp/frame_%07d.jpeg" - }, - "max-files": { - "element": "filesink", - "type":"integer", - "default": 10 - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json deleted file mode 100644 index b732624..0000000 --- a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! gvapython name=object-zone-count class=ObjectZoneCount module=/home/video-analytics-serving/extensions/spatial_analytics/object_zone_count.py", - " ! 
appsink name=destination"], - "description": "Object Detection using gstreamer gvapython element to transmit Spatial Analytics events", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "inference-interval": { - "element": "detection", - "type": "integer" - }, - "threshold": { - "element": "detection", - "type": "number" - }, - "object-zone-count-config": { - "element": { - "name": "object-zone-count", - "property": "kwarg", - "format": "json" - }, - "type": "object", - "properties": { - "zones": { - "type": "array", - "items": { - "type": "object" - } - }, - "enable_watermark": { - "type": "boolean" - }, - "log_level": { - "type": "string" - } - } - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_person/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_person/pipeline.json deleted file mode 100644 index 731cffe..0000000 --- a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_person/pipeline.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[object_detection][person][network]} name=detection", - " ! gvapython name=object-zone-count class=ObjectZoneCount module=/home/video-analytics-serving/extensions/spatial_analytics/object_zone_count.py", - " ! appsink name=destination"], - "description": "Object Detection using gstreamer gvapython element to transmit Spatial Analytics events", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "inference-interval": { - "element": "detection", - "type": "integer" - }, - "threshold": { - "element": "detection", - "type": "number" - }, - "object-zone-count-config": { - "element": { - "name": "object-zone-count", - "property": "kwarg", - "format": "json" - }, - "type": "object", - "properties": { - "zones": { - "type": "array", - "items": { - "type": "object" - } - }, - "enable_watermark": { - "type": "boolean" - }, - "log_level": { - "type": "string" - } - } - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/README.md b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/README.md deleted file mode 100644 index c606a9b..0000000 --- a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/README.md +++ /dev/null @@ -1,5 +0,0 @@ -## Extension Configuration - Spatial Analytics - -### Zone Detection for Vehicles - -The `object_detection/zone_event_vehicle` pipeline is a copy of the `object_detection/zone_events` updated to use a model for vehicle detection. 
diff --git a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/pipeline.json deleted file mode 100644 index 096a09c..0000000 --- a/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/pipeline.json +++ /dev/null @@ -1,57 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[object_detection][vehicle][network]} name=detection", - " ! gvapython name=object-zone-count class=ObjectZoneCount module=/home/video-analytics-serving/extensions/spatial_analytics/object_zone_count.py", - " ! appsink name=destination"], - "description": "Spatial Analytics zone events with model trained for vehicle detection", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "inference-interval": { - "element": "detection", - "type": "integer" - }, - "threshold": { - "element": "detection", - "type": "number" - }, - "object-zone-count-config": { - "element": { - "name": "object-zone-count", - "property": "kwarg", - "format": "json" - }, - "type": "object", - "properties": { - "zones": { - "type": "array", - "items": { - "type": "object" - } - }, - "enable_watermark": { - "type": "boolean" - }, - "log_level": { - "type": "string" - } - } - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json deleted file mode 100755 index 52d67f4..0000000 --- a/samples/ava_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! 
appsink name=destination"], - "description": "Object Detection using gstreamer gvadetect element, check more about gvadetect at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvadetect", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "batch-size": { - "element": "detection", - "type": "integer" - }, - "inference-interval": { - "element": "detection", - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - }, - "device-extensions": { - "element": "detection", - "type": "string" - }, - "cpu-throughput-streams": { - "element": "detection", - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "gpu-throughput-streams": { - "element": "detection", - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "ie-config": { - "element": "detection", - "type": "string" - }, - "pre-process-backend": { - "element": "detection", - "type": "string" - }, - "nireq": { - "element": "detection", - "type": "integer", - "minimum": 1, - "maximum": 64 - }, - "reshape": { - "element": "detection", - "type": "boolean" - }, - "reshape-height": { - "element": "detection", - "type": "integer" - }, - "reshape-width": { - "element": "detection", - "type": "integer" - }, - "threshold": { - "element": "detection", - "type": "number" - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json b/samples/ava_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json deleted file mode 100755 index abaf6b4..0000000 --- a/samples/ava_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json +++ /dev/null @@ -1,187 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! queue ! gvatrack name=tracking", - " ! gvaclassify model={models[vehicle_attributes_recognition][1][network]} name=classification", - " ! tee name = t ! queue ! gvawatermark ! videoconvert ! jpegenc", - " ! multifilesink name=filesink t. ! queue", - " ! 
appsink name=destination"], - "description": "Object Tracking using gstreamer gvadetect, gvaclassify & gvatrack elements, check more about gvadetect at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvadetect, gvaclassify at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvaclassify and gvatrack at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvatrack", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "classification-device": { - "element": { - "name": "classification", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "tracking-device": { - "element": - [ {"name":"tracking", - "property":"device"} - ], - "type": "string", - "enum": [ - "CPU", - "VPU" - ] - }, - "batch-size": { - "element": - [ {"name":"detection", - "property":"batch-size"}, - {"name":"classification", - "property":"batch-size"} - ], - "type": "integer" - }, - "inference-interval": { - "element": - [ {"name":"detection", - "property":"inference-interval"}, - {"name":"classification", - "property":"inference-interval"} - ], - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - }, - "device-extensions": { - "element": - [ {"name":"detection", - "property":"device-extensions"}, - {"name":"classification", - "property":"device-extensions"} - ], - "type": "string" - }, - "cpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"cpu-throughput-streams"}, - {"name":"classification", - "property":"cpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "gpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"gpu-throughput-streams"}, - {"name":"classification", - "property":"gpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "ie-config": { - "element": - [ {"name":"detection", - "property":"ie-config"}, - {"name":"classification", - "property":"ie-config"} - ], - "type": "string" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "classification-model-instance-id": { - "element": { - "name": "classification", - "property": "model-instance-id" - }, - "type": "string" - }, - "pre-process-backend": { - "element": - [ {"name":"detection", - "property":"pre-process-backend"}, - {"name":"classification", - "property":"pre-process-backend"} - ], - "type": "string" - }, - "nireq": { - "element": - [ {"name":"detection", - "property":"nireq"}, - {"name":"classification", - "property":"nireq"} - ], - "type": "integer", - "minimum": 1, - "maximum": 64 - }, - "reshape": { - "element": - [ {"name":"detection", - "property":"reshape"}, - {"name":"classification", - "property":"reshape"} - ], - "type": "boolean" - }, - "reshape-height": { - "element": - [ {"name":"detection", - "property":"reshape-height"}, - {"name":"classification", - "property":"reshape-height"} - ], - "type": "integer" - }, - "reshape-width": { - "element": - [ {"name":"detection", - "property":"reshape-width"}, - {"name":"classification", - "property":"reshape-width"} - ], - "type": "integer" - }, - "object-class": { - "element": "classification", - "type": "string", - "default": "vehicle" - }, - "reclassify-interval": { - "element": "classification", - "type": "integer" - }, - "tracking-type": { - "element": "tracking", - "type": "string" - }, - 
"location": { - "element": "filesink", - "type":"string", - "default":"/tmp/frame_%07d.jpeg" - }, - "max-files": { - "element": "filesink", - "type":"integer", - "default": 10 - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json b/samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json deleted file mode 100755 index 1c0ad0c..0000000 --- a/samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json +++ /dev/null @@ -1,108 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! queue ! gvatrack name=tracking", - " ! gvaclassify model={models[vehicle_attributes_recognition][1][network]} name=classification", - " ! gvapython class=ObjectLineCrossing module=/home/video-analytics-serving/extensions/spatial_analytics/object_line_crossing.py name=object-line-crossing", - " ! appsink name=destination"], - "description": "Object Tracking pipeline with Line Crossing Tracking module", - "parameters": { - "type": "object", - "properties": { - "object-line-crossing-config": { - "element": { - "name": "object-line-crossing", - "property": "kwarg", - "format": "json" - }, - "type": "object", - "properties": { - "lines": { - "type": "array", - "items": { - "type": "object" - } - }, - "enable_watermark": { - "type": "boolean" - }, - "log_level": { - "type": "string" - } - } - }, - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string" - }, - "classification-device": { - "element": { - "name": "classification", - "property": "device" - }, - "type": "string" - }, - "tracking-device": { - "element": - [ {"name":"tracking", - "property":"device"} - ], - "type": "string" - }, - "inference-interval": { - "element": - [ {"name":"detection", - "property":"inference-interval"}, - {"name":"classification", - "property":"inference-interval"} - ], - "type": "integer" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "classification-model-instance-id": { - "element": { - "name": "classification", - "property": "model-instance-id" - }, - "type": "string" - }, - "object-class": { - "element": "classification", - "type": "string", - "default": "vehicle" - }, - "reclassify-interval": { - "element": "classification", - "type": "integer" - }, - "tracking-type": { - "element": "tracking", - "type": "string" - }, - "detection-threshold": { - "element": { - "name": "detection", - "property": "threshold" - }, - "type": "number" - }, - "classification-threshold": { - "element": { - "name": "classification", - "property": "threshold" - }, - "type": "number" - } - } - } -} diff --git a/samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json b/samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json deleted file mode 100755 index 8594a30..0000000 --- a/samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json +++ /dev/null @@ -1,175 +0,0 @@ -{ - "type": "GStreamer", - "template": ["appsrc name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][1][network]} name=detection", - " ! queue ! gvatrack name=tracking", - " ! gvaclassify model={models[vehicle_attributes_recognition][1][network]} name=classification", - " ! 
appsink name=destination"], - "description": "Object Tracking using gstreamer gvadetect, gvaclassify & gvatrack elements, check more about gvadetect at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvadetect, gvaclassify at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvaclassify and gvatrack at https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvatrack", - "parameters": { - "type": "object", - "properties": { - "detection-device": { - "element": { - "name": "detection", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "classification-device": { - "element": { - "name": "classification", - "property": "device" - }, - "type": "string", - "default": "CPU" - }, - "tracking-device": { - "element": - [ {"name":"tracking", - "property":"device"} - ], - "type": "string", - "enum": [ - "CPU", - "VPU" - ] - }, - "batch-size": { - "element": - [ {"name":"detection", - "property":"batch-size"}, - {"name":"classification", - "property":"batch-size"} - ], - "type": "integer" - }, - "inference-interval": { - "element": - [ {"name":"detection", - "property":"inference-interval"}, - {"name":"classification", - "property":"inference-interval"} - ], - "type": "integer", - "minimum": 1, - "maximum": 4294967295 - }, - "device-extensions": { - "element": - [ {"name":"detection", - "property":"device-extensions"}, - {"name":"classification", - "property":"device-extensions"} - ], - "type": "string" - }, - "cpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"cpu-throughput-streams"}, - {"name":"classification", - "property":"cpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "gpu-throughput-streams": { - "element": - [ {"name":"detection", - "property":"gpu-throughput-streams"}, - {"name":"classification", - "property":"gpu-throughput-streams"} - ], - "type": "integer", - "minimum": 0, - "maximum": 4294967295 - }, - "ie-config": { - "element": - [ {"name":"detection", - "property":"ie-config"}, - {"name":"classification", - "property":"ie-config"} - ], - "type": "string" - }, - "detection-model-instance-id": { - "element": { - "name": "detection", - "property": "model-instance-id" - }, - "type": "string" - }, - "classification-model-instance-id": { - "element": { - "name": "classification", - "property": "model-instance-id" - }, - "type": "string" - }, - "pre-process-backend": { - "element": - [ {"name":"detection", - "property":"pre-process-backend"}, - {"name":"classification", - "property":"pre-process-backend"} - ], - "type": "string" - }, - "nireq": { - "element": - [ {"name":"detection", - "property":"nireq"}, - {"name":"classification", - "property":"nireq"} - ], - "type": "integer", - "minimum": 1, - "maximum": 64 - }, - "reshape": { - "element": - [ {"name":"detection", - "property":"reshape"}, - {"name":"classification", - "property":"reshape"} - ], - "type": "boolean" - }, - "reshape-height": { - "element": - [ {"name":"detection", - "property":"reshape-height"}, - {"name":"classification", - "property":"reshape-height"} - ], - "type": "integer" - }, - "reshape-width": { - "element": - [ {"name":"detection", - "property":"reshape-width"}, - {"name":"classification", - "property":"reshape-width"} - ], - "type": "integer" - }, - "object-class": { - "element": "classification", - "type": "string", - "default": "vehicle" - }, - "reclassify-interval": { - "element": "classification", - "type": "integer" - }, - "tracking-type": { - "element": "tracking", - "type": "string" - } - } - } 
-} diff --git a/samples/ava_ai_extension/pipelines/video_decode/app_dst/pipeline.json b/samples/ava_ai_extension/pipelines/video_decode/app_dst/pipeline.json deleted file mode 100644 index 2a97174..0000000 --- a/samples/ava_ai_extension/pipelines/video_decode/app_dst/pipeline.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "type": "GStreamer", - "template": ["uridecodebin name=source", - " ! appsink name=destination"], - "description": "Decode Pipeline" - -} diff --git a/samples/ava_ai_extension/requirements.txt b/samples/ava_ai_extension/requirements.txt deleted file mode 100644 index 9c6b3ea..0000000 --- a/samples/ava_ai_extension/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -grpcio==1.33.2 -protobuf==3.13.0 diff --git a/samples/ava_ai_extension/sampleframes/sample01.png b/samples/ava_ai_extension/sampleframes/sample01.png deleted file mode 100644 index 1ff7754..0000000 Binary files a/samples/ava_ai_extension/sampleframes/sample01.png and /dev/null differ diff --git a/samples/ava_ai_extension/server/__init__.py b/samples/ava_ai_extension/server/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/samples/ava_ai_extension/server/__main__.py b/samples/ava_ai_extension/server/__main__.py deleted file mode 100644 index 5e14b3c..0000000 --- a/samples/ava_ai_extension/server/__main__.py +++ /dev/null @@ -1,153 +0,0 @@ -''' -* Copyright (C) 2019-2020 Intel Corporation. -* -* SPDX-License-Identifier: MIT License -* -***** -* -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' - -import argparse -import os -import sys -from concurrent import futures -import grpc -import extension_pb2_grpc # pylint: disable=import-error -from vaserving.vaserving import VAServing -from vaserving.common.utils.logging import get_logger -from media_graph_extension import MediaGraphExtension -from samples.ava_ai_extension.common.exception_handler import log_exception - -PROGRAM_NAME = "DL Streamer Edge AI Extension" - -def parse_args(args=None, program_name=PROGRAM_NAME): - - parser = argparse.ArgumentParser( - prog=program_name, - fromfile_prefix_chars="@", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - - parser.add_argument( - "-p", - action="store", - dest="port", - help="Port number to serve gRPC server", - type=int, - default=int(os.getenv("PORT", "5001")), - ) - - parser.add_argument( - "--pipeline-name", - action="store", - dest="pipeline_name", - help="name of the pipeline to run", - type=str, - default=os.getenv("PIPELINE_NAME", "object_detection"), - ) - - parser.add_argument( - "--pipeline-version", - action="store", - dest="pipeline_version", - help="name of the pipeline to run", - type=str, - default=os.getenv("PIPELINE_VERSION", "person_vehicle_bike_detection"), - ) - parser.add_argument( - "--debug", - action="store_true", - dest="debug", - help="Use debug pipeline", - default=(os.getenv("DEBUG_PIPELINE", None) is not None), - ) - - parser.add_argument( - "--max-running-pipelines", - action="store", - dest="max_running_pipelines", - type=int, - default=int(os.getenv("MAX_RUNNING_PIPELINES", "10")), - ) - - if isinstance(args, dict): - args = ["--{}={}".format(key, value) for key, value in args.items() if value] - - return parser.parse_known_args(args) - - -def append_default_server_args(va_serving_args, max_running_pipelines): - va_serving_args.append("--max_running_pipelines") - va_serving_args.append(str(max_running_pipelines)) - return va_serving_args - - -if __name__ == "__main__": - - args, va_serving_args = parse_args() - logger = get_logger("Main") - server = None - try: - server_args = append_default_server_args( - va_serving_args, args.max_running_pipelines - ) - - try: - VAServing.start(server_args) - except Exception as error: - logger.error(error) - logger.error("Exception encountered during VAServing start") - raise - - if ( - (args.pipeline_name and not args.pipeline_version) - or (not args.pipeline_name and args.pipeline_version) - ): - logger.error("Pipeline name or version set but not both") - raise ValueError('Pipeline name or version set but not both') - - # create gRPC server and start running - server = grpc.server( - futures.ThreadPoolExecutor(max_workers=args.max_running_pipelines) - ) - extension_pb2_grpc.add_MediaGraphExtensionServicer_to_server( - MediaGraphExtension( - args.pipeline_name, - args.pipeline_version, - args.debug, - ), - server, - ) - server.add_insecure_port(f"[::]:{args.port}") - logger.info("Starting %s on port: %d", PROGRAM_NAME, args.port) - server.start() - server.wait_for_termination() - - except (KeyboardInterrupt, SystemExit, Exception): - log_exception() - sys.exit(-1) - finally: - if server: - server.stop(None) - VAServing.stop() diff --git a/samples/ava_ai_extension/server/media_graph_extension.py 
b/samples/ava_ai_extension/server/media_graph_extension.py deleted file mode 100644 index a4ac1af..0000000 --- a/samples/ava_ai_extension/server/media_graph_extension.py +++ /dev/null @@ -1,544 +0,0 @@ -''' -* Copyright (C) 2019-2020 Intel Corporation. -* -* SPDX-License-Identifier: MIT License -* -***** -* -* MIT License -* -* Copyright (c) Microsoft Corporation. -* -* Permission is hereby granted, free of charge, to any person obtaining a copy -* of this software and associated documentation files (the "Software"), to deal -* in the Software without restriction, including without limitation the rights -* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -* copies of the Software, and to permit persons to whom the Software is -* furnished to do so, subject to the following conditions: -* -* The above copyright notice and this permission notice shall be included in all -* copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -* SOFTWARE -''' - -import os -import json -from queue import Queue -import tempfile -import time -import datetime -import uuid -from enum import Enum -import jsonschema - -import samples.ava_ai_extension.common.grpc_autogen.inferencing_pb2 as inferencing_pb2 -import samples.ava_ai_extension.common.grpc_autogen.media_pb2 as media_pb2 -import samples.ava_ai_extension.common.grpc_autogen.extension_pb2 as extension_pb2 -import samples.ava_ai_extension.common.grpc_autogen.extension_pb2_grpc as extension_pb2_grpc - -from samples.ava_ai_extension.common.shared_memory import SharedMemoryManager -from samples.ava_ai_extension.common.exception_handler import log_exception -import samples.ava_ai_extension.common.extension_schema as extension_schema - -from vaserving.vaserving import VAServing -from vaserving.pipeline import Pipeline -from vaserving.gstreamer_app_source import GvaFrameData -from vaserving.common.utils.logging import get_logger - - -class TransferType(Enum): - BYTES = 1 # Embedded Content - REFERENCE = 2 # Shared Memory - HANDLE = 3 # Reserved - - -class State: - def __init__(self, media_stream_descriptor): - try: - # media descriptor holding input data format - self.media_stream_descriptor = media_stream_descriptor - - # Get how data will be transferred - if ( - self.media_stream_descriptor.WhichOneof("data_transfer_properties") - is None - ): - self.content_transfer_type = TransferType.BYTES - elif self.media_stream_descriptor.HasField( - "shared_memory_buffer_transfer_properties" - ): - self.content_transfer_type = TransferType.REFERENCE - elif self.media_stream_descriptor.HasField( - "shared_memory_segments_transfer_properties" - ): - self.content_transfer_type = TransferType.HANDLE - - # Setup if shared mem used - if self.content_transfer_type == TransferType.REFERENCE: - # Create shared memory accessor specific to the client - self.shared_memory_manager = SharedMemoryManager( - name=self.media_stream_descriptor.shared_memory_buffer_transfer_properties.handle_name, - size=self.media_stream_descriptor.shared_memory_buffer_transfer_properties.length_bytes, - ) - else: - 
self.shared_memory_manager = None - - except: - log_exception(get_logger("State")) - raise - - -class MediaGraphExtension(extension_pb2_grpc.MediaGraphExtensionServicer): - def __init__( - self, - pipeline, - version, - debug=False, - input_queue_size=1, - ): - self._pipeline = pipeline - self._version = version - self._input_queue_size = input_queue_size - self._logger = get_logger("MediaGraphExtension") - self._debug = debug - self._extension_config_schema = extension_schema.extension_config - self._extension_config_validator = jsonschema.Draft7Validator(schema=self._extension_config_schema, - format_checker=jsonschema.draft7_format_checker) - - def _generate_media_stream_message(self, gva_sample, extensions): - message = json.loads(list(gva_sample.video_frame.messages())[0]) - - msg = extension_pb2.MediaStreamMessage() - msg.ack_sequence_number = message["sequence_number"] - msg.media_sample.timestamp = message["timestamp"] - inferences = msg.media_sample.inferences - events = self._get_events(gva_sample) - - # gvaactionrecognitionbin element has no video frame regions - if not list(gva_sample.video_frame.regions()): - for tensor in gva_sample.video_frame.tensors(): - if tensor.name() == "action": - try: - label = tensor.label() - confidence = tensor.confidence() - classification = inferencing_pb2.Classification( - tag=inferencing_pb2.Tag( - value=label, confidence=confidence - ) - ) - except: - log_exception(self._logger) - raise - inference = inferences.add() - inference.type = ( - # pylint: disable=no-member - inferencing_pb2.Inference.InferenceType.CLASSIFICATION - ) - inference.classification.CopyFrom(classification) - - for region_index, region in enumerate(gva_sample.video_frame.regions()): - - attributes = [] - obj_id = None - obj_label = None - obj_confidence = 0 - obj_left = 0 - obj_width = 0 - obj_top = 0 - obj_height = 0 - - for tensor in region.tensors(): - if tensor.is_detection(): - obj_confidence = region.confidence() - obj_label = region.label() - - obj_left, obj_top, obj_width, obj_height = region.normalized_rect() - if region.object_id(): # Tracking - obj_id = str(region.object_id()) - elif tensor["label"]: # Classification - attr_name = tensor.name() - attr_label = tensor["label"] - attr_confidence = region.confidence() - attributes.append([attr_name, attr_label, attr_confidence]) - - if obj_label is not None: - try: - entity = inferencing_pb2.Entity( - tag=inferencing_pb2.Tag( - value=obj_label, confidence=obj_confidence - ), - box=inferencing_pb2.Rectangle( - l=obj_left, t=obj_top, w=obj_width, h=obj_height - ), - ) - for attr in attributes: - attribute = inferencing_pb2.Attribute( - name=attr[0], value=attr[1], confidence=attr[2] - ) - entity.attributes.append(attribute) - if obj_id: - entity.id = obj_id - except: - log_exception(self._logger) - raise - inference = inferences.add() - inference.type = ( - # pylint: disable=no-member - inferencing_pb2.Inference.InferenceType.ENTITY - ) - if extensions: - for key in extensions: - inference.extensions[key] = extensions[key] - inference.entity.CopyFrom(entity) - self._update_inference_ids(events, inference, region_index) - self._process_events(events, inferences) - return msg - - def _get_events(self, gva_sample): - events = [] - for message in gva_sample.video_frame.messages(): - message_obj = json.loads(message) - if "events" in message_obj.keys(): - events = message_obj["events"] - break - return events - - def _update_inference_ids(self, events, inference, region_index): - for event in events: - for i in 
range(len(event['related-objects'])): - if region_index == event['related-objects'][i]: - if not inference.inference_id: - inference.inference_id = uuid.uuid4().hex - inference.subtype = "objectDetection" - event['related-objects'][i] = inference.inference_id - - def _process_events(self, events, inferences): - for event in events: - self._add_event(inferences, event) - - def _add_event(self, inferences, event): - event_name = "" - event_properties = {} - inference_event = inferences.add() - inference_event.type = ( - # pylint: disable=no-member - inferencing_pb2.Inference.InferenceType.EVENT - ) - inference_event.inference_id = uuid.uuid4().hex - inference_event.subtype = event["event-type"] - - for inference_id in event['related-objects']: - inference_event.related_inferences.append(inference_id) - - for key, value in event.items(): - if key in ('event-type', 'related-objects'): - continue - if "name" in key: - event_name = value - else: - event_properties[key] = str(value) - - inference_event.event.CopyFrom(inferencing_pb2.Event( - name=event_name, - properties=event_properties, - )) - - def _generate_gva_sample(self, client_state, request): - - new_sample = None - - try: - # Get reference to raw bytes - if client_state.content_transfer_type == TransferType.BYTES: - raw_bytes = memoryview(request.media_sample.content_bytes.bytes) - elif client_state.content_transfer_type == TransferType.REFERENCE: - # Data sent over shared memory buffer - address_offset = request.media_sample.content_reference.address_offset - length_bytes = request.media_sample.content_reference.length_bytes - - # Get memory reference to (in readonly mode) data sent over shared memory - raw_bytes = client_state.shared_memory_manager.read_bytes( - address_offset, length_bytes - ) - - # Get encoding details of the media sent by client - encoding = ( - client_state.media_stream_descriptor.media_descriptor.video_frame_sample_format.encoding - ) - - # Handle RAW content (Just place holder for the user to handle each variation...) - if ( - encoding - == client_state.media_stream_descriptor.media_descriptor.video_frame_sample_format.Encoding.RAW - ): - pixel_format = ( - client_state.media_stream_descriptor.media_descriptor.video_frame_sample_format.pixel_format - ) - caps_format = None - - if pixel_format == media_pb2.VideoFrameSampleFormat.PixelFormat.RGBA: - caps_format = "RGBA" - elif pixel_format == media_pb2.VideoFrameSampleFormat.PixelFormat.RGB24: - caps_format = "RGB" - elif pixel_format == media_pb2.VideoFrameSampleFormat.PixelFormat.BGR24: - caps_format = "BGR" - if caps_format is not None: - caps = "".join( - ( - "video/x-raw,format=", - caps_format, - ",width=", - str( - client_state.media_stream_descriptor. - media_descriptor.video_frame_sample_format.dimensions.width - ), - ",height=", - str( - client_state.media_stream_descriptor. 
- media_descriptor.video_frame_sample_format.dimensions.height - ), - ) - ) - new_sample = GvaFrameData( - bytes(raw_bytes), - caps, - message={ - "sequence_number": request.sequence_number, - "timestamp": request.media_sample.timestamp, - }, - ) - else: - self._logger.info("Sample format is not RAW") - except: - log_exception(self._logger) - raise - return new_sample - - def _get_queued_samples(self, queue, block=False): - samples = [] - if block: - samples.append(queue.get()) - while not queue.empty(): - samples.append(queue.get()) - return samples - - def _validate_ext_config_against_schema(self, extension_config): - try: - self._extension_config_validator.validate(extension_config) - except jsonschema.exceptions.ValidationError as err: - self._logger.error("Error occured during validation: {}".format(err.message)) - raise - - def _set_debug_properties(self, pipeline_config): - if self._debug: - pipeline_config["version"] = "debug_" + pipeline_config["version"] - timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") - location = os.path.join( - tempfile.gettempdir(), "vaserving", self._version, timestamp - ) - os.makedirs(os.path.abspath(location)) - debug_parameters = { - "location": os.path.join(location, "frame_%07d.jpeg") - } - pipeline_config["parameters"].update(debug_parameters) - - return pipeline_config - - def _set_pipeline_properties(self, request): - # Set deployment pipeline name, version, and args if set - - pipeline_config = { - "name" : self._pipeline, - "version" : self._version, - "parameters" : {}, - "frame-destination" : {}, - "extensions" : {} - } - - # Set pipeline values if passed through request - extension_configuration = None - if request.media_stream_descriptor.extension_configuration: - # Load the extension_config - try: - extension_configuration = json.loads(request.media_stream_descriptor.extension_configuration) - except ValueError: - self._logger.error("Decoding extension_configuration field has failed: {}".format( - request.media_stream_descriptor.extension_configuration)) - raise - # Validate the extension_config against the schema - self._validate_ext_config_against_schema(extension_configuration) - - # If extension_config has pipeline values, set the properties - if "pipeline" in extension_configuration: - pipeline_config.update(extension_configuration["pipeline"]) - - # Reject pipeline if it has debug in its version - if pipeline_config["version"].startswith("debug"): - raise ValueError("Cannot specify debug pipelines in request") - - # Set debug properties if debug flag is set - pipeline_config = self._set_debug_properties(pipeline_config) - - return pipeline_config - - # gRPC stubbed function - # client/gRPC will call this function to send frames/descriptions - def ProcessMediaStream(self, requestIterator, context): - requests_received = 0 - responses_sent = 0 - # First message from the client is (must be) MediaStreamDescriptor - request = next(requestIterator) - requests_received += 1 - # Extract message IDs - request_seq_num = request.sequence_number - request_ack_seq_num = request.ack_sequence_number - # State object per client - client_state = State(request.media_stream_descriptor) - self._logger.info( - "[Received] SeqNum: {0:07d} | " - "AckNum: {1}\nMediaStreamDescriptor:\n{2}".format( - request_seq_num, - request_ack_seq_num, - client_state.media_stream_descriptor, - ) - ) - # First message response ... 
- media_stream_message = extension_pb2.MediaStreamMessage( - sequence_number=1, - ack_sequence_number=request_seq_num, - media_stream_descriptor=extension_pb2.MediaStreamDescriptor( - media_descriptor=media_pb2.MediaDescriptor( - timescale=client_state.media_stream_descriptor.media_descriptor.timescale - ) - ), - ) - - responses_sent += 1 - yield media_stream_message - - pipeline_config = self._set_pipeline_properties(request) - - pipeline_name = pipeline_config["name"] - pipeline_version = pipeline_config["version"] - pipeline_parameters = pipeline_config.get("parameters") - extensions = pipeline_config.get("extensions") - frame_destination = pipeline_config.get("frame-destination") - - self._logger.info("Pipeline Name : {}".format(pipeline_name)) - self._logger.info("Pipeline Version : {}".format(pipeline_version)) - self._logger.info("Pipeline Parameters : {}".format(pipeline_parameters)) - self._logger.info("Frame Destination : {}".format(frame_destination)) - detect_input = Queue(maxsize=self._input_queue_size) - detect_output = Queue() - - destination = { - "metadata":{ - "type": "application", - "class": "GStreamerAppDestination", - "output": detect_output, - "mode": "frames", - } - } - - if frame_destination: - destination["frame"] = frame_destination - - # Start object detection pipeline - # It will wait until it receives frames via the detect_input queue - vas_pipeline = VAServing.pipeline(pipeline_name, pipeline_version) - vas_pipeline.start( - source={ - "type": "application", - "class": "GStreamerAppSource", - "input": detect_input, - "mode": "push", - }, - destination=destination, - parameters=pipeline_parameters, - ) - - # Process rest of the MediaStream message sequence - for request in requestIterator: - try: - if requests_received - responses_sent >= self._input_queue_size: - queued_output = self._get_queued_samples(detect_output, block=True) - else: - queued_output = [] - # Read request id, sent by client - request_seq_num = request.sequence_number - self._logger.debug("[Received] SeqNum: {0:07d}".format(request_seq_num)) - requests_received += 1 - input_sample = self._generate_gva_sample(client_state, request) - detect_input.put(input_sample) - queued_output.extend(self._get_queued_samples(detect_output)) - if context.is_active(): - # If any processed samples are queued, drain them and yield back to client - for output_sample in queued_output: - if output_sample: - media_stream_message = self._generate_media_stream_message( - output_sample, extensions - ) - responses_sent += 1 - self._logger.debug( - "[Sent] AckSeqNum: {0:07d}".format( - media_stream_message.ack_sequence_number - ) - ) - yield media_stream_message - else: - break - if vas_pipeline.status().state.stopped(): - break - except: - log_exception(self._logger) - raise - - if vas_pipeline.status().state.stopped(): - try: - raise Exception("Pipeline encountered an issue, pipeline state: {}".format( - vas_pipeline.status().state)) - except: - log_exception(self._logger) - raise - - # After the server has finished processing all the request iterator objects - # Push a None object into the input queue. 
- # When the None object comes out of the output queue, we know we've finished - # processing all requests - output_sample = None - if not vas_pipeline.status().state.stopped(): - detect_input.put(None) - output_sample = detect_output.get() - while output_sample: - media_stream_message = self._generate_media_stream_message(output_sample, extensions) - responses_sent += 1 - self._logger.debug( - "[Sent] AckSeqNum: {0:07d}".format( - media_stream_message.ack_sequence_number - ) - ) - if context.is_active(): - yield media_stream_message - output_sample = detect_output.get() - - # One final check on the pipeline to ensure it worked properly - status = vas_pipeline.wait(10) - self._logger.info("Pipeline Ended Status: {}".format(status)) - if (not status) or (status.state == Pipeline.State.ERROR): - raise Exception("Pipeline did not complete successfully") - - self._logger.info( - "Done processing messages: Received: {}, Sent: {}".format( - requests_received, responses_sent - ) - ) - self._logger.debug( - "MediaStreamDescriptor:\n{0}".format(client_state.media_stream_descriptor) - ) diff --git a/samples/ava_ai_extension/server/readme.md b/samples/ava_ai_extension/server/readme.md deleted file mode 100644 index 7f5e2f4..0000000 --- a/samples/ava_ai_extension/server/readme.md +++ /dev/null @@ -1,18 +0,0 @@ -## Using gRPC Server -To build and run the server, open a terminal and issue these commands: -``` -../docker/build.sh -../docker/run_server.sh -``` - -To confirm connectivity to the server, open a second terminal and run the gRPC client: -``` -../docker/run_client.sh -[AIXC] [MainThread ] [INFO]: ======================= -[AIXC] [MainThread ] [INFO]: Options for __main__.py -[AIXC] [MainThread ] [INFO]: ======================= - -[AIXC] [MainThread ] [INFO]: Client finished execution -``` - -Refer to [Documentation here](../README.md#running-the-edge-ai-extension-module) for more details and settings. diff --git a/samples/edgex_bridge/README.md b/samples/edgex_bridge/README.md index 267846b..b7ea799 100644 --- a/samples/edgex_bridge/README.md +++ b/samples/edgex_bridge/README.md @@ -6,6 +6,10 @@ This sample demonstrates how to emit events into [EdgeX Foundry](http://edgexfou # Overview + +> **WARNING:** This sample is currently encountering issues with integration to upstream EdgeXFoundry repositories. This causes problems on launch of the EdgeX stack. Monitor issue [#97](https://github.com/intel/video-analytics-serving/issues/97) for workarounds. + + ## EdgeX Foundry EdgeX Foundry consists of vendor-neutral open-source middleware that provides a common framework to assemble and deploy solutions that utilize edge-based sensors and interoperates with operational technology and information technology systems. Especially suited for industrial IoT computing, EdgeX consists of a core set of loosely coupled microservices organized in different layers. At the [_South Side_](https://en.wikipedia.org/wiki/EdgeX_Foundry) the framework provides extensive integration of devices and software by use of a number of available device services. Each EdgeX device service is able to support a range of devices so long as they conform to a particular protocol. EdgeX also includes a [device-sdk](https://github.com/edgexfoundry/device-sdk-go/) to create new device services as needed. @@ -40,23 +44,24 @@ This self-contained tutorial walks through a working example to fetch and prepar 1. 
Clone this repository and prepare EdgeX integration: ``` - $ git clone https://github.com/intel/video-analytics-serving.git vasEdge - $ cd vasEdge + git clone https://github.com/intel/video-analytics-serving.git vasEdge + cd vasEdge ``` 1. Run this command to automatically fetch the EdgeX developer scripts repository. These contain the Hanoi release of [EdgeX docker compose files](https://github.com/edgexfoundry/developer-scripts/blob/master/releases/hanoi/compose-files/README.md) we will use to bootstrap our launch of the EdgeX Framework. This script also pulls the base configuration from the device-mqtt container. When it completes you will find a `./edgex` subfolder is created with these contents. ``` - $ ./samples/edgex_bridge/fetch_edgex.sh - - $ ls ./samples/edgex_bridge/edgex + ./samples/edgex_bridge/fetch_edgex.sh + ls ./samples/edgex_bridge/edgex + ``` + ``` developer-scripts docker-compose.yml res ``` 1. Build the sample edgex-video-analytics-serving image. ``` - $ ./samples/edgex_bridge/docker/build.sh + ./samples/edgex_bridge/docker/build.sh ``` This also generates the needed EdgeX resources to augment the `./edgex` project subfolder located on your host (created in step 2). To do this the build script has invoked the [edgex_bridge.py](./edgex_bridge.md) entrypoint, passing in the `--generate` parameter. In this way, the sample will inform EdgeX to listen for VA Serving events as they are emitted to the MQTT broker. @@ -67,10 +72,10 @@ This self-contained tutorial walks through a working example to fetch and prepar 1. Now that we have the docker-compose and override configuration for device-mqtt prepared, we are ready to launch the EdgeX platform which will now include our built image. In the host terminal session, launch EdgeX platform. > **Note:** This sample can only run with Display. - ``` - $ xhost local:root - $ export DISPLAY= - $ ./samples/edgex_bridge/start_edgex.sh + ```bash + xhost local:root + export DISPLAY= + ./samples/edgex_bridge/start_edgex.sh ``` NOTE: The first time this runs, each of the EdgeX microservice images will download to your host. Subsequent runs will make use of these as containers get started. @@ -78,7 +83,9 @@ NOTE: The first time this runs, each of the EdgeX microservice images will downl 1. You will find that EdgeX Core Data has received inference events for vehicles detected on frames within the source video input. With this out-of-the-box configuration there are no other events being transmitted to EdgeX, so you can inspect the count of events received using this command: ``` - $ curl -i --get http://localhost:48080/api/v1/event/count + curl -i --get http://localhost:48080/api/v1/event/count + ``` + ``` HTTP/1.1 200 OK Date: Mon, 29 Mar 2021 03:19:18 GMT Content-Length: 3 @@ -91,7 +98,9 @@ NOTE: The first time this runs, each of the EdgeX microservice images will downl 1. You are able to explore the event data within EdgeX, by issuing this command. 
For example, filtering to retrieve three (3) vehicle detection events by the registered `videoAnalytics-mqtt` device: ``` - $ curl -i --get http://localhost:48080/api/v1/event/device/videoAnalytics-mqtt/3 + curl -i --get http://localhost:48080/api/v1/event/device/videoAnalytics-mqtt/3 + ``` + ``` HTTP/1.1 200 OK Content-Type: application/json Date: Mon, 05 Apr 2021 04:40:42 GMT @@ -127,9 +136,9 @@ For example, you may wish to remove the visual output by updating the last line After you launch build.sh and start_edgex.sh, you will find these events emitted: ``` - $ curl -i --get http://localhost:48080/api/v1/event/device/videoAnalytics-mqtt/2 - - + curl -i --get http://localhost:48080/api/v1/event/device/videoAnalytics-mqtt/2 + ``` + ``` HTTP/1.1 200 OK Content-Type: application/json Date: Mon, 29 Mar 2021 03:22:11 GMT @@ -144,9 +153,9 @@ After you launch build.sh and start_edgex.sh, you will find these events emitted > TIP: You can also monitor the MQTT broker when troubleshooting connectivity by subscribing with a client right on your host. ``` - $ sudo apt-get update && sudo apt-get install mosquitto-clients + sudo apt-get update && sudo apt-get install mosquitto-clients - $ mosquitto_sub -t edgex_bridge/objects_detected + mosquitto_sub -t edgex_bridge/objects_detected ``` This will reveal events received by the EdgeX MQTT Broker as they scroll by. @@ -163,8 +172,9 @@ After you launch build.sh and start_edgex.sh, you will find these events emitted 1. When finished remember to stop the EdgeX stack. ``` - $ ./samples/edgex_bridge/stop_edgex.sh - + ./samples/edgex_bridge/stop_edgex.sh + ``` + ``` /vasEdge/samples/edgex_bridge$ ./stop_edgex.sh Stopping edgex-kuiper ... done Stopping edgex-app-service-configurable-rules ... done @@ -198,24 +208,29 @@ After you launch build.sh and start_edgex.sh, you will find these events emitted 1. To remove all data persisted in EdgeX docker volumes and quickly re-run this exercise from scratch: ``` - $ ./samples/edgex_bridge/clear_edgex.sh + ./samples/edgex_bridge/clear_edgex.sh - $ ./samples/edgex_bridge/docker/build.sh + ./samples/edgex_bridge/docker/build.sh - $ ./samples/edgex_bridge/start_edgex.sh + ./samples/edgex_bridge/start_edgex.sh ``` 1. If you are in creative mode and want to more quickly update to try new things, you may directly modify the docker-compose-override.yml to alter `command:` parameters, update `environment:` variables and so on. With new input, Docker compose will automatically launch the container with the new parameters. ``` - $ nano ./samples/edgex_bridge/edgex/docker-compose-override.yml + nano ./samples/edgex_bridge/edgex/docker-compose-override.yml + ``` + ``` ... image: edgex-video-analytics-serving:0.5.0 ... command: "--source=https://github.com/intel-iot-devkit/sample-videos/blob/master/car-detection.mp4?raw=true --topic=vehicles_detected" ... - - $ ./samples/edgex_bridge/start_edgex.sh + ``` + ``` + ./samples/edgex_bridge/start_edgex.sh + ``` + ``` edgex-core-data is up-to-date edgex-core-command is up-to-date edgex-app-service-configurable-rules is up-to-date @@ -223,11 +238,10 @@ After you launch build.sh and start_edgex.sh, you will find these events emitted edgex-sys-mgmt-agent is up-to-date Recreating edgex-video-analytics-serving ... Recreating edgex-video-analytics-serving ... done - ``` > TIP: Use an environment variable to dynamically adjust runtime parameters. - ``` + ``` ... command: $ENTRYPOINT_ARGS ... 
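The curl checks above can also be scripted. Here is a minimal sketch, assuming the `requests` package is installed and using the default EdgeX Core Data port (48080) and registered device name (`videoAnalytics-mqtt`) from the earlier examples; the reading fields assume the EdgeX v1 event schema:

```python
import requests

# Same endpoint as the curl example: total events received by EdgeX Core Data.
count = requests.get("http://localhost:48080/api/v1/event/count")
print("Events received:", count.text)

# Fetch the three most recent events from the registered device and print
# each reading's name/value pair.
events = requests.get(
    "http://localhost:48080/api/v1/event/device/videoAnalytics-mqtt/3"
).json()
for event in events:
    for reading in event.get("readings", []):
        print(reading.get("name"), reading.get("value"))
```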
@@ -246,8 +260,9 @@ https://github.com/edgexfoundry/edgex-go/blob/master/README.md#running-edgex-with-docker To independently run edgex-video-analytics-serving in DEV mode, issue this command: ``` -$ ./samples/edgex_bridge/docker/run.sh --dev - +./samples/edgex_bridge/docker/run.sh --dev +``` +``` vaserving@your-hostname:~$ _ ``` diff --git a/samples/record_frames/README.md index 780ace2..d012554 100644 --- a/samples/record_frames/README.md +++ b/samples/record_frames/README.md @@ -9,32 +9,18 @@ Inference results generated by VA Serving pipelines can be sent to a "destinatio often to generate "events" (e.g. an object is in a pre-defined zone). In some scenarios the observing module may wish to save or display the frame that generated the event. -This document first explains pipeline updates and parameters relevant for this sample. Next, general steps to run a pipeline with frames stored to a specific location and for a client to retrieve a specific frame later. Finally, steps for running the out of the box sample. - -This sample shows how to configure a pipeline to save frames to disk while sending inference results to an MQTT client. -The MQTT client is designed to receive inference results and to retrieve and display the frame where the first vehicle was detected. - -Enabling a pipeline and client for saving and retrieving frames requires the following steps: -1. Extend an existing pipeline to save frames -2. Add a pipeline parameter that defines where to save frames (termed frame store) with a given file name pattern -3. Run the REST service volume mounting the pipeline's frame store to the host -4. Start the pipeline specifying path frame store directory -5. Start an mqtt client that looks for a appearance of a vehicle and displays the frame when it is first detected - -The following python modules will need to be installed on the host system prior to running the sample: -1. paho-mqtt -2. opencv-python +There are three sections in this document: +1. Modifying a pipeline definition file to add the ability to record frames. +2. Setting pipeline parameters that configure frame storage and then showing how to retrieve the stored frames. +3. Running the sample that displays the frame where the first vehicle is detected. ## Modifying a Pipeline to Save Frames -When customizing an existing pipeline to record frames and perform inference we will make use of the standard GStreamer elements [tee](https://gstreamer.freedesktop.org/documentation/coreelements/tee.html?gi-language=c), [multifilesink](https://gstreamer.freedesktop.org/documentation/multifile/multifilesink.html) and -[jpegenc](https://gstreamer.freedesktop.org/documentation/jpeg/jpegenc.html?gi-language=c). - -Tees split a pipeline into n branches which can then operate on a frame in parallel. In this example we use a tee element to split a pipeline into two branches. The first branch generates inference results while the second branch saves frames as jpeg images for later retrieval. - -Multifilesinks save incoming buffers to a customizable location on disk. In this example we use a multifilesink element to save frames to disk with a specific naming pattern based on the frame index. 
+When customizing an existing pipeline to record frames and perform inference, we will make use of the standard GStreamer elements [tee](https://gstreamer.freedesktop.org/documentation/coreelements/tee.html?gi-language=c), [jpegenc](https://gstreamer.freedesktop.org/documentation/jpeg/jpegenc.html?gi-language=c) and [multifilesink](https://gstreamer.freedesktop.org/documentation/multifile/multifilesink.html): -Jpegenc elements encode incoming buffers into jpeg images. In this example we use jpegenc to transform raw decoded images into jpeg images that can be saved and retrieved for display. +* `tee` splits a pipeline into _N_ branches which can then operate on a frame in parallel. In this example we use a tee element to split a pipeline into two branches. The first branch generates inference results while the second branch saves frames as jpeg images for later retrieval. +* `jpegenc` encodes incoming buffers into jpeg images. In this example we use jpegenc to transform raw decoded images into jpeg images that can be saved and retrieved for display. +* `multifilesink` saves incoming buffers to a customizable location on disk. In this example we use the element to save frames to disk with a specific naming pattern based on the frame index. ### Updating a Pipeline Template We will use the `object_detection/person_vehicle_bike` pipeline as an example to demonstrate how to modify an existing pipeline to save frames. @@ -73,12 +59,11 @@ The updated template is shown below: ### Pipeline Parameters -To enable clients to customize the number of frames stored as well as the location of stored frames we add the following pipeline parameters to expose selected properties of the multifilesink. +To enable clients to customize the number of frames stored as well as the location of stored frames, we add the following pipeline parameters to expose selected properties of the [multifilesink](https://gstreamer.freedesktop.org/documentation/multifile/multifilesink.html) element. * **max-files** will set a limit on the number of files created (erasing the oldest generated files). Default is 1000, which is just over 30s assuming 30fps content. A value of 0 indicates no limit. -* **file-location** is the path specifier used to generate files. The default path has **%08d** in its name, -this is used by the multifilesink element to set frame filenames using the [c language printf specifier format](https://www.cplusplus.com/reference/cstdio/printf/). +* **file-location** is the path specifier for generating filenames; it uses the [c language printf specifier format](https://www.cplusplus.com/reference/cstdio/printf/). ``` "max-files": { @@ -116,7 +101,7 @@ Start the pipeline * Specify the frame store file specifier (via the file-location parameter). * Request would look like this in curl format, assuming the pipeline is `object_detection/record_frames`: ``` -$ curl localhost:8080/pipelines/object_detection/record_frames -X POST -H \ +curl localhost:8080/pipelines/object_detection/record_frames -X POST -H \ 'Content-Type: application/json' -d \ '{ "source": { 
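The same request can be issued from Python. A minimal sketch, assuming the `requests` package and the REST endpoint shown above; the source URI, MQTT destination, and parameter values are illustrative placeholders, not values mandated by the sample:

```python
import requests

request = {
    "source": {
        # Placeholder media source; any URI the pipeline can decode will do.
        "uri": "file:///tmp/person-bicycle-car-detection.mp4",
        "type": "uri",
    },
    "destination": {
        # Assumed MQTT broker on localhost:1883 and a hypothetical topic name.
        "metadata": {"type": "mqtt", "host": "localhost:1883", "topic": "record_frames"}
    },
    "parameters": {
        # Frame store filename specifier and retention limit described above.
        "file-location": "/tmp/frame_store/%08d.jpg",
        "max-files": 1000,
    },
}

response = requests.post(
    "http://localhost:8080/pipelines/object_detection/record_frames",
    json=request,
)
print("Pipeline instance:", response.text)
```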
@@ -180,18 +165,38 @@ The sample includes the following to show frame saving and retrieval in action * pipeline with frame recording support * gvapython extension that adds frame_id to the inference meta-data * script to start service and volume mount folders for gvapython extension and frame store -* mqtt client that subscribes to inference meta-data * script to start pipeline with frame store filename specifier then start mqtt client +* an mqtt client that subscribes to inference meta-data and displays the frame where the first vehicle was detected -For the frame store path we'll keep with /path/to/samples/record_frames/frame_store. Server run script is invoked as follows: +### Server +For the frame store path we'll use `/path/to/samples/record_frames/frame_store`. + +The server must do two things: +1. Mount the folder with the `add_frame_id` extension to `/home/video-analytics-serving/extensions` +2. Mount the frame store + +Then run the script as follows: ``` -$ samples/record_frames/run_server.sh --frame-store samples/record_frames/frame_store +samples/record_frames/run_server.sh --frame-store samples/record_frames/frame_store +``` + +Check that the pipeline is loaded: ``` -In a different terminal start the mqtt broker +vaclient/vaclient.sh list-pipelines ``` -$ docker run --network=host -d eclipse-mosquitto:1.6 ``` + + - object_detection/record_frames +``` + +### MQTT Broker +In a different terminal, start the MQTT broker: +``` +docker run --network=host -d eclipse-mosquitto:1.6 +``` + +### Client +The run_client script does the following: * Uses the `frame-store` argument to specify the frame store directory. * Sets the filename specifier to `%08d` * Starts the file saving pipeline with the following request @@ -205,10 +210,11 @@ The run_client script does the following Run the script as follows ``` -$ samples/record_frames/run_client.sh --frame-store samples/record_frames/frame_store +samples/record_frames/run_client.sh --frame-store samples/record_frames/frame_store +``` +``` -Starting pipeline... -Pipeline running: object_detection/record_frames, instance = 1 +Starting pipeline object_detection/record_frames, instance = 1 Frame store file location = /path/to/video-analytics-serving/samples/record_frames/frame_store/%08d.jpg Starting mqtt client Connected to broker 
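For reference, the client logic is small enough to sketch here. A minimal version, assuming the `paho-mqtt` and `opencv-python` packages, a broker on `localhost:1883`, the hypothetical `record_frames` topic from the start request, and the `frame_id` field added by the gvapython extension described above:

```python
import json

import cv2
import paho.mqtt.client as mqtt

FRAME_STORE = "samples/record_frames/frame_store"  # must match --frame-store


def on_message(client, _userdata, message):
    # Inference meta-data for one frame; frame_id comes from the
    # add_frame_id gvapython extension.
    results = json.loads(message.payload)
    for detected in results.get("objects", []):
        if detected["detection"]["label"] == "vehicle":
            # Frame filenames follow the %08d specifier set by run_client.sh.
            frame = cv2.imread("{}/{:08d}.jpg".format(FRAME_STORE, results["frame_id"]))
            if frame is not None:
                cv2.imshow("First vehicle detected", frame)
                cv2.waitKey(0)
            client.disconnect()
            return


client = mqtt.Client()
client.on_message = on_message
client.connect("localhost", 1883)
client.subscribe("record_frames")  # hypothetical topic; set in the start request
client.loop_forever()
```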
diff --git a/samples/record_playback/pipelines/recording_playback/playback/pipeline.json index 28bf0d0..1d087e7 100644 --- a/samples/record_playback/pipelines/recording_playback/playback/pipeline.json +++ b/samples/record_playback/pipelines/recording_playback/playback/pipeline.json @@ -1,6 +1,6 @@ { "type": "GStreamer", - "template": ["splitfilesrc name=source ! decodebin", + "template": ["{auto_source} ! decodebin", " ! gvapython name=gvapython", " ! gvawatermark ! videoconvert ! ximagesink" ], diff --git a/samples/record_playback/preproc_callbacks/insert_metadata.py index 8bfecfd..5f6be10 100644 --- a/samples/record_playback/preproc_callbacks/insert_metadata.py +++ b/samples/record_playback/preproc_callbacks/insert_metadata.py @@ -28,12 +28,12 @@ def __init__(self, metadata_file_path, offset_timestamp=0): def load_file(self, file_name): if path.exists(file_name): - json_file = open(file_name, "r") - lines = json_file.readlines() - lines = lines[:-1] - for line in lines: - data = json.loads(line) - self.json_objects.append(data) + with open(file_name, "r") as json_file: + lines = json_file.readlines() + lines = lines[:-1] + for line in lines: + data = json.loads(line) + self.json_objects.append(data) def process_frame(self, frame: VideoFrame, _: float = DETECT_THRESHOLD) -> bool: while self.json_objects: diff --git a/samples/record_playback/record_playback.py index 169e478..f3cc31b 100644 --- a/samples/record_playback/record_playback.py +++ b/samples/record_playback/record_playback.py @@ -6,7 +6,6 @@ ''' import argparse -import json import os import re import sys @@ -63,8 +62,8 @@ def gst_record(options): # Check if have write permissions for metadata file location try: - file_handler = open(options_metadata_file, 'w') - file_handler.close() + with open(options_metadata_file, 'w') as _: + pass except IOError: print("No write permissions for metadata file location") return -1 @@ -80,8 +79,8 @@ def gst_record(options): # Check if directory has write permissions try: file_check_write_permissions = os.path.join(options.output_video_folder, "checkDirWritable.txt") - file_handler = open(file_check_write_permissions, 'w') - file_handler.close() + with open(file_check_write_permissions, 'w') as _: + pass os.remove(file_check_write_permissions) except IOError: print("No write permissions for video output directory") @@ -144,8 +143,11 @@ def gst_playback(options): metadata_args = {"metadata_file_path": options.metadata_file_path, "offset_timestamp": start_pts} request = { "source": { - "type": "path", - "path": location + "type": "gst", + "element": "splitfilesrc", + "properties": { + "location": location + } }, "parameters": { "module": module, diff --git a/third-party-programs.txt index ac26c80..bdad54e 100644 --- a/third-party-programs.txt +++ b/third-party-programs.txt @@ -2,10 +2,31 @@ This file contains the list of third party software ("third party programs") con Third party programs and their corresponding required notices and/or license terms are listed below. + ------------------------------------------------------------- -1. Live Video Analytics - Copyright 2020 Microsoft Corporation - Modifications Copyright 2020 Intel Corporation + + +1. Intel OpenCL + Copyright (C) 2018-2021 Intel Corporation + + jsonschema + Copyright (c) 2013 Julian Berman + + pip3 + Copyright (c) 2008-2020 The pip developers (see AUTHORS.txt file) + + pyyaml + Copyright (c) 2017-2020 Ingy döt Net + Copyright (c) 2006-2016 Kirill Simonov + + rfc3339-validator + Copyright (c) 2019, Nicolas Aimetti + + rfc3986-validator + Copyright (c) 2019, Nicolas Aimetti + + setuptools + Copyright (C) 2016 Jason R Coombs MIT License @@ -15,4 +36,1213 @@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLI ------------------------------------------------------------- -Other names and brands may be claimed as the property of others. + +2. 
aclnet + https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/aclnet + Copyright (c) 2020 Intel Corporation + + person-vehicle-bike-detection-crossroad-0078 + vehicle-attributes-recognition-barrier-0039 + action-recognition-0001-decoder + action-recognition-0001-encoder + emotions-recognition-retail-0003 + face-detection-retail-0004 + https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel + Copyright (c) 2021 Intel Corporation + + Requests + Copyright 2019 Kenneth Reitz + + swagger-ui-bundle + Copyright 2020 SmartBear Software Inc. + + Tornado Web Server + Copyright: 2009-2011 Facebook + + Zalando Connexion + Copyright 2015 Zalando SE + Requests + Copyright 2019 Kenneth Reitz + +Apache 2.0 License + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------------------------------------------------- + +3. Python 3 + Copyright © 2001-2020 Python Software Foundation; All Rights + Reserved + + 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and + the Individual or Organization ("Licensee") accessing and otherwise using Python + 3.9.0 software in source or binary form and its associated documentation. + + 2. Subject to the terms and conditions of this License Agreement, PSF hereby + grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, + analyze, test, perform and/or display publicly, prepare derivative works, + distribute, and otherwise use Python 3.9.0 alone or in any derivative + version, provided, however, that PSF's License Agreement and PSF's notice of + copyright, i.e., "Copyright © 2001-2020 Python Software Foundation; All Rights + Reserved" are retained in Python 3.9.0 alone or in any derivative version + prepared by Licensee. + + 3. In the event Licensee prepares a derivative work that is based on or + incorporates Python 3.9.0 or any part thereof, and wants to make the + derivative work available to others as provided herein, then Licensee hereby + agrees to include in any such work a brief summary of the changes made to Python + 3.9.0. + + 4. PSF is making Python 3.9.0 available to Licensee on an "AS IS" basis. + PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF + EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR + WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE + USE OF PYTHON 3.9.0 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. + + 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.9.0 + FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF + MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.9.0, OR ANY DERIVATIVE + THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + + 6. This License Agreement will automatically terminate upon a material breach of + its terms and conditions. + + 7. Nothing in this License Agreement shall be deemed to create any relationship + of agency, partnership, or joint venture between PSF and Licensee. 
This License + Agreement does not grant permission to use PSF trademarks or trade name in a + trademark sense to endorse or promote products or services of Licensee, or any + third party. + + 8. By copying, installing or otherwise using Python 3.9.0, Licensee agrees + to be bound by the terms and conditions of this License Agreement. + +------------------------------------------------------------- + +4. Numpy + Copyright (c) 2005-2020, NumPy Developers. + All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NumPy Developers nor the names of any + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------- + +5. jemalloc (BSD2) + +Unless otherwise specified, files in the jemalloc source distribution are +subject to the following license: + +Copyright (C) 2002-present Jason Evans . +All rights reserved. +Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. +Copyright (C) 2009-present Facebook, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: +1. Redistributions of source code must retain the above copyright notice(s), + this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice(s), + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------- + +6. Python Paho MQTT Client + +Eclipse Distribution License - v 1.0 +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------- + +7. python-dateutil + +Copyright 2017- Paul Ganssle +Copyright 2017- dateutil contributors (see AUTHORS file) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +The above license applies to all contributions after 2017-12-01, as well as +all contributions that have been re-licensed (see AUTHORS file for the list of +contributors who have re-licensed their code). +------------------------- +dateutil - Extensions to the standard Python datetime module. + +Copyright (c) 2003-2011 - Gustavo Niemeyer +Copyright (c) 2012-2014 - Tomi Pieviläinen +Copyright (c) 2014-2016 - Yaron de Leeuw +Copyright (c) 2015- - Paul Ganssle +Copyright (c) 2015- - dateutil contributors (see AUTHORS file) + +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The above BSD License Applies to all code, even that also covered by Apache 2.0. + +------------------------------------------------------------- +8. Media-Driver + Copyright (c) 2007-2017 Intel Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Copyright (c) 2010, The WebM Project authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +Neither the name of Google, nor the WebM Project, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. Copyright (c) 2008 Red Hat Inc. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA Copyright (c) 2007-2008 Intel Corporation Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA. Copyright © 2014 NVIDIA Corporation Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright © 2007 Red Hat Inc. Copyright © 2007-2012 Intel Corporation Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. Sun Apr 18 09:35:45 1999 by faith@precisioninsight.com Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas. Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +Copyright 2008, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +------------------------------------------------------------- +9. Boost C++ libraries + +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + +------------------------------------------------------------- +10. Open SSL + + LICENSE ISSUES + ============== + + The OpenSSL toolkit stays under a double license, i.e. both the conditions of + the OpenSSL License and the original SSLeay license apply to the toolkit. + See below for the actual license texts. + + OpenSSL License + --------------- + +/* ==================================================================== + * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * 3. 
All advertising materials mentioning features or use of this + * software must display the following acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" + * + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to + * endorse or promote products derived from this software without + * prior written permission. For written permission, please contact + * openssl-core@openssl.org. + * + * 5. Products derived from this software may not be called "OpenSSL" + * nor may "OpenSSL" appear in their names without prior written + * permission of the OpenSSL Project. + * + * 6. Redistributions of any form whatsoever must retain the following + * acknowledgment: + * "This product includes software developed by the OpenSSL Project + * for use in the OpenSSL Toolkit (http://www.openssl.org/)" + * + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + * OF THE POSSIBILITY OF SUCH DAMAGE. + * ==================================================================== + * + * This product includes cryptographic software written by Eric Young + * (eay@cryptsoft.com). This product includes software written by Tim + * Hudson (tjh@cryptsoft.com). + * + */ + + Original SSLeay License + ----------------------- + +/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) + * All rights reserved. + * + * This package is an SSL implementation written + * by Eric Young (eay@cryptsoft.com). + * The implementation was written so as to conform with Netscapes SSL. + * + * This library is free for commercial and non-commercial use as long as + * the following conditions are aheared to. The following conditions + * apply to all code found in this distribution, be it the RC4, RSA, + * lhash, DES, etc., code; not just the SSL code. The SSL documentation + * included with this distribution is covered by the same copyright terms + * except that the holder is Tim Hudson (tjh@cryptsoft.com). + * + * Copyright remains Eric Young's, and as such any Copyright notices in + * the code are not to be removed. + * If this package is used in a product, Eric Young should be given attribution + * as the author of the parts of the library used. + * This can be in the form of a textual message at program startup or + * in documentation (online or textual) provided with the package. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * "This product includes cryptographic software written by + * Eric Young (eay@cryptsoft.com)" + * The word 'cryptographic' can be left out if the rouines from the library + * being used are not cryptographic related :-). + * 4. If you include any Windows specific code (or a derivative thereof) from + * the apps directory (application code) you must include an acknowledgement: + * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" + * + * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * The licence and distribution terms for any publically available version or + * derivative of this code cannot be changed. i.e. this code cannot simply be + * copied and put under another distribution licence + * [including the GNU Public Licence.] + */ + +------------------------------------------------------------- +11. libusb + Copyright © 2001 Johannes Erdfelt + Copyright © 2007-2009 Daniel Drake + Copyright © 2010-2012 Peter Stuge + Copyright © 2008-2016 Nathan Hjelm + Copyright © 2009-2013 Pete Batard + Copyright © 2009-2013 Ludovic Rousseau + Copyright © 2010-2012 Michael Plante + Copyright © 2011-2013 Hans de Goede + Copyright © 2012-2013 Martin Pieuchot + Copyright © 2012-2013 Toby Gray + Copyright © 2013-2018 Chris Dickens + + Systemd - libudev + +GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. 
+ + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. 
+ + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. 
You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) 
Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. 
You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. 
If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. 
It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Also add information on how to contact you by electronic and paper mail. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the library, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the + library `Frob' (a library for tweaking knobs) written by James Random Hacker. + + , 1 April 1990 + Ty Coon, President of Vice + +That's all there is to it! + +------------------------------------------------------------- +12. JSON-C + Copyright (c) 2009-2012 Eric Haszlakiewicz + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +Copyright (c) 2004, 2005 Metaparadigm Pte Ltd + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +------------------------------------------------------------- + +* Docker Base Images + + OpenVINO Runtime Base Image + https://hub.docker.com/r/openvino/ubuntu18_runtime + https://hub.docker.com/r/openvino/ubuntu20_runtime + https://github.com/openvinotoolkit/docker_ci + Copyright (C) 2019-2021 Intel Corporation + All rights reserved. + + OpenVINO Data Runtime Base Image + https://hub.docker.com/r/openvino/ubuntu18_data_runtime + https://hub.docker.com/r/openvino/ubuntu20_data_runtime + https://github.com/openvinotoolkit/docker_ci + Copyright (C) 2019-2021 Intel Corporation + All rights reserved. + + OpenVisualCloud Docker Base Image + https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-gst + https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg + Copyright (c) 2018,Intel Corporation + All rights reserved. + +This docker installs third party components licensed under various open source licenses. The terms under which those components may be used and distributed can be found with the license document that is provided with those components. Please familiarize yourself with those terms to ensure your distribution of those components complies with the terms of those licenses. + +------------------------------------------------------------- +Other names and brands may be claimed as the property of others. + + + diff --git a/tools/model_downloader/README.md b/tools/model_downloader/README.md index 9e9759a..c7bd4b4 100644 --- a/tools/model_downloader/README.md +++ b/tools/model_downloader/README.md @@ -77,7 +77,7 @@ Example: ## Downloading Models with the standalone tool When run as a standalone tool, the model downloader will run within an -`openvino/ubuntu20_data_dev:2021.4.1` docker image and download models listed in +`openvino/ubuntu20_data_dev:2021.4.2` docker image and download models listed in a yaml file that can be specified via the `--model-list` argument. 
Example: diff --git a/tools/model_downloader/downloader.py b/tools/model_downloader/downloader.py index d550c70..970ceb8 100644 --- a/tools/model_downloader/downloader.py +++ b/tools/model_downloader/downloader.py @@ -14,7 +14,7 @@ import requests import yaml from jsonschema import Draft7Validator, FormatChecker -from schema import model_list_schema +from mdt_schema import model_list_schema MODEL_OPTIMIZER_ROOT = ( "/opt/intel/dldt" @@ -45,9 +45,9 @@ def _validate_schema(model_list): try: validator = Draft7Validator(model_list_schema, format_checker=FormatChecker()) validator.validate(model_list) - except Exception as err: + except Exception as error: print("Yaml input schema validation error.") - print(err) + print(error) sys.exit(1) @@ -104,29 +104,30 @@ def _download_model_proc(target_dir, model_name, dl_streamer_version): for filepath in files: if os.path.splitext(filepath)[0] == model_name: model_proc = os.path.join(root, filepath) + if model_proc: + shutil.move(model_proc, os.path.join(target_dir, "{}.json".format(model_name))) else: url = "{0}/{1}/samples/model_proc/{2}.json".format( DL_STREAMER_REPO_ROOT, dl_streamer_version, model_name ) response = requests.get(url) - temp_dir = tempfile.TemporaryDirectory() - if response.status_code == 200: - with open( - "{0}/{1}.json".format(temp_dir.name, model_name), "wb" - ) as out_file: - out_file.write(response.content) - print( - "Downloaded {0} model-proc file from gst-video-analytics repo".format( - model_name + with tempfile.TemporaryDirectory() as temp_dir: + if response.status_code == 200: + with open( + "{0}/{1}.json".format(temp_dir, model_name), "wb" + ) as out_file: + out_file.write(response.content) + print( + "Downloaded {0} model-proc file from gst-video-analytics repo".format( + model_name + ) ) - ) - model_proc = os.path.abspath( - "{0}/{1}.json".format(temp_dir.name, model_name) - ) - else: - print("WARNING: model-proc not found in gst-video-analytics repo!") - if model_proc: - shutil.move(model_proc, os.path.join(target_dir, "{}.json".format(model_name))) + model_proc = os.path.abspath( + "{0}/{1}.json".format(temp_dir, model_name) + ) + shutil.move(model_proc, os.path.join(target_dir, "{}.json".format(model_name))) + else: + print("WARNING: model-proc not found in gst-video-analytics repo!") def _create_convert_command(model_name, output_dir, precisions): diff --git a/tools/model_downloader/schema.py b/tools/model_downloader/mdt_schema.py similarity index 90% rename from tools/model_downloader/schema.py rename to tools/model_downloader/mdt_schema.py index 55513d1..65b454e 100644 --- a/tools/model_downloader/schema.py +++ b/tools/model_downloader/mdt_schema.py @@ -20,10 +20,10 @@ "FP16-INT8", "FP32-INT8", "FP32-INT1", "FP16-INT1", "INT1"]} }, - "model-proc":{"type": "string"} + "model-proc" : {"type": "string"} }, "required" : ["model"], - "additionalProperties": False + "additionalProperties" : False }, { "type" : "string" diff --git a/tools/model_downloader/model_downloader.sh b/tools/model_downloader/model_downloader.sh index 51d3c7d..baaa184 100755 --- a/tools/model_downloader/model_downloader.sh +++ b/tools/model_downloader/model_downloader.sh @@ -12,7 +12,8 @@ OUTPUT_DIR=$(realpath $( pwd )) FORCE= RUN_PREFIX= OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-"${CACHE_PREFIX}openvino/ubuntu20_data_dev"} -OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4.1"} +OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4.2"} +NAME="video-analytics-serving-model-downloader" DL_STREAMER_VERSION= MODE= @@ -129,4 
+130,4 @@ if [ ! -d "$OUTPUT_DIR/models" ]; then echo "Created output models folder as UID: $UID" fi -$SOURCE_DIR/docker/run.sh --user "$UID" -e HOME=/tmp $NON_INTERACTIVE --image $OPEN_MODEL_ZOO_TOOLS_IMAGE:$OPEN_MODEL_ZOO_VERSION $VOLUME_MOUNT $DRY_RUN --entrypoint /bin/bash --entrypoint-args "\"-i\" \"-c\" \"pip3 install -r /home/video-analytics-serving/tools/model_downloader/requirements.txt ; python3 -u /home/video-analytics-serving/tools/model_downloader --model-proc-version $DL_STREAMER_VERSION --model-list /models_yml/$YML_FILE_NAME --output /output $FORCE\"" +$SOURCE_DIR/docker/run.sh --user "$UID" -e HOME=/tmp $NON_INTERACTIVE --name $NAME --image $OPEN_MODEL_ZOO_TOOLS_IMAGE:$OPEN_MODEL_ZOO_VERSION $VOLUME_MOUNT $DRY_RUN --entrypoint /bin/bash --entrypoint-args "\"-i\" \"-c\" \"pip3 install -r /home/video-analytics-serving/tools/model_downloader/requirements.txt ; python3 -u /home/video-analytics-serving/tools/model_downloader --model-proc-version $DL_STREAMER_VERSION --model-list /models_yml/$YML_FILE_NAME --output /output $FORCE\"" diff --git a/vaclient/README.md b/vaclient/README.md index 5290d82..05b59b8 100644 --- a/vaclient/README.md +++ b/vaclient/README.md @@ -2,16 +2,18 @@ vaclient is a python app intended to be a reference for using VA Serving REST API. vaclient is included in the main container and can be easily launched using the accompanying run script, `vaclient.sh`. >**Note:** -This document assumes you are familiar with vaserving. See the main [README](../README.md) for details on building and running the service. +This document assumes you are familiar with VA Serving. See the main [README](../README.md) for details on building and running the service. ## Basic Usage ### Listing Supported Pipelines and Models To see which models and pipelines are loaded by the service, run the following commands. Both models and pipelines are displayed in the tuple form of name/version. > **Note:** Results will vary depending on your service configuration -Fist models: +First models: +``` + ./vaclient/vaclient.sh list-models +``` ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh list-models - emotion_recognition/1 - object_detection/person_vehicle_bike @@ -22,7 +24,9 @@ Fist models: Now pipelines: ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh list-pipelines +./vaclient/vaclient.sh list-pipelines +``` +``` - audio_detection/environment - object_classification/vehicle_attributes @@ -35,11 +39,15 @@ Now pipelines: ### Running Pipelines vaclient can be used to send pipeline start requests using the `run` command. With the `run` command you will need to enter two additional arguments: the `pipeline` (in the form of pipeline_name/pipeline_version) you wish to use and the `uri` pointing to the media of your choice. ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true ``` -As the pipeline runs, the output file is processed by vaclient and inference information is printed to the screen in the following format: `label (confidence) [top left width height] {meta-data}` At the end of the pipeline run, the average fps is printed as well. If you wish to stop the pipeline mid-run, `Ctrl+C` will signal the client to send a `stop` command to the service.
Once the pipeline is stopped, vaclient will output the average fps. More on `stop` below +If the pipeline request is successful, an instance id is created and vaclient will print the instance id. More on `instance_id` below. +Once pre-roll is completed and the pipeline begins running, the output file is processed by vaclient and inference information is printed to the screen in the following format: `label (confidence) [top left width height] {meta-data}`. At the end of the pipeline run, the average fps is printed as well. If you wish to stop the pipeline mid-run, `Ctrl+C` will signal the client to send a `stop` command to the service. Once the pipeline is stopped, vaclient will output the average fps. More on `stop` below. ``` +Pipeline instance = 1 +Pipeline running + Timestamp 48583333333 - vehicle (0.95) [0.00, 0.12, 0.15, 0.36] {} Timestamp 48666666666 @@ -58,21 +66,42 @@ Timestamp 49250000000 - vehicle (0.64) [0.00, 0.14, 0.05, 0.34] {} avg_fps: 39.66 ``` +However, if there are errors during pipeline execution, e.g. GPU is specified as the detection device but is not present, vaclient will terminate with an error message: +``` +Pipeline instance = 2 +Error in pipeline, please check vaserving log messages +``` + ### Starting Pipelines The `run` command is helpful for quickly showing inference results but `run` blocks until completion. If you want to do your own processing and only want to kick off a pipeline, this can be done with the `start` command. `start` arguments are the same as `run`: you'll need to provide the `pipeline` and `uri`. Run the following command: ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +``` +Similar to `run`, if the pipeline request is successful, an instance id is created and vaclient will print the instance id. More on `instance_id` below. +``` +Pipeline instance = 1 ``` +Errors during pipeline execution are not flagged as vaclient exits after receiving the instance id for a successful request. However, both `start` and `run` will flag invalid requests, for example: +``` +./vaclient/vaclient.sh start object_detection/person_vehicle_bke https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +``` +The pipeline name has a typo, `object_detection/person_vehicle_bke`, making it invalid; this results in the error message: +``` +Got unsuccessful status code: 400 +"Invalid Pipeline or Version" + +Pipeline failed to start +``` + #### Instance ID -On a successful start of a pipeline, vaserving assigns a pipeline `instance_id` which is a unique number which can be used to reference the pipeline in subsequent requests. In this example, the `instance_id` is `1` +On a successful start of a pipeline, VA Serving assigns a pipeline `instance_id`, a unique number that can be used to reference the pipeline in subsequent requests. In this example, the `instance_id` is `1` ``` -Starting pipeline...
-Pipeline running: object_detection/person_vehicle_bike, instance = 1 +Starting pipeline object_detection/person_vehicle_bike, instance = 1 ``` ### Stopping Pipelines Stopping a pipeline can be accomplished using the `stop` command along with the `pipeline` and `instance id`: ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 1 +./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 1 ``` Expected output: the average fps is also printed for the stopped pipeline. ``` @@ -83,63 +112,189 @@ avg_fps: 42.07 ### Getting Pipeline Status Querying the current state of the pipeline is done using the `status` command along with the `pipeline` and `instance id`: ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh status object_detection/person_vehicle_bike 1 +./vaclient/vaclient.sh status object_detection/person_vehicle_bike 1 +``` +vaclient will print the status of `QUEUED`, `RUNNING`, `ABORTED`, `COMPLETED` or `ERROR`, like so: +``` + +RUNNING ``` -vaclient will print the status of either `QUEUED`, `RUNNING`, `ABORTED`, or `COMPLETED` ### Waiting for a pipeline to finish If you wish to wait for a pipeline to finish running, you can use the `wait` command along with the `pipeline` and `instance id`: ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh wait object_detection/person_vehicle_bike 1 +./vaclient/vaclient.sh wait object_detection/person_vehicle_bike 1 ``` -The client will print the inital status of the pipeline. Then wait for completion and print the average fps. +The client will print the initial status of the pipeline, then wait for completion and print the average fps. +## Command Line Arguments +See [customizing pipeline requests](../docs/customizing_pipeline_requests.md) to further understand how pipeline request options can be customized. -## Command Options -As described in [customizing pipeline requests](../docs/customizing_pipeline_requests.md), pipeline request options can be customized. This section describes ways to customize vaclient `run` and `start` commands. +### --quiet +This optional argument controls logging verbosity and is common across all vaclient commands. > **Note**: If specified, --quiet needs to be placed ahead of the specific command, e.g. start, run etc. -### --destination +#### Start +vaclient output will just be the pipeline instance id. +``` +./vaclient/vaclient.sh --quiet start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +``` +``` + +2 +``` +#### Run +vaclient output will be the pipeline instance id followed by inference results. +``` +./vaclient/vaclient.sh --quiet run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +``` +``` + +1 +Timestamp 1500000000 +- person (0.54) [0.67, 0.88, 0.74, 1.00] +Timestamp 1666666666 +- person (0.55) [0.68, 0.83, 0.74, 1.00] +``` + +### Run/Start Arguments +This section summarizes all the arguments for the vaclient `run` and `start` commands. + +#### pipeline (required) +Positional argument (first) that specifies the pipeline to be launched, in the form of `pipeline name/pipeline version`. + +#### uri (optional) +Positional argument (second) that specifies the location of the content to play/analyze.
+> Note: The uri argument can be skipped only if the source is passed in via --request-file + +#### --destination By default, vaclient uses a generic template for destination: ```json { "destination": { - "metadata": { - "type": "file", - "path": "/tmp/results.jsonl", - "format": "json-lines" - } + "metadata": { + "type": "file", + "path": "/tmp/results.jsonl", + "format": "json-lines" + } }} ``` -Destination configuration can be updated with `--destination`. For example, passing in `--destination path /new/filepath/results.jsonl` will update filepath for results saving (Note you may need to volume mount this new location when running vaserving.) - -### --parameter -By default, vaclient relies on pipeline parameter defaults. This can be updated with `--parameter` option. For exmaple add `--parameter detection-device GPU` +Destination configuration can be updated with `--destination`. This argument affects only the metadata part of the destination. +In the following example, passing in `--destination path /tmp/newfile.jsonl` will update the filepath for saving inference results. +> **Note**: You may need to volume mount this new location when running VA Serving. +``` +./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination path /tmp/newfile.jsonl +``` -### --rtsp-path -If you are utiziling RTSP restreaming, `--rtsp-path` can be used to update the `server_url` path. +#### --rtsp-path +If you are utilizing RTSP restreaming, `--rtsp-path` can be used to update the `server_url` path. This updates the frame part of the destination under the hood. For example, adding `--rtsp-path new_path` will allow you to view the stream at `rtsp://<host>:<port>/new_path`. More details on RTSP restreaming are in the [running_video_analytics_serving](../docs/running_video_analytics_serving.md) documentation. -### --show-request -All vaclient commands can be used with the `--show-request` option which will print out the HTTP request but will not send. Here are some examples: +#### --parameter +By default, vaclient relies on pipeline parameter defaults. This can be updated with the `--parameter` option. See [Defining Pipelines](../docs/defining_pipelines.md) to learn how parameters are defined. The following example adds `--parameter detection-device GPU`: +``` +./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --parameter detection-device GPU +``` + +#### --parameter-file +Specifies a JSON file that contains parameters in key, value pairs. Parameters from this file take precedence over those set by `--parameter`. +> **Note**: As vaclient volume mounts /tmp, the parameter file may be placed there. + +A sample parameter file can look like this: +```json +{ + "parameters": { + "detection-device": "GPU" + } +} +``` +The above file, say /tmp/sample_parameters.json, may be used as follows: +``` +./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --parameter-file /tmp/sample_parameters.json +``` + +#### --request-file +Specifies a JSON file that contains the complete request, i.e. source, destination, tags and parameters. +See [Customizing Pipeline Requests](../docs/customizing_pipeline_requests.md) for examples of requests in JSON format. +> **Note**: As vaclient volume mounts /tmp, the request file may be placed there.
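For context: `update_request_options` in vaclient/vaclient.py (later in this diff) applies `--request-file` last via `request.update(...)`, so any top-level section present in the file replaces the matching section assembled from the other options. A minimal sketch of that merge semantics, with hypothetical values:

```python
import json

# dict.update replaces whole top-level sections, mirroring how
# update_request_options merges the request file over the request
# assembled from options such as --parameter.
request = {"parameters": {"detection-device": "GPU"}}  # e.g. built from --parameter
request_file_contents = '{"parameters": {"detection-device": "CPU"}}'
request.update(json.loads(request_file_contents))
print(request)  # {'parameters': {'detection-device': 'CPU'}}
```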
+ +A sample request file can look like this: +```json +{ + "source": { + "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.jsonl", + "format": "json-lines" + } + }, + "parameters": { + "detection-device": "GPU" + } +} +``` +The above file, named for instance /tmp/sample_request.json, may be used as follows: +``` +./vaclient/vaclient.sh start object_detection/person_vehicle_bike --request-file /tmp/sample_request.json +``` + +#### --tag +Specifies a key, value pair to update the request with. This information is added to each frame's metadata. +This example adds tags for the direction and location of video capture: +``` +./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --tag direction east --tag camera_location parking_lot +``` -#### Run: +#### --show-request +All vaclient commands can be used with the `--show-request` option, which will print out the HTTP request and exit, i.e. it will not be sent to VA Serving. +This example shows the result of `--show-request` when the pipeline is started with options passed in: +``` +./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --destination path /tmp/newfile.jsonl --parameter detection-device GPU --tag direction east --tag camera_location parking_lot --show-request +``` ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --show-request POST http://localhost:8080/pipelines/object_detection/person_vehicle_bike -Body:{'source': {'uri': 'https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true', 'type': 'uri'}, 'destination': {'metadata': {'type': 'file', 'path': '/tmp/results.jsonl', 'format': 'json-lines'}}} +Body:{'source': {'uri': 'https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true', 'type': 'uri'}, 'destination': {'metadata': {'type': 'file', 'path': '/tmp/newfile.jsonl', 'format': 'json-lines'}}, 'parameters': {'detection-device': 'GPU'}, 'tags': {'direction': 'east', 'camera_location': 'parking_lot'}} ``` +See [View REST request](../README.md#view-rest-request) to see how the output from `--show-request` can be mapped to a curl command. + +### Status/Wait/Stop Arguments +This section summarizes all the arguments for the vaclient `status`, `wait` and `stop` commands. + +#### pipeline (required) +Positional argument (first) that specifies the pipeline to wait on/query status of/stop, in the form of `pipeline name/pipeline version`. + +#### instance (required) +Positional argument (second) that specifies the pipeline instance id to wait on/query status of/stop, depending on the command. -#### Stop: +#### --show-request +As mentioned before, the `--show-request` option will print out the HTTP request and exit.
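Since `--show-request` prints the literal HTTP method and URL, its output can also be replayed directly against the service. A minimal sketch using the `requests` library, assuming the default `localhost:8080` address and the hypothetical instance id `1` from the outputs below:

```python
import requests

# Replay the requests printed by --show-request for an already-started
# pipeline instance (instance id 1, default service address assumed).
base = "http://localhost:8080/pipelines/object_detection/person_vehicle_bike/1"
print(requests.get(base + "/status").json())  # status -> GET .../status
requests.delete(base)                         # stop   -> DELETE .../1
```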
+ +##### Status +``` +./vaclient/vaclient.sh status object_detection/person_vehicle_bike 1 --show-request +``` ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 1 --show-request -DELETE http://localhost:8080/pipelines/object_detection/person_vehicle_bike/1 +GET http://localhost:8080/pipelines/object_detection/person_vehicle_bike/1/status +``` +##### Wait +``` +./vaclient/vaclient.sh wait object_detection/person_vehicle_bike 1 --show-request ``` - -#### Status: ``` -~/video-analytics-serving$ ./vaclient/vaclient.sh status object_detection/person_vehicle_bike 1 --show-request GET http://localhost:8080/pipelines/object_detection/person_vehicle_bike/1/status ``` +##### Stop +``` +./vaclient/vaclient.sh stop object_detection/person_vehicle_bike 1 --show-request +``` +``` + +DELETE http://localhost:8080/pipelines/object_detection/person_vehicle_bike/1 +``` \ No newline at end of file diff --git a/vaclient/arguments.py b/vaclient/arguments.py index 0474d9e..7476558 100644 --- a/vaclient/arguments.py +++ b/vaclient/arguments.py @@ -28,30 +28,42 @@ * SOFTWARE ''' import sys +import json import argparse import vaclient + +def get_typed_value(value): + try: + return json.loads(value) + except ValueError: + return value + + def parse_args(program_name="Video Analytics Serving Client"): """Process command line options""" + #pylint: disable=too-many-statements parser = argparse.ArgumentParser( prog=program_name, formatter_class=argparse.ArgumentDefaultsHelpFormatter) - subparsers = parser.add_subparsers() + subparsers = parser.add_subparsers(dest='subparsers') parser_run = subparsers.add_parser('run', help='Start specified pipeline with specified source. \ Meta-data will be displayed as pipeline runs. Once pipeline ends the average fps is displayed') parser_run.set_defaults(command=vaclient.run) parser_run.add_argument('pipeline', type=str, help='Vaserving pipeline which to run instance of. \ In the form of pipeline_name/pipeline_version') - parser_run.add_argument('uri', type=str, help='Location of the content to play/analyze') + parser_run.add_argument('uri', type=str, nargs="?", default=None, help='Location of the content to play/analyze') parser_run.add_argument('--destination', action='append', nargs=2, metavar=('key', 'value'), type=str, \ help='Update destination information with key and value pair') parser_run.add_argument('--rtsp-path', type=str, help='RTSP endpoint path') - parser_run.add_argument('--parameter', action='append', nargs=2, metavar=('key', 'value'), type=str, \ + parser_run.add_argument('--parameter', action='append', nargs=2, metavar=('key', 'value'), type=get_typed_value, \ dest='parameters', help='Update request parameter with key and value pair') parser_run.add_argument('--parameter-file', type=str, dest='parameter_file', help='Update request parameter \ with key and value pairs from file. 
Parameters from this file take precedence over those set by --parameter') + parser_run.add_argument('--request-file', type=str, dest='request_file', \ + help='Update any/all sections of request with values from file') parser_run.add_argument('--tag', action='append', nargs=2, metavar=('key', 'value'), type=str, \ dest='tags', help='Update request tags with key and value pair') parser_run.add_argument("--show-request", action='store_true', help='Print HTTP requests and exit') @@ -60,14 +72,16 @@ def parse_args(program_name="Video Analytics Serving Client"): parser_start.set_defaults(command=vaclient.start) parser_start.add_argument('pipeline', type=str, help='Vaserving pipeline which to run instance of. \ In the form of pipeline_name/pipeline_version') - parser_start.add_argument('uri', type=str, help='Location of the content to play/analyze') + parser_start.add_argument('uri', type=str, nargs="?", default=None, help='Location of the content to play/analyze') parser_start.add_argument('--destination', action='append', nargs=2, metavar=('key', 'value'), type=str, \ help='Update destination information with key and value pair') parser_start.add_argument('--rtsp-path', type=str, help='RTSP endpoint path') - parser_start.add_argument('--parameter', action='append', nargs=2, metavar=('key', 'value'), type=str, \ - dest='parameters', help='Update requeset parameter with key and value pair') + parser_start.add_argument('--parameter', action='append', nargs=2, metavar=('key', 'value'), type=get_typed_value, \ + dest='parameters', help='Update request parameter with key and value pair') parser_start.add_argument('--parameter-file', type=str, dest='parameter_file', help='Update request parameter \ with key and value pairs from file. Parameters from this file take precedence over those set by --parameter') + parser_start.add_argument('--request-file', type=str, dest='request_file', \ + help='Update any/all sections of request with values from file') parser_start.add_argument('--tag', action='append', nargs=2, metavar=('key', 'value'), type=str, \ dest='tags', help='Update request tags with key and value pair') parser_start.add_argument("--show-request", action='store_true', help='Print HTTP requests and exit') @@ -107,4 +121,8 @@ def parse_args(program_name="Video Analytics Serving Client"): parser.print_help(sys.stderr) sys.exit(1) - return parser.parse_args() + args = parser.parse_args() + if args.subparsers in ['start', 'run'] and not args.uri and not args.request_file: + parser.error("at least one of uri or --request-file is required") + + return args diff --git a/vaclient/results_watcher.py b/vaclient/results_watcher.py index c266c5d..3ca490e 100755 --- a/vaclient/results_watcher.py +++ b/vaclient/results_watcher.py @@ -31,18 +31,18 @@ def stop(self): def watch_method(self): try: - file = open(self.filename, 'r') - while not self.trigger_stop: - where = file.tell() - line = file.readline() - if not line: - time.sleep(self.sleep_time) - file.seek(where) - else: - try: - ResultsWatcher.print_results(json.loads(line)) - except ValueError: - pass + with open(self.filename, 'r') as file: + while not self.trigger_stop: + where = file.tell() + line = file.readline() + if not line: + time.sleep(self.sleep_time) + file.seek(where) + else: + try: + ResultsWatcher.print_results(json.loads(line)) + except ValueError: + pass except OSError: self.error_message = "Unable to read from destination metadata file {}".format(self.filename) @@ -51,37 +51,7 @@ def watch_method(self): def print_results(cls, results): 
object_output = [] for detected_object in results.get("objects", []): - meta = {} - current_object = [] - for key in detected_object: - if key == "detection": - confidence = detected_object[key]["confidence"] - label = detected_object[key]["label"] - x_min = detected_object[key]["bounding_box"]["x_min"] - y_min = detected_object[key]["bounding_box"]["y_min"] - x_max = detected_object[key]["bounding_box"]["x_max"] - y_max = detected_object[key]["bounding_box"]["y_max"] - current_object.append(label) - current_object.append("({:.2f})".format(confidence)) - current_object.append("[{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(x_min, - y_min, - x_max, - y_max)) - elif key == "id": - meta[key] = detected_object[key] - elif isinstance(detected_object[key], dict) and "label" in detected_object[key]: - meta[key] = detected_object[key]["label"] - elif key == "tensors": - for tensor in detected_object[key]: - if "name" in tensor and tensor["name"] == "action": - confidence = tensor["confidence"] - label = tensor["label"] - current_object.append(label) - current_object.append("({:.2f})".format(confidence)) - if meta: - current_object.append(str(meta)) - if current_object: - object_output.append("- {}".format(" ".join(current_object))) + ResultsWatcher.process_detections(detected_object, object_output) event_output = [] for event in results.get("events", []): current_event = [] @@ -95,3 +65,35 @@ def print_results(cls, results): print("{}".format("\n".join(object_output))) if event_output: print("{}".format("\n".join(event_output))) + + @staticmethod + def process_detections(detected_object, object_output): + meta = {} + current_object = [] + for key in detected_object: + if key == "detection": + confidence = detected_object[key]["confidence"] + label = detected_object[key]["label"] + bbox = detected_object[key]["bounding_box"] + current_object.append(label) + current_object.append("({:.2f})".format(confidence)) + current_object.append("[{:.2f}, {:.2f}, {:.2f}, {:.2f}]" + .format(bbox["x_min"], + bbox["y_min"], + bbox["x_max"], + bbox["y_max"])) + elif key == "id": + meta[key] = detected_object[key] + elif isinstance(detected_object[key], dict) and "label" in detected_object[key]: + meta[key] = detected_object[key]["label"] + elif key == "tensors": + for tensor in detected_object[key]: + if "name" in tensor and tensor["name"] == "action": + confidence = tensor["confidence"] + label = tensor["label"] + current_object.append(label) + current_object.append("({:.2f})".format(confidence)) + if meta: + current_object.append(str(meta)) + if current_object: + object_output.append("- {}".format(" ".join(current_object))) diff --git a/vaclient/vaclient.py b/vaclient/vaclient.py index f4d1b77..7da042b 100755 --- a/vaclient/vaclient.py +++ b/vaclient/vaclient.py @@ -54,10 +54,11 @@ def run(args): if started_instance_id is None: sys.exit(1) try: - if request['destination']['metadata']['type'] == 'file'and \ - os.path.exists(request['destination']['metadata']['path']): - watcher = ResultsWatcher(request['destination']['metadata']['path']) - watcher.watch() + if request['destination']['metadata']['type'] == 'file': + watcher = launch_results_watcher(request, + args.pipeline, + started_instance_id, + verbose=args.verbose) except KeyError: pass print_fps(wait_for_pipeline_completion(args.pipeline, started_instance_id)) @@ -120,13 +121,20 @@ def update_request_options(request, request["parameters"] = dict(args.parameters) if hasattr(args, 'parameter_file') and args.parameter_file: with open(args.parameter_file, 'r') as 
parameter_file: - request.update(json.load(parameter_file)) + parameter_data = json.load(parameter_file) + if request.get("parameters"): + request["parameters"].update(parameter_data.get("parameters")) + else: + request["parameters"] = parameter_data.get("parameters") if hasattr(args, 'tags') and args.tags: request["tags"] = dict(args.tags) if hasattr(args, 'rtsp_path') and args.rtsp_path: rtsp_template = RTSP_TEMPLATE rtsp_template['frame']['path'] = args.rtsp_path request['destination'].update(rtsp_template) + if hasattr(args, 'request_file') and args.request_file: + with open(args.request_file, 'r') as request_file: + request.update(json.load(request_file)) def start_pipeline(request, pipeline, @@ -141,17 +149,15 @@ def start_pipeline(request, pass except FileNotFoundError: pass - except OSError: - raise OSError("Unable to delete destination metadata file {}".format(output_file)) - if verbose and not show_request: - print("Starting pipeline...") + except OSError as error: + raise OSError("Unable to delete destination metadata file {}".format(output_file)) from error pipeline_url = urljoin(SERVER_ADDRESS, "pipelines/" + pipeline) instance_id = post(pipeline_url, request, show_request) if instance_id: if verbose: - print("Pipeline running: {}, instance = {}".format(pipeline, instance_id)) + print("Starting pipeline {}, instance = {}".format(pipeline, instance_id)) else: print(instance_id) return instance_id @@ -173,6 +179,24 @@ def stop_pipeline(pipeline, instance_id, show_request=False): else: print("Pipeline NOT stopped") +def wait_for_pipeline_running(pipeline, + instance_id, + timeout_sec = 30): + status = {"state" : "QUEUED"} + timeout_count = 0 + while status and not Pipeline.State[status["state"]] == Pipeline.State.RUNNING: + status = get_pipeline_status(pipeline, + instance_id) + if status and status["state"] == "ERROR": + raise ValueError("Error in pipeline, please check vaserving log messages") + time.sleep(SLEEP_FOR_STATUS) + timeout_count += 1 + if timeout_count * SLEEP_FOR_STATUS >= timeout_sec: + print("Timed out waiting for RUNNING status") + break + + return status + def wait_for_pipeline_completion(pipeline, instance_id): status = {"state" : "RUNNING"} @@ -193,6 +217,20 @@ def get_pipeline_status(pipeline, instance_id, show_request=False): "status"])) return get(status_url, show_request) +def launch_results_watcher(request, pipeline, pipeline_instance_id, verbose=True): + status = wait_for_pipeline_running(pipeline, pipeline_instance_id) + watcher = None + if Pipeline.State[status["state"]] == Pipeline.State.RUNNING: + if verbose: + print("Pipeline running") + if os.path.exists(request['destination']['metadata']['path']): + watcher = ResultsWatcher(request['destination']['metadata']['path']) + watcher.watch() + else: + print("Can not find results file {}. 
Are you missing a volume mount?"\ + .format(request['destination']['metadata']['path'])) + return watcher + def _list(list_name, show_request=False): url = urljoin(SERVER_ADDRESS, list_name) response = get(url, show_request) @@ -212,8 +250,8 @@ def post(url, body, show_request=False): return instance_id print("Got unsuccessful status code: {}".format(launch_response.status_code)) print(launch_response.text) - except requests.exceptions.ConnectionError: - raise ConnectionError(SERVER_CONNECTION_FAILURE_MESSAGE) + except requests.exceptions.ConnectionError as error: + raise ConnectionError(SERVER_CONNECTION_FAILURE_MESSAGE) from error return None def get(url, show_request=False): @@ -226,8 +264,8 @@ def get(url, show_request=False): return json.loads(status_response.text) print("Got unsuccessful status code: {}".format(status_response.status_code)) print(status_response.text) - except requests.exceptions.ConnectionError: - raise ConnectionError(SERVER_CONNECTION_FAILURE_MESSAGE) + except requests.exceptions.ConnectionError as error: + raise ConnectionError(SERVER_CONNECTION_FAILURE_MESSAGE) from error return None def delete(url, show_request=False): @@ -239,8 +277,8 @@ def delete(url, show_request=False): if stop_response.status_code != RESPONSE_SUCCESS: print("Unsuccessful status code {} - {}".format(stop_response.status_code, stop_response.text)) return stop_response.status_code - except requests.exceptions.ConnectionError: - raise ConnectionError(SERVER_CONNECTION_FAILURE_MESSAGE) + except requests.exceptions.ConnectionError as error: + raise ConnectionError(SERVER_CONNECTION_FAILURE_MESSAGE) from error return None def print_fps(status): diff --git a/vaserving/app_destination.py b/vaserving/app_destination.py index 0f1907c..bf36093 100644 --- a/vaserving/app_destination.py +++ b/vaserving/app_destination.py @@ -73,5 +73,5 @@ def create_app_destination(cls, request, pipeline, dest_type): raise Exception("Error Creating App Destination: {}," "Exception: {} {}".format(requested_destination_class, type(error), - error)) + error)) from error return None diff --git a/vaserving/app_source.py b/vaserving/app_source.py index a32a6a0..e8d0cb7 100644 --- a/vaserving/app_source.py +++ b/vaserving/app_source.py @@ -81,5 +81,5 @@ def create_app_source(cls, request, pipeline): raise Exception("Error Creating App Source: {}," "Exception: {} {}".format(requested_source_class, type(error), - error)) + error)) from error return None diff --git a/vaserving/arguments.py b/vaserving/arguments.py index 1c1275d..826b6c9 100644 --- a/vaserving/arguments.py +++ b/vaserving/arguments.py @@ -6,8 +6,7 @@ import os import argparse import json -import distutils.util as util - +from distutils import util def parse_options(args=None): @@ -27,7 +26,7 @@ def parse_options(args=None): type=str, default=os.getenv('NETWORK_PREFERENCE', '{}')) parser.add_argument("--max_running_pipelines", action="store", dest="max_running_pipelines", - type=int, default=int(os.getenv('MAX_RUNNING_PIPELINES', '1'))) + type=int, default=int(os.getenv('MAX_RUNNING_PIPELINES', '-1'))) parser.add_argument("--log_level", action="store", dest="log_level", choices=['INFO', 'DEBUG'], default=os.getenv('LOG_LEVEL', 'INFO')) diff --git a/vaserving/common/settings.py b/vaserving/common/settings.py deleted file mode 100644 index 4291230..0000000 --- a/vaserving/common/settings.py +++ /dev/null @@ -1,17 +0,0 @@ -''' -* Copyright (C) 2019-2020 Intel Corporation. 
-* -* SPDX-License-Identifier: BSD-3-Clause -''' - - -# define logging settings here - -LOG_LEVEL = "INFO" -LOG_ATTRS = ['levelname', 'asctime', 'message', 'module'] - - -def set_log_level(level): - # pylint: disable=W0603 - global LOG_LEVEL - LOG_LEVEL = level diff --git a/vaserving/common/utils/logging.py b/vaserving/common/utils/logging.py index 44cf7db..b34a096 100644 --- a/vaserving/common/utils/logging.py +++ b/vaserving/common/utils/logging.py @@ -8,30 +8,38 @@ import json import logging -from vaserving.common import settings _static_loggers = [] +LOG_LEVEL = "INFO" +LOG_ATTRS = ['levelname', 'asctime', 'message', 'module'] + + +def set_default_log_level(level): + # pylint: disable=global-statement + global LOG_LEVEL + LOG_LEVEL = level + + +def _set_log_level(logger, level): + try: + logger.setLevel(level) + except Exception: + print('Unable to set log level, defaulting to "DEBUG"') + logger.setLevel('DEBUG') def set_log_level(level): for logger in _static_loggers: - logger.setLevel(level) + _set_log_level(logger, level) def get_logger(name, is_static=False): - try: - level = settings.LOG_LEVEL - attrs = settings.LOG_ATTRS - except SyntaxError: - print('Unable to read logger settings, defaulting to "DEBUG"') - level = 'DEBUG' - attrs = ['levelname', 'asctime', 'message', 'name'] logger = logging.getLogger(name) if not logger.handlers: json_handler = logging.StreamHandler() - json_handler.setFormatter(JSONFormatter(attrs)) + json_handler.setFormatter(JSONFormatter(LOG_ATTRS)) json_handler.set_name('JSON_Handler') logger.addHandler(json_handler) - logger.setLevel(level) + _set_log_level(logger, LOG_LEVEL) logger.propagate = False if is_static: _static_loggers.append(logger) diff --git a/vaserving/ffmpeg_pipeline.py b/vaserving/ffmpeg_pipeline.py index 64a98df..c10b4cc 100644 --- a/vaserving/ffmpeg_pipeline.py +++ b/vaserving/ffmpeg_pipeline.py @@ -206,7 +206,7 @@ def _spawn(self, args): with self._create_delete_lock: if not self.state is Pipeline.State.ABORTED: - self._process = subprocess.Popen(args, + self._process = subprocess.Popen(args, #pylint: disable=consider-using-with stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, @@ -427,7 +427,7 @@ def _set_properties(self): self._set_section_properties([], []) def _get_outputs(self, args): - # pylint: disable=R1724,E1137,E1136 + # pylint: disable=unsupported-assignment-operation,unsubscriptable-object result = [] args_remaining = len(args) indices = [args_remaining - (x + 1) for x in range(len(args))] @@ -443,18 +443,17 @@ def _get_outputs(self, args): ] = args[index + 1] args_remaining -= 2 continue - else: - output_index = self._output_format_index[args[current_start + 1]] - output = FFmpegPipeline.Output((current_start, index - 1), "-f", - args[current_start + 1], - current_output_properties) - result.append(output) - self._output_format_map[( - args[current_start + 1], output_index)] = output - self._output_format_index[args[current_start + 1]] += 1 - current_output_list = None - current_output_properties = None - current_start = None + output_index = self._output_format_index[args[current_start + 1]] + output = FFmpegPipeline.Output((current_start, index - 1), "-f", + args[current_start + 1], + current_output_properties) + result.append(output) + self._output_format_map[( + args[current_start + 1], output_index)] = output + self._output_format_index[args[current_start + 1]] += 1 + current_output_list = None + current_output_properties = None + current_start = None else: current_output_list = 
current_output_properties["_ARGS_"] current_output_list.append(args[index]) diff --git a/vaserving/gstreamer_app_destination.py b/vaserving/gstreamer_app_destination.py index e302f23..9886c39 100644 --- a/vaserving/gstreamer_app_destination.py +++ b/vaserving/gstreamer_app_destination.py @@ -6,9 +6,9 @@ from collections import namedtuple from enum import Enum, auto +from gstgva.video_frame import VideoFrame from vaserving.app_destination import AppDestination from vaserving.gstreamer_pipeline import GStreamerPipeline -from gstgva.video_frame import VideoFrame GvaSample = namedtuple('GvaSample', ['sample', 'video_frame']) GvaSample.__new__.__defaults__ = (None, None) @@ -24,7 +24,7 @@ class Mode(Enum): def _missing_(cls, name): return cls[name.upper()] - def __init__(self, request, pipeline, *args, **kwargs): + def __init__(self, request, pipeline): AppDestination.__init__(self, request, pipeline) request_config = request.get("destination", {}) diff --git a/vaserving/gstreamer_pipeline.py b/vaserving/gstreamer_pipeline.py index 99580e5..0612c51 100755 --- a/vaserving/gstreamer_pipeline.py +++ b/vaserving/gstreamer_pipeline.py @@ -32,6 +32,8 @@ class GStreamerPipeline(Pipeline): "GstGvaInference", "GvaAudioDetect", "GstGvaActionRecognitionBin"] + SOURCE_ALIAS = "auto_source" + GST_ELEMENTS_WITH_SOURCE_SETUP = ("GstURISourceBin") _inference_element_cache = {} _mainloop = None @@ -57,6 +59,8 @@ def __init__(self, identifier, config, model_manager, request, finished_callback self.models = model_manager.models self.model_manager = model_manager self.request = request + self._auto_source = None + self._unset_properties = [] self.state = Pipeline.State.QUEUED self.frame_count = 0 self.start_time = None @@ -182,9 +186,6 @@ def stop(self): return self.status() def params(self): - # TODO: refactor - # pylint: disable=R0801 - request = copy.deepcopy(self.request) if "models" in request: del request["models"] @@ -233,7 +234,7 @@ def _get_element_property(self, element, key): if isinstance(element, str): return (element, key, None) if isinstance(element, dict): - return (element["name"], element["property"], element.get("format", None)) + return (element["name"], element.get("property", None), element.get("format", None)) return None def _set_bus_messages_flag(self): @@ -247,7 +248,7 @@ def _set_bus_messages_flag(self): def _set_section_properties(self, request_section, config_section): # TODO: refactor - # pylint: disable=R1702 + # pylint: disable=too-many-nested-blocks request, config = Pipeline.get_section_and_config( self.request, self.config, request_section, config_section) @@ -260,29 +261,38 @@ def _set_section_properties(self, request_section, config_section): else: element_properties = [self._get_element_property( config[key]["element"], key)] - for element_name, property_name, format_type in element_properties: element = self.pipeline.get_by_name(element_name) - if element: - if (property_name in [x.name for x in element.list_properties()]): - if (format_type == "json"): - element.set_property( - property_name, json.dumps(request[key])) - else: - element.set_property( - property_name, request[key]) - self._logger.debug("Setting element: {}, property: {}, value: {}".format( - element_name, - property_name, - element.get_property(property_name))) - else: - self._logger.debug("Parameter {} given for element {}" - " but no property found".format( - property_name, element_name)) + if not element: + self._logger.debug("Parameter {} given for element {} but no element found".format( + property_name, 
element_name)) + continue + + if format_type == "element-properties": + for property_name, property_value in request[key].items(): + self._set_element_property( + element, property_name, property_value, format_type) else: - self._logger.debug( - "Parameter {} given for element {}" - " but no element found".format(property_name, element_name)) + self._set_element_property( + element, property_name, request[key], format_type) + + def _set_element_property(self, element, property_name, property_value, format_type=None): + if (property_name in [x.name for x in element.list_properties()]): + if (format_type == "json"): + element.set_property( + property_name, json.dumps(property_value)) + else: + element.set_property( + property_name, property_value) + self._logger.debug("Setting element: {}, property: {}, value: {}".format( + element.__gtype__.name, + property_name, + element.get_property(property_name))) + else: + self._logger.debug("Parameter {} given for element {}" + " but no property found".format( + property_name, element.__gtype__.name)) + self._unset_properties.append([element.__gtype__.name, property_name, property_value]) def _cache_inference_elements(self): model_instance_id = "model-instance-id" @@ -338,6 +348,9 @@ def _set_model_proc(self): @staticmethod def validate_config(config): template = config["template"] + field_names = [fname for _, fname, _, _ in string.Formatter().parse(template)] + if GStreamerPipeline.SOURCE_ALIAS in field_names: + template = template.replace("{"+ GStreamerPipeline.SOURCE_ALIAS +"}", "fakesrc") pipeline = Gst.parse_launch(template) appsink_elements = GStreamerPipeline._get_elements_by_type(pipeline, ["GstAppSink"]) metaconvert = pipeline.get_by_name("metaconvert") @@ -426,6 +439,18 @@ def _set_properties(self): ["source", self.request["source"]["type"], "properties"]) self._set_section_properties([], []) + def _set_auto_source(self): + element = self.request["source"].get("element") + capsfilter = self.request["source"].get("capsfilter", None) + postproc = self.request["source"].get("postproc", None) + + source = "{} name=source".format(element) + if capsfilter: + source = "{} ! capsfilter caps={}".format(source, capsfilter) + if postproc: + source = "{} ! 
{}".format(source, postproc) + + self._auto_source = source def _get_any_source(self): src = self.pipeline.get_by_name("source") @@ -448,6 +473,10 @@ def _set_model_instance_id(self): def start(self): self.request["models"] = self.models + field_names = [fname for _, fname, _, _ in string.Formatter().parse(self.template)] + if self.SOURCE_ALIAS in field_names: + self._set_auto_source() + self.request[self.SOURCE_ALIAS] = self._auto_source self._gst_launch_string = string.Formatter().vformat( self.template, [], self.request) @@ -468,6 +497,8 @@ def start(self): self._set_model_instance_id() src = self._get_any_source() + if self._auto_source and src.__gtype__.name in self.GST_ELEMENTS_WITH_SOURCE_SETUP: + src.connect("source_setup", self.source_setup_callback, src) sink = self.pipeline.get_by_name("appsink") if (not sink): @@ -590,6 +621,11 @@ def source_probe_callback(unused_pad, info, self): self.latency_times[pts] = time.time() return Gst.PadProbeReturn.OK + def source_setup_callback(self, unused_bin, src_element, unused_udata): + for (element_name, property_name, property_value) in self._unset_properties: + if element_name in self.GST_ELEMENTS_WITH_SOURCE_SETUP: + self._set_element_property(src_element, property_name, property_value, None) + @staticmethod def appsink_probe_callback(unused_pad, info, self): buffer = info.get_buffer() diff --git a/vaserving/model_manager.py b/vaserving/model_manager.py index 74a3206..28898b3 100644 --- a/vaserving/model_manager.py +++ b/vaserving/model_manager.py @@ -128,13 +128,9 @@ def convert_version(self, version): def load_models(self, model_dir, network_preference): #TODO: refactor - #pylint: disable=R1702 + #pylint: disable=too-many-nested-blocks - heading = "Loading Models" - banner = "="*len(heading) - self.logger.info(banner) - self.logger.info(heading) - self.logger.info(banner) + self.log_banner("Loading Models") error_occurred = False self.logger.info("Loading Models from Path {path}".format( @@ -198,15 +194,15 @@ def load_models(self, model_dir, network_preference): self.logger.error("Error Loading Model {model_name}" " from: {model_dir}: {err}".format( err=error, model_name=model_name, model_dir=model_dir)) - self.models = models + self.log_banner("Completed Loading Models") + return not error_occurred - heading = "Completed Loading Models" + def log_banner(self, heading): banner = "="*len(heading) self.logger.info(banner) self.logger.info(heading) self.logger.info(banner) - return not error_occurred def get_model_parameters(self, name, version): if name not in self.models or version not in self.models[name]: diff --git a/vaserving/pipeline_manager.py b/vaserving/pipeline_manager.py index e44af33..8c85f3d 100644 --- a/vaserving/pipeline_manager.py +++ b/vaserving/pipeline_manager.py @@ -13,12 +13,11 @@ import jsonschema from vaserving.common.utils import logging from vaserving.pipeline import Pipeline -import vaserving.schema as schema - +from vaserving import schema class PipelineManager: - def __init__(self, model_manager, pipeline_dir, max_running_pipelines=-1, + def __init__(self, model_manager, pipeline_dir, max_running_pipelines, ignore_init_errors=False): self.max_running_pipelines = max_running_pipelines self.model_manager = model_manager @@ -62,24 +61,13 @@ def _import_pipeline_types(self): def _load_pipelines(self): # TODO: refactor - # pylint: disable=R0912,R1702 - - heading = "Loading Pipelines" - banner = "="*len(heading) - self.logger.info(banner) - self.logger.info(heading) - self.logger.info(banner) - + # pylint: 
disable=too-many-branches,too-many-nested-blocks + self.log_banner("Loading Pipelines") error_occurred = False self.pipeline_types = self._import_pipeline_types() self.logger.info("Loading Pipelines from Config Path {path}".format( path=self.pipeline_dir)) - if os.path.islink(self.pipeline_dir): - self.logger.warning( - "Pipelines directory is symbolic link") - if os.path.ismount(self.pipeline_dir): - self.logger.warning( - "Pipelines directory is mount point") + self.warn_if_mounted() pipelines = defaultdict(dict) for root, subdirs, files in os.walk(self.pipeline_dir): if os.path.abspath(root) == os.path.abspath(self.pipeline_dir): @@ -145,13 +133,22 @@ def _load_pipelines(self): pipelines = {pipeline: versions for pipeline, versions in pipelines.items() if len(versions) > 0} self.pipelines = pipelines + self.log_banner("Completed Loading Pipelines") + return not error_occurred - heading = "Completed Loading Pipelines" + def warn_if_mounted(self): + if os.path.islink(self.pipeline_dir): + self.logger.warning( + "Pipelines directory is symbolic link") + if os.path.ismount(self.pipeline_dir): + self.logger.warning( + "Pipelines directory is mount point") + + def log_banner(self, heading): banner = "="*len(heading) self.logger.info(banner) self.logger.info(heading) self.logger.info(banner) - return not error_occurred def get_loaded_pipelines(self): results = [] diff --git a/vaserving/rest_api/video-analytics-serving.yaml b/vaserving/rest_api/video-analytics-serving.yaml index 62c2421..7b2a104 100644 --- a/vaserving/rest_api/video-analytics-serving.yaml +++ b/vaserving/rest_api/video-analytics-serving.yaml @@ -2,7 +2,7 @@ openapi: 3.0.0 info: description: Video Analytics Serving API title: Video Analytics Serving API - version: 0.0.2 + version: 0.0.3 servers: - url: / paths: @@ -280,22 +280,52 @@ components: uri: format: uri type: string + properties: + type: object + capsfilter: + type: string + postproc: + type: string required: - type - uri type: object - DeviceSource: + WebcamSource: properties: type: enum: - - device + - webcam type: string - path: + device: type: string format: path + properties: + type: object + capsfilter: + type: string + postproc: + type: string required: - type - - path + - device + type: object + GstSource: + properties: + type: + enum: + - gst + type: string + element: + type: string + properties: + type: object + capsfilter: + type: string + postproc: + type: string + required: + - type + - element type: object FileDestination: properties: @@ -389,7 +419,8 @@ components: propertyName: type oneOf: - $ref: '#/components/schemas/URISource' - - $ref: '#/components/schemas/DeviceSource' + - $ref: '#/components/schemas/GstSource' + - $ref: '#/components/schemas/WebcamSource' type: object destination: oneOf: diff --git a/vaserving/rtsp/gstreamer_rtsp_destination.py b/vaserving/rtsp/gstreamer_rtsp_destination.py index b566b2d..57e4909 100644 --- a/vaserving/rtsp/gstreamer_rtsp_destination.py +++ b/vaserving/rtsp/gstreamer_rtsp_destination.py @@ -4,7 +4,6 @@ * SPDX-License-Identifier: BSD-3-Clause ''' -import time import gi gi.require_version('Gst', '1.0') # pylint: disable=wrong-import-position @@ -21,44 +20,75 @@ def __init__(self, request, pipeline): self._rtsp_path = pipeline.rtsp_path self._rtsp_server = pipeline.rtsp_server self._identifier = pipeline.identifier - self._number_frames = 0 self._app_src = None self._is_audio_pipeline = False - self._fps = 30 self._logger = logging.get_logger('GStreamerRtspDestination', is_static=True) - self._start_time = None 
self._need_data = False + self._pts = 0 + self._last_timestamp = 0 + self._frame_size = 0 + self._clock = Gst.SystemClock() caps = Gst.Caps.from_string("video/x-raw") if self._pipeline.appsink_element.props.caps: caps = caps.intersect(self._pipeline.appsink_element.props.caps) self._pipeline.appsink_element.props.caps = caps + self._get_request_parameters(request) + def _get_request_parameters(self, request): + destination_config = request.get("destination", + {}) + frame_config = destination_config.get("frame", + {}) + self._cache_length = frame_config.get("cache-length") + self._sync_with_source = frame_config.get("sync-with-source") + self._sync_with_destination = frame_config.get("sync-with-destination") + self._encode_quality = frame_config.get("encode-quality") def _init_stream(self, sample): + self._frame_size = sample.get_buffer().get_size() caps = sample.get_caps() + self._need_data = False self._rtsp_server.add_stream(self._identifier, self._rtsp_path, caps, self) - self._start_time = time.time() + self._last_timestamp = self._clock.get_time() + if self._sync_with_source: + self._pipeline.appsink_element.set_property("sync", + True) def on_need_data(self, _src, _): self._need_data = True - def enough_data(self, _): + def on_enough_data(self, _): self._need_data = False - def set_app_src(self, app_src, is_audio_pipeline): + def set_app_src(self, app_src, is_audio_pipeline, rtsp_pipeline): self._app_src = app_src self._is_audio_pipeline = is_audio_pipeline + self._pts = 0 + self._app_src.set_property("is-live", True) + self._app_src.set_property("do-timestamp", True) + self._app_src.set_property("blocksize", self._frame_size) + if self._sync_with_destination: + self._app_src.set_property("block", True) + self._app_src.set_property("min-percent", 100) + if self._cache_length: + self._app_src.set_property("max-bytes", + int(self._frame_size*self._cache_length)) + encoder = rtsp_pipeline.get_by_name("jpegencoder") + if self._encode_quality and encoder: + encoder.set_property("quality", self._encode_quality) def _push_buffer(self, buffer): - if not self._is_audio_pipeline: - buffer.duration = 1/self._fps * Gst.SECOND - timestamp = self._number_frames * buffer.duration - buffer.pts = buffer.dts = int(timestamp) + timestamp = self._clock.get_time() + delta = timestamp - self._last_timestamp + buffer.pts = buffer.dts = self._pts + buffer.duration = delta + self._pts += delta + self._last_timestamp = timestamp retval = self._app_src.emit('push-buffer', buffer) - self._number_frames += 1 if retval != Gst.FlowReturn.OK: self._logger.debug( "Push buffer failed for stream {} with {}".format(self._rtsp_path, retval)) + self._end_stream() def process_frame(self, frame): # pylint: disable=method-hidden @@ -67,13 +97,19 @@ def process_frame(self, frame): self.process_frame(frame) def _process_frame(self, frame): - if self._pipeline.frame_count > 0: - self._fps = self._pipeline.frame_count / (time.time() - self._start_time) if self._need_data: self._push_buffer(frame.get_buffer()) + else: + self._last_timestamp = self._clock.get_time() - def finish(self): + def _end_stream(self): + self._need_data = False if self._app_src: self._app_src.end_of_stream() + del self._app_src + self._app_src = None + + def finish(self): + self._end_stream() if self._rtsp_server: self._rtsp_server.remove_stream(self._rtsp_path) diff --git a/vaserving/rtsp/gstreamer_rtsp_factory.py b/vaserving/rtsp/gstreamer_rtsp_factory.py index 25ce467..0548c65 100644 --- a/vaserving/rtsp/gstreamer_rtsp_factory.py +++ 
b/vaserving/rtsp/gstreamer_rtsp_factory.py @@ -15,8 +15,8 @@ class GStreamerRtspFactory(GstRtspServer.RTSPMediaFactory): _source = "appsrc name=source format=GST_FORMAT_TIME" - _RtspVideoPipeline = " ! videoconvert ! video/x-raw,format=I420 ! gvawatermark " \ - " ! jpegenc ! rtpjpegpay name=pay0 pt=96" + _RtspVideoPipeline = " ! videoconvert ! video/x-raw,format=I420 \ + ! gvawatermark ! jpegenc name=jpegencoder ! rtpjpegpay name=pay0" # Decoding audio again as there is issue with audio pipeline element audiomixer _RtspAudioPipeline = " ! queue ! decodebin ! audioresample ! audioconvert " \ @@ -69,7 +69,7 @@ def do_create_element(self, url): pipeline = Gst.parse_launch(launch_string) pipeline.caps = caps appsrc = pipeline.get_by_name("source") - source.set_app_src(appsrc, is_audio_pipeline) + source.set_app_src(appsrc, is_audio_pipeline, pipeline) appsrc.connect('need-data', source.on_need_data) - appsrc.connect('enough-data', source.enough_data) + appsrc.connect('enough-data', source.on_enough_data) return pipeline diff --git a/vaserving/schema.py b/vaserving/schema.py index 59c5af3..17fb810 100644 --- a/vaserving/schema.py +++ b/vaserving/schema.py @@ -3,7 +3,6 @@ * * SPDX-License-Identifier: BSD-3-Clause ''' -#pylint: disable=R0801 tags = { "type": "object", @@ -30,7 +29,12 @@ }, "class":{ "type":"string" - } + }, + "element": {"enum": ["appsrc"], "default" : "appsrc"}, + "properties": {"type": "object", + "element": {"name": "source", "format": "element-properties"}}, + "capsfilter": {"type": "string"}, + "postproc": {"type": "string"} }, "required":["type", "class"] }, @@ -48,45 +52,51 @@ {"name": "i", "property": "_INPUT_ARG_", "type": "input"}], "element": [{"name": "source", "property": "uri"}, - {"name": "source", - "property": "location"}, - {"name": "metaconvert", "property": "source"}]} + {"name": "metaconvert", "property": "source"}]}, + "element": {"enum": ["urisourcebin"], "default": "urisourcebin"}, + "properties": {"type": "object", + "element": {"name": "source", "format": "element-properties"}}, + "capsfilter": {"type": "string"}, + "postproc": {"type": "string"} }, "required": ["type", "uri"] }, - "path": { + "webcam": { "type": "object", "properties": { - "type": { - "type": "string", - "enum": ["path"] - }, - "path": { - "type": "string", - "element": [{"name": "source", - "property": "location"}]} + "type": {"type": "string", "enum": ["webcam"]}, + "device": {"type": "string", + "format": "path", "element": [{"name": "source", "property": "device"}, + {"name": "metaconvert", "property": "source"}]}, + "element": {"enum": ["v4l2src"], "default": "v4l2src"}, + "properties": {"type": "object", + "element": {"name": "source", "format": "element-properties"}}, + "capsfilter": {"type": "string", "default": "image/jpeg"}, + "postproc": {"type": "string"} }, - "required": ["type", "path"] + "required": ["type", "device"] }, - "device": { + "gst": { "type": "object", "properties": { - "type": {"type": "string", "enum": ["device"]}, - "path": {"type": "string", - "format": "path", "element": [{"name": "source", "property": "device"}, - {"name": "metaconvert", "property": "source"}]} + "type": {"type": "string", "enum": ["gst"]}, + "element": {"type": "string"}, + "properties": {"type": "object", + "element": {"name": "source", "format": "element-properties"}}, + "capsfilter": {"type": "string"}, + "postproc": {"type": "string"} }, - "required": ["type", "path"] + "required": ["type", "element"] }, "oneOf": [ { "$ref": "#/uri" }, { - "$ref": "#/path" + "$ref": "#/webcam" }, { - 
"$ref": "#/device" + "$ref": "#/gst" }, { "$ref": "#/application" @@ -178,7 +188,7 @@ "type": "string", "element": "destination" }, - "clientId": { + "mqtt-client-id": { "type": "string", "element": "destination" }, @@ -263,6 +273,25 @@ "type":"string", "minLength": 1, "pattern" : "^[a-zA-Z0-9][a-zA-Z0-9_/-]*[a-zA-Z0-9]$" + }, + "cache-length": { + "type":"integer", + "default":30, + "minimum":0 + }, + "sync-with-source": { + "type":"boolean", + "default":True + }, + "sync-with-destination":{ + "type":"boolean", + "default":True + }, + "encode-quality":{ + "type":"integer", + "minimum":0, + "maximum":100, + "default":85 } }, "required": [ diff --git a/vaserving/vaserving.py b/vaserving/vaserving.py index bc9c821..241be07 100644 --- a/vaserving/vaserving.py +++ b/vaserving/vaserving.py @@ -11,9 +11,9 @@ from vaserving.pipeline_manager import PipelineManager from vaserving.model_manager import ModelManager from vaserving.common.utils import logging -from vaserving.common import settings -#pylint: disable=C0103 +# Allow non-PascalCase class name for __VAServing +#pylint: disable=invalid-name class __VAServing: @@ -129,7 +129,7 @@ def _log_options(self): def start(self, _options=None): if (self._stopped): self.options = parse_options(_options) - settings.set_log_level(self.options.log_level) + logging.set_default_log_level(self.options.log_level) self._log_options() self.model_manager = ModelManager( os.path.abspath( @@ -142,6 +142,7 @@ def start(self, _options=None): self.model_manager, os.path.abspath(os.path.join(self.options.config_path, self.options.pipeline_dir)), + max_running_pipelines=self.options.max_running_pipelines, ignore_init_errors=self.options.ignore_init_errors) self._stopped = False