diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 858f4b3..0000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# Each line is a file pattern followed by one or more owners. -# last matching pattern takes the most precedence. - -* @nnshah1 @whbruce diff --git a/.github/PULL_REQUEST_TEMPLATE/architectural_decision_record.md b/.github/PULL_REQUEST_TEMPLATE/architectural_decision_record.md deleted file mode 100644 index 8555c8e..0000000 --- a/.github/PULL_REQUEST_TEMPLATE/architectural_decision_record.md +++ /dev/null @@ -1,61 +0,0 @@ -# Title (Brief Meaningful Title) - -Describe the problem statement, context, and/or rationale for the proposal being made. Describe the basic technical requirements along with the underlying technical, business, or strategic issues / motivations. The section should describe the facts and be value neutral. In ADR examples found online this section is generally labeled _Context_. For simplicity we've chosen to forego the separate section heading and use the first section immediately following the ADR title. This section may contain a list of specific questions in addition to introductory material. This section is intended as a summary and may refer to the background section for more detailed information if needed. The context should be written with the intended audience in mind (i.e. a person engaged and working in the project). It is not meant to provide the context required for someone not familiar with the basic architecture or goals of the project but can refer to other ADRs or documents whose purpose is to provide that background. Provide the context and background relevant for the discussion / component at hand. - -Note: The title and context are meant for the intended audience. If you have a question on why something is titled or described a certain way - reach out to the authors for clarification. - -[Opening / Context / Rationale section is __required__] - - -### Status - -| Status | Date | Authors | Deciders | -| ------ | ---- | ------- | -------- | -| Proposed, Accepted, Rejected, Deprecated or Superseded by [New ADR](adr) | 3/10/2020 | @author1, @author2, @author3 | [VPL Engineering or VPL Workgroup ] | - -[Status section is __required__] - -## Decision - -Summarize the proposal in a few paragraphs with as much detail as required. ADRs documenting high level design will often require diagrams. Please see gitlab's integration of mermaid / plantuml support. Focus on the details which support the constraints / requirements / rationale outlined in the context section. Section is free form (may have links to additional information, sub-sections, tables, diagrams, etc.). - - _Note_: The decision section represents the fully formed recommendation / proposal of the authors. It is not a _ratified_ decision until the status is moved to _Accepted_. - - _Note_: Consider adding a _Rationale_ sub-section to help articulate the key arguments in favor of the decision. A _Rationale_ sub-section is optional. See [Architecture Decision Records in Action](https://resources.sei.cmu.edu/asset_files/Presentation/2017_017_001_497746.pdf) for an example. - -[Decision section is __required__] - -## Background - -Provide supplemental material here. List out relevant options that were discussed but discarded (with relevant pros / cons listed). Provide links to external documentation as reference. Section is free form (may have links to additional information, subsections, tables, diagrams, etc.). 
Typically will have subsection(s) for options. Formatting left to the author. - -[Background section is __optional__] - -### Options - - 1. Option 1 - - Description of option 1. Description of pros / cons. - - 1. Option 2 - - Description of option 2. Description of Pros / Cons - - 1. Recommendation - - Collect additional discussion - -### References -1. [one](one) -2. [two](two) - -## Implications and Next Steps - -List out the impact to other modules and the logical next steps for execution of the proposal. List any additional ADRs that are required. - - -[Implications and Next Steps section is __required__ ] - - - - diff --git a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md b/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md deleted file mode 100644 index 913d058..0000000 --- a/.github/PULL_REQUEST_TEMPLATE/pull_request_template.md +++ /dev/null @@ -1,8 +0,0 @@ -Thank you for your contribution to the Video Analytics Serving repository. - -Before submitting this PR, please make sure: - -- [ ] Your code builds clean without any errors or warnings -- [ ] You are using approved terminology -- [ ] You have added unit tests - diff --git a/.gitignore b/.gitignore index 56575cc..388a597 100644 --- a/.gitignore +++ b/.gitignore @@ -7,5 +7,5 @@ docker/Dockerfile.env docker/final.env models tests/results/**/* -samples/lva_ai_extension/tests/results/**/* +samples/ava_ai_extension/tests/results/**/* samples/edgex_bridge/edgex/**/* diff --git a/README.md b/README.md index b652fc3..fb0aa40 100644 --- a/README.md +++ b/README.md @@ -156,6 +156,8 @@ The file path is specified in the `destination` section of the REST request and ### Queued, Running and Completed The vaclient `run` command starts the pipeline. The underlying REST request returns a `pipeline instance` which is used to query the state of the pipeline. All being well it will go into `QUEUED` then `RUNNING` state. We can interrogate the pipeline status by using the vaclient `start` command that kicks off the pipeline like `run` and then exits displaying the `pipeline instance` which is used by the `status` command to view pipeline state. +> **NOTE:** The pipeline instance value depends on the number of pipelines started while the server is running so may differ from the value shown in the following examples. 
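Once `start` prints an instance id, the state can be queried with the `status` command. The sketch below is illustrative only and assumes an instance id of `1`; the actual id and output depend on how many pipelines have been started, as noted above.

```bash
# Query the state of a previously started pipeline instance (instance id assumed to be 1)
./vaclient/vaclient.sh status object_detection/person_vehicle_bike 1
```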
+ ``` $ ./vaclient/vaclient.sh start object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true diff --git a/docker/build.sh b/docker/build.sh index 67d5011..e04cdab 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -10,7 +10,8 @@ DOCKERFILE_DIR=$(dirname "$(readlink -f "$0")") SOURCE_DIR=$(dirname "$DOCKERFILE_DIR") BASE_IMAGE_FFMPEG="openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg:20.10" -BASE_IMAGE_GSTREAMER="openvino/ubuntu20_data_runtime:2021.4" +BASE_IMAGE_GSTREAMER="openvino/ubuntu20_data_runtime:2021.4.1" + BASE_IMAGE=${BASE_IMAGE:-""} BASE_BUILD_CONTEXT= BASE_BUILD_DOCKERFILE= @@ -35,7 +36,7 @@ BASE_BUILD_OPTIONS="--network=host " SUPPORTED_IMAGES=($BASE_IMAGE_GSTREAMER $BASE_IMAGE_FFMPEG) OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-"openvino/ubuntu20_data_dev"} -OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4"} +OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4.1"} FORCE_MODEL_DOWNLOAD= DEFAULT_GSTREAMER_BASE_BUILD_TAG="video-analytics-serving-gstreamer-base" @@ -240,9 +241,9 @@ get_options() { if [ -z "$BASE_IMAGE" ]; then if [ $FRAMEWORK = 'ffmpeg' ]; then - BASE_IMAGE=$BASE_IMAGE_FFMPEG + BASE_IMAGE=${CACHE_PREFIX}$BASE_IMAGE_FFMPEG else - BASE_IMAGE=$BASE_IMAGE_GSTREAMER + BASE_IMAGE=${CACHE_PREFIX}$BASE_IMAGE_GSTREAMER fi fi @@ -258,10 +259,14 @@ get_options() { echo " " echo "----------------------------" echo "Running Model Downloader..." - echo "OMZ Tools Image: $OPEN_MODEL_ZOO_TOOLS_IMAGE" + echo "OMZ Tools Image: ${CACHE_PREFIX}$OPEN_MODEL_ZOO_TOOLS_IMAGE" echo "OMZ Version: $OPEN_MODEL_ZOO_VERSION" echo "----------------------------" - $SOURCE_DIR/tools/model_downloader/model_downloader.sh --model-list $MODELS --output $SOURCE_DIR $FORCE_MODEL_DOWNLOAD --open-model-zoo-image $OPEN_MODEL_ZOO_TOOLS_IMAGE --open-model-zoo-version $OPEN_MODEL_ZOO_VERSION $DRY_RUN + $SOURCE_DIR/tools/model_downloader/model_downloader.sh --model-list $MODELS \ + --output $SOURCE_DIR $FORCE_MODEL_DOWNLOAD \ + --open-model-zoo-image ${CACHE_PREFIX}$OPEN_MODEL_ZOO_TOOLS_IMAGE \ + --open-model-zoo-version $OPEN_MODEL_ZOO_VERSION \ + $DRY_RUN elif [ -d "$MODELS" ]; then if [ ! -d "$SOURCE_DIR/models" ]; then $RUN_PREFIX mkdir $SOURCE_DIR/models @@ -380,16 +385,15 @@ if [ "$BASE" == "BUILD" ]; then show_base_options launch "$RUN_PREFIX docker build "$BASE_BUILD_CONTEXT" -f "$BASE_BUILD_DOCKERFILE" $BASE_BUILD_OPTIONS $BASE_BUILD_ARGS -t $BASE_BUILD_TAG" - BASE_IMAGE=$BASE_BUILD_TAG else # Ensure image is latest from Docker Hub - launch "$RUN_PREFIX docker pull ${CACHE_PREFIX}$BASE_IMAGE" + launch "$RUN_PREFIX docker pull $BASE_IMAGE" fi # BUILD IMAGE -BUILD_ARGS+=" --build-arg BASE=${CACHE_PREFIX}$BASE_IMAGE " +BUILD_ARGS+=" --build-arg BASE=$BASE_IMAGE " BUILD_ARGS+=" --build-arg FRAMEWORK=$FRAMEWORK " if [ -n "$MODELS" ]; then BUILD_ARGS+="--build-arg MODELS_PATH=$MODELS_PATH " @@ -429,8 +433,8 @@ if [ ! 
-z "$ENVIRONMENT_FILE_LIST" ]; then cat $ENVIRONMENT_FILE_LIST | grep -E '=' | tr '\n' ' ' | tr '\r' ' ' > $DOCKERFILE_DIR/final.env echo " HOME=/home/video-analytics-serving " >> $DOCKERFILE_DIR/final.env echo "ENV " | cat - $DOCKERFILE_DIR/final.env | tr -d '\n' >> $DOCKERFILE_DIR/Dockerfile.env - printf "\nENV PYTHONPATH=\$PYTHONPATH:/home/video-analytics-serving\n" >> $DOCKERFILE_DIR/Dockerfile.env -fi + printf "\nENV PYTHONPATH=\$PYTHONPATH:/home/video-analytics-serving\nENV GST_PLUGIN_PATH=\$GST_PLUGIN_PATH:/usr/lib/x86_64-linux-gnu/gstreamer-1.0/" >> $DOCKERFILE_DIR/Dockerfile.env +fi show_image_options diff --git a/docker/run.sh b/docker/run.sh index 98c1850..2cf3f2b 100755 --- a/docker/run.sh +++ b/docker/run.sh @@ -86,9 +86,9 @@ enable_hardware_access() { USER_GROUPS+="--group-add $(stat -c '%g' /dev/dri/render*) " fi - # NCS2 + # Intel(R) NCS2 if [ -d /dev/bus/usb ]; then - echo "Found /dev/bus/usb - enabling for NCS2" + echo "Found /dev/bus/usb - enabling for Intel(R) NCS2" DEVICE_CGROUP_RULE=--device-cgroup-rule=\'c\ 189:*\ rmw\' VOLUME_MOUNT+="-v /dev/bus/usb:/dev/bus/usb " fi diff --git a/docs/building_video_analytics_serving.md b/docs/building_video_analytics_serving.md index 40fbefd..cb55587 100644 --- a/docs/building_video_analytics_serving.md +++ b/docs/building_video_analytics_serving.md @@ -26,7 +26,7 @@ can be customized to meet an application's requirements. | Command | Media Analytics Base Image | Image Name | Description | | --- | --- | --- | ---- | -| `./docker/build.sh`| **ubuntu20_data_runtime:2021.4** docker [image](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) |`video-analytics-serving-gstreamer` | DL Streamer based microservice with default pipeline definitions and deep learning models. | +| `./docker/build.sh`| **ubuntu20_data_runtime:2021.4.1** docker [image](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) |`video-analytics-serving-gstreamer` | DL Streamer based microservice with default pipeline definitions and deep learning models. | | `./docker/build.sh --framework ffmpeg --open-model-zoo...`| **xeone3-ubuntu1804-analytics-ffmpeg:20.10** docker [image](https://hub.docker.com/r/openvisualcloud/xeon-ubuntu1804-analytics-ffmpeg) |`video-analytics-serving-ffmpeg`| FFmpeg Video Analytics based microservice with default pipeline definitions and deep learning models. | ### Building with OpenVINO, Ubuntu 20.04 and DL Streamer Support **Example:** @@ -70,7 +70,7 @@ All validation is done in docker environment. Host built (aka "bare metal") conf | **Base Image** | **Framework** | **Openvino Version** | **Link** | **Default** | |---------------------|---------------|---------------|------------------------|-------------| -| OpenVINO 2021.4 ubuntu20_data_runtime | GStreamer | 2021.4 | [Docker Hub](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) | Y | +| OpenVINO 2021.4.1 ubuntu20_data_runtime | GStreamer | 2021.4.1 | [Docker Hub](https://hub.docker.com/r/openvino/ubuntu20_data_runtime) | Y | | Open Visual Cloud 20.10 xeone3-ubuntu1804-analytics-ffmpeg | FFmpeg | 2021.1 | [Docker Hub](https://hub.docker.com/r/openvisualcloud/xeone3-ubuntu1804-analytics-ffmpeg) | Y | --- diff --git a/docs/changing_object_detection_models.md b/docs/changing_object_detection_models.md index 7997061..6e997ae 100644 --- a/docs/changing_object_detection_models.md +++ b/docs/changing_object_detection_models.md @@ -52,7 +52,6 @@ Use [vaclient](/vaclient/README.md) to list the models. 
Check that `object_detec ``` $ ./vaclient/vaclient.sh list-models - emotion_recognition/1 - - object_detection/1 - object_detection/person_vehicle_bike - object_classification/vehicle_attributes - audio_detection/environment @@ -129,13 +128,12 @@ Expected output (abbreviated): ``` [ SUCCESS ] Generated IR version 10 model. -[ SUCCESS ] XML file: /tmp/tmp8mq6f1ti/public/yolo-v2-tiny-tf/FP32/yolo-v2-tiny-tf.xml -[ SUCCESS ] BIN file: /tmp/tmp8mq6f1ti/public/yolo-v2-tiny-tf/FP32/yolo-v2-tiny-tf.bin -[ SUCCESS ] Total execution time: 5.75 seconds. -[ SUCCESS ] Memory consumed: 533 MB. -It's been a while, check for a new version of Intel(R) Distribution of OpenVINO(TM) toolkit here https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/choose-download.html?cid=other&source=Prod&campid=ww_2020_bu_IOTG_OpenVINO-2021-1&content=upg_pro&medium=organic_uid_agjj or on the GitHub* +[ SUCCESS ] XML file: /tmp/tmps4pxnu7y/public/yolo-v2-tiny-tf/FP32/yolo-v2-tiny-tf.xml +[ SUCCESS ] BIN file: /tmp/tmps4pxnu7y/public/yolo-v2-tiny-tf/FP32/yolo-v2-tiny-tf.bin +[ SUCCESS ] Total execution time: 9.70 seconds. +[ SUCCESS ] Memory consumed: 584 MB. -Downloaded yolo-v2-tiny-tf model-proc file from gst-video-analytics repo +Copied model_proc to: /output/models/object_detection/yolo-v2-tiny-tf/yolo-v2-tiny-tf.json ``` The model will now be in `models` folder in the root of the project: @@ -195,7 +193,7 @@ Edited pipeline template: ``` "template": ["uridecodebin name=source", - " ! gvadetect model={models[person_vehicle_bike_detection][yolo-v2-tiny-tf][network]} name=detection", + " ! gvadetect model={models[object_detection][yolo-v2-tiny-tf][network]} name=detection", " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", " ! appsink name=appsink" ] @@ -215,7 +213,6 @@ The `list-models` command now shows 9 models, including `object_detection/yolo-v ```bash $ ./vaclient/vaclient.sh list-models - emotion_recognition/1 - - object_detection/1 - object_detection/yolo-v2-tiny-tf - object_detection/person_vehicle_bike - object_classification/vehicle_attributes @@ -275,7 +272,6 @@ Once started you can verify that the new model has been loaded. ```bash $ ./vaclient/vaclient.sh list-models - emotion_recognition/1 - - object_detection/1 - object_detection/yolo-v2-tiny-tf - object_detection/person_vehicle_bike - object_classification/vehicle_attributes diff --git a/docs/creating_extensions.md b/docs/creating_extensions.md index 54830f2..5341015 100644 --- a/docs/creating_extensions.md +++ b/docs/creating_extensions.md @@ -1,77 +1,204 @@ # Creating Extensions -| [Events](#events) | [ Extensions ](#extensions) | [Adding Extensions to Pipelines](#adding-extensions-to-pipelines) | [References](#references) | - -Extensions are a simple way to add functionality to a DL Streamer -pipeline using it's [python -bindings](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Python) -and [GVAPython -element](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvapython). By -extending pipelines using `gvapython` a developer can access and alter -frames as they pass through a pipeline, add or change metadata such as -detected objects or JSON messages. The DL Streamer [gvapython -samples](https://github.com/openvinotoolkit/dlstreamer_gst/blob/master/samples/gst_launch/gvapython/face_detection_and_classification/README.md) -provide more examples of the full breadth of capabilities. 
- -# Creating an Object Count Exceeded Extension - -The following sections demonstrate how to create an extension that -detects when the number of objects in a frame exceeds a specific -threshold and how to publish that event using the -[`gva_event_meta`](/extensions/gva_event_meta/gva_event_meta.py) module along with -[`gva_event_convert`](/extensions/gva_event_meta/gva_event_convert.py), [`gvametaconvert`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametaconvert) -and [`gvametapublish`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametapublish). -## Events +| [ Extensions ](#extensions) | [Event based Extensions](#event-based-extensions) | [References](#references) | -Events are a new type of metadata that can be added and read from a -frame using methods from the -[`gva_event_meta`](/extensions/gva_event_meta/gva_event_meta.py) -module. As they are in preview, their schema may change but they -illustrate how to add and publish additional information using the -underlying DL Streamer python bindings. +# Extensions -Events are also used to publish results of the new set of Video -Analytics Serving spatial analytics extensions: [object_line_crossing](/extensions/spatial_analytics/object_line_crossing.md) -and [object_zone_count](/extensions/spatial_analytics/object_zone_count.md). +Extensions are a simple way to add functionality to a DL Streamer pipeline using its [python bindings](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/Python) and [GVAPython element](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvapython). By extending pipelines using `gvapython` a developer can access frames and analyze and/or update metadata such as detected objects or JSON messages. The DL Streamer [gvapython samples](https://github.com/openvinotoolkit/dlstreamer_gst/blob/master/samples/gst_launch/gvapython/face_detection_and_classification/README.md) provide more examples of the full breadth of capabilities. -## Event Schema +An extension is a GVA Python script that is called during pipeline execution. The script is given a frame and any VA Serving parameters defined by the pipeline request. +The extension must be added after the last element that generates data it requires. Below are some examples that cover how extensions can be used to process inference results. +> Note: Make sure to either build VA Serving container with `docker/build.sh` after editing extension code and/or pipeline per [Build script reference](build_script_reference.md) or use --dev mode during run as outlined in [Running Video Analytics Serving](running_video_analytics_serving.md#developer-mode) + +## Example: Processing Inference Results + +This section will outline an example to show how inference results of object detection can be accessed and used to generate alerts. + +### Extension + +`process_frame` is the default function invoked by GVA Python. +> Note: The `process_frame` function needs to `return True` in order for the rest of the pipeline to function. In the absence of this statement, the extension runs and exits without executing the subsequent parts of the pipeline which might be useful for extension debug. + +In the example below, in `process_frame`, the number of objects in the frame is obtained by counting the number of detected regions. A statement is printed if the number of objects exceeds a threshold value. 
+ +```python +class ObjectCounter: + def __init__(self): + self._threshold = 0 + + def process_frame(self, frame): + num_objects = len(list(frame.regions())) + if num_objects > self._threshold: + print("Object count {} exceeded threshold {}".format( + num_objects, self._threshold)) + return True +``` +### Pipeline -Events are added to a frame and stored as a JSON message containing a -list of event objects. The only required key is `event-type` which is -the type of event. It is defined by the extension. An optional field -is `related-objects` which is an array of indices to the list of -detected objects. This allows a many-to-many relationship between -events and the objects that create them (e.g. a social distancing -algorithm would have one violate event and a number of objects that -were too close to each other). +The extension must be added after the last element that generates data it requires. In this case it is placed after `gvadetect`. The template of the object_detection/person_vehicle_bike pipeline is modified by adding a line for the GVA python extension as shown below. +`class`, `module` and `name` are `gvapython` parameters. See [`gvapython` documentation](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvapython) for a full list of parameters. + +```json + +"template": ["uridecodebin name=source", + " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", + " ! gvapython class=ObjectCounter module=/home/video-analytics-serving/extensions/object_counter.py name=object-counter", + " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", + " ! appsink name=appsink" + ] + +``` +### Output + +The pipeline can be run with VA Client as follows: +```bash +vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +``` + +As process_frame runs once per frame, VA Serving output would resemble +```bash +{"levelname": "INFO", "asctime": "2021-08-31 23:12:44,838", "message": "Setting Pipeline 1 State to RUNNING", "module": "gstreamer_pipeline"} +Object count 1 exceeded threshold 0 +Object count 1 exceeded threshold 0 + +``` + +## Example: Setting Extension Parameters + +The same object_detection/person_vehicle_bike pipeline will be used to illustrate setting parameters. Instead of hard-coding the value for object count threshold in the previous example, it's possible to set it at request time. +> Note: The `kwarg` parameter is a JSON object and requires the pipeline parameter format to be set to `json` as in the `object-counter` example below. The default value for the object needs to be specified at the parameter level and not in the individual properties due to how default values are applied. + +### Extension + +The extension can take in the defined parameters via the constructor and use them. In this case, a `count_threshold` parameter is passed in and in `process_frame`, the value is used to check if number of detected regions exceeds the threshold. 
+ +```python +class ObjectCounter: + def __init__(self, count_threshold): + self._threshold = count_threshold + + def process_frame(self, frame): + num_objects = len(list(frame.regions())) + if num_objects > self._threshold: + print("Object count {} exceeded threshold {}".format( + num_objects, self._threshold)) + return True +``` + +### Pipeline ```json { - "$schema": "https://json-schema.org/draft/2019-09/schema", - "type": "array", - "items" : { - "properties": { - "event-type": { - "description": "Event type, known by caller", - "type": "string" - }, - "related-objects": { - "description": "Array of related detections, each entry refers to index of associated detected object", - "type" : "array", - "items" : { - "type" : "integer" - } - }, - }, - "required": [ - "event-type" + "type": "GStreamer", + "template": [ + "uridecodebin name=source", + " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", + " ! gvapython class=ObjectCounter module=/home/video-analytics-serving/extensions/object_counter.py name=object-counter", + " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", + " ! appsink name=appsink" ], - "additionalProperties": true - } + "description": "Person Vehicle Bike Detection based on person-vehicle-bike-detection-crossroad-0078", + "parameters": { + "type": "object", + "properties": { + "detection-device": { + "element": { + "name": "detection", + "property": "device" + }, + "type": "string" + }, + "detection-model-instance-id": { + "element": { + "name": "detection", + "property": "model-instance-id" + }, + "type": "string" + }, + "inference-interval": { + "element": "detection", + "type": "integer" + }, + "threshold": { + "element": "detection", + "type": "number" + }, + "object-counter": { + "element": { + "name": "object-counter", + "property": "kwarg", + "format": "json" + }, + "type": "object", + "properties": { + "count_threshold": { + "type": "integer" + } + }, + "default": { + "count_threshold": 0 + } + } + } + } } ``` +### Output + +- Running VA Client as shown (parameter-file is optional for extension parameters if defaults are set in pipeline JSON) + ```bash + vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true + ``` + Output reflects the default count_threshold i.e 0 + ```bash + + {"levelname": "INFO", "asctime": "2021-08-31 18:49:19,360", "message": "Setting Pipeline 1 State to RUNNING", "module": "gstreamer_pipeline"} + Object count 1 exceeded threshold 0 + Object count 1 exceeded threshold 0 + + ``` +- Running VA Client with the following parameter file + ```json + { + "parameters": { + "object-counter": { + "count_threshold": 1 + } + } + } + ``` + ```bash + vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true --parameter-file /tmp/sample_parameters.json + ``` + VA Serving output shows count_threshold is set to 1 per parameter file + ```bash + + {"levelname": "INFO", "asctime": "2021-09-01 16:28:30,037", "message": "Setting Pipeline 1 State to RUNNING", "module": "gstreamer_pipeline"} + Object count 2 exceeded threshold 1 + Object count 2 exceeded threshold 1 + + ``` + + +# Event based Extensions + +The following sections build on the previous extension example that detects when the number of objects in a frame exceeds a specific threshold and additionally demonstrates how to 
publish a corresponding event using the [`gva_event_meta`](/extensions/gva_event_meta/gva_event_meta.py) module along with [`gva_event_convert`](/extensions/gva_event_meta/gva_event_convert.py), [`gvametaconvert`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametaconvert) and [`gvametapublish`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametapublish). + +## Events + +> Note: Events are a preview feature and related aspects like their schema are subject to change. + +Events are a type of metadata that can be added and read from a frame using methods from the [`gva_event_meta`](/extensions/gva_event_meta/gva_event_meta.py) module. They illustrate how to add and publish additional information using the underlying DL Streamer python bindings. + +Events are also used to publish results of the new set of Video Analytics Serving spatial analytics extensions: [object_line_crossing](/extensions/spatial_analytics/object_line_crossing.md) +and [object_zone_count](/extensions/spatial_analytics/object_zone_count.md). + +## Event Schema + +Events are added to a frame and stored as a JSON message containing a list of event objects. The only required key is `event-type` which is the type of event. It is defined by the extension. An optional field is [related-objects](../extensions/gva_event_meta/schema.py#L10) which is an array of indices to the list of detected objects. This allows a many-to-many relationship between events and the objects that create them (e.g. a social distancing algorithm would have one violate event and a number of objects that were too close to each other). + Here is an example of how events are added to existing metadata showing an event type `object-count-exceeded` with two related objects. ```json @@ -92,66 +219,150 @@ Here is an example of how events are added to existing metadata showing an event ] ``` -## Extensions -An extension is a GVA Python script that is called during pipeline -execution. The script is given a frame and any VA Serving parameters -defined by the pipeline request. The following example generates an -event type `object-count-exceeded` if more than a pre-defined number -of objects are detected in a frame. +## Example: Event Generation in Extensions + +The following example generates an event type `object-count-exceeded` if more than a pre-defined number of objects are detected in a frame. + + +### Extension -A couple of things to note: +Few things to note: -* The `threshold` pipeline parameter is picked up in the - constructor. Other parameters can be accessed in the same way. +* The `count_threshold` pipeline parameter is picked up in the constructor as it was defined in the parameters section of the pipeline. * Use of the [`gva_event_meta`](/extensions/gva_event_meta/gva_event_meta.py) module's function `add_event()` to populate an event object. +* `attributes` dictionary is meant for storing event related information. It needs to be set to a non-empty value. 
```python import gva_event_meta class ObjectCounter: - def __init__(self, threshold): - self._threshold = threshold + def __init__(self, count_threshold): + self._threshold = count_threshold def process_frame(self, frame): num_objects = len(list(frame.regions())) if num_objects > self._threshold: + attributes = {'num_objects': num_objects} gva_event_meta.add_event(frame, - event_type="object_count_exceeded") + event_type="object_count_exceeded", + attributes=attributes) + return True ``` -## Adding Extensions to Pipelines +### Pipeline -The extension must be added after the last element that generates data -it requires. As events are not part of the DL Streamer message schema -we also add an extension called [`gva_event_convert`](/extensions/gva_event_meta/gva_event_convert.py) -after [`gvametaconvert`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametaconvert) -and before [`gvametapublish`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametapublish). This -reusable extension adds an event list to the published result. +The extension must be added after the last element that generates data it requires. Following the event based extension example, as events are not part of the DL Streamer message schema we also add an extension called [`gva_event_convert`](/extensions/gva_event_meta/gva_event_convert.py) after [`gvametaconvert`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametaconvert) and before [`gvametapublish`](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvametapublish). This reusable extension adds an event list to the published result. A pipeline using a sample object counting extension would look like this: ![diagram](images/object_counter_pipeline.png) -and the template would look like this: +and the pipeline JSON would look like this: + +```json +{ + "type": "GStreamer", + "template": [ + "uridecodebin name=source", + " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", + " ! gvapython class=ObjectCounter module=/home/video-analytics-serving/extensions/object_counter.py name=object-counter", + " ! gvametaconvert name=metaconvert", + " ! gvapython module=/home/video-analytics-serving/extensions/gva_event_meta/gva_event_convert.py", + " ! gvametapublish name=destination", + " ! appsink name=appsink" + ], + "description": "Person Vehicle Bike Detection based on person-vehicle-bike-detection-crossroad-0078", + "parameters": { + "type": "object", + "properties": { + "detection-device": { + "element": { + "name": "detection", + "property": "device" + }, + "type": "string" + }, + "detection-model-instance-id": { + "element": { + "name": "detection", + "property": "model-instance-id" + }, + "type": "string" + }, + "inference-interval": { + "element": "detection", + "type": "integer" + }, + "threshold": { + "element": "detection", + "type": "number" + }, + "object-counter": { + "element": { + "name": "object-counter", + "property": "kwarg", + "format": "json" + }, + "type": "object", + "properties": { + "count_threshold": { + "type": "integer" + } + }, + "default": { + "count_threshold": 1 + } + } + } + } +} ``` -"template": ["uridecodebin name=source", - "! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", - " ! gvapython class=ObjectCounter module=/home/video-analytics-serving/extensions/object_counter.py name=object-counter", - " ! gvametaconvert name=metaconvert", - " ! 
gvapython module=/home/video-analytics-serving/extensions/gva_event_meta/gva_event_convert.py", - " ! gvametapublish name=destination", - " ! appsink name=appsink"], + + +### Event Metadata + +`event_type` is a required field set during add_event in the extension above. +`num_objects` is a custom optional field set as part of `attributes` in the extension above. +```json +{ + "events": [ + { + "event-type": "object_count_exceeded", + "num_objects": 2 + } + ] +} ``` +Another optional field (unused here) is `related_objects` as shown in [line crossing](/extensions/spatial_analytics/object_line_crossing.py) and [zone counting](/extensions/spatial_analytics/object_zone_count.py). + +### Output + +VA Client can be launched as follows, as no parameter-file is given, the default count_threshold is picked up i.e 1. +```bash + vaclient/vaclient.sh run object_detection/person_vehicle_bike https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true +``` +Output snippet is shown, events are fired for object count > 1: +```bash + +Timestamp 43666666666 +- person (0.80) [0.25, 0.03, 0.28, 0.21] +Timestamp 43750000000 +- person (0.84) [0.26, 0.02, 0.29, 0.21] +Timestamp 43833333333 +- person (0.98) [0.27, 0.02, 0.31, 0.19] +Timestamp 43916666666 +- person (0.97) [0.27, 0.02, 0.31, 0.20] +- vehicle (0.76) [0.10, 0.85, 0.30, 1.00] +Event: event-type: object_count_exceeded, num_objects: 2 + +``` + +# References -## References +- For details on frame inference data classes i.e video frame, regions of interest, tensors see [DL Streamer gstgva Python classes]( https://github.com/openvinotoolkit/dlstreamer_gst/tree/master/python/gstgva). -For details on more advanced extensions, see the [line crossing](/extensions/spatial_analytics/object_line_crossing.py) and [zone counting](/extensions/spatial_analytics/object_zone_count.py). These include more -complex parameters, guidance on how to break down algorithmic -implementation to simplify event generation and how to use -watermarking for visualizing output. +- For details on more advanced extensions, see the [line crossing](/extensions/spatial_analytics/object_line_crossing.py) and [zone counting](/extensions/spatial_analytics/object_zone_count.py). These include more complex parameters, guidance on how to break down algorithmic implementation to simplify event generation and how to use watermarking for visualizing output. -Note also how the pipeline definition is used to validate parameter schemas. +- Note also how the pipeline definition is used to validate parameter schemas. -See [object_line_crossing.md](/extensions/spatial_analytics/object_line_crossing.md) -and [object_zone_count.md](/extensions/spatial_analytics/object_zone_count.md) for -more information and a format for extension documentation. +- See [object_line_crossing.md](/extensions/spatial_analytics/object_line_crossing.md) and [object_zone_count.md](/extensions/spatial_analytics/object_zone_count.md) for more information and a format for extension documentation. 
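Building on the gstgva data classes referenced above, the sketch below shows an extension that simply logs per-object detection details. It is illustrative only: the class and module names are hypothetical, and the accessors used (`regions()`, `label()`, `confidence()`, `normalized_rect()`) are assumptions based on the gstgva `VideoFrame` and `RegionOfInterest` classes. It would be wired into a pipeline with the same `gvapython class=... module=...` pattern used in the examples above.

```python
# Illustrative extension sketch: log label, confidence and normalized bounding
# box for each detected object. Accessor names are assumptions based on the
# gstgva Python classes referenced in the link above.
class DetectionLogger:
    def __init__(self, min_confidence=0.0):
        self._min_confidence = min_confidence

    def process_frame(self, frame):
        for region in frame.regions():
            if region.confidence() < self._min_confidence:
                continue
            rect = region.normalized_rect()
            print("{} ({:.2f}) at x={:.2f} y={:.2f} w={:.2f} h={:.2f}".format(
                region.label(), region.confidence(),
                rect.x, rect.y, rect.w, rect.h))
        # Returning True keeps the rest of the pipeline running.
        return True
```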
diff --git a/docs/customizing_pipeline_requests.md b/docs/customizing_pipeline_requests.md index 0fa6e77..6a4ad5c 100644 --- a/docs/customizing_pipeline_requests.md +++ b/docs/customizing_pipeline_requests.md @@ -1,81 +1,388 @@ # Customizing Video Analytics Pipeline Requests -| [Request Format](#request-format) | [Source](#source) | [Destination](#destination) | [Parameters](#parameters) | +| [Request Format](#request-format) | [Source](#source) | [Destination](#destination) | [Parameters](#parameters) | [Tags](#tags) | -Pipeline requests are initiated to exercise the Video Analytics Serving REST-API. Each pipeline in VA Serving has a specific endpoint. A pipeline can be started by issuing a `POST` request and a running pipeline can be stopped using a `DELETE` request. The `source` and `destination` elements of VA Serving [pipeline templates](./defining_pipelines.md#pipeline_templates) are configured and constructed based on the `source` and `destination` from the incoming requests. +Pipeline requests are initiated to exercise the Video Analytics Serving REST API. Each pipeline in VA Serving has a specific endpoint. A pipeline can be started by issuing a `POST` request and a running pipeline can be stopped using a `DELETE` request. The `source` and `destination` elements of VA Serving [pipeline templates](defining_pipelines.md#pipeline-templates) are configured and constructed based on the `source` and `destination` from the incoming requests. ## Request Format -Pipeline requests sent to Video Analytics Serving REST-API are json documents that have the following attributes: + +> Note: This document shows curl requests. Requests can also be sent via vaclient, see [VA Client Command Options](../vaclient/README.md#command-options) + +Pipeline requests sent to Video Analytics Serving REST API are JSON documents that have the following attributes: |Attribute | Description | |---------|-----| -|`source`|The video source that needs to be analyzed. It consists of -
`uri` : the uri of the video source that needs to be analyzed
`type` : is the value `uri` | -|`destination`|The output to which analysis results need to be send/saved. It consists of `type`, `path` and `format`. | -|`parameters`|Optional attribute specifying pipeline parameters that can be customized when the pipeline is launched.| +|`source`| Required attribute specifying the video source that needs to be analyzed. It consists of :
`uri` : the uri of the video source that needs to be analyzed
`type` : is the value `uri` | +|`destination`| Optional attribute specifying the output to which analysis results need to be sent/saved. It consists of `metadata` and `frame`| +|`parameters`| Optional attribute specifying pipeline parameters that can be customized when the pipeline is launched.| +|`tags`| Optional attribute specifying a JSON object of additional properties that will be added to each frame's metadata.| -### Example Request: -```json -{ - "source": { - "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", - "type": "uri" - }, - "destination": { - "type": "file", - "path": "/tmp/results.txt", - "format": "json-lines" - }, - "parameters":{ - "height":300, - "width":300 - } -} +### Example Request +Below is a sample request using curl to start an `object_detection/person_vehicle_bike` pipeline that analyzes the video [person-bicycle-car-detection.mp4](https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4) and sends its results to `/tmp/results.json`. + +> Note: Files specified as a source or destination need to be accessible from within the VA Serving container. Local files and directories can be volume mounted using standard docker runtime options. As an example the following command launches a VA Serving container with the local `/tmp` directory volume mounted. Results to `/tmp/results.jsonl` are persisted after the container exits. +> ```bash +> docker/run.sh -v /tmp:/tmp +> ``` + +```bash +curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ +'Content-Type: application/json' -d \ +'{ + "source": { + "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.jsonl", + "format": "json-lines" + } + }, + "parameters":{ + "threshold": 0.90 + } +}' +2 ``` -## Source +The number returned on the console is the pipeline instance id (e.g. 2). +As the video is being analyzed and as objects are detected, results are added to the `destination` file which can be viewed using: + +```bash +$ tail -f /tmp/results.jsonl +{"objects":[{"detection":{"bounding_box":{"x_max":0.7503407597541809,"x_min":0.6836109757423401,"y_max":0.9968345165252686,"y_min":0.7712376117706299},"confidence":0.93408203125,"label":"person","label_id":1},"h":97,"roi_type":"person","w":51,"x":525,"y":333}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":1916666666} +{"objects":[{"detection":{"bounding_box":{"x_max":0.7554543018341064,"x_min":0.6827328205108643,"y_max":0.9928492903709412,"y_min":0.7551988959312439},"confidence":0.92578125,"label":"person","label_id":1},"h":103,"roi_type":"person","w":56,"x":524,"y":326}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":2000000000} + +``` +## Source The `source` attribute specifies the video source that needs to be analyzed. It can be changed to use media from different sources. Some of the common video sources are: + * File Source * IP Camera (RTSP Source) * Web Camera ### File Source -The example request shown in above section has media `source` from a video file checked in github. 
With the service running, you can use curl command line program to start an object detection pipeline with video source from a video file as follows: +The following example shows a media `source` from a video file in GitHub: + ```bash -$ curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H 'Content-Type: application/json' -d '{ "source": { "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", "type": "uri" }, "destination": { "type": "file", "path": "/tmp/results.txt", "format":"json-lines"}}' -2 +curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ +'Content-Type: application/json' -d \ +'{ + "source": { + "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.jsonl", + "format": "json-lines" + } + } +}' ``` -The number returned on the console is the pipeline instance id (e.g. 2). - -As the video is being analyzed and as events start and stop you will see detection results in the `destination` file and can be viewed using: +A local file can also be used as a source. In the following example person-bicycle-car-detection.mp4 has been downloaded to /tmp and VA Serving was started as: ```bash -$ tail -f /tmp/results.txt -{"objects":[{"detection":{"bounding_box":{"x_max":0.0503933560103178,"x_min":0.0,"y_max":0.34233352541923523,"y_min":0.14351698756217957},"confidence":0.6430817246437073,"label":"vehicle","label_id":2},"h":86,"roi_type":"vehicle","w":39,"x":0,"y":62}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":49250000000} +docker/run.sh -v /tmp:/tmp +``` +```bash +curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ +'Content-Type: application/json' -d \ +'{ + "source": { + "uri": "file:///tmp/person-bicycle-car-detection.mp4", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.jsonl", + "format": "json-lines" + } + } +}' ``` ### RTSP Source -In real world the media `source` would most likely be from live IP camera feeds or rtsp streams. RTSP url's will normally be of the format `rtsp://:/`. The request `source` object would be updated to: +RTSP streams from IP cameras can be referenced using the `rtsp` uri scheme. RTSP uris will normally be of the format `rtsp://:@:/` where `` and `password` are optional authentication credentials. + +The request `source` object would be updated to: + ```json { - "source": { - "uri": "rtsp://:/", - "type": "uri" - } + "source": { + "uri": "rtsp://:/", + "type": "uri" + } } ``` +### Web Camera Source +Web cameras accessible through the `Video4Linux` api and device drivers can be referenced using the `v4l2` uri scheme. `v4l2` uris have the format: `v4l2:///dev/` where `` is the path of the `v4l2` device, typically `video`. -> **NOTE:** The below sections are TBD and will be expanded later. +Depending on the default output of the `v4l2` device, the pipeline may need additional elements to convert the output to a format that gvadetect can process. -### Web Camera Source +Following is an example of a pipeline with videoconvert to handle format conversion: + +```json +"template": ["uridecodebin name=source ! videoconvert", + " ! 
gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", + " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", + " ! appsink name=appsink" + ], +``` +```bash +curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ +'Content-Type: application/json' -d \ +'{ + "source": { + "uri": "v4l2:///dev/video0", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.jsonl", + "format": "json-lines" + } + } +}' +``` ## Destination +Pipelines can be configured to output `frames`, `metadata` or both. The destination object within the request contains sections to configure each. + +- Metadata (inference results) +- Frame -### File +### Metadata +For metadata, the destination type can be set to file, mqtt, or kafka as needed. -### MQTT +#### File +The following are available properties: +- type : "file" +- path (required): Path to the file. +- format (optional): Format can be of the following types (default is json): + - json-lines : Each line is a valid JSON. + - json : Entire file is formatted as a JSON. -### KAFKA +Below is an example for JSON format + +```bash +curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ +'Content-Type: application/json' -d \ +'{ + "source": { + "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.json", + "format": "json" + } + } +}' +``` + +#### MQTT +The following are available properties: +- type : "mqtt" +- host (required) expects a format of host:port +- topic (required) MQTT topic on which broker messages are sent +- timeout (optional) Broker timeout + +Steps to run MQTT: + 1. Start the MQTT broker, here we use [Eclipse Mosquitto](https://hub.docker.com/_/eclipse-mosquitto/), an open source message broker. + ```bash + docker run --network=host -d eclipse-mosquitto:1.6 + ``` + 2. Start VA Serving with host network enabled + ```bash + docker/run.sh -v /tmp:/tmp --network host + ``` + 3. Send the REST request : Using the default 1883 MQTT port. + ```bash + curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ + 'Content-Type: application/json' -d \ + '{ + "source": { + "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "mqtt", + "host": "localhost:1883", + "topic": "vaserving" + } + } + }' + ``` + 4. 
Connect to MQTT broker to view inference results + ```bash + docker run -it --network=host --entrypoint mosquitto_sub eclipse-mosquitto:1.6 --topic vaserving + ``` + + ```bash + {"objects":[{"detection":{"bounding_box":{"x_max":1.0,"x_min":0.11904853582382202,"y_max":0.9856844246387482,"y_min":0.019983917474746704},"confidence":0.5811731815338135,"label":"vehicle","label_id":2},"h":417,"roi_type":"vehicle","w":677,"x":91,"y":9}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":13916666666} + {"objects":[{"detection":{"bounding_box":{"x_max":0.3472719192504883,"x_min":0.12164716422557831,"y_max":1.0,"y_min":0.839308500289917},"confidence":0.6197869777679443,"label":"vehicle","label_id":2},"h":69,"roi_type":"vehicle","w":173,"x":93,"y":363}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":14333333333} + {"objects":[{"detection":{"bounding_box":{"x_max":0.3529694750905037,"x_min":0.12145502120256424,"y_max":1.0,"y_min":0.8094810247421265},"confidence":0.7172137498855591,"label":"vehicle","label_id":2},"h":82,"roi_type":"vehicle","w":178,"x":93,"y":350}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":14416666666} + ``` + +### Frame +Frame is another aspect of destination and it can be set to RTSP. + +#### RTSP +RTSP is a type of frame destination supported. The following are available properties: +- type : "rtsp" +- path (required): custom string to uniquely identify the stream + +For more information, see [RTSP re-streaming](running_video_analytics_serving.md#real-time-streaming-protocol-rtsp-re-streaming) ## Parameters +Pipeline parameters as specified in the pipeline definition file, can be set in the REST request. +For example, below is a pipeline definition file: + +```json +{ + "type": "GStreamer", + "template": ["uridecodebin name=source", + " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", + " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", + " ! appsink name=appsink" + ], + "description": "Person Vehicle Bike Detection based on person-vehicle-bike-detection-crossroad-0078", + "parameters": { + "type": "object", + "properties": { + "detection-device": { + "element": { + "name": "detection", + "property": "device" + }, + "type": "string" + }, + "detection-model-instance-id": { + "element": { + "name": "detection", + "property": "model-instance-id" + }, + "type": "string" + }, + "inference-interval": { + "element": "detection", + "type": "integer" + }, + "threshold": { + "element": "detection", + "type": "number" + } + } + } +} +``` + +Any or all of the parameters defined i.e detection-device, detection-model-instance-id, inference-interval and threshold can be set via the request. 
+ +```bash +curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ +'Content-Type: application/json' -d \ +'{ + "source": { + "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.jsonl", + "format": "json-lines" + } + }, + "parameters": { + "detection-device": "GPU", + "detection-model-instance-id": "1", + "threshold": 0.90 + } +}' +``` + +For the example above as threshold was set to 0.90 (default value 0.5), the metadata would only contain results where the confidence exceeds 0.90 + +```json +{"objects":[{"detection":{"bounding_box":{"x_max":0.7503407597541809,"x_min":0.6836109757423401,"y_max":0.9968345165252686,"y_min":0.7712376117706299},"confidence":0.93408203125,"label":"person","label_id":1},"h":97,"roi_type":"person","w":51,"x":525,"y":333}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":1916666666} +{"objects":[{"detection":{"bounding_box":{"x_max":0.7554543018341064,"x_min":0.6827328205108643,"y_max":0.9928492903709412,"y_min":0.7551988959312439},"confidence":0.92578125,"label":"person","label_id":1},"h":103,"roi_type":"person","w":56,"x":524,"y":326}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":2000000000} +{"objects":[{"detection":{"bounding_box":{"x_max":0.7566969394683838,"x_min":0.683247447013855,"y_max":0.9892041087150574,"y_min":0.7453113198280334},"confidence":0.95263671875,"label":"person","label_id":1},"h":105,"roi_type":"person","w":56,"x":525,"y":322}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":2083333333} +{"objects":[{"detection":{"bounding_box":{"x_max":0.7583206295967102,"x_min":0.6872420907020569,"y_max":0.9740238189697266,"y_min":0.7231987714767456},"confidence":0.95947265625,"label":"person","label_id":1},"h":108,"roi_type":"person","w":55,"x":528,"y":312}],"resolution":{"height":432,"width":768},"source":"https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true","timestamp":2166666666} + +``` +For more details on parameters, see [Pipeline Parameters](defining_pipelines.md#pipeline-parameters) + +## Tags + +Tags are pieces of information specified at the time of request, stored with frames metadata. In the example below, tags are used to describe the location and orientation of video input. 
+ +```bash +curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ +'Content-Type: application/json' -d \ +'{ + "source": { + "uri": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "type": "uri" + }, + "destination": { + "metadata": { + "type": "file", + "path": "/tmp/results.json", + "format": "json" + } + }, + "tags": { + "camera_location": "parking_lot", + "direction" : "east" + } +}' +``` + +Inference results are updated with tags + +```json +{ + "objects": [ + { + "detection": { + "bounding_box": { + "x_max": 0.7448995113372803, + "x_min": 0.6734093427658081, + "y_max": 0.9991495609283447, + "y_min": 0.8781012296676636 + }, + "confidence": 0.5402464866638184, + "label": "person", + "label_id": 1 + }, + "h": 52, + "roi_type": "person", + "w": 55, + "x": 517, + "y": 379 + } + ], + "resolution": { + "height": 432, + "width": 768 + }, + "source": "https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true", + "tags": { + "camera_location": "parking_lot", + "direction": "east" + }, + "timestamp": 1500000000 +} +``` diff --git a/docs/defining_pipelines.md b/docs/defining_pipelines.md index da7964b..64058e6 100644 --- a/docs/defining_pipelines.md +++ b/docs/defining_pipelines.md @@ -189,8 +189,8 @@ with DL Streamer please see the DL Streamer [tutorial](https://github.com/opencv ``` "template": [ "-i \"{source[uri]}\" ", - "-vf \"detect=model={models[object_detection][1][network]}", - ":model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[inference-interval]}\",", + "-vf \"detect=model={models[object_detection][person_vehicle_bike][network]}", + ":model_proc=\"{models[object_detection][person_vehicle_bike][proc]}\":interval={parameters[inference-interval]}\",", "metaconvert", " -an -y -f metapublish" ] diff --git a/docs/running_video_analytics_serving.md b/docs/running_video_analytics_serving.md index 441b474..67ba9c7 100644 --- a/docs/running_video_analytics_serving.md +++ b/docs/running_video_analytics_serving.md @@ -1,5 +1,5 @@ # Running Video Analytics Serving -| [Video Analytics Serving Microservice](#video-analytics-serving-microservice) | [Interacting with the Microservice](#interacting-with-the-microservice) | [Selecting Pipelines and Models at Runtime](#selecting-pipelines-and-models-at-runtime) | [Developer Mode](#developer-mode) | [Enabling Hardware Accelerators](#enabling-hardware-accelerators) | +| [Video Analytics Serving Microservice](#video-analytics-serving-microservice) | [Interacting with the Microservice](#interacting-with-the-microservice) | [Real Time Streaming Protocol (RTSP) Re-streaming](#real-time-streaming-protocol-rtsp-re-streaming) | [Selecting Pipelines and Models at Runtime](#selecting-pipelines-and-models-at-runtime) | [Developer Mode](#developer-mode) | [Enabling Hardware Accelerators](#enabling-hardware-accelerators) | Video Analytics Serving docker images can be started using standard `docker run` and `docker compose` commands. For convenience a simplified run script is provided to pass common options to `docker run` such as proxies, device mounts, and to expose the default microservice port (8080). 
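For example, a minimal invocation of the run script (mirroring the volume-mount usage shown in the request examples above, so that results written to `/tmp` persist after the container exits) might look like the following sketch:

```bash
# Start the default GStreamer-based image with /tmp volume mounted;
# additional options (e.g. --network host) can be appended as needed.
./docker/run.sh -v /tmp:/tmp
```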
@@ -114,9 +114,11 @@ curl localhost:8080/pipelines/object_detection/person_vehicle_bike -X POST -H \ "type": "uri" }, "destination": { - "type": "file", - "path": "/tmp/results.txt", - "format": "json-lines" + "metadata": { + "type": "file", + "path": "/tmp/results.txt", + "format": "json-lines" + } } }' $ tail -f /tmp/results.txt diff --git a/extensions/gva_event_meta/schema.py b/extensions/gva_event_meta/schema.py new file mode 100644 index 0000000..61cf2be --- /dev/null +++ b/extensions/gva_event_meta/schema.py @@ -0,0 +1,23 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "type": "array", + "items": { + "properties": { + "event-type": { + "description": "Event type, known by caller", + "type": "string" + }, + "related-objects": { + "description": "Array of related detections, each entry refers to index of associated detected object", + "type": "array", + "items": { + "type": "integer" + } + }, + }, + "required": [ + "event-type" + ], + "additionalProperties": True + } +} diff --git a/extensions/spatial_analytics/object_line_crossing.md b/extensions/spatial_analytics/object_line_crossing.md index 1e5c7b2..fb5a677 100644 --- a/extensions/spatial_analytics/object_line_crossing.md +++ b/extensions/spatial_analytics/object_line_crossing.md @@ -2,13 +2,15 @@ The object line crossing spatial analytics extension is used in tandem with gvatrack to determine when tracked objects cross virtually defined lines supplied to the extension. ## Parameters -The extension takes the following parameters. The `lines` parameter is required, all others are optional. +The extension takes the following parameters. All parameters are optional for the pipeline to run. ### lines A list of line definitions, which are objects containing the following fields: * `name` the name of the line for use in event reporting. * `line` a tuple of (x,y) coordinates defining the start and end of the directional line segment. +**If this parameter is not set, the extension defaults to an empty list and will not check for line crossings.** + ```json "lines": [ { @@ -34,7 +36,7 @@ The [logging level](https://docs.python.org/3.8/library/logging.html#logging-lev ## Event Output If a tracked object crosses any of the lines, an event of type `object-line-crossing` will be created with the following fields. * `line-name`: name of the associated line -* `related-detections`: array containing indexes of the detected objects that crossed the line +* `related-objects`: array containing indexes of the detected objects that crossed the line * `directions` : array containing directions which can be `clockwise`, `counterclockwise`, or `parallel`. The orientation is determined from from line-start to line-end. 
* `clockwise-total` : total number of clockwise crossings * `counterclockwise-total` : total number of counter clockwise crossings diff --git a/extensions/spatial_analytics/object_line_crossing.py b/extensions/spatial_analytics/object_line_crossing.py index bde5e96..41f8d01 100644 --- a/extensions/spatial_analytics/object_line_crossing.py +++ b/extensions/spatial_analytics/object_line_crossing.py @@ -24,7 +24,7 @@ class ObjectLineCrossing: # pylint: disable=too-few-public-methods - def __init__(self, lines, enable_watermark=False, log_level="INFO"): + def __init__(self, lines=[], enable_watermark=False, log_level="INFO"): self._detected_objects = {} self._lines = [] self._enable_watermark = enable_watermark @@ -38,6 +38,8 @@ def __init__(self, lines, enable_watermark=False, log_level="INFO"): except Exception as error: logger.error(error) logger.error("Exception creating SpatialAnalysisCrossingLine: {}".format(line)) + if not self._lines: + logger.warn("Empty line configuration. No lines to check against.") def process_frame(self, frame): try: @@ -83,7 +85,7 @@ def _update_object_positions(self, frame): def _add_point(self, frame, point, label): region = frame.add_region(point.x, point.y, 0, 0, label=label, normalized=True) for tensor in region.tensors(): - tensor.set_name("line_crossing") + tensor.set_name("watermark_region") def _add_watermark(self, frame): for index in range(0, len(self._lines)): diff --git a/extensions/spatial_analytics/object_zone_count.md b/extensions/spatial_analytics/object_zone_count.md index 0fc20b3..6af5085 100644 --- a/extensions/spatial_analytics/object_zone_count.md +++ b/extensions/spatial_analytics/object_zone_count.md @@ -2,13 +2,15 @@ The Zone Event Detection AI Skill is used to determine if detected objects reside in polygons that this Skill takes as inputs. ## Parameters -The extension takes the following parameters. The `zones` parameter is required, all others are optional. +The extension takes the following parameters. All parameters are optional for the pipeline to run. ### zones A list of zone definitions which are objects containing the following fields. * `name` : the name of the zone for use in event reporting. * `polygon` : A list of four vertices (a tuple of x,y coordinates) which make up the bounds of the polygon. +**If this parameter is not set, the extension defaults to an empty list and will not check for zone detections.** + ```json "zones": [ { diff --git a/extensions/spatial_analytics/object_zone_count.py b/extensions/spatial_analytics/object_zone_count.py index 0c354e9..06bc54e 100644 --- a/extensions/spatial_analytics/object_zone_count.py +++ b/extensions/spatial_analytics/object_zone_count.py @@ -20,13 +20,14 @@ class ObjectZoneCount: DEFAULT_DETECTION_CONFIDENCE_THRESHOLD = 0.0 # Caller supplies one or more zones via request parameter - def __init__(self, zones, enable_watermark=False, log_level="INFO"): + def __init__(self, zones=[], enable_watermark=False, log_level="INFO"): self._zones = [] self._logger = logger self._logger.log_level = log_level self._enable_watermark = enable_watermark - self._assign_tensor_name = not self._enable_watermark self._zones = self._assign_defaults(zones) + if not self._zones: + logger.warn("Empty zone configuration. No zones to check against.") # Note that the pipeline already applies a pipeline-specific threshold value, but # this method serves as an example for handling optional zone-specific parameters. 
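# As a purely hypothetical sketch (not the repository's actual implementation), a defaults-assignment
# step like the one referenced above could fall back to DEFAULT_DETECTION_CONFIDENCE_THRESHOLD for any
# zone that does not supply its own value; the "threshold" key name used here is an assumption:
#
#     def _assign_defaults(self, zones):
#         for zone in zones:
#             zone.setdefault("threshold", ObjectZoneCount.DEFAULT_DETECTION_CONFIDENCE_THRESHOLD)
#         return zones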
@@ -43,23 +44,30 @@ def process_frame(self, frame): statuses = [] related_objects = [] for object_index, detected_object in enumerate(frame.regions()): - zone_status = self._detect_zone_count(frame, detected_object, zone) - if zone_status: - statuses.append(zone_status) - related_objects.append(object_index) + if not self._is_watermark_region(detected_object): + zone_status = self._detect_zone_count(frame, detected_object, zone) + if zone_status: + statuses.append(zone_status) + related_objects.append(object_index) if related_objects: gva_event_meta.add_event(frame, - event_type=ObjectZoneCount.DEFAULT_EVENT_TYPE, - attributes={'zone-name':zone['name'], - 'related-objects':related_objects, - 'status':statuses, - 'zone-count': len(related_objects)}) + event_type=ObjectZoneCount.DEFAULT_EVENT_TYPE, + attributes={'zone-name':zone['name'], + 'related-objects':related_objects, + 'status':statuses, + 'zone-count': len(related_objects)}) if self._enable_watermark: self._add_watermark_regions(frame) except Exception: print_message("Error processing frame: {}".format(traceback.format_exc())) return True + def _is_watermark_region(self, region): + for tensor in region.tensors(): + if tensor.name() == "watermark_region": + return True + return False + def _add_watermark_regions(self, frame): for zone in self._zones: self._add_watermark_region(frame, zone, zone["name"], False) @@ -82,8 +90,7 @@ def _add_watermark_region(self, frame, zone, frame_label, draw_label): # Rendering color is currently assigned using position of zone, within extension configuration # list, for simplicity. tensor['label_id'] = self._zones.index(zone) - if self._assign_tensor_name: - tensor.set_name(zone["name"]) + tensor.set_name("watermark_region") if draw_label: break diff --git a/models_list/models.list.yml b/models_list/models.list.yml index 2c64ba6..4229ae1 100644 --- a/models_list/models.list.yml +++ b/models_list/models.list.yml @@ -10,10 +10,6 @@ alias: audio_detection version: environment precision: [FP16,FP32] -- model: mobilenet-ssd - alias: object_detection - version: 1 - precision: [FP16,FP32] - model: emotions-recognition-retail-0003 alias: emotion_recognition version: 1 diff --git a/pipelines/ffmpeg/object_detection/1/pipeline.json b/pipelines/ffmpeg/object_detection/1/pipeline.json index 520f52a..2c768a2 100644 --- a/pipelines/ffmpeg/object_detection/1/pipeline.json +++ b/pipelines/ffmpeg/object_detection/1/pipeline.json @@ -2,8 +2,8 @@ "type": "FFmpeg", "template": [ "-i \"{source[uri]}\" ", - "-vf \"detect=model={models[object_detection][1][network]}", - ":model_proc=\"{models[object_detection][1][proc]}\":interval={parameters[inference-interval]}\",", + "-vf \"detect=model={models[object_detection][person_vehicle_bike][network]}", + ":model_proc=\"{models[object_detection][person_vehicle_bike][proc]}\":interval={parameters[inference-interval]}\",", "metaconvert", " -an -y -f metapublish" ], diff --git a/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json b/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json index 05198ed..39f6a4a 100644 --- a/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json +++ b/pipelines/gstreamer/object_tracking/object_line_crossing/pipeline.json @@ -90,8 +90,7 @@ }, "tracking-type": { "element": "tracking", - "type": "string", - "default": "short-term" + "type": "string" }, "detection-threshold": { "element": { diff --git a/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json 
b/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json index f6f08e9..b0881a8 100755 --- a/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json +++ b/pipelines/gstreamer/object_tracking/person_vehicle_bike/pipeline.json @@ -65,8 +65,7 @@ }, "tracking-type": { "element": "tracking", - "type": "string", - "default": "zero-term-imageless" + "type": "string" }, "detection-threshold": { "element": { diff --git a/samples/app_source_destination/README.md b/samples/app_source_destination/README.md index 0f29c68..0939de5 100644 --- a/samples/app_source_destination/README.md +++ b/samples/app_source_destination/README.md @@ -37,15 +37,15 @@ The destination part of the request object would look like this: destination = { "type": "application", "class": "GStreamerAppDestination", - "output": dst_queue + "output": dst_queue, "mode": "frames" } ``` -The destination will signal and of stream (EOS) by sending a null result. +The destination will signal end of stream (EOS) by sending a null result. ## Pipeline -Here is a sample pipeline. To be ready for for `application` source and destination the template must define source to be `appsrc` and sink to be `appsink`. +Here is a sample pipeline. To be ready for `application` source and destination the template must define source to be `appsrc` and sink to be `appsink`. ```json { "type": "GStreamer", diff --git a/samples/lva_ai_extension/README.md b/samples/ava_ai_extension/README.md similarity index 58% rename from samples/lva_ai_extension/README.md rename to samples/ava_ai_extension/README.md index b10ec1a..5489479 100644 --- a/samples/lva_ai_extension/README.md +++ b/samples/ava_ai_extension/README.md @@ -1,32 +1,38 @@ # OpenVINO™ DL Streamer – Edge AI Extension Module -| [Getting Started](#getting-started) | [Edge AI Extension Module Options](#edge-ai-extension-module-options) | [Additional Examples](#additional-standalone-edge-ai-extension-examples) | [Test Client](#test-client) | +| [Getting Started](#getting-started) | [Edge AI Extension Module Options](#edge-ai-extension-module-options) | [Additional Examples](#additional-standalone-edge-ai-extension-examples) | [Spatial Analytics](#spatial-analytics-pipelines)| [Test Client](#test-client) | [Changing Models](#updating-or-changing-detection-and-classification-models) -The OpenVINO™ DL Streamer - Edge AI Extension module is a microservice based on [Video Analytics Serving](/README.md) that provides video analytics pipelines built with OpenVINO™ DL Streamer. Developers can send decoded video frames to the AI Extension module which performs detection, classification, or tracking and returns the results. The AI Extension module exposes gRPC APIs that are compatible with [Azure Video Analyzer](https://azure.microsoft.com/en-us/products/video-analyzer/). Powered by OpenVINO™ toolkit, the AI Extension module enables developers to build, optimize and deploy deep learning inference workloads for maximum performance across Intel® architectures. +The OpenVINO™ DL Streamer - Edge AI Extension module is a microservice based on [Video Analytics Serving](/README.md) that provides video analytics pipelines built with OpenVINO™ DL Streamer. Developers can send decoded video frames to the AI Extension module which performs detection, classification, or tracking and returns the results. 
The AI Extension module exposes [gRPC APIs](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/grpc-extension-protocol) that are compatible with [Azure Video Analyzer](https://azure.microsoft.com/en-us/products/video-analyzer/) (AVA). Powered by OpenVINO™ toolkit, the AI Extension module enables developers to build, optimize and deploy deep learning inference workloads for maximum performance across Intel® architectures. ## Highlights +- Spatial analytics features: [Object Line Crossing](#object-line-crossing) and [Object Zone Count](#object-zone-count) similar to [Azure Video Analyzer Spatial Analysis](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/computer-vision-for-spatial-analysis?tabs=azure-stack-edge) - Scalable, high-performance solution for serving video analytics pipelines on Intel® architectures -- Pre-loaded Object Detection, Object Classification and Object Tracking pipelines to get started quickly -- Pre-loaded [person-vehicle-bike-detection-crossroad-0078](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-vehicle-bike-detection-crossroad-0078/README.md) and [vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/vehicle-attributes-recognition-barrier-0039/README.md) models. - gRPC API enabling fast data transfer rate and low latency -- Validated support for [Azure Video Analyzer Spatial Analysis](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/computer-vision-for-spatial-analysis?tabs=azure-stack-edge). - Supported Configuration: Pre-built Ubuntu Linux container for CPU and iGPU +- Pre-loaded Object Detection, Object Classification, Object Tracking and Action Recognition pipelines to get started quickly +- Pre-loaded models - see table below. + +| Name | Version | Model | +| -----|-----------| ------| +| person_vehicle_bike_detection| 1 |[person-vehicle-bike-detection-crossroad-0078](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-vehicle-bike-detection-crossroad-0078/README.md)| +| object_detection|person|[person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-detection-retail-0013/README.md)| +| object_detection|vehicle|[vehicle-detection-0202](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/vehicle-detection-0202/README.md)| +| vehicle_attributes_recognition|1|[vehicle-attributes-recognition-barrier-0039](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/vehicle-attributes-recognition-barrier-0039/README.md)| +| action_recognition|decoder|[action-recognition-0001-decoder](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/action-recognition-0001/README.md)| +| action_recognition|encoder|[action-recognition-0001-encoder](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/action-recognition-0001/README.md)| + ## What's New -- Added line crossing and zone counting spatial analytics pipelines. -- Added `XPU` operations file that runs three pipelines executing detection inference on CPU, GPU and VPU (NCS2) respectively. -- Removed scaling from topology file as DL Streamer will automatically scale to match the selected model. 
-- Allow properties specified in media stream descriptor to be added to the [extensions](https://github.com/Azure/video-analyzer/blob/main/contracts/data-schema/Extension%20Data%20Schema.json#L322) field in results object. -- [Preview] Action Recognition general purpose pipeline. +- Action Recognition pipeline (preview feature). +- Deployment manifest, topology and operations file are now provided by the [Intel OpenVINO™ DL Streamer – Edge AI Extension Tutorial](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/use-intel-grpc-video-analytics-serving-tutorial). -> Note: VA Serving pipeline selection is still supported via deployment file but this is a deprecated feature. Pipeline parameterization (e.g. setting inference accelerator device) is no longer possible via deployment file. # Getting Started -The OpenVINO™ DL Streamer - Edge AI Extension module can run as a standalone microservice or as a module within an Azure Video Analyzer graph. For more information on deploying the module as part of a Azure Video Analyzer graph please see [Configuring the AI Extension Module for Azure Video Analyzer](#configuring-the-ai-extension-module-for-live-video-analytics) and refer to the [Azure Video Analyzer documentation](https://azure.microsoft.com/en-us/products/video-analyzer/#overview). The following instructions demonstrate building and running the microservice and test client outside of Azure Video Analyzer. +The OpenVINO™ DL Streamer - Edge AI Extension module can run as a standalone microservice or as a module within an Azure Video Analyzer graph. For more information on deploying the module as part of a Azure Video Analyzer graph please see [Configuring the AI Extension Module for Azure Video Analyzer](#configuring-the-ai-extension-module-for-live-video-analytics) and refer to the [Azure Video Analyzer documentation](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/overview). The following instructions demonstrate building and running the microservice and test client outside of Azure Video Analyzer. ## Building the Edge AI Extension Module Image @@ -44,7 +50,7 @@ Run the docker image build script. 
``` $ ./docker/build.sh ``` -Resulting image name is `video-analytics-serving:0.6.0-dlstreamer-edge-ai-extension` +Resulting image name is `video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension` ## Running the Edge AI Extension Module @@ -71,7 +77,7 @@ $ ./docker/run_client.sh [AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= [AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: grpc_server_port == 5001 [AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= -[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: sample_file == /home/video-analytics-serving/samples/lva_ai_extension/sampleframes/sample01.png +[AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: sample_file == /home/video-analytics-serving/samples/ava_ai_extension/sampleframes/sample01.png [AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= [AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: loop_count == 0 [AIXC] [2021-01-22 15:28:06,957] [MainThread ] [INFO]: ======================= @@ -110,6 +116,7 @@ The module can be configured using command line options or environment variables | Setting | Command line option | Environment variable | Default value | |---------------------|-----------------------|----------------------|------------------| | gRPC port | -p | PORT | 5001 | +| RSTP Re-Streaming | --enable-rtsp | ENABLE_RTSP | false | | Pipeline name | --pipeline-name | PIPELINE_NAME | object_detection | | Pipeline version | --pipeline-version | PIPELINE_VERSION | person_vehicle_bike_detection | | Use debug pipeline | --debug | DEBUG_PIPELINE | | @@ -120,43 +127,25 @@ The following pipelines are included in the AI Extension: | Name | Version | Definition | Diagram | | ------------- | ------------- | --------------- | ------- | -| object_detection | person_vehicle_bike_detection | [definition](/samples/lva_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json)|![diagram](pipeline_diagrams/object-detection.png)| -| object_classification | vehicle_attributes_recognition | [definition](/samples/lva_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json)|![diagram](pipeline_diagrams/object-classification.png)| -| object_tracking | person_vehicle_bike_tracking | [definition](/samples/lva_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json)|![diagram](pipeline_diagrams/object-tracking.png)| -| [Preview] action_recognition | general | [definition](/samples/lva_ai_extension/pipelines/action_recognition/general/pipeline.json)|![diagram](pipeline_diagrams/action-recognition.png)| - -## Configuring the AI Extension Module for Azure Video Analyzer - -Update the [deployment manifest](https://raw.githubusercontent.com/Azure-Samples/video-analyzer-iot-edge-csharp/main/src/edge/deployment.openvino.grpc.template.json) so that the 'avaExtension'->'image' property shows the Azure URI of the OpenVINO™ DL Streamer – Edge AI Extension docker image. 
+| object_detection | person_vehicle_bike_detection | [definition](/samples/ava_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json)|![diagram](pipeline_diagrams/object-detection.png)| +| object_detection | object_zone_count | [definition](/samples/ava_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json)|![diagram](pipeline_diagrams/zone-detection.png)| +| object_classification | vehicle_attributes_recognition | [definition](/samples/ava_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json)|![diagram](pipeline_diagrams/object-classification.png)| +| object_tracking | person_vehicle_bike_tracking | [definition](/samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json)|![diagram](pipeline_diagrams/object-tracking.png)| +| object_tracking | object_line_crossing | [definition](/samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json)|![diagram](pipeline_diagrams/line-crossing.png)| +| action_recognition | general | [definition](/samples/ava_ai_extension/pipelines/action_recognition/general/pipeline.json)|![diagram](pipeline_diagrams/action-recognition.png)| -You will also need to create a pipeline topology with the extension and then create a live pipeline using that topology. Here is a sample [operations.json](/samples/lva_ai_extension/topologies/operations.json). +There are three versions of the object zone count pipeline. They are all based on the same pipeline design but use different detection models. -### Topology files -An AVA pipeline topology enables you to define the blueprint of a pipeline, with parameters as placeholders for values. The topology defines the nodes used in the pipeline -and how they are inter-connected. -Here is a [sample toplogy file](https://raw.githubusercontent.com/Azure/azure-video-analyzer/main/pipelines/live/topologies/grpcExtensionOpenVINO/topology.json). An [operations file](/samples/lva_ai_extension/topologies/operations.json) is an instruction set used by AVA to perform actions on the IOT Edge and refers to the pipeline topology through a URL or a file path. +| Pipeline Version | Model | +| ---------------- |-------| +| object_zone_count| [person-vehicle-bike-detection-crossroad-0078](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-vehicle-bike-detection-crossroad-0078/README.md)| +| object_zone_count_person| [person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/person-detection-retail-0013/README.md)| +| object_zone_count_vehicle| [vehicle-detection-0202](https://github.com/openvinotoolkit/open_model_zoo/blob/2021.4/models/intel/vehicle-detection-0202/README.md)| -An AVA pipeline topology enables you to define the blueprint of a pipeline, with parameters as placeholders for values. The topology defines the nodes used in the pipeline and how they are inter-connected. -Here is a [sample topology file](https://raw.githubusercontent.com/Azure/video-analyzer/main/pipelines/live/topologies/grpcExtensionOpenVINO/topology.json). An [operations file](/samples/lva_ai_extension/topologies/operations.json) is an instruction set used by AVA to perform actions on the IOT Edge and refers to the pipeline topology through a URL or a file path. +## Extension Configuration -The operations file sets the inference accelerator target using the `extensionConfiguration` feature. 
Here is a sample, setting GPU as target [operations_gpu.json](/samples/lva_ai_extension/topologies/operations_gpu.json) - -If changes are made locally to the topology file, the operations file will need change to point to the local topology. - -Replace -``` -topologyURL: -``` -to - -``` -topologyFile: -``` - -### Extension Configuration - -The AVA Server supports the extension_configuration field in the [MediaStreamDescriptor message](https://raw.githubusercontent.com/Azure/video-analyzer/main/contracts/grpc/extension.proto#L69). This field contains a JSON string that must match the extension configuration schema. See example below. Note that pipeline name and version fields are required but parameters and frame-destination are optional. +The Azure Video Analyzer (AVA) Server supports the extension_configuration field in the [MediaStreamDescriptor message](https://raw.githubusercontent.com/Azure/video-analyzer/main/contracts/grpc/extension.proto#L69). This field contains a JSON string that must match the extension configuration schema. See example below. Note that pipeline name and version fields are required but parameters and frame-destination are optional. ``` { "pipeline": { @@ -168,7 +157,7 @@ The AVA Server supports the extension_configuration field in the [MediaStreamDes } ``` -### Inference Accelerators +## Inference Accelerators Pipelines can be configured to perform inference using a range of accelerators. This is a two step process: @@ -181,54 +170,48 @@ This will allow you to customize the deployment manifest for a given accelerator The run server script will automatically detect installed accelerators and provide access to their resources. -Pipelines will define a default accelerator in their .json files. To run a pipeline on a different accelerator modify the pipeline json or send in a gRPC request with a extension_configuration. The LVA client generates this gRPC request with the extension configuration +Pipelines will define a default accelerator in their .json files. To run a pipeline on a different accelerator modify the pipeline json or send in a gRPC request with a extension_configuration. The Azure Video Analyzer (AVA) client generates this gRPC request with the extension configuration Example extension_configuration ``` { "pipeline": { "name": "object_detection", - "version": "person_vehicle_bike_detection" + "version": "person_vehicle_bike_detection", "parameters": { "detection-device": "GPU"} } } ``` +## Configuring the AI Extension Module for Azure Video Analyzer + +Please refer to the [Analyze live video with Intel OpenVINO™ DL Streamer – Edge AI Extension](https://docs.microsoft.com/en-us/azure/azure-video-analyzer/video-analyzer-docs/use-intel-grpc-video-analytics-serving-tutorial) tutorial for deployment manifests, topologies or operations files and other details. + # Additional Standalone Edge AI Extension Examples -### Specifying VA Serving parameters for LVA Server +### Specifying VA Serving parameters for AVA Server -The LVA Server application will filter command line arguments between the LVA layer and VA Serving layer. +The AVA Server application will filter command line arguments between the AVA layer and VA Serving layer. Command line arguments are first handled by run_server.sh; if not specifically handled by run_server.sh the argument -is passed into the LVA Server application. -Command line arguments that are not recognized by LVA Server are then passed to VA Serving, if VA Serving does not recognize +is passed into the AVA Server application. 
+Command line arguments that are not recognized by AVA Server are then passed to VA Serving, if VA Serving does not recognize the arguments an error will be reported. ```bash ./docker/run_server.sh --log_level DEBUG ``` -### Debug Mode - -Debug pipelines can be selected using the `--debug` command line parameter or setting the `DEBUG_PIPELINE` environment variable. Debug pipelines save watermarked frames to `/tmp/vaserving/{--pipeline-version}/{timestamp}/` as JPEG images. - -Run default pipeline in debug mode -```bash -$ ./docker/run_server.sh --debug -``` - ### Real Time Streaming Protocol (RTSP) Re-streaming Pipelines can be configured to connect and visualize input video with superimposed bounding boxes. * Enable RTSP at Server start ``` -$ export ENABLE_RTSP=true -$ ./docker/run_server.sh +$ ./docker/run_server.sh --enable-rtsp ``` * Run client with frame destination set. For demonstration, path set as `person-detection` in example request below. ``` -$ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version person_vehicle_bike_detection --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=true --frame-destination '{\"type\":\"rtsp\",\"path\":\"person-detection\"}' --loop-count 1000 +$ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version person_vehicle_bike_detection --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=true --frame-destination '{\"type\":\"rtsp\",\"path\":\"person-detection\"}' ``` * Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/person-detection`. @@ -237,7 +220,7 @@ $ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version per { "pipeline": { "name": "object_detection", - "version": "person_vehicle_bike_detection" + "version": "person_vehicle_bike_detection", "frame-destination": { "type":"rtsp", "path":"person-detection"} } } @@ -246,7 +229,7 @@ $ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version per ### Logging Run the following command to monitor the logs from the docker container ```bash -$ docker logs video-analytics-serving_0.6.0-dlstreamer-edge-ai-extension -f +$ docker logs video-analytics-serving_0.6.1-dlstreamer-edge-ai-extension -f ``` ### Developer Mode @@ -276,6 +259,105 @@ Notes: * If selecting a pipeline both name and version must be specified * The `--debug` option selects debug pipelines that watermark inference results and saves images in `/tmp/vaserving/{--pipeline-version}/{timestamp}/` and can also be set using the environment variable DEBUG_PIPELINE +### Debug Mode +>**Note:** This feature is deprecated and will be removed in a future release. Please use RTSP re-streaming instead. + +Debug pipelines can be selected using the `--debug` command line parameter or setting the `DEBUG_PIPELINE` environment variable. Debug pipelines save watermarked frames to `/tmp/vaserving/{--pipeline-version}/{timestamp}/` as JPEG images. + +Run default pipeline in debug mode +```bash +$ ./docker/run_server.sh --debug +``` + +# Spatial Analytics Pipelines +## Object Zone Count +The [object_detection/object_zone_count](./pipelines/object_detection/object_zone_count/pipeline.json) pipeline generates events containing objects detected in zones defined by the AVA extension configuration. 
For more information on the underlying zone event operation, see object_zone_count [README](../../extensions/spatial_analytics/object_zone_count.md). + +### Build and Run + +1. Build and run AVA server as normal + +2. Run client with example extension configuration. The `object_zone_count.json` extension configuration contains zone definitions to generate `object-zone-count` events for a media stream. Look for the below events in client output: + + ``` + $ ./docker/run_client.sh \ + --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_zone_count.json + ``` + ``` + + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (1.00) [0.30, 0.47, 0.09, 0.39] ['inferenceId: 4ea7a39d41eb4befae87894a48e1ea6a', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.97) [0.36, 0.40, 0.05, 0.24] ['inferenceId: 287b569a93fb4d4386af3cb0871b52ca', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.94) [0.44, 0.42, 0.08, 0.43] ['inferenceId: 4e82d111fccc4649a650fe205f70d079', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.92) [0.57, 0.38, 0.05, 0.25] ['inferenceId: cdc5e1dfa20a41b69bb05d3289e773d5', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.91) [0.69, 0.56, 0.12, 0.43] ['inferenceId: d873d43a9e274e5b8693b1df87764e30', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.90) [0.68, 0.42, 0.04, 0.24] ['inferenceId: ab759106752a45279007bae98eabd032', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.82) [0.64, 0.36, 0.05, 0.27] ['inferenceId: 908960e242334549a52bafb33f6a29a0', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,607] [MainThread ] [INFO]: ENTITY - person (0.60) [0.84, 0.44, 0.05, 0.29] ['inferenceId: 1a74f84445cf49cbb517ff2ea83f74c3', 'subtype: objectDetection'] + [AIXC] [2021-09-09 19:50:45,608] [MainThread ] [INFO]: EVENT - Zone2: ['inferenceId: fe65126e0db64e659b6414345d52a96c', 'subtype: object-zone-count', "relatedInferences: ['4ea7a39d41eb4befae87894a48e1ea6a']", "status: ['intersects']", 'zone-count: 1'] + [AIXC] [2021-09-09 19:50:45,608] [MainThread ] [INFO]: EVENT - Zone3: ['inferenceId: 2b1685ebe9914805b962615e19116b87', 'subtype: object-zone-count', "relatedInferences: ['287b569a93fb4d4386af3cb0871b52ca', '4e82d111fccc4649a650fe205f70d079', 'cdc5e1dfa20a41b69bb05d3289e773d5', 'd873d43a9e274e5b8693b1df87764e30', 'ab759106752a45279007bae98eabd032', '908960e242334549a52bafb33f6a29a0', '1a74f84445cf49cbb517ff2ea83f74c3']", "status: ['intersects', 'intersects', 'within', 'intersects', 'within', 'within', 'intersects']", 'zone-count: 7'] + ``` + +### Enabling RTSP Output + +To get a visual of `object_zone_count` extension, run with `object_zone_count_rendered.json` extension configuration which sets `enable_watermark` and `frame-destination` parameters for RTSP re streaming. + +> gvawatermark does not draw the polygon lines but markers/dots showing the boundary of the defined polygon regions, so the viewer must currently "connect the dots" themself. + +1. Build and run AVA server as normal but with `--enable-rtsp` flag + +2. 
Run client with example extension configuration, with rendering support: + + ``` + $ ./docker/run_client.sh \ + --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_zone_count_rendered.json \ + --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true + ``` +3. Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/zone-events`. + +## Object Line Crossing +The [object_tracking/object_line_crossing](./pipelines/object_tracking/object_line_crossing/pipeline.json) pipeline generates events containing objects which crossed lines defined by the AVA extension configuration. For more information on the underlying line crossing operation, see object_line_crossing [README](../../extensions/spatial_analytics/object_line_crossing.md). + +### Build and Run + +1. Build and run AVA server as normal + +2. Run client with example extension configuration. The `line_cross_tracking_config.json` extension configuration contains example line definitions needed to generate`object_line_crossing` events for a media stream. Look for the below events in client output: + + ``` + $ ./docker/run_client.sh \ + --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_line_crossing.json \ + --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=True + ``` + ``` + + [AIXC] [2021-05-12 18:57:01,315] [MainThread ] [INFO]: ENTITY - person (1.00) [0.40, 0.27, 0.12, 0.62] ['inferenceId: d47a4192ca4b4933a6c6c588220f59de', 'subtype: objectDetection', 'id: 1'] + [AIXC] [2021-05-12 18:57:01,315] [MainThread ] [INFO]: EVENT - hallway_bottom: ['inferenceId: 520d7506e5c94f3b9aeb1d157af6311c', 'subtype: lineCrossing', "relatedInferences: ['d47a4192ca4b4933a6c6c588220f59de']", 'counterclockwiseTotal: 1', 'total: 1', 'clockwiseTotal: 0', 'direction: counterclockwise'] + ``` + +### Enabling RTSP Output + +Adding a configuration parameter to specify the frame-destination enables a secondary workflow, with VA Serving rendering visualization of lines and entity detections/events (shown below). + +By setting `enable_watermark` and `frame-destination` parameter for RTSP re streaming, the caller may visualize the output. This added to the `line_cross_tracking_config_rtsp.json` extension configuration. So following the same instructions above but swapping the extension configuration used will dynamically produce the scene using rudimentary markers/dots showing the start and end points of defined lines. This allows the DL Streamer `gvawatermark` element (used in the frame-destination) to handle rendering. + + + +To get a visual of `object_line_crossing` extension, run with `object_line_crossing_rendered.json` extension configuration which sets `enable_watermark` and `frame-destination` parameters for RTSP re streaming. + +> gvawatermark does not draw the lines, so the viewer must currently "connect the dots" themself. + +1. Build and run AVA server as normal but with `--enable-rtsp` flag + +2. Run client with example extension configuration, with rendering support: + + ``` + $ ./docker/run_client.sh \ + --extension-config /home/video-analytics-serving/samples/ava_ai_extension/client/extension-config/object_line_crossing_rendered.json \ + --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=True + ``` + +3. 
Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/vaserving`.
+
 # Test Client
 A test client is provided to demonstrate the capabilities of the Edge AI Extension module. The test client script `run_client.sh` sends frame(s) to the extension module and prints inference results.
@@ -287,7 +369,7 @@ All arguments are optional, usage is as follows
  [ -s : gRPC server address, defaults to None]
  [ --server-ip : Specify the server ip to connect to ] (defaults to 127.0.0.1)
  [ --server-port : Specify the server port to connect to ] (defaults to 5001)
- [ --sample-file-path : Specify the sample file path to run] (defaults to samples/lva_ai_extension/sampleframes/sample01.png)
+ [ --sample-file-path : Specify the sample file path to run] (defaults to samples/ava_ai_extension/sampleframes/sample01.png)
  [ --loop-count : How many times to loop the source after it finishes ]
  [ --number-of-streams : Specify number of streams (one client process per stream)]
  [ --fps-interval FPS_INTERVAL] (interval between frames in seconds, defaults to 0)
@@ -320,8 +402,8 @@ Before updating the models used by a pipeline please see the format of
 [pipeline definition files](/docs/defining_pipelines.md) and read the
 tutorial on [changing object detection models](/docs/changing_object_detection_models.md).
-Most of the steps to changes models used by LVA extension are the same as for the above tutorial, but it assumes you are working with the REST service and not the AI
-Extension module. The LVA specific steps are called out in the following sections.
+Most of the steps to change the models used by the AVA extension are the same as for the above tutorial, but it assumes you are working with the REST service and not the AI
+Extension module. The AVA specific steps are called out in the following sections.
## Run Existing Object Detection Pipeline Get baseline results for existing object_detection model `person-vehicle-bike-detection-crossroad-0078` @@ -359,7 +441,7 @@ $ ./docker/run_client.sh --pipeline-name object_classification --pipeline-versio ## Send a request to the server to run a different pipeline on the GPU ``` -$ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version person_vehicle_bike_detection --pipeline-parameters '{"detection-device":"GPU"}' +$ ./docker/run_client.sh --pipeline-name object_detection --pipeline-version person_vehicle_bike_detection --pipeline-parameters '{\"detection-device\":\"GPU\"}' ``` ## Add New Model to Models List @@ -408,7 +490,7 @@ models Check that expected model and pipeline are present in the built image: ```bash -$ docker run -it --entrypoint /bin/bash video-analytics-serving:0.6.0-dlstreamer-edge-ai-extension +$ docker run -it --entrypoint /bin/bash video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension vaserving@82dd59743ca3:~$ ls models person_vehicle_bike_detection vehicle_attributes_recognition yolo vaserving@82dd59743ca3:~$ ls pipelines/object_detection/ @@ -420,7 +502,7 @@ debug_person_vehicle_bike_detection person_vehicle_bike_detection yolo ### Restart service Restart the service to ensure we are using the image with the yolo-v2-tiny-tf model ``` -$ docker stop video-analytics-serving_0.6.0-dlstreamer-edge-ai-extension +$ docker stop video-analytics-serving_0.6.1-dlstreamer-edge-ai-extension $ docker/run_server.sh --pipeline-name object_detection --pipeline-version yolo ``` ### Run the client diff --git a/samples/lva_ai_extension/client/__main__.py b/samples/ava_ai_extension/client/__main__.py similarity index 98% rename from samples/lva_ai_extension/client/__main__.py rename to samples/ava_ai_extension/client/__main__.py index 2ea13b0..0e15b85 100644 --- a/samples/lva_ai_extension/client/__main__.py +++ b/samples/ava_ai_extension/client/__main__.py @@ -37,9 +37,9 @@ import jsonschema from google.protobuf.json_format import MessageToDict -import samples.lva_ai_extension.common.grpc_autogen.inferencing_pb2 as inferencing_pb2 -from samples.lva_ai_extension.common.exception_handler import log_exception -import samples.lva_ai_extension.common.extension_schema as extension_schema +import samples.ava_ai_extension.common.grpc_autogen.inferencing_pb2 as inferencing_pb2 +from samples.ava_ai_extension.common.exception_handler import log_exception +import samples.ava_ai_extension.common.extension_schema as extension_schema from arguments import parse_args from media_stream_processor import MediaStreamProcessor @@ -213,9 +213,9 @@ def create_extension_config(args): def main(): msp = None frame_source = None + args = parse_args() + _log_options(args) try: - args = parse_args() - _log_options(args) frame_delay = 1 / args.frame_rate if args.frame_rate > 0 else 0 frame_queue = queue.Queue(args.frame_queue_size) result_queue = queue.Queue() diff --git a/samples/lva_ai_extension/client/arguments.py b/samples/ava_ai_extension/client/arguments.py similarity index 98% rename from samples/lva_ai_extension/client/arguments.py rename to samples/ava_ai_extension/client/arguments.py index 2ea1cce..1c4b3cf 100644 --- a/samples/lva_ai_extension/client/arguments.py +++ b/samples/ava_ai_extension/client/arguments.py @@ -65,7 +65,7 @@ def parse_args(args=None, program_name="DL Streamer Edge AI Extension Client"): metavar=("sample_file"), dest="sample_file", help="Name of the sample video frame.", - 
default="/home/video-analytics-serving/samples/lva_ai_extension/sampleframes/sample01.png", + default="/home/video-analytics-serving/samples/ava_ai_extension/sampleframes/sample01.png", ) parser.add_argument( "--max-frames", diff --git a/samples/lva_ai_extension/client/extension-config/object_line_crossing.json b/samples/ava_ai_extension/client/extension-config/object_line_crossing.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_line_crossing.json rename to samples/ava_ai_extension/client/extension-config/object_line_crossing.json diff --git a/samples/lva_ai_extension/client/extension-config/object_line_crossing_rendered.json b/samples/ava_ai_extension/client/extension-config/object_line_crossing_rendered.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_line_crossing_rendered.json rename to samples/ava_ai_extension/client/extension-config/object_line_crossing_rendered.json diff --git a/samples/lva_ai_extension/client/extension-config/object_zone_count.json b/samples/ava_ai_extension/client/extension-config/object_zone_count.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_zone_count.json rename to samples/ava_ai_extension/client/extension-config/object_zone_count.json diff --git a/samples/lva_ai_extension/client/extension-config/object_zone_count_person.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_person.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_zone_count_person.json rename to samples/ava_ai_extension/client/extension-config/object_zone_count_person.json diff --git a/samples/lva_ai_extension/client/extension-config/object_zone_count_person_rendered.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_person_rendered.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_zone_count_person_rendered.json rename to samples/ava_ai_extension/client/extension-config/object_zone_count_person_rendered.json diff --git a/samples/lva_ai_extension/client/extension-config/object_zone_count_rendered.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_rendered.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_zone_count_rendered.json rename to samples/ava_ai_extension/client/extension-config/object_zone_count_rendered.json diff --git a/samples/lva_ai_extension/client/extension-config/object_zone_count_vehicle.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_zone_count_vehicle.json rename to samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle.json diff --git a/samples/lva_ai_extension/client/extension-config/object_zone_count_vehicle_rendered.json b/samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle_rendered.json similarity index 100% rename from samples/lva_ai_extension/client/extension-config/object_zone_count_vehicle_rendered.json rename to samples/ava_ai_extension/client/extension-config/object_zone_count_vehicle_rendered.json diff --git a/samples/lva_ai_extension/client/media_stream_processor.py b/samples/ava_ai_extension/client/media_stream_processor.py similarity index 96% rename from samples/lva_ai_extension/client/media_stream_processor.py rename to 
samples/ava_ai_extension/client/media_stream_processor.py index 93546ad..fec46cc 100644 --- a/samples/lva_ai_extension/client/media_stream_processor.py +++ b/samples/ava_ai_extension/client/media_stream_processor.py @@ -32,11 +32,11 @@ import time import threading import grpc -from samples.lva_ai_extension.common.exception_handler import log_exception -from samples.lva_ai_extension.common.shared_memory import SharedMemoryManager -import samples.lva_ai_extension.common.grpc_autogen.media_pb2 as media_pb2 -import samples.lva_ai_extension.common.grpc_autogen.extension_pb2 as extension_pb2 -import samples.lva_ai_extension.common.grpc_autogen.extension_pb2_grpc as extension_pb2_grpc +from samples.ava_ai_extension.common.exception_handler import log_exception +from samples.ava_ai_extension.common.shared_memory import SharedMemoryManager +import samples.ava_ai_extension.common.grpc_autogen.media_pb2 as media_pb2 +import samples.ava_ai_extension.common.grpc_autogen.extension_pb2 as extension_pb2 +import samples.ava_ai_extension.common.grpc_autogen.extension_pb2_grpc as extension_pb2_grpc class MediaStreamProcessor: diff --git a/samples/lva_ai_extension/common/__init__.py b/samples/ava_ai_extension/common/__init__.py similarity index 100% rename from samples/lva_ai_extension/common/__init__.py rename to samples/ava_ai_extension/common/__init__.py diff --git a/samples/lva_ai_extension/common/exception_handler.py b/samples/ava_ai_extension/common/exception_handler.py similarity index 100% rename from samples/lva_ai_extension/common/exception_handler.py rename to samples/ava_ai_extension/common/exception_handler.py diff --git a/samples/lva_ai_extension/common/extension_schema.py b/samples/ava_ai_extension/common/extension_schema.py similarity index 100% rename from samples/lva_ai_extension/common/extension_schema.py rename to samples/ava_ai_extension/common/extension_schema.py diff --git a/samples/lva_ai_extension/common/grpc_autogen/__init__.py b/samples/ava_ai_extension/common/grpc_autogen/__init__.py similarity index 100% rename from samples/lva_ai_extension/common/grpc_autogen/__init__.py rename to samples/ava_ai_extension/common/grpc_autogen/__init__.py diff --git a/samples/lva_ai_extension/common/grpc_autogen/extension_pb2.py b/samples/ava_ai_extension/common/grpc_autogen/extension_pb2.py similarity index 100% rename from samples/lva_ai_extension/common/grpc_autogen/extension_pb2.py rename to samples/ava_ai_extension/common/grpc_autogen/extension_pb2.py diff --git a/samples/lva_ai_extension/common/grpc_autogen/extension_pb2_grpc.py b/samples/ava_ai_extension/common/grpc_autogen/extension_pb2_grpc.py similarity index 100% rename from samples/lva_ai_extension/common/grpc_autogen/extension_pb2_grpc.py rename to samples/ava_ai_extension/common/grpc_autogen/extension_pb2_grpc.py diff --git a/samples/lva_ai_extension/common/grpc_autogen/inferencing_pb2.py b/samples/ava_ai_extension/common/grpc_autogen/inferencing_pb2.py similarity index 100% rename from samples/lva_ai_extension/common/grpc_autogen/inferencing_pb2.py rename to samples/ava_ai_extension/common/grpc_autogen/inferencing_pb2.py diff --git a/samples/lva_ai_extension/common/grpc_autogen/media_pb2.py b/samples/ava_ai_extension/common/grpc_autogen/media_pb2.py similarity index 100% rename from samples/lva_ai_extension/common/grpc_autogen/media_pb2.py rename to samples/ava_ai_extension/common/grpc_autogen/media_pb2.py diff --git a/samples/lva_ai_extension/common/shared_memory.py b/samples/ava_ai_extension/common/shared_memory.py similarity index 
100% rename from samples/lva_ai_extension/common/shared_memory.py rename to samples/ava_ai_extension/common/shared_memory.py diff --git a/samples/lva_ai_extension/contracts/extension.proto b/samples/ava_ai_extension/contracts/extension.proto similarity index 100% rename from samples/lva_ai_extension/contracts/extension.proto rename to samples/ava_ai_extension/contracts/extension.proto diff --git a/samples/lva_ai_extension/contracts/inferencing.proto b/samples/ava_ai_extension/contracts/inferencing.proto similarity index 100% rename from samples/lva_ai_extension/contracts/inferencing.proto rename to samples/ava_ai_extension/contracts/inferencing.proto diff --git a/samples/lva_ai_extension/contracts/media.proto b/samples/ava_ai_extension/contracts/media.proto similarity index 100% rename from samples/lva_ai_extension/contracts/media.proto rename to samples/ava_ai_extension/contracts/media.proto diff --git a/samples/lva_ai_extension/docker/Dockerfile b/samples/ava_ai_extension/docker/Dockerfile similarity index 54% rename from samples/lva_ai_extension/docker/Dockerfile rename to samples/ava_ai_extension/docker/Dockerfile index 25dbf67..0fa7c46 100644 --- a/samples/lva_ai_extension/docker/Dockerfile +++ b/samples/ava_ai_extension/docker/Dockerfile @@ -9,19 +9,19 @@ RUN pip3 install --no-cache-dir -r /requirements.txt RUN rm -f /requirements.txt RUN mkdir -p /home/video-analytics-serving/samples -COPY ./client /home/video-analytics-serving/samples/lva_ai_extension/client -COPY ./common /home/video-analytics-serving/samples/lva_ai_extension/common -COPY ./models /home/video-analytics-serving/samples/lva_ai_extension/models -COPY ./pipelines /home/video-analytics-serving/samples/lva_ai_extension/pipelines -COPY ./sampleframes /home/video-analytics-serving/samples/lva_ai_extension/sampleframes -COPY ./server /home/video-analytics-serving/samples/lva_ai_extension/server +COPY ./client /home/video-analytics-serving/samples/ava_ai_extension/client +COPY ./common /home/video-analytics-serving/samples/ava_ai_extension/common +COPY ./models /home/video-analytics-serving/samples/ava_ai_extension/models +COPY ./pipelines /home/video-analytics-serving/samples/ava_ai_extension/pipelines +COPY ./sampleframes /home/video-analytics-serving/samples/ava_ai_extension/sampleframes +COPY ./server /home/video-analytics-serving/samples/ava_ai_extension/server ENV PYTHONPATH=$PYTHONPATH:/home/video-analytics-serving -ENV PYTHONPATH=$PYTHONPATH:/home/video-analytics-serving/samples/lva_ai_extension/common/grpc_autogen +ENV PYTHONPATH=$PYTHONPATH:/home/video-analytics-serving/samples/ava_ai_extension/common/grpc_autogen USER vaserving EXPOSE 5001 #ENTRYPOINT [ "/bin/bash" ] -ENTRYPOINT [ "python3", "/home/video-analytics-serving/samples/lva_ai_extension/server"] +ENTRYPOINT [ "python3", "/home/video-analytics-serving/samples/ava_ai_extension/server"] diff --git a/samples/lva_ai_extension/docker/build.sh b/samples/ava_ai_extension/docker/build.sh similarity index 90% rename from samples/lva_ai_extension/docker/build.sh rename to samples/ava_ai_extension/docker/build.sh index 6cdf6dd..0f44a66 100755 --- a/samples/lva_ai_extension/docker/build.sh +++ b/samples/ava_ai_extension/docker/build.sh @@ -4,7 +4,7 @@ WORK_DIR=$(dirname $(readlink -f "$0")) SAMPLE_DIR=$(dirname $WORK_DIR) SAMPLE_BUILD_ARGS=$(env | cut -f1 -d= | grep -E '_(proxy|REPO|VER)$' | sed 's/^/--build-arg / ' | tr '\n' ' ') MODELS="models/models.list.yml" -TAG="video-analytics-serving:0.6.0-dlstreamer-edge-ai-extension" 
+TAG="video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension" #Get options passed into script function get_options { @@ -48,7 +48,7 @@ function launch { echo $@ get_options "$@" # Build VA Serving -launch "$SAMPLE_DIR/../../docker/build.sh --framework gstreamer --create-service false --pipelines samples/lva_ai_extension/pipelines --models $SAMPLE_DIR/$MODELS" +launch "$SAMPLE_DIR/../../docker/build.sh --framework gstreamer --create-service false --pipelines samples/ava_ai_extension/pipelines --models $SAMPLE_DIR/$MODELS" # Build AI Extention echo $SAMPLE_DIR/.. diff --git a/samples/lva_ai_extension/docker/dlstreamer-edge-ai-extension-third-party-programs.txt b/samples/ava_ai_extension/docker/dlstreamer-edge-ai-extension-third-party-programs.txt similarity index 100% rename from samples/lva_ai_extension/docker/dlstreamer-edge-ai-extension-third-party-programs.txt rename to samples/ava_ai_extension/docker/dlstreamer-edge-ai-extension-third-party-programs.txt diff --git a/samples/lva_ai_extension/docker/run_client.sh b/samples/ava_ai_extension/docker/run_client.sh similarity index 87% rename from samples/lva_ai_extension/docker/run_client.sh rename to samples/ava_ai_extension/docker/run_client.sh index 5e64b08..2111013 100755 --- a/samples/lva_ai_extension/docker/run_client.sh +++ b/samples/ava_ai_extension/docker/run_client.sh @@ -2,10 +2,10 @@ SERVER_IP=127.0.0.1 SERVER_PORT=5001 -LVA_ROOT=/home/video-analytics-serving/samples/lva_ai_extension +AVA_ROOT=/home/video-analytics-serving/samples/ava_ai_extension OUTPUT_FILE_PATH=/tmp/result INTERACTIVE= -IMAGE=video-analytics-serving:0.6.0-dlstreamer-edge-ai-extension +IMAGE=video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension NAME=${IMAGE//[\:]/_}"_client" NUMBER_OF_STREAMS=1 SCRIPT_DIR=$(dirname $(readlink -f "$0")) @@ -28,9 +28,9 @@ function show_help { echo "**Application**" echo "" if [ "${MODE}" == "DEV" ]; then - VOLUME_MOUNT+="-v $SAMPLE_DIR:$LVA_ROOT " + VOLUME_MOUNT+="-v $SAMPLE_DIR:$AVA_ROOT " fi - RUN_COMMAND="'python3 ${LVA_ROOT}/client --help'" + RUN_COMMAND="'python3 ${AVA_ROOT}/client --help'" "$ROOT_DIR/docker/run.sh" --name $NAME --image $IMAGE $VOLUME_MOUNT --entrypoint "/bin/bash" --entrypoint-args "-c" --entrypoint-args "$RUN_COMMAND" } @@ -81,7 +81,7 @@ PIDS= CONTAINERS= if [ "${MODE}" == "DEV" ]; then - VOLUME_MOUNT+="-v $SAMPLE_DIR:$LVA_ROOT " + VOLUME_MOUNT+="-v $SAMPLE_DIR:$AVA_ROOT " fi function clean_up { @@ -95,7 +95,7 @@ if [ "$NUMBER_OF_STREAMS" -gt "1" ]; then for i in $(seq "$NUMBER_OF_STREAMS") do echo "Starting Client $i Results to ${OUTPUT_FILE_PATH}_client_$i.jsonl, Output to: client_${i}.stdout.txt" - RUN_COMMAND='"'" python3 $LVA_ROOT/client $ENTRYPOINT_ARGS -o ${OUTPUT_FILE_PATH}_client_$i.jsonl "'"' + RUN_COMMAND='"'" python3 $AVA_ROOT/client $ENTRYPOINT_ARGS -o ${OUTPUT_FILE_PATH}_client_$i.jsonl "'"' "$ROOT_DIR/docker/run.sh" --non-interactive --name "${NAME}_${i}" --network host --image $IMAGE $VOLUME_MOUNT --entrypoint "/bin/bash" --entrypoint-args "-c" --entrypoint-args "$RUN_COMMAND" >client_${i}.stdout.txt 2>&1 & PIDS+=" $!" 
CONTAINERS+=" ${NAME}_${i}" @@ -104,6 +104,6 @@ if [ "$NUMBER_OF_STREAMS" -gt "1" ]; then echo "waiting for clients to finish" wait else - RUN_COMMAND='"'" python3 $LVA_ROOT/client $ENTRYPOINT_ARGS -o ${OUTPUT_FILE_PATH}.jsonl "'"' + RUN_COMMAND='"'" python3 $AVA_ROOT/client $ENTRYPOINT_ARGS -o ${OUTPUT_FILE_PATH}.jsonl "'"' "$ROOT_DIR/docker/run.sh" --name $NAME --network host --image $IMAGE $VOLUME_MOUNT --entrypoint "/bin/bash" --entrypoint-args "-c" --entrypoint-args "$RUN_COMMAND" fi diff --git a/samples/lva_ai_extension/docker/run_server.sh b/samples/ava_ai_extension/docker/run_server.sh similarity index 80% rename from samples/lva_ai_extension/docker/run_server.sh rename to samples/ava_ai_extension/docker/run_server.sh index d278f95..f64e17f 100755 --- a/samples/lva_ai_extension/docker/run_server.sh +++ b/samples/ava_ai_extension/docker/run_server.sh @@ -2,9 +2,9 @@ CURRENT_DIR=$(dirname $(readlink -f "$0")) ROOT_DIR=$(readlink -f "$CURRENT_DIR/../../..") -LVA_DIR=$(dirname $CURRENT_DIR) -LVA_ROOT=/home/video-analytics-serving/samples/lva_ai_extension -IMAGE=video-analytics-serving:0.6.0-dlstreamer-edge-ai-extension +AVA_DIR=$(dirname $CURRENT_DIR) +AVA_ROOT=/home/video-analytics-serving/samples/ava_ai_extension +IMAGE=video-analytics-serving:0.6.1-dlstreamer-edge-ai-extension VASERVING_ROOT=/home/video-analytics-serving NAME=${IMAGE//[\:]/_} PORT=5001 @@ -21,13 +21,14 @@ function show_help { echo "usage: ./run_server.sh" echo " [ -p : Specify the port to use ] " echo " [ --dev : Mount local source code] " + echo " [ --enable-rtsp : To enable rtsp re-streaming ] " echo "" echo "**Application**" echo "" if [ "${MODE}" == "DEV" ]; then - VOLUME_MOUNT+="-v $LVA_DIR:$LVA_ROOT " + VOLUME_MOUNT+="-v $AVA_DIR:$AVA_ROOT " VOLUME_MOUNT+="-v $ROOT_DIR:$VASERVING_ROOT " - PIPELINES="--pipelines $LVA_DIR/pipelines " + PIPELINES="--pipelines $AVA_DIR/pipelines " fi ENTRYPOINT_ARGS+="--entrypoint-args --help " "$ROOT_DIR/docker/run.sh" -p $PORT:$PORT --image $IMAGE $VOLUME_MOUNT $ENTRYPOINT_ARGS $PIPELINES @@ -53,9 +54,12 @@ while [[ "$#" -gt 0 ]]; do fi ;; --dev) - PIPELINES="--pipelines $LVA_DIR/pipelines " + PIPELINES="--pipelines $AVA_DIR/pipelines " MODE="DEV" ;; + --enable-rtsp) + RTSP_ARGS="--enable-rtsp" + ;; *) ENTRYPOINT_ARGS+="--entrypoint-args '$1' " ;; @@ -78,10 +82,6 @@ if [ ! -z "$DEBUG_PIPELINE" ]; then ENV+="-e DEBUG_PIPELINE=$DEBUG_PIPELINE " fi -if [ ! -z "$ENABLE_RTSP" ]; then - RTSP_ARGS="--enable-rtsp" -fi - if [ ! 
-z "$GST_DEBUG" ]; then ENV+="-e GST_DEBUG=$GST_DEBUG " fi @@ -90,7 +90,7 @@ VOLUME_MOUNT+="-v /tmp:/tmp " VOLUME_MOUNT+="-v /dev/shm:/dev/shm " if [ "${MODE}" == "DEV" ]; then - VOLUME_MOUNT+="-v $LVA_DIR:$LVA_ROOT " + VOLUME_MOUNT+="-v $AVA_DIR:$AVA_ROOT " VOLUME_MOUNT+="-v $ROOT_DIR:$VASERVING_ROOT " fi diff --git a/samples/lva_ai_extension/models/action-recognition-0001.json b/samples/ava_ai_extension/models/action-recognition-0001.json similarity index 100% rename from samples/lva_ai_extension/models/action-recognition-0001.json rename to samples/ava_ai_extension/models/action-recognition-0001.json diff --git a/samples/lva_ai_extension/models/models.list.yml b/samples/ava_ai_extension/models/models.list.yml similarity index 100% rename from samples/lva_ai_extension/models/models.list.yml rename to samples/ava_ai_extension/models/models.list.yml diff --git a/samples/lva_ai_extension/models/person-detection-retail-0013.json b/samples/ava_ai_extension/models/person-detection-retail-0013.json similarity index 100% rename from samples/lva_ai_extension/models/person-detection-retail-0013.json rename to samples/ava_ai_extension/models/person-detection-retail-0013.json diff --git a/samples/lva_ai_extension/models/vehicle-detection-0202.json b/samples/ava_ai_extension/models/vehicle-detection-0202.json similarity index 100% rename from samples/lva_ai_extension/models/vehicle-detection-0202.json rename to samples/ava_ai_extension/models/vehicle-detection-0202.json diff --git a/samples/lva_ai_extension/pipeline_diagrams/action-recognition.png b/samples/ava_ai_extension/pipeline_diagrams/action-recognition.png similarity index 100% rename from samples/lva_ai_extension/pipeline_diagrams/action-recognition.png rename to samples/ava_ai_extension/pipeline_diagrams/action-recognition.png diff --git a/samples/ava_ai_extension/pipeline_diagrams/line-crossing.png b/samples/ava_ai_extension/pipeline_diagrams/line-crossing.png new file mode 100644 index 0000000..6943fc7 Binary files /dev/null and b/samples/ava_ai_extension/pipeline_diagrams/line-crossing.png differ diff --git a/samples/lva_ai_extension/pipeline_diagrams/object-classification.png b/samples/ava_ai_extension/pipeline_diagrams/object-classification.png similarity index 100% rename from samples/lva_ai_extension/pipeline_diagrams/object-classification.png rename to samples/ava_ai_extension/pipeline_diagrams/object-classification.png diff --git a/samples/lva_ai_extension/pipeline_diagrams/object-detection.png b/samples/ava_ai_extension/pipeline_diagrams/object-detection.png similarity index 100% rename from samples/lva_ai_extension/pipeline_diagrams/object-detection.png rename to samples/ava_ai_extension/pipeline_diagrams/object-detection.png diff --git a/samples/lva_ai_extension/pipeline_diagrams/object-tracking.png b/samples/ava_ai_extension/pipeline_diagrams/object-tracking.png similarity index 100% rename from samples/lva_ai_extension/pipeline_diagrams/object-tracking.png rename to samples/ava_ai_extension/pipeline_diagrams/object-tracking.png diff --git a/samples/lva_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid b/samples/ava_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid similarity index 59% rename from samples/lva_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid rename to samples/ava_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid index 7cd3447..b2ef2aa 100644 --- a/samples/lva_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid +++ 
b/samples/ava_ai_extension/pipeline_diagrams/pipeline_diagrams.mermaid @@ -1,4 +1,4 @@ -## LVA Pipeline Diagrams +## AVA Pipeline Diagrams ### Object Detection ```mermaid graph LR @@ -45,3 +45,28 @@ graph LR end appsink-->meta_data ``` + +### Line Crossing +```mermaid +graph LR + frame-->appsrc + subgraph "Line Crossing (gvapython element generates line crossing events)" + appsrc-->gvadetect + gvadetect-->gvatrack + gvatrack-->gvaclassify + gvaclassify-->gvapython + gvapython-->appsink + end + appsink-->meta_data``` + +### Zone Event +```mermaid +graph LR + frame-->appsrc + subgraph "Zone Event (gvapython element generates 'in zone' detection events)" + appsrc-->gvadetect + gvadetect-->gvapython + gvapython-->appsink + end + appsink-->meta_data +``` diff --git a/samples/ava_ai_extension/pipeline_diagrams/zone-detection.png b/samples/ava_ai_extension/pipeline_diagrams/zone-detection.png new file mode 100644 index 0000000..8de0c77 Binary files /dev/null and b/samples/ava_ai_extension/pipeline_diagrams/zone-detection.png differ diff --git a/samples/lva_ai_extension/pipelines/action_recognition/general/pipeline.json b/samples/ava_ai_extension/pipelines/action_recognition/general/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/action_recognition/general/pipeline.json rename to samples/ava_ai_extension/pipelines/action_recognition/general/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_classification/debug_vehicle_attributes_recognition/pipeline.json b/samples/ava_ai_extension/pipelines/object_classification/debug_vehicle_attributes_recognition/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/object_classification/debug_vehicle_attributes_recognition/pipeline.json rename to samples/ava_ai_extension/pipelines/object_classification/debug_vehicle_attributes_recognition/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json b/samples/ava_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json rename to samples/ava_ai_extension/pipelines/object_classification/vehicle_attributes_recognition/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_detection/debug_person_vehicle_bike_detection/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/debug_person_vehicle_bike_detection/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/object_detection/debug_person_vehicle_bike_detection/pipeline.json rename to samples/ava_ai_extension/pipelines/object_detection/debug_person_vehicle_bike_detection/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json rename to samples/ava_ai_extension/pipelines/object_detection/object_zone_count/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_detection/object_zone_count_person/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_person/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/object_detection/object_zone_count_person/pipeline.json rename 
to samples/ava_ai_extension/pipelines/object_detection/object_zone_count_person/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_detection/object_zone_count_vehicle/README.md b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/README.md similarity index 100% rename from samples/lva_ai_extension/pipelines/object_detection/object_zone_count_vehicle/README.md rename to samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/README.md diff --git a/samples/lva_ai_extension/pipelines/object_detection/object_zone_count_vehicle/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/object_detection/object_zone_count_vehicle/pipeline.json rename to samples/ava_ai_extension/pipelines/object_detection/object_zone_count_vehicle/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json b/samples/ava_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json rename to samples/ava_ai_extension/pipelines/object_detection/person_vehicle_bike_detection/pipeline.json diff --git a/samples/lva_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json b/samples/ava_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json similarity index 98% rename from samples/lva_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json rename to samples/ava_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json index 97aedcc..abaf6b4 100755 --- a/samples/lva_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json +++ b/samples/ava_ai_extension/pipelines/object_tracking/debug_person_vehicle_bike_tracking/pipeline.json @@ -170,8 +170,7 @@ }, "tracking-type": { "element": "tracking", - "type": "string", - "default": "zero-term-imageless" + "type": "string" }, "location": { "element": "filesink", diff --git a/samples/lva_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json b/samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json similarity index 98% rename from samples/lva_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json rename to samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json index 926dbd8..1c0ad0c 100755 --- a/samples/lva_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json +++ b/samples/ava_ai_extension/pipelines/object_tracking/object_line_crossing/pipeline.json @@ -87,8 +87,7 @@ }, "tracking-type": { "element": "tracking", - "type": "string", - "default": "short-term" + "type": "string" }, "detection-threshold": { "element": { diff --git a/samples/lva_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json b/samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json similarity index 98% rename from samples/lva_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json rename to samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json index d69422d..8594a30 100755 --- 
a/samples/lva_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json +++ b/samples/ava_ai_extension/pipelines/object_tracking/person_vehicle_bike_tracking/pipeline.json @@ -168,8 +168,7 @@ }, "tracking-type": { "element": "tracking", - "type": "string", - "default": "zero-term-imageless" + "type": "string" } } } diff --git a/samples/lva_ai_extension/pipelines/video_decode/app_dst/pipeline.json b/samples/ava_ai_extension/pipelines/video_decode/app_dst/pipeline.json similarity index 100% rename from samples/lva_ai_extension/pipelines/video_decode/app_dst/pipeline.json rename to samples/ava_ai_extension/pipelines/video_decode/app_dst/pipeline.json diff --git a/samples/lva_ai_extension/requirements.txt b/samples/ava_ai_extension/requirements.txt similarity index 100% rename from samples/lva_ai_extension/requirements.txt rename to samples/ava_ai_extension/requirements.txt diff --git a/samples/lva_ai_extension/sampleframes/sample01.png b/samples/ava_ai_extension/sampleframes/sample01.png similarity index 100% rename from samples/lva_ai_extension/sampleframes/sample01.png rename to samples/ava_ai_extension/sampleframes/sample01.png diff --git a/samples/lva_ai_extension/server/__init__.py b/samples/ava_ai_extension/server/__init__.py similarity index 100% rename from samples/lva_ai_extension/server/__init__.py rename to samples/ava_ai_extension/server/__init__.py diff --git a/samples/lva_ai_extension/server/__main__.py b/samples/ava_ai_extension/server/__main__.py similarity index 98% rename from samples/lva_ai_extension/server/__main__.py rename to samples/ava_ai_extension/server/__main__.py index 9b27422..5e14b3c 100644 --- a/samples/lva_ai_extension/server/__main__.py +++ b/samples/ava_ai_extension/server/__main__.py @@ -37,7 +37,7 @@ from vaserving.vaserving import VAServing from vaserving.common.utils.logging import get_logger from media_graph_extension import MediaGraphExtension -from samples.lva_ai_extension.common.exception_handler import log_exception +from samples.ava_ai_extension.common.exception_handler import log_exception PROGRAM_NAME = "DL Streamer Edge AI Extension" diff --git a/samples/lva_ai_extension/server/media_graph_extension.py b/samples/ava_ai_extension/server/media_graph_extension.py similarity index 97% rename from samples/lva_ai_extension/server/media_graph_extension.py rename to samples/ava_ai_extension/server/media_graph_extension.py index 2d0fcb5..a4ac1af 100644 --- a/samples/lva_ai_extension/server/media_graph_extension.py +++ b/samples/ava_ai_extension/server/media_graph_extension.py @@ -38,14 +38,14 @@ from enum import Enum import jsonschema -import samples.lva_ai_extension.common.grpc_autogen.inferencing_pb2 as inferencing_pb2 -import samples.lva_ai_extension.common.grpc_autogen.media_pb2 as media_pb2 -import samples.lva_ai_extension.common.grpc_autogen.extension_pb2 as extension_pb2 -import samples.lva_ai_extension.common.grpc_autogen.extension_pb2_grpc as extension_pb2_grpc +import samples.ava_ai_extension.common.grpc_autogen.inferencing_pb2 as inferencing_pb2 +import samples.ava_ai_extension.common.grpc_autogen.media_pb2 as media_pb2 +import samples.ava_ai_extension.common.grpc_autogen.extension_pb2 as extension_pb2 +import samples.ava_ai_extension.common.grpc_autogen.extension_pb2_grpc as extension_pb2_grpc -from samples.lva_ai_extension.common.shared_memory import SharedMemoryManager -from samples.lva_ai_extension.common.exception_handler import log_exception -import samples.lva_ai_extension.common.extension_schema as 
extension_schema +from samples.ava_ai_extension.common.shared_memory import SharedMemoryManager +from samples.ava_ai_extension.common.exception_handler import log_exception +import samples.ava_ai_extension.common.extension_schema as extension_schema from vaserving.vaserving import VAServing from vaserving.pipeline import Pipeline @@ -530,6 +530,7 @@ def ProcessMediaStream(self, requestIterator, context): # One final check on the pipeline to ensure it worked properly status = vas_pipeline.wait(10) + self._logger.info("Pipeline Ended Status: {}".format(status)) if (not status) or (status.state == Pipeline.State.ERROR): raise Exception("Pipeline did not complete successfully") @@ -538,6 +539,6 @@ def ProcessMediaStream(self, requestIterator, context): requests_received, responses_sent ) ) - self._logger.info( + self._logger.debug( "MediaStreamDescriptor:\n{0}".format(client_state.media_stream_descriptor) ) diff --git a/samples/lva_ai_extension/server/readme.md b/samples/ava_ai_extension/server/readme.md similarity index 100% rename from samples/lva_ai_extension/server/readme.md rename to samples/ava_ai_extension/server/readme.md diff --git a/samples/edgex_bridge/README.md b/samples/edgex_bridge/README.md index 131ded0..267846b 100644 --- a/samples/edgex_bridge/README.md +++ b/samples/edgex_bridge/README.md @@ -1,6 +1,6 @@ # Video Analytics Serving EdgeX Bridge -This sample demonstrates how to emit events into [EdgeX Foundry](http://edgexfoundry.org/) from an object detection pipeline based on Video Analytics Serving and [DL Streamer](https://github.com/openvinotoolkit/dlstreamer_gst). The sample uses the [mobilenet-ssd](https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/mobilenet-ssd/mobilenet-ssd.md) model for detection but can be customized to use any detection or recognition model. +This sample demonstrates how to emit events into [EdgeX Foundry](http://edgexfoundry.org/) from an object detection pipeline based on Video Analytics Serving and [DL Streamer](https://github.com/openvinotoolkit/dlstreamer_gst). The sample uses the [person-vehicle-bike-detection-crossroad-0078](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-vehicle-bike-detection-crossroad-0078) model for detection but can be customized to use any detection or recognition model. | [Overview](#overview) | [Prerequisites](#prerequisites) | [Tutorial](#tutorial) | [Script Arguments](#script-arguments) | @@ -27,26 +27,9 @@ The EdgeX Bridge sample uses a DL Streamer based pipeline definition with a vers The reference pipeline makes use of an object detection model to detect and label regions of interest. Any objects detected within the region are reported with a label and confidence value, along with location details and other contextually relevant metadata. Multiple objects may be reported in a media frame or image, and behavior may be refined further by assigning a confidence `threshold` property of the [gvadetect](https://github.com/openvinotoolkit/dlstreamer_gst/wiki/gvadetect) element. The list of objects that this network can detect are: -- aeroplane -- bicycle -- bird -- boat -- bottle -- bus -- car -- cat -- chair -- cow -- diningtable -- dog -- horse -- motorbike - person -- pottedplant -- sheep -- sofa -- train -- tvmonitor +- vehicle +- bike # Tutorial @@ -132,30 +115,12 @@ The `edgex-video-analytics-serving` image may be extended by updating sources on 1. You may customize the pipeline to use other models. 
For example, you may wish to remove the visual output by updating the last line of the pipeline template to replace with the following: -```suggestion:-0+0 --" ! queue ! gvawatermark ! videoconvert ! fpsdisplaysink video-sink=ximagesink" -+" ! appsink name=appsink" -``` - -Refer to [Changing Object Detection Models](/docs/changing_object_detection_models.md) for creative guidance. For example, apply the mobilenet-ssd model to detect bottles instead of vehicles. - -1. Add a second pipeline at ./samples/edgex_bridge/pipelines/object_detection/ssd/pipeline.json. Modify the attributes assigned for the gvadetect element's model attribute: - -``` -... -! gvadetect model={models[object_detection][ssd][network]} name=detection", -... -``` - -1. Extend the ./samples/models_list/models.list.yml to append: -``` -- model: mobilenet-ssd - alias: object_detection - version: ssd - precision: [FP32] -``` + ```suggestion:-0+0 + -" ! queue ! gvawatermark ! videoconvert ! fpsdisplaysink video-sink=ximagesink" + +" ! appsink name=appsink" + ``` -This will allow OpenVINO to resolve the `ssd` object_detection model we described for the pipeline in the previous step. You will either optionally pass a --pipeline parameter or update the default to start your new bottle detection pipeline (we named `object_detection/ssd` in the previous step). + Refer to [Changing Object Detection Models](/docs/changing_object_detection_models.md) for creative guidance. 1. Pass in a new source, representing a camera watching bottles being added or removed, such as "https://github.com/intel-iot-devkit/sample-videos/blob/master/bottle-detection.mp4?raw=true" diff --git a/samples/lva_ai_extension/deployment/.env b/samples/lva_ai_extension/deployment/.env deleted file mode 100755 index b1de79a..0000000 --- a/samples/lva_ai_extension/deployment/.env +++ /dev/null @@ -1,12 +0,0 @@ -SUBSCRIPTION_ID="" -RESOURCE_GROUP="" -AMS_ACCOUNT="" -AAD_TENANT_ID="" -AAD_SERVICE_PRINCIPAL_ID="" -AAD_SERVICE_PRINCIPAL_SECRET="" -IOTHUB_CONNECTION_STRING="" -INPUT_VIDEO_FOLDER_ON_DEVICE="" -OUTPUT_VIDEO_FOLDER_ON_DEVICE="" -APPDATA_FOLDER_ON_DEVICE="" -CONTAINER_REGISTRY_USERNAME_myacr= -CONTAINER_REGISTRY_PASSWORD_myacr= \ No newline at end of file diff --git a/samples/lva_ai_extension/deployment/deployment.cpu.grpc.template.json b/samples/lva_ai_extension/deployment/deployment.cpu.grpc.template.json deleted file mode 100755 index 1ab72f8..0000000 --- a/samples/lva_ai_extension/deployment/deployment.cpu.grpc.template.json +++ /dev/null @@ -1,170 +0,0 @@ -{ - "$schema-template": "2.0.0", - "modulesContent": { - "$edgeAgent": { - "properties.desired": { - "schemaVersion": "1.0", - "runtime": { - "type": "docker", - "settings": { - "minDockerVersion": "v1.25", - "loggingOptions": "", - "registryCredentials": { - "$CONTAINER_REGISTRY_USERNAME_myacr":{ - "username":"$CONTAINER_REGISTRY_USERNAME_myacr", - "password":"$CONTAINER_REGISTRY_PASSWORD_myacr", - "address": "$CONTAINER_REGISTRY_USERNAME_myacr.azurecr.io" - } - } - } - }, - "systemModules": { - "edgeAgent": { - "type": "docker", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-agent:1.0", - "createOptions": {} - } - }, - "edgeHub": { - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-hub:1.0", - "createOptions": { - "HostConfig": { - "PortBindings": { - "5671/tcp": [ - { - "HostPort": "5671" - } - ], - "8883/tcp": [ - { - "HostPort": "8883" - } - ], - "443/tcp": [ - { - "HostPort": "443" - } - ] - } - } - } - } 
- } - }, - "modules": { - "lvaEdge": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/media/live-video-analytics:2", - "createOptions": { - "HostConfig": { - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode" : "shareable" - } - } - } - }, - "rtspsim": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/lva-utilities/rtspsim-live555:1.2", - "createOptions": { - "HostConfig": { - "Binds": [ - "$INPUT_VIDEO_FOLDER_ON_DEVICE:/live/mediaServer/media" - ] - } - } - } - }, - "lvaExtension" : { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "intel/video-analytics-serving:latest-dlstreamer-edge-ai-extension", - "createOptions": { - "ExposedPorts": { - "80/tcp": {}, - "5001/tcp" : {} - }, - "HostConfig": { - "Binds": [ - "/tmp/:/tmp/" - ], - "PortBindings": { - "80/tcp": [ - { - "HostPort": "8080" - } - ], - "5001/tcp" : [ - { - "HostPort" : "5001" - } - ] - }, - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode": "container:lvaEdge" - } - } - } - } - } - } - }, - "$edgeHub": { - "properties.desired": { - "schemaVersion": "1.0", - "routes": { - "LVAToHub": "FROM /messages/modules/lvaEdge/outputs/* INTO $upstream" - }, - "storeAndForwardConfiguration": { - "timeToLiveSecs": 7200 - } - } - }, - "lvaEdge": { - "properties.desired": { - "applicationDataDirectory": "/var/lib/azuremediaservices", - "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$AMS_ACCOUNT", - "aadTenantId": "$AAD_TENANT_ID", - "aadServicePrincipalAppId": "$AAD_SERVICE_PRINCIPAL_ID", - "aadServicePrincipalSecret": "$AAD_SERVICE_PRINCIPAL_SECRET", - "aadEndpoint": "https://login.microsoftonline.com", - "aadResourceId": "https://management.core.windows.net/", - "armEndpoint": "https://management.azure.com/", - "diagnosticsEventsOutputName": "AmsDiagnostics", - "operationalEventsOutputName": "AmsOperational", - "logLevel": "Information", - "logCategories": "Application,Events", - "allowUnsecuredEndpoints": true, - "telemetryOptOut": false - } - } - } -} \ No newline at end of file diff --git a/samples/lva_ai_extension/deployment/deployment.gpu.grpc.template.json b/samples/lva_ai_extension/deployment/deployment.gpu.grpc.template.json deleted file mode 100755 index 0d81386..0000000 --- a/samples/lva_ai_extension/deployment/deployment.gpu.grpc.template.json +++ /dev/null @@ -1,176 +0,0 @@ -{ - "$schema-template": "2.0.0", - "modulesContent": { - "$edgeAgent": { - "properties.desired": { - "schemaVersion": "1.0", - "runtime": { - "type": "docker", - "settings": { - "minDockerVersion": "v1.25", - "loggingOptions": "", - "registryCredentials": { - "$CONTAINER_REGISTRY_USERNAME_myacr":{ - "username":"$CONTAINER_REGISTRY_USERNAME_myacr", - "password":"$CONTAINER_REGISTRY_PASSWORD_myacr", - "address": "$CONTAINER_REGISTRY_USERNAME_myacr.azurecr.io" - } - } - } - }, - "systemModules": { - "edgeAgent": { - "type": "docker", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-agent:1.0", - "createOptions": {} - } - }, - "edgeHub": { - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-hub:1.0", - "createOptions": { - "HostConfig": { - 
"PortBindings": { - "5671/tcp": [ - { - "HostPort": "5671" - } - ], - "8883/tcp": [ - { - "HostPort": "8883" - } - ], - "443/tcp": [ - { - "HostPort": "443" - } - ] - } - } - } - } - } - }, - "modules": { - "lvaEdge": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/media/live-video-analytics:2", - "createOptions": { - "HostConfig": { - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode" : "shareable" - } - } - } - }, - "rtspsim": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/lva-utilities/rtspsim-live555:1.2", - "createOptions": { - "HostConfig": { - "Binds": [ - "$INPUT_VIDEO_FOLDER_ON_DEVICE:/live/mediaServer/media" - ] - } - } - } - }, - "lvaExtension" : { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "intel/video-analytics-serving:latest-dlstreamer-edge-ai-extension", - "createOptions": { - "ExposedPorts": { - "80/tcp": {}, - "5001/tcp" : {} - }, - "HostConfig": { - "Binds": [ - "/tmp/:/tmp/" - ], - "Devices": [ - { - "PathOnHost":"/dev/dri", - "PathInContainer":"/dev/dri", - "CgroupPermissions":"rwm" - }], - "PortBindings": { - "80/tcp": [ - { - "HostPort": "8080" - } - ], - "5001/tcp" : [ - { - "HostPort" : "5001" - } - ] - }, - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode": "container:lvaEdge" - } - } - } - } - } - } - }, - "$edgeHub": { - "properties.desired": { - "schemaVersion": "1.0", - "routes": { - "LVAToHub": "FROM /messages/modules/lvaEdge/outputs/* INTO $upstream" - }, - "storeAndForwardConfiguration": { - "timeToLiveSecs": 7200 - } - } - }, - "lvaEdge": { - "properties.desired": { - "applicationDataDirectory": "/var/lib/azuremediaservices", - "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$AMS_ACCOUNT", - "aadTenantId": "$AAD_TENANT_ID", - "aadServicePrincipalAppId": "$AAD_SERVICE_PRINCIPAL_ID", - "aadServicePrincipalSecret": "$AAD_SERVICE_PRINCIPAL_SECRET", - "aadEndpoint": "https://login.microsoftonline.com", - "aadResourceId": "https://management.core.windows.net/", - "armEndpoint": "https://management.azure.com/", - "diagnosticsEventsOutputName": "AmsDiagnostics", - "operationalEventsOutputName": "AmsOperational", - "logLevel": "Information", - "logCategories": "Application,Events", - "allowUnsecuredEndpoints": true, - "telemetryOptOut": false - } - } - } -} diff --git a/samples/lva_ai_extension/deployment/deployment.hddl.grpc.template.json b/samples/lva_ai_extension/deployment/deployment.hddl.grpc.template.json deleted file mode 100755 index 76fcbb0..0000000 --- a/samples/lva_ai_extension/deployment/deployment.hddl.grpc.template.json +++ /dev/null @@ -1,179 +0,0 @@ -{ - "$schema-template": "2.0.0", - "modulesContent": { - "$edgeAgent": { - "properties.desired": { - "schemaVersion": "1.0", - "runtime": { - "type": "docker", - "settings": { - "minDockerVersion": "v1.25", - "loggingOptions": "", - "registryCredentials": { - "$CONTAINER_REGISTRY_USERNAME_myacr":{ - "username":"$CONTAINER_REGISTRY_USERNAME_myacr", - "password":"$CONTAINER_REGISTRY_PASSWORD_myacr", - "address": "$CONTAINER_REGISTRY_USERNAME_myacr.azurecr.io" - } - } - } - }, - "systemModules": { - "edgeAgent": { - "type": "docker", - "settings": { - "image": 
"mcr.microsoft.com/azureiotedge-agent:1.0", - "createOptions": {} - } - }, - "edgeHub": { - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-hub:1.0", - "createOptions": { - "HostConfig": { - "PortBindings": { - "5671/tcp": [ - { - "HostPort": "5671" - } - ], - "8883/tcp": [ - { - "HostPort": "8883" - } - ], - "443/tcp": [ - { - "HostPort": "443" - } - ] - } - } - } - } - } - }, - "modules": { - "lvaEdge": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/media/live-video-analytics:2", - "createOptions": { - "HostConfig": { - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode" : "shareable" - } - } - } - }, - "rtspsim": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/lva-utilities/rtspsim-live555:1.2", - "createOptions": { - "HostConfig": { - "Binds": [ - "$INPUT_VIDEO_FOLDER_ON_DEVICE:/live/mediaServer/media" - ] - } - } - } - }, - "lvaExtension" : { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "intel/video-analytics-serving:latest-dlstreamer-edge-ai-extension", - "createOptions": { - "ExposedPorts": { - "80/tcp": {}, - "5001/tcp" : {} - }, - "HostConfig": { - "Binds": [ - "/var/tmp:/var/tmp", - "/tmp:/tmp", - "/dev/shm:/dev/shm" - ], - "Devices": [ - { - "PathOnHost":"/dev/ion", - "PathInContainer":"/dev/ion", - "CgroupPermissions":"rwm" - } - ], - "PortBindings": { - "80/tcp": [ - { - "HostPort": "8080" - } - ], - "5001/tcp" : [ - { - "HostPort" : "5001" - } - ] - }, - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode": "container:lvaEdge" - } - } - } - } - } - } - }, - "$edgeHub": { - "properties.desired": { - "schemaVersion": "1.0", - "routes": { - "LVAToHub": "FROM /messages/modules/lvaEdge/outputs/* INTO $upstream" - }, - "storeAndForwardConfiguration": { - "timeToLiveSecs": 7200 - } - } - }, - "lvaEdge": { - "properties.desired": { - "applicationDataDirectory": "/var/lib/azuremediaservices", - "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$AMS_ACCOUNT", - "aadTenantId": "$AAD_TENANT_ID", - "aadServicePrincipalAppId": "$AAD_SERVICE_PRINCIPAL_ID", - "aadServicePrincipalSecret": "$AAD_SERVICE_PRINCIPAL_SECRET", - "aadEndpoint": "https://login.microsoftonline.com", - "aadResourceId": "https://management.core.windows.net/", - "armEndpoint": "https://management.azure.com/", - "diagnosticsEventsOutputName": "AmsDiagnostics", - "operationalEventsOutputName": "AmsOperational", - "logLevel": "Information", - "logCategories": "Application,Events", - "allowUnsecuredEndpoints": true, - "telemetryOptOut": false - } - } - } -} diff --git a/samples/lva_ai_extension/deployment/deployment.myriad.grpc.template.json b/samples/lva_ai_extension/deployment/deployment.myriad.grpc.template.json deleted file mode 100755 index cc33484..0000000 --- a/samples/lva_ai_extension/deployment/deployment.myriad.grpc.template.json +++ /dev/null @@ -1,174 +0,0 @@ -{ - "$schema-template": "2.0.0", - "modulesContent": { - "$edgeAgent": { - "properties.desired": { - "schemaVersion": "1.0", - "runtime": { - "type": "docker", - "settings": { - "minDockerVersion": "v1.25", - "loggingOptions": "", - 
"registryCredentials": { - "$CONTAINER_REGISTRY_USERNAME_myacr":{ - "username":"$CONTAINER_REGISTRY_USERNAME_myacr", - "password":"$CONTAINER_REGISTRY_PASSWORD_myacr", - "address": "$CONTAINER_REGISTRY_USERNAME_myacr.azurecr.io" - } - } - } - }, - "systemModules": { - "edgeAgent": { - "type": "docker", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-agent:1.0", - "createOptions": {} - } - }, - "edgeHub": { - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-hub:1.0", - "createOptions": { - "HostConfig": { - "PortBindings": { - "5671/tcp": [ - { - "HostPort": "5671" - } - ], - "8883/tcp": [ - { - "HostPort": "8883" - } - ], - "443/tcp": [ - { - "HostPort": "443" - } - ] - } - } - } - } - } - }, - "modules": { - "lvaEdge": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/media/live-video-analytics:2", - "createOptions": { - "HostConfig": { - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode" : "shareable" - } - } - } - }, - "rtspsim": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/lva-utilities/rtspsim-live555:1.2", - "createOptions": { - "HostConfig": { - "Binds": [ - "$INPUT_VIDEO_FOLDER_ON_DEVICE:/live/mediaServer/media" - ] - } - } - } - }, - "lvaExtension" : { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "intel/video-analytics-serving:latest-dlstreamer-edge-ai-extension", - "createOptions": { - "ExposedPorts": { - "80/tcp": {}, - "5001/tcp" : {} - }, - "HostConfig": { - "Binds": [ - "/tmp:/tmp", - "/dev/bus/usb:/dev/bus/usb" - ], - "DeviceCgroupRules":[ - "c 189:* rmw" - ], - "PortBindings": { - "80/tcp": [ - { - "HostPort": "8080" - } - ], - "5001/tcp" : [ - { - "HostPort" : "5001" - } - ] - }, - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode": "container:lvaEdge" - } - } - } - } - } - } - }, - "$edgeHub": { - "properties.desired": { - "schemaVersion": "1.0", - "routes": { - "LVAToHub": "FROM /messages/modules/lvaEdge/outputs/* INTO $upstream" - }, - "storeAndForwardConfiguration": { - "timeToLiveSecs": 7200 - } - } - }, - "lvaEdge": { - "properties.desired": { - "applicationDataDirectory": "/var/lib/azuremediaservices", - "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$AMS_ACCOUNT", - "aadTenantId": "$AAD_TENANT_ID", - "aadServicePrincipalAppId": "$AAD_SERVICE_PRINCIPAL_ID", - "aadServicePrincipalSecret": "$AAD_SERVICE_PRINCIPAL_SECRET", - "aadEndpoint": "https://login.microsoftonline.com", - "aadResourceId": "https://management.core.windows.net/", - "armEndpoint": "https://management.azure.com/", - "diagnosticsEventsOutputName": "AmsDiagnostics", - "operationalEventsOutputName": "AmsOperational", - "logLevel": "Information", - "logCategories": "Application,Events", - "allowUnsecuredEndpoints": true, - "telemetryOptOut": false - } - } - } -} \ No newline at end of file diff --git a/samples/lva_ai_extension/deployment/deployment.xpu.grpc.template.json b/samples/lva_ai_extension/deployment/deployment.xpu.grpc.template.json deleted file mode 100755 index c579010..0000000 --- a/samples/lva_ai_extension/deployment/deployment.xpu.grpc.template.json +++ 
/dev/null @@ -1,180 +0,0 @@ -{ - "$schema-template": "2.0.0", - "modulesContent": { - "$edgeAgent": { - "properties.desired": { - "schemaVersion": "1.0", - "runtime": { - "type": "docker", - "settings": { - "minDockerVersion": "v1.25", - "loggingOptions": "", - "registryCredentials": { - "$CONTAINER_REGISTRY_USERNAME_myacr":{ - "username":"$CONTAINER_REGISTRY_USERNAME_myacr", - "password":"$CONTAINER_REGISTRY_PASSWORD_myacr", - "address": "$CONTAINER_REGISTRY_USERNAME_myacr.azurecr.io" - } - } - } - }, - "systemModules": { - "edgeAgent": { - "type": "docker", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-agent:1.0", - "createOptions": {} - } - }, - "edgeHub": { - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/azureiotedge-hub:1.0", - "createOptions": { - "HostConfig": { - "PortBindings": { - "5671/tcp": [ - { - "HostPort": "5671" - } - ], - "8883/tcp": [ - { - "HostPort": "8883" - } - ], - "443/tcp": [ - { - "HostPort": "443" - } - ] - } - } - } - } - } - }, - "modules": { - "lvaEdge": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/media/live-video-analytics:2", - "createOptions": { - "HostConfig": { - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode" : "shareable" - } - } - } - }, - "rtspsim": { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "mcr.microsoft.com/lva-utilities/rtspsim-live555:1.2", - "createOptions": { - "HostConfig": { - "Binds": [ - "$INPUT_VIDEO_FOLDER_ON_DEVICE:/live/mediaServer/media" - ] - } - } - } - }, - "lvaExtension" : { - "version": "1.0", - "type": "docker", - "status": "running", - "restartPolicy": "always", - "settings": { - "image": "intel/video-analytics-serving:latest-dlstreamer-edge-ai-extension", - "createOptions": { - "ExposedPorts": { - "80/tcp": {}, - "5001/tcp" : {} - }, - "HostConfig": { - "Binds": [ - "/tmp:/tmp", - "/dev/bus/usb:/dev/bus/usb" - ], - "DeviceCgroupRules":[ - "c 189:* rmw" - ], - "Devices": [ - { - "PathOnHost":"/dev/dri", - "PathInContainer":"/dev/dri", - "CgroupPermissions":"rwm" - }], - "PortBindings": { - "80/tcp": [ - { - "HostPort": "8080" - } - ], - "5001/tcp" : [ - { - "HostPort" : "5001" - } - ] - }, - "LogConfig": { - "Type": "", - "Config": { - "max-size": "10m", - "max-file": "10" - } - }, - "IpcMode": "container:lvaEdge" - } - } - } - } - } - } - }, - "$edgeHub": { - "properties.desired": { - "schemaVersion": "1.0", - "routes": { - "LVAToHub": "FROM /messages/modules/lvaEdge/outputs/* INTO $upstream" - }, - "storeAndForwardConfiguration": { - "timeToLiveSecs": 7200 - } - } - }, - "lvaEdge": { - "properties.desired": { - "applicationDataDirectory": "/var/lib/azuremediaservices", - "azureMediaServicesArmId": "/subscriptions/$SUBSCRIPTION_ID/resourceGroups/$RESOURCE_GROUP/providers/microsoft.media/mediaservices/$AMS_ACCOUNT", - "aadTenantId": "$AAD_TENANT_ID", - "aadServicePrincipalAppId": "$AAD_SERVICE_PRINCIPAL_ID", - "aadServicePrincipalSecret": "$AAD_SERVICE_PRINCIPAL_SECRET", - "aadEndpoint": "https://login.microsoftonline.com", - "aadResourceId": "https://management.core.windows.net/", - "armEndpoint": "https://management.azure.com/", - "diagnosticsEventsOutputName": "AmsDiagnostics", - "operationalEventsOutputName": "AmsOperational", - "logLevel": "Information", - "logCategories": "Application,Events", - 
"allowUnsecuredEndpoints": true, - "telemetryOptOut": false - } - } - } -} diff --git a/samples/lva_ai_extension/pipelines/object_detection/object_zone_count/README.md b/samples/lva_ai_extension/pipelines/object_detection/object_zone_count/README.md deleted file mode 100644 index 2f635d5..0000000 --- a/samples/lva_ai_extension/pipelines/object_detection/object_zone_count/README.md +++ /dev/null @@ -1,70 +0,0 @@ -## Extension Configuration - Spatial Analytics - -### Zone Detection - -The `object_detection/zone_events` pipeline demonstrates how to integrate LVA with Spatial Analytics. The calling application may define one or more polygon regions as extension configuration. - -When objects are detected as input that `intersect` the zone or are/become contained `within` a zone, LVA counts number of objects and emits `zoneCrossing` event. -#### Build and Run - -1. Build: - - ``` - $ ./samples/lva_ai_extension/docker/build.sh - ``` - -2. Run server: - - ``` - $ ./samples/lva_ai_extension/docker/run_server.sh - ``` - -3. Run client with example extension configuration: - - ``` - $ ./samples/lva_ai_extension/docker/run_client.sh \ - --extension-config /home/video-analytics-serving/samples/lva_ai_extension/client/extension-config/zones-spatial-analytics.json - ``` - -#### Expected Spatial Analytics Output - -The primary workflow is driven by `zones-spatial-analytics.json` which contains the minimal required configuration to produce `zoneCrossing` events for a media stream. - -``` - -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (1.00) [0.30, 0.47, 0.09, 0.39] ['inferenceId: c8f074e8575d4266a6010d5ed0ea6daf', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (0.97) [0.36, 0.40, 0.05, 0.24] ['inferenceId: 34cd8129edc34bfe8c96c49600a251df', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (0.94) [0.44, 0.42, 0.08, 0.43] ['inferenceId: a1e278d7b68d4eefbfbff35e80086a94', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (0.92) [0.57, 0.38, 0.05, 0.25] ['inferenceId: 697329a697694649aff3f25703bf9b16', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (0.91) [0.69, 0.56, 0.12, 0.43] ['inferenceId: 8e9323d8ed264b648887ede89767b830', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (0.90) [0.68, 0.42, 0.04, 0.24] ['inferenceId: 40f8d54d302b4438845046b991de48ee', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (0.82) [0.64, 0.36, 0.05, 0.27] ['inferenceId: fd2c302794184209b58c6b922ae1ae8c', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: ENTITY - person (0.60) [0.84, 0.44, 0.05, 0.29] ['inferenceId: e3e9f520defd4642a09f2bf702ec23f4', 'subtype: objectDetection'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: EVENT - Zone2: ['inferenceId: 7a9adc6a29dd48eeae83b3fadc1719af', 'subtype: zoneCrossing', "relatedInferences: ['c8f074e8575d4266a6010d5ed0ea6daf']", 'zoneCount: 1'] -[AIXC] [2021-05-12 18:47:50,711] [MainThread ] [INFO]: EVENT - Zone3: ['inferenceId: e3f983a0c2fd45b9b0bba9a4d8939d71', 'subtype: zoneCrossing', "relatedInferences: ['34cd8129edc34bfe8c96c49600a251df', 'a1e278d7b68d4eefbfbff35e80086a94', '697329a697694649aff3f25703bf9b16', '8e9323d8ed264b648887ede89767b830', '40f8d54d302b4438845046b991de48ee', 
'fd2c302794184209b58c6b922ae1ae8c', 'e3e9f520defd4642a09f2bf702ec23f4']", 'zoneCount: 7'] -``` - -#### VA Serving Rendered Pipeline - -Adding a configuration parameter to specify the frame-destination enables a secondary workflow, with VA Serving rendering visualization of regions and entity detections/events (shown below). - -By setting `enable_watermark` and `frame-destination` parameter for RTSP re streaming, the caller may visualize the output. This added to the `zones-spatial-analytics-rendered.json` extension configuration. So following the same instructions above but swapping the extension configuration used will dynamically produce the scene using rudimentary markers/dots showing the boundary of the defined polygon regions. This allows the DL Streamer `gvawatermark` element (used in the frame-destination) to handle rendering. - -> gvawatermark does not draw the polygon lines, so the view must currently "connect the dots" themself. - - We add a label when a detection entity intersects or is within a defined region. - -1. Run server: - - ``` - $ ENABLE_RTSP=true ./samples/lva_ai_extension/docker/run_server.sh - ``` - -2. Run client with example extension configuration, with rendering support: - - ``` - $ ./samples/lva_ai_extension/docker/run_client.sh \ - --extension-config /home/video-analytics-serving/samples/lva_ai_extension/client/extension-config/zones-vaserving-rendered.json \ - --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/person-bicycle-car-detection.mp4?raw=true - ``` -3. Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/zone-events`. \ No newline at end of file diff --git a/samples/lva_ai_extension/pipelines/object_tracking/object_line_crossing/README.md b/samples/lva_ai_extension/pipelines/object_tracking/object_line_crossing/README.md deleted file mode 100644 index c3d2588..0000000 --- a/samples/lva_ai_extension/pipelines/object_tracking/object_line_crossing/README.md +++ /dev/null @@ -1,64 +0,0 @@ -## Extension Configuration - Spatial Analytics - -### Line Crossing Events - -The `object_tracking/line_crossing` pipeline demonstrates how to integrate LVA with Spatial Analytics. The calling application may define one or more lines in extension configuration. - -When objects are detected as input that crosses a line, along with the inference entity LVA emits a `lineCrossing` event. - - -#### Build and Run - -1. Build: - - ``` - $ ./samples/lva_ai_extension/docker/build.sh - ``` - -2. Run server: - - ``` - $ ./samples/lva_ai_extension/docker/run_server.sh - ``` - -3. Run client with example extension configuration: - - ``` - $ ./samples/lva_ai_extension/docker/run_client.sh \ - --extension-config /home/video-analytics-serving/samples/lva_ai_extension/client/extension-config/line_cross_tracking_config.json \ - --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=True - ``` - -#### Expected Spatial Analytics Output - -The primary workflow is driven by `line_cross_tracking_config.json` which contains the minimal required configuration to produce `lineCrossing` events for a media stream. 
- -``` - -[AIXC] [2021-05-12 18:57:01,315] [MainThread ] [INFO]: ENTITY - person (1.00) [0.40, 0.27, 0.12, 0.62] ['inferenceId: d47a4192ca4b4933a6c6c588220f59de', 'subtype: objectDetection', 'id: 1'] -[AIXC] [2021-05-12 18:57:01,315] [MainThread ] [INFO]: EVENT - hallway_bottom: ['inferenceId: 520d7506e5c94f3b9aeb1d157af6311c', 'subtype: lineCrossing', "relatedInferences: ['d47a4192ca4b4933a6c6c588220f59de']", 'counterclockwiseTotal: 1', 'total: 1', 'clockwiseTotal: 0', 'direction: counterclockwise'] -``` - -#### VA Serving Rendered Pipeline - -Adding a configuration parameter to specify the frame-destination enables a secondary workflow, with VA Serving rendering visualization of lines and entity detections/events (shown below). - -By setting `enable_watermark` and `frame-destination` parameter for RTSP re streaming, the caller may visualize the output. This added to the `line_cross_tracking_config_rtsp.json` extension configuration. So following the same instructions above but swapping the extension configuration used will dynamically produce the scene using rudimentary markers/dots showing the start and end points of defined lines. This allows the DL Streamer `gvawatermark` element (used in the frame-destination) to handle rendering. - -> gvawatermark does not draw the lines, so the view must currently "connect the dots" themself. - -1. Run server: - - ``` - $ ENABLE_RTSP=true ./samples/lva_ai_extension/docker/run_server.sh - ``` - -2. Run client with example extension configuration, with rendering support: - - ``` - $ ./samples/lva_ai_extension/docker/run_client.sh \ - --extension-config /home/video-analytics-serving/samples/lva_ai_extension/client/extension-config/line_cross_tracking_config_rtsp.json \ - --sample-file-path https://github.com/intel-iot-devkit/sample-videos/blob/master/people-detection.mp4?raw=True - ``` - -3. Connect and visualize: Re-stream pipeline using VLC network stream with url `rtsp://localhost:8554/vaserving`. \ No newline at end of file diff --git a/samples/lva_ai_extension/topologies/operations.json b/samples/lva_ai_extension/topologies/operations.json deleted file mode 100644 index 03cd573..0000000 --- a/samples/lva_ai_extension/topologies/operations.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "apiVersion": "2.0", - "operations": [ - { - "opName": "GraphTopologySet", - "opParams": { - "topologyUrl": "https://raw.githubusercontent.com/intel/video-analytics-serving/master/samples/lva_ai_extension/topologies/topology.json" - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraph1", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/camera-300s.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{}" - } - ] - } - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "WaitForInput", - "opParams": { - "message": "The topology will now be deactivated. 
Press Enter to continue" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphTopologyDelete", - "opParams": { - "name": "InferencingWithGrpcExtension" - } - } - ] -} \ No newline at end of file diff --git a/samples/lva_ai_extension/topologies/operations_3_pipelines.json b/samples/lva_ai_extension/topologies/operations_3_pipelines.json deleted file mode 100644 index c5a6535..0000000 --- a/samples/lva_ai_extension/topologies/operations_3_pipelines.json +++ /dev/null @@ -1,173 +0,0 @@ -{ - "apiVersion": "2.0", - "operations": [ - { - "opName": "GraphTopologySet", - "opParams": { - "topologyUrl": "https://raw.githubusercontent.com/intel/video-analytics-serving/master/samples/lva_ai_extension/topologies/topology.json" - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraphDetection", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/camera-300s.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_detection\",\"version\":\"person_vehicle_bike_detection\"}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraphClassification", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/camera-300s.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_classification\",\"version\":\"vehicle_attributes_recognition\"}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraphTracking", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/camera-300s.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_tracking\",\"version\":\"person_vehicle_bike_tracking\"}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraphDetection" - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraphClassification" - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraphTracking" - } - }, - { - "opName": "WaitForInput", - "opParams": { - "message": "The topology will now be deactivated. 
Press Enter to continue" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraphDetection" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraphClassification" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraphTracking" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraphDetection" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraphClassification" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraphTracking" - } - }, - { - "opName": "GraphTopologyDelete", - "opParams": { - "name": "InferencingWithGrpcExtension" - } - } - ] -} diff --git a/samples/lva_ai_extension/topologies/operations_gpu.json b/samples/lva_ai_extension/topologies/operations_gpu.json deleted file mode 100644 index 41324fb..0000000 --- a/samples/lva_ai_extension/topologies/operations_gpu.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "apiVersion": "2.0", - "operations": [ - { - "opName": "GraphTopologySet", - "opParams": { - "topologyUrl": "https://raw.githubusercontent.com/intel/video-analytics-serving/master/samples/lva_ai_extension/topologies/topology.json" - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraph1", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/camera-300s.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_detection\",\"version\":\"person_vehicle_bike_detection\",\"parameters\":{\"detection-device\":\"GPU\", \"detection-model-instance-id\":\"GPU_Detection\"}}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "WaitForInput", - "opParams": { - "message": "The topology will now be deactivated. 
Press Enter to continue" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphTopologyDelete", - "opParams": { - "name": "InferencingWithGrpcExtension" - } - } - ] -} \ No newline at end of file diff --git a/samples/lva_ai_extension/topologies/operations_hddl.json b/samples/lva_ai_extension/topologies/operations_hddl.json deleted file mode 100644 index 4456274..0000000 --- a/samples/lva_ai_extension/topologies/operations_hddl.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "apiVersion": "2.0", - "operations": [ - { - "opName": "GraphTopologySet", - "opParams": { - "topologyUrl": "https://raw.githubusercontent.com/intel/video-analytics-serving/master/samples/lva_ai_extension/topologies/topology.json" - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraph1", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/camera-300s.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_detection\",\"version\":\"person_vehicle_bike_detection\",\"parameters\":{\"detection-device\":\"HDDL\"}}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "WaitForInput", - "opParams": { - "message": "The topology will now be deactivated. Press Enter to continue" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphTopologyDelete", - "opParams": { - "name": "InferencingWithGrpcExtension" - } - } - ] -} \ No newline at end of file diff --git a/samples/lva_ai_extension/topologies/operations_myriad.json b/samples/lva_ai_extension/topologies/operations_myriad.json deleted file mode 100644 index f5512ad..0000000 --- a/samples/lva_ai_extension/topologies/operations_myriad.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "apiVersion": "2.0", - "operations": [ - { - "opName": "GraphTopologySet", - "opParams": { - "topologyUrl": "https://raw.githubusercontent.com/intel/video-analytics-serving/master/samples/lva_ai_extension/topologies/topology.json" - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraph1", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/camera-300s.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_detection\",\"version\":\"person_vehicle_bike_detection\",\"parameters\":{\"detection-device\":\"MYRIAD\"}}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "WaitForInput", - "opParams": { - "message": "The topology will now be deactivated. 
Press Enter to continue" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraph1" - } - }, - { - "opName": "GraphTopologyDelete", - "opParams": { - "name": "InferencingWithGrpcExtension" - } - } - ] -} \ No newline at end of file diff --git a/samples/lva_ai_extension/topologies/operations_xpu.json b/samples/lva_ai_extension/topologies/operations_xpu.json deleted file mode 100644 index 3c2d0cf..0000000 --- a/samples/lva_ai_extension/topologies/operations_xpu.json +++ /dev/null @@ -1,173 +0,0 @@ -{ - "apiVersion": "2.0", - "operations": [ - { - "opName": "GraphTopologySet", - "opParams": { - "topologyUrl": "https://raw.githubusercontent.com/intel/video-analytics-serving/master/samples/lva_ai_extension/topologies/topology.json" - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraphDetection", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/homes_00425.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_detection\",\"version\":\"person_vehicle_bike_detection\",\"parameters\":{\"detection-device\":\"CPU\",\"detection-model-instance-id\":\"CPU_Detection\"}}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraphClassification", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/homes_00425.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_detection\",\"version\":\"person_vehicle_bike_detection\",\"parameters\":{\"detection-device\":\"GPU\",\"detection-model-instance-id\":\"GPU_Detection\"}}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceSet", - "opParams": { - "name": "SampleGraphTracking", - "properties": { - "topologyName": "InferencingWithGrpcExtension", - "description": "Sample graph description", - "parameters": [ - { - "name": "rtspUrl", - "value": "rtsp://rtspsim:554/media/homes_00425.mkv" - }, - { - "name": "rtspUserName", - "value": "testuser" - }, - { - "name": "rtspPassword", - "value": "testpassword" - }, - { - "name": "grpcExtensionAddress", - "value": "tcp://lvaExtension:5001" - }, - { - "name": "extensionConfiguration", - "value": "{\"pipeline\":{\"name\":\"object_detection\",\"version\":\"person_vehicle_bike_detection\",\"parameters\":{\"detection-device\":\"MYRIAD\",\"detection-model-instance-id\":\"MYRIAD_Detection\"}}}" - } - ] - } - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraphDetection" - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraphClassification" - } - }, - { - "opName": "GraphInstanceActivate", - "opParams": { - "name": "SampleGraphTracking" - } - }, - { - "opName": "WaitForInput", - "opParams": { - "message": "The topology will now be 
deactivated. Press Enter to continue" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraphDetection" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraphClassification" - } - }, - { - "opName": "GraphInstanceDeactivate", - "opParams": { - "name": "SampleGraphTracking" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraphDetection" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraphClassification" - } - }, - { - "opName": "GraphInstanceDelete", - "opParams": { - "name": "SampleGraphTracking" - } - }, - { - "opName": "GraphTopologyDelete", - "opParams": { - "name": "InferencingWithGrpcExtension" - } - } - ] - } diff --git a/samples/lva_ai_extension/topologies/topology.json b/samples/lva_ai_extension/topologies/topology.json deleted file mode 100644 index 25bb15f..0000000 --- a/samples/lva_ai_extension/topologies/topology.json +++ /dev/null @@ -1,120 +0,0 @@ -{ - "@apiVersion": "2.0", - "name": "InferencingWithGrpcExtension", - "properties": { - "description": "Record on motion to AMS Asset and record events from external models through gRPC Extension", - "parameters": [ - { - "name": "rtspUrl", - "type": "String", - "description": "Rtsp source Url address" - }, - { - "name": "rtspUserName", - "type": "String", - "description": "Rtsp source user name.", - "default": "dummyUsername" - }, - { - "name": "rtspPassword", - "type": "String", - "description": "Rtsp source password.", - "default": "dummyPassword" - }, - { - "name": "motionSensitivity", - "type": "String", - "description": "Motion detection sensitivity", - "default": "medium" - }, - { - "name": "grpcExtensionAddress", - "type": "String", - "description": "grpc LVA Extension Address", - "default": "tcp://lvaextension:44000" - }, - { - "name": "grpcExtensionUserName", - "type": "String", - "description": "inferencing endpoint user name.", - "default": "dummyUserName" - }, - { - "name": "grpcExtensionPassword", - "type": "String", - "description": "inferencing endpoint password.", - "default": "dummyPassword" - }, - { - "name": "hubSinkOutputName", - "type": "String", - "description": "Hub sink output name", - "default": "iothubsinkoutput" - }, - { - "name": "extensionConfiguration", - "type": "String", - "description": "Optional extension configuration for the gRPC server", - "default": "{}" - } - ], - "sources": [ - { - "@type": "#Microsoft.Media.MediaGraphRtspSource", - "name": "rtspSource", - "endpoint": { - "@type": "#Microsoft.Media.MediaGraphUnsecuredEndpoint", - "url": "${rtspUrl}", - "credentials": { - "@type": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials", - "username": "${rtspUserName}", - "password": "${rtspPassword}" - } - } - } - ], - "processors": [ - { - "@type": "#Microsoft.Media.MediaGraphGrpcExtension", - "name": "grpcExtension", - "endpoint": { - "@type": "#Microsoft.Media.MediaGraphUnsecuredEndpoint", - "url": "${grpcExtensionAddress}", - "credentials": { - "@type": "#Microsoft.Media.MediaGraphUsernamePasswordCredentials", - "username": "${grpcExtensionUserName}", - "password": "${grpcExtensionPassword}" - } - }, - "extensionConfiguration": "${extensionConfiguration}", - "dataTransfer": { - "mode": "sharedMemory", - "SharedMemorySizeMiB": "64" - }, - "image": { - "format": { - "@type": "#Microsoft.Media.MediaGraphImageFormatRaw", - "pixelFormat": "bgr24" - } - }, - "inputs": [ - { - "nodeName": "rtspSource" - } - ] - } - ], - "sinks": [ - { - "@type": 
"#Microsoft.Media.MediaGraphIoTHubMessageSink", - "name": "hubSink", - "hubOutputName": "${hubSinkOutputName}", - "inputs": [ - { - "nodeName": "grpcExtension" - } - ] - } - ] - } -} \ No newline at end of file diff --git a/samples/record_playback/pipelines/object_detection/segment_record/pipeline.json b/samples/record_playback/pipelines/object_detection/segment_record/pipeline.json index b197dc3..76b2bad 100644 --- a/samples/record_playback/pipelines/object_detection/segment_record/pipeline.json +++ b/samples/record_playback/pipelines/object_detection/segment_record/pipeline.json @@ -1,7 +1,7 @@ { "type": "GStreamer", "template": ["urisourcebin name=source ! tee name=t ! queue ! decodebin ! video/x-raw ! videoconvert name=videoconvert", - " ! gvadetect model={models[object_detection][1][network]} name=detection", + " ! gvadetect model={models[object_detection][person_vehicle_bike][network]} name=detection", " ! gvametaconvert name=metaconvert ! gvametapublish name=destination", " ! appsink name=appsink", " t. ! queue ! qtdemux ! splitmuxsink name=splitmuxsink" diff --git a/tools/model_downloader/README.md b/tools/model_downloader/README.md index 807e578..9e9759a 100644 --- a/tools/model_downloader/README.md +++ b/tools/model_downloader/README.md @@ -77,7 +77,7 @@ Example: ## Downloading Models with the standalone tool When run as a standalone tool, the model downloader will run within an -`openvino/ubuntu20_data_dev:2021.4` docker image and download models listed in +`openvino/ubuntu20_data_dev:2021.4.1` docker image and download models listed in a yaml file that can be specified via the `--model-list` argument. Example: diff --git a/tools/model_downloader/model_downloader.sh b/tools/model_downloader/model_downloader.sh index 19758fe..51d3c7d 100755 --- a/tools/model_downloader/model_downloader.sh +++ b/tools/model_downloader/model_downloader.sh @@ -11,8 +11,9 @@ SOURCE_DIR=$(dirname "$TOOLS_DIR") OUTPUT_DIR=$(realpath $( pwd )) FORCE= RUN_PREFIX= -OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-"openvino/ubuntu20_data_dev"} -OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4"} +OPEN_MODEL_ZOO_TOOLS_IMAGE=${OPEN_MODEL_ZOO_TOOLS_IMAGE:-"${CACHE_PREFIX}openvino/ubuntu20_data_dev"} +OPEN_MODEL_ZOO_VERSION=${OPEN_MODEL_ZOO_VERSION:-"2021.4.1"} + DL_STREAMER_VERSION= MODE= MODEL_LIST=$SOURCE_DIR/"models_list/models.list.yml" diff --git a/vaclient/README.md b/vaclient/README.md index 8f3b4d9..5290d82 100644 --- a/vaclient/README.md +++ b/vaclient/README.md @@ -14,7 +14,6 @@ Fist models: ~/video-analytics-serving$ ./vaclient/vaclient.sh list-models - emotion_recognition/1 - - object_detection/1 - object_detection/person_vehicle_bike - object_classification/vehicle_attributes - audio_detection/environment diff --git a/vaclient/results_watcher.py b/vaclient/results_watcher.py index 5424835..c266c5d 100755 --- a/vaclient/results_watcher.py +++ b/vaclient/results_watcher.py @@ -49,12 +49,10 @@ def watch_method(self): # Print Functions @classmethod def print_results(cls, results): - """Output as JSON formatted data""" - if "timestamp" in results: - print("Timestamp {}".format(results["timestamp"])) - for index, detected_object in enumerate(results.get("objects", [])): + object_output = [] + for detected_object in results.get("objects", []): meta = {} - results_output = [] + current_object = [] for key in detected_object: if key == "detection": confidence = detected_object[key]["confidence"] @@ -63,9 +61,9 @@ def print_results(cls, results): y_min = 
detected_object[key]["bounding_box"]["y_min"] x_max = detected_object[key]["bounding_box"]["x_max"] y_max = detected_object[key]["bounding_box"]["y_max"] - results_output.append(label) - results_output.append("({:.2f})".format(confidence)) - results_output.append("[{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(x_min, + current_object.append(label) + current_object.append("({:.2f})".format(confidence)) + current_object.append("[{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(x_min, y_min, x_max, y_max)) @@ -78,15 +76,22 @@ def print_results(cls, results): if "name" in tensor and tensor["name"] == "action": confidence = tensor["confidence"] label = tensor["label"] - results_output.append(label) - results_output.append("({:.2f})".format(confidence)) + current_object.append(label) + current_object.append("({:.2f})".format(confidence)) if meta: - results_output.append(str(meta)) - print("- {}".format(" ".join(results_output))) - + current_object.append(str(meta)) + if current_object: + object_output.append("- {}".format(" ".join(current_object))) + event_output = [] for event in results.get("events", []): - event_str = "Event: " + current_event = [] for key in event: - event_str += "{}: {}, ".format(key, event[key]) - print(event_str.rstrip(', ')) - + current_event.append("{}: {}".format(key, event[key])) + if current_event: + event_output.append("Event: {}".format(", ".join(current_event))) + if "timestamp" in results and (object_output or event_output): + print("Timestamp {}".format(results["timestamp"])) + if object_output: + print("{}".format("\n".join(object_output))) + if event_output: + print("{}".format("\n".join(event_output)))