From 0901151c696569e989c3a46ea8e9fcf73f3cd3a5 Mon Sep 17 00:00:00 2001 From: zhangwei Date: Fri, 6 Dec 2024 17:19:28 +0800 Subject: [PATCH] [Android] update readme --- Android/APIExample/README.md | 18 +- Android/APIExample/README.zh.md | 18 +- .../src/main/cpp/AgoraRtcKit/AgoraBase.h | 2161 ++++++++++------- .../src/main/cpp/AgoraRtcKit/AgoraMediaBase.h | 581 +++-- .../cpp/AgoraRtcKit/IAgoraMediaRecorder.h | 89 + .../main/cpp/AgoraRtcKit/NGIAgoraAudioTrack.h | 85 +- .../AgoraRtcKit/NGIAgoraExtensionProvider.h | 15 + .../main/cpp/AgoraRtcKit/NGIAgoraLocalUser.h | 14 +- .../AgoraRtcKit/NGIAgoraMediaNodeFactory.h | 52 +- .../cpp/AgoraRtcKit/NGIAgoraRtcConnection.h | 5 +- .../cpp/AgoraRtcKit/NGIAgoraScreenCapturer.h | 22 +- .../main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h | 12 +- .../src/main/cpp/AgoraRtcKit/api/aosl_defs.h | 8 + .../src/main/cpp/AgoraRtcKit/api/aosl_ref.h | 95 +- .../src/main/cpp/AgoraRtcKit/api/aosl_types.h | 26 + .../cpp/AgoraRtcKit/api/cpp/aosl_ares_class.h | 2 +- .../cpp/AgoraRtcKit/api/cpp/aosl_poll_class.h | 6 +- .../cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h | 43 +- .../agora-stream-encrypt/src/main/agoraLibs | 1 - .../src/main/cpp/include/agora/AgoraBase.h | 2161 ++++++++++------- .../main/cpp/include/agora/AgoraMediaBase.h | 581 +++-- .../cpp/include/agora/IAgoraMediaEngine.h | 18 + .../cpp/include/agora/IAgoraMediaRecorder.h | 1 - .../main/cpp/include/agora/IAgoraRtcEngine.h | 187 +- .../cpp/include/agora/IAgoraRtcEngineEx.h | 76 + .../agora/{internal => rte_base}/c/bridge.h | 2 +- .../agora/{internal => rte_base}/c/c_error.h | 0 .../agora/{internal => rte_base}/c/c_player.h | 269 +- .../agora/{internal => rte_base}/c/c_rte.h | 9 +- .../agora/{internal => rte_base}/c/channel.h | 0 .../agora/{internal => rte_base}/c/common.h | 0 .../{internal => rte_base}/c/device/audio.h | 6 +- .../c/device/audio_device_manager.h | 8 +- .../{internal => rte_base}/c/device/device.h | 4 +- .../{internal => rte_base}/c/device/video.h | 6 +- .../c/device/video_device_manager.h | 8 +- .../agora/{internal => rte_base}/c/handle.h | 0 .../agora/{internal => rte_base}/c/info.h | 0 .../agora/{internal => rte_base}/c/log.h | 0 .../agora/{internal => rte_base}/c/metadata.h | 0 .../agora/{internal => rte_base}/c/observer.h | 0 .../agora/{internal => rte_base}/c/old.h | 0 .../agora/{internal => rte_base}/c/options.h | 0 .../c/stream/cdn_stream.h | 6 +- .../c/stream/local_cdn_stream.h | 6 +- .../c/stream/local_realtime_stream.h | 6 +- .../c/stream/local_stream.h | 6 +- .../c/stream/realtime_stream.h | 6 +- .../c/stream/remote_cdn_stream.h | 6 +- .../c/stream/remote_realtime_stream.h | 6 +- .../c/stream/remote_stream.h | 8 +- .../{internal => rte_base}/c/stream/stream.h | 8 +- .../c/track/camera_video_track.h | 6 +- .../{internal => rte_base}/c/track/canvas.h | 22 +- .../{internal => rte_base}/c/track/layout.h | 0 .../c/track/local_audio_track.h | 8 +- .../c/track/local_track.h | 6 +- .../c/track/local_video_track.h | 2 +- .../c/track/mic_audio_track.h | 6 +- .../c/track/mixed_video_track.h | 6 +- .../c/track/remote_audio_track.h | 6 +- .../c/track/remote_track.h | 6 +- .../c/track/remote_video_track.h | 2 +- .../c/track/screen_video_track.h | 10 +- .../{internal => rte_base}/c/track/track.h | 6 +- .../c/track/video_track.h | 6 +- .../{internal => rte_base}/c/track/view.h | 6 +- .../c/user/local_user.h | 8 +- .../c/user/remote_user.h | 6 +- .../{internal => rte_base}/c/user/user.h | 12 +- .../{internal => rte_base}/c/utils/buf.h | 4 +- .../{internal => rte_base}/c/utils/frame.h | 0 .../{internal => 
rte_base}/c/utils/rect.h | 0 .../{internal => rte_base}/c/utils/string.h | 2 +- .../{internal => rte_base}/c/utils/uuid.h | 0 .../{ => rte_base}/rte_cpp_callback_utils.h | 78 +- .../include/agora/rte_base/rte_cpp_canvas.h | 227 ++ .../agora/{ => rte_base}/rte_cpp_error.h | 26 +- .../include/agora/rte_base/rte_cpp_player.h | 1007 ++++++++ .../cpp/include/agora/rte_base/rte_cpp_rte.h | 403 +++ .../agora/{ => rte_base}/rte_cpp_stream.h | 7 +- .../agora/{ => rte_base}/rte_cpp_string.h | 7 +- .../src/main/cpp/include/agora/rte_cpp.h | 12 +- .../main/cpp/include/agora/rte_cpp_canvas.h | 109 - .../main/cpp/include/agora/rte_cpp_player.h | 443 ---- .../src/main/cpp/include/agora/rte_cpp_rte.h | 218 -- Android/APIExample/app/.gitignore | 3 +- 87 files changed, 6170 insertions(+), 3147 deletions(-) create mode 100644 Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h delete mode 120000 Android/APIExample/agora-stream-encrypt/src/main/agoraLibs rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/bridge.h (94%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/c_error.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/c_player.h (61%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/c_rte.h (94%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/channel.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/common.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/device/audio.h (90%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/device/audio_device_manager.h (96%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/device/device.h (87%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/device/video.h (89%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/device/video_device_manager.h (94%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/handle.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/info.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/log.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/metadata.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/observer.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/old.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/options.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/cdn_stream.h (87%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/local_cdn_stream.h (94%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/local_realtime_stream.h (85%) rename 
Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/local_stream.h (88%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/realtime_stream.h (88%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/remote_cdn_stream.h (93%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/remote_realtime_stream.h (85%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/remote_stream.h (88%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/stream/stream.h (98%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/camera_video_track.h (87%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/canvas.h (79%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/layout.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/local_audio_track.h (96%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/local_track.h (94%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/local_video_track.h (91%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/mic_audio_track.h (97%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/mixed_video_track.h (88%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/remote_audio_track.h (96%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/remote_track.h (90%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/remote_video_track.h (88%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/screen_video_track.h (88%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/track.h (91%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/video_track.h (90%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/track/view.h (93%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/user/local_user.h (97%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/user/remote_user.h (96%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/user/user.h (94%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/utils/buf.h (97%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/utils/frame.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/utils/rect.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/utils/string.h (98%) rename 
Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{internal => rte_base}/c/utils/uuid.h (100%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{ => rte_base}/rte_cpp_callback_utils.h (61%) create mode 100644 Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_canvas.h rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{ => rte_base}/rte_cpp_error.h (65%) create mode 100644 Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_player.h create mode 100644 Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_rte.h rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{ => rte_base}/rte_cpp_stream.h (69%) rename Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/{ => rte_base}/rte_cpp_string.h (89%) delete mode 100644 Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_canvas.h delete mode 100644 Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_player.h delete mode 100644 Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_rte.h diff --git a/Android/APIExample/README.md b/Android/APIExample/README.md index 37c26351a..ee36e5f0c 100644 --- a/Android/APIExample/README.md +++ b/Android/APIExample/README.md @@ -90,7 +90,12 @@ Since version 4.0.0, Agora SDK provides an Extension Interface Framework. Develo In order to enable it, you could do as follows: 1. Download [opencv](https://agora-adc-artifacts.s3.cn-north-1.amazonaws.com.cn/androidLibs/opencv4.zip) library, unzip it and copy into Android/APIExample/agora-simple-filter/src/main/jniLibs -2. Download [Agora SDK包](https://doc.shengwang.cn/doc/rtc/android/resources), unzip it and copy c++ .so library (keeps arch folder) to Android/APIExample/agora-simple-filter/src/main/agoraLibs +```text +Android/APIExample/agora-simple-filter/src/main/jniLibs +├── arm64-v8a +└── armeabi-v7a +``` +2. Download the [Agora SDK](https://docs.agora.io/en/sdks?platform=android), unzip it, and copy the C++ .so libraries (keeping the architecture folders) to Android/APIExample/agora-simple-filter/src/main/agoraLibs; then replace the .h files in the Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit directory with the ones from low_level_api/include. ```text Android/APIExample/agora-simple-filter/src/main/agoraLibs @@ -99,6 +104,10 @@ Android/APIExample/agora-simple-filter/src/main/agoraLibs ├── x86 └── x86_64 ``` +```text +Android/APIExample/agora-simple-filter/src/main/cpp +└── AgoraRtcKit +``` 3. Modify simpleFilter to true in Android/APIExample/gradle.properties @@ -107,7 +116,7 @@ This project contains custom stream encrypt examples, which cannot be enabled by default. The configuration method is as follows: -1. Download [Agora SDK包](https://doc.shengwang.cn/doc/rtc/android/resources), unzip it and copy c++ .so library (keeps arch folder) to Android/APIExample/agora-stream-encrypt/src/main/agoraLibs +1. Download the [Agora SDK](https://docs.agora.io/en/sdks?platform=android), unzip it, and copy the C++ .so libraries (keeping the architecture folders) to Android/APIExample/agora-stream-encrypt/src/main/agoraLibs; then replace the .h files in the Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora directory with the ones from high_level_api/include.
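For clarity, step 1 boils down to the following copy/replace mapping. The source locations inside the downloaded SDK package are illustrative assumptions (folder layouts vary between SDK releases); only the destination paths are the ones given in this README:

```text
<Agora SDK package>/high_level_api/include/*.h  ->  Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/
<Agora SDK package>/<abi>/*.so                  ->  Android/APIExample/agora-stream-encrypt/src/main/agoraLibs/<abi>/
```

where `<abi>` is each architecture folder shipped with the SDK (for example arm64-v8a, armeabi-v7a, x86, x86_64), matching the tree shown below.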
```text Android/APIExample/agora-stream-encrypt/src/main/agoraLibs @@ -116,6 +125,11 @@ Android/APIExample/agora-stream-encrypt/src/main/agoraLibs ├── x86 └── x86_64 ``` +```text +Android/APIExample/agora-stream-encrypt/src/main/cpp +└── include + └── agora +``` 2. Modify streamEncrypt to true in Android/APIExample/gradle.properties diff --git a/Android/APIExample/README.zh.md b/Android/APIExample/README.zh.md index 8e9142829..b372acf0b 100644 --- a/Android/APIExample/README.zh.md +++ b/Android/APIExample/README.zh.md @@ -86,7 +86,12 @@ 从4.0.0SDK开始,Agora SDK支持插件系统和开放的云市场帮助开发者发布自己的音视频插件,本项目包含了一个SimpleFilter示例,默认是禁用的状态,如果需要开启编译和使用需要完成以下步骤: 1. 下载 [opencv](https://agora-adc-artifacts.s3.cn-north-1.amazonaws.com.cn/androidLibs/opencv4.zip) 解压后复制到 Android/APIExample/agora-simple-filter/src/main/jniLibs -2. 手动下载[Agora SDK包](https://doc.shengwang.cn/doc/rtc/android/resources), 解压后将c++动态库(包括架构文件夹)copy到Android/APIExample/agora-simple-filter/src/main/agoraLibs +```text +Android/APIExample/agora-simple-filter/src/main/jniLibs +├── arm64-v8a +└── armeabi-v7a +``` +2. 手动下载[Agora SDK包](https://doc.shengwang.cn/doc/rtc/android/resources), 解压后将c++动态库(包括架构文件夹)copy到Android/APIExample/agora-simple-filter/src/main/agoraLibs, 将 low_level_api/include 头文件替换到 Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit 目录中 ```text Android/APIExample/agora-simple-filter/src/main/agoraLibs @@ -95,6 +100,10 @@ Android/APIExample/agora-simple-filter/src/main/agoraLibs ├── x86 └── x86_64 ``` +```text +Android/APIExample/agora-simple-filter/src/main/cpp +└── AgoraRtcKit +``` 3. 修改Android/APIExample/gradle.properties配置文件中simpleFilter值为true @@ -102,7 +111,7 @@ Android/APIExample/agora-simple-filter/src/main/agoraLibs 本项目包含自定义加密示例,默认是不启用的。配置方法如下: -1. 手动下载[Agora SDK包](https://doc.shengwang.cn/doc/rtc/android/resources), 解压后将c++动态库(包括架构文件夹)copy到Android/APIExample/agora-stream-encrypt/src/main/agoraLibs +1. 手动下载[Agora SDK包](https://doc.shengwang.cn/doc/rtc/android/resources), 解压后将c++动态库(包括架构文件夹)copy到Android/APIExample/agora-stream-encrypt/src/main/agoraLibs,将 hight_level_api/include 头文件替换到 Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora 目录中 ```text Android/APIExample/agora-stream-encrypt/src/main/agoraLibs @@ -111,6 +120,11 @@ Android/APIExample/agora-stream-encrypt/src/main/agoraLibs ├── x86 └── x86_64 ``` +```text +Android/APIExample/agora-stream-encrypt/src/main/cpp +└── include + └── agora +``` 2. 修改Android/APIExample/gradle.properties配置文件中streamEncrypt值为true diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h index 792137209..c3bfa34cb 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraBase.h @@ -559,7 +559,8 @@ enum ERROR_CODE_TYPE { /** * 101: The App ID is invalid, usually because the data format of the App ID is incorrect. * - * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to initialize the Agora service. + * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to + * initialize the Agora service. */ ERR_INVALID_APP_ID = 101, /** @@ -578,9 +579,9 @@ enum ERROR_CODE_TYPE { * - Timeout for token authorization: Once a token is generated, you must use it to access the * Agora service within 24 hours. Otherwise, the token times out and you can no longer use it. 
* - The token privilege expires: To generate a token, you need to set a timestamp for the token - * privilege to expire. For example, If you set it as seven days, the token expires seven days after - * its usage. In that case, you can no longer access the Agora service. The users cannot make calls, - * or are kicked out of the channel. + * privilege to expire. For example, If you set it as seven days, the token expires seven days + * after its usage. In that case, you can no longer access the Agora service. The users cannot + * make calls, or are kicked out of the channel. * * Solution: Regardless of whether token authorization times out or the token privilege expires, * you need to generate a new token on your server, and try to join the channel. @@ -588,19 +589,19 @@ enum ERROR_CODE_TYPE { ERR_TOKEN_EXPIRED = 109, /** * 110: The token is invalid, usually for one of the following reasons: - * - Did not provide a token when joining a channel in a situation where the project has enabled the - * App Certificate. + * - Did not provide a token when joining a channel in a situation where the project has enabled + * the App Certificate. * - Tried to join a channel with a token in a situation where the project has not enabled the App * Certificate. - * - The App ID, user ID and channel name that you use to generate the token on the server do not match - * those that you use when joining a channel. + * - The App ID, user ID and channel name that you use to generate the token on the server do not + * match those that you use when joining a channel. * * Solution: - * - Before joining a channel, check whether your project has enabled the App certificate. If yes, you - * must provide a token when joining a channel; if no, join a channel without a token. - * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that you - * use to generate the token is the same as the App ID that you use to initialize the Agora service, and - * the user ID and channel name that you use to join the channel. + * - Before joining a channel, check whether your project has enabled the App certificate. If yes, + * you must provide a token when joining a channel; if no, join a channel without a token. + * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that + * you use to generate the token is the same as the App ID that you use to initialize the Agora + * service, and the user ID and channel name that you use to join the channel. */ ERR_INVALID_TOKEN = 110, /** @@ -672,13 +673,15 @@ enum ERROR_CODE_TYPE { ERR_LICENSE_CREDENTIAL_INVALID = 131, /** - * 134: The user account is invalid, usually because the data format of the user account is incorrect. + * 134: The user account is invalid, usually because the data format of the user account is + * incorrect. */ ERR_INVALID_USER_ACCOUNT = 134, /** 157: The necessary dynamical library is not integrated. For example, if you call - * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do not integrate the dynamical - * library for the deep-learning noise reduction into your project, the SDK reports this error code. + * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do + * not integrate the dynamical library for the deep-learning noise reduction into your project, + * the SDK reports this error code. 
* */ ERR_MODULE_NOT_FOUND = 157, @@ -698,7 +701,7 @@ enum ERROR_CODE_TYPE { ERR_CERT_REQUEST = 168, // PcmSend Error num - ERR_PCMSEND_FORMAT = 200, // unsupport pcm format + ERR_PCMSEND_FORMAT = 200, // unsupport pcm format ERR_PCMSEND_BUFFEROVERFLOW = 201, // buffer overflow, the pcm send rate too quickly /// @cond @@ -752,27 +755,27 @@ enum ERROR_CODE_TYPE { enum LICENSE_ERROR_TYPE { /** * 1: Invalid license - */ + */ LICENSE_ERR_INVALID = 1, /** * 2: License expired - */ + */ LICENSE_ERR_EXPIRE = 2, /** * 3: Exceed license minutes limit - */ + */ LICENSE_ERR_MINUTES_EXCEED = 3, /** * 4: License use in limited period - */ + */ LICENSE_ERR_LIMITED_PERIOD = 4, /** * 5: Same license used in different devices at the same time - */ + */ LICENSE_ERR_DIFF_DEVICES = 5, /** * 99: SDK internal error - */ + */ LICENSE_ERR_INTERNAL = 99, }; @@ -845,9 +848,9 @@ enum USER_OFFLINE_REASON_TYPE { */ USER_OFFLINE_QUIT = 0, /** - * 1: The SDK times out and the user drops offline because no data packet was received within a certain - * period of time. If a user quits the call and the message is not passed to the SDK (due to an - * unreliable channel), the SDK assumes that the user drops offline. + * 1: The SDK times out and the user drops offline because no data packet was received within a + * certain period of time. If a user quits the call and the message is not passed to the SDK (due + * to an unreliable channel), the SDK assumes that the user drops offline. */ USER_OFFLINE_DROPPED = 1, /** @@ -870,7 +873,7 @@ enum INTERFACE_ID_TYPE { AGORA_IID_STATE_SYNC = 13, AGORA_IID_META_SERVICE = 14, AGORA_IID_MUSIC_CONTENT_CENTER = 15, - AGORA_IID_H265_TRANSCODER = 16, + AGORA_IID_H265_TRANSCODER = 16, }; /** @@ -999,7 +1002,6 @@ enum FRAME_HEIGHT { FRAME_HEIGHT_540 = 540, }; - /** * Types of the video frame. */ @@ -1032,9 +1034,9 @@ enum ORIENTATION_MODE { ORIENTATION_MODE_ADAPTIVE = 0, /** * 1: Landscape mode. In this mode, the SDK always outputs videos in landscape (horizontal) mode. - * If the captured video is in portrait mode, the video encoder crops it to fit the output. Applies - * to situations where the receiving end cannot process the rotational information. For example, - * CDN live streaming. + * If the captured video is in portrait mode, the video encoder crops it to fit the output. + * Applies to situations where the receiving end cannot process the rotational information. For + * example, CDN live streaming. */ ORIENTATION_MODE_FIXED_LANDSCAPE = 1, /** @@ -1051,9 +1053,16 @@ enum ORIENTATION_MODE { */ enum DEGRADATION_PREFERENCE { /** - * 0: (Default) Prefers to reduce the video frame rate while maintaining video quality during video - * encoding under limited bandwidth. This degradation preference is suitable for scenarios where - * video quality is prioritized. + * -1: (Default) SDK uses degradation preference according to setVideoScenario API settings, real-time network state and other relevant data information. + * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then MAINTAIN_BALANCED is used. If not, then MAINTAIN_RESOLUTION is used. + * Also if network state has changed, SDK may change this parameter between MAINTAIN_FRAMERATE、MAINTAIN_BALANCED and MAINTAIN_RESOLUTION automatically to get the best QOE. + * We recommend using this option. + */ + MAINTAIN_AUTO = -1, + /** + * 0: (Deprecated) Prefers to reduce the video frame rate while maintaining video quality during + * video encoding under limited bandwidth. 
This degradation preference is suitable for scenarios + * where video quality is prioritized. * @note In the COMMUNICATION channel profile, the resolution of the video sent may change, so * remote users need to handle this issue. */ @@ -1066,9 +1075,9 @@ enum DEGRADATION_PREFERENCE { MAINTAIN_FRAMERATE = 1, /** * 2: Reduces the video frame rate and video quality simultaneously during video encoding under - * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_QUALITY and MAINTAIN_FRAMERATE, - * and this preference is suitable for scenarios where both smoothness and video quality are a - * priority. + * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_RESOLUTION and + * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and + * video quality are a priority. */ MAINTAIN_BALANCED = 2, /** @@ -1155,6 +1164,11 @@ enum VIDEO_CODEC_CAPABILITY_LEVEL { * The video codec types. */ enum VIDEO_CODEC_TYPE { + /** + * 0: (Default) SDK will automatically adjust the codec type according to country and region or real-time network state and other relevant data information. + * Also if network state is changed, SDK may change codec automatically to get the best QOE. + * We recommend use this option. + */ VIDEO_CODEC_NONE = 0, /** * 1: Standard VP8. @@ -1170,11 +1184,13 @@ enum VIDEO_CODEC_TYPE { VIDEO_CODEC_H265 = 3, /** * 6: Generic. This type is used for transmitting raw video data, such as encrypted video frames. - * The SDK returns this type of video frames in callbacks, and you need to decode and render the frames yourself. + * The SDK returns this type of video frames in callbacks, and you need to decode and render the + * frames yourself. */ VIDEO_CODEC_GENERIC = 6, /** * 7: Generic H264. + * @deprecated This codec type is deprecated. */ VIDEO_CODEC_GENERIC_H264 = 7, /** @@ -1237,7 +1253,8 @@ struct SenderOptions { */ TCcMode ccMode; /** - * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE "VIDEO_CODEC_TYPE". + * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE + * "VIDEO_CODEC_TYPE". */ VIDEO_CODEC_TYPE codecType; @@ -1249,12 +1266,14 @@ struct SenderOptions { * - \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE": (Recommended) Standard bitrate. * - Communication profile: The encoding bitrate equals the base bitrate. * - Live-broadcast profile: The encoding bitrate is twice the base bitrate. - * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate stays the same + * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate + stays the same * regardless of the profile. * * The Communication profile prioritizes smoothness, while the Live Broadcast * profile prioritizes video quality (requiring a higher bitrate). Agora - * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or simply to + * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or + simply to * address this difference. * * The following table lists the recommended video encoder configurations, @@ -1262,7 +1281,8 @@ struct SenderOptions { * bitrate based on this table. If the bitrate you set is beyond the proper * range, the SDK automatically sets it to within the range. 
- | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live Bitrate (Kbps, for Live Broadcast)| + | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live + Bitrate (Kbps, for Live Broadcast)| |------------------------|------------------|----------------------------------------|----------------------------------------| | 160 × 120 | 15 | 65 | 130 | | 120 × 120 | 15 | 50 | 100 | @@ -1299,10 +1319,7 @@ struct SenderOptions { */ int targetBitrate; - SenderOptions() - : ccMode(CC_ENABLED), - codecType(VIDEO_CODEC_H265), - targetBitrate(6500) {} + SenderOptions() : ccMode(CC_ENABLED), codecType(VIDEO_CODEC_H265), targetBitrate(6500) {} }; /** @@ -1365,8 +1382,8 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_AAC_16000_LOW = 0x010101, /** - * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_16000_MEDIUM = 0x010102, /** @@ -1375,18 +1392,18 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_AAC_32000_LOW = 0x010201, /** - * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_MEDIUM = 0x010202, /** - * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration + * of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_HIGH = 0x010203, /** - * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_48000_MEDIUM = 0x010302, /** @@ -1400,18 +1417,18 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_OPUS_16000_LOW = 0x020101, /** - * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_16000_MEDIUM = 0x020102, /** - * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM = 0x020302, /** - * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration + * of 10 minutes is approximately 3.5 MB after encoding. 
*/ AUDIO_ENCODING_TYPE_OPUS_48000_HIGH = 0x020303, }; @@ -1421,13 +1438,13 @@ enum AUDIO_ENCODING_TYPE { */ enum WATERMARK_FIT_MODE { /** - * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in #WatermarkOptions. - * The settings in `WatermarkRatio` are invalid. + * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in + * #WatermarkOptions. The settings in `WatermarkRatio` are invalid. */ FIT_MODE_COVER_POSITION, /** - * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and `positionInPortraitMode` - * in `WatermarkOptions` are invalid. + * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and + * `positionInPortraitMode` in `WatermarkOptions` are invalid. */ FIT_MODE_USE_IMAGE_RATIO }; @@ -1436,9 +1453,7 @@ enum WATERMARK_FIT_MODE { * The advanced settings of encoded audio frame. */ struct EncodedAudioFrameAdvancedSettings { - EncodedAudioFrameAdvancedSettings() - : speech(true), - sendEvenIfEmpty(true) {} + EncodedAudioFrameAdvancedSettings() : speech(true), sendEvenIfEmpty(true) {} /** * Determines whether the audio source is speech. @@ -1459,19 +1474,19 @@ struct EncodedAudioFrameAdvancedSettings { */ struct EncodedAudioFrameInfo { EncodedAudioFrameInfo() - : codec(AUDIO_CODEC_AACLC), - sampleRateHz(0), - samplesPerChannel(0), - numberOfChannels(0), - captureTimeMs(0) {} + : codec(AUDIO_CODEC_AACLC), + sampleRateHz(0), + samplesPerChannel(0), + numberOfChannels(0), + captureTimeMs(0) {} EncodedAudioFrameInfo(const EncodedAudioFrameInfo& rhs) - : codec(rhs.codec), - sampleRateHz(rhs.sampleRateHz), - samplesPerChannel(rhs.samplesPerChannel), - numberOfChannels(rhs.numberOfChannels), - advancedSettings(rhs.advancedSettings), - captureTimeMs(rhs.captureTimeMs) {} + : codec(rhs.codec), + sampleRateHz(rhs.sampleRateHz), + samplesPerChannel(rhs.samplesPerChannel), + numberOfChannels(rhs.numberOfChannels), + advancedSettings(rhs.advancedSettings), + captureTimeMs(rhs.captureTimeMs) {} /** * The audio codec: #AUDIO_CODEC_TYPE. */ @@ -1504,14 +1519,15 @@ struct EncodedAudioFrameInfo { * The definition of the AudioPcmDataInfo struct. */ struct AudioPcmDataInfo { - AudioPcmDataInfo() : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} + AudioPcmDataInfo() + : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} AudioPcmDataInfo(const AudioPcmDataInfo& rhs) - : samplesPerChannel(rhs.samplesPerChannel), - channelNum(rhs.channelNum), - samplesOut(rhs.samplesOut), - elapsedTimeMs(rhs.elapsedTimeMs), - ntpTimeMs(rhs.ntpTimeMs) {} + : samplesPerChannel(rhs.samplesPerChannel), + channelNum(rhs.channelNum), + samplesOut(rhs.samplesOut), + elapsedTimeMs(rhs.elapsedTimeMs), + ntpTimeMs(rhs.ntpTimeMs) {} /** * The sample count of the PCM data that you expect. @@ -1545,7 +1561,7 @@ enum H264PacketizeMode { /** * Single NAL unit mode. See RFC 6184. */ - SingleNalUnit, // Mode 0 - only single NALU allowed + SingleNalUnit, // Mode 0 - only single NALU allowed }; /** @@ -1588,64 +1604,63 @@ enum VIDEO_STREAM_TYPE { }; struct VideoSubscriptionOptions { - /** - * The type of the video stream to subscribe to. - * - * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality - * video stream. - */ - Optional type; - /** - * Whether to subscribe to encoded video data only: - * - `true`: Subscribe to encoded video data only. - * - `false`: (Default) Subscribe to decoded video data. 
- */ - Optional encodedFrameOnly; + /** + * The type of the video stream to subscribe to. + * + * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality + * video stream. + */ + Optional type; + /** + * Whether to subscribe to encoded video data only: + * - `true`: Subscribe to encoded video data only. + * - `false`: (Default) Subscribe to decoded video data. + */ + Optional encodedFrameOnly; - VideoSubscriptionOptions() {} + VideoSubscriptionOptions() {} }; - /** The maximum length of the user account. */ -enum MAX_USER_ACCOUNT_LENGTH_TYPE -{ +enum MAX_USER_ACCOUNT_LENGTH_TYPE { /** The maximum length of the user account is 256 bytes. */ MAX_USER_ACCOUNT_LENGTH = 256 }; /** - * The definition of the EncodedVideoFrameInfo struct, which contains the information of the external encoded video frame. + * The definition of the EncodedVideoFrameInfo struct, which contains the information of the + * external encoded video frame. */ struct EncodedVideoFrameInfo { EncodedVideoFrameInfo() - : uid(0), - codecType(VIDEO_CODEC_H264), - width(0), - height(0), - framesPerSecond(0), - frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), - rotation(VIDEO_ORIENTATION_0), - trackId(0), - captureTimeMs(0), - decodeTimeMs(0), - streamType(VIDEO_STREAM_HIGH), - presentationMs(-1) {} + : uid(0), + codecType(VIDEO_CODEC_H264), + width(0), + height(0), + framesPerSecond(0), + frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), + rotation(VIDEO_ORIENTATION_0), + trackId(0), + captureTimeMs(0), + decodeTimeMs(0), + streamType(VIDEO_STREAM_HIGH), + presentationMs(-1) {} EncodedVideoFrameInfo(const EncodedVideoFrameInfo& rhs) - : uid(rhs.uid), - codecType(rhs.codecType), - width(rhs.width), - height(rhs.height), - framesPerSecond(rhs.framesPerSecond), - frameType(rhs.frameType), - rotation(rhs.rotation), - trackId(rhs.trackId), - captureTimeMs(rhs.captureTimeMs), - decodeTimeMs(rhs.decodeTimeMs), - streamType(rhs.streamType), - presentationMs(rhs.presentationMs) {} + : uid(rhs.uid), + codecType(rhs.codecType), + width(rhs.width), + height(rhs.height), + framesPerSecond(rhs.framesPerSecond), + frameType(rhs.frameType), + rotation(rhs.rotation), + trackId(rhs.trackId), + captureTimeMs(rhs.captureTimeMs), + decodeTimeMs(rhs.decodeTimeMs), + streamType(rhs.streamType), + presentationMs(rhs.presentationMs) {} EncodedVideoFrameInfo& operator=(const EncodedVideoFrameInfo& rhs) { if (this == &rhs) return *this; @@ -1669,7 +1684,8 @@ struct EncodedVideoFrameInfo { */ uid_t uid; /** - * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is `VIDEO_CODEC_H265 (3)`. + * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is + * `VIDEO_CODEC_H265 (3)`. */ VIDEO_CODEC_TYPE codecType; /** @@ -1717,33 +1733,40 @@ struct EncodedVideoFrameInfo { }; /** -* Video compression preference. -*/ + * Video compression preference. + */ enum COMPRESSION_PREFERENCE { /** - * (Default) Low latency is preferred, usually used in real-time communication where low latency is the number one priority. + * (Default) SDK uses compression preference according to setVideoScenario API settings, real-time network state and other relevant data information. + * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then PREFER_QUALITY is used. If not, then PREFER_LOW_LATENCY is used. + * Also if network state has changed, SDK may change this parameter between PREFER_QUALITY and PREFER_LOW_LATENCY automatically to get the best QOE. + * We recommend using this option. 
*/ - PREFER_LOW_LATENCY, + PREFER_COMPRESSION_AUTO = -1, /** - * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps + * Prefer low latency, usually used in real-time communication where low latency is the number one priority. */ - PREFER_QUALITY, + PREFER_LOW_LATENCY = 0, + /** + * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps + */ + PREFER_QUALITY = 1, }; /** -* The video encoder type preference. -*/ + * The video encoder type preference. + */ enum ENCODING_PREFERENCE { /** - *Default . + *Default . */ PREFER_AUTO = -1, /** - * Software encoding. - */ + * Software encoding. + */ PREFER_SOFTWARE = 0, /** - * Hardware encoding + * Hardware encoding */ PREFER_HARDWARE = 1, }; @@ -1752,15 +1775,14 @@ enum ENCODING_PREFERENCE { * The definition of the AdvanceOptions struct. */ struct AdvanceOptions { - /** * The video encoder type preference.. */ ENCODING_PREFERENCE encodingPreference; /** - * Video compression preference. - */ + * Video compression preference. + */ COMPRESSION_PREFERENCE compressionPreference; /** @@ -1770,7 +1792,7 @@ struct AdvanceOptions { bool encodeAlpha; AdvanceOptions() : encodingPreference(PREFER_AUTO), - compressionPreference(PREFER_LOW_LATENCY), + compressionPreference(PREFER_COMPRESSION_AUTO), encodeAlpha(false) {} AdvanceOptions(ENCODING_PREFERENCE encoding_preference, @@ -1785,7 +1807,6 @@ struct AdvanceOptions { compressionPreference == rhs.compressionPreference && encodeAlpha == rhs.encodeAlpha; } - }; /** @@ -1818,6 +1839,30 @@ enum CAMERA_FORMAT_TYPE { }; #endif +enum VIDEO_MODULE_TYPE { + /** Video capture module */ + VIDEO_MODULE_CAPTURER = 0, + /** Video software encoder module */ + VIDEO_MODULE_SOFTWARE_ENCODER = 1, + /** Video hardware encoder module */ + VIDEO_MODULE_HARDWARE_ENCODER = 2, + /** Video software decoder module */ + VIDEO_MODULE_SOFTWARE_DECODER = 3, + /** Video hardware decoder module */ + VIDEO_MODULE_HARDWARE_DECODER = 4, + /** Video render module */ + VIDEO_MODULE_RENDERER = 5, +}; + +enum HDR_CAPABILITY { + /** The result of static check is not reliable, by defualt*/ + HDR_CAPABILITY_UNKNOWN = -1, + /** The module you query doesn't support HDR */ + HDR_CAPABILITY_UNSUPPORTED = 0, + /** The module you query supports HDR */ + HDR_CAPABILITY_SUPPORTED = 1, +}; + /** Supported codec type bit mask. */ enum CODEC_CAP_MASK { /** 0: No codec support. */ @@ -1840,7 +1885,9 @@ struct CodecCapLevels { VIDEO_CODEC_CAPABILITY_LEVEL hwDecodingLevel; VIDEO_CODEC_CAPABILITY_LEVEL swDecodingLevel; - CodecCapLevels(): hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} + CodecCapLevels() + : hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), + swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} }; /** The codec support information. */ @@ -1852,10 +1899,11 @@ struct CodecCapInfo { /** The codec capability level, estimated based on the device hardware.*/ CodecCapLevels codecLevels; - CodecCapInfo(): codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} + CodecCapInfo() : codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} }; -/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types. */ +/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types. + */ struct FocalLengthInfo { /** The camera direction. */ int cameraDirection; @@ -1882,21 +1930,22 @@ struct VideoEncoderConfiguration { /** * The bitrate (Kbps) of the video. 
* - * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond the - * proper range, the SDK automatically adjusts it to a value within the range. You can also choose - * from the following options: + * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond + * the proper range, the SDK automatically adjusts it to a value within the range. You can also + * choose from the following options: * - * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ between - * the Live Broadcast and Communication profiles: + * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ + * between the Live Broadcast and Communication profiles: * - In the Communication profile, the video bitrate is the same as the base bitrate. * - In the Live Broadcast profile, the video bitrate is twice the base bitrate. - * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the bitrate - * stays the same regardless of the profile. If you choose this mode for the Live Broadcast profile, - * the video frame rate may be lower than the set value. + * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the + * bitrate stays the same regardless of the profile. If you choose this mode for the Live + * Broadcast profile, the video frame rate may be lower than the set value. * - * Agora uses different video codecs for different profiles to optimize the user experience. For example, - * the communication profile prioritizes the smoothness while the live-broadcast profile prioritizes the - * video quality (a higher bitrate). Therefore, We recommend setting this parameter as #STANDARD_BITRATE. + * Agora uses different video codecs for different profiles to optimize the user experience. For + * example, the communication profile prioritizes the smoothness while the live-broadcast profile + * prioritizes the video quality (a higher bitrate). Therefore, We recommend setting this + * parameter as #STANDARD_BITRATE. * * | Resolution | Frame Rate (fps) | Base Bitrate (Kbps) | Live Bitrate (Kbps)| * |------------------------|------------------|---------------------|--------------------| @@ -1964,7 +2013,8 @@ struct VideoEncoderConfiguration { /** * The mirror mode is disabled by default - * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored before encoding. + * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored + * before encoding. 
*/ VIDEO_MIRROR_MODE_TYPE mirrorMode; @@ -1980,9 +2030,9 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(int width, int height, int f, int b, ORIENTATION_MODE m, VIDEO_MIRROR_MODE_TYPE mirror = VIDEO_MIRROR_MODE_DISABLED) : codecType(VIDEO_CODEC_NONE), dimensions(width, height), @@ -1990,19 +2040,19 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(const VideoEncoderConfiguration& config) - : codecType(config.codecType), - dimensions(config.dimensions), - frameRate(config.frameRate), - bitrate(config.bitrate), - minBitrate(config.minBitrate), - orientationMode(config.orientationMode), - degradationPreference(config.degradationPreference), - mirrorMode(config.mirrorMode), - advanceOptions(config.advanceOptions) {} + : codecType(config.codecType), + dimensions(config.dimensions), + frameRate(config.frameRate), + bitrate(config.bitrate), + minBitrate(config.minBitrate), + orientationMode(config.orientationMode), + degradationPreference(config.degradationPreference), + mirrorMode(config.mirrorMode), + advanceOptions(config.advanceOptions) {} VideoEncoderConfiguration() : codecType(VIDEO_CODEC_NONE), dimensions(FRAME_WIDTH_960, FRAME_HEIGHT_540), @@ -2010,9 +2060,9 @@ struct VideoEncoderConfiguration { bitrate(STANDARD_BITRATE), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(ORIENTATION_MODE_ADAPTIVE), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(VIDEO_MIRROR_MODE_DISABLED), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration& operator=(const VideoEncoderConfiguration& rhs) { if (this == &rhs) return *this; @@ -2040,9 +2090,9 @@ struct DataStreamConfig { * * When you set the data packet to synchronize with the audio, then if the data packet delay is * within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized - * audio packet is played out. Do not set this parameter as true if you need the receiver to receive - * the data packet immediately. Agora recommends that you set this parameter to `true` only when you - * need to implement specific functions, for example lyric synchronization. + * audio packet is played out. Do not set this parameter as true if you need the receiver to + * receive the data packet immediately. Agora recommends that you set this parameter to `true` + * only when you need to implement specific functions, for example lyric synchronization. */ bool syncWithAudio; /** @@ -2050,7 +2100,8 @@ struct DataStreamConfig { * - `true`: Guarantee that the receiver receives the data in the sent order. * - `false`: Do not guarantee that the receiver receives the data in the sent order. * - * Do not set this parameter as `true` if you need the receiver to receive the data packet immediately. + * Do not set this parameter as `true` if you need the receiver to receive the data packet + * immediately. 
*/ bool ordered; }; @@ -2060,16 +2111,16 @@ struct DataStreamConfig { */ enum SIMULCAST_STREAM_MODE { /* - * disable simulcast stream until receive request for enable simulcast stream by other broadcaster - */ + * disable simulcast stream until receive request for enable simulcast stream by other broadcaster + */ AUTO_SIMULCAST_STREAM = -1, /* - * disable simulcast stream - */ + * disable simulcast stream + */ DISABLE_SIMULCAST_STREAM = 0, /* - * always enable simulcast stream - */ + * always enable simulcast stream + */ ENABLE_SIMULCAST_STREAM = 1, }; @@ -2082,7 +2133,8 @@ struct SimulcastStreamConfig { */ VideoDimensions dimensions; /** - * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log level is 5. + * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log + * level is 5. */ int kBitrate; /** @@ -2187,28 +2239,31 @@ struct Rectangle { /** * The position and size of the watermark on the screen. * - * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and `widthRatio`: - * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which determines - * the distance from the upper left corner of the watermark to the upper left corner of the screen. - * The `widthRatio` determines the width of the watermark. + * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and + * `widthRatio`: + * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which + * determines the distance from the upper left corner of the watermark to the upper left corner of + * the screen. The `widthRatio` determines the width of the watermark. */ struct WatermarkRatio { /** * The x-coordinate of the upper left corner of the watermark. The horizontal position relative to - * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is the - * upper left corner of the watermark. The value range is [0.0,1.0], and the default value is 0. + * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is + * the upper left corner of the watermark. The value range is [0.0,1.0], and the default value is + * 0. */ float xRatio; /** - * The y-coordinate of the upper left corner of the watermark. The vertical position relative to the - * origin, where the upper left corner of the screen is the origin, and the y-coordinate is the upper - * left corner of the screen. The value range is [0.0,1.0], and the default value is 0. + * The y-coordinate of the upper left corner of the watermark. The vertical position relative to + * the origin, where the upper left corner of the screen is the origin, and the y-coordinate is + * the upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0. */ float yRatio; /** - * The width of the watermark. The SDK calculates the height of the watermark proportionally according - * to this parameter value to ensure that the enlarged or reduced watermark image is not distorted. - * The value range is [0,1], and the default value is 0, which means no watermark is displayed. + * The width of the watermark. The SDK calculates the height of the watermark proportionally + * according to this parameter value to ensure that the enlarged or reduced watermark image is not + * distorted. The value range is [0,1], and the default value is 0, which means no watermark is + * displayed. 
*/ float widthRatio; @@ -2247,10 +2302,10 @@ struct WatermarkOptions { WATERMARK_FIT_MODE mode; WatermarkOptions() - : visibleInPreview(true), - positionInLandscapeMode(0, 0, 0, 0), - positionInPortraitMode(0, 0, 0, 0), - mode(FIT_MODE_COVER_POSITION) {} + : visibleInPreview(true), + positionInLandscapeMode(0, 0, 0, 0), + positionInPortraitMode(0, 0, 0, 0), + mode(FIT_MODE_COVER_POSITION) {} }; /** @@ -2321,7 +2376,8 @@ struct RtcStats { * The app CPU usage (%). * @note * - The value of `cpuAppUsage` is always reported as 0 in the `onLeaveChannel` callback. - * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. + * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system + * limitations. */ double cpuAppUsage; /** @@ -2331,13 +2387,15 @@ struct RtcStats { * value = (100 - System Idle Progress in Task Manager)/100. * @note * - The value of `cpuTotalUsage` is always reported as 0 in the `onLeaveChannel` callback. - * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. + * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system + * limitations. */ double cpuTotalUsage; /** * The round-trip time delay from the client to the local router. - * @note On Android, to get `gatewayRtt`, ensure that you add the `android.permission.ACCESS_WIFI_STATE` - * permission after `` in the `AndroidManifest.xml` file in your project. + * @note On Android, to get `gatewayRtt`, ensure that you add the + * `android.permission.ACCESS_WIFI_STATE` permission after `` in the + * `AndroidManifest.xml` file in your project. */ int gatewayRtt; /** @@ -2414,39 +2472,39 @@ struct RtcStats { */ int rxPacketLossRate; RtcStats() - : duration(0), - txBytes(0), - rxBytes(0), - txAudioBytes(0), - txVideoBytes(0), - rxAudioBytes(0), - rxVideoBytes(0), - txKBitRate(0), - rxKBitRate(0), - rxAudioKBitRate(0), - txAudioKBitRate(0), - rxVideoKBitRate(0), - txVideoKBitRate(0), - lastmileDelay(0), - userCount(0), - cpuAppUsage(0.0), - cpuTotalUsage(0.0), - gatewayRtt(0), - memoryAppUsageRatio(0.0), - memoryTotalUsageRatio(0.0), - memoryAppUsageInKbytes(0), - connectTimeMs(0), - firstAudioPacketDuration(0), - firstVideoPacketDuration(0), - firstVideoKeyFramePacketDuration(0), - packetsBeforeFirstKeyFramePacket(0), - firstAudioPacketDurationAfterUnmute(0), - firstVideoPacketDurationAfterUnmute(0), - firstVideoKeyFramePacketDurationAfterUnmute(0), - firstVideoKeyFrameDecodedDurationAfterUnmute(0), - firstVideoKeyFrameRenderedDurationAfterUnmute(0), - txPacketLossRate(0), - rxPacketLossRate(0) {} + : duration(0), + txBytes(0), + rxBytes(0), + txAudioBytes(0), + txVideoBytes(0), + rxAudioBytes(0), + rxVideoBytes(0), + txKBitRate(0), + rxKBitRate(0), + rxAudioKBitRate(0), + txAudioKBitRate(0), + rxVideoKBitRate(0), + txVideoKBitRate(0), + lastmileDelay(0), + userCount(0), + cpuAppUsage(0.0), + cpuTotalUsage(0.0), + gatewayRtt(0), + memoryAppUsageRatio(0.0), + memoryTotalUsageRatio(0.0), + memoryAppUsageInKbytes(0), + connectTimeMs(0), + firstAudioPacketDuration(0), + firstVideoPacketDuration(0), + firstVideoKeyFramePacketDuration(0), + packetsBeforeFirstKeyFramePacket(0), + firstAudioPacketDurationAfterUnmute(0), + firstVideoPacketDurationAfterUnmute(0), + firstVideoKeyFramePacketDurationAfterUnmute(0), + firstVideoKeyFrameDecodedDurationAfterUnmute(0), + firstVideoKeyFrameRenderedDurationAfterUnmute(0), + txPacketLossRate(0), + rxPacketLossRate(0) {} }; /** @@ -2464,7 +2522,8 @@ enum 
CLIENT_ROLE_TYPE { }; /** - * Quality change of the local video in terms of target frame rate and target bit rate since last count. + * Quality change of the local video in terms of target frame rate and target bit rate since last + * count. */ enum QUALITY_ADAPT_INDICATION { /** @@ -2482,11 +2541,10 @@ enum QUALITY_ADAPT_INDICATION { }; /** - * The latency level of an audience member in interactive live streaming. This enum takes effect only - * when the user role is set to `CLIENT_ROLE_AUDIENCE`. + * The latency level of an audience member in interactive live streaming. This enum takes effect + * only when the user role is set to `CLIENT_ROLE_AUDIENCE`. */ -enum AUDIENCE_LATENCY_LEVEL_TYPE -{ +enum AUDIENCE_LATENCY_LEVEL_TYPE { /** * 1: Low latency. */ @@ -2500,15 +2558,14 @@ enum AUDIENCE_LATENCY_LEVEL_TYPE /** * The detailed options of a user. */ -struct ClientRoleOptions -{ +struct ClientRoleOptions { /** - * The latency level of an audience member in interactive live streaming. See `AUDIENCE_LATENCY_LEVEL_TYPE`. + * The latency level of an audience member in interactive live streaming. See + * `AUDIENCE_LATENCY_LEVEL_TYPE`. */ AUDIENCE_LATENCY_LEVEL_TYPE audienceLatencyLevel; - ClientRoleOptions() - : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} + ClientRoleOptions() : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} }; /** @@ -2542,8 +2599,8 @@ enum EXPERIENCE_POOR_REASON { */ WIRELESS_SIGNAL_POOR = 4, /** - * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each other. - * As a result, audio transmission quality is undermined. + * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each + * other. As a result, audio transmission quality is undermined. */ WIFI_BLUETOOTH_COEXIST = 8, }; @@ -2552,18 +2609,18 @@ enum EXPERIENCE_POOR_REASON { * Audio AINS mode */ enum AUDIO_AINS_MODE { - /** - * AINS mode with soft suppression level. - */ - AINS_MODE_BALANCED = 0, - /** - * AINS mode with high suppression level. - */ - AINS_MODE_AGGRESSIVE = 1, - /** - * AINS mode with high suppression level and ultra-low-latency - */ - AINS_MODE_ULTRALOWLATENCY = 2 + /** + * AINS mode with soft suppression level. + */ + AINS_MODE_BALANCED = 0, + /** + * AINS mode with high suppression level. + */ + AINS_MODE_AGGRESSIVE = 1, + /** + * AINS mode with high suppression level and ultra-low-latency + */ + AINS_MODE_ULTRALOWLATENCY = 2 }; /** @@ -2574,9 +2631,10 @@ enum AUDIO_PROFILE_TYPE { * 0: The default audio profile. * - For the Communication profile: * - Windows: A sample rate of 16 kHz, audio encoding, mono, and a bitrate of up to 16 Kbps. - * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 Kbps. - * of up to 16 Kbps. - * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate of up to 64 Kbps. + * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 + * Kbps. of up to 16 Kbps. + * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate + * of up to 64 Kbps. */ AUDIO_PROFILE_DEFAULT = 0, /** @@ -2590,8 +2648,8 @@ enum AUDIO_PROFILE_TYPE { /** * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` - * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. 
+ * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set + * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_STANDARD_STEREO = 3, /** @@ -2601,8 +2659,8 @@ enum AUDIO_PROFILE_TYPE { /** * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` - * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. + * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set + * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO = 5, /** @@ -2634,7 +2692,8 @@ enum AUDIO_SCENARIO_TYPE { */ AUDIO_SCENARIO_CHATROOM = 5, /** - * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low latency. + * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low + * latency. */ AUDIO_SCENARIO_CHORUS = 7, /** @@ -2651,19 +2710,19 @@ enum AUDIO_SCENARIO_TYPE { * The format of the video frame. */ struct VideoFormat { - OPTIONAL_ENUM_SIZE_T { - /** The maximum value (px) of the width. */ - kMaxWidthInPixels = 3840, - /** The maximum value (px) of the height. */ - kMaxHeightInPixels = 2160, - /** The maximum value (fps) of the frame rate. */ - kMaxFps = 60, + OPTIONAL_ENUM_SIZE_T{ + /** The maximum value (px) of the width. */ + kMaxWidthInPixels = 3840, + /** The maximum value (px) of the height. */ + kMaxHeightInPixels = 2160, + /** The maximum value (fps) of the frame rate. */ + kMaxFps = 60, }; /** * The width (px) of the video. */ - int width; // Number of pixels. + int width; // Number of pixels. /** * The height (px) of the video. */ @@ -2687,9 +2746,7 @@ struct VideoFormat { bool operator==(const VideoFormat& fmt) const { return width == fmt.width && height == fmt.height && fps == fmt.fps; } - bool operator!=(const VideoFormat& fmt) const { - return !operator==(fmt); - } + bool operator!=(const VideoFormat& fmt) const { return !operator==(fmt); } }; /** @@ -2742,7 +2799,6 @@ enum SCREEN_SCENARIO_TYPE { SCREEN_SCENARIO_RDC = 4, }; - /** * The video application scenario type. */ @@ -2759,6 +2815,10 @@ enum VIDEO_APPLICATION_SCENARIO_TYPE { * 2: Video Call Scenario. This scenario is used to optimize the video experience in video application, like 1v1 video call. */ APPLICATION_SCENARIO_1V1 = 2, + /** + * 3: Live Show Scenario. This scenario is used to optimize the video experience in video live show. + */ + APPLICATION_SCENARIO_LIVESHOW = 3, }; /** @@ -2789,7 +2849,8 @@ enum VIDEO_QOE_PREFERENCE_TYPE { */ enum CAPTURE_BRIGHTNESS_LEVEL_TYPE { /** -1: The SDK does not detect the brightness level of the video image. - * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next callback. + * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next + * callback. */ CAPTURE_BRIGHTNESS_LEVEL_INVALID = -1, /** 0: The brightness level of the video image is normal. @@ -2804,20 +2865,20 @@ enum CAPTURE_BRIGHTNESS_LEVEL_TYPE { }; enum CAMERA_STABILIZATION_MODE { - /** The camera stabilization mode is disabled. - */ + /** The camera stabilization mode is disabled. + */ CAMERA_STABILIZATION_MODE_OFF = -1, - /** device choose stabilization mode automatically. - */ + /** device choose stabilization mode automatically. 
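For reference only, a minimal sketch of validating a capture format against the kMax* limits declared in the reflowed VideoFormat block above; it assumes AgoraBase.h is on the include path and is not part of this patch.

#include "AgoraBase.h"  // assumed include path; declares agora::rtc::VideoFormat

// Checks a requested format against the kMaxWidthInPixels / kMaxHeightInPixels / kMaxFps
// limits declared in VideoFormat above.
bool IsWithinVideoFormatLimits(const agora::rtc::VideoFormat& fmt) {
  using agora::rtc::VideoFormat;
  return fmt.width <= static_cast<int>(VideoFormat::kMaxWidthInPixels) &&
         fmt.height <= static_cast<int>(VideoFormat::kMaxHeightInPixels) &&
         fmt.fps <= static_cast<int>(VideoFormat::kMaxFps);
}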
+ */ CAMERA_STABILIZATION_MODE_AUTO = 0, - /** stabilization mode level 1. - */ + /** stabilization mode level 1. + */ CAMERA_STABILIZATION_MODE_LEVEL_1 = 1, - /** stabilization mode level 2. - */ + /** stabilization mode level 2. + */ CAMERA_STABILIZATION_MODE_LEVEL_2 = 2, - /** stabilization mode level 3. - */ + /** stabilization mode level 3. + */ CAMERA_STABILIZATION_MODE_LEVEL_3 = 3, /** The maximum level of the camera stabilization mode. */ @@ -2855,7 +2916,8 @@ enum LOCAL_AUDIO_STREAM_REASON { */ LOCAL_AUDIO_STREAM_REASON_OK = 0, /** - * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the channel. + * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the + * channel. */ LOCAL_AUDIO_STREAM_REASON_FAILURE = 1, /** @@ -2968,7 +3030,7 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_NOT_FOUND = 8, /** - * 9: (macOS only) The video capture device currently in use is disconnected (such as being + * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as being * unplugged). */ LOCAL_VIDEO_STREAM_REASON_DEVICE_DISCONNECTED = 9, @@ -2983,8 +3045,8 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_INTERRUPT = 14, /** - * 15: (Android only) The device may need to be shut down and restarted to restore camera function, - * or there may be a persistent hardware problem. + * 15: (Android only) The device may need to be shut down and restarted to restore camera + * function, or there may be a persistent hardware problem. */ LOCAL_VIDEO_STREAM_REASON_DEVICE_FATAL_ERROR = 15, /** @@ -3021,20 +3083,21 @@ enum LOCAL_VIDEO_STREAM_REASON { /** 22: No permision to capture screen. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_NO_PERMISSION = 22, /** - * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the screen - * sharing process, resulting in performance degradation. However, the screen sharing process itself is - * functioning normally. + * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the + * screen sharing process, resulting in performance degradation. However, the screen sharing + * process itself is functioning normally. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_AUTO_FALLBACK = 24, - /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the desktop. */ + /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the + desktop. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_HIDDEN = 25, /** 26: (Windows only) The local screen capture window is recovered from its hidden state. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 26, /** 27: (Windows and macOS only) The window is recovered from miniminzed */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_MINIMIZED = 27, - /** + /** * 28: The screen capture paused. - * + * * Common scenarios for reporting this error code: * - When the desktop switch to the secure desktop such as UAC dialog or the Winlogon desktop on * Windows platform, the SDK reports this error code. @@ -3050,41 +3113,41 @@ enum LOCAL_VIDEO_STREAM_REASON { /** * Remote audio states. */ -enum REMOTE_AUDIO_STATE -{ +enum REMOTE_AUDIO_STATE { /** * 0: The remote audio is in the default state. 
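As an illustrative sketch outside this header, application code might map the platform-specific LOCAL_VIDEO_STREAM_REASON values reworded above to log messages; only enumerators that appear in this hunk are used.

#include "AgoraBase.h"  // assumed include path

// Short log strings for a few of the reason codes documented above.
const char* DescribeLocalVideoStreamReason(agora::rtc::LOCAL_VIDEO_STREAM_REASON reason) {
  using namespace agora::rtc;
  switch (reason) {
    case LOCAL_VIDEO_STREAM_REASON_DEVICE_DISCONNECTED:
      return "capture device disconnected (macOS and Windows)";
    case LOCAL_VIDEO_STREAM_REASON_DEVICE_FATAL_ERROR:
      return "camera fatal error (Android); a device restart may be needed";
    case LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_HIDDEN:
      return "shared window is hidden (Windows)";
    case LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN:
      return "shared window recovered from hidden state (Windows)";
    default:
      return "see LOCAL_VIDEO_STREAM_REASON for the remaining values";
  }
}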
The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_LOCAL_MUTED(3)`, `REMOTE_AUDIO_REASON_REMOTE_MUTED(5)`, or * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE(7)`. */ - REMOTE_AUDIO_STATE_STOPPED = 0, // Default state, audio is started or remote user disabled/muted audio stream + REMOTE_AUDIO_STATE_STOPPED = + 0, // Default state, audio is started or remote user disabled/muted audio stream /** * 1: The first remote audio packet is received. */ REMOTE_AUDIO_STATE_STARTING = 1, // The first audio frame packet has been received /** - * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the case of - * `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or + * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the + * case of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6)`. */ - REMOTE_AUDIO_STATE_DECODING = 2, // The first remote audio frame has been decoded or fronzen state ends + REMOTE_AUDIO_STATE_DECODING = + 2, // The first remote audio frame has been decoded or fronzen state ends /** * 3: The remote audio is frozen. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION(1)`. */ - REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue + REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue /** * 4: The remote audio fails to start. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_INTERNAL(0)`. */ - REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed + REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed }; /** * Reasons for the remote audio state change. */ -enum REMOTE_AUDIO_STATE_REASON -{ +enum REMOTE_AUDIO_STATE_REASON { /** * 0: The SDK reports this reason when the video state changes. */ @@ -3138,7 +3201,8 @@ enum REMOTE_VIDEO_STATE { /** * 0: The remote video is in the default state. The SDK reports this state in the case of * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED (3)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED (5)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK (8)`. + * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK + * (8)`. */ REMOTE_VIDEO_STATE_STOPPED = 0, /** @@ -3146,9 +3210,10 @@ enum REMOTE_VIDEO_STATE { */ REMOTE_VIDEO_STATE_STARTING = 1, /** - * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case of - * `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`, `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`. + * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the + * case of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`, + * `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`, + * or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`. */ REMOTE_VIDEO_STATE_DECODING = 2, /** 3: The remote video is frozen, probably due to @@ -3165,36 +3230,36 @@ enum REMOTE_VIDEO_STATE { */ enum REMOTE_VIDEO_STATE_REASON { /** - * 0: The SDK reports this reason when the video state changes. - */ + * 0: The SDK reports this reason when the video state changes. + */ REMOTE_VIDEO_STATE_REASON_INTERNAL = 0, /** - * 1: Network congestion. 
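A small example, not part of this header, of how the remote-audio state machine documented above might drive application UI; DECODING is the only state in which the stream is actually rendering.

#include "AgoraBase.h"  // assumed include path

// True only while the remote audio stream is decoded and playing normally.
// FROZEN (3) usually means network congestion and FAILED (4) an internal error.
bool IsRemoteAudioPlaying(agora::rtc::REMOTE_AUDIO_STATE state) {
  return state == agora::rtc::REMOTE_AUDIO_STATE_DECODING;
}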
- */ + * 1: Network congestion. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION = 1, /** - * 2: Network recovery. - */ + * 2: Network recovery. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY = 2, /** - * 3: The local user stops receiving the remote video stream or disables the video module. - */ + * 3: The local user stops receiving the remote video stream or disables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED = 3, /** - * 4: The local user resumes receiving the remote video stream or enables the video module. - */ + * 4: The local user resumes receiving the remote video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED = 4, /** - * 5: The remote user stops sending the video stream or disables the video module. - */ + * 5: The remote user stops sending the video stream or disables the video module. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED = 5, /** - * 6: The remote user resumes sending the video stream or enables the video module. - */ + * 6: The remote user resumes sending the video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED = 6, /** - * 7: The remote user leaves the channel. - */ + * 7: The remote user leaves the channel. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE = 7, /** 8: The remote audio-and-video stream falls back to the audio-only stream * due to poor network conditions. @@ -3210,7 +3275,7 @@ enum REMOTE_VIDEO_STATE_REASON { /** (Internal use only) 11: The remote video stream type change to high stream type */ REMOTE_VIDEO_STATE_REASON_VIDEO_STREAM_TYPE_CHANGE_TO_HIGH = 11, - /** (iOS only) 12: The app of the remote user is in background. + /** (iOS only) 12: The app of the remote user is in background. */ REMOTE_VIDEO_STATE_REASON_SDK_IN_BACKGROUND = 12, @@ -3248,10 +3313,14 @@ enum REMOTE_USER_STATE { */ struct VideoTrackInfo { VideoTrackInfo() - : isLocal(false), ownerUid(0), trackId(0), channelId(OPTIONAL_NULLPTR) - , codecType(VIDEO_CODEC_H265) - , encodedFrameOnly(false), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY) - , observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} + : isLocal(false), + ownerUid(0), + trackId(0), + channelId(OPTIONAL_NULLPTR), + codecType(VIDEO_CODEC_H265), + encodedFrameOnly(false), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} /** * Whether the video track is local or remote. * - true: The video track is local. @@ -3291,7 +3360,8 @@ struct VideoTrackInfo { }; /** - * The downscale level of the remote video stream . The higher the downscale level, the more the video downscales. + * The downscale level of the remote video stream . The higher the downscale level, the more the + * video downscales. */ enum REMOTE_VIDEO_DOWNSCALE_LEVEL { /** @@ -3340,7 +3410,8 @@ struct AudioVolumeInfo { * @note * - The `vad` parameter does not report the voice activity status of remote users. In a remote * user's callback, the value of `vad` is always 1. - * - To use this parameter, you must set `reportVad` to true when calling `enableAudioVolumeIndication`. + * - To use this parameter, you must set `reportVad` to true when calling + * `enableAudioVolumeIndication`. */ unsigned int vad; /** @@ -3464,7 +3535,8 @@ enum VIDEO_CODEC_PROFILE_TYPE { */ VIDEO_CODEC_PROFILE_BASELINE = 66, /** - * 77: Main video codec profile. Generally used in mainstream electronics, such as MP4 players, portable video players, PSP, and iPads. + * 77: Main video codec profile. 
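A hedged sketch that uses the `vad` flag documented above to pick an active speaker; the `uid` and `volume` members of AudioVolumeInfo are assumed from the rest of the struct, which this hunk does not show.

#include "AgoraBase.h"  // assumed include path

// Picks the loudest speaker from a volume-indication report. `uid` and `volume` are assumed
// members of AudioVolumeInfo; only `vad` appears in the hunk above.
agora::rtc::uid_t PickLoudestSpeaker(const agora::rtc::AudioVolumeInfo* speakers,
                                     unsigned int count) {
  agora::rtc::uid_t loudest = 0;
  unsigned int best = 0;
  for (unsigned int i = 0; i != count; ++i) {
    // For remote users `vad` is always reported as 1, so this filter mainly matters for the
    // local entry; `reportVad` must be enabled in enableAudioVolumeIndication.
    if (speakers[i].vad == 1 && speakers[i].volume > best) {
      best = speakers[i].volume;
      loudest = speakers[i].uid;
    }
  }
  return loudest;
}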
Generally used in mainstream electronics, such as MP4 players, + * portable video players, PSP, and iPads. */ VIDEO_CODEC_PROFILE_MAIN = 77, /** @@ -3473,7 +3545,6 @@ enum VIDEO_CODEC_PROFILE_TYPE { VIDEO_CODEC_PROFILE_HIGH = 100, }; - /** * Self-defined audio codec profile. */ @@ -3495,8 +3566,7 @@ enum AUDIO_CODEC_PROFILE_TYPE { /** * Local audio statistics. */ -struct LocalAudioStats -{ +struct LocalAudioStats { /** * The number of audio channels. */ @@ -3514,7 +3584,8 @@ struct LocalAudioStats */ int internalCodec; /** - * The packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies. + * The packet loss rate (%) from the local client to the Agora server before applying the + * anti-packet loss strategies. */ unsigned short txPacketLossRate; /** @@ -3535,35 +3606,45 @@ struct LocalAudioStats int aecEstimatedDelay; }; - /** * States of the Media Push. */ enum RTMP_STREAM_PUBLISH_STATE { /** - * 0: The Media Push has not started or has ended. This state is also triggered after you remove a RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`. + * 0: The Media Push has not started or has ended. This state is also triggered after you remove a + * RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`. */ RTMP_STREAM_PUBLISH_STATE_IDLE = 0, /** - * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is triggered after you call the `addPublishStreamUrl` method. + * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is + * triggered after you call the `addPublishStreamUrl` method. */ RTMP_STREAM_PUBLISH_STATE_CONNECTING = 1, /** - * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS streaming and returns this state. + * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS + * streaming and returns this state. */ RTMP_STREAM_PUBLISH_STATE_RUNNING = 2, /** - * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state. - * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2) returns. - * - If the streaming does not resume within 60 seconds or server errors occur, #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling the `removePublishStreamUrl` and `addPublishStreamUrl` methods. + * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the + * streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this + * state. + * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2) + * returns. + * - If the streaming does not resume within 60 seconds or server errors occur, + * #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling + * the `removePublishStreamUrl` and `addPublishStreamUrl` methods. */ RTMP_STREAM_PUBLISH_STATE_RECOVERING = 3, /** - * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS streaming again. + * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error + * information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS + * streaming again. 
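A rough health check built on the LocalAudioStats members shown above; the 5% and 200 ms thresholds are illustrative, not SDK-defined, and the helper is not part of this header.

#include "AgoraBase.h"  // assumed include path

bool LocalAudioLooksHealthy(const agora::rtc::LocalAudioStats& stats) {
  // txPacketLossRate is measured before Agora's anti-packet-loss strategies are applied,
  // so a small value here does not necessarily mean audible degradation.
  return stats.txPacketLossRate < 5 && stats.aecEstimatedDelay < 200;
}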
*/ RTMP_STREAM_PUBLISH_STATE_FAILURE = 4, /** - * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is triggered after you call the `removePublishStreamUrl` method. + * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is + * triggered after you call the `removePublishStreamUrl` method. */ RTMP_STREAM_PUBLISH_STATE_DISCONNECTING = 5, }; @@ -3577,8 +3658,10 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_OK = 0, /** - * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, - * the SDK returns this error. Check whether you set the parameters in the `setLiveTranscoding` method properly. + * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to + * configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, the SDK + * returns this error. Check whether you set the parameters in the `setLiveTranscoding` method + * properly. */ RTMP_STREAM_PUBLISH_REASON_INVALID_ARGUMENT = 1, /** @@ -3586,11 +3669,13 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_ENCRYPTED_STREAM_NOT_ALLOWED = 2, /** - * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish the streaming again. + * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish + * the streaming again. */ RTMP_STREAM_PUBLISH_REASON_CONNECTION_TIMEOUT = 3, /** - * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to publish the streaming again. + * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to + * publish the streaming again. */ RTMP_STREAM_PUBLISH_REASON_INTERNAL_SERVER_ERROR = 4, /** @@ -3614,17 +3699,23 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_STREAM_NOT_FOUND = 9, /** - * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format is correct. + * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL + * format is correct. */ RTMP_STREAM_PUBLISH_REASON_FORMAT_NOT_SUPPORTED = 10, /** - * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your application code logic. + * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check + * your application code logic. */ - RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = + 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h /** - * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the transcoding configuration in a scenario where there is streaming without transcoding. Check your application code logic. + * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the + * transcoding configuration in a scenario where there is streaming without transcoding. Check + * your application code logic. 
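Per the state machine above, RECOVERING is retried by the SDK itself for up to 60 seconds, so only FAILURE needs the application to publish the URL again; a minimal sketch under that reading.

#include "AgoraBase.h"  // assumed include path

// Decides whether the app should re-publish the Media Push URL after a state change.
bool ShouldRepublishMediaPush(agora::rtc::RTMP_STREAM_PUBLISH_STATE state) {
  return state == agora::rtc::RTMP_STREAM_PUBLISH_STATE_FAILURE;
}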
*/ - RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = + 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h /** * 14: Errors occurred in the host's network. */ @@ -3632,11 +3723,13 @@ enum RTMP_STREAM_PUBLISH_REASON { /** * 15: Your App ID does not have permission to use the CDN live streaming function. */ - RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = + 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h /** invalid privilege. */ RTMP_STREAM_PUBLISH_REASON_INVALID_PRIVILEGE = 16, /** - * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop streaming, the SDK returns this value. + * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop + * streaming, the SDK returns this value. */ RTMP_STREAM_UNPUBLISH_REASON_OK = 100, }; @@ -3644,11 +3737,13 @@ enum RTMP_STREAM_PUBLISH_REASON { /** Events during the RTMP or RTMPS streaming. */ enum RTMP_STREAMING_EVENT { /** - * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS stream. + * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS + * stream. */ RTMP_STREAMING_EVENT_FAILED_LOAD_IMAGE = 1, /** - * 2: The streaming URL is already being used for CDN live streaming. If you want to start new streaming, use a new streaming URL. + * 2: The streaming URL is already being used for CDN live streaming. If you want to start new + * streaming, use a new streaming URL. */ RTMP_STREAMING_EVENT_URL_ALREADY_IN_USE = 2, /** @@ -3666,15 +3761,18 @@ enum RTMP_STREAMING_EVENT { */ typedef struct RtcImage { /** - *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter is 1024 bytes. + *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter + *is 1024 bytes. */ const char* url; /** - * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the + * video frame as the origin). */ int x; /** - * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the + * video frame as the origin). */ int y; /** @@ -3705,18 +3803,21 @@ typedef struct RtcImage { /** * The configuration for advanced features of the RTMP or RTMPS streaming with transcoding. * - * If you want to enable the advanced features of streaming with transcoding, contact support@agora.io. + * If you want to enable the advanced features of streaming with transcoding, contact + * support@agora.io. */ struct LiveStreamAdvancedFeature { LiveStreamAdvancedFeature() : featureName(OPTIONAL_NULLPTR), opened(false) {} - LiveStreamAdvancedFeature(const char* feat_name, bool open) : featureName(feat_name), opened(open) {} + LiveStreamAdvancedFeature(const char* feat_name, bool open) + : featureName(feat_name), opened(open) {} /** The advanced feature for high-quality video with a lower bitrate. 
*/ // static const char* LBHQ = "lbhq"; /** The advanced feature for the optimized video encoder. */ // static const char* VEO = "veo"; /** - * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized video encoder). + * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized + * video encoder). */ const char* featureName; @@ -3726,15 +3827,15 @@ struct LiveStreamAdvancedFeature { * - `false`: (Default) Disable the advanced feature. */ bool opened; -} ; +}; /** * Connection state types. */ -enum CONNECTION_STATE_TYPE -{ +enum CONNECTION_STATE_TYPE { /** - * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of the following phases: + * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of + * the following phases: * - The initial state before calling the `joinChannel` method. * - The app calls the `leaveChannel` method. */ @@ -3786,11 +3887,15 @@ struct TranscodingUser { */ uid_t uid; /** - * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, width], where width is the `width` set in `LiveTranscoding`. + * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, width], where width is the + * `width` set in `LiveTranscoding`. */ int x; /** - * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, height], where height is the `height` set in `LiveTranscoding`. + * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, height], where height is the + * `height` set in `LiveTranscoding`. */ int y; /** @@ -3807,7 +3912,7 @@ struct TranscodingUser { * - 100: The host's video is the top layer. * * If the value is beyond this range, the SDK reports the error code `ERR_INVALID_ARGUMENT`. - */ + */ int zOrder; /** * The transparency of the host's video. The value range is [0.0, 1.0]. @@ -3816,28 +3921,29 @@ struct TranscodingUser { */ double alpha; /** - * The audio channel used by the host's audio in the output audio. The default value is 0, and the value range is [0, 5]. - * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on the upstream of the host. - * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `4`: The host's audio uses the BL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the host's audio. + * The audio channel used by the host's audio in the output audio. 
The default value is 0, and the + * value range is [0, 5]. + * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on + * the upstream of the host. + * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `4`: The host's audio uses the BL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the + * host's audio. * * @note If the value is not `0`, a special player is required. */ int audioChannel; TranscodingUser() - : uid(0), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - audioChannel(0) {} + : uid(0), x(0), y(0), width(0), height(0), zOrder(0), alpha(1.0), audioChannel(0) {} }; /** @@ -3860,10 +3966,12 @@ struct LiveTranscoding { int height; /** Bitrate of the CDN live output video stream. The default value is 400 Kbps. - Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper range, the SDK automatically adapts it to a value within the range. + Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper + range, the SDK automatically adapts it to a value within the range. */ int videoBitrate; - /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15 fps, and the value range is (0,30]. + /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15 + fps, and the value range is (0,30]. @note The Agora server adjusts any value over 30 to 30. */ @@ -3884,7 +3992,8 @@ struct LiveTranscoding { @note If you set this parameter to other values, Agora adjusts it to the default value of 100. */ VIDEO_CODEC_PROFILE_TYPE videoCodecProfile; - /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example, 0xFFB6C1 (light pink). The default value is 0x000000 (black). + /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example, + * 0xFFB6C1 (light pink). The default value is 0x000000 (black). */ unsigned int backgroundColor; /** Video codec profile types for Media Push. See VIDEO_CODEC_TYPE_FOR_STREAM. */ @@ -3893,10 +4002,12 @@ struct LiveTranscoding { * The value range is [0, 17]. */ unsigned int userCount; - /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 transcoding users in a Media Push channel. See `TranscodingUser`. + /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 + * transcoding users in a Media Push channel. See `TranscodingUser`. */ TranscodingUser* transcodingUsers; - /** Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to the CDN live client. Maximum length: 4096 Bytes. + /** Reserved property. 
Extra user-defined information to send SEI for the H.264/H.265 video stream + to the CDN live client. Maximum length: 4096 Bytes. For more information on SEI frame, see [SEI-related questions](https://docs.agora.io/en/faq/sei). */ @@ -3907,31 +4018,38 @@ struct LiveTranscoding { const char* metadata; /** The watermark on the live video. The image format needs to be PNG. See `RtcImage`. - You can add one watermark, or add multiple watermarks using an array. This parameter is used with `watermarkCount`. + You can add one watermark, or add multiple watermarks using an array. This parameter is used with + `watermarkCount`. */ RtcImage* watermark; /** - * The number of watermarks on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with `watermark`. + * The number of watermarks on the live video. The total number of watermarks and background + * images can range from 0 to 10. This parameter is used with `watermark`. */ unsigned int watermarkCount; - /** The number of background images on the live video. The image format needs to be PNG. See `RtcImage`. + /** The number of background images on the live video. The image format needs to be PNG. See + * `RtcImage`. * - * You can add a background image or use an array to add multiple background images. This parameter is used with `backgroundImageCount`. + * You can add a background image or use an array to add multiple background images. This + * parameter is used with `backgroundImageCount`. */ RtcImage* backgroundImage; /** - * The number of background images on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with `backgroundImage`. + * The number of background images on the live video. The total number of watermarks and + * background images can range from 0 to 10. This parameter is used with `backgroundImage`. */ unsigned int backgroundImageCount; /** The audio sampling rate (Hz) of the output media stream. See #AUDIO_SAMPLE_RATE_TYPE. */ AUDIO_SAMPLE_RATE_TYPE audioSampleRate; - /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the highest value is 128. + /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the + * highest value is 128. */ int audioBitrate; - /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) audio channels. Special players are required if you choose 3, 4, or 5. + /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) + * audio channels. Special players are required if you choose 3, 4, or 5. * - 1: (Default) Mono. * - 2: Stereo. * - 3: Three audio channels. @@ -3942,7 +4060,8 @@ struct LiveTranscoding { /** Audio codec profile type for Media Push. See #AUDIO_CODEC_PROFILE_TYPE. */ AUDIO_CODEC_PROFILE_TYPE audioCodecProfile; - /** Advanced features of the RTMP or RTMPS streaming with transcoding. See LiveStreamAdvancedFeature. + /** Advanced features of the RTMP or RTMPS streaming with transcoding. See + * LiveStreamAdvancedFeature. 
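A sketch of a two-host side-by-side Media Push layout built from the TranscodingUser and LiveTranscoding members shown in these hunks; the `width` member of LiveTranscoding is assumed alongside the `height` the hunk shows, and the caller owns the users array for as long as the configuration is in use.

#include "AgoraBase.h"  // assumed include path

// `users` points at an array of at least two default-constructed TranscodingUser entries.
agora::rtc::LiveTranscoding MakeTwoHostLayout(agora::rtc::TranscodingUser* users,
                                              agora::rtc::uid_t hostA,
                                              agora::rtc::uid_t hostB) {
  users[0].uid = hostA;  users[0].x = 0;    users[0].y = 0;
  users[0].width = 360;  users[0].height = 640;
  users[1].uid = hostB;  users[1].x = 360;  users[1].y = 0;
  users[1].width = 360;  users[1].height = 640;

  agora::rtc::LiveTranscoding transcoding;  // other fields keep the constructor defaults
  transcoding.width = 720;                  // assumed member, paired with the height shown above
  transcoding.height = 640;
  transcoding.videoBitrate = 800;           // Kbps; the SDK clamps out-of-range values
  transcoding.userCount = 2;
  transcoding.transcodingUsers = users;
  return transcoding;
}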
*/ LiveStreamAdvancedFeature* advancedFeatures; @@ -3959,7 +4078,7 @@ struct LiveTranscoding { videoCodecProfile(VIDEO_CODEC_PROFILE_HIGH), backgroundColor(0x000000), videoCodecType(VIDEO_CODEC_H264_FOR_STREAM), - userCount(0), + userCount(0), transcodingUsers(OPTIONAL_NULLPTR), transcodingExtraInfo(OPTIONAL_NULLPTR), metadata(OPTIONAL_NULLPTR), @@ -3985,12 +4104,14 @@ struct TranscodingVideoStream { VIDEO_SOURCE_TYPE sourceType; /** * The ID of the remote user. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `VIDEO_SOURCE_REMOTE`. + * @note Use this parameter only when the source type of the video for the video mixing on the + * local client is `VIDEO_SOURCE_REMOTE`. */ uid_t remoteUserUid; /** * The URL of the image. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `RTC_IMAGE`. + * @note Use this parameter only when the source type of the video for the video mixing on the + * local client is `RTC_IMAGE`. */ const char* imageUrl; /** @@ -3998,11 +4119,13 @@ struct TranscodingVideoStream { */ int mediaPlayerId; /** - * The horizontal displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. + * The horizontal displacement of the top-left corner of the video for the video mixing on the + * client relative to the top-left corner (origin) of the canvas for this video mixing. */ int x; /** - * The vertical displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. + * The vertical displacement of the top-left corner of the video for the video mixing on the + * client relative to the top-left corner (origin) of the canvas for this video mixing. */ int y; /** @@ -4014,13 +4137,16 @@ struct TranscodingVideoStream { */ int height; /** - * The number of the layer to which the video for the video mixing on the local client belongs. The value range is [0,100]. + * The number of the layer to which the video for the video mixing on the local client belongs. + * The value range is [0,100]. * - 0: (Default) The layer is at the bottom. * - 100: The layer is at the top. */ int zOrder; /** - * The transparency of the video for the video mixing on the local client. The value range is [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is opaque. + * The transparency of the video for the video mixing on the local client. The value range is + * [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is + * opaque. */ double alpha; /** @@ -4032,16 +4158,16 @@ struct TranscodingVideoStream { bool mirror; TranscodingVideoStream() - : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), - remoteUserUid(0), - imageUrl(OPTIONAL_NULLPTR), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - mirror(false) {} + : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + remoteUserUid(0), + imageUrl(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + zOrder(0), + alpha(1.0), + mirror(false) {} }; /** @@ -4057,17 +4183,25 @@ struct LocalTranscoderConfiguration { */ TranscodingVideoStream* videoInputStreams; /** - * The encoding configuration of the mixed video stream after the video mixing on the local client. See VideoEncoderConfiguration. 
+ * The encoding configuration of the mixed video stream after the video mixing on the local + * client. See VideoEncoderConfiguration. */ VideoEncoderConfiguration videoOutputConfiguration; /** - * Whether to use the timestamp when the primary camera captures the video frame as the timestamp of the mixed video frame. - * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed video frame. - * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed video frame. Instead, use the timestamp when the mixed video frame is constructed. + * Whether to use the timestamp when the primary camera captures the video frame as the timestamp + * of the mixed video frame. + * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. + * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. Instead, use the timestamp when the mixed video frame is constructed. */ bool syncWithPrimaryCamera; - LocalTranscoderConfiguration() : streamCount(0), videoInputStreams(OPTIONAL_NULLPTR), videoOutputConfiguration(), syncWithPrimaryCamera(true) {} + LocalTranscoderConfiguration() + : streamCount(0), + videoInputStreams(OPTIONAL_NULLPTR), + videoOutputConfiguration(), + syncWithPrimaryCamera(true) {} }; enum VIDEO_TRANSCODER_ERROR { @@ -4097,6 +4231,77 @@ enum VIDEO_TRANSCODER_ERROR { VT_ERR_INTERNAL = 20 }; + +/** + * The audio streams for the video mixing on the local client. + */ +struct MixedAudioStream { + /** + * The source type of audio for the audio mixing on the local client. See #AUDIO_SOURCE_TYPE. + */ + AUDIO_SOURCE_TYPE sourceType; + /** + * The ID of the remote user. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + uid_t remoteUserUid; + /** + * The channel ID of the remote user. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + const char* channelId; + /** + * The track ID of the local track. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + track_id_t trackId; + + MixedAudioStream(AUDIO_SOURCE_TYPE source) + : sourceType(source), + remoteUserUid(0), + channelId(NULL), + trackId(-1) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, track_id_t track) + : sourceType(source), + trackId(track) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel) + : sourceType(source), + remoteUserUid(uid), + channelId(channel) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel, track_id_t track) + : sourceType(source), + remoteUserUid(uid), + channelId(channel), + trackId(track) {} + +}; + +/** + * The configuration of the audio mixing on the local client. + */ +struct LocalAudioMixerConfiguration { + /** + * The number of the audio streams for the audio mixing on the local client. + */ + unsigned int streamCount; + /** + * The source of the streams to mixed; + */ + MixedAudioStream* audioInputStreams; + + /** + * Whether to use the timestamp follow the local mic's audio frame. + * - true: (Default) Use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. + * - false: Do not use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. Instead, use the timestamp when the mixed audio frame is constructed. 
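For illustration, a sketch that composes the primary camera with a media player stream through the TranscodingVideoStream and LocalTranscoderConfiguration members shown above; VIDEO_SOURCE_MEDIA_PLAYER is taken from the wider SDK enum and is assumed to be visible here.

#include "AgoraBase.h"  // assumed include path

// `streams` points at an array of at least two TranscodingVideoStream entries owned by the
// caller for as long as the configuration is in use.
agora::rtc::LocalTranscoderConfiguration MakeLocalMix(
    agora::rtc::TranscodingVideoStream* streams, int mediaPlayerId) {
  streams[0] = agora::rtc::TranscodingVideoStream();  // camera is the default source type
  streams[0].width = 640;  streams[0].height = 360;  streams[0].zOrder = 1;

  streams[1] = agora::rtc::TranscodingVideoStream();
  streams[1].sourceType = agora::rtc::VIDEO_SOURCE_MEDIA_PLAYER;  // assumed enumerator
  streams[1].mediaPlayerId = mediaPlayerId;
  streams[1].x = 640;  streams[1].width = 640;  streams[1].height = 360;  streams[1].zOrder = 2;

  agora::rtc::LocalTranscoderConfiguration config;  // syncWithPrimaryCamera defaults to true
  config.streamCount = 2;
  config.videoInputStreams = streams;
  return config;
}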
+ */ + bool syncWithLocalMic; + + LocalAudioMixerConfiguration() : streamCount(0), syncWithLocalMic(true) {} +}; + /** * Configurations of the last-mile network test. */ @@ -4115,12 +4320,14 @@ struct LastmileProbeConfig { */ bool probeDownlink; /** - * The expected maximum sending bitrate (bps) of the local user. The value range is [100000, 5000000]. We recommend setting this parameter - * according to the bitrate value set by `setVideoEncoderConfiguration`. + * The expected maximum sending bitrate (bps) of the local user. The value range is [100000, + * 5000000]. We recommend setting this parameter according to the bitrate value set by + * `setVideoEncoderConfiguration`. */ unsigned int expectedUplinkBitrate; /** - * The expected maximum receiving bitrate (bps) of the local user. The value range is [100000,5000000]. + * The expected maximum receiving bitrate (bps) of the local user. The value range is + * [100000,5000000]. */ unsigned int expectedDownlinkBitrate; }; @@ -4134,11 +4341,13 @@ enum LASTMILE_PROBE_RESULT_STATE { */ LASTMILE_PROBE_RESULT_COMPLETE = 1, /** - * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not available due to limited test resources. + * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not + * available due to limited test resources. */ LASTMILE_PROBE_RESULT_INCOMPLETE_NO_BWE = 2, /** - * 3: The last-mile network probe test is not carried out, probably due to poor network conditions. + * 3: The last-mile network probe test is not carried out, probably due to poor network + * conditions. */ LASTMILE_PROBE_RESULT_UNAVAILABLE = 3 }; @@ -4160,9 +4369,7 @@ struct LastmileProbeOneWayResult { */ unsigned int availableBandwidth; - LastmileProbeOneWayResult() : packetLossRate(0), - jitter(0), - availableBandwidth(0) {} + LastmileProbeOneWayResult() : packetLossRate(0), jitter(0), availableBandwidth(0) {} }; /** @@ -4186,16 +4393,13 @@ struct LastmileProbeResult { */ unsigned int rtt; - LastmileProbeResult() - : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), - rtt(0) {} + LastmileProbeResult() : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), rtt(0) {} }; /** * Reasons causing the change of the connection state. */ -enum CONNECTION_CHANGED_REASON_TYPE -{ +enum CONNECTION_CHANGED_REASON_TYPE { /** * 0: The SDK is connecting to the server. */ @@ -4209,11 +4413,13 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_INTERRUPTED = 2, /** - * 3: The connection between the SDK and the server is banned by the server. This error occurs when the user is kicked out of the channel by the server. + * 3: The connection between the SDK and the server is banned by the server. This error occurs + * when the user is kicked out of the channel by the server. */ CONNECTION_CHANGED_BANNED_BY_SERVER = 3, /** - * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 minutes, this error occurs and the SDK stops reconnecting to the channel. + * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 + * minutes, this error occurs and the SDK stops reconnecting to the channel. */ CONNECTION_CHANGED_JOIN_FAILED = 4, /** @@ -4225,13 +4431,17 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_INVALID_APP_ID = 6, /** - * 7: The connection fails because the channel name is not valid. Please rejoin the channel with a valid channel name. + * 7: The connection fails because the channel name is not valid. 
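A brief example, not from this header, that fills in a last-mile probe configuration with values inside the ranges documented above; `probeUplink` is assumed as the companion member of the `probeDownlink` shown in the hunk.

#include "AgoraBase.h"  // assumed include path

agora::rtc::LastmileProbeConfig MakeDefaultProbeConfig() {
  agora::rtc::LastmileProbeConfig config;
  config.probeUplink = true;               // assumed member, mirrors probeDownlink
  config.probeDownlink = true;
  config.expectedUplinkBitrate = 2000000;  // bps, inside the documented [100000, 5000000] range
  config.expectedDownlinkBitrate = 2000000;
  return config;
}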
Please rejoin the channel with a + * valid channel name. */ CONNECTION_CHANGED_INVALID_CHANNEL_NAME = 7, /** * 8: The connection fails because the token is not valid. Typical reasons include: - * - The App Certificate for the project is enabled in Agora Console, but you do not use a token when joining the channel. If you enable the App Certificate, you must use a token to join the channel. - * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the `uid` passed in when generating the token. + * - The App Certificate for the project is enabled in Agora Console, but you do not use a token + * when joining the channel. If you enable the App Certificate, you must use a token to join the + * channel. + * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the + * `uid` passed in when generating the token. */ CONNECTION_CHANGED_INVALID_TOKEN = 8, /** @@ -4240,8 +4450,10 @@ enum CONNECTION_CHANGED_REASON_TYPE CONNECTION_CHANGED_TOKEN_EXPIRED = 9, /** * 10: The connection is rejected by the server. Typical reasons include: - * - The user is already in the channel and still calls a method, for example, `joinChannel`, to join the channel. Stop calling this method to clear this error. - * - The user tries to join the channel when conducting a pre-call test. The user needs to call the channel after the call test ends. + * - The user is already in the channel and still calls a method, for example, `joinChannel`, to + * join the channel. Stop calling this method to clear this error. + * - The user tries to join the channel when conducting a pre-call test. The user needs to call + * the channel after the call test ends. */ CONNECTION_CHANGED_REJECTED_BY_SERVER = 10, /** @@ -4253,11 +4465,13 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_RENEW_TOKEN = 12, /** - * 13: The IP address of the client has changed, possibly because the network type, IP address, or port has been changed. + * 13: The IP address of the client has changed, possibly because the network type, IP address, or + * port has been changed. */ CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED = 13, /** - * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The connection state changes to CONNECTION_STATE_RECONNECTING. + * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The + * connection state changes to CONNECTION_STATE_RECONNECTING. */ CONNECTION_CHANGED_KEEP_ALIVE_TIMEOUT = 14, /** @@ -4354,11 +4568,13 @@ enum WLACC_SUGGEST_ACTION { */ WLACC_SUGGEST_ACTION_CONNECT_SSID = 1, /** - * The user is advised to check whether the AP supports 5G band and enable 5G band (the aciton link is attached), or purchases an AP that supports 5G. AP does not support 5G band. + * The user is advised to check whether the AP supports 5G band and enable 5G band (the aciton + * link is attached), or purchases an AP that supports 5G. AP does not support 5G band. */ WLACC_SUGGEST_ACTION_CHECK_5G = 2, /** - * The user is advised to change the SSID of the 2.4G or 5G band (the aciton link is attached). The SSID of the 2.4G band AP is the same as that of the 5G band. + * The user is advised to change the SSID of the 2.4G or 5G band (the aciton link is attached). + * The SSID of the 2.4G band AP is the same as that of the 5G band. 
*/ WLACC_SUGGEST_ACTION_MODIFY_SSID = 3, }; @@ -4447,8 +4663,9 @@ struct VideoCanvas { uid_t uid; /** - * The uid of video stream composing the video stream from transcoder which will be drawn on this video canvas. - */ + * The uid of video stream composing the video stream from transcoder which will be drawn on this + * video canvas. + */ uid_t subviewUid; /** * Video display window. @@ -4467,7 +4684,7 @@ struct VideoCanvas { * The video mirror mode. See \ref VIDEO_MIRROR_MODE_TYPE "VIDEO_MIRROR_MODE_TYPE". * The default value is VIDEO_MIRROR_MODE_AUTO. * @note - * - For the mirror mode of the local video view: + * - For the mirror mode of the local video view: * If you use a front camera, the SDK enables the mirror mode by default; * if you use a rear camera, the SDK disables the mirror mode by default. * - For the remote user: The mirror mode is disabled by default. @@ -4484,14 +4701,14 @@ struct VideoCanvas { */ VIDEO_SOURCE_TYPE sourceType; /** - * The media player id of AgoraMediaPlayer. It should set this parameter when the + * The media player id of AgoraMediaPlayer. It should set this parameter when the * sourceType is VIDEO_SOURCE_MEDIA_PLAYER to show the video that AgoraMediaPlayer is playing. * You can get this value by calling the method \ref getMediaPlayerId(). */ int mediaPlayerId; /** - * If you want to display a certain part of a video frame, you can set - * this value to crop the video frame to show. + * If you want to display a certain part of a video frame, you can set + * this value to crop the video frame to show. * The default value is empty(that is, if it has zero width or height), which means no cropping. */ Rectangle cropArea; @@ -4508,62 +4725,115 @@ struct VideoCanvas { media::base::VIDEO_MODULE_POSITION position; VideoCanvas() - : uid(0), subviewUid(0), view(NULL), backgroundColor(0x00000000), renderMode(media::base::RENDER_MODE_HIDDEN), mirrorMode(VIDEO_MIRROR_MODE_AUTO), - setupMode(VIDEO_VIEW_SETUP_REPLACE), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(NULL), + backgroundColor(0x00000000), + renderMode(media::base::RENDER_MODE_HIDDEN), + mirrorMode(VIDEO_MIRROR_MODE_AUTO), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt) - : uid(0), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u) - : uid(u), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), 
enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} - - VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, uid_t subu) - : uid(u), subviewUid(subu), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(u), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} + + VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, + uid_t subu) + : uid(u), + subviewUid(subu), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} }; /** Image enhancement options. */ struct BeautyOptions { /** The contrast level. - */ + */ enum LIGHTENING_CONTRAST_LEVEL { - /** Low contrast level. */ - LIGHTENING_CONTRAST_LOW = 0, - /** (Default) Normal contrast level. */ - LIGHTENING_CONTRAST_NORMAL = 1, - /** High contrast level. */ - LIGHTENING_CONTRAST_HIGH = 2, + /** Low contrast level. */ + LIGHTENING_CONTRAST_LOW = 0, + /** (Default) Normal contrast level. */ + LIGHTENING_CONTRAST_NORMAL = 1, + /** High contrast level. */ + LIGHTENING_CONTRAST_HIGH = 2, }; - /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL. - */ + /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the + * greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL. + */ LIGHTENING_CONTRAST_LEVEL lighteningContrastLevel; - /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of whitening. */ + /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. + * The greater the value, the greater the degree of whitening. */ float lighteningLevel; - /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of skin grinding. - */ + /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, + * the greater the degree of skin grinding. + */ float smoothnessLevel; - /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the rosy degree. - */ + /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The + * larger the value, the greater the rosy degree. + */ float rednessLevel; - /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the sharpening degree. - */ + /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. + * The larger the value, the greater the sharpening degree. 
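A sketch of building a canvas for a remote user's video with the four-argument VideoCanvas constructor reformatted above; RENDER_MODE_FIT and VIDEO_MIRROR_MODE_DISABLED are assumed SDK enumerators that do not appear in this hunk, and `nativeView` stands for whatever platform view handle the app renders into.

#include "AgoraBase.h"  // assumed include path

agora::rtc::VideoCanvas MakeRemoteCanvas(void* nativeView, agora::rtc::uid_t remoteUid) {
  // All remaining members keep the defaults listed in the constructors above.
  return agora::rtc::VideoCanvas(nativeView,
                                 agora::media::base::RENDER_MODE_FIT,     // assumed enumerator
                                 agora::rtc::VIDEO_MIRROR_MODE_DISABLED,  // assumed enumerator
                                 remoteUid);
}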
+ */ float sharpnessLevel; - BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness, float redness, float sharpness) : lighteningContrastLevel(contrastLevel), lighteningLevel(lightening), smoothnessLevel(smoothness), rednessLevel(redness), sharpnessLevel(sharpness) {} + BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness, + float redness, float sharpness) + : lighteningContrastLevel(contrastLevel), + lighteningLevel(lightening), + smoothnessLevel(smoothness), + rednessLevel(redness), + sharpnessLevel(sharpness) {} - BeautyOptions() : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL), lighteningLevel(0), smoothnessLevel(0), rednessLevel(0), sharpnessLevel(0) {} + BeautyOptions() + : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL), + lighteningLevel(0), + smoothnessLevel(0), + rednessLevel(0), + sharpnessLevel(0) {} }; /** Face shape area options. This structure defines options for facial adjustments on different facial areas. @@ -4645,12 +4915,45 @@ struct FaceShapeBeautyOptions { FaceShapeBeautyOptions() : shapeStyle(FACE_SHAPE_BEAUTY_STYLE_FEMALE), styleIntensity(50) {} }; +/** Filter effect options. This structure defines options for filter effect. + * + * @since v4.4.1 + */ +struct FilterEffectOptions { + /** + * The local absolute path of the custom 3D Cube path. Only cube format is supported. + * The cube file must strictly comply with the Cube LUT Specification; otherwise, the filter effects will not take effect. + * + * The following is an example of the Cube file format. The cube file starts with `LUT_3D_SIZE`, which indicates the cube size. In filter effects, the cube size is limited to 32. + + * LUT_3D_SIZE 32 + * 0.0039215689 0 0.0039215682 + * 0.0086021447 0.0037950677 0 + * 0.0728652592 0.0039215689 0 + * ... + * + * The SDK provides a built-in cube named `built_in_whiten.cube` for whitening. To use this cube, specify the path to `built_in_whiten_filter` + */ + const char * path; + + /** + * The intensity of specified filter effect. The value ranges from 0.0 to 1.0. The default value is 0.5. The greater the value, the stronger the intensity of the filter. + */ + float strength; + + FilterEffectOptions(const char * lut3dPath, float filterStrength) : path(lut3dPath), strength(filterStrength) {} + + FilterEffectOptions() : path(OPTIONAL_NULLPTR), strength(0.5) {} +}; + struct LowlightEnhanceOptions { /** * The low-light enhancement mode. */ enum LOW_LIGHT_ENHANCE_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light enhancement feature according to the ambient light to compensate for the lighting level or prevent overexposure, as necessary. */ + /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light + enhancement feature according to the ambient light to compensate for the lighting level or + prevent overexposure, as necessary. */ LOW_LIGHT_ENHANCE_AUTO = 0, /** Manual mode. Users need to enable or disable the low-light enhancement feature manually. */ LOW_LIGHT_ENHANCE_MANUAL = 1, @@ -4660,11 +4963,14 @@ struct LowlightEnhanceOptions { */ enum LOW_LIGHT_ENHANCE_LEVEL { /** - * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness, details, and noise of the video image. The performance consumption is moderate, the processing speed is moderate, and the overall video quality is optimal. + * 0: (Default) Promotes video quality during low-light enhancement. 
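As a usage illustration, mild BeautyOptions values paired with the built-in whitening cube documented for FilterEffectOptions above; all numeric values are illustrative, not recommendations.

#include "AgoraBase.h"  // assumed include path

void MakeEnhancementOptions(agora::rtc::BeautyOptions* beauty,
                            agora::rtc::FilterEffectOptions* filter) {
  *beauty = agora::rtc::BeautyOptions(agora::rtc::BeautyOptions::LIGHTENING_CONTRAST_NORMAL,
                                      0.3f /*lightening*/, 0.3f /*smoothness*/,
                                      0.1f /*redness*/, 0.2f /*sharpness*/);
  // Per the FilterEffectOptions comment above, the SDK ships a built-in whitening cube that is
  // addressed through this path.
  *filter = agora::rtc::FilterEffectOptions("built_in_whiten_filter", 0.5f);
}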
It processes the + * brightness, details, and noise of the video image. The performance consumption is moderate, + * the processing speed is moderate, and the overall video quality is optimal. */ LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY = 0, /** - * Promotes performance during low-light enhancement. It processes the brightness and details of the video image. The processing speed is faster. + * Promotes performance during low-light enhancement. It processes the brightness and details of + * the video image. The processing speed is faster. */ LOW_LIGHT_ENHANCE_LEVEL_FAST = 1, }; @@ -4677,9 +4983,11 @@ struct LowlightEnhanceOptions { */ LOW_LIGHT_ENHANCE_LEVEL level; - LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) : mode(lowlightMode), level(lowlightLevel) {} + LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) + : mode(lowlightMode), level(lowlightLevel) {} - LowlightEnhanceOptions() : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} + LowlightEnhanceOptions() + : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} }; /** * The video noise reduction options. @@ -4690,7 +4998,8 @@ struct VideoDenoiserOptions { /** The video noise reduction mode. */ enum VIDEO_DENOISER_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction feature according to the ambient light. */ + /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise + reduction feature according to the ambient light. */ VIDEO_DENOISER_AUTO = 0, /** Manual mode. Users need to enable or disable the video noise reduction feature manually. */ VIDEO_DENOISER_MANUAL = 1, @@ -4700,21 +5009,20 @@ struct VideoDenoiserOptions { */ enum VIDEO_DENOISER_LEVEL { /** - * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances performance consumption and video noise reduction quality. - * The performance consumption is moderate, the video noise reduction speed is moderate, and the overall video quality is optimal. + * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances + * performance consumption and video noise reduction quality. The performance consumption is + * moderate, the video noise reduction speed is moderate, and the overall video quality is + * optimal. */ VIDEO_DENOISER_LEVEL_HIGH_QUALITY = 0, /** - * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes reducing performance consumption over video noise reduction quality. - * The performance consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect (shadows trailing behind moving objects) in the processed video, Agora recommends that you use `FAST` when the camera is fixed. + * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes + * reducing performance consumption over video noise reduction quality. The performance + * consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable + * shadowing effect (shadows trailing behind moving objects) in the processed video, Agora + * recommends that you use `FAST` when the camera is fixed. */ VIDEO_DENOISER_LEVEL_FAST = 1, - /** - * Enhanced video noise reduction. `STRENGTH` prioritizes video noise reduction quality over reducing performance consumption. 
- * The performance consumption is higher, the video noise reduction speed is slower, and the video noise reduction quality is better. - * If `HIGH_QUALITY` is not enough for your video noise reduction needs, you can use `STRENGTH`. - */ - VIDEO_DENOISER_LEVEL_STRENGTH = 2, }; /** The video noise reduction mode. See #VIDEO_DENOISER_MODE. */ @@ -4724,7 +5032,8 @@ struct VideoDenoiserOptions { */ VIDEO_DENOISER_LEVEL level; - VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) : mode(denoiserMode), level(denoiserLevel) {} + VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) + : mode(denoiserMode), level(denoiserLevel) {} VideoDenoiserOptions() : mode(VIDEO_DENOISER_AUTO), level(VIDEO_DENOISER_LEVEL_HIGH_QUALITY) {} }; @@ -4734,17 +5043,24 @@ struct VideoDenoiserOptions { * @since v4.0.0 */ struct ColorEnhanceOptions { - /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value, which means no color enhancement is applied to the video. The higher the value, the higher the level of color enhancement. + /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value, + * which means no color enhancement is applied to the video. The higher the value, the higher the + * level of color enhancement. */ float strengthLevel; - /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone protection. The higher the value, the higher the level of skin tone protection. - * The default value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be significantly distorted, so you need to set the level of skin tone protection; when the level of skin tone protection is higher, the color enhancement effect can be slightly reduced. - * Therefore, to get the best color enhancement effect, Agora recommends that you adjust `strengthLevel` and `skinProtectLevel` to get the most appropriate values. + /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone + * protection. The higher the value, the higher the level of skin tone protection. The default + * value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be + * significantly distorted, so you need to set the level of skin tone protection; when the level + * of skin tone protection is higher, the color enhancement effect can be slightly reduced. + * Therefore, to get the best color enhancement effect, Agora recommends that you adjust + * `strengthLevel` and `skinProtectLevel` to get the most appropriate values. */ float skinProtectLevel; - ColorEnhanceOptions(float stength, float skinProtect) : strengthLevel(stength), skinProtectLevel(skinProtect) {} + ColorEnhanceOptions(float stength, float skinProtect) + : strengthLevel(stength), skinProtectLevel(skinProtect) {} ColorEnhanceOptions() : strengthLevel(0), skinProtectLevel(1) {} }; @@ -4768,12 +5084,12 @@ struct VirtualBackgroundSource { * The background source is a file in PNG or JPG format. */ BACKGROUND_IMG = 2, - /** + /** * The background source is the blurred original video frame. * */ BACKGROUND_BLUR = 3, - /** - * The background source is a file in MP4, AVI, MKV, FLV format. + /** + * The background source is a file in MP4, AVI, MKV, FLV format. * */ BACKGROUND_VIDEO = 4, }; @@ -4781,11 +5097,14 @@ struct VirtualBackgroundSource { /** The degree of blurring applied to the background source. 
*/ enum BACKGROUND_BLUR_DEGREE { - /** 1: The degree of blurring applied to the custom background image is low. The user can almost see the background clearly. */ + /** 1: The degree of blurring applied to the custom background image is low. The user can almost + see the background clearly. */ BLUR_DEGREE_LOW = 1, - /** 2: The degree of blurring applied to the custom background image is medium. It is difficult for the user to recognize details in the background. */ + /** 2: The degree of blurring applied to the custom background image is medium. It is difficult + for the user to recognize details in the background. */ BLUR_DEGREE_MEDIUM = 2, - /** 3: (Default) The degree of blurring applied to the custom background image is high. The user can barely see any distinguishing features in the background. */ + /** 3: (Default) The degree of blurring applied to the custom background image is high. The user + can barely see any distinguishing features in the background. */ BLUR_DEGREE_HIGH = 3, }; @@ -4794,34 +5113,41 @@ struct VirtualBackgroundSource { BACKGROUND_SOURCE_TYPE background_source_type; /** - * The color of the custom background image. The format is a hexadecimal integer defined by RGB, without the # sign, - * such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which signifies white. The value range - * is [0x000000,0xFFFFFF]. If the value is invalid, the SDK replaces the original background image with a white - * background image. + * The color of the custom background image. The format is a hexadecimal integer defined by RGB, + * without the # sign, such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which + * signifies white. The value range is [0x000000,0xFFFFFF]. If the value is invalid, the SDK + * replaces the original background image with a white background image. * - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_COLOR`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_COLOR`. */ unsigned int color; /** - * The local absolute path of the custom background image. PNG and JPG formats are supported. If the path is invalid, - * the SDK replaces the original background image with a white background image. + * The local absolute path of the custom background image. PNG and JPG formats are supported. If + * the path is invalid, the SDK replaces the original background image with a white background + * image. * - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_IMG`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_IMG`. */ const char* source; /** The degree of blurring applied to the custom background image. See BACKGROUND_BLUR_DEGREE. - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_BLUR`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_BLUR`. 
*/ BACKGROUND_BLUR_DEGREE blur_degree; - VirtualBackgroundSource() : background_source_type(BACKGROUND_COLOR), color(0xffffff), source(OPTIONAL_NULLPTR), blur_degree(BLUR_DEGREE_HIGH) {} + VirtualBackgroundSource() + : background_source_type(BACKGROUND_COLOR), + color(0xffffff), + source(OPTIONAL_NULLPTR), + blur_degree(BLUR_DEGREE_HIGH) {} }; struct SegmentationProperty { - - enum SEG_MODEL_TYPE { + enum SEG_MODEL_TYPE { SEG_MODEL_AI = 1, SEG_MODEL_GREEN = 2 @@ -4831,34 +5157,33 @@ struct SegmentationProperty { float greenCapacity; - - SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5){} + SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5) {} }; /** The type of custom audio track -*/ + */ enum AUDIO_TRACK_TYPE { - /** + /** * -1: Invalid audio track */ AUDIO_TRACK_INVALID = -1, - /** + /** * 0: Mixable audio track - * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid), + * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid), * and SDK will mix these tracks into one audio track automatically. * However, compare to direct audio track, mixable track might cause extra 30ms+ delay. */ AUDIO_TRACK_MIXABLE = 0, /** * 1: Direct audio track - * You can only push one direct (non-mixable) audio track into one RTC connection(channel id + uid). - * Compare to mixable stream, you can have lower lantency using direct audio track. + * You can only push one direct (non-mixable) audio track into one RTC connection(channel id + + * uid). Compare to mixable stream, you can have lower lantency using direct audio track. */ AUDIO_TRACK_DIRECT = 1, }; /** The configuration of custom audio track -*/ + */ struct AudioTrackConfig { /** * Enable local playback, enabled by default @@ -4866,9 +5191,14 @@ struct AudioTrackConfig { * false: Do not enable local playback */ bool enableLocalPlayback; + /** + * Whether to enable APM (AEC/ANS/AGC) processing when the trackType is AUDIO_TRACK_DIRECT. + * false: (Default) Do not enable APM processing. + * true: Enable APM processing. + */ + bool enableAudioProcessing; - AudioTrackConfig() - : enableLocalPlayback(true) {} + AudioTrackConfig() : enableLocalPlayback(true),enableAudioProcessing(false) {} }; /** @@ -4915,11 +5245,12 @@ enum VOICE_BEAUTIFIER_PRESET { CHAT_BEAUTIFIER_VITALITY = 0x01010300, /** * Singing beautifier effect. - * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding voice and add a reverberation effect - * that sounds like singing in a small room. Agora recommends not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process - * a female-sounding voice; otherwise, you may experience vocal distortion. - * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can beautify a male- or - * female-sounding voice and add a reverberation effect. + * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding + * voice and add a reverberation effect that sounds like singing in a small room. Agora recommends + * not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process a female-sounding voice; + * otherwise, you may experience vocal distortion. + * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can + * beautify a male- or female-sounding voice and add a reverberation effect. */ SINGING_BEAUTIFIER = 0x01020100, /** A more vigorous voice. 
@@ -4949,8 +5280,9 @@ enum VOICE_BEAUTIFIER_PRESET { /** * A ultra-high quality voice, which makes the audio clearer and restores more details. * - To achieve better audio effect quality, Agora recommends that you call `setAudioProfile` - * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` - * and `scenario` to `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`. + * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` and `scenario` to + * `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`. * - If you have an audio capturing device that can already restore audio details to a high * degree, Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may * over-restore audio details, and you may not hear the anticipated voice effect. @@ -4960,7 +5292,9 @@ enum VOICE_BEAUTIFIER_PRESET { /** Preset voice effects. * - * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using the following presets: + * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile` + * to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using + * the following presets: * * - `ROOM_ACOUSTICS_KTV` * - `ROOM_ACOUSTICS_VOCAL_CONCERT` @@ -5008,8 +5342,8 @@ enum AUDIO_EFFECT_PRESET { */ ROOM_ACOUSTICS_ETHEREAL = 0x02010700, /** A 3D voice effect that makes the voice appear to be moving around the user. The default cycle - * period of the 3D voice effect is 10 seconds. To change the cycle period, call `setAudioEffectParameters` - * after this method. + * period of the 3D voice effect is 10 seconds. To change the cycle period, call + * `setAudioEffectParameters` after this method. * * @note * - Before using this preset, set the `profile` parameter of `setAudioProfile` to @@ -5031,12 +5365,12 @@ enum AUDIO_EFFECT_PRESET { */ ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND = 0x02010900, /** The voice effect for chorus. - * + * * @note: To achieve better audio effect quality, Agora recommends calling \ref * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before * setting this enumerator. - */ + */ ROOM_ACOUSTICS_CHORUS = 0x02010D00, /** A middle-aged man's voice. * @@ -5047,14 +5381,14 @@ enum AUDIO_EFFECT_PRESET { VOICE_CHANGER_EFFECT_UNCLE = 0x02020100, /** A senior man's voice. * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_OLDMAN = 0x02020200, /** A boy's voice. * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_BOY = 0x02020300, /** A young woman's voice. @@ -5066,8 +5400,8 @@ enum AUDIO_EFFECT_PRESET { VOICE_CHANGER_EFFECT_SISTER = 0x02020400, /** A girl's voice. 
* - * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_GIRL = 0x02020500, /** The voice of Pig King, a character in Journey to the West who has a voice like a growling @@ -5092,8 +5426,8 @@ enum AUDIO_EFFECT_PRESET { */ STYLE_TRANSFORMATION_POPULAR = 0x02030200, /** A pitch correction effect that corrects the user's pitch based on the pitch of the natural C - * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust - * the basic mode of tuning and the pitch of the main tone. + * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust + * the basic mode of tuning and the pitch of the main tone. */ PITCH_CORRECTION = 0x02040100, @@ -5108,16 +5442,20 @@ enum VOICE_CONVERSION_PRESET { /** Turn off voice conversion and use the original voice. */ VOICE_CONVERSION_OFF = 0x00000000, - /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to + * process a female-sounding voice. */ VOICE_CHANGER_NEUTRAL = 0x03010100, - /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a + * female-sounding voice. */ VOICE_CHANGER_SWEET = 0x03010200, - /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. */ VOICE_CHANGER_SOLID = 0x03010300, - /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. */ VOICE_CHANGER_BASS = 0x03010400, /** A voice like a cartoon character. @@ -5224,9 +5562,9 @@ struct ScreenCaptureParameters { */ VideoDimensions dimensions; /** - * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen stream. - * The frame rate (fps) of the shared region. The default value is 5. We do not recommend setting - * this to a value greater than 15. + * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen + * stream. The frame rate (fps) of the shared region. The default value is 5. We do not recommend + * setting this to a value greater than 15. */ int frameRate; /** @@ -5241,52 +5579,109 @@ struct ScreenCaptureParameters { */ bool captureMouseCursor; /** - * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method to share it: + * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method + * to share it: * - `true`: Bring the window to the front. * - `false`: (Default) Do not bring the window to the front. - */ + */ bool windowFocus; /** - * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start screen sharing, - * you can use this parameter to block a specified window. 
When calling `updateScreenCaptureParameters` to update - * screen sharing configurations, you can use this parameter to dynamically block the specified windows during - * screen sharing. + * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start + * screen sharing, you can use this parameter to block a specified window. When calling + * `updateScreenCaptureParameters` to update screen sharing configurations, you can use this + * parameter to dynamically block the specified windows during screen sharing. */ - view_t *excludeWindowList; + view_t* excludeWindowList; /** * The number of windows to be blocked. */ int excludeWindowCount; /** The width (px) of the border. Defaults to 0, and the value range is [0,50]. - * - */ + * + */ int highLightWidth; /** The color of the border in RGBA format. The default value is 0xFF8CBF26. - * - */ + * + */ unsigned int highLightColor; /** Whether to place a border around the shared window or screen: - * - true: Place a border. - * - false: (Default) Do not place a border. - * - * @note When you share a part of a window or screen, the SDK places a border around the entire window or screen if you set `enableHighLight` as true. - * - */ + * - true: Place a border. + * - false: (Default) Do not place a border. + * + * @note When you share a part of a window or screen, the SDK places a border around the entire + * window or screen if you set `enableHighLight` as true. + * + */ bool enableHighLight; ScreenCaptureParameters() - : dimensions(1920, 1080), frameRate(5), bitrate(STANDARD_BITRATE), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(1920, 1080), + frameRate(5), + bitrate(STANDARD_BITRATE), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(const VideoDimensions& d, int f, int b) - : dimensions(d), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(d), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false){} + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int width, int height, int f, int b, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), 
excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, view_t* ex, int cnt) + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t* ex, + int cnt) + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} }; /** @@ -5294,15 +5689,18 @@ struct ScreenCaptureParameters { */ enum AUDIO_RECORDING_QUALITY_TYPE { /** - * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of recording. + * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_LOW = 0, /** - * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes of recording. + * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_MEDIUM = 1, /** - * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes of recording. + * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 + * minutes of recording. */ AUDIO_RECORDING_QUALITY_HIGH = 2, /** @@ -5334,16 +5732,16 @@ enum AUDIO_FILE_RECORDING_TYPE { */ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION { /** - * 1: Only records the audio of the local user. - */ + * 1: Only records the audio of the local user. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD = 1, /** - * 2: Only records the audio of all remote users. - */ + * 2: Only records the audio of all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK = 2, /** - * 3: Records the mixed audio of the local and all remote users. - */ + * 3: Records the mixed audio of the local and all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED = 3, }; @@ -5352,7 +5750,8 @@ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION { */ struct AudioRecordingConfiguration { /** - * The absolute path (including the filename extensions) of the recording file. For example: `C:\music\audio.mp4`. + * The absolute path (including the filename extensions) of the recording file. For example: + * `C:\music\audio.mp4`. * @note Ensure that the directory for the log files exists and is writable. 
*/ const char* filePath; @@ -5368,8 +5767,9 @@ struct AudioRecordingConfiguration { * - (Default) 32000 * - 44100 * - 48000 - * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC files with quality - * to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for better recording quality. + * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC + * files with quality to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for + * better recording quality. */ int sampleRate; /** @@ -5390,131 +5790,142 @@ struct AudioRecordingConfiguration { int recordingChannel; AudioRecordingConfiguration() - : filePath(OPTIONAL_NULLPTR), - encode(false), - sampleRate(32000), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(AUDIO_RECORDING_QUALITY_LOW), - recordingChannel(1) {} - - AudioRecordingConfiguration(const char* file_path, int sample_rate, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(false), - sampleRate(sample_rate), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, AUDIO_FILE_RECORDING_TYPE type, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(enc), - sampleRate(sample_rate), - fileRecordingType(type), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const AudioRecordingConfiguration &rhs) - : filePath(rhs.filePath), - encode(rhs.encode), - sampleRate(rhs.sampleRate), - fileRecordingType(rhs.fileRecordingType), - quality(rhs.quality), - recordingChannel(rhs.recordingChannel) {} + : filePath(OPTIONAL_NULLPTR), + encode(false), + sampleRate(32000), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(AUDIO_RECORDING_QUALITY_LOW), + recordingChannel(1) {} + + AudioRecordingConfiguration(const char* file_path, int sample_rate, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(false), + sampleRate(sample_rate), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, + AUDIO_FILE_RECORDING_TYPE type, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(enc), + sampleRate(sample_rate), + fileRecordingType(type), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const AudioRecordingConfiguration& rhs) + : filePath(rhs.filePath), + encode(rhs.encode), + sampleRate(rhs.sampleRate), + fileRecordingType(rhs.fileRecordingType), + quality(rhs.quality), + recordingChannel(rhs.recordingChannel) {} }; /** * Observer settings for the encoded audio. */ struct AudioEncodedFrameObserverConfig { - /** - * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. - */ - AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; - /** - * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`. - */ - AUDIO_ENCODING_TYPE encodingType; - - AudioEncodedFrameObserverConfig() - : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), - encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM){} + /** + * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. + */ + AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; + /** + * Audio encoding type. 
For details, see `AUDIO_ENCODING_TYPE`. + */ + AUDIO_ENCODING_TYPE encodingType; + AudioEncodedFrameObserverConfig() + : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), + encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM) {} }; /** * The encoded audio observer. */ class IAudioEncodedFrameObserver { -public: -/** -* Gets the encoded audio data of the local user. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, -* you can get the encoded audio data of the local user from this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + public: + /** + * Gets the encoded audio data of the local user. + * + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, you can get the encoded audio data of the local + * user from this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -/** -* Gets the encoded audio data of all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, -* you can get encoded audio data of all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + /** + * Gets the encoded audio data of all remote users. + * + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, you can get encoded audio data of all remote + * users through this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -/** -* Gets the mixed and encoded audio data of the local and all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the audio profile as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, -* you can get the mixed and encoded audio data of the local and all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. 
-*/ -virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + /** + * Gets the mixed and encoded audio data of the local and all remote users. + * + * After calling `registerAudioEncodedFrameObserver` and setting the audio profile as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, you can get the mixed and encoded audio data of + * the local and all remote users through this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -virtual ~IAudioEncodedFrameObserver () {} + virtual ~IAudioEncodedFrameObserver() {} }; /** The region for connection, which is the region where the server the SDK connects to is located. */ enum AREA_CODE { - /** - * Mainland China. - */ - AREA_CODE_CN = 0x00000001, - /** - * North America. - */ - AREA_CODE_NA = 0x00000002, - /** - * Europe. - */ - AREA_CODE_EU = 0x00000004, - /** - * Asia, excluding Mainland China. - */ - AREA_CODE_AS = 0x00000008, - /** - * Japan. - */ - AREA_CODE_JP = 0x00000010, - /** - * India. - */ - AREA_CODE_IN = 0x00000020, - /** - * (Default) Global. - */ - AREA_CODE_GLOB = (0xFFFFFFFF) + /** + * Mainland China. + */ + AREA_CODE_CN = 0x00000001, + /** + * North America. + */ + AREA_CODE_NA = 0x00000002, + /** + * Europe. + */ + AREA_CODE_EU = 0x00000004, + /** + * Asia, excluding Mainland China. + */ + AREA_CODE_AS = 0x00000008, + /** + * Japan. + */ + AREA_CODE_JP = 0x00000010, + /** + * India. + */ + AREA_CODE_IN = 0x00000020, + /** + * (Default) Global. + */ + AREA_CODE_GLOB = (0xFFFFFFFF) }; /** @@ -5568,8 +5979,9 @@ enum CHANNEL_MEDIA_RELAY_ERROR { RELAY_ERROR_SERVER_ERROR_RESPONSE = 1, /** 2: No server response. You can call the `leaveChannel` method to leave the channel. * - * This error can also occur if your project has not enabled co-host token authentication. You can contact technical - * support to enable the service for cohosting across channels before starting a channel media relay. + * This error can also occur if your project has not enabled co-host token authentication. You can + * contact technical support to enable the service for cohosting across channels before starting a + * channel media relay. */ RELAY_ERROR_SERVER_NO_RESPONSE = 2, /** 3: The SDK fails to access the service, probably due to limited resources of the server. @@ -5587,8 +5999,8 @@ enum CHANNEL_MEDIA_RELAY_ERROR { /** 7: The server fails to send the media stream. */ RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST = 7, - /** 8: The SDK disconnects from the server due to poor network connections. You can call the `leaveChannel` method to - * leave the channel. + /** 8: The SDK disconnects from the server due to poor network connections. You can call the + * `leaveChannel` method to leave the channel. */ RELAY_ERROR_SERVER_CONNECTION_LOST = 8, /** 9: An internal error occurs in the server. @@ -5606,8 +6018,8 @@ enum CHANNEL_MEDIA_RELAY_ERROR { * The state code of the channel media relay. */ enum CHANNEL_MEDIA_RELAY_STATE { - /** 0: The initial state. After you successfully stop the channel media relay by calling `stopChannelMediaRelay`, - * the `onChannelMediaRelayStateChanged` callback returns this state. + /** 0: The initial state. 
After you successfully stop the channel media relay by calling + * `stopChannelMediaRelay`, the `onChannelMediaRelayStateChanged` callback returns this state. */ RELAY_STATE_IDLE = 0, /** 1: The SDK tries to relay the media stream to the destination channel. @@ -5625,15 +6037,15 @@ enum CHANNEL_MEDIA_RELAY_STATE { */ struct ChannelMediaInfo { /** The user ID. - */ + */ uid_t uid; /** The channel name. The default value is NULL, which means that the SDK - * applies the current channel name. - */ + * applies the current channel name. + */ const char* channelName; /** The token that enables the user to join the channel. The default value - * is NULL, which means that the SDK applies the current token. - */ + * is NULL, which means that the SDK applies the current token. + */ const char* token; ChannelMediaInfo() : uid(0), channelName(NULL), token(NULL) {} @@ -5644,31 +6056,32 @@ struct ChannelMediaInfo { */ struct ChannelMediaRelayConfiguration { /** The information of the source channel `ChannelMediaInfo`. It contains the following members: - * - `channelName`: The name of the source channel. The default value is `NULL`, which means the SDK applies the name - * of the current channel. - * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is 0, which means the - * SDK generates a random UID. You must set it as 0. - * - `token`: The token for joining the source channel. It is generated with the `channelName` and `uid` you set in - * `srcInfo`. - * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`, which means the - * SDK applies the App ID. - * - If you have enabled the App Certificate, you must use the token generated with the `channelName` and `uid`, and - * the `uid` must be set as 0. + * - `channelName`: The name of the source channel. The default value is `NULL`, which means the + * SDK applies the name of the current channel. + * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is + * 0, which means the SDK generates a random UID. You must set it as 0. + * - `token`: The token for joining the source channel. It is generated with the `channelName` and + * `uid` you set in `srcInfo`. + * - If you have not enabled the App Certificate, set this parameter as the default value + * `NULL`, which means the SDK applies the App ID. + * - If you have enabled the App Certificate, you must use the token generated with the + * `channelName` and `uid`, and the `uid` must be set as 0. */ ChannelMediaInfo* srcInfo; - /** The information of the destination channel `ChannelMediaInfo`. It contains the following members: + /** The information of the destination channel `ChannelMediaInfo`. It contains the following + * members: * - `channelName`: The name of the destination channel. * - `uid`: The unique ID to identify the relay stream in the destination channel. The value * ranges from 0 to (2^32-1). To avoid UID conflicts, this `UID` must be different from any * other `UID` in the destination channel. The default value is 0, which means the SDK generates * a random `UID`. Do not set this parameter as the `UID` of the host in the destination channel, * and ensure that this `UID` is different from any other `UID` in the channel. - * - `token`: The token for joining the destination channel. It is generated with the `channelName` - * and `uid` you set in `destInfos`. + * - `token`: The token for joining the destination channel. 
It is generated with the + * `channelName` and `uid` you set in `destInfos`. * - If you have not enabled the App Certificate, set this parameter as the default value NULL, * which means the SDK applies the App ID. - * If you have enabled the App Certificate, you must use the token generated with the `channelName` - * and `uid`. + * If you have enabled the App Certificate, you must use the token generated with the + * `channelName` and `uid`. */ ChannelMediaInfo* destInfos; /** The number of destination channels. The default value is 0, and the value range is from 0 to @@ -5677,7 +6090,8 @@ struct ChannelMediaRelayConfiguration { */ int destCount; - ChannelMediaRelayConfiguration() : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} + ChannelMediaRelayConfiguration() + : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} }; /** @@ -5722,11 +6136,11 @@ struct DownlinkNetworkInfo { expected_bitrate_bps(-1) {} PeerDownlinkInfo(const PeerDownlinkInfo& rhs) - : stream_type(rhs.stream_type), + : stream_type(rhs.stream_type), current_downscale_level(rhs.current_downscale_level), expected_bitrate_bps(rhs.expected_bitrate_bps) { if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5741,7 +6155,7 @@ struct DownlinkNetworkInfo { current_downscale_level = rhs.current_downscale_level; expected_bitrate_bps = rhs.expected_bitrate_bps; if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5775,18 +6189,18 @@ struct DownlinkNetworkInfo { int total_received_video_count; DownlinkNetworkInfo() - : lastmile_buffer_delay_time_ms(-1), - bandwidth_estimation_bps(-1), - total_downscale_level_count(-1), - peer_downlink_info(OPTIONAL_NULLPTR), - total_received_video_count(-1) {} + : lastmile_buffer_delay_time_ms(-1), + bandwidth_estimation_bps(-1), + total_downscale_level_count(-1), + peer_downlink_info(OPTIONAL_NULLPTR), + total_received_video_count(-1) {} DownlinkNetworkInfo(const DownlinkNetworkInfo& info) - : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), - bandwidth_estimation_bps(info.bandwidth_estimation_bps), - total_downscale_level_count(info.total_downscale_level_count), - peer_downlink_info(OPTIONAL_NULLPTR), - total_received_video_count(info.total_received_video_count) { + : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), + bandwidth_estimation_bps(info.bandwidth_estimation_bps), + total_downscale_level_count(info.total_downscale_level_count), + peer_downlink_info(OPTIONAL_NULLPTR), + total_received_video_count(info.total_received_video_count) { if (total_received_video_count <= 0) return; peer_downlink_info = new PeerDownlinkInfo[total_received_video_count]; for (int i = 0; i < total_received_video_count; ++i) @@ -5840,7 +6254,8 @@ enum ENCRYPTION_MODE { * salt (`encryptionKdfSalt`). */ AES_128_GCM2 = 7, - /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt (`encryptionKdfSalt`). + /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt + * (`encryptionKdfSalt`). */ AES_256_GCM2 = 8, /** Enumerator boundary. @@ -5858,30 +6273,31 @@ struct EncryptionConfig { /** * Encryption key in string type with unlimited length. 
Agora recommends using a 32-byte key. * - * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2). + * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in + * encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2). */ const char* encryptionKey; /** - * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server side. + * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server + * side. * * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode. * In this case, ensure that this parameter is not 0. */ uint8_t encryptionKdfSalt[32]; - + bool datastreamEncryptionEnabled; EncryptionConfig() - : encryptionMode(AES_128_GCM2), - encryptionKey(OPTIONAL_NULLPTR), - datastreamEncryptionEnabled(false) - { + : encryptionMode(AES_128_GCM2), + encryptionKey(OPTIONAL_NULLPTR), + datastreamEncryptionEnabled(false) { memset(encryptionKdfSalt, 0, sizeof(encryptionKdfSalt)); } /// @cond const char* getEncryptionString() const { - switch(encryptionMode) { + switch (encryptionMode) { case AES_128_XTS: return "aes-128-xts"; case AES_128_ECB: @@ -5909,30 +6325,31 @@ struct EncryptionConfig { /** Encryption error type. */ enum ENCRYPTION_ERROR_TYPE { - /** - * 0: Internal reason. - */ - ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, - /** - * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, - /** - * 2: MediaStream encryption errors. - */ - ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, - /** - * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, - /** - * 4: DataStream encryption errors. - */ - ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, + /** + * 0: Internal reason. + */ + ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, + /** + * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same + * encryption mode and key. + */ + ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, + /** + * 2: MediaStream encryption errors. + */ + ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, + /** + * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same + * encryption mode and key. + */ + ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, + /** + * 4: DataStream encryption errors. + */ + ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, }; -enum UPLOAD_ERROR_REASON -{ +enum UPLOAD_ERROR_REASON { UPLOAD_SUCCESS = 0, UPLOAD_NET_ERROR = 1, UPLOAD_SERVER_ERROR = 2, @@ -5967,7 +6384,8 @@ enum STREAM_SUBSCRIBE_STATE { * - Calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending local * media stream. * - Calls `disableAudio` or `disableVideo `to disable the local audio or video module. - * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. + * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or + * video capture. * - The role of the remote user is audience. * - The local user calls the following methods to stop receiving remote streams: * - Calls `muteRemoteAudioStream(true)`, `muteAllRemoteAudioStreams(true)` to stop receiving the remote audio streams. 
@@ -5994,9 +6412,12 @@ enum STREAM_PUBLISH_STATE { PUB_STATE_IDLE = 0, /** * 1: Fails to publish the local stream. Possible reasons: - * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending the local media stream. - * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video module. - * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. + * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop + * sending the local media stream. + * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video + * module. + * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the + * local audio or video capture. * - The role of the local user is audience. */ PUB_STATE_NO_PUBLISHED = 1, @@ -6022,10 +6443,15 @@ struct EchoTestConfiguration { int intervalInSeconds; EchoTestConfiguration(view_t v, bool ea, bool ev, const char* t, const char* c, const int is) - : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} + : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} EchoTestConfiguration() - : view(OPTIONAL_NULLPTR), enableAudio(true), enableVideo(true), token(OPTIONAL_NULLPTR), channelId(OPTIONAL_NULLPTR), intervalInSeconds(2) {} + : view(OPTIONAL_NULLPTR), + enableAudio(true), + enableVideo(true), + token(OPTIONAL_NULLPTR), + channelId(OPTIONAL_NULLPTR), + intervalInSeconds(2) {} }; /** @@ -6041,9 +6467,7 @@ struct UserInfo { */ char userAccount[MAX_USER_ACCOUNT_LENGTH]; - UserInfo() : uid(0) { - userAccount[0] = '\0'; - } + UserInfo() : uid(0) { userAccount[0] = '\0'; } }; /** @@ -6053,21 +6477,22 @@ enum EAR_MONITORING_FILTER_TYPE { /** * 1: Do not add an audio filter to the in-ear monitor. */ - EAR_MONITORING_FILTER_NONE = (1<<0), + EAR_MONITORING_FILTER_NONE = (1 << 0), /** * 2: Enable audio filters to the in-ear monitor. If you implement functions such as voice * beautifier and audio effect, users can hear the voice after adding these effects. */ - EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1<<1), + EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1 << 1), /** * 4: Enable noise suppression to the in-ear monitor. */ - EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1<<2), + EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1 << 2), /** * 32768: Enable audio filters by reuse post-processing filter to the in-ear monitor. - * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other bits will be disregarded. + * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other + * bits will be disregarded. */ - EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1<<15), + EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1 << 15), }; /** @@ -6139,7 +6564,7 @@ struct ScreenVideoParameters { * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). */ int frameRate = 15; - /** + /** * The video encoding bitrate (Kbps). For recommended values, see [Recommended video * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). 
*/ @@ -6230,7 +6655,7 @@ struct VideoRenderingTracingInfo { int elapsedTime; /** * Elapsed time from the start tracing time to the time when join channel. - * + * * **Note** * If the start tracing time is behind the time when join channel, this value will be negative. */ @@ -6241,7 +6666,7 @@ struct VideoRenderingTracingInfo { int join2JoinSuccess; /** * Elapsed time from finishing joining channel to remote user joined. - * + * * **Note** * If the start tracing time is after the time finishing join channel, this value will be * the elapsed time from the start tracing time to remote user joined. The minimum value is 0. @@ -6249,7 +6674,7 @@ struct VideoRenderingTracingInfo { int joinSuccess2RemoteJoined; /** * Elapsed time from remote user joined to set the view. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to set the view. The minimum value is 0. @@ -6257,7 +6682,7 @@ struct VideoRenderingTracingInfo { int remoteJoined2SetView; /** * Elapsed time from remote user joined to the time subscribing remote video stream. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to the time subscribing remote video stream. @@ -6266,7 +6691,7 @@ struct VideoRenderingTracingInfo { int remoteJoined2UnmuteVideo; /** * Elapsed time from remote user joined to the remote video packet received. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to the time subscribing remote video stream. @@ -6286,7 +6711,6 @@ enum CONFIG_FETCH_TYPE { CONFIG_FETCH_TYPE_JOIN_CHANNEL = 2, }; - /** The local proxy mode type. */ enum LOCAL_PROXY_MODE { /** 0: Connect local proxy with high priority, if not connected to local proxy, fallback to sdrtn. @@ -6315,7 +6739,8 @@ struct LogUploadServerInfo { LogUploadServerInfo() : serverDomain(NULL), serverPath(NULL), serverPort(0), serverHttps(true) {} - LogUploadServerInfo(const char* domain, const char* path, int port, bool https) : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} + LogUploadServerInfo(const char* domain, const char* path, int port, bool https) + : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} }; struct AdvancedConfigInfo { @@ -6337,8 +6762,9 @@ struct LocalAccessPointConfiguration { /** The number of local access point domain. */ int domainListSize; - /** Certificate domain name installed on specific local access point. pass "" means using sni domain on specific local access point - * SNI(Server Name Indication) is an extension to the TLS protocol. + /** Certificate domain name installed on specific local access point. pass "" means using sni + * domain on specific local access point SNI(Server Name Indication) is an extension to the TLS + * protocol. */ const char* verifyDomainName; /** Local proxy connection mode, connectivity first or local only. 
@@ -6353,23 +6779,42 @@ struct LocalAccessPointConfiguration { - false: not disable vos-aut */ bool disableAut; - LocalAccessPointConfiguration() : ipList(NULL), ipListSize(0), domainList(NULL), domainListSize(0), verifyDomainName(NULL), mode(ConnectivityFirst), disableAut(true) {} + LocalAccessPointConfiguration() + : ipList(NULL), + ipListSize(0), + domainList(NULL), + domainListSize(0), + verifyDomainName(NULL), + mode(ConnectivityFirst), + disableAut(true) {} +}; + +enum RecorderStreamType { + RTC, + PREVIEW, }; /** * The information about recorded media streams. */ struct RecorderStreamInfo { - const char* channelId; - /** - * The user ID. - */ - uid_t uid; - /** - * The channel ID of the audio/video stream needs to be recorded. - */ - RecorderStreamInfo() : channelId(NULL), uid(0) {} - RecorderStreamInfo(const char* channelId, uid_t uid) : channelId(channelId), uid(uid) {} + /** + * The channel ID of the audio/video stream needs to be recorded. + */ + const char* channelId; + /** + * The user ID. + */ + uid_t uid; + /** + * The Recoder Stream type. + */ + RecorderStreamType type; + RecorderStreamInfo() : channelId(NULL), uid(0), type(RTC) {} + RecorderStreamInfo(const char* channelId, uid_t uid) + : channelId(channelId), uid(uid), type(RTC) {} + RecorderStreamInfo(const char* channelId, uid_t uid, RecorderStreamType type) + : channelId(channelId), uid(uid), type(type) {} }; } // namespace rtc @@ -6396,12 +6841,12 @@ class AParameter : public agora::util::AutoPtr { }; class LicenseCallback { - public: - virtual ~LicenseCallback() {} - virtual void onCertificateRequired() = 0; - virtual void onLicenseRequest() = 0; - virtual void onLicenseValidated() = 0; - virtual void onLicenseError(int result) = 0; + public: + virtual ~LicenseCallback() {} + virtual void onCertificateRequired() = 0; + virtual void onLicenseRequest() = 0; + virtual void onLicenseValidated() = 0; + virtual void onLicenseError(int result) = 0; }; } // namespace base @@ -6445,44 +6890,51 @@ struct SpatialAudioParams { }; /** * Layout info of video stream which compose a transcoder video stream. -*/ -struct VideoLayout -{ + */ +struct VideoLayout { /** * Channel Id from which this video stream come from. - */ + */ const char* channelId; /** * User id of video stream. - */ + */ rtc::uid_t uid; /** * User account of video stream. - */ + */ user_id_t strUid; /** * x coordinate of video stream on a transcoded video stream canvas. - */ + */ uint32_t x; /** * y coordinate of video stream on a transcoded video stream canvas. - */ + */ uint32_t y; /** * width of video stream on a transcoded video stream canvas. - */ + */ uint32_t width; /** * height of video stream on a transcoded video stream canvas. - */ + */ uint32_t height; /** * video state of video stream on a transcoded video stream canvas. * 0 for normal video , 1 for placeholder image showed , 2 for black image. - */ - uint32_t videoState; + */ + uint32_t videoState; - VideoLayout() : channelId(OPTIONAL_NULLPTR), uid(0), strUid(OPTIONAL_NULLPTR), x(0), y(0), width(0), height(0), videoState(0) {} + VideoLayout() + : channelId(OPTIONAL_NULLPTR), + uid(0), + strUid(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + videoState(0) {} }; } // namespace agora @@ -6509,7 +6961,7 @@ AGORA_API int AGORA_CALL setAgoraSdkExternalSymbolLoader(void* (*func)(const cha * @note For license only, everytime will generate a different credential. 
* So, just need to call once for a device, and then save the credential */ -AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential); +AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString& credential); /** * Verify given certificate and return the result @@ -6524,8 +6976,10 @@ AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential) * @return The description of the error code. * @note For license only. */ -AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_buf, int credential_len, - const char *certificate_buf, int certificate_len); +AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char* credential_buf, + int credential_len, + const char* certificate_buf, + int certificate_len); /** * @brief Implement the agora::base::LicenseCallback, @@ -6534,7 +6988,7 @@ AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_ * @param [in] callback The object of agora::LiceseCallback, * set the callback to null before delete it. */ -AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback *callback); +AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback* callback); /** * @brief Get the LicenseCallback pointer if already setup, @@ -6550,18 +7004,15 @@ AGORA_API agora::base::LicenseCallback* getAgoraLicenseCallback(); * typical scenario is as follows: * * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - * | // custom audio/video base capture time, e.g. the first audio/video capture time. | - * | int64_t custom_capture_time_base; | - * | | - * | int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | - * | | - * | // offset is fixed once calculated in the begining. | - * | const int64_t offset = agora_monotonic_time - custom_capture_time_base; | - * | | - * | // realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| - * | // actual_audio/video_capture_time is the actual capture time transfered to sdk. | - * | int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | - * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | + * | // custom audio/video base capture time, e.g. the first audio/video capture time. | | int64_t + * custom_capture_time_base; | | | | + * int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | + * | | | // offset is fixed once calculated in the begining. | | const int64_t offset = + * agora_monotonic_time - custom_capture_time_base; | | | | // + * realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| | // + * actual_audio/video_capture_time is the actual capture time transfered to sdk. 
| | + * int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | + * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ * * @return diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h index 8120acb3f..6e7d45357 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/AgoraMediaBase.h @@ -63,8 +63,8 @@ struct ExtensionContext { /** -* Video source types definition. -**/ + * Video source types definition. + **/ enum VIDEO_SOURCE_TYPE { /** Video captured by the camera. */ @@ -115,17 +115,45 @@ enum VIDEO_SOURCE_TYPE { */ VIDEO_SOURCE_SCREEN_FOURTH = 14, /** Video for voice drive. - */ - VIDEO_SOURCE_SPEECH_DRIVEN = 15, + */ + VIDEO_SOURCE_SPEECH_DRIVEN = 15, VIDEO_SOURCE_UNKNOWN = 100 }; +/** +* Audio source types definition. +**/ +enum AUDIO_SOURCE_TYPE { + /** Audio captured by the mic. + */ + AUDIO_SOURCE_MICROPHONE = 0, + /** Not define. + */ + AUDIO_SOURCE_CUSTOM = 1, + /** Audio for media player sharing. + */ + AUDIO_SOURCE_MEDIA_PLAYER = 2, + /** Audio for screen audio. + */ + AUDIO_SOURCE_LOOPBACK_RECORDING = 3, + /** Audio captured by mixed source. + */ + AUDIO_SOURCE_MIXED_STREAM = 4, + /** Remote audio received from network. + */ + AUDIO_SOURCE_REMOTE_USER = 5, + /** Remote audio received from network by channel. + */ + AUDIO_SOURCE_REMOTE_CHANNEL = 6, + + AUDIO_SOURCE_UNKNOWN = 100 +}; + /** * Audio routes. */ -enum AudioRoute -{ +enum AudioRoute { /** * -1: The default audio route. */ @@ -191,23 +219,21 @@ struct AudioParameters { size_t channels; size_t frames_per_buffer; - AudioParameters() - : sample_rate(0), - channels(0), - frames_per_buffer(0) {} + AudioParameters() : sample_rate(0), channels(0), frames_per_buffer(0) {} }; /** * The use mode of the audio data. */ enum RAW_AUDIO_FRAME_OP_MODE_TYPE { - /** 0: Read-only mode: Users only read the data from `AudioFrame` without modifying anything. + /** 0: Read-only mode: Users only read the data from `AudioFrame` without modifying anything. * For example, when users acquire the data with the Agora SDK, then start the media push. */ RAW_AUDIO_FRAME_OP_MODE_READ_ONLY = 0, - /** 2: Read and write mode: Users read the data from `AudioFrame`, modify it, and then play it. - * For example, when users have their own audio-effect processing module and perform some voice pre-processing, such as a voice change. + /** 2: Read and write mode: Users read the data from `AudioFrame`, modify it, and then play it. + * For example, when users have their own audio-effect processing module and perform some voice + * pre-processing, such as a voice change. */ RAW_AUDIO_FRAME_OP_MODE_READ_WRITE = 2, }; @@ -215,7 +241,7 @@ enum RAW_AUDIO_FRAME_OP_MODE_TYPE { } // namespace rtc namespace media { - /** +/** * The type of media device. 
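The timestamp-alignment comment for getAgoraCurrentMonotonicTimeInMs above reduces to a fixed offset computed once at startup; a sketch, with the custom-source capture times left as hypothetical variables.

    // custom_capture_time_base: the first capture time reported by the custom source (ms).
    int64_t custom_capture_time_base = first_custom_capture_time_ms;  // hypothetical input
    // The offset is fixed once calculated at the beginning.
    const int64_t offset = getAgoraCurrentMonotonicTimeInMs() - custom_capture_time_base;
    // Shift every subsequent capture time from the custom clock into the SDK clock.
    int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset;
    int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset;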
*/ enum MEDIA_SOURCE_TYPE { @@ -290,23 +316,23 @@ enum CONTENT_INSPECT_RESULT { }; enum CONTENT_INSPECT_TYPE { -/** - * (Default) content inspect type invalid - */ -CONTENT_INSPECT_INVALID = 0, -/** - * @deprecated - * Content inspect type moderation - */ -CONTENT_INSPECT_MODERATION __deprecated = 1, -/** - * Content inspect type supervise - */ -CONTENT_INSPECT_SUPERVISION = 2, -/** - * Content inspect type image moderation - */ -CONTENT_INSPECT_IMAGE_MODERATION = 3 + /** + * (Default) content inspect type invalid + */ + CONTENT_INSPECT_INVALID = 0, + /** + * @deprecated + * Content inspect type moderation + */ + CONTENT_INSPECT_MODERATION __deprecated = 1, + /** + * Content inspect type supervise + */ + CONTENT_INSPECT_SUPERVISION = 2, + /** + * Content inspect type image moderation + */ + CONTENT_INSPECT_IMAGE_MODERATION = 3 }; struct ContentInspectModule { @@ -338,15 +364,14 @@ struct ContentInspectConfig { /**The content inspect module count. */ int moduleCount; - ContentInspectConfig& operator=(const ContentInspectConfig& rth) - { - extraInfo = rth.extraInfo; - serverConfig = rth.serverConfig; - moduleCount = rth.moduleCount; - memcpy(&modules, &rth.modules, MAX_CONTENT_INSPECT_MODULE_COUNT * sizeof(ContentInspectModule)); - return *this; - } - ContentInspectConfig() :extraInfo(NULL), serverConfig(NULL), moduleCount(0){} + ContentInspectConfig& operator=(const ContentInspectConfig& rth) { + extraInfo = rth.extraInfo; + serverConfig = rth.serverConfig; + moduleCount = rth.moduleCount; + memcpy(&modules, &rth.modules, MAX_CONTENT_INSPECT_MODULE_COUNT * sizeof(ContentInspectModule)); + return *this; + } + ContentInspectConfig() : extraInfo(NULL), serverConfig(NULL), moduleCount(0) {} }; namespace base { @@ -368,9 +393,7 @@ struct PacketOptions { uint32_t timestamp; // Audio level indication. uint8_t audioLevelIndication; - PacketOptions() - : timestamp(0), - audioLevelIndication(127) {} + PacketOptions() : timestamp(0), audioLevelIndication(127) {} }; /** @@ -386,9 +409,7 @@ struct AudioEncodedFrameInfo { * The codec of the packet. */ uint8_t codec; - AudioEncodedFrameInfo() - : sendTs(0), - codec(0) {} + AudioEncodedFrameInfo() : sendTs(0), codec(0) {} }; /** @@ -398,17 +419,18 @@ struct AudioPcmFrame { /** * The buffer size of the PCM audio frame. */ - OPTIONAL_ENUM_SIZE_T { - // Stereo, 32 kHz, 60 ms (2 * 32 * 60) - /** - * The max number of the samples of the data. - * - * When the number of audio channel is two, the sample rate is 32 kHZ, - * the buffer length of the data is 60 ms, the number of the samples of the data is 3840 (2 x 32 x 60). - */ - kMaxDataSizeSamples = 3840, - /** The max number of the bytes of the data. */ - kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t), + OPTIONAL_ENUM_SIZE_T{ + // Stereo, 32 kHz, 60 ms (2 * 32 * 60) + /** + * The max number of the samples of the data. + * + * When the number of audio channel is two, the sample rate is 32 kHZ, + * the buffer length of the data is 60 ms, the number of the samples of the data is 3840 (2 x + * 32 x 60). + */ + kMaxDataSizeSamples = 3840, + /** The max number of the bytes of the data. */ + kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t), }; /** The timestamp (ms) of the audio frame. 
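The 3840-sample bound quoted above breaks down as stereo × 32 kHz × 60 ms; a worked check (not SDK code):

    // 2 channels x 32 samples per ms per channel (32 kHz) x 60 ms = 3840 samples.
    static_assert(2 * 32 * 60 == 3840, "kMaxDataSizeSamples");
    // Each sample is an int16_t, so the byte bound is 7680.
    static_assert(3840 * sizeof(int16_t) == 7680, "kMaxDataSizeBytes");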
@@ -553,7 +575,8 @@ enum VIDEO_PIXEL_FORMAT { */ VIDEO_PIXEL_I422 = 16, /** - * 17: ID3D11Texture2D, only support DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_NV12 texture format + * 17: ID3D11Texture2D, only support DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, + * DXGI_FORMAT_NV12 texture format */ VIDEO_TEXTURE_ID3D11TEXTURE2D = 17, /** @@ -608,12 +631,12 @@ enum CAMERA_VIDEO_SOURCE_TYPE { * This interface provides access to metadata information. */ class IVideoFrameMetaInfo { - public: - enum META_INFO_KEY { - KEY_FACE_CAPTURE = 0, - }; - virtual ~IVideoFrameMetaInfo() {}; - virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; + public: + enum META_INFO_KEY { + KEY_FACE_CAPTURE = 0, + }; + virtual ~IVideoFrameMetaInfo(){}; + virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; }; struct ColorSpace { @@ -829,7 +852,7 @@ struct ExternalVideoFrame { d3d11Texture2d(NULL), textureSliceIndex(0){} - /** + /** * The EGL context type. */ enum EGL_CONTEXT_TYPE { @@ -869,6 +892,7 @@ struct ExternalVideoFrame { * The pixel format: #VIDEO_PIXEL_FORMAT */ VIDEO_PIXEL_FORMAT format; + /** * The video buffer. */ @@ -903,30 +927,32 @@ struct ExternalVideoFrame { */ int cropBottom; /** - * [Raw data related parameter] The clockwise rotation information of the video frame. You can set the - * rotation angle as 0, 90, 180, or 270. The default value is 0. + * [Raw data related parameter] The clockwise rotation information of the video frame. You can set + * the rotation angle as 0, 90, 180, or 270. The default value is 0. */ int rotation; /** - * The timestamp (ms) of the incoming video frame. An incorrect timestamp results in a frame loss or - * unsynchronized audio and video. - * + * The timestamp (ms) of the incoming video frame. An incorrect timestamp results in a frame loss + * or unsynchronized audio and video. + * * Please refer to getAgoraCurrentMonotonicTimeInMs or getCurrentMonotonicTimeInMs * to determine how to fill this filed. */ long long timestamp; /** * [Texture-related parameter] - * When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set EGLContext to this field. - * When using the OpenGL interface (android.opengl.*) defined by Android, set EGLContext to this field. + * When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set + * EGLContext to this field. When using the OpenGL interface (android.opengl.*) defined by + * Android, set EGLContext to this field. */ - void *eglContext; + void* eglContext; /** * [Texture related parameter] Texture ID used by the video frame. */ EGL_CONTEXT_TYPE eglType; /** - * [Texture related parameter] Incoming 4 × 4 transformational matrix. The typical value is a unit matrix. + * [Texture related parameter] Incoming 4 × 4 transformational matrix. The typical value is + * a unit matrix. */ int textureId; /** @@ -1060,8 +1086,8 @@ struct VideoFrame { */ int rotation; /** - * The timestamp to render the video stream. Use this parameter for audio-video synchronization when - * rendering the video. + * The timestamp to render the video stream. Use this parameter for audio-video synchronization + * when rendering the video. * * @note This parameter is for rendering the video, not capturing the video. */ @@ -1089,7 +1115,8 @@ struct VideoFrame { */ int textureId; /** - * [Texture related parameter] The pointer of ID3D11Texture2D used by the video frame,for Windows only. 
+ * [Texture related parameter] The pointer of ID3D11Texture2D used by the video frame,for Windows + * only. */ void* d3d11Texture2d; /** @@ -1117,7 +1144,8 @@ struct VideoFrame { */ void* pixelBuffer; /** - * The pointer to IVideoFrameMetaInfo, which is the interface to get metainfo contents from VideoFrame. + * The pointer to IVideoFrameMetaInfo, which is the interface to get metainfo contents from + * VideoFrame. */ IVideoFrameMetaInfo* metaInfo; @@ -1141,7 +1169,8 @@ class IVideoFrameObserver { * Occurs each time the player receives a video frame. * * After registering the video frame observer, - * the callback occurs each time the player receives a video frame to report the detailed information of the video frame. + * the callback occurs each time the player receives a video frame to report the detailed + * information of the video frame. * @param frame The detailed information of the video frame. See {@link VideoFrame}. */ virtual void onFrame(const VideoFrame* frame) = 0; @@ -1179,6 +1208,30 @@ enum VIDEO_MODULE_POSITION { } // namespace base +/** Definition of SnapshotConfig. + */ +struct SnapshotConfig { + /** + * The local path (including filename extensions) of the snapshot. For example: + * - Windows: `C:\Users\\AppData\Local\Agora\\example.jpg` + * - iOS: `/App Sandbox/Library/Caches/example.jpg` + * - macOS: `~/Library/Logs/example.jpg` + * - Android: `/storage/emulated/0/Android/data//files/example.jpg` + */ + const char* filePath; + + /** + * The position of the video observation. See VIDEO_MODULE_POSITION. + * + * Allowed values vary depending on the `uid` parameter passed in `takeSnapshot` or `takeSnapshotEx`: + * - uid = 0: Position 2, 4 and 8 are allowed. + * - uid != 0: Only position 2 is allowed. + * + */ + media::base::VIDEO_MODULE_POSITION position; + SnapshotConfig() :filePath(NULL), position(media::base::POSITION_PRE_ENCODER) {} +}; + /** * The audio frame observer. */ @@ -1238,7 +1291,7 @@ class IAudioFrameObserverBase { */ int samplesPerSec; /** - * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data + * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data * buffer is interleaved. * * Buffer data size: buffer = samplesPerChannel × channels × bytesPerSample. @@ -1247,14 +1300,14 @@ class IAudioFrameObserverBase { /** * The timestamp to render the audio data. * - * You can use this timestamp to restore the order of the captured audio frame, and synchronize - * audio and video frames in video scenarios, including scenarios where external video sources + * You can use this timestamp to restore the order of the captured audio frame, and synchronize + * audio and video frames in video scenarios, including scenarios where external video sources * are used. */ int64_t renderTimeMs; /** * A reserved parameter. - * + * * You can use this presentationMs parameter to indicate the presenation milisecond timestamp, * this will then filled into audio4 extension part, the remote side could use this pts in av * sync process with video frame. @@ -1263,11 +1316,11 @@ class IAudioFrameObserverBase { /** * The pts timestamp of this audio frame. * - * This timestamp is used to indicate the origin pts time of the frame, and sync with video frame by - * the pts time stamp + * This timestamp is used to indicate the origin pts time of the frame, and sync with video + * frame by the pts time stamp */ int64_t presentationMs; - /** + /** * The number of the audio track. 
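An illustrative SnapshotConfig for takeSnapshot/takeSnapshotEx, per the notes in the struct above; the Android path is a placeholder, and the position restrictions depend on the uid passed to the snapshot call.

    agora::media::SnapshotConfig snapshot;
    snapshot.filePath = "/storage/emulated/0/Android/data/<package>/files/example.jpg";  // placeholder
    snapshot.position = agora::media::base::POSITION_PRE_ENCODER;  // the default position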
*/ int audioTrackNumber; @@ -1276,17 +1329,18 @@ class IAudioFrameObserverBase { */ uint32_t rtpTimestamp; - AudioFrame() : type(FRAME_TYPE_PCM16), - samplesPerChannel(0), - bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), - channels(0), - samplesPerSec(0), - buffer(NULL), - renderTimeMs(0), - avsync_type(0), - presentationMs(0), - audioTrackNumber(0), - rtpTimestamp(0) {} + AudioFrame() + : type(FRAME_TYPE_PCM16), + samplesPerChannel(0), + bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), + channels(0), + samplesPerSec(0), + buffer(NULL), + renderTimeMs(0), + avsync_type(0), + presentationMs(0), + audioTrackNumber(0), + rtpTimestamp(0) {} }; enum AUDIO_FRAME_POSITION { @@ -1335,8 +1389,17 @@ class IAudioFrameObserverBase { */ int samples_per_call; - AudioParams() : sample_rate(0), channels(0), mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), samples_per_call(0) {} - AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, int samplesPerCall) : sample_rate(samplerate), channels(channel), mode(type), samples_per_call(samplesPerCall) {} + AudioParams() + : sample_rate(0), + channels(0), + mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), + samples_per_call(0) {} + AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, + int samplesPerCall) + : sample_rate(samplerate), + channels(channel), + mode(type), + samples_per_call(samplesPerCall) {} }; public: @@ -1386,10 +1449,11 @@ class IAudioFrameObserverBase { * - true: The before-mixing playback audio frame is valid and is encoded and sent. * - false: The before-mixing playback audio frame is invalid and is not encoded or sent. */ - virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, AudioFrame& audioFrame) { - (void) channelId; - (void) userId; - (void) audioFrame; + virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, + AudioFrame& audioFrame) { + (void)channelId; + (void)userId; + (void)audioFrame; return true; } @@ -1398,12 +1462,19 @@ class IAudioFrameObserverBase { * @return A bit mask that controls the frame position of the audio observer. * @note - Use '|' (the OR operator) to observe multiple frame positions. *

- * After you successfully register the audio observer, the SDK triggers this callback each time it receives a audio frame. You can determine which position to observe by setting the return value. - * The SDK provides 4 positions for observer. Each position corresponds to a callback function: - * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received, which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback. - * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received, which corresponds to the \ref onRecordFrame "onRecordFrame" callback. - * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which corresponds to the \ref onMixedFrame "onMixedFrame" callback. - * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing "onPlaybackFrameBeforeMixing" callback. + * After you successfully register the audio observer, the SDK triggers this callback each time it + * receives a audio frame. You can determine which position to observe by setting the return + * value. The SDK provides 4 positions for observer. Each position corresponds to a callback + * function: + * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received, + * which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback. + * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received, + * which corresponds to the \ref onRecordFrame "onRecordFrame" callback. + * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which + * corresponds to the \ref onMixedFrame "onMixedFrame" callback. + * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before + * mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing + * "onPlaybackFrameBeforeMixing" callback. * @return The bit mask that controls the audio observation positions. * See AUDIO_FRAME_POSITION. */ @@ -1475,25 +1546,25 @@ class IAudioFrameObserver : public IAudioFrameObserverBase { * - true: The before-mixing playback audio frame is valid and is encoded and sent. * - false: The before-mixing playback audio frame is invalid and is not encoded or sent. */ - virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid, AudioFrame& audioFrame) = 0; + virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid, + AudioFrame& audioFrame) = 0; }; struct AudioSpectrumData { /** * The audio spectrum data of audio. */ - const float *audioSpectrumData; + const float* audioSpectrumData; /** * The data length of audio spectrum data. */ int dataLength; AudioSpectrumData() : audioSpectrumData(NULL), dataLength(0) {} - AudioSpectrumData(const float *data, int length) : - audioSpectrumData(data), dataLength(length) {} + AudioSpectrumData(const float* data, int length) : audioSpectrumData(data), dataLength(length) {} }; -struct UserAudioSpectrumInfo { +struct UserAudioSpectrumInfo { /** * User ID of the speaker. 
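A fragment showing how the masks listed above are typically combined; this assumes an observer class deriving from IAudioFrameObserverBase and an int return type for getObservedAudioFramePosition.

    // Observe only the playback and before-mixing positions.
    int getObservedAudioFramePosition() override {
      return AUDIO_FRAME_POSITION_PLAYBACK | AUDIO_FRAME_POSITION_BEFORE_MIXING;  // (1 << 0) | (1 << 3)
    }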
*/ @@ -1505,14 +1576,15 @@ struct UserAudioSpectrumInfo { UserAudioSpectrumInfo() : uid(0) {} - UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) : uid(uid), spectrumData(data, length) {} + UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) + : uid(uid), spectrumData(data, length) {} }; /** * The IAudioSpectrumObserver class. */ class IAudioSpectrumObserver { -public: + public: virtual ~IAudioSpectrumObserver() {} /** @@ -1521,7 +1593,8 @@ class IAudioSpectrumObserver { * This callback reports the audio spectrum data of the local audio at the moment * in the channel. * - * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". + * You can set the time interval of this callback using \ref + * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". * * @param data The audio spectrum data of local audio. * - true: Processed. @@ -1534,10 +1607,12 @@ class IAudioSpectrumObserver { * This callback reports the IDs and audio spectrum data of the loudest speakers at the moment * in the channel. * - * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". + * You can set the time interval of this callback using \ref + * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". * - * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo "UserAudioSpectrumInfo", which is an array containing - * the user ID and audio spectrum data for each speaker. + * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo + * "UserAudioSpectrumInfo", which is an array containing the user ID and audio spectrum data for + * each speaker. * - This array contains the following members: * - `uid`, which is the UID of each remote speaker * - `spectrumData`, which reports the audio spectrum of each remote speaker. @@ -1545,7 +1620,8 @@ class IAudioSpectrumObserver { * - true: Processed. * - false: Not processed. */ - virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, unsigned int spectrumNumber) = 0; + virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, + unsigned int spectrumNumber) = 0; }; /** @@ -1563,8 +1639,9 @@ class IVideoEncodedFrameObserver { * - true: Accept. * - false: Do not accept. */ - virtual bool onEncodedVideoFrameReceived(rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, - const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; + virtual bool onEncodedVideoFrameReceived( + rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, + const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; virtual ~IVideoEncodedFrameObserver() {} }; @@ -1581,16 +1658,18 @@ class IVideoFrameObserver { enum VIDEO_FRAME_PROCESS_MODE { /** * Read-only mode. - * + * * In this mode, you do not modify the video frame. The video frame observer is a renderer. */ - PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original frame. + PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original + // frame. /** * Read and write mode. - * + * * In this mode, you modify the video frame. The video frame observer is a video filter. */ - PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and affect the following frame processing in SDK. 
+ PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and + // affect the following frame processing in SDK. }; public: @@ -1599,38 +1678,43 @@ class IVideoFrameObserver { /** * Occurs each time the SDK receives a video frame captured by the local camera. * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * a video frame is received. In this callback, you can get the video data captured by the local - * camera. You can then pre-process the data according to your scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time a video frame is received. In this callback, you can get the video data captured by the + * local camera. You can then pre-process the data according to your scenarios. * * After pre-processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. * * @note - * - If you get the video data in RGBA color encoding format, Agora does not support using this callback to send the processed data in RGBA color encoding format back to the SDK. - * - The video data that this callback gets has not been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. + * - If you get the video data in RGBA color encoding format, Agora does not support using this + * callback to send the processed data in RGBA color encoding format back to the SDK. + * - The video data that this callback gets has not been pre-processed, such as watermarking, + * cropping content, rotating, or image enhancement. * * @param videoFrame A pointer to the video frame: VideoFrame * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE. * @return Determines whether to ignore the current video frame if the pre-processing fails: * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. - */ - virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + */ + virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** * Occurs each time the SDK receives a video frame before encoding. * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * when it receives a video frame. In this callback, you can get the video data before encoding. You can then - * process the data according to your particular scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time when it receives a video frame. In this callback, you can get the video data before + * encoding. You can then process the data according to your particular scenarios. * * After processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. * * @note - * - To get the video data captured from the second screen before encoding, you need to set (1 << 2) as a frame position through `getObservedFramePosition`. - * - The video data that this callback gets has been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. + * - To get the video data captured from the second screen before encoding, you need to set (1 << + * 2) as a frame position through `getObservedFramePosition`. 
+ * - The video data that this callback gets has been pre-processed, such as watermarking, cropping + * content, rotating, or image enhancement. * - This callback does not support sending processed RGBA video data back to the SDK. * * @param videoFrame A pointer to the video frame: VideoFrame @@ -1639,7 +1723,8 @@ class IVideoFrameObserver { * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. */ - virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** * Occurs each time the SDK receives a video frame decoded by the MediaPlayer. @@ -1650,10 +1735,13 @@ class IVideoFrameObserver { * * After pre-processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. - * + * * @note - * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". - * - On Android, this callback is not affected by the return value of \ref getVideoFormatPreference "getVideoFormatPreference" + * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode + * "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied + * "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". + * - On Android, this callback is not affected by the return value of \ref + * getVideoFormatPreference "getVideoFormatPreference" * * @param videoFrame A pointer to the video frame: VideoFrame * @param mediaPlayerId ID of the mediaPlayer. @@ -1666,13 +1754,13 @@ class IVideoFrameObserver { /** * Occurs each time the SDK receives a video frame sent by the remote user. * - * After you successfully register the video frame observer, the SDK triggers this callback each time a - * video frame is received. In this callback, you can get the video data sent by the remote user. You - * can then post-process the data according to your scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time a video frame is received. In this callback, you can get the video data sent by the remote + * user. You can then post-process the data according to your scenarios. + * + * After post-processing, you can send the processed data back to the SDK by setting the + * `videoFrame` parameter in this callback. * - * After post-processing, you can send the processed data back to the SDK by setting the `videoFrame` - * parameter in this callback. - * * @note This callback does not support sending processed RGBA video data back to the SDK. * * @param channelId The channel name @@ -1682,45 +1770,48 @@ class IVideoFrameObserver { * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. */ - virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, VideoFrame& videoFrame) = 0; + virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, + VideoFrame& videoFrame) = 0; virtual bool onTranscodedVideoFrame(VideoFrame& videoFrame) = 0; /** - * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the video frame. 
- * - * After you successfully register the video frame observer, the SDK triggers this callback each time it receives - * a video frame. You need to set your preferred process mode in the return value of this callback. + * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the + * video frame. + * + * After you successfully register the video frame observer, the SDK triggers this callback each + * time it receives a video frame. You need to set your preferred process mode in the return value + * of this callback. * @return VIDEO_FRAME_PROCESS_MODE. */ - virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { - return PROCESS_MODE_READ_ONLY; - } + virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { return PROCESS_MODE_READ_ONLY; } /** * Sets the format of the raw video data output by the SDK. * - * If you want to get raw video data in a color encoding format other than YUV 420, register this callback when - * calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK triggers - * this callback each time it receives a video frame. You need to set your preferred video data in the return value - * of this callback. - * - * @note If you want the video captured by the sender to be the original format, set the original video data format - * to VIDEO_PIXEL_DEFAULT in the return value. On different platforms, the original video pixel format is also - * different, for the actual video pixel format, see `VideoFrame`. - * + * If you want to get raw video data in a color encoding format other than YUV 420, register this + * callback when calling `registerVideoFrameObserver`. After you successfully register the video + * frame observer, the SDK triggers this callback each time it receives a video frame. You need to + * set your preferred video data in the return value of this callback. + * + * @note If you want the video captured by the sender to be the original format, set the original + * video data format to VIDEO_PIXEL_DEFAULT in the return value. On different platforms, the + * original video pixel format is also different, for the actual video pixel format, see + * `VideoFrame`. + * * @return Sets the video format. See VIDEO_PIXEL_FORMAT. */ virtual base::VIDEO_PIXEL_FORMAT getVideoFormatPreference() { return base::VIDEO_PIXEL_DEFAULT; } /** - * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured video. - * - * If you want to rotate the captured video according to the rotation member in the `VideoFrame` class, register this - * callback by calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the - * SDK triggers this callback each time it receives a video frame. You need to set whether to rotate the video frame - * in the return value of this callback. - * + * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured + * video. + * + * If you want to rotate the captured video according to the rotation member in the `VideoFrame` + * class, register this callback by calling `registerVideoFrameObserver`. After you successfully + * register the video frame observer, the SDK triggers this callback each time it receives a video + * frame. You need to set whether to rotate the video frame in the return value of this callback. + * * @note This function only supports video data in RGBA or YUV420. * * @return Determines whether to rotate. 
@@ -1730,13 +1821,15 @@ class IVideoFrameObserver { virtual bool getRotationApplied() { return false; } /** - * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the captured video. - * - * If the video data you want to obtain is a mirror image of the original video, you need to register this callback - * when calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK - * triggers this callback each time it receives a video frame. You need to set whether or not to mirror the video - * frame in the return value of this callback. - * + * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the + * captured video. + * + * If the video data you want to obtain is a mirror image of the original video, you need to + * register this callback when calling `registerVideoFrameObserver`. After you successfully + * register the video frame observer, the SDK triggers this callback each time it receives a video + * frame. You need to set whether or not to mirror the video frame in the return value of this + * callback. + * * @note This function only supports video data in RGBA and YUV420 formats. * * @return Determines whether to mirror. @@ -1748,19 +1841,24 @@ class IVideoFrameObserver { /** * Sets the frame position for the video observer. * - * After you successfully register the video observer, the SDK triggers this callback each time it receives - * a video frame. You can determine which position to observe by setting the return value. The SDK provides - * 3 positions for observer. Each position corresponds to a callback function: + * After you successfully register the video observer, the SDK triggers this callback each time it + * receives a video frame. You can determine which position to observe by setting the return + * value. The SDK provides 3 positions for observer. Each position corresponds to a callback + * function: * - * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds to the onCaptureVideoFrame callback. - * POSITION_PRE_RENDERER(1 << 1): The position before receiving the remote video data, which corresponds to the onRenderVideoFrame callback. - * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to the onPreEncodeVideoFrame callback. + * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds + * to the onCaptureVideoFrame callback. POSITION_PRE_RENDERER(1 << 1): The position before + * receiving the remote video data, which corresponds to the onRenderVideoFrame callback. + * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to + * the onPreEncodeVideoFrame callback. * * To observe multiple frame positions, use '|' (the OR operator). - * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by default. - * To conserve the system consumption, you can reduce the number of frame positions that you want to observe. + * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by + * default. To conserve the system consumption, you can reduce the number of frame positions that + * you want to observe. * - * @return A bit mask that controls the frame position of the video observer: VIDEO_OBSERVER_POSITION. + * @return A bit mask that controls the frame position of the video observer: + * VIDEO_OBSERVER_POSITION. 
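A sketch of a frame observer wired to the knobs documented above. Only a subset of the pure-virtual callbacks is shown, so the fragment stays abstract; type names are taken from the declarations above and everything else is hypothetical.

    class MyVideoFrameObserver : public agora::media::IVideoFrameObserver {
     public:
      bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE /*sourceType*/,
                               agora::media::base::VideoFrame& frame) override {
        // Read-write mode: pre-process `frame` in place before it reaches the encoder.
        return true;  // returning false tells the SDK to ignore this frame
      }
      VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() override { return PROCESS_MODE_READ_WRITE; }
      agora::media::base::VIDEO_PIXEL_FORMAT getVideoFormatPreference() override {
        return agora::media::base::VIDEO_PIXEL_I420;  // request I420 instead of the platform default
      }
      bool getRotationApplied() override { return true; }  // rotate per VideoFrame::rotation
      uint32_t getObservedFramePosition() override {
        // Observe the post-capture and pre-encoder positions (see the mask list above).
        return agora::media::base::POSITION_POST_CAPTURER | agora::media::base::POSITION_PRE_ENCODER;
      }
    };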
*/ virtual uint32_t getObservedFramePosition() { return base::POSITION_POST_CAPTURER | base::POSITION_PRE_RENDERER; @@ -1854,7 +1952,8 @@ enum RecorderReasonCode { */ RECORDER_REASON_WRITE_FAILED = 1, /** - * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams are interrupted for more than five seconds during recording. + * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams + * are interrupted for more than five seconds during recording. */ RECORDER_REASON_NO_STREAM = 2, /** @@ -1882,7 +1981,8 @@ struct MediaRecorderConfiguration { */ const char* storagePath; /** - * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat "MediaRecorderContainerFormat". + * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat + * "MediaRecorderContainerFormat". */ MediaRecorderContainerFormat containerFormat; /** @@ -1900,23 +2000,70 @@ struct MediaRecorderConfiguration { * callback to report the updated recording information. */ int recorderInfoUpdateInterval; - - MediaRecorderConfiguration() : storagePath(NULL), containerFormat(FORMAT_MP4), streamType(STREAM_TYPE_BOTH), maxDurationMs(120000), recorderInfoUpdateInterval(0) {} - MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, MediaRecorderStreamType type, int duration, int interval) : storagePath(path), containerFormat(format), streamType(type), maxDurationMs(duration), recorderInfoUpdateInterval(interval) {} + /** + * The video width + */ + int width; + /** + * The video height + */ + int height; + /** + * The video fps + */ + int fps; + /** + * The audio sample rate + */ + int sample_rate; + /** + * The audio channel nums + */ + int channel_num; + /** + * The video source just for out channel recoder + */ + agora::rtc::VIDEO_SOURCE_TYPE videoSourceType; + + MediaRecorderConfiguration() + : storagePath(NULL), + containerFormat(FORMAT_MP4), + streamType(STREAM_TYPE_BOTH), + maxDurationMs(120000), + recorderInfoUpdateInterval(0), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} + MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, + MediaRecorderStreamType type, int duration, int interval) + : storagePath(path), + containerFormat(format), + streamType(type), + maxDurationMs(duration), + recorderInfoUpdateInterval(interval), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} }; class IFaceInfoObserver { -public: - /** - * Occurs when the face info is received. - * @param outFaceInfo The output face info. - * @return - * - true: The face info is valid. - * - false: The face info is invalid. + public: + /** + * Occurs when the face info is received. + * @param outFaceInfo The output face info. + * @return + * - true: The face info is valid. + * - false: The face info is invalid. 
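A sketch of the extended MediaRecorderConfiguration above; only the fields added in this header revision are set explicitly, and the storage path is a placeholder.

    agora::media::MediaRecorderConfiguration config;   // MP4, both streams, 120 s by default
    config.storagePath = "/sdcard/recording.mp4";      // placeholder path
    config.width = 960;                                // new field, defaults to 1280
    config.height = 540;                               // new field, defaults to 720
    config.fps = 15;                                   // new field, defaults to 30
    config.sample_rate = 48000;                        // new field
    config.channel_num = 1;                            // new field
    config.videoSourceType = agora::rtc::VIDEO_SOURCE_CAMERA_PRIMARY;  // new field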
*/ - virtual bool onFaceInfo(const char* outFaceInfo) = 0; - - virtual ~IFaceInfoObserver() {} + virtual bool onFaceInfo(const char* outFaceInfo) = 0; + + virtual ~IFaceInfoObserver() {} }; /** @@ -1939,7 +2086,8 @@ struct RecorderInfo { unsigned int fileSize; RecorderInfo() : fileName(NULL), durationMs(0), fileSize(0) {} - RecorderInfo(const char* name, unsigned int dur, unsigned int size) : fileName(name), durationMs(dur), fileSize(size) {} + RecorderInfo(const char* name, unsigned int dur, unsigned int size) + : fileName(name), durationMs(dur), fileSize(size) {} }; class IMediaRecorderObserver { @@ -1949,30 +2097,35 @@ class IMediaRecorderObserver { * * @since v4.0.0 * - * When the local audio and video recording state changes, the SDK triggers this callback to report the current - * recording state and the reason for the change. + * When the local audio and video recording state changes, the SDK triggers this callback to + * report the current recording state and the reason for the change. * * @param channelId The channel name. * @param uid ID of the user. * @param state The current recording state. See \ref agora::media::RecorderState "RecorderState". - * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode "RecorderReasonCode". + * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode + * "RecorderReasonCode". */ - virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, RecorderReasonCode reason) = 0; + virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, + RecorderReasonCode reason) = 0; /** * Occurs when the recording information is updated. * * @since v4.0.0 * - * After you successfully register this callback and enable the local audio and video recording, the SDK periodically triggers - * the `onRecorderInfoUpdated` callback based on the set value of `recorderInfoUpdateInterval`. This callback reports the - * filename, duration, and size of the current recording file. + * After you successfully register this callback and enable the local audio and video recording, + * the SDK periodically triggers the `onRecorderInfoUpdated` callback based on the set value of + * `recorderInfoUpdateInterval`. This callback reports the filename, duration, and size of the + * current recording file. * * @param channelId The channel name. * @param uid ID of the user. - * @param info Information about the recording file. See \ref agora::media::RecorderInfo "RecorderInfo". + * @param info Information about the recording file. See \ref agora::media::RecorderInfo + * "RecorderInfo". * */ - virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, const RecorderInfo& info) = 0; + virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, + const RecorderInfo& info) = 0; virtual ~IMediaRecorderObserver() {} }; diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h new file mode 100644 index 000000000..79a8db35e --- /dev/null +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/IAgoraMediaRecorder.h @@ -0,0 +1,89 @@ +// +// Agora SDK +// +// Copyright (c) 2022 Agora.io. All rights reserved. 
+// +#pragma once // NOLINT(build/header_guard) + +#include "AgoraBase.h" +#include "AgoraMediaBase.h" + +namespace agora { +namespace rtc { + +class IMediaRecorder : public RefCountInterface { + protected: + virtual ~IMediaRecorder() {} + + public: + /** + * Registers the IMediaRecorderObserver object. + * + * @since v4.0.0 + * + * @note Call this method before the startRecording method. + * + * @param callback The callbacks for recording audio and video streams. See \ref IMediaRecorderObserver. + * + * @return + * - 0(ERR_OK): Success. + * - < 0: Failure: + */ + virtual int setMediaRecorderObserver(media::IMediaRecorderObserver* callback) = 0; + /** + * Starts recording the local or remote audio and video. + * + * @since v4.0.0 + * + * After successfully calling \ref IRtcEngine::createMediaRecorder "createMediaRecorder" to get the media recorder object + * , you can call this method to enable the recording of the local audio and video. + * + * This method can record the following content: + * - The audio captured by the local microphone and encoded in AAC format. + * - The video captured by the local camera and encoded by the SDK. + * - The audio received from remote users and encoded in AAC format. + * - The video received from remote users. + * + * The SDK can generate a recording file only when it detects the recordable audio and video streams; when there are + * no audio and video streams to be recorded or the audio and video streams are interrupted for more than five + * seconds, the SDK stops recording and triggers the + * \ref IMediaRecorderObserver::onRecorderStateChanged "onRecorderStateChanged" (RECORDER_STATE_ERROR, RECORDER_ERROR_NO_STREAM) + * callback. + * + * @note Call this method after joining the channel. + * + * @param config The recording configurations. See MediaRecorderConfiguration. + * + * @return + * - 0(ERR_OK): Success. + * - < 0: Failure: + * - `-1(ERR_FAILED)`: IRtcEngine does not support the request because the remote user did not subscribe to the target channel or the media streams published by the local user during remote recording. + * - `-2(ERR_INVALID_ARGUMENT)`: The parameter is invalid. Ensure the following: + * - The specified path of the recording file exists and is writable. + * - The specified format of the recording file is supported. + * - The maximum recording duration is correctly set. + * - During remote recording, ensure the user whose media streams you want record did join the channel. + * - `-4(ERR_NOT_SUPPORTED)`: IRtcEngine does not support the request due to one of the following reasons: + * - The recording is ongoing. + * - The recording stops because an error occurs. + * - No \ref IMediaRecorderObserver object is registered. + */ + virtual int startRecording(const media::MediaRecorderConfiguration& config) = 0; + /** + * Stops recording the audio and video. + * + * @since v4.0.0 + * + * @note After calling \ref IMediaRecorder::startRecording "startRecording", if you want to stop the recording, + * you must call `stopRecording`; otherwise, the generated recording files might not be playable. + * + * + * @return + * - 0(ERR_OK): Success. 
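A sketch of the call order required by the comments above: register the observer before starting, and stop the recording to finalize the file. The recorder is assumed to come from IRtcEngine::createMediaRecorder, and the observer implementation is hypothetical.

    int startLocalRecording(const agora::agora_refptr<agora::rtc::IMediaRecorder>& recorder,
                            agora::media::IMediaRecorderObserver* observer) {
      recorder->setMediaRecorderObserver(observer);    // must precede startRecording
      agora::media::MediaRecorderConfiguration config;
      config.storagePath = "/sdcard/recording.mp4";    // placeholder path
      return recorder->startRecording(config);         // 0 on success; call stopRecording() later
    }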
+ * - < 0: Failure: + */ + virtual int stopRecording() = 0; +}; + +} //namespace rtc +} // namespace agora diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraAudioTrack.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraAudioTrack.h index e877e370e..d184dd68b 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraAudioTrack.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraAudioTrack.h @@ -407,7 +407,7 @@ class ILocalAudioTrack : public IAudioTrack { * @param number : the buffer number set,unit is 10ms * */ - virtual void setMaxBufferedAudioFrameNumber(int number) = 0; + virtual void setMaxBufferedAudioFrameNumber(int number, aosl_ref_t ares = AOSL_REF_INVALID) = 0; /** clear sender buffer * @@ -595,6 +595,11 @@ struct RemoteAudioTrackStats { uint64_t publish_duration; int32_t e2e_delay_ms; + + /** + * e2e_delay_ calculated by the new algorithm + */ + int32_t new_e2e_delay_ms; /** * Quality of experience (QoE) of the local user when receiving a remote audio stream. See #EXPERIENCE_QUALITY_TYPE. */ @@ -652,11 +657,83 @@ struct RemoteAudioTrackStats { total_active_time(0), publish_duration(0), e2e_delay_ms(0), + new_e2e_delay_ms(0), qoe_quality(0), quality_changed_reason(0), downlink_effect_type(0) {} }; + +/** + * Properties of receive parameters for IAudioEncodedFrameReceiver + * + */ +struct AudioEncFrameRecvParams { + + /** + * The callback mode of IAudioEncodedFrameReceiver + */ + enum ReceiveMode { + /** + * IAudioEncodedFrameReceiver callback the down-link audio packet directly + */ + ORIGINAL = 0, + + /** + * IAudioEncodedFrameReceiver whill callback the original down-link audio packet while + * the codec of down-link packet is same as target_codec. + * + * Othewise convert down-link audio packet to new target packet which parameter contain + * the combination of (target_codec, target_sample_rate, target_sample_rate). + */ + MATCHED_CODEC = 1, + + /** + * IAudioEncodedFrameReceiver whill callback the original down-link audio packet while + * the combination of (codec, sampling rate, channels) of down-link packet is same as + * the combination of (target_codec, target_sample_rate, target_sample_rate). + * + * Othewise convert down-link audio packet to new target packet which parameter contain + * the combination of (target_codec, target_sample_rate, target_sample_rate). + */ + MATCHED_ALL = 2, + }; + + /** + * The trans mode of audio packet + */ + ReceiveMode receive_mode; + + /** + * The audio codec of target audio packet + */ + AUDIO_CODEC_TYPE target_codec; + + /** + * The sample rate HZ of target audio packet + */ + int32_t target_sample_rate; + + /** + * The channel numbers of target audio packet + */ + int32_t target_num_channels; + + + AudioEncFrameRecvParams() : + receive_mode(ORIGINAL), + target_codec(AUDIO_CODEC_AACLC), + target_sample_rate(0), + target_num_channels(0) {} + + AudioEncFrameRecvParams(const AudioEncFrameRecvParams& src_params) { + receive_mode = src_params.receive_mode; + target_codec = src_params.target_codec; + target_sample_rate = src_params.target_sample_rate; + target_num_channels = src_params.target_num_channels; + } +}; + /** * The IRemoteAudioTrack class. */ @@ -708,12 +785,14 @@ class IRemoteAudioTrack : public IAudioTrack { * audio packet. * * @param packetReceiver The pointer to the `IAudioEncodedFrameReceiver` object. + * @param recvParams The parameter * @return * - 0: Success. * - < 0: Failure. 
*/ - virtual int registerAudioEncodedFrameReceiver(IAudioEncodedFrameReceiver* packetReceiver, aosl_ref_t ares = AOSL_REF_INVALID) = 0; - + virtual int registerAudioEncodedFrameReceiver(IAudioEncodedFrameReceiver* packetReceiver, + const AudioEncFrameRecvParams& recvParams, + aosl_ref_t ares = AOSL_REF_INVALID) = 0; /** * Releases the `IAudioEncodedFrameReceiver` object. * @param packetReceiver The pointer to the `IAudioEncodedFrameReceiver` object. diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraExtensionProvider.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraExtensionProvider.h index 4dff21248..c7f57c5a3 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraExtensionProvider.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraExtensionProvider.h @@ -39,6 +39,9 @@ class IExtensionProvider : public RefCountInterface { * note: discarded, Don't use it anymore. */ AUDIO_FILTER, + /** + * note: discarded, Don't use it anymore. + */ VIDEO_PRE_PROCESSING_FILTER, VIDEO_POST_PROCESSING_FILTER, AUDIO_SINK, @@ -79,6 +82,18 @@ class IExtensionProvider : public RefCountInterface { * Used to modify local playback audio data after the remote audio mixed. */ AUDIO_REMOTE_MIXED_PLAYBACK_FILTER = 10006, + /* + * Used to modify video data betweent capturer and post-capture observer + */ + VIDEO_POST_CAPTURE_FILTER = 20001, + /* + * Used to modify video data betweent post-capture observer and preview + */ + VIDEO_PRE_PREVIEW_FILTER = 20002, + /* + * Used to modify video data betweent adapter and encoder + */ + VIDEO_PRE_ENCODER_FILTER = 20003, UNKNOWN = 0xFFFF, }; diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraLocalUser.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraLocalUser.h index 2db3c9dcf..debc52706 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraLocalUser.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraLocalUser.h @@ -1146,13 +1146,11 @@ class ILocalUser { * * @param track The special mixed audio track. * @param enalble Action of start mixing this user's audio. - * @param MixLocal Mix publish stream. - * @param MixRemote Mix remote stream. * @return * - 0: success * - <0: failure */ - virtual int EnableLocalMixedAudioTrack(agora_refptr& track, bool enable, bool MixLocal, bool MixRemote, aosl_ref_t ares = AOSL_REF_INVALID) = 0; + virtual int RegisterLocalMixedAudioTrack(agora_refptr& track, bool enable, aosl_ref_t ares = AOSL_REF_INVALID) = 0; /** * Trigger data channel update callback with all data channel infos. 
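A sketch of the new registerAudioEncodedFrameReceiver overload with AudioEncFrameRecvParams: ask the track to convert any non-matching downlink packets to 48 kHz stereo AAC-LC. `remoteTrack` and `receiver` are hypothetical.

    agora::rtc::AudioEncFrameRecvParams params;
    params.receive_mode = agora::rtc::AudioEncFrameRecvParams::MATCHED_ALL;
    params.target_codec = agora::rtc::AUDIO_CODEC_AACLC;
    params.target_sample_rate = 48000;
    params.target_num_channels = 2;
    remoteTrack->registerAudioEncodedFrameReceiver(receiver, params);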
* @@ -1172,6 +1170,16 @@ class ILocalUser { * @technical preview */ virtual int sendAudioMetadata(const char* metadata, size_t length, aosl_ref_t ares = AOSL_REF_INVALID) = 0; + /** + * set remote render target fps + * + * @param target_fps The target fps + * @return + * - 0: success + * - <0: failure + * @technical preview + */ + virtual int setRemoteRenderTargetFps(int targetFps) = 0; }; /** diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraMediaNodeFactory.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraMediaNodeFactory.h index a22242b23..955dfeab8 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraMediaNodeFactory.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraMediaNodeFactory.h @@ -29,7 +29,7 @@ class IMediaPlayerSource; class IMediaPacketSender; class IMediaStreamingSource; class IScreenCapturer2; - +class IMediaRecorder2; /** * The `IMediaNodeFactory` class. @@ -39,7 +39,9 @@ class IMediaNodeFactory : public RefCountInterface { /** * Creates a PCM audio data sender. * - * This method creates an `IAudioPcmDataSender` object, which can be used by \ref agora::base::IAgoraService::createCustomAudioTrack(agora_refptr< rtc::IAudioPcmDataSender > audioSource) "createCustomAudioTrack" to send PCM audio data. + * This method creates an `IAudioPcmDataSender` object, which can be used by \ref + * agora::base::IAgoraService::createCustomAudioTrack(agora_refptr< rtc::IAudioPcmDataSender > + * audioSource) "createCustomAudioTrack" to send PCM audio data. * * @return * - The pointer to \ref agora::rtc::IAudioPcmDataSender "IAudioPcmDataSender": Success. @@ -50,7 +52,9 @@ class IMediaNodeFactory : public RefCountInterface { /** * Creates an encoded audio data sender. * - * This method creates an IAudioEncodedFrameSender object, which can be used by \ref agora::base::IAgoraService::createCustomAudioTrack(agora_refptr< rtc::IAudioEncodedFrameSender > audioSource, TMixMode mixMode) "createCustomAudioTrack" to send encoded audio data. + * This method creates an IAudioEncodedFrameSender object, which can be used by \ref + * agora::base::IAgoraService::createCustomAudioTrack(agora_refptr< rtc::IAudioEncodedFrameSender + * > audioSource, TMixMode mixMode) "createCustomAudioTrack" to send encoded audio data. * * @return * - The pointer to IAudioEncodedFrameSender: Success. @@ -84,7 +88,7 @@ class IMediaNodeFactory : public RefCountInterface { virtual agora_refptr createScreenCapturer() = 0; #endif - /** + /** * Creates a video mixer. * * Once a video mixer object is created, you can use the video mixer data as the custom video @@ -99,8 +103,8 @@ class IMediaNodeFactory : public RefCountInterface { /** * Creates a video transceiver. * - * Once a video transceiver object is created, you can use the video transceiver data as the custom video - * source. + * Once a video transceiver object is created, you can use the video transceiver data as the + * custom video source. * * @return * - The pointer to IVideoFrameTransceiver: Success. @@ -111,8 +115,9 @@ class IMediaNodeFactory : public RefCountInterface { /** * Creates a video frame sender. * - * This method creates an `IVideoFrameSender` object, which can be used by \ref agora::base::IAgoraService::createCustomVideoTrack(agora_refptr videoSource) "createCustomVideoTrack" to - * send the custom video data. 
+ * This method creates an `IVideoFrameSender` object, which can be used by \ref + * agora::base::IAgoraService::createCustomVideoTrack(agora_refptr + * videoSource) "createCustomVideoTrack" to send the custom video data. * * @return * - The pointer to \ref agora::rtc::IVideoFrameSender "IVideoFrameSender": Success. @@ -123,7 +128,9 @@ class IMediaNodeFactory : public RefCountInterface { /** * Creates an encoded video image sender. * - * This method creates an `IVideoEncodedImageSender` object, which can be used by \ref agora::base::IAgoraService::createCustomVideoTrack(agora_refptr videoSource, SenderOptions& options) "createCustomVideoTrack" to send the encoded video data. + * This method creates an `IVideoEncodedImageSender` object, which can be used by \ref + * agora::base::IAgoraService::createCustomVideoTrack(agora_refptr + * videoSource, SenderOptions& options) "createCustomVideoTrack" to send the encoded video data. * * @return * - The pointer to `IVideoEncodedImageSender`: Success. @@ -145,8 +152,8 @@ class IMediaNodeFactory : public RefCountInterface { /** * Creates an audio filter for the extension. * - * This method creates an `IAudioFilter` object, which can be used to filter the audio data from the - * inside extension. + * This method creates an `IAudioFilter` object, which can be used to filter the audio data from + * the inside extension. * * @param provider_name provider name string. * @param extension_name extension name string. @@ -154,7 +161,8 @@ class IMediaNodeFactory : public RefCountInterface { * - The pointer to IAudioFilter: Success. * - A null pointer: Failure. */ - virtual agora_refptr createAudioFilter(const char* provider_name, const char* extension_name) = 0; + virtual agora_refptr createAudioFilter(const char* provider_name, + const char* extension_name) = 0; /** * Creates a video filter for the extension. @@ -168,7 +176,8 @@ class IMediaNodeFactory : public RefCountInterface { * - The pointer to IVideoFilter: Success. * - A null pointer: Failure. */ - virtual agora_refptr createVideoFilter(const char* provider_name, const char* extension_name) = 0; + virtual agora_refptr createVideoFilter(const char* provider_name, + const char* extension_name) = 0; /** * Creates a video sink for the extension. @@ -182,7 +191,8 @@ class IMediaNodeFactory : public RefCountInterface { * - The pointer to IVideoSinkBase: Success. * - A null pointer: Failure. */ - virtual agora_refptr createVideoSink(const char* provider_name, const char* extension_name) = 0; + virtual agora_refptr createVideoSink(const char* provider_name, + const char* extension_name) = 0; /** * Creates a media player source object and returns the pointer. @@ -194,8 +204,10 @@ class IMediaNodeFactory : public RefCountInterface { * succeeds. * - A null pointer: Failure. */ - virtual agora_refptr createMediaPlayerSource(media::base::MEDIA_PLAYER_SOURCE_TYPE type = agora::media::base::MEDIA_PLAYER_SOURCE_DEFAULT) = 0; - + virtual agora_refptr createMediaPlayerSource( + media::base::MEDIA_PLAYER_SOURCE_TYPE type = + agora::media::base::MEDIA_PLAYER_SOURCE_DEFAULT) = 0; + /** * @brief Creates a media streaming source object and returns the pointer. 
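A hedged sketch of the factory calls whose declarations are re-wrapped above, assuming `factory` is an agora_refptr<IMediaNodeFactory> obtained from the service and that the provider/extension names are placeholders:

    // Extension filters are looked up by provider and extension name; a null pointer means failure.
    auto video_filter = factory->createVideoFilter("com.example.provider", "example_video_filter");
    auto audio_filter = factory->createAudioFilter("com.example.provider", "example_audio_filter");
    // Media player source created with the default type (MEDIA_PLAYER_SOURCE_DEFAULT).
    auto player_source = factory->createMediaPlayerSource();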
* @@ -218,6 +230,8 @@ class IMediaNodeFactory : public RefCountInterface { */ virtual agora_refptr createMediaPacketSender() = 0; + virtual agora_refptr createMediaRecorder() = 0; + #if defined(__ANDROID__) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) /** * Creates screen capture source extension with given provider&extension names @@ -227,7 +241,8 @@ class IMediaNodeFactory : public RefCountInterface { * - The pointer to IScreenCapturer: Success. * - A null pointer: Failure. */ - virtual agora_refptr createScreenCapturer2(const char* provider_name, const char* extension_name) = 0; + virtual agora_refptr createScreenCapturer2(const char* provider_name, + const char* extension_name) = 0; #else /** * Creates screen capture source extension with given provider&extension names @@ -237,7 +252,8 @@ class IMediaNodeFactory : public RefCountInterface { * - The pointer to IScreenCapturer: Success. * - A null pointer: Failure. */ - virtual agora_refptr createScreenCapturer(const char* provider_name, const char* extension_name) = 0; + virtual agora_refptr createScreenCapturer(const char* provider_name, + const char* extension_name) = 0; #endif protected: diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h index 2959d06da..eec6afb43 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraRtcConnection.h @@ -74,7 +74,8 @@ struct TConnectSettings { * callback. Your app must record and maintain the `userId` since the SDK does not do so. */ user_id_t userId; - + /*Reserved for future use*/ + const char* info; /* App can provide a app defined start time to trace some events like connect cost , first video, etc. */ @@ -212,7 +213,7 @@ class IRtcConnection : public RefCountInterface { * - -2(ERR_INVALID_ARGUMENT): The argument that you pass is invalid. * - -8(ERR_INVALID_STATE): The current connection state is not CONNECTION_STATE_DISCONNECTED(1). */ - virtual int connect(const char* token, const char* channelId, user_id_t userId, aosl_ref_t ares = AOSL_REF_INVALID) = 0; + virtual int connect(const char* token, const char* channelId, const char* info, user_id_t userId, aosl_ref_t ares = AOSL_REF_INVALID) = 0; /** * Connects to an Agora channel. diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraScreenCapturer.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraScreenCapturer.h index 3204f9916..526e07e0d 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraScreenCapturer.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraScreenCapturer.h @@ -36,7 +36,7 @@ class IScreenCapturer : public RefCountInterface { * - 0: Success. * - < 0: Failure. */ - virtual int initWithDisplayId(uint32_t displayId, const Rectangle& regionRect) = 0; + virtual int initWithDisplayId(int64_t displayId, const Rectangle& regionRect) = 0; #endif #if defined(_WIN32) || (defined(__linux__) && !defined(__ANDROID__)) @@ -194,6 +194,26 @@ class IScreenCapturer2 : public RefCountInterface { */ virtual int setAudioVolume(uint32_t volume, aosl_ref_t ares = AOSL_REF_INVALID) = 0; +#if defined(__ANDROID__) + /** + * Sets screen sharing using the Android native class MediaProjection. 
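A sketch of the updated IRtcConnection::connect() signature, which now takes an additional `info` string between the channel ID and the user ID; `connection` is assumed to be an agora_refptr<IRtcConnection>, and passing an empty `info` string is an assumption since the matching TConnectSettings::info field is documented as reserved:

    const char* token = "<token generated on your server>";  // placeholder
    // Returns 0 on success; -2 (ERR_INVALID_ARGUMENT) or -8 (ERR_INVALID_STATE) on failure.
    int ret = connection->connect(token, "demo_channel", /*info=*/"", /*userId=*/"1001");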
+ * + * When screen capture stopped, the SDK will automatically release the MediaProjection internally. + * + * @param mediaProjection MediaProjection is an Android class that provides access to screen capture and recording capabiliies. + * + * @note + * Additional MediaProjection is primarily used for specific scenarios, + * such as IOT custom devices or subprocess screen sharing. + * + * @return + * - 0: Success. + * - < 0: Failure. + * @technical preview + */ + virtual int setExternalMediaProjection(void* mediaProjection) = 0; +#endif + protected: virtual ~IScreenCapturer2() {} }; diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h index 7c8880de2..4b85bd44b 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/NGIAgoraSyncClient.h @@ -47,6 +47,12 @@ enum SyncEventType { kTransactionBegin = 6, kTransactionEnd = 7, kDocSyncEnd = 8, + kInitialized = 9 +}; + +enum OP_Privilege { + OP_READ, + OP_WRITE }; /** @@ -77,7 +83,7 @@ struct SyncConfig { uint32_t connection_timeout; /* compact interval in seconds */ uint32_t compact_interval; - SyncConfig() : shakehand_interval(1), connection_timeout(10), compact_interval(3600 * 1000) {} + SyncConfig() : appId(NULL), shakehand_interval(1), connection_timeout(10), compact_interval(3600 * 1000) {} }; class ISyncClient : public RefCountInterface { @@ -107,6 +113,8 @@ class ISyncClient : public RefCountInterface { virtual int32_t subscribe(const char* database, const char* collection, util::AString& snapshotJson) = 0; virtual int32_t unsubscribe(const char* database, const char* collection, aosl_ref_t ares = AOSL_REF_INVALID) = 0; + virtual int32_t addReadable(const char* database, const char* coll, const char* readable, aosl_ref_t ares = AOSL_REF_INVALID) = 0; + virtual int32_t removeReadable(const char* database, const char* coll, const char* readable, aosl_ref_t ares = AOSL_REF_INVALID) = 0; virtual int32_t putDoc(const char* database, const char* collection, const char* docName, aosl_ref_t ares = AOSL_REF_INVALID) = 0; virtual int32_t deleteDoc(const char* database, const char* collection, @@ -131,6 +139,8 @@ class ISyncClient : public RefCountInterface { const char* docName, const char* path, bool& result) = 0; virtual int32_t keepAliveDoc(const char* database, const char* collection, const char* docName, uint32_t ttl, aosl_ref_t ares = AOSL_REF_INVALID) = 0; + virtual bool isOpPermission(const char* database, const char* collection, + const char* docName, OP_Privilege op) = 0; // sync operations virtual int32_t shakehand(aosl_ref_t ares = AOSL_REF_INVALID) = 0; diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_defs.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_defs.h index ce2386549..2b4bc1066 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_defs.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_defs.h @@ -14,6 +14,14 @@ #define __AOSL_DEFS_H__ +#ifdef _MSC_VER +/* C4200: nonstandard extension used: zero sized array */ +#pragma warning (disable: 4200) +/* C4576: a parenthesized type followed by an initializer list is a non-standard explicit type conversion syntax */ +#pragma warning (disable: 4576) +#endif + + #define aosl_stringify_1(x) #x #define aosl_stringify(x) 
aosl_stringify_1(x) diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_ref.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_ref.h index 4a2f439fa..e29243cb3 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_ref.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_ref.h @@ -58,7 +58,7 @@ extern __aosl_api__ aosl_ref_t aosl_ref_create (void *arg, aosl_ref_dtor_t dtor, /** * The ref object callback function prototype. - * Parameter: + * Parameters: * arg: the ref object argument which was passed in when creating; * argc: specify the argv array elements count, the same as the argc * when invoking aosl_ref_[get|read|write] functions; @@ -71,7 +71,7 @@ typedef void (*aosl_ref_func_t) (void *arg, uintptr_t argc, uintptr_t argv []); /** * Hold the ref object, and invoke the specified callback function. - * Parameter: + * Parameters: * ref: the ref object id; * f: the callback function; * argc: the args count @@ -86,7 +86,7 @@ extern __aosl_api__ int aosl_ref_hold_argv (aosl_ref_t ref, aosl_ref_func_t f, u /** * Hold the ref object and read lock it, then invoke the specified callback function. - * Parameter: + * Parameters: * ref: the ref object id; * f: the callback function; * argc: the args count @@ -101,7 +101,7 @@ extern __aosl_api__ int aosl_ref_read_argv (aosl_ref_t ref, aosl_ref_func_t f, u /** * Hold the ref object and write lock it, then invoke the specified callback function. - * Parameter: + * Parameters: * ref: the ref object id; * f: the callback function; * argc: the args count @@ -116,7 +116,7 @@ extern __aosl_api__ int aosl_ref_write_argv (aosl_ref_t ref, aosl_ref_func_t f, /** * Hold the ref object and set it unsafe, then invoke the specified callback function. - * Parameter: + * Parameters: * ref: the ref object id; * f: the callback function; * argc: the args count @@ -131,7 +131,7 @@ extern __aosl_api__ int aosl_ref_unsafe_argv (aosl_ref_t ref, aosl_ref_func_t f, /** * Hold the ref object and set it maystall, then invoke the specified callback function. - * Parameter: + * Parameters: * ref: the ref object id; * f: the callback function; * argc: the args count @@ -152,7 +152,7 @@ typedef void *aosl_refobj_t; /** * Retrieve the ref object arg. - * Parameters: + * Parameter: * robj: the reference object; * Return value: * the ref object arg; @@ -161,7 +161,7 @@ extern __aosl_api__ void *aosl_refobj_arg (aosl_refobj_t robj); /** * Get the ref id of the specified ref object. - * Parameters: + * Parameter: * robj: the reference object; * Return value: * the ref id. @@ -170,7 +170,7 @@ extern __aosl_api__ aosl_ref_t aosl_refobj_id (aosl_refobj_t robj); /** * Make sure read lock the ref object specified by robj, then invoke the specified callback function. - * Parameter: + * Parameters: * robj: the ref object itself; * f: the callback function; * argc: the args count @@ -185,7 +185,7 @@ extern __aosl_api__ int aosl_refobj_read_argv (aosl_refobj_t robj, aosl_ref_func /** * Make sure set the ref object specified by robj unsafe, then invoke the specified callback function. - * Parameter: + * Parameters: * robj: the ref object itself; * f: the callback function; * argc: the args count @@ -200,7 +200,7 @@ extern __aosl_api__ int aosl_refobj_unsafe_argv (aosl_refobj_t robj, aosl_ref_fu /** * Make sure set the ref object specified by robj maystall, then invoke the specified callback function. 
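For orientation, a minimal sketch of the hold-and-callback pattern these corrected comments describe; the ref object is assumed to have been created earlier with aosl_ref_create, and the payload value is illustrative:

    // Callback matching aosl_ref_func_t: arg is the pointer passed to aosl_ref_create,
    // argv carries the values supplied to aosl_ref_hold_argv below.
    static void on_held(void *arg, uintptr_t argc, uintptr_t argv[]) {
      (void)arg; (void)argc; (void)argv;
    }

    void hold_example(aosl_ref_t ref) {
      uintptr_t args[1] = { 42 };
      // Hold the ref object and run the callback; <0 means the hold failed (e.g. already destroyed).
      if (aosl_ref_hold_argv(ref, on_held, 1, args) < 0) {
        /* handle failure */
      }
    }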
- * Parameter: + * Parameters: * robj: the ref object itself; * f: the callback function; * argc: the args count @@ -239,7 +239,7 @@ extern __aosl_api__ int aosl_ref_set_scope (aosl_ref_t ref, aosl_ref_t scope_ref /** * Destroy the reference object specified by ref. - * Parameter: + * Parameters: * ref: the reference object id * do_delete: 0 for just marking it destroyed * non-0 value for deleting it @@ -249,6 +249,77 @@ extern __aosl_api__ int aosl_ref_set_scope (aosl_ref_t ref, aosl_ref_t scope_ref **/ extern __aosl_api__ int aosl_ref_destroy (aosl_ref_t ref, int do_delete); +/** + * The proto for a ref destroy async exec callback function. + * Parameters: + * err: 0 for destroy ref object successfully, <0 for error code; + * argc: the args count passed by exec series functions; + * argv: args vector passed by exec series functions; + * Return value: + * none. + **/ +typedef void (*aosl_ref_destroy_exec_f) (int err, uintptr_t argc, uintptr_t argv []); + +/** + * Execute the specified function asynchronously in thread pool before destroying + * the reference object specified by ref, this function supports coroutine. + * Parameters: + * ref: the reference object id; + * ares: ares object if you want to wait the execution of function f, + * specify AOSL_REF_INVALID when you do not want to wait; + * f: the target function which will be executed in thread pool + * after destroyed the ref object; + * argc: the args count; + * ...: variable args; + * Return value: + * 0: success + * <0: failed, and aosl_errno indicates what error occurs + * Remarks: + * If ares is AOSL_REF_INVALID and the invoking thread is an mpq thread, + * then this function will support coroutine resume mechanism. + **/ +extern __aosl_api__ int aosl_ref_destroy_exec (aosl_ref_t ref, aosl_ref_t ares, aosl_ref_destroy_exec_f f, uintptr_t argc, ...); + +/** + * Execute the specified function asynchronously in thread pool before destroying + * the reference object specified by ref, this function supports coroutine. + * Parameters: + * ref: the reference object id; + * ares: ares object if you want to wait the execution of function f, + * specify AOSL_REF_INVALID when you do not want to wait; + * f: the target function which will be executed in thread pool + * after destroyed the ref object; + * argc: the args count; + * args: variable args; + * Return value: + * 0: success + * <0: failed, and aosl_errno indicates what error occurs + * Remarks: + * If ares is AOSL_REF_INVALID and the invoking thread is an mpq thread, + * then this function will support coroutine resume mechanism. + **/ +extern __aosl_api__ int aosl_ref_destroy_exec_args (aosl_ref_t ref, aosl_ref_t ares, aosl_ref_destroy_exec_f f, uintptr_t argc, va_list args); + +/** + * Execute the specified function asynchronously in thread pool before destroying + * the reference object specified by ref, this function supports coroutine. + * Parameters: + * ref: the reference object id; + * ares: ares object if you want to wait the execution of function f, + * specify AOSL_REF_INVALID when you do not want to wait; + * f: the target function which will be executed in thread pool + * after destroyed the ref object; + * argc: the args count; + * argv: variable args vector; + * Return value: + * 0: success + * <0: failed, and aosl_errno indicates what error occurs + * Remarks: + * If ares is AOSL_REF_INVALID and the invoking thread is an mpq thread, + * then this function will support coroutine resume mechanism. 
+ **/ +extern __aosl_api__ int aosl_ref_destroy_exec_argv (aosl_ref_t ref, aosl_ref_t ares, aosl_ref_destroy_exec_f f, uintptr_t argc, uintptr_t argv []); + #ifdef __cplusplus diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h index 6afa42c98..d2a458686 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/aosl_types.h @@ -19,6 +19,9 @@ #include #if defined (__linux__) || defined (__MACH__) || defined (__kliteos2__) || defined (_WIN32) #include +#if defined (__linux__) || defined (__MACH__) +#include +#endif #endif #include @@ -88,6 +91,29 @@ static __inline__ int aosl_fd_invalid (aosl_fd_t fd) #endif +#ifndef _WIN32 +typedef aosl_fd_t aosl_sk_t; +#define AOSL_INVALID_SK AOSL_INVALID_FD +#else +typedef SOCKET aosl_sk_t; +#define AOSL_INVALID_SK ((aosl_sk_t)INVALID_SOCKET) +#endif + + +#if !defined (_WIN32) && !defined (__kspreadtrum__) +typedef struct iovec aosl_miov_t; +#else +typedef struct { + void *iov_base; + size_t iov_len; +} aosl_miov_t; +#endif + +#ifndef UIO_MAXIOV +#define UIO_MAXIOV 1024 +#endif + + #ifdef __cplusplus } #endif diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ares_class.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ares_class.h index 634096300..0e52b13e9 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ares_class.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ares_class.h @@ -74,7 +74,7 @@ class aosl_ares_class: public aosl_ref_class { return 0; } -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) private: aosl_ares_class (const aosl_ares_class &) = delete; aosl_ares_class (aosl_ares_class &&) = delete; diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_poll_class.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_poll_class.h index 54d1a0560..5d111368c 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_poll_class.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_poll_class.h @@ -20,7 +20,7 @@ #include #include -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) #include #include #endif @@ -40,7 +40,7 @@ class aosl_poll_class { poll_refs [tail.ref ()] = &tail; } -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) template void add (const T &head, const Targs&... 
rest) { @@ -118,7 +118,7 @@ class aosl_poll_class { return NULL; } -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) private: aosl_poll_class (const aosl_poll_class &) = delete; aosl_poll_class (aosl_poll_class &&) = delete; diff --git a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h index 4dab3878b..80c3dee52 100644 --- a/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h +++ b/Android/APIExample/agora-simple-filter/src/main/cpp/AgoraRtcKit/api/cpp/aosl_ref_class.h @@ -31,7 +31,7 @@ #include #endif -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) #include #include typedef std::function aosl_ref_lambda_f; @@ -39,6 +39,7 @@ typedef std::function ao typedef std::function aosl_ref_mpq_lambda_0arg_f; typedef std::function aosl_async_prepare_lambda_f; typedef std::function aosl_async_resume_lambda_f; +typedef std::function aosl_ref_destroy_exec_lambda_f; #endif class aosl_ref_class { @@ -307,6 +308,25 @@ class aosl_ref_class { return -1; } + #if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) + int destroy_exec (aosl_ref_destroy_exec_lambda_f &&lambda_f, aosl_ref_t ares = AOSL_REF_INVALID) + { + aosl_ref_destroy_exec_lambda_f *task_obj = new aosl_ref_destroy_exec_lambda_f (std::move (lambda_f)); + int err = aosl_ref_destroy_exec (ref (), ares, ____ref_destroy_exec_f, 1, task_obj); + if (err < 0) + delete task_obj; + + return err; + } + private: + static void ____ref_destroy_exec_f (int err, uintptr_t argc, uintptr_t argv []) + { + aosl_ref_destroy_exec_lambda_f *task_obj = reinterpret_cast(argv [0]); + (*task_obj) (err); + delete task_obj; + } + #endif + #ifdef __AOSL_MPQ_H__ /* MPQ relative encapsulations */ public: @@ -499,7 +519,7 @@ class aosl_ref_class { #endif /* __AOSL_MPQ_H__ */ /* C++11 lambda encapsulations */ - #if (__cplusplus >= 201103) || defined (_MSC_VER) + #if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) public: int hold (aosl_ref_lambda_f &&lambda_f) { @@ -973,7 +993,7 @@ class aosl_ref_class { #endif /* __AOSL_ASYNC_H__ */ #endif /* C++11 */ - #if (__cplusplus >= 201103) || defined (_MSC_VER) + #if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) private: aosl_ref_t_oop (const aosl_ref_t_oop &) = delete; aosl_ref_t_oop (aosl_ref_t_oop &&) = delete; @@ -1326,6 +1346,13 @@ class aosl_ref_class { return err; } +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) + int destroy_exec (aosl_ref_destroy_exec_lambda_f &&lambda_f, aosl_ref_t ares = AOSL_REF_INVALID) + { + return refoop->destroy_exec (std::move (lambda_f), ares); + } +#endif + public: class deleter { public: @@ -1541,7 +1568,7 @@ class aosl_ref_class { #endif /* __AOSL_MPQ_H__ */ /* C++11 lambda encapsulations */ -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) public: int hold (aosl_ref_lambda_f &&lambda_f) { @@ -1780,8 +1807,6 @@ class aosl_ref_class { } public: - typedef std::function aosl_async_resume_lambda_f; - int resume (aosl_stack_id_t stack_id, const char *f_name, aosl_async_resume_lambda_f&& task) { return refoop->resume (stack_id, f_name, std::move (task)); @@ -1794,7 +1819,7 @@ class aosl_ref_class { #endif /* __AOSL_ASYNC_H__ */ #endif /* 
C++11 */ -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) private: aosl_ref_class (const aosl_ref_class &) = delete; aosl_ref_class (aosl_ref_class &&) = delete; @@ -1879,7 +1904,7 @@ class aosl_ref_unique_ptr { reset (); } -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) private: aosl_ref_unique_ptr (const aosl_ref_unique_ptr &) = delete; aosl_ref_unique_ptr &operator = (const aosl_ref_unique_ptr &) = delete; @@ -1923,7 +1948,7 @@ inline bool operator != (intptr_t _null, const aosl_ref_unique_ptr &p return (T_ref_cls *)_null != ptr.get (); } -#if (__cplusplus >= 201103) || defined (_MSC_VER) +#if (__cplusplus >= 201103) || (defined (_MSC_VER) && _MSC_VER >= 1800) template inline bool operator == (const aosl_ref_unique_ptr &ptr, std::nullptr_t) { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/agoraLibs b/Android/APIExample/agora-stream-encrypt/src/main/agoraLibs deleted file mode 120000 index a4d04c322..000000000 --- a/Android/APIExample/agora-stream-encrypt/src/main/agoraLibs +++ /dev/null @@ -1 +0,0 @@ -../../../../../sdk \ No newline at end of file diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h index 792137209..c3bfa34cb 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraBase.h @@ -559,7 +559,8 @@ enum ERROR_CODE_TYPE { /** * 101: The App ID is invalid, usually because the data format of the App ID is incorrect. * - * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to initialize the Agora service. + * Solution: Check the data format of your App ID. Ensure that you use the correct App ID to + * initialize the Agora service. */ ERR_INVALID_APP_ID = 101, /** @@ -578,9 +579,9 @@ enum ERROR_CODE_TYPE { * - Timeout for token authorization: Once a token is generated, you must use it to access the * Agora service within 24 hours. Otherwise, the token times out and you can no longer use it. * - The token privilege expires: To generate a token, you need to set a timestamp for the token - * privilege to expire. For example, If you set it as seven days, the token expires seven days after - * its usage. In that case, you can no longer access the Agora service. The users cannot make calls, - * or are kicked out of the channel. + * privilege to expire. For example, If you set it as seven days, the token expires seven days + * after its usage. In that case, you can no longer access the Agora service. The users cannot + * make calls, or are kicked out of the channel. * * Solution: Regardless of whether token authorization times out or the token privilege expires, * you need to generate a new token on your server, and try to join the channel. @@ -588,19 +589,19 @@ enum ERROR_CODE_TYPE { ERR_TOKEN_EXPIRED = 109, /** * 110: The token is invalid, usually for one of the following reasons: - * - Did not provide a token when joining a channel in a situation where the project has enabled the - * App Certificate. + * - Did not provide a token when joining a channel in a situation where the project has enabled + * the App Certificate. * - Tried to join a channel with a token in a situation where the project has not enabled the App * Certificate. 
- * - The App ID, user ID and channel name that you use to generate the token on the server do not match - * those that you use when joining a channel. + * - The App ID, user ID and channel name that you use to generate the token on the server do not + * match those that you use when joining a channel. * * Solution: - * - Before joining a channel, check whether your project has enabled the App certificate. If yes, you - * must provide a token when joining a channel; if no, join a channel without a token. - * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that you - * use to generate the token is the same as the App ID that you use to initialize the Agora service, and - * the user ID and channel name that you use to join the channel. + * - Before joining a channel, check whether your project has enabled the App certificate. If yes, + * you must provide a token when joining a channel; if no, join a channel without a token. + * - When using a token to join a channel, ensure that the App ID, user ID, and channel name that + * you use to generate the token is the same as the App ID that you use to initialize the Agora + * service, and the user ID and channel name that you use to join the channel. */ ERR_INVALID_TOKEN = 110, /** @@ -672,13 +673,15 @@ enum ERROR_CODE_TYPE { ERR_LICENSE_CREDENTIAL_INVALID = 131, /** - * 134: The user account is invalid, usually because the data format of the user account is incorrect. + * 134: The user account is invalid, usually because the data format of the user account is + * incorrect. */ ERR_INVALID_USER_ACCOUNT = 134, /** 157: The necessary dynamical library is not integrated. For example, if you call - * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do not integrate the dynamical - * library for the deep-learning noise reduction into your project, the SDK reports this error code. + * the \ref agora::rtc::IRtcEngine::enableDeepLearningDenoise "enableDeepLearningDenoise" but do + * not integrate the dynamical library for the deep-learning noise reduction into your project, + * the SDK reports this error code. * */ ERR_MODULE_NOT_FOUND = 157, @@ -698,7 +701,7 @@ enum ERROR_CODE_TYPE { ERR_CERT_REQUEST = 168, // PcmSend Error num - ERR_PCMSEND_FORMAT = 200, // unsupport pcm format + ERR_PCMSEND_FORMAT = 200, // unsupport pcm format ERR_PCMSEND_BUFFEROVERFLOW = 201, // buffer overflow, the pcm send rate too quickly /// @cond @@ -752,27 +755,27 @@ enum ERROR_CODE_TYPE { enum LICENSE_ERROR_TYPE { /** * 1: Invalid license - */ + */ LICENSE_ERR_INVALID = 1, /** * 2: License expired - */ + */ LICENSE_ERR_EXPIRE = 2, /** * 3: Exceed license minutes limit - */ + */ LICENSE_ERR_MINUTES_EXCEED = 3, /** * 4: License use in limited period - */ + */ LICENSE_ERR_LIMITED_PERIOD = 4, /** * 5: Same license used in different devices at the same time - */ + */ LICENSE_ERR_DIFF_DEVICES = 5, /** * 99: SDK internal error - */ + */ LICENSE_ERR_INTERNAL = 99, }; @@ -845,9 +848,9 @@ enum USER_OFFLINE_REASON_TYPE { */ USER_OFFLINE_QUIT = 0, /** - * 1: The SDK times out and the user drops offline because no data packet was received within a certain - * period of time. If a user quits the call and the message is not passed to the SDK (due to an - * unreliable channel), the SDK assumes that the user drops offline. + * 1: The SDK times out and the user drops offline because no data packet was received within a + * certain period of time. 
If a user quits the call and the message is not passed to the SDK (due + * to an unreliable channel), the SDK assumes that the user drops offline. */ USER_OFFLINE_DROPPED = 1, /** @@ -870,7 +873,7 @@ enum INTERFACE_ID_TYPE { AGORA_IID_STATE_SYNC = 13, AGORA_IID_META_SERVICE = 14, AGORA_IID_MUSIC_CONTENT_CENTER = 15, - AGORA_IID_H265_TRANSCODER = 16, + AGORA_IID_H265_TRANSCODER = 16, }; /** @@ -999,7 +1002,6 @@ enum FRAME_HEIGHT { FRAME_HEIGHT_540 = 540, }; - /** * Types of the video frame. */ @@ -1032,9 +1034,9 @@ enum ORIENTATION_MODE { ORIENTATION_MODE_ADAPTIVE = 0, /** * 1: Landscape mode. In this mode, the SDK always outputs videos in landscape (horizontal) mode. - * If the captured video is in portrait mode, the video encoder crops it to fit the output. Applies - * to situations where the receiving end cannot process the rotational information. For example, - * CDN live streaming. + * If the captured video is in portrait mode, the video encoder crops it to fit the output. + * Applies to situations where the receiving end cannot process the rotational information. For + * example, CDN live streaming. */ ORIENTATION_MODE_FIXED_LANDSCAPE = 1, /** @@ -1051,9 +1053,16 @@ enum ORIENTATION_MODE { */ enum DEGRADATION_PREFERENCE { /** - * 0: (Default) Prefers to reduce the video frame rate while maintaining video quality during video - * encoding under limited bandwidth. This degradation preference is suitable for scenarios where - * video quality is prioritized. + * -1: (Default) SDK uses degradation preference according to setVideoScenario API settings, real-time network state and other relevant data information. + * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then MAINTAIN_BALANCED is used. If not, then MAINTAIN_RESOLUTION is used. + * Also if network state has changed, SDK may change this parameter between MAINTAIN_FRAMERATE、MAINTAIN_BALANCED and MAINTAIN_RESOLUTION automatically to get the best QOE. + * We recommend using this option. + */ + MAINTAIN_AUTO = -1, + /** + * 0: (Deprecated) Prefers to reduce the video frame rate while maintaining video quality during + * video encoding under limited bandwidth. This degradation preference is suitable for scenarios + * where video quality is prioritized. * @note In the COMMUNICATION channel profile, the resolution of the video sent may change, so * remote users need to handle this issue. */ @@ -1066,9 +1075,9 @@ enum DEGRADATION_PREFERENCE { MAINTAIN_FRAMERATE = 1, /** * 2: Reduces the video frame rate and video quality simultaneously during video encoding under - * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_QUALITY and MAINTAIN_FRAMERATE, - * and this preference is suitable for scenarios where both smoothness and video quality are a - * priority. + * limited bandwidth. MAINTAIN_BALANCED has a lower reduction than MAINTAIN_RESOLUTION and + * MAINTAIN_FRAMERATE, and this preference is suitable for scenarios where both smoothness and + * video quality are a priority. */ MAINTAIN_BALANCED = 2, /** @@ -1155,6 +1164,11 @@ enum VIDEO_CODEC_CAPABILITY_LEVEL { * The video codec types. */ enum VIDEO_CODEC_TYPE { + /** + * 0: (Default) SDK will automatically adjust the codec type according to country and region or real-time network state and other relevant data information. + * Also if network state is changed, SDK may change codec automatically to get the best QOE. + * We recommend use this option. + */ VIDEO_CODEC_NONE = 0, /** * 1: Standard VP8. 
@@ -1170,11 +1184,13 @@ enum VIDEO_CODEC_TYPE { VIDEO_CODEC_H265 = 3, /** * 6: Generic. This type is used for transmitting raw video data, such as encrypted video frames. - * The SDK returns this type of video frames in callbacks, and you need to decode and render the frames yourself. + * The SDK returns this type of video frames in callbacks, and you need to decode and render the + * frames yourself. */ VIDEO_CODEC_GENERIC = 6, /** * 7: Generic H264. + * @deprecated This codec type is deprecated. */ VIDEO_CODEC_GENERIC_H264 = 7, /** @@ -1237,7 +1253,8 @@ struct SenderOptions { */ TCcMode ccMode; /** - * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE "VIDEO_CODEC_TYPE". + * The codec type used for the encoded images: \ref agora::rtc::VIDEO_CODEC_TYPE + * "VIDEO_CODEC_TYPE". */ VIDEO_CODEC_TYPE codecType; @@ -1249,12 +1266,14 @@ struct SenderOptions { * - \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE": (Recommended) Standard bitrate. * - Communication profile: The encoding bitrate equals the base bitrate. * - Live-broadcast profile: The encoding bitrate is twice the base bitrate. - * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate stays the same + * - \ref agora::rtc::COMPATIBLE_BITRATE "COMPATIBLE_BITRATE": Compatible bitrate. The bitrate + stays the same * regardless of the profile. * * The Communication profile prioritizes smoothness, while the Live Broadcast * profile prioritizes video quality (requiring a higher bitrate). Agora - * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or simply to + * recommends setting the bitrate mode as \ref agora::rtc::STANDARD_BITRATE "STANDARD_BITRATE" or + simply to * address this difference. * * The following table lists the recommended video encoder configurations, @@ -1262,7 +1281,8 @@ struct SenderOptions { * bitrate based on this table. If the bitrate you set is beyond the proper * range, the SDK automatically sets it to within the range. - | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live Bitrate (Kbps, for Live Broadcast)| + | Resolution | Frame Rate (fps) | Base Bitrate (Kbps, for Communication) | Live + Bitrate (Kbps, for Live Broadcast)| |------------------------|------------------|----------------------------------------|----------------------------------------| | 160 × 120 | 15 | 65 | 130 | | 120 × 120 | 15 | 50 | 100 | @@ -1299,10 +1319,7 @@ struct SenderOptions { */ int targetBitrate; - SenderOptions() - : ccMode(CC_ENABLED), - codecType(VIDEO_CODEC_H265), - targetBitrate(6500) {} + SenderOptions() : ccMode(CC_ENABLED), codecType(VIDEO_CODEC_H265), targetBitrate(6500) {} }; /** @@ -1365,8 +1382,8 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_AAC_16000_LOW = 0x010101, /** - * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_16000_MEDIUM = 0x010102, /** @@ -1375,18 +1392,18 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_AAC_32000_LOW = 0x010201, /** - * AAC encoding format, 32000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 32000 Hz sampling rate, medium sound quality. 
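For reference, a minimal sketch of overriding the SenderOptions defaults documented above (CC_ENABLED, VIDEO_CODEC_H265, 6500 kbps target bitrate); the override values are illustrative only:

    agora::rtc::SenderOptions opts;                  // defaults: CC_ENABLED, VIDEO_CODEC_H265, 6500
    opts.codecType = agora::rtc::VIDEO_CODEC_H264;   // illustrative override
    opts.targetBitrate = 2000;                       // Kbps, illustrative
    // opts is then passed to createCustomVideoTrack(encodedImageSender, opts), per the factory docs.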
A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_MEDIUM = 0x010202, /** - * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * AAC encoding format, 32000 Hz sampling rate, high sound quality. A file with an audio duration + * of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_32000_HIGH = 0x010203, /** - * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * AAC encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_AAC_48000_MEDIUM = 0x010302, /** @@ -1400,18 +1417,18 @@ enum AUDIO_ENCODING_TYPE { */ AUDIO_ENCODING_TYPE_OPUS_16000_LOW = 0x020101, /** - * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * OPUS encoding format, 16000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_16000_MEDIUM = 0x020102, /** - * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio duration - * of 10 minutes is approximately 2 MB after encoding. + * OPUS encoding format, 48000 Hz sampling rate, medium sound quality. A file with an audio + * duration of 10 minutes is approximately 2 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM = 0x020302, /** - * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration of - * 10 minutes is approximately 3.5 MB after encoding. + * OPUS encoding format, 48000 Hz sampling rate, high sound quality. A file with an audio duration + * of 10 minutes is approximately 3.5 MB after encoding. */ AUDIO_ENCODING_TYPE_OPUS_48000_HIGH = 0x020303, }; @@ -1421,13 +1438,13 @@ enum AUDIO_ENCODING_TYPE { */ enum WATERMARK_FIT_MODE { /** - * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in #WatermarkOptions. - * The settings in `WatermarkRatio` are invalid. + * Use the `positionInLandscapeMode` and `positionInPortraitMode` values you set in + * #WatermarkOptions. The settings in `WatermarkRatio` are invalid. */ FIT_MODE_COVER_POSITION, /** - * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and `positionInPortraitMode` - * in `WatermarkOptions` are invalid. + * Use the value you set in `WatermarkRatio`. The settings in `positionInLandscapeMode` and + * `positionInPortraitMode` in `WatermarkOptions` are invalid. */ FIT_MODE_USE_IMAGE_RATIO }; @@ -1436,9 +1453,7 @@ enum WATERMARK_FIT_MODE { * The advanced settings of encoded audio frame. */ struct EncodedAudioFrameAdvancedSettings { - EncodedAudioFrameAdvancedSettings() - : speech(true), - sendEvenIfEmpty(true) {} + EncodedAudioFrameAdvancedSettings() : speech(true), sendEvenIfEmpty(true) {} /** * Determines whether the audio source is speech. 
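As a concrete reference for the encoded-audio structs reformatted here and in the following hunk, a hedged sketch of filling in EncodedAudioFrameInfo for a custom encoded-audio sender; the sample rate, frame size and channel count are illustrative:

    agora::rtc::EncodedAudioFrameInfo info;          // codec defaults to AUDIO_CODEC_AACLC
    info.sampleRateHz = 48000;
    info.samplesPerChannel = 1024;                   // typical AAC frame size, illustrative
    info.numberOfChannels = 1;
    info.captureTimeMs = 0;                          // 0 leaves timestamping to the SDK (assumption)
    info.advancedSettings.speech = false;            // music content rather than speech
    info.advancedSettings.sendEvenIfEmpty = true;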
@@ -1459,19 +1474,19 @@ struct EncodedAudioFrameAdvancedSettings { */ struct EncodedAudioFrameInfo { EncodedAudioFrameInfo() - : codec(AUDIO_CODEC_AACLC), - sampleRateHz(0), - samplesPerChannel(0), - numberOfChannels(0), - captureTimeMs(0) {} + : codec(AUDIO_CODEC_AACLC), + sampleRateHz(0), + samplesPerChannel(0), + numberOfChannels(0), + captureTimeMs(0) {} EncodedAudioFrameInfo(const EncodedAudioFrameInfo& rhs) - : codec(rhs.codec), - sampleRateHz(rhs.sampleRateHz), - samplesPerChannel(rhs.samplesPerChannel), - numberOfChannels(rhs.numberOfChannels), - advancedSettings(rhs.advancedSettings), - captureTimeMs(rhs.captureTimeMs) {} + : codec(rhs.codec), + sampleRateHz(rhs.sampleRateHz), + samplesPerChannel(rhs.samplesPerChannel), + numberOfChannels(rhs.numberOfChannels), + advancedSettings(rhs.advancedSettings), + captureTimeMs(rhs.captureTimeMs) {} /** * The audio codec: #AUDIO_CODEC_TYPE. */ @@ -1504,14 +1519,15 @@ struct EncodedAudioFrameInfo { * The definition of the AudioPcmDataInfo struct. */ struct AudioPcmDataInfo { - AudioPcmDataInfo() : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} + AudioPcmDataInfo() + : samplesPerChannel(0), channelNum(0), samplesOut(0), elapsedTimeMs(0), ntpTimeMs(0) {} AudioPcmDataInfo(const AudioPcmDataInfo& rhs) - : samplesPerChannel(rhs.samplesPerChannel), - channelNum(rhs.channelNum), - samplesOut(rhs.samplesOut), - elapsedTimeMs(rhs.elapsedTimeMs), - ntpTimeMs(rhs.ntpTimeMs) {} + : samplesPerChannel(rhs.samplesPerChannel), + channelNum(rhs.channelNum), + samplesOut(rhs.samplesOut), + elapsedTimeMs(rhs.elapsedTimeMs), + ntpTimeMs(rhs.ntpTimeMs) {} /** * The sample count of the PCM data that you expect. @@ -1545,7 +1561,7 @@ enum H264PacketizeMode { /** * Single NAL unit mode. See RFC 6184. */ - SingleNalUnit, // Mode 0 - only single NALU allowed + SingleNalUnit, // Mode 0 - only single NALU allowed }; /** @@ -1588,64 +1604,63 @@ enum VIDEO_STREAM_TYPE { }; struct VideoSubscriptionOptions { - /** - * The type of the video stream to subscribe to. - * - * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality - * video stream. - */ - Optional type; - /** - * Whether to subscribe to encoded video data only: - * - `true`: Subscribe to encoded video data only. - * - `false`: (Default) Subscribe to decoded video data. - */ - Optional encodedFrameOnly; + /** + * The type of the video stream to subscribe to. + * + * The default value is `VIDEO_STREAM_HIGH`, which means the high-quality + * video stream. + */ + Optional type; + /** + * Whether to subscribe to encoded video data only: + * - `true`: Subscribe to encoded video data only. + * - `false`: (Default) Subscribe to decoded video data. + */ + Optional encodedFrameOnly; - VideoSubscriptionOptions() {} + VideoSubscriptionOptions() {} }; - /** The maximum length of the user account. */ -enum MAX_USER_ACCOUNT_LENGTH_TYPE -{ +enum MAX_USER_ACCOUNT_LENGTH_TYPE { /** The maximum length of the user account is 256 bytes. */ MAX_USER_ACCOUNT_LENGTH = 256 }; /** - * The definition of the EncodedVideoFrameInfo struct, which contains the information of the external encoded video frame. + * The definition of the EncodedVideoFrameInfo struct, which contains the information of the + * external encoded video frame. 
*/ struct EncodedVideoFrameInfo { EncodedVideoFrameInfo() - : uid(0), - codecType(VIDEO_CODEC_H264), - width(0), - height(0), - framesPerSecond(0), - frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), - rotation(VIDEO_ORIENTATION_0), - trackId(0), - captureTimeMs(0), - decodeTimeMs(0), - streamType(VIDEO_STREAM_HIGH), - presentationMs(-1) {} + : uid(0), + codecType(VIDEO_CODEC_H264), + width(0), + height(0), + framesPerSecond(0), + frameType(VIDEO_FRAME_TYPE_BLANK_FRAME), + rotation(VIDEO_ORIENTATION_0), + trackId(0), + captureTimeMs(0), + decodeTimeMs(0), + streamType(VIDEO_STREAM_HIGH), + presentationMs(-1) {} EncodedVideoFrameInfo(const EncodedVideoFrameInfo& rhs) - : uid(rhs.uid), - codecType(rhs.codecType), - width(rhs.width), - height(rhs.height), - framesPerSecond(rhs.framesPerSecond), - frameType(rhs.frameType), - rotation(rhs.rotation), - trackId(rhs.trackId), - captureTimeMs(rhs.captureTimeMs), - decodeTimeMs(rhs.decodeTimeMs), - streamType(rhs.streamType), - presentationMs(rhs.presentationMs) {} + : uid(rhs.uid), + codecType(rhs.codecType), + width(rhs.width), + height(rhs.height), + framesPerSecond(rhs.framesPerSecond), + frameType(rhs.frameType), + rotation(rhs.rotation), + trackId(rhs.trackId), + captureTimeMs(rhs.captureTimeMs), + decodeTimeMs(rhs.decodeTimeMs), + streamType(rhs.streamType), + presentationMs(rhs.presentationMs) {} EncodedVideoFrameInfo& operator=(const EncodedVideoFrameInfo& rhs) { if (this == &rhs) return *this; @@ -1669,7 +1684,8 @@ struct EncodedVideoFrameInfo { */ uid_t uid; /** - * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is `VIDEO_CODEC_H265 (3)`. + * The codec type of the local video stream. See #VIDEO_CODEC_TYPE. The default value is + * `VIDEO_CODEC_H265 (3)`. */ VIDEO_CODEC_TYPE codecType; /** @@ -1717,33 +1733,40 @@ struct EncodedVideoFrameInfo { }; /** -* Video compression preference. -*/ + * Video compression preference. + */ enum COMPRESSION_PREFERENCE { /** - * (Default) Low latency is preferred, usually used in real-time communication where low latency is the number one priority. + * (Default) SDK uses compression preference according to setVideoScenario API settings, real-time network state and other relevant data information. + * If API setVideoScenario set video scenario to APPLICATION_SCENARIO_LIVESHOW, then PREFER_QUALITY is used. If not, then PREFER_LOW_LATENCY is used. + * Also if network state has changed, SDK may change this parameter between PREFER_QUALITY and PREFER_LOW_LATENCY automatically to get the best QOE. + * We recommend using this option. */ - PREFER_LOW_LATENCY, + PREFER_COMPRESSION_AUTO = -1, /** - * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps + * Prefer low latency, usually used in real-time communication where low latency is the number one priority. */ - PREFER_QUALITY, + PREFER_LOW_LATENCY = 0, + /** + * Prefer quality in sacrifice of a degree of latency, usually around 30ms ~ 150ms, depends target fps + */ + PREFER_QUALITY = 1, }; /** -* The video encoder type preference. -*/ + * The video encoder type preference. + */ enum ENCODING_PREFERENCE { /** - *Default . + *Default . */ PREFER_AUTO = -1, /** - * Software encoding. - */ + * Software encoding. + */ PREFER_SOFTWARE = 0, /** - * Hardware encoding + * Hardware encoding */ PREFER_HARDWARE = 1, }; @@ -1752,15 +1775,14 @@ enum ENCODING_PREFERENCE { * The definition of the AdvanceOptions struct. */ struct AdvanceOptions { - /** * The video encoder type preference.. 
*/ ENCODING_PREFERENCE encodingPreference; /** - * Video compression preference. - */ + * Video compression preference. + */ COMPRESSION_PREFERENCE compressionPreference; /** @@ -1770,7 +1792,7 @@ struct AdvanceOptions { bool encodeAlpha; AdvanceOptions() : encodingPreference(PREFER_AUTO), - compressionPreference(PREFER_LOW_LATENCY), + compressionPreference(PREFER_COMPRESSION_AUTO), encodeAlpha(false) {} AdvanceOptions(ENCODING_PREFERENCE encoding_preference, @@ -1785,7 +1807,6 @@ struct AdvanceOptions { compressionPreference == rhs.compressionPreference && encodeAlpha == rhs.encodeAlpha; } - }; /** @@ -1818,6 +1839,30 @@ enum CAMERA_FORMAT_TYPE { }; #endif +enum VIDEO_MODULE_TYPE { + /** Video capture module */ + VIDEO_MODULE_CAPTURER = 0, + /** Video software encoder module */ + VIDEO_MODULE_SOFTWARE_ENCODER = 1, + /** Video hardware encoder module */ + VIDEO_MODULE_HARDWARE_ENCODER = 2, + /** Video software decoder module */ + VIDEO_MODULE_SOFTWARE_DECODER = 3, + /** Video hardware decoder module */ + VIDEO_MODULE_HARDWARE_DECODER = 4, + /** Video render module */ + VIDEO_MODULE_RENDERER = 5, +}; + +enum HDR_CAPABILITY { + /** The result of static check is not reliable, by defualt*/ + HDR_CAPABILITY_UNKNOWN = -1, + /** The module you query doesn't support HDR */ + HDR_CAPABILITY_UNSUPPORTED = 0, + /** The module you query supports HDR */ + HDR_CAPABILITY_SUPPORTED = 1, +}; + /** Supported codec type bit mask. */ enum CODEC_CAP_MASK { /** 0: No codec support. */ @@ -1840,7 +1885,9 @@ struct CodecCapLevels { VIDEO_CODEC_CAPABILITY_LEVEL hwDecodingLevel; VIDEO_CODEC_CAPABILITY_LEVEL swDecodingLevel; - CodecCapLevels(): hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} + CodecCapLevels() + : hwDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED), + swDecodingLevel(CODEC_CAPABILITY_LEVEL_UNSPECIFIED) {} }; /** The codec support information. */ @@ -1852,10 +1899,11 @@ struct CodecCapInfo { /** The codec capability level, estimated based on the device hardware.*/ CodecCapLevels codecLevels; - CodecCapInfo(): codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} + CodecCapInfo() : codecType(VIDEO_CODEC_NONE), codecCapMask(0) {} }; -/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types. */ +/** FocalLengthInfo contains the IDs of the front and rear cameras, along with the wide-angle types. + */ struct FocalLengthInfo { /** The camera direction. */ int cameraDirection; @@ -1882,21 +1930,22 @@ struct VideoEncoderConfiguration { /** * The bitrate (Kbps) of the video. * - * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond the - * proper range, the SDK automatically adjusts it to a value within the range. You can also choose - * from the following options: + * Refer to the **Video Bitrate Table** below and set your bitrate. If you set a bitrate beyond + * the proper range, the SDK automatically adjusts it to a value within the range. You can also + * choose from the following options: * - * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ between - * the Live Broadcast and Communication profiles: + * - #STANDARD_BITRATE: (Recommended) Standard bitrate mode. In this mode, the bitrates differ + * between the Live Broadcast and Communication profiles: * - In the Communication profile, the video bitrate is the same as the base bitrate. * - In the Live Broadcast profile, the video bitrate is twice the base bitrate. 
- * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the bitrate - * stays the same regardless of the profile. If you choose this mode for the Live Broadcast profile, - * the video frame rate may be lower than the set value. + * - #COMPATIBLE_BITRATE: Compatible bitrate mode. The compatible bitrate mode. In this mode, the + * bitrate stays the same regardless of the profile. If you choose this mode for the Live + * Broadcast profile, the video frame rate may be lower than the set value. * - * Agora uses different video codecs for different profiles to optimize the user experience. For example, - * the communication profile prioritizes the smoothness while the live-broadcast profile prioritizes the - * video quality (a higher bitrate). Therefore, We recommend setting this parameter as #STANDARD_BITRATE. + * Agora uses different video codecs for different profiles to optimize the user experience. For + * example, the communication profile prioritizes the smoothness while the live-broadcast profile + * prioritizes the video quality (a higher bitrate). Therefore, We recommend setting this + * parameter as #STANDARD_BITRATE. * * | Resolution | Frame Rate (fps) | Base Bitrate (Kbps) | Live Bitrate (Kbps)| * |------------------------|------------------|---------------------|--------------------| @@ -1964,7 +2013,8 @@ struct VideoEncoderConfiguration { /** * The mirror mode is disabled by default - * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored before encoding. + * If mirror_type is set to VIDEO_MIRROR_MODE_ENABLED, then the video frame would be mirrored + * before encoding. */ VIDEO_MIRROR_MODE_TYPE mirrorMode; @@ -1980,9 +2030,9 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(int width, int height, int f, int b, ORIENTATION_MODE m, VIDEO_MIRROR_MODE_TYPE mirror = VIDEO_MIRROR_MODE_DISABLED) : codecType(VIDEO_CODEC_NONE), dimensions(width, height), @@ -1990,19 +2040,19 @@ struct VideoEncoderConfiguration { bitrate(b), minBitrate(DEFAULT_MIN_BITRATE), orientationMode(m), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(mirror), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration(const VideoEncoderConfiguration& config) - : codecType(config.codecType), - dimensions(config.dimensions), - frameRate(config.frameRate), - bitrate(config.bitrate), - minBitrate(config.minBitrate), - orientationMode(config.orientationMode), - degradationPreference(config.degradationPreference), - mirrorMode(config.mirrorMode), - advanceOptions(config.advanceOptions) {} + : codecType(config.codecType), + dimensions(config.dimensions), + frameRate(config.frameRate), + bitrate(config.bitrate), + minBitrate(config.minBitrate), + orientationMode(config.orientationMode), + degradationPreference(config.degradationPreference), + mirrorMode(config.mirrorMode), + advanceOptions(config.advanceOptions) {} VideoEncoderConfiguration() : codecType(VIDEO_CODEC_NONE), dimensions(FRAME_WIDTH_960, FRAME_HEIGHT_540), @@ -2010,9 +2060,9 @@ struct VideoEncoderConfiguration { bitrate(STANDARD_BITRATE), 
minBitrate(DEFAULT_MIN_BITRATE), orientationMode(ORIENTATION_MODE_ADAPTIVE), - degradationPreference(MAINTAIN_QUALITY), + degradationPreference(MAINTAIN_AUTO), mirrorMode(VIDEO_MIRROR_MODE_DISABLED), - advanceOptions(PREFER_AUTO, PREFER_LOW_LATENCY, false) {} + advanceOptions(PREFER_AUTO, PREFER_COMPRESSION_AUTO, false) {} VideoEncoderConfiguration& operator=(const VideoEncoderConfiguration& rhs) { if (this == &rhs) return *this; @@ -2040,9 +2090,9 @@ struct DataStreamConfig { * * When you set the data packet to synchronize with the audio, then if the data packet delay is * within the audio delay, the SDK triggers the `onStreamMessage` callback when the synchronized - * audio packet is played out. Do not set this parameter as true if you need the receiver to receive - * the data packet immediately. Agora recommends that you set this parameter to `true` only when you - * need to implement specific functions, for example lyric synchronization. + * audio packet is played out. Do not set this parameter as true if you need the receiver to + * receive the data packet immediately. Agora recommends that you set this parameter to `true` + * only when you need to implement specific functions, for example lyric synchronization. */ bool syncWithAudio; /** @@ -2050,7 +2100,8 @@ struct DataStreamConfig { * - `true`: Guarantee that the receiver receives the data in the sent order. * - `false`: Do not guarantee that the receiver receives the data in the sent order. * - * Do not set this parameter as `true` if you need the receiver to receive the data packet immediately. + * Do not set this parameter as `true` if you need the receiver to receive the data packet + * immediately. */ bool ordered; }; @@ -2060,16 +2111,16 @@ struct DataStreamConfig { */ enum SIMULCAST_STREAM_MODE { /* - * disable simulcast stream until receive request for enable simulcast stream by other broadcaster - */ + * disable simulcast stream until receive request for enable simulcast stream by other broadcaster + */ AUTO_SIMULCAST_STREAM = -1, /* - * disable simulcast stream - */ + * disable simulcast stream + */ DISABLE_SIMULCAST_STREAM = 0, /* - * always enable simulcast stream - */ + * always enable simulcast stream + */ ENABLE_SIMULCAST_STREAM = 1, }; @@ -2082,7 +2133,8 @@ struct SimulcastStreamConfig { */ VideoDimensions dimensions; /** - * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log level is 5. + * The video bitrate (Kbps), represented by an instantaneous value. The default value of the log + * level is 5. */ int kBitrate; /** @@ -2187,28 +2239,31 @@ struct Rectangle { /** * The position and size of the watermark on the screen. * - * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and `widthRatio`: - * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which determines - * the distance from the upper left corner of the watermark to the upper left corner of the screen. - * The `widthRatio` determines the width of the watermark. + * The position and size of the watermark on the screen are determined by `xRatio`, `yRatio`, and + * `widthRatio`: + * - (`xRatio`, `yRatio`) refers to the coordinates of the upper left corner of the watermark, which + * determines the distance from the upper left corner of the watermark to the upper left corner of + * the screen. The `widthRatio` determines the width of the watermark. 
*/ struct WatermarkRatio { /** * The x-coordinate of the upper left corner of the watermark. The horizontal position relative to - * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is the - * upper left corner of the watermark. The value range is [0.0,1.0], and the default value is 0. + * the origin, where the upper left corner of the screen is the origin, and the x-coordinate is + * the upper left corner of the watermark. The value range is [0.0,1.0], and the default value is + * 0. */ float xRatio; /** - * The y-coordinate of the upper left corner of the watermark. The vertical position relative to the - * origin, where the upper left corner of the screen is the origin, and the y-coordinate is the upper - * left corner of the screen. The value range is [0.0,1.0], and the default value is 0. + * The y-coordinate of the upper left corner of the watermark. The vertical position relative to + * the origin, where the upper left corner of the screen is the origin, and the y-coordinate is + * the upper left corner of the screen. The value range is [0.0,1.0], and the default value is 0. */ float yRatio; /** - * The width of the watermark. The SDK calculates the height of the watermark proportionally according - * to this parameter value to ensure that the enlarged or reduced watermark image is not distorted. - * The value range is [0,1], and the default value is 0, which means no watermark is displayed. + * The width of the watermark. The SDK calculates the height of the watermark proportionally + * according to this parameter value to ensure that the enlarged or reduced watermark image is not + * distorted. The value range is [0,1], and the default value is 0, which means no watermark is + * displayed. */ float widthRatio; @@ -2247,10 +2302,10 @@ struct WatermarkOptions { WATERMARK_FIT_MODE mode; WatermarkOptions() - : visibleInPreview(true), - positionInLandscapeMode(0, 0, 0, 0), - positionInPortraitMode(0, 0, 0, 0), - mode(FIT_MODE_COVER_POSITION) {} + : visibleInPreview(true), + positionInLandscapeMode(0, 0, 0, 0), + positionInPortraitMode(0, 0, 0, 0), + mode(FIT_MODE_COVER_POSITION) {} }; /** @@ -2321,7 +2376,8 @@ struct RtcStats { * The app CPU usage (%). * @note * - The value of `cpuAppUsage` is always reported as 0 in the `onLeaveChannel` callback. - * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. + * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system + * limitations. */ double cpuAppUsage; /** @@ -2331,13 +2387,15 @@ struct RtcStats { * value = (100 - System Idle Progress in Task Manager)/100. * @note * - The value of `cpuTotalUsage` is always reported as 0 in the `onLeaveChannel` callback. - * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system limitations. + * - As of Android 8.1, you cannot get the CPU usage from this attribute due to system + * limitations. */ double cpuTotalUsage; /** * The round-trip time delay from the client to the local router. - * @note On Android, to get `gatewayRtt`, ensure that you add the `android.permission.ACCESS_WIFI_STATE` - * permission after `` in the `AndroidManifest.xml` file in your project. + * @note On Android, to get `gatewayRtt`, ensure that you add the + * `android.permission.ACCESS_WIFI_STATE` permission after `` in the + * `AndroidManifest.xml` file in your project. 
*/ int gatewayRtt; /** @@ -2414,39 +2472,39 @@ struct RtcStats { */ int rxPacketLossRate; RtcStats() - : duration(0), - txBytes(0), - rxBytes(0), - txAudioBytes(0), - txVideoBytes(0), - rxAudioBytes(0), - rxVideoBytes(0), - txKBitRate(0), - rxKBitRate(0), - rxAudioKBitRate(0), - txAudioKBitRate(0), - rxVideoKBitRate(0), - txVideoKBitRate(0), - lastmileDelay(0), - userCount(0), - cpuAppUsage(0.0), - cpuTotalUsage(0.0), - gatewayRtt(0), - memoryAppUsageRatio(0.0), - memoryTotalUsageRatio(0.0), - memoryAppUsageInKbytes(0), - connectTimeMs(0), - firstAudioPacketDuration(0), - firstVideoPacketDuration(0), - firstVideoKeyFramePacketDuration(0), - packetsBeforeFirstKeyFramePacket(0), - firstAudioPacketDurationAfterUnmute(0), - firstVideoPacketDurationAfterUnmute(0), - firstVideoKeyFramePacketDurationAfterUnmute(0), - firstVideoKeyFrameDecodedDurationAfterUnmute(0), - firstVideoKeyFrameRenderedDurationAfterUnmute(0), - txPacketLossRate(0), - rxPacketLossRate(0) {} + : duration(0), + txBytes(0), + rxBytes(0), + txAudioBytes(0), + txVideoBytes(0), + rxAudioBytes(0), + rxVideoBytes(0), + txKBitRate(0), + rxKBitRate(0), + rxAudioKBitRate(0), + txAudioKBitRate(0), + rxVideoKBitRate(0), + txVideoKBitRate(0), + lastmileDelay(0), + userCount(0), + cpuAppUsage(0.0), + cpuTotalUsage(0.0), + gatewayRtt(0), + memoryAppUsageRatio(0.0), + memoryTotalUsageRatio(0.0), + memoryAppUsageInKbytes(0), + connectTimeMs(0), + firstAudioPacketDuration(0), + firstVideoPacketDuration(0), + firstVideoKeyFramePacketDuration(0), + packetsBeforeFirstKeyFramePacket(0), + firstAudioPacketDurationAfterUnmute(0), + firstVideoPacketDurationAfterUnmute(0), + firstVideoKeyFramePacketDurationAfterUnmute(0), + firstVideoKeyFrameDecodedDurationAfterUnmute(0), + firstVideoKeyFrameRenderedDurationAfterUnmute(0), + txPacketLossRate(0), + rxPacketLossRate(0) {} }; /** @@ -2464,7 +2522,8 @@ enum CLIENT_ROLE_TYPE { }; /** - * Quality change of the local video in terms of target frame rate and target bit rate since last count. + * Quality change of the local video in terms of target frame rate and target bit rate since last + * count. */ enum QUALITY_ADAPT_INDICATION { /** @@ -2482,11 +2541,10 @@ enum QUALITY_ADAPT_INDICATION { }; /** - * The latency level of an audience member in interactive live streaming. This enum takes effect only - * when the user role is set to `CLIENT_ROLE_AUDIENCE`. + * The latency level of an audience member in interactive live streaming. This enum takes effect + * only when the user role is set to `CLIENT_ROLE_AUDIENCE`. */ -enum AUDIENCE_LATENCY_LEVEL_TYPE -{ +enum AUDIENCE_LATENCY_LEVEL_TYPE { /** * 1: Low latency. */ @@ -2500,15 +2558,14 @@ enum AUDIENCE_LATENCY_LEVEL_TYPE /** * The detailed options of a user. */ -struct ClientRoleOptions -{ +struct ClientRoleOptions { /** - * The latency level of an audience member in interactive live streaming. See `AUDIENCE_LATENCY_LEVEL_TYPE`. + * The latency level of an audience member in interactive live streaming. See + * `AUDIENCE_LATENCY_LEVEL_TYPE`. */ AUDIENCE_LATENCY_LEVEL_TYPE audienceLatencyLevel; - ClientRoleOptions() - : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} + ClientRoleOptions() : audienceLatencyLevel(AUDIENCE_LATENCY_LEVEL_ULTRA_LOW_LATENCY) {} }; /** @@ -2542,8 +2599,8 @@ enum EXPERIENCE_POOR_REASON { */ WIRELESS_SIGNAL_POOR = 4, /** - * 8: The local user enables both Wi-Fi and bluetooth, and their signals interfere with each other. - * As a result, audio transmission quality is undermined. 
+ * 8: The local user enables both Wi-Fi and Bluetooth, and their signals interfere with each + * other. As a result, audio transmission quality is undermined. */ WIFI_BLUETOOTH_COEXIST = 8, }; @@ -2552,18 +2609,18 @@ enum EXPERIENCE_POOR_REASON { * Audio AINS mode */ enum AUDIO_AINS_MODE { - /** - * AINS mode with soft suppression level. - */ - AINS_MODE_BALANCED = 0, - /** - * AINS mode with high suppression level. - */ - AINS_MODE_AGGRESSIVE = 1, - /** - * AINS mode with high suppression level and ultra-low-latency - */ - AINS_MODE_ULTRALOWLATENCY = 2 + /** + * AINS mode with soft suppression level. + */ + AINS_MODE_BALANCED = 0, + /** + * AINS mode with high suppression level. + */ + AINS_MODE_AGGRESSIVE = 1, + /** + * AINS mode with high suppression level and ultra-low latency. + */ + AINS_MODE_ULTRALOWLATENCY = 2 }; /** @@ -2574,9 +2631,10 @@ enum AUDIO_PROFILE_TYPE { * 0: The default audio profile. * - For the Communication profile: * - Windows: A sample rate of 16 kHz, audio encoding, mono, and a bitrate of up to 16 Kbps. - * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 Kbps. - * of up to 16 Kbps. - * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate of up to 64 Kbps. + * - Android/macOS/iOS: A sample rate of 32 kHz, audio encoding, mono, and a bitrate of up to 18 + * Kbps. + * - For the Live-broadcast profile: A sample rate of 48 kHz, music encoding, mono, and a bitrate + * of up to 64 Kbps. */ AUDIO_PROFILE_DEFAULT = 0, /** @@ -2590,8 +2648,8 @@ enum AUDIO_PROFILE_TYPE { /** * 3: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 80 Kbps. * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` - * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. + * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set + * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_STANDARD_STEREO = 3, /** @@ -2601,8 +2659,8 @@ enum AUDIO_PROFILE_TYPE { /** * 5: A sample rate of 48 kHz, music encoding, stereo, and a bitrate of up to 128 Kbps. * - * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set `audioProcessingChannels` - * to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. + * To implement stereo audio, you also need to call `setAdvancedAudioOptions` and set + * `audioProcessingChannels` to `AUDIO_PROCESSING_STEREO` in `AdvancedAudioOptions`. */ AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO = 5, /** @@ -2634,7 +2692,8 @@ enum AUDIO_SCENARIO_TYPE { */ AUDIO_SCENARIO_CHATROOM = 5, /** - * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low latency. + * 7: Real-time chorus scenario, where users have good network conditions and require ultra-low + * latency. */ AUDIO_SCENARIO_CHORUS = 7, /** @@ -2651,19 +2710,19 @@ enum AUDIO_SCENARIO_TYPE { * The format of the video frame. */ struct VideoFormat { - OPTIONAL_ENUM_SIZE_T { - /** The maximum value (px) of the width. */ - kMaxWidthInPixels = 3840, - /** The maximum value (px) of the height. */ - kMaxHeightInPixels = 2160, - /** The maximum value (fps) of the frame rate. */ - kMaxFps = 60, + OPTIONAL_ENUM_SIZE_T{ + /** The maximum value (px) of the width. */ + kMaxWidthInPixels = 3840, + /** The maximum value (px) of the height. */ + kMaxHeightInPixels = 2160, + /** The maximum value (fps) of the frame rate.
*/ + kMaxFps = 60, }; /** * The width (px) of the video. */ - int width; // Number of pixels. + int width; // Number of pixels. /** * The height (px) of the video. */ @@ -2687,9 +2746,7 @@ struct VideoFormat { bool operator==(const VideoFormat& fmt) const { return width == fmt.width && height == fmt.height && fps == fmt.fps; } - bool operator!=(const VideoFormat& fmt) const { - return !operator==(fmt); - } + bool operator!=(const VideoFormat& fmt) const { return !operator==(fmt); } }; /** @@ -2742,7 +2799,6 @@ enum SCREEN_SCENARIO_TYPE { SCREEN_SCENARIO_RDC = 4, }; - /** * The video application scenario type. */ @@ -2759,6 +2815,10 @@ enum VIDEO_APPLICATION_SCENARIO_TYPE { * 2: Video Call Scenario. This scenario is used to optimize the video experience in video application, like 1v1 video call. */ APPLICATION_SCENARIO_1V1 = 2, + /** + * 3: Live Show Scenario. This scenario is used to optimize the video experience in video live show. + */ + APPLICATION_SCENARIO_LIVESHOW = 3, }; /** @@ -2789,7 +2849,8 @@ enum VIDEO_QOE_PREFERENCE_TYPE { */ enum CAPTURE_BRIGHTNESS_LEVEL_TYPE { /** -1: The SDK does not detect the brightness level of the video image. - * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next callback. + * Wait a few seconds to get the brightness level from `CAPTURE_BRIGHTNESS_LEVEL_TYPE` in the next + * callback. */ CAPTURE_BRIGHTNESS_LEVEL_INVALID = -1, /** 0: The brightness level of the video image is normal. @@ -2804,20 +2865,20 @@ enum CAPTURE_BRIGHTNESS_LEVEL_TYPE { }; enum CAMERA_STABILIZATION_MODE { - /** The camera stabilization mode is disabled. - */ + /** The camera stabilization mode is disabled. + */ CAMERA_STABILIZATION_MODE_OFF = -1, - /** device choose stabilization mode automatically. - */ + /** device choose stabilization mode automatically. + */ CAMERA_STABILIZATION_MODE_AUTO = 0, - /** stabilization mode level 1. - */ + /** stabilization mode level 1. + */ CAMERA_STABILIZATION_MODE_LEVEL_1 = 1, - /** stabilization mode level 2. - */ + /** stabilization mode level 2. + */ CAMERA_STABILIZATION_MODE_LEVEL_2 = 2, - /** stabilization mode level 3. - */ + /** stabilization mode level 3. + */ CAMERA_STABILIZATION_MODE_LEVEL_3 = 3, /** The maximum level of the camera stabilization mode. */ @@ -2855,7 +2916,8 @@ enum LOCAL_AUDIO_STREAM_REASON { */ LOCAL_AUDIO_STREAM_REASON_OK = 0, /** - * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the channel. + * 1: No specified reason for the local audio failure. Remind your users to try to rejoin the + * channel. */ LOCAL_AUDIO_STREAM_REASON_FAILURE = 1, /** @@ -2968,7 +3030,7 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_NOT_FOUND = 8, /** - * 9: (macOS only) The video capture device currently in use is disconnected (such as being + * 9: (macOS and Windows only) The video capture device currently in use is disconnected (such as being * unplugged). */ LOCAL_VIDEO_STREAM_REASON_DEVICE_DISCONNECTED = 9, @@ -2983,8 +3045,8 @@ enum LOCAL_VIDEO_STREAM_REASON { */ LOCAL_VIDEO_STREAM_REASON_DEVICE_INTERRUPT = 14, /** - * 15: (Android only) The device may need to be shut down and restarted to restore camera function, - * or there may be a persistent hardware problem. + * 15: (Android only) The device may need to be shut down and restarted to restore camera + * function, or there may be a persistent hardware problem. 
*/ LOCAL_VIDEO_STREAM_REASON_DEVICE_FATAL_ERROR = 15, /** @@ -3021,20 +3083,21 @@ enum LOCAL_VIDEO_STREAM_REASON { /** 22: No permision to capture screen. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_NO_PERMISSION = 22, /** - * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the screen - * sharing process, resulting in performance degradation. However, the screen sharing process itself is - * functioning normally. + * 24: (Windows Only) An unexpected error (possibly due to window block failure) occurs during the + * screen sharing process, resulting in performance degradation. However, the screen sharing + * process itself is functioning normally. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_AUTO_FALLBACK = 24, - /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the desktop. */ + /** 25: (Windows only) The local screen capture window is currently hidden and not visible on the + desktop. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_HIDDEN = 25, /** 26: (Windows only) The local screen capture window is recovered from its hidden state. */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_HIDDEN = 26, /** 27: (Windows and macOS only) The window is recovered from miniminzed */ LOCAL_VIDEO_STREAM_REASON_SCREEN_CAPTURE_WINDOW_RECOVER_FROM_MINIMIZED = 27, - /** + /** * 28: The screen capture paused. - * + * * Common scenarios for reporting this error code: * - When the desktop switch to the secure desktop such as UAC dialog or the Winlogon desktop on * Windows platform, the SDK reports this error code. @@ -3050,41 +3113,41 @@ enum LOCAL_VIDEO_STREAM_REASON { /** * Remote audio states. */ -enum REMOTE_AUDIO_STATE -{ +enum REMOTE_AUDIO_STATE { /** * 0: The remote audio is in the default state. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_LOCAL_MUTED(3)`, `REMOTE_AUDIO_REASON_REMOTE_MUTED(5)`, or * `REMOTE_AUDIO_REASON_REMOTE_OFFLINE(7)`. */ - REMOTE_AUDIO_STATE_STOPPED = 0, // Default state, audio is started or remote user disabled/muted audio stream + REMOTE_AUDIO_STATE_STOPPED = + 0, // Default state, audio is started or remote user disabled/muted audio stream /** * 1: The first remote audio packet is received. */ REMOTE_AUDIO_STATE_STARTING = 1, // The first audio frame packet has been received /** - * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the case of - * `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or + * 2: The remote audio stream is decoded and plays normally. The SDK reports this state in the + * case of `REMOTE_AUDIO_REASON_NETWORK_RECOVERY(2)`, `REMOTE_AUDIO_REASON_LOCAL_UNMUTED(4)`, or * `REMOTE_AUDIO_REASON_REMOTE_UNMUTED(6)`. */ - REMOTE_AUDIO_STATE_DECODING = 2, // The first remote audio frame has been decoded or fronzen state ends + REMOTE_AUDIO_STATE_DECODING = + 2, // The first remote audio frame has been decoded or fronzen state ends /** * 3: The remote audio is frozen. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_NETWORK_CONGESTION(1)`. */ - REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue + REMOTE_AUDIO_STATE_FROZEN = 3, // Remote audio is frozen, probably due to network issue /** * 4: The remote audio fails to start. The SDK reports this state in the case of * `REMOTE_AUDIO_REASON_INTERNAL(0)`. 
*/ - REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed + REMOTE_AUDIO_STATE_FAILED = 4, // Remote audio play failed }; /** * Reasons for the remote audio state change. */ -enum REMOTE_AUDIO_STATE_REASON -{ +enum REMOTE_AUDIO_STATE_REASON { /** * 0: The SDK reports this reason when the video state changes. */ @@ -3138,7 +3201,8 @@ enum REMOTE_VIDEO_STATE { /** * 0: The remote video is in the default state. The SDK reports this state in the case of * `REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED (3)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED (5)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK (8)`. + * `REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE (7)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK + * (8)`. */ REMOTE_VIDEO_STATE_STOPPED = 0, /** @@ -3146,9 +3210,10 @@ enum REMOTE_VIDEO_STATE { */ REMOTE_VIDEO_STATE_STARTING = 1, /** - * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the case of - * `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`, `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, - * `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`, or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`. + * 2: The remote video stream is decoded and plays normally. The SDK reports this state in the + * case of `REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY (2)`, + * `REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED (4)`, `REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED (6)`, + * or `REMOTE_VIDEO_STATE_REASON_AUDIO_FALLBACK_RECOVERY (9)`. */ REMOTE_VIDEO_STATE_DECODING = 2, /** 3: The remote video is frozen, probably due to @@ -3165,36 +3230,36 @@ enum REMOTE_VIDEO_STATE { */ enum REMOTE_VIDEO_STATE_REASON { /** - * 0: The SDK reports this reason when the video state changes. - */ + * 0: The SDK reports this reason when the video state changes. + */ REMOTE_VIDEO_STATE_REASON_INTERNAL = 0, /** - * 1: Network congestion. - */ + * 1: Network congestion. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION = 1, /** - * 2: Network recovery. - */ + * 2: Network recovery. + */ REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY = 2, /** - * 3: The local user stops receiving the remote video stream or disables the video module. - */ + * 3: The local user stops receiving the remote video stream or disables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_MUTED = 3, /** - * 4: The local user resumes receiving the remote video stream or enables the video module. - */ + * 4: The local user resumes receiving the remote video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_LOCAL_UNMUTED = 4, /** - * 5: The remote user stops sending the video stream or disables the video module. - */ + * 5: The remote user stops sending the video stream or disables the video module. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_MUTED = 5, /** - * 6: The remote user resumes sending the video stream or enables the video module. - */ + * 6: The remote user resumes sending the video stream or enables the video module. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_UNMUTED = 6, /** - * 7: The remote user leaves the channel. - */ + * 7: The remote user leaves the channel. + */ REMOTE_VIDEO_STATE_REASON_REMOTE_OFFLINE = 7, /** 8: The remote audio-and-video stream falls back to the audio-only stream * due to poor network conditions. 
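The remote state and reason enums documented in the hunks above are delivered to the app through the SDK's event callbacks. The following is a minimal, illustrative sketch only; it assumes the `IRtcEngineEventHandler::onRemoteVideoStateChanged(uid, state, reason, elapsed)` callback declared in IAgoraRtcEngine.h and the `REMOTE_VIDEO_STATE_FROZEN` enumerator, neither of which appears in the hunks shown here.

#include "IAgoraRtcEngine.h"

// Hypothetical handler sketch: maps the documented state/reason pairs to UI hints.
class RemoteVideoStateLogger : public agora::rtc::IRtcEngineEventHandler {
 public:
  void onRemoteVideoStateChanged(agora::rtc::uid_t uid,
                                 agora::rtc::REMOTE_VIDEO_STATE state,
                                 agora::rtc::REMOTE_VIDEO_STATE_REASON reason,
                                 int elapsed) override {
    (void)uid;
    (void)elapsed;
    if (state == agora::rtc::REMOTE_VIDEO_STATE_FROZEN &&
        reason == agora::rtc::REMOTE_VIDEO_STATE_REASON_NETWORK_CONGESTION) {
      // The remote video froze because of congestion: show a "poor network" badge.
    } else if (state == agora::rtc::REMOTE_VIDEO_STATE_DECODING &&
               reason == agora::rtc::REMOTE_VIDEO_STATE_REASON_NETWORK_RECOVERY) {
      // The stream recovered and is decoding again: hide the badge.
    }
  }
};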
@@ -3210,7 +3275,7 @@ enum REMOTE_VIDEO_STATE_REASON { /** (Internal use only) 11: The remote video stream type change to high stream type */ REMOTE_VIDEO_STATE_REASON_VIDEO_STREAM_TYPE_CHANGE_TO_HIGH = 11, - /** (iOS only) 12: The app of the remote user is in background. + /** (iOS only) 12: The app of the remote user is in background. */ REMOTE_VIDEO_STATE_REASON_SDK_IN_BACKGROUND = 12, @@ -3248,10 +3313,14 @@ enum REMOTE_USER_STATE { */ struct VideoTrackInfo { VideoTrackInfo() - : isLocal(false), ownerUid(0), trackId(0), channelId(OPTIONAL_NULLPTR) - , codecType(VIDEO_CODEC_H265) - , encodedFrameOnly(false), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY) - , observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} + : isLocal(false), + ownerUid(0), + trackId(0), + channelId(OPTIONAL_NULLPTR), + codecType(VIDEO_CODEC_H265), + encodedFrameOnly(false), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + observationPosition(agora::media::base::POSITION_POST_CAPTURER) {} /** * Whether the video track is local or remote. * - true: The video track is local. @@ -3291,7 +3360,8 @@ struct VideoTrackInfo { }; /** - * The downscale level of the remote video stream . The higher the downscale level, the more the video downscales. + * The downscale level of the remote video stream . The higher the downscale level, the more the + * video downscales. */ enum REMOTE_VIDEO_DOWNSCALE_LEVEL { /** @@ -3340,7 +3410,8 @@ struct AudioVolumeInfo { * @note * - The `vad` parameter does not report the voice activity status of remote users. In a remote * user's callback, the value of `vad` is always 1. - * - To use this parameter, you must set `reportVad` to true when calling `enableAudioVolumeIndication`. + * - To use this parameter, you must set `reportVad` to true when calling + * `enableAudioVolumeIndication`. */ unsigned int vad; /** @@ -3464,7 +3535,8 @@ enum VIDEO_CODEC_PROFILE_TYPE { */ VIDEO_CODEC_PROFILE_BASELINE = 66, /** - * 77: Main video codec profile. Generally used in mainstream electronics, such as MP4 players, portable video players, PSP, and iPads. + * 77: Main video codec profile. Generally used in mainstream electronics, such as MP4 players, + * portable video players, PSP, and iPads. */ VIDEO_CODEC_PROFILE_MAIN = 77, /** @@ -3473,7 +3545,6 @@ enum VIDEO_CODEC_PROFILE_TYPE { VIDEO_CODEC_PROFILE_HIGH = 100, }; - /** * Self-defined audio codec profile. */ @@ -3495,8 +3566,7 @@ enum AUDIO_CODEC_PROFILE_TYPE { /** * Local audio statistics. */ -struct LocalAudioStats -{ +struct LocalAudioStats { /** * The number of audio channels. */ @@ -3514,7 +3584,8 @@ struct LocalAudioStats */ int internalCodec; /** - * The packet loss rate (%) from the local client to the Agora server before applying the anti-packet loss strategies. + * The packet loss rate (%) from the local client to the Agora server before applying the + * anti-packet loss strategies. */ unsigned short txPacketLossRate; /** @@ -3535,35 +3606,45 @@ struct LocalAudioStats int aecEstimatedDelay; }; - /** * States of the Media Push. */ enum RTMP_STREAM_PUBLISH_STATE { /** - * 0: The Media Push has not started or has ended. This state is also triggered after you remove a RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`. + * 0: The Media Push has not started or has ended. This state is also triggered after you remove a + * RTMP or RTMPS stream from the CDN by calling `removePublishStreamUrl`. */ RTMP_STREAM_PUBLISH_STATE_IDLE = 0, /** - * 1: The SDK is connecting to Agora's streaming server and the CDN server. 
This state is triggered after you call the `addPublishStreamUrl` method. + * 1: The SDK is connecting to Agora's streaming server and the CDN server. This state is + * triggered after you call the `addPublishStreamUrl` method. */ RTMP_STREAM_PUBLISH_STATE_CONNECTING = 1, /** - * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS streaming and returns this state. + * 2: The RTMP or RTMPS streaming publishes. The SDK successfully publishes the RTMP or RTMPS + * streaming and returns this state. */ RTMP_STREAM_PUBLISH_STATE_RUNNING = 2, /** - * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this state. - * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2) returns. - * - If the streaming does not resume within 60 seconds or server errors occur, #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling the `removePublishStreamUrl` and `addPublishStreamUrl` methods. + * 3: The RTMP or RTMPS streaming is recovering. When exceptions occur to the CDN, or the + * streaming is interrupted, the SDK tries to resume RTMP or RTMPS streaming and returns this + * state. + * - If the SDK successfully resumes the streaming, #RTMP_STREAM_PUBLISH_STATE_RUNNING (2) + * returns. + * - If the streaming does not resume within 60 seconds or server errors occur, + * #RTMP_STREAM_PUBLISH_STATE_FAILURE (4) returns. You can also reconnect to the server by calling + * the `removePublishStreamUrl` and `addPublishStreamUrl` methods. */ RTMP_STREAM_PUBLISH_STATE_RECOVERING = 3, /** - * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS streaming again. + * 4: The RTMP or RTMPS streaming fails. See the `errCode` parameter for the detailed error + * information. You can also call the `addPublishStreamUrl` method to publish the RTMP or RTMPS + * streaming again. */ RTMP_STREAM_PUBLISH_STATE_FAILURE = 4, /** - * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is triggered after you call the `removePublishStreamUrl` method. + * 5: The SDK is disconnecting to Agora's streaming server and the CDN server. This state is + * triggered after you call the `removePublishStreamUrl` method. */ RTMP_STREAM_PUBLISH_STATE_DISCONNECTING = 5, }; @@ -3577,8 +3658,10 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_OK = 0, /** - * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, - * the SDK returns this error. Check whether you set the parameters in the `setLiveTranscoding` method properly. + * 1: Invalid argument used. If, for example, you do not call the `setLiveTranscoding` method to + * configure the LiveTranscoding parameters before calling the addPublishStreamUrl method, the SDK + * returns this error. Check whether you set the parameters in the `setLiveTranscoding` method + * properly. */ RTMP_STREAM_PUBLISH_REASON_INVALID_ARGUMENT = 1, /** @@ -3586,11 +3669,13 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_ENCRYPTED_STREAM_NOT_ALLOWED = 2, /** - * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish the streaming again. 
+ * 3: Timeout for the RTMP or RTMPS streaming. Call the `addPublishStreamUrl` method to publish + * the streaming again. */ RTMP_STREAM_PUBLISH_REASON_CONNECTION_TIMEOUT = 3, /** - * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to publish the streaming again. + * 4: An error occurs in Agora's streaming server. Call the `addPublishStreamUrl` method to + * publish the streaming again. */ RTMP_STREAM_PUBLISH_REASON_INTERNAL_SERVER_ERROR = 4, /** @@ -3614,17 +3699,23 @@ enum RTMP_STREAM_PUBLISH_REASON { */ RTMP_STREAM_PUBLISH_REASON_STREAM_NOT_FOUND = 9, /** - * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL format is correct. + * 10: The format of the RTMP or RTMPS streaming URL is not supported. Check whether the URL + * format is correct. */ RTMP_STREAM_PUBLISH_REASON_FORMAT_NOT_SUPPORTED = 10, /** - * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check your application code logic. + * 11: The user role is not host, so the user cannot use the CDN live streaming function. Check + * your application code logic. */ - RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_NOT_BROADCASTER = + 11, // Note: match to ERR_PUBLISH_STREAM_NOT_BROADCASTER in AgoraBase.h /** - * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the transcoding configuration in a scenario where there is streaming without transcoding. Check your application code logic. + * 13: The `updateRtmpTranscoding` or `setLiveTranscoding` method is called to update the + * transcoding configuration in a scenario where there is streaming without transcoding. Check + * your application code logic. */ - RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_TRANSCODING_NO_MIX_STREAM = + 13, // Note: match to ERR_PUBLISH_STREAM_TRANSCODING_NO_MIX_STREAM in AgoraBase.h /** * 14: Errors occurred in the host's network. */ @@ -3632,11 +3723,13 @@ enum RTMP_STREAM_PUBLISH_REASON { /** * 15: Your App ID does not have permission to use the CDN live streaming function. */ - RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h + RTMP_STREAM_PUBLISH_REASON_INVALID_APPID = + 15, // Note: match to ERR_PUBLISH_STREAM_APPID_INVALID in AgoraBase.h /** invalid privilege. */ RTMP_STREAM_PUBLISH_REASON_INVALID_PRIVILEGE = 16, /** - * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop streaming, the SDK returns this value. + * 100: The streaming has been stopped normally. After you call `removePublishStreamUrl` to stop + * streaming, the SDK returns this value. */ RTMP_STREAM_UNPUBLISH_REASON_OK = 100, }; @@ -3644,11 +3737,13 @@ enum RTMP_STREAM_PUBLISH_REASON { /** Events during the RTMP or RTMPS streaming. */ enum RTMP_STREAMING_EVENT { /** - * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS stream. + * 1: An error occurs when you add a background image or a watermark image to the RTMP or RTMPS + * stream. */ RTMP_STREAMING_EVENT_FAILED_LOAD_IMAGE = 1, /** - * 2: The streaming URL is already being used for CDN live streaming. If you want to start new streaming, use a new streaming URL. 
+ * 2: The streaming URL is already being used for CDN live streaming. If you want to start new + * streaming, use a new streaming URL. */ RTMP_STREAMING_EVENT_URL_ALREADY_IN_USE = 2, /** @@ -3666,15 +3761,18 @@ enum RTMP_STREAMING_EVENT { */ typedef struct RtcImage { /** - *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter is 1024 bytes. + *The HTTP/HTTPS URL address of the image in the live video. The maximum length of this parameter + *is 1024 bytes. */ const char* url; /** - * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The x coordinate (pixel) of the image on the video frame (taking the upper left corner of the + * video frame as the origin). */ int x; /** - * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the video frame as the origin). + * The y coordinate (pixel) of the image on the video frame (taking the upper left corner of the + * video frame as the origin). */ int y; /** @@ -3705,18 +3803,21 @@ typedef struct RtcImage { /** * The configuration for advanced features of the RTMP or RTMPS streaming with transcoding. * - * If you want to enable the advanced features of streaming with transcoding, contact support@agora.io. + * If you want to enable the advanced features of streaming with transcoding, contact + * support@agora.io. */ struct LiveStreamAdvancedFeature { LiveStreamAdvancedFeature() : featureName(OPTIONAL_NULLPTR), opened(false) {} - LiveStreamAdvancedFeature(const char* feat_name, bool open) : featureName(feat_name), opened(open) {} + LiveStreamAdvancedFeature(const char* feat_name, bool open) + : featureName(feat_name), opened(open) {} /** The advanced feature for high-quality video with a lower bitrate. */ // static const char* LBHQ = "lbhq"; /** The advanced feature for the optimized video encoder. */ // static const char* VEO = "veo"; /** - * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized video encoder). + * The feature names, including LBHQ (high-quality video with a lower bitrate) and VEO (optimized + * video encoder). */ const char* featureName; @@ -3726,15 +3827,15 @@ struct LiveStreamAdvancedFeature { * - `false`: (Default) Disable the advanced feature. */ bool opened; -} ; +}; /** * Connection state types. */ -enum CONNECTION_STATE_TYPE -{ +enum CONNECTION_STATE_TYPE { /** - * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of the following phases: + * 1: The SDK is disconnected from the Agora edge server. The state indicates the SDK is in one of + * the following phases: * - The initial state before calling the `joinChannel` method. * - The app calls the `leaveChannel` method. */ @@ -3786,11 +3887,15 @@ struct TranscodingUser { */ uid_t uid; /** - * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). The value range is [0, width], where width is the `width` set in `LiveTranscoding`. + * The x coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, width], where width is the + * `width` set in `LiveTranscoding`. */ int x; /** - * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left corner of the video frame as the origin). 
The value range is [0, height], where height is the `height` set in `LiveTranscoding`. + * The y coordinate (pixel) of the host's video on the output video frame (taking the upper left + * corner of the video frame as the origin). The value range is [0, height], where height is the + * `height` set in `LiveTranscoding`. */ int y; /** @@ -3807,7 +3912,7 @@ struct TranscodingUser { * - 100: The host's video is the top layer. * * If the value is beyond this range, the SDK reports the error code `ERR_INVALID_ARGUMENT`. - */ + */ int zOrder; /** * The transparency of the host's video. The value range is [0.0, 1.0]. @@ -3816,28 +3921,29 @@ struct TranscodingUser { */ double alpha; /** - * The audio channel used by the host's audio in the output audio. The default value is 0, and the value range is [0, 5]. - * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on the upstream of the host. - * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `4`: The host's audio uses the BL audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio channels, the Agora server mixes them into mono first. - * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the host's audio. + * The audio channel used by the host's audio in the output audio. The default value is 0, and the + * value range is [0, 5]. + * - `0`: (Recommended) The defaut setting, which supports dual channels at most and depends on + * the upstream of the host. + * - `1`: The host's audio uses the FL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `2`: The host's audio uses the FC audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `3`: The host's audio uses the FR audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `4`: The host's audio uses the BL audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `5`: The host's audio uses the BR audio channel. If the host's upstream uses multiple audio + * channels, the Agora server mixes them into mono first. + * - `0xFF` or a value greater than 5: The host's audio is muted, and the Agora server removes the + * host's audio. * * @note If the value is not `0`, a special player is required. */ int audioChannel; TranscodingUser() - : uid(0), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - audioChannel(0) {} + : uid(0), x(0), y(0), width(0), height(0), zOrder(0), alpha(1.0), audioChannel(0) {} }; /** @@ -3860,10 +3966,12 @@ struct LiveTranscoding { int height; /** Bitrate of the CDN live output video stream. The default value is 400 Kbps. - Set this parameter according to the Video Bitrate Table. 
If you set a bitrate beyond the proper range, the SDK automatically adapts it to a value within the range. + Set this parameter according to the Video Bitrate Table. If you set a bitrate beyond the proper + range, the SDK automatically adapts it to a value within the range. */ int videoBitrate; - /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15 fps, and the value range is (0,30]. + /** Frame rate of the output video stream set for the CDN live streaming. The default value is 15 + fps, and the value range is (0,30]. @note The Agora server adjusts any value over 30 to 30. */ @@ -3884,7 +3992,8 @@ struct LiveTranscoding { @note If you set this parameter to other values, Agora adjusts it to the default value of 100. */ VIDEO_CODEC_PROFILE_TYPE videoCodecProfile; - /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example, 0xFFB6C1 (light pink). The default value is 0x000000 (black). + /** The background color in RGB hex value. Value only. Do not include a preceeding #. For example, + * 0xFFB6C1 (light pink). The default value is 0x000000 (black). */ unsigned int backgroundColor; /** Video codec profile types for Media Push. See VIDEO_CODEC_TYPE_FOR_STREAM. */ @@ -3893,10 +4002,12 @@ struct LiveTranscoding { * The value range is [0, 17]. */ unsigned int userCount; - /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 transcoding users in a Media Push channel. See `TranscodingUser`. + /** Manages the user layout configuration in the Media Push. Agora supports a maximum of 17 + * transcoding users in a Media Push channel. See `TranscodingUser`. */ TranscodingUser* transcodingUsers; - /** Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream to the CDN live client. Maximum length: 4096 Bytes. + /** Reserved property. Extra user-defined information to send SEI for the H.264/H.265 video stream + to the CDN live client. Maximum length: 4096 Bytes. For more information on SEI frame, see [SEI-related questions](https://docs.agora.io/en/faq/sei). */ @@ -3907,31 +4018,38 @@ struct LiveTranscoding { const char* metadata; /** The watermark on the live video. The image format needs to be PNG. See `RtcImage`. - You can add one watermark, or add multiple watermarks using an array. This parameter is used with `watermarkCount`. + You can add one watermark, or add multiple watermarks using an array. This parameter is used with + `watermarkCount`. */ RtcImage* watermark; /** - * The number of watermarks on the live video. The total number of watermarks and background images can range from 0 to 10. This parameter is used with `watermark`. + * The number of watermarks on the live video. The total number of watermarks and background + * images can range from 0 to 10. This parameter is used with `watermark`. */ unsigned int watermarkCount; - /** The number of background images on the live video. The image format needs to be PNG. See `RtcImage`. + /** The number of background images on the live video. The image format needs to be PNG. See + * `RtcImage`. * - * You can add a background image or use an array to add multiple background images. This parameter is used with `backgroundImageCount`. + * You can add a background image or use an array to add multiple background images. This + * parameter is used with `backgroundImageCount`. */ RtcImage* backgroundImage; /** - * The number of background images on the live video. 
The total number of watermarks and background images can range from 0 to 10. This parameter is used with `backgroundImage`. + * The number of background images on the live video. The total number of watermarks and + * background images can range from 0 to 10. This parameter is used with `backgroundImage`. */ unsigned int backgroundImageCount; /** The audio sampling rate (Hz) of the output media stream. See #AUDIO_SAMPLE_RATE_TYPE. */ AUDIO_SAMPLE_RATE_TYPE audioSampleRate; - /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the highest value is 128. + /** Bitrate (Kbps) of the audio output stream for Media Push. The default value is 48, and the + * highest value is 128. */ int audioBitrate; - /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) audio channels. Special players are required if you choose 3, 4, or 5. + /** The number of audio channels for Media Push. Agora recommends choosing 1 (mono), or 2 (stereo) + * audio channels. Special players are required if you choose 3, 4, or 5. * - 1: (Default) Mono. * - 2: Stereo. * - 3: Three audio channels. @@ -3942,7 +4060,8 @@ struct LiveTranscoding { /** Audio codec profile type for Media Push. See #AUDIO_CODEC_PROFILE_TYPE. */ AUDIO_CODEC_PROFILE_TYPE audioCodecProfile; - /** Advanced features of the RTMP or RTMPS streaming with transcoding. See LiveStreamAdvancedFeature. + /** Advanced features of the RTMP or RTMPS streaming with transcoding. See + * LiveStreamAdvancedFeature. */ LiveStreamAdvancedFeature* advancedFeatures; @@ -3959,7 +4078,7 @@ struct LiveTranscoding { videoCodecProfile(VIDEO_CODEC_PROFILE_HIGH), backgroundColor(0x000000), videoCodecType(VIDEO_CODEC_H264_FOR_STREAM), - userCount(0), + userCount(0), transcodingUsers(OPTIONAL_NULLPTR), transcodingExtraInfo(OPTIONAL_NULLPTR), metadata(OPTIONAL_NULLPTR), @@ -3985,12 +4104,14 @@ struct TranscodingVideoStream { VIDEO_SOURCE_TYPE sourceType; /** * The ID of the remote user. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `VIDEO_SOURCE_REMOTE`. + * @note Use this parameter only when the source type of the video for the video mixing on the + * local client is `VIDEO_SOURCE_REMOTE`. */ uid_t remoteUserUid; /** * The URL of the image. - * @note Use this parameter only when the source type of the video for the video mixing on the local client is `RTC_IMAGE`. + * @note Use this parameter only when the source type of the video for the video mixing on the + * local client is `RTC_IMAGE`. */ const char* imageUrl; /** @@ -3998,11 +4119,13 @@ struct TranscodingVideoStream { */ int mediaPlayerId; /** - * The horizontal displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. + * The horizontal displacement of the top-left corner of the video for the video mixing on the + * client relative to the top-left corner (origin) of the canvas for this video mixing. */ int x; /** - * The vertical displacement of the top-left corner of the video for the video mixing on the client relative to the top-left corner (origin) of the canvas for this video mixing. + * The vertical displacement of the top-left corner of the video for the video mixing on the + * client relative to the top-left corner (origin) of the canvas for this video mixing. 
*/ int y; /** @@ -4014,13 +4137,16 @@ struct TranscodingVideoStream { */ int height; /** - * The number of the layer to which the video for the video mixing on the local client belongs. The value range is [0,100]. + * The number of the layer to which the video for the video mixing on the local client belongs. + * The value range is [0,100]. * - 0: (Default) The layer is at the bottom. * - 100: The layer is at the top. */ int zOrder; /** - * The transparency of the video for the video mixing on the local client. The value range is [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is opaque. + * The transparency of the video for the video mixing on the local client. The value range is + * [0.0,1.0]. 0.0 means the transparency is completely transparent. 1.0 means the transparency is + * opaque. */ double alpha; /** @@ -4032,16 +4158,16 @@ struct TranscodingVideoStream { bool mirror; TranscodingVideoStream() - : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), - remoteUserUid(0), - imageUrl(OPTIONAL_NULLPTR), - x(0), - y(0), - width(0), - height(0), - zOrder(0), - alpha(1.0), - mirror(false) {} + : sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + remoteUserUid(0), + imageUrl(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + zOrder(0), + alpha(1.0), + mirror(false) {} }; /** @@ -4057,17 +4183,25 @@ struct LocalTranscoderConfiguration { */ TranscodingVideoStream* videoInputStreams; /** - * The encoding configuration of the mixed video stream after the video mixing on the local client. See VideoEncoderConfiguration. + * The encoding configuration of the mixed video stream after the video mixing on the local + * client. See VideoEncoderConfiguration. */ VideoEncoderConfiguration videoOutputConfiguration; /** - * Whether to use the timestamp when the primary camera captures the video frame as the timestamp of the mixed video frame. - * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed video frame. - * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed video frame. Instead, use the timestamp when the mixed video frame is constructed. + * Whether to use the timestamp when the primary camera captures the video frame as the timestamp + * of the mixed video frame. + * - true: (Default) Use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. + * - false: Do not use the timestamp of the captured video frame as the timestamp of the mixed + * video frame. Instead, use the timestamp when the mixed video frame is constructed. */ bool syncWithPrimaryCamera; - LocalTranscoderConfiguration() : streamCount(0), videoInputStreams(OPTIONAL_NULLPTR), videoOutputConfiguration(), syncWithPrimaryCamera(true) {} + LocalTranscoderConfiguration() + : streamCount(0), + videoInputStreams(OPTIONAL_NULLPTR), + videoOutputConfiguration(), + syncWithPrimaryCamera(true) {} }; enum VIDEO_TRANSCODER_ERROR { @@ -4097,6 +4231,77 @@ enum VIDEO_TRANSCODER_ERROR { VT_ERR_INTERNAL = 20 }; + +/** + * The audio streams for the video mixing on the local client. + */ +struct MixedAudioStream { + /** + * The source type of audio for the audio mixing on the local client. See #AUDIO_SOURCE_TYPE. + */ + AUDIO_SOURCE_TYPE sourceType; + /** + * The ID of the remote user. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + uid_t remoteUserUid; + /** + * The channel ID of the remote user. 
+ * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + const char* channelId; + /** + * The track ID of the local track. + * @note Use this parameter only when the source type is `AUDIO_SOURCE_REMOTE`. + */ + track_id_t trackId; + + MixedAudioStream(AUDIO_SOURCE_TYPE source) + : sourceType(source), + remoteUserUid(0), + channelId(NULL), + trackId(-1) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, track_id_t track) + : sourceType(source), + trackId(track) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel) + : sourceType(source), + remoteUserUid(uid), + channelId(channel) {} + + MixedAudioStream(AUDIO_SOURCE_TYPE source, uid_t uid, const char* channel, track_id_t track) + : sourceType(source), + remoteUserUid(uid), + channelId(channel), + trackId(track) {} + +}; + +/** + * The configuration of the audio mixing on the local client. + */ +struct LocalAudioMixerConfiguration { + /** + * The number of the audio streams for the audio mixing on the local client. + */ + unsigned int streamCount; + /** + * The source audio streams to be mixed. + */ + MixedAudioStream* audioInputStreams; + + /** + * Whether the timestamp of the mixed audio frame follows the local mic's audio frame. + * - true: (Default) Use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. + * - false: Do not use the timestamp of the captured audio frame as the timestamp of the mixed audio frame. Instead, use the timestamp when the mixed audio frame is constructed. + */ + bool syncWithLocalMic; + + LocalAudioMixerConfiguration() : streamCount(0), syncWithLocalMic(true) {} +}; + /** * Configurations of the last-mile network test. */ @@ -4115,12 +4320,14 @@ struct LastmileProbeConfig { */ bool probeDownlink; /** - * The expected maximum sending bitrate (bps) of the local user. The value range is [100000, 5000000]. We recommend setting this parameter - * according to the bitrate value set by `setVideoEncoderConfiguration`. + * The expected maximum sending bitrate (bps) of the local user. The value range is [100000, + * 5000000]. We recommend setting this parameter according to the bitrate value set by + * `setVideoEncoderConfiguration`. */ unsigned int expectedUplinkBitrate; /** - * The expected maximum receiving bitrate (bps) of the local user. The value range is [100000,5000000]. + * The expected maximum receiving bitrate (bps) of the local user. The value range is + * [100000,5000000]. */ unsigned int expectedDownlinkBitrate; }; @@ -4134,11 +4341,13 @@ enum LASTMILE_PROBE_RESULT_STATE { */ LASTMILE_PROBE_RESULT_COMPLETE = 1, /** - * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not available due to limited test resources. + * 2: The last-mile network probe test is incomplete because the bandwidth estimation is not + * available due to limited test resources. */ LASTMILE_PROBE_RESULT_INCOMPLETE_NO_BWE = 2, /** - * 3: The last-mile network probe test is not carried out, probably due to poor network conditions. + * 3: The last-mile network probe test is not carried out, probably due to poor network + * conditions.
*/ LASTMILE_PROBE_RESULT_UNAVAILABLE = 3 }; @@ -4160,9 +4369,7 @@ struct LastmileProbeOneWayResult { */ unsigned int availableBandwidth; - LastmileProbeOneWayResult() : packetLossRate(0), - jitter(0), - availableBandwidth(0) {} + LastmileProbeOneWayResult() : packetLossRate(0), jitter(0), availableBandwidth(0) {} }; /** @@ -4186,16 +4393,13 @@ struct LastmileProbeResult { */ unsigned int rtt; - LastmileProbeResult() - : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), - rtt(0) {} + LastmileProbeResult() : state(LASTMILE_PROBE_RESULT_UNAVAILABLE), rtt(0) {} }; /** * Reasons causing the change of the connection state. */ -enum CONNECTION_CHANGED_REASON_TYPE -{ +enum CONNECTION_CHANGED_REASON_TYPE { /** * 0: The SDK is connecting to the server. */ @@ -4209,11 +4413,13 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_INTERRUPTED = 2, /** - * 3: The connection between the SDK and the server is banned by the server. This error occurs when the user is kicked out of the channel by the server. + * 3: The connection between the SDK and the server is banned by the server. This error occurs + * when the user is kicked out of the channel by the server. */ CONNECTION_CHANGED_BANNED_BY_SERVER = 3, /** - * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 minutes, this error occurs and the SDK stops reconnecting to the channel. + * 4: The SDK fails to join the channel. When the SDK fails to join the channel for more than 20 + * minutes, this error occurs and the SDK stops reconnecting to the channel. */ CONNECTION_CHANGED_JOIN_FAILED = 4, /** @@ -4225,13 +4431,17 @@ enum CONNECTION_CHANGED_REASON_TYPE */ CONNECTION_CHANGED_INVALID_APP_ID = 6, /** - * 7: The connection fails because the channel name is not valid. Please rejoin the channel with a valid channel name. + * 7: The connection fails because the channel name is not valid. Please rejoin the channel with a + * valid channel name. */ CONNECTION_CHANGED_INVALID_CHANNEL_NAME = 7, /** * 8: The connection fails because the token is not valid. Typical reasons include: - * - The App Certificate for the project is enabled in Agora Console, but you do not use a token when joining the channel. If you enable the App Certificate, you must use a token to join the channel. - * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the `uid` passed in when generating the token. + * - The App Certificate for the project is enabled in Agora Console, but you do not use a token + * when joining the channel. If you enable the App Certificate, you must use a token to join the + * channel. + * - The `uid` specified when calling `joinChannel` to join the channel is inconsistent with the + * `uid` passed in when generating the token. */ CONNECTION_CHANGED_INVALID_TOKEN = 8, /** @@ -4240,8 +4450,10 @@ enum CONNECTION_CHANGED_REASON_TYPE CONNECTION_CHANGED_TOKEN_EXPIRED = 9, /** * 10: The connection is rejected by the server. Typical reasons include: - * - The user is already in the channel and still calls a method, for example, `joinChannel`, to join the channel. Stop calling this method to clear this error. - * - The user tries to join the channel when conducting a pre-call test. The user needs to call the channel after the call test ends. + * - The user is already in the channel and still calls a method, for example, `joinChannel`, to + * join the channel. Stop calling this method to clear this error. + * - The user tries to join the channel when conducting a pre-call test. 
The user needs to call + * the channel after the call test ends. */ CONNECTION_CHANGED_REJECTED_BY_SERVER = 10, /** @@ -4253,11 +4465,13 @@ */ CONNECTION_CHANGED_RENEW_TOKEN = 12, /** - * 13: The IP address of the client has changed, possibly because the network type, IP address, or port has been changed. + * 13: The IP address of the client has changed, possibly because the network type, IP address, or + * port has been changed. */ CONNECTION_CHANGED_CLIENT_IP_ADDRESS_CHANGED = 13, /** - * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The connection state changes to CONNECTION_STATE_RECONNECTING. + * 14: Timeout for the keep-alive of the connection between the SDK and the Agora edge server. The + * connection state changes to CONNECTION_STATE_RECONNECTING. */ CONNECTION_CHANGED_KEEP_ALIVE_TIMEOUT = 14, /** @@ -4354,11 +4568,13 @@ enum WLACC_SUGGEST_ACTION { */ WLACC_SUGGEST_ACTION_CONNECT_SSID = 1, /** - * The user is advised to check whether the AP supports 5G band and enable 5G band (the aciton link is attached), or purchases an AP that supports 5G. AP does not support 5G band. + * The user is advised to check whether the AP supports 5G band and enable 5G band (the action + * link is attached), or purchase an AP that supports 5G. AP does not support 5G band. */ WLACC_SUGGEST_ACTION_CHECK_5G = 2, /** - * The user is advised to change the SSID of the 2.4G or 5G band (the aciton link is attached). The SSID of the 2.4G band AP is the same as that of the 5G band. + * The user is advised to change the SSID of the 2.4G or 5G band (the action link is attached). + * The SSID of the 2.4G band AP is the same as that of the 5G band. */ WLACC_SUGGEST_ACTION_MODIFY_SSID = 3, }; @@ -4447,8 +4663,9 @@ struct VideoCanvas { uid_t uid; /** - * The uid of video stream composing the video stream from transcoder which will be drawn on this video canvas. - */ + * The uid of video stream composing the video stream from transcoder which will be drawn on this + * video canvas. + */ uid_t subviewUid; /** * Video display window. */ @@ -4467,7 +4684,7 @@ struct VideoCanvas { * The video mirror mode. See \ref VIDEO_MIRROR_MODE_TYPE "VIDEO_MIRROR_MODE_TYPE". * The default value is VIDEO_MIRROR_MODE_AUTO. * @note - * - For the mirror mode of the local video view: + * - For the mirror mode of the local video view: * If you use a front camera, the SDK enables the mirror mode by default; * if you use a rear camera, the SDK disables the mirror mode by default. * - For the remote user: The mirror mode is disabled by default. @@ -4484,14 +4701,14 @@ */ VIDEO_SOURCE_TYPE sourceType; /** - * The media player id of AgoraMediaPlayer. It should set this parameter when the + * The media player id of AgoraMediaPlayer. Set this parameter when the * sourceType is VIDEO_SOURCE_MEDIA_PLAYER to show the video that AgoraMediaPlayer is playing. * You can get this value by calling the method \ref getMediaPlayerId(). */ int mediaPlayerId; /** - * If you want to display a certain part of a video frame, you can set - * this value to crop the video frame to show. + * If you want to display a certain part of a video frame, you can set + * this value to crop the video frame to show. * The default value is empty(that is, if it has zero width or height), which means no cropping.
*/ Rectangle cropArea; @@ -4508,62 +4725,115 @@ struct VideoCanvas { media::base::VIDEO_MODULE_POSITION position; VideoCanvas() - : uid(0), subviewUid(0), view(NULL), backgroundColor(0x00000000), renderMode(media::base::RENDER_MODE_HIDDEN), mirrorMode(VIDEO_MIRROR_MODE_AUTO), - setupMode(VIDEO_VIEW_SETUP_REPLACE), sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(NULL), + backgroundColor(0x00000000), + renderMode(media::base::RENDER_MODE_HIDDEN), + mirrorMode(VIDEO_MIRROR_MODE_AUTO), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt) - : uid(0), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(0), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u) - : uid(u), subviewUid(0), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} - - VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, uid_t subu) - : uid(u), subviewUid(subu), view(v), backgroundColor(0x00000000), renderMode(m), mirrorMode(mt), setupMode(VIDEO_VIEW_SETUP_REPLACE), - sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), mediaPlayerId(-ERR_NOT_READY), - cropArea(0, 0, 0, 0), enableAlphaMask(false), position(media::base::POSITION_POST_CAPTURER) {} + : uid(u), + subviewUid(0), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} + + VideoCanvas(view_t v, media::base::RENDER_MODE_TYPE m, VIDEO_MIRROR_MODE_TYPE mt, uid_t u, + uid_t subu) + : uid(u), + subviewUid(subu), + view(v), + backgroundColor(0x00000000), + renderMode(m), + mirrorMode(mt), + setupMode(VIDEO_VIEW_SETUP_REPLACE), + sourceType(VIDEO_SOURCE_CAMERA_PRIMARY), + mediaPlayerId(-ERR_NOT_READY), + cropArea(0, 0, 0, 0), + enableAlphaMask(false), + position(media::base::POSITION_POST_CAPTURER) {} }; /** Image enhancement options. */ struct BeautyOptions { /** The contrast level. - */ + */ enum LIGHTENING_CONTRAST_LEVEL { - /** Low contrast level. */ - LIGHTENING_CONTRAST_LOW = 0, - /** (Default) Normal contrast level. */ - LIGHTENING_CONTRAST_NORMAL = 1, - /** High contrast level. */ - LIGHTENING_CONTRAST_HIGH = 2, + /** Low contrast level. 
*/ + LIGHTENING_CONTRAST_LOW = 0, + /** (Default) Normal contrast level. */ + LIGHTENING_CONTRAST_NORMAL = 1, + /** High contrast level. */ + LIGHTENING_CONTRAST_HIGH = 2, }; - /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL. - */ + /** The contrast level, used with the `lighteningLevel` parameter. The larger the value, the + * greater the contrast between light and dark. See #LIGHTENING_CONTRAST_LEVEL. + */ LIGHTENING_CONTRAST_LEVEL lighteningContrastLevel; - /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of whitening. */ + /** The brightness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. + * The greater the value, the greater the degree of whitening. */ float lighteningLevel; - /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, the greater the degree of skin grinding. - */ + /** The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The greater the value, + * the greater the degree of skin grinding. + */ float smoothnessLevel; - /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the rosy degree. - */ + /** The redness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The + * larger the value, the greater the rosy degree. + */ float rednessLevel; - /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. The larger the value, the greater the sharpening degree. - */ + /** The sharpness level. The value ranges from 0.0 (original) to 1.0. The default value is 0.0. + * The larger the value, the greater the sharpening degree. + */ float sharpnessLevel; - BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness, float redness, float sharpness) : lighteningContrastLevel(contrastLevel), lighteningLevel(lightening), smoothnessLevel(smoothness), rednessLevel(redness), sharpnessLevel(sharpness) {} + BeautyOptions(LIGHTENING_CONTRAST_LEVEL contrastLevel, float lightening, float smoothness, + float redness, float sharpness) + : lighteningContrastLevel(contrastLevel), + lighteningLevel(lightening), + smoothnessLevel(smoothness), + rednessLevel(redness), + sharpnessLevel(sharpness) {} - BeautyOptions() : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL), lighteningLevel(0), smoothnessLevel(0), rednessLevel(0), sharpnessLevel(0) {} + BeautyOptions() + : lighteningContrastLevel(LIGHTENING_CONTRAST_NORMAL), + lighteningLevel(0), + smoothnessLevel(0), + rednessLevel(0), + sharpnessLevel(0) {} }; /** Face shape area options. This structure defines options for facial adjustments on different facial areas. @@ -4645,12 +4915,45 @@ struct FaceShapeBeautyOptions { FaceShapeBeautyOptions() : shapeStyle(FACE_SHAPE_BEAUTY_STYLE_FEMALE), styleIntensity(50) {} }; +/** Filter effect options. This structure defines options for filter effect. + * + * @since v4.4.1 + */ +struct FilterEffectOptions { + /** + * The local absolute path of the custom 3D Cube path. Only cube format is supported. + * The cube file must strictly comply with the Cube LUT Specification; otherwise, the filter effects will not take effect. + * + * The following is an example of the Cube file format. 
The cube file starts with `LUT_3D_SIZE`, which indicates the cube size. In filter effects, the cube size is limited to 32. + + * LUT_3D_SIZE 32 + * 0.0039215689 0 0.0039215682 + * 0.0086021447 0.0037950677 0 + * 0.0728652592 0.0039215689 0 + * ... + * + * The SDK provides a built-in cube named `built_in_whiten.cube` for whitening. To use this cube, specify the path to `built_in_whiten_filter` + */ + const char * path; + + /** + * The intensity of specified filter effect. The value ranges from 0.0 to 1.0. The default value is 0.5. The greater the value, the stronger the intensity of the filter. + */ + float strength; + + FilterEffectOptions(const char * lut3dPath, float filterStrength) : path(lut3dPath), strength(filterStrength) {} + + FilterEffectOptions() : path(OPTIONAL_NULLPTR), strength(0.5) {} +}; + struct LowlightEnhanceOptions { /** * The low-light enhancement mode. */ enum LOW_LIGHT_ENHANCE_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light enhancement feature according to the ambient light to compensate for the lighting level or prevent overexposure, as necessary. */ + /** 0: (Default) Automatic mode. The SDK automatically enables or disables the low-light + enhancement feature according to the ambient light to compensate for the lighting level or + prevent overexposure, as necessary. */ LOW_LIGHT_ENHANCE_AUTO = 0, /** Manual mode. Users need to enable or disable the low-light enhancement feature manually. */ LOW_LIGHT_ENHANCE_MANUAL = 1, @@ -4660,11 +4963,14 @@ struct LowlightEnhanceOptions { */ enum LOW_LIGHT_ENHANCE_LEVEL { /** - * 0: (Default) Promotes video quality during low-light enhancement. It processes the brightness, details, and noise of the video image. The performance consumption is moderate, the processing speed is moderate, and the overall video quality is optimal. + * 0: (Default) Promotes video quality during low-light enhancement. It processes the + * brightness, details, and noise of the video image. The performance consumption is moderate, + * the processing speed is moderate, and the overall video quality is optimal. */ LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY = 0, /** - * Promotes performance during low-light enhancement. It processes the brightness and details of the video image. The processing speed is faster. + * Promotes performance during low-light enhancement. It processes the brightness and details of + * the video image. The processing speed is faster. */ LOW_LIGHT_ENHANCE_LEVEL_FAST = 1, }; @@ -4677,9 +4983,11 @@ struct LowlightEnhanceOptions { */ LOW_LIGHT_ENHANCE_LEVEL level; - LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) : mode(lowlightMode), level(lowlightLevel) {} + LowlightEnhanceOptions(LOW_LIGHT_ENHANCE_MODE lowlightMode, LOW_LIGHT_ENHANCE_LEVEL lowlightLevel) + : mode(lowlightMode), level(lowlightLevel) {} - LowlightEnhanceOptions() : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} + LowlightEnhanceOptions() + : mode(LOW_LIGHT_ENHANCE_AUTO), level(LOW_LIGHT_ENHANCE_LEVEL_HIGH_QUALITY) {} }; /** * The video noise reduction options. @@ -4690,7 +4998,8 @@ struct VideoDenoiserOptions { /** The video noise reduction mode. */ enum VIDEO_DENOISER_MODE { - /** 0: (Default) Automatic mode. The SDK automatically enables or disables the video noise reduction feature according to the ambient light. */ + /** 0: (Default) Automatic mode. 
The SDK automatically enables or disables the video noise + reduction feature according to the ambient light. */ VIDEO_DENOISER_AUTO = 0, /** Manual mode. Users need to enable or disable the video noise reduction feature manually. */ VIDEO_DENOISER_MANUAL = 1, @@ -4700,21 +5009,20 @@ struct VideoDenoiserOptions { */ enum VIDEO_DENOISER_LEVEL { /** - * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances performance consumption and video noise reduction quality. - * The performance consumption is moderate, the video noise reduction speed is moderate, and the overall video quality is optimal. + * 0: (Default) Promotes video quality during video noise reduction. `HIGH_QUALITY` balances + * performance consumption and video noise reduction quality. The performance consumption is + * moderate, the video noise reduction speed is moderate, and the overall video quality is + * optimal. */ VIDEO_DENOISER_LEVEL_HIGH_QUALITY = 0, /** - * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes reducing performance consumption over video noise reduction quality. - * The performance consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable shadowing effect (shadows trailing behind moving objects) in the processed video, Agora recommends that you use `FAST` when the camera is fixed. + * Promotes reducing performance consumption during video noise reduction. `FAST` prioritizes + * reducing performance consumption over video noise reduction quality. The performance + * consumption is lower, and the video noise reduction speed is faster. To avoid a noticeable + * shadowing effect (shadows trailing behind moving objects) in the processed video, Agora + * recommends that you use `FAST` when the camera is fixed. */ VIDEO_DENOISER_LEVEL_FAST = 1, - /** - * Enhanced video noise reduction. `STRENGTH` prioritizes video noise reduction quality over reducing performance consumption. - * The performance consumption is higher, the video noise reduction speed is slower, and the video noise reduction quality is better. - * If `HIGH_QUALITY` is not enough for your video noise reduction needs, you can use `STRENGTH`. - */ - VIDEO_DENOISER_LEVEL_STRENGTH = 2, }; /** The video noise reduction mode. See #VIDEO_DENOISER_MODE. */ @@ -4724,7 +5032,8 @@ struct VideoDenoiserOptions { */ VIDEO_DENOISER_LEVEL level; - VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) : mode(denoiserMode), level(denoiserLevel) {} + VideoDenoiserOptions(VIDEO_DENOISER_MODE denoiserMode, VIDEO_DENOISER_LEVEL denoiserLevel) + : mode(denoiserMode), level(denoiserLevel) {} VideoDenoiserOptions() : mode(VIDEO_DENOISER_AUTO), level(VIDEO_DENOISER_LEVEL_HIGH_QUALITY) {} }; @@ -4734,17 +5043,24 @@ struct VideoDenoiserOptions { * @since v4.0.0 */ struct ColorEnhanceOptions { - /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value, which means no color enhancement is applied to the video. The higher the value, the higher the level of color enhancement. + /** The level of color enhancement. The value range is [0.0,1.0]. `0.0` is the default value, + * which means no color enhancement is applied to the video. The higher the value, the higher the + * level of color enhancement. */ float strengthLevel; - /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone protection. The higher the value, the higher the level of skin tone protection. 
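For illustration, a minimal sketch of enabling color enhancement with these two fields, assuming an initialized IRtcEngine pointer named engine and that setColorEnhanceOptions(bool, const ColorEnhanceOptions&) is available in this build; both names are assumptions of the sketch.

#include "IAgoraRtcEngine.h"  // assumed SDK include path

using namespace agora::rtc;

void applyColorEnhance(IRtcEngine* engine) {
  ColorEnhanceOptions opts;
  opts.strengthLevel = 0.5f;     // moderate color enhancement
  opts.skinProtectLevel = 1.0f;  // keep full skin-tone protection
  engine->setColorEnhanceOptions(true, opts);
}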
- * The default value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be significantly distorted, so you need to set the level of skin tone protection; when the level of skin tone protection is higher, the color enhancement effect can be slightly reduced. - * Therefore, to get the best color enhancement effect, Agora recommends that you adjust `strengthLevel` and `skinProtectLevel` to get the most appropriate values. + /** The level of skin tone protection. The value range is [0.0,1.0]. `0.0` means no skin tone + * protection. The higher the value, the higher the level of skin tone protection. The default + * value is `1.0`. When the level of color enhancement is higher, the portrait skin tone can be + * significantly distorted, so you need to set the level of skin tone protection; when the level + * of skin tone protection is higher, the color enhancement effect can be slightly reduced. + * Therefore, to get the best color enhancement effect, Agora recommends that you adjust + * `strengthLevel` and `skinProtectLevel` to get the most appropriate values. */ float skinProtectLevel; - ColorEnhanceOptions(float stength, float skinProtect) : strengthLevel(stength), skinProtectLevel(skinProtect) {} + ColorEnhanceOptions(float stength, float skinProtect) + : strengthLevel(stength), skinProtectLevel(skinProtect) {} ColorEnhanceOptions() : strengthLevel(0), skinProtectLevel(1) {} }; @@ -4768,12 +5084,12 @@ struct VirtualBackgroundSource { * The background source is a file in PNG or JPG format. */ BACKGROUND_IMG = 2, - /** + /** * The background source is the blurred original video frame. * */ BACKGROUND_BLUR = 3, - /** - * The background source is a file in MP4, AVI, MKV, FLV format. + /** + * The background source is a file in MP4, AVI, MKV, FLV format. * */ BACKGROUND_VIDEO = 4, }; @@ -4781,11 +5097,14 @@ struct VirtualBackgroundSource { /** The degree of blurring applied to the background source. */ enum BACKGROUND_BLUR_DEGREE { - /** 1: The degree of blurring applied to the custom background image is low. The user can almost see the background clearly. */ + /** 1: The degree of blurring applied to the custom background image is low. The user can almost + see the background clearly. */ BLUR_DEGREE_LOW = 1, - /** 2: The degree of blurring applied to the custom background image is medium. It is difficult for the user to recognize details in the background. */ + /** 2: The degree of blurring applied to the custom background image is medium. It is difficult + for the user to recognize details in the background. */ BLUR_DEGREE_MEDIUM = 2, - /** 3: (Default) The degree of blurring applied to the custom background image is high. The user can barely see any distinguishing features in the background. */ + /** 3: (Default) The degree of blurring applied to the custom background image is high. The user + can barely see any distinguishing features in the background. */ BLUR_DEGREE_HIGH = 3, }; @@ -4794,34 +5113,41 @@ struct VirtualBackgroundSource { BACKGROUND_SOURCE_TYPE background_source_type; /** - * The color of the custom background image. The format is a hexadecimal integer defined by RGB, without the # sign, - * such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which signifies white. The value range - * is [0x000000,0xFFFFFF]. If the value is invalid, the SDK replaces the original background image with a white - * background image. + * The color of the custom background image. 
The format is a hexadecimal integer defined by RGB, + * without the # sign, such as 0xFFB6C1 for light pink. The default value is 0xFFFFFF, which + * signifies white. The value range is [0x000000,0xFFFFFF]. If the value is invalid, the SDK + * replaces the original background image with a white background image. * - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_COLOR`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_COLOR`. */ unsigned int color; /** - * The local absolute path of the custom background image. PNG and JPG formats are supported. If the path is invalid, - * the SDK replaces the original background image with a white background image. + * The local absolute path of the custom background image. PNG and JPG formats are supported. If + * the path is invalid, the SDK replaces the original background image with a white background + * image. * - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_IMG`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_IMG`. */ const char* source; /** The degree of blurring applied to the custom background image. See BACKGROUND_BLUR_DEGREE. - * @note This parameter takes effect only when the type of the custom background image is `BACKGROUND_BLUR`. + * @note This parameter takes effect only when the type of the custom background image is + * `BACKGROUND_BLUR`. */ BACKGROUND_BLUR_DEGREE blur_degree; - VirtualBackgroundSource() : background_source_type(BACKGROUND_COLOR), color(0xffffff), source(OPTIONAL_NULLPTR), blur_degree(BLUR_DEGREE_HIGH) {} + VirtualBackgroundSource() + : background_source_type(BACKGROUND_COLOR), + color(0xffffff), + source(OPTIONAL_NULLPTR), + blur_degree(BLUR_DEGREE_HIGH) {} }; struct SegmentationProperty { - - enum SEG_MODEL_TYPE { + enum SEG_MODEL_TYPE { SEG_MODEL_AI = 1, SEG_MODEL_GREEN = 2 @@ -4831,34 +5157,33 @@ struct SegmentationProperty { float greenCapacity; - - SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5){} + SegmentationProperty() : modelType(SEG_MODEL_AI), greenCapacity(0.5) {} }; /** The type of custom audio track -*/ + */ enum AUDIO_TRACK_TYPE { - /** + /** * -1: Invalid audio track */ AUDIO_TRACK_INVALID = -1, - /** + /** * 0: Mixable audio track - * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid), + * You can push more than one mixable Audio tracks into one RTC connection(channel id + uid), * and SDK will mix these tracks into one audio track automatically. * However, compare to direct audio track, mixable track might cause extra 30ms+ delay. */ AUDIO_TRACK_MIXABLE = 0, /** * 1: Direct audio track - * You can only push one direct (non-mixable) audio track into one RTC connection(channel id + uid). - * Compare to mixable stream, you can have lower lantency using direct audio track. + * You can only push one direct (non-mixable) audio track into one RTC connection(channel id + + * uid). Compare to mixable stream, you can have lower lantency using direct audio track. */ AUDIO_TRACK_DIRECT = 1, }; /** The configuration of custom audio track -*/ + */ struct AudioTrackConfig { /** * Enable local playback, enabled by default @@ -4866,9 +5191,14 @@ struct AudioTrackConfig { * false: Do not enable local playback */ bool enableLocalPlayback; + /** + * Whether to enable APM (AEC/ANS/AGC) processing when the trackType is AUDIO_TRACK_DIRECT. 
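For illustration, a minimal sketch of creating a direct custom audio track with APM enabled, assuming an initialized IRtcEngine pointer named engine and that this SDK version exposes createCustomAudioTrack(AUDIO_TRACK_TYPE, const AudioTrackConfig&); the method, its return type, and the function name are assumptions of the sketch.

#include "IAgoraRtcEngine.h"  // assumed SDK include path

using namespace agora::rtc;

void createDirectTrackWithApm(IRtcEngine* engine) {
  AudioTrackConfig cfg;
  cfg.enableLocalPlayback = false;   // do not play the pushed audio back locally
  cfg.enableAudioProcessing = true;  // run AEC/ANS/AGC on this direct track
  unsigned int trackId = engine->createCustomAudioTrack(AUDIO_TRACK_DIRECT, cfg);
  (void)trackId;  // reference this track id when publishing the custom audio stream
}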
+ * false: (Default) Do not enable APM processing. + * true: Enable APM processing. + */ + bool enableAudioProcessing; - AudioTrackConfig() - : enableLocalPlayback(true) {} + AudioTrackConfig() : enableLocalPlayback(true),enableAudioProcessing(false) {} }; /** @@ -4915,11 +5245,12 @@ enum VOICE_BEAUTIFIER_PRESET { CHAT_BEAUTIFIER_VITALITY = 0x01010300, /** * Singing beautifier effect. - * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding voice and add a reverberation effect - * that sounds like singing in a small room. Agora recommends not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process - * a female-sounding voice; otherwise, you may experience vocal distortion. - * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can beautify a male- or - * female-sounding voice and add a reverberation effect. + * - If you call `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER), you can beautify a male-sounding + * voice and add a reverberation effect that sounds like singing in a small room. Agora recommends + * not using `setVoiceBeautifierPreset`(SINGING_BEAUTIFIER) to process a female-sounding voice; + * otherwise, you may experience vocal distortion. + * - If you call `setVoiceBeautifierParameters`(SINGING_BEAUTIFIER, param1, param2), you can + * beautify a male- or female-sounding voice and add a reverberation effect. */ SINGING_BEAUTIFIER = 0x01020100, /** A more vigorous voice. @@ -4949,8 +5280,9 @@ enum VOICE_BEAUTIFIER_PRESET { /** * A ultra-high quality voice, which makes the audio clearer and restores more details. * - To achieve better audio effect quality, Agora recommends that you call `setAudioProfile` - * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` - * and `scenario` to `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`. + * and set the `profile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or + * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` and `scenario` to + * `AUDIO_SCENARIO_HIGH_DEFINITION(6)` before calling `setVoiceBeautifierPreset`. * - If you have an audio capturing device that can already restore audio details to a high * degree, Agora recommends that you do not enable ultra-high quality; otherwise, the SDK may * over-restore audio details, and you may not hear the anticipated voice effect. @@ -4960,7 +5292,9 @@ enum VOICE_BEAUTIFIER_PRESET { /** Preset voice effects. * - * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile` to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using the following presets: + * For better voice effects, Agora recommends setting the `profile` parameter of `setAudioProfile` + * to `AUDIO_PROFILE_MUSIC_HIGH_QUALITY` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO` before using + * the following presets: * * - `ROOM_ACOUSTICS_KTV` * - `ROOM_ACOUSTICS_VOCAL_CONCERT` @@ -5008,8 +5342,8 @@ enum AUDIO_EFFECT_PRESET { */ ROOM_ACOUSTICS_ETHEREAL = 0x02010700, /** A 3D voice effect that makes the voice appear to be moving around the user. The default cycle - * period of the 3D voice effect is 10 seconds. To change the cycle period, call `setAudioEffectParameters` - * after this method. + * period of the 3D voice effect is 10 seconds. To change the cycle period, call + * `setAudioEffectParameters` after this method. 
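For illustration, a minimal sketch of selecting the 3D voice preset and then changing its cycle period, assuming an initialized IRtcEngine pointer named engine; treating param1 as the cycle period in seconds (with param2 unused) follows the preset description above and is an assumption of the sketch.

#include "IAgoraRtcEngine.h"  // assumed SDK include path

using namespace agora::rtc;

void enable3dVoice(IRtcEngine* engine) {
  engine->setAudioEffectPreset(ROOM_ACOUSTICS_3D_VOICE);
  // Shorten the rotation cycle from the default 10 s to 5 s.
  engine->setAudioEffectParameters(ROOM_ACOUSTICS_3D_VOICE, 5, 0);
}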
* * @note * - Before using this preset, set the `profile` parameter of `setAudioProfile` to @@ -5031,12 +5365,12 @@ enum AUDIO_EFFECT_PRESET { */ ROOM_ACOUSTICS_VIRTUAL_SURROUND_SOUND = 0x02010900, /** The voice effect for chorus. - * + * * @note: To achieve better audio effect quality, Agora recommends calling \ref * IRtcEngine::setAudioProfile "setAudioProfile" and setting the `profile` parameter to * `AUDIO_PROFILE_MUSIC_HIGH_QUALITY(4)` or `AUDIO_PROFILE_MUSIC_HIGH_QUALITY_STEREO(5)` before * setting this enumerator. - */ + */ ROOM_ACOUSTICS_CHORUS = 0x02010D00, /** A middle-aged man's voice. * @@ -5047,14 +5381,14 @@ enum AUDIO_EFFECT_PRESET { VOICE_CHANGER_EFFECT_UNCLE = 0x02020100, /** A senior man's voice. * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_OLDMAN = 0x02020200, /** A boy's voice. * - * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a male-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_BOY = 0x02020300, /** A young woman's voice. @@ -5066,8 +5400,8 @@ enum AUDIO_EFFECT_PRESET { VOICE_CHANGER_EFFECT_SISTER = 0x02020400, /** A girl's voice. * - * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you may - * not hear the anticipated voice effect. + * @note Agora recommends using this enumerator to process a female-sounding voice; otherwise, you + * may not hear the anticipated voice effect. */ VOICE_CHANGER_EFFECT_GIRL = 0x02020500, /** The voice of Pig King, a character in Journey to the West who has a voice like a growling @@ -5092,8 +5426,8 @@ enum AUDIO_EFFECT_PRESET { */ STYLE_TRANSFORMATION_POPULAR = 0x02030200, /** A pitch correction effect that corrects the user's pitch based on the pitch of the natural C - * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust - * the basic mode of tuning and the pitch of the main tone. + * major scale. After setting this voice effect, you can call `setAudioEffectParameters` to adjust + * the basic mode of tuning and the pitch of the main tone. */ PITCH_CORRECTION = 0x02040100, @@ -5108,16 +5442,20 @@ enum VOICE_CONVERSION_PRESET { /** Turn off voice conversion and use the original voice. */ VOICE_CONVERSION_OFF = 0x00000000, - /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** A gender-neutral voice. To avoid audio distortion, ensure that you use this enumerator to + * process a female-sounding voice. */ VOICE_CHANGER_NEUTRAL = 0x03010100, - /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a female-sounding voice. + /** A sweet voice. To avoid audio distortion, ensure that you use this enumerator to process a + * female-sounding voice. */ VOICE_CHANGER_SWEET = 0x03010200, - /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** A steady voice. To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. 
*/ VOICE_CHANGER_SOLID = 0x03010300, - /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a male-sounding voice. + /** A deep voice. To avoid audio distortion, ensure that you use this enumerator to process a + * male-sounding voice. */ VOICE_CHANGER_BASS = 0x03010400, /** A voice like a cartoon character. @@ -5224,9 +5562,9 @@ struct ScreenCaptureParameters { */ VideoDimensions dimensions; /** - * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen stream. - * The frame rate (fps) of the shared region. The default value is 5. We do not recommend setting - * this to a value greater than 15. + * On Windows and macOS, it represents the video encoding frame rate (fps) of the shared screen + * stream. The frame rate (fps) of the shared region. The default value is 5. We do not recommend + * setting this to a value greater than 15. */ int frameRate; /** @@ -5241,52 +5579,109 @@ struct ScreenCaptureParameters { */ bool captureMouseCursor; /** - * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method to share it: + * Whether to bring the window to the front when calling the `startScreenCaptureByWindowId` method + * to share it: * - `true`: Bring the window to the front. * - `false`: (Default) Do not bring the window to the front. - */ + */ bool windowFocus; /** - * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start screen sharing, - * you can use this parameter to block a specified window. When calling `updateScreenCaptureParameters` to update - * screen sharing configurations, you can use this parameter to dynamically block the specified windows during - * screen sharing. + * A list of IDs of windows to be blocked. When calling `startScreenCaptureByDisplayId` to start + * screen sharing, you can use this parameter to block a specified window. When calling + * `updateScreenCaptureParameters` to update screen sharing configurations, you can use this + * parameter to dynamically block the specified windows during screen sharing. */ - view_t *excludeWindowList; + view_t* excludeWindowList; /** * The number of windows to be blocked. */ int excludeWindowCount; /** The width (px) of the border. Defaults to 0, and the value range is [0,50]. - * - */ + * + */ int highLightWidth; /** The color of the border in RGBA format. The default value is 0xFF8CBF26. - * - */ + * + */ unsigned int highLightColor; /** Whether to place a border around the shared window or screen: - * - true: Place a border. - * - false: (Default) Do not place a border. - * - * @note When you share a part of a window or screen, the SDK places a border around the entire window or screen if you set `enableHighLight` as true. - * - */ + * - true: Place a border. + * - false: (Default) Do not place a border. + * + * @note When you share a part of a window or screen, the SDK places a border around the entire + * window or screen if you set `enableHighLight` as true. 
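For illustration, a minimal sketch of window sharing with a highlight border on desktop platforms, assuming an initialized IRtcEngine pointer named engine and a valid native window handle; the handle, bitrate, and include path are assumptions of the sketch.

#include "IAgoraRtcEngine.h"  // assumed SDK include path

using namespace agora::rtc;

void shareWindowWithBorder(IRtcEngine* engine, void* windowId) {
  ScreenCaptureParameters params(1280, 720, 15, 1500);  // dimensions, fps, bitrate (Kbps)
  params.captureMouseCursor = true;
  params.enableHighLight = true;       // draw a border around the shared window
  params.highLightWidth = 5;           // border width in px, range [0, 50]
  params.highLightColor = 0xFF8CBF26;  // RGBA border color
  engine->startScreenCaptureByWindowId(windowId, Rectangle(), params);
}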
+ * + */ bool enableHighLight; ScreenCaptureParameters() - : dimensions(1920, 1080), frameRate(5), bitrate(STANDARD_BITRATE), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(1920, 1080), + frameRate(5), + bitrate(STANDARD_BITRATE), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(const VideoDimensions& d, int f, int b) - : dimensions(d), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(d), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false){} + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(OPTIONAL_NULLPTR), excludeWindowCount(0), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int width, int height, int f, int b, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(true), windowFocus(false), excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} - ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t *ex, int cnt) - : dimensions(width, height), frameRate(f), bitrate(b), captureMouseCursor(cur), windowFocus(fcs), excludeWindowList(ex), excludeWindowCount(cnt), highLightWidth(0), highLightColor(0), enableHighLight(false) {} + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(OPTIONAL_NULLPTR), + excludeWindowCount(0), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, view_t* ex, int cnt) + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(true), + windowFocus(false), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} + ScreenCaptureParameters(int width, int height, int f, int b, bool cur, bool fcs, view_t* ex, + int cnt) + : dimensions(width, height), + frameRate(f), + bitrate(b), + captureMouseCursor(cur), + windowFocus(fcs), + excludeWindowList(ex), + excludeWindowCount(cnt), + highLightWidth(0), + highLightColor(0), + enableHighLight(false) {} }; /** @@ -5294,15 +5689,18 @@ struct ScreenCaptureParameters { */ enum AUDIO_RECORDING_QUALITY_TYPE { /** - * 0: Low quality. 
The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes of recording. + * 0: Low quality. The sample rate is 32 kHz, and the file size is around 1.2 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_LOW = 0, /** - * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes of recording. + * 1: Medium quality. The sample rate is 32 kHz, and the file size is around 2 MB after 10 minutes + * of recording. */ AUDIO_RECORDING_QUALITY_MEDIUM = 1, /** - * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 minutes of recording. + * 2: High quality. The sample rate is 32 kHz, and the file size is around 3.75 MB after 10 + * minutes of recording. */ AUDIO_RECORDING_QUALITY_HIGH = 2, /** @@ -5334,16 +5732,16 @@ enum AUDIO_FILE_RECORDING_TYPE { */ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION { /** - * 1: Only records the audio of the local user. - */ + * 1: Only records the audio of the local user. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD = 1, /** - * 2: Only records the audio of all remote users. - */ + * 2: Only records the audio of all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK = 2, /** - * 3: Records the mixed audio of the local and all remote users. - */ + * 3: Records the mixed audio of the local and all remote users. + */ AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED = 3, }; @@ -5352,7 +5750,8 @@ enum AUDIO_ENCODED_FRAME_OBSERVER_POSITION { */ struct AudioRecordingConfiguration { /** - * The absolute path (including the filename extensions) of the recording file. For example: `C:\music\audio.mp4`. + * The absolute path (including the filename extensions) of the recording file. For example: + * `C:\music\audio.mp4`. * @note Ensure that the directory for the log files exists and is writable. */ const char* filePath; @@ -5368,8 +5767,9 @@ struct AudioRecordingConfiguration { * - (Default) 32000 * - 44100 * - 48000 - * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC files with quality - * to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for better recording quality. + * @note If you set this parameter to 44100 or 48000, Agora recommends recording WAV files, or AAC + * files with quality to be `AUDIO_RECORDING_QUALITY_MEDIUM` or `AUDIO_RECORDING_QUALITY_HIGH` for + * better recording quality. 
*/ int sampleRate; /** @@ -5390,131 +5790,142 @@ struct AudioRecordingConfiguration { int recordingChannel; AudioRecordingConfiguration() - : filePath(OPTIONAL_NULLPTR), - encode(false), - sampleRate(32000), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(AUDIO_RECORDING_QUALITY_LOW), - recordingChannel(1) {} - - AudioRecordingConfiguration(const char* file_path, int sample_rate, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(false), - sampleRate(sample_rate), - fileRecordingType(AUDIO_FILE_RECORDING_MIXED), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, AUDIO_FILE_RECORDING_TYPE type, AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) - : filePath(file_path), - encode(enc), - sampleRate(sample_rate), - fileRecordingType(type), - quality(quality_type), - recordingChannel(channel) {} - - AudioRecordingConfiguration(const AudioRecordingConfiguration &rhs) - : filePath(rhs.filePath), - encode(rhs.encode), - sampleRate(rhs.sampleRate), - fileRecordingType(rhs.fileRecordingType), - quality(rhs.quality), - recordingChannel(rhs.recordingChannel) {} + : filePath(OPTIONAL_NULLPTR), + encode(false), + sampleRate(32000), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(AUDIO_RECORDING_QUALITY_LOW), + recordingChannel(1) {} + + AudioRecordingConfiguration(const char* file_path, int sample_rate, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(false), + sampleRate(sample_rate), + fileRecordingType(AUDIO_FILE_RECORDING_MIXED), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const char* file_path, bool enc, int sample_rate, + AUDIO_FILE_RECORDING_TYPE type, + AUDIO_RECORDING_QUALITY_TYPE quality_type, int channel) + : filePath(file_path), + encode(enc), + sampleRate(sample_rate), + fileRecordingType(type), + quality(quality_type), + recordingChannel(channel) {} + + AudioRecordingConfiguration(const AudioRecordingConfiguration& rhs) + : filePath(rhs.filePath), + encode(rhs.encode), + sampleRate(rhs.sampleRate), + fileRecordingType(rhs.fileRecordingType), + quality(rhs.quality), + recordingChannel(rhs.recordingChannel) {} }; /** * Observer settings for the encoded audio. */ struct AudioEncodedFrameObserverConfig { - /** - * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. - */ - AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; - /** - * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`. - */ - AUDIO_ENCODING_TYPE encodingType; - - AudioEncodedFrameObserverConfig() - : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), - encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM){} + /** + * Audio profile. For details, see `AUDIO_ENCODED_FRAME_OBSERVER_POSITION`. + */ + AUDIO_ENCODED_FRAME_OBSERVER_POSITION postionType; + /** + * Audio encoding type. For details, see `AUDIO_ENCODING_TYPE`. + */ + AUDIO_ENCODING_TYPE encodingType; + AudioEncodedFrameObserverConfig() + : postionType(AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK), + encodingType(AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM) {} }; /** * The encoded audio observer. */ class IAudioEncodedFrameObserver { -public: -/** -* Gets the encoded audio data of the local user. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, -* you can get the encoded audio data of the local user from this callback. 
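For illustration, a minimal sketch of an observer that receives the locally recorded encoded audio, assuming an initialized IRtcEngine pointer named engine and that registerAudioEncodedFrameObserver(const AudioEncodedFrameObserverConfig&, IAudioEncodedFrameObserver*) is the registration entry point; the class and function names are assumptions of the sketch.

#include <cstdint>
#include "IAgoraRtcEngine.h"  // assumed SDK include path

using namespace agora::rtc;

class RecordedAudioTap : public IAudioEncodedFrameObserver {
 public:
  void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length,
                                 const EncodedAudioFrameInfo& info) override {
    // frameBuffer holds `length` bytes of encoded local audio; copy it out here.
  }
  void onPlaybackAudioEncodedFrame(const uint8_t*, int, const EncodedAudioFrameInfo&) override {}
  void onMixedAudioEncodedFrame(const uint8_t*, int, const EncodedAudioFrameInfo&) override {}
};

void attachRecordedAudioTap(IRtcEngine* engine, RecordedAudioTap* tap) {
  AudioEncodedFrameObserverConfig cfg;
  cfg.postionType = AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD;  // local user only
  cfg.encodingType = AUDIO_ENCODING_TYPE_OPUS_48000_MEDIUM;
  engine->registerAudioEncodedFrameObserver(cfg, tap);
}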
-* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + public: + /** + * Gets the encoded audio data of the local user. + * + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_RECORD`, you can get the encoded audio data of the local + * user from this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onRecordAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -/** -* Gets the encoded audio data of all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, -* you can get encoded audio data of all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + /** + * Gets the encoded audio data of all remote users. + * + * After calling `registerAudioEncodedFrameObserver` and setting the encoded audio as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_PLAYBACK`, you can get encoded audio data of all remote + * users through this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. + * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onPlaybackAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -/** -* Gets the mixed and encoded audio data of the local and all remote users. -* -* After calling `registerAudioEncodedFrameObserver` and setting the audio profile as `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, -* you can get the mixed and encoded audio data of the local and all remote users through this callback. -* -* @param frameBuffer The pointer to the audio frame buffer. -* @param length The data length (byte) of the audio frame. -* @param audioEncodedFrameInfo Audio information after encoding. For details, see `EncodedAudioFrameInfo`. -*/ -virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; + /** + * Gets the mixed and encoded audio data of the local and all remote users. + * + * After calling `registerAudioEncodedFrameObserver` and setting the audio profile as + * `AUDIO_ENCODED_FRAME_OBSERVER_POSITION_MIXED`, you can get the mixed and encoded audio data of + * the local and all remote users through this callback. + * + * @param frameBuffer The pointer to the audio frame buffer. + * @param length The data length (byte) of the audio frame. 
+ * @param audioEncodedFrameInfo Audio information after encoding. For details, see + * `EncodedAudioFrameInfo`. + */ + virtual void onMixedAudioEncodedFrame(const uint8_t* frameBuffer, int length, + const EncodedAudioFrameInfo& audioEncodedFrameInfo) = 0; -virtual ~IAudioEncodedFrameObserver () {} + virtual ~IAudioEncodedFrameObserver() {} }; /** The region for connection, which is the region where the server the SDK connects to is located. */ enum AREA_CODE { - /** - * Mainland China. - */ - AREA_CODE_CN = 0x00000001, - /** - * North America. - */ - AREA_CODE_NA = 0x00000002, - /** - * Europe. - */ - AREA_CODE_EU = 0x00000004, - /** - * Asia, excluding Mainland China. - */ - AREA_CODE_AS = 0x00000008, - /** - * Japan. - */ - AREA_CODE_JP = 0x00000010, - /** - * India. - */ - AREA_CODE_IN = 0x00000020, - /** - * (Default) Global. - */ - AREA_CODE_GLOB = (0xFFFFFFFF) + /** + * Mainland China. + */ + AREA_CODE_CN = 0x00000001, + /** + * North America. + */ + AREA_CODE_NA = 0x00000002, + /** + * Europe. + */ + AREA_CODE_EU = 0x00000004, + /** + * Asia, excluding Mainland China. + */ + AREA_CODE_AS = 0x00000008, + /** + * Japan. + */ + AREA_CODE_JP = 0x00000010, + /** + * India. + */ + AREA_CODE_IN = 0x00000020, + /** + * (Default) Global. + */ + AREA_CODE_GLOB = (0xFFFFFFFF) }; /** @@ -5568,8 +5979,9 @@ enum CHANNEL_MEDIA_RELAY_ERROR { RELAY_ERROR_SERVER_ERROR_RESPONSE = 1, /** 2: No server response. You can call the `leaveChannel` method to leave the channel. * - * This error can also occur if your project has not enabled co-host token authentication. You can contact technical - * support to enable the service for cohosting across channels before starting a channel media relay. + * This error can also occur if your project has not enabled co-host token authentication. You can + * contact technical support to enable the service for cohosting across channels before starting a + * channel media relay. */ RELAY_ERROR_SERVER_NO_RESPONSE = 2, /** 3: The SDK fails to access the service, probably due to limited resources of the server. @@ -5587,8 +5999,8 @@ enum CHANNEL_MEDIA_RELAY_ERROR { /** 7: The server fails to send the media stream. */ RELAY_ERROR_FAILED_PACKET_SENT_TO_DEST = 7, - /** 8: The SDK disconnects from the server due to poor network connections. You can call the `leaveChannel` method to - * leave the channel. + /** 8: The SDK disconnects from the server due to poor network connections. You can call the + * `leaveChannel` method to leave the channel. */ RELAY_ERROR_SERVER_CONNECTION_LOST = 8, /** 9: An internal error occurs in the server. @@ -5606,8 +6018,8 @@ enum CHANNEL_MEDIA_RELAY_ERROR { * The state code of the channel media relay. */ enum CHANNEL_MEDIA_RELAY_STATE { - /** 0: The initial state. After you successfully stop the channel media relay by calling `stopChannelMediaRelay`, - * the `onChannelMediaRelayStateChanged` callback returns this state. + /** 0: The initial state. After you successfully stop the channel media relay by calling + * `stopChannelMediaRelay`, the `onChannelMediaRelayStateChanged` callback returns this state. */ RELAY_STATE_IDLE = 0, /** 1: The SDK tries to relay the media stream to the destination channel. @@ -5625,15 +6037,15 @@ enum CHANNEL_MEDIA_RELAY_STATE { */ struct ChannelMediaInfo { /** The user ID. - */ + */ uid_t uid; /** The channel name. The default value is NULL, which means that the SDK - * applies the current channel name. - */ + * applies the current channel name. 
+ */ const char* channelName; /** The token that enables the user to join the channel. The default value - * is NULL, which means that the SDK applies the current token. - */ + * is NULL, which means that the SDK applies the current token. + */ const char* token; ChannelMediaInfo() : uid(0), channelName(NULL), token(NULL) {} @@ -5644,31 +6056,32 @@ struct ChannelMediaInfo { */ struct ChannelMediaRelayConfiguration { /** The information of the source channel `ChannelMediaInfo`. It contains the following members: - * - `channelName`: The name of the source channel. The default value is `NULL`, which means the SDK applies the name - * of the current channel. - * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is 0, which means the - * SDK generates a random UID. You must set it as 0. - * - `token`: The token for joining the source channel. It is generated with the `channelName` and `uid` you set in - * `srcInfo`. - * - If you have not enabled the App Certificate, set this parameter as the default value `NULL`, which means the - * SDK applies the App ID. - * - If you have enabled the App Certificate, you must use the token generated with the `channelName` and `uid`, and - * the `uid` must be set as 0. + * - `channelName`: The name of the source channel. The default value is `NULL`, which means the + * SDK applies the name of the current channel. + * - `uid`: The unique ID to identify the relay stream in the source channel. The default value is + * 0, which means the SDK generates a random UID. You must set it as 0. + * - `token`: The token for joining the source channel. It is generated with the `channelName` and + * `uid` you set in `srcInfo`. + * - If you have not enabled the App Certificate, set this parameter as the default value + * `NULL`, which means the SDK applies the App ID. + * - If you have enabled the App Certificate, you must use the token generated with the + * `channelName` and `uid`, and the `uid` must be set as 0. */ ChannelMediaInfo* srcInfo; - /** The information of the destination channel `ChannelMediaInfo`. It contains the following members: + /** The information of the destination channel `ChannelMediaInfo`. It contains the following + * members: * - `channelName`: The name of the destination channel. * - `uid`: The unique ID to identify the relay stream in the destination channel. The value * ranges from 0 to (2^32-1). To avoid UID conflicts, this `UID` must be different from any * other `UID` in the destination channel. The default value is 0, which means the SDK generates * a random `UID`. Do not set this parameter as the `UID` of the host in the destination channel, * and ensure that this `UID` is different from any other `UID` in the channel. - * - `token`: The token for joining the destination channel. It is generated with the `channelName` - * and `uid` you set in `destInfos`. + * - `token`: The token for joining the destination channel. It is generated with the + * `channelName` and `uid` you set in `destInfos`. * - If you have not enabled the App Certificate, set this parameter as the default value NULL, * which means the SDK applies the App ID. - * If you have enabled the App Certificate, you must use the token generated with the `channelName` - * and `uid`. + * If you have enabled the App Certificate, you must use the token generated with the + * `channelName` and `uid`. */ ChannelMediaInfo* destInfos; /** The number of destination channels. 
The default value is 0, and the value range is from 0 to @@ -5677,7 +6090,8 @@ struct ChannelMediaRelayConfiguration { */ int destCount; - ChannelMediaRelayConfiguration() : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} + ChannelMediaRelayConfiguration() + : srcInfo(OPTIONAL_NULLPTR), destInfos(OPTIONAL_NULLPTR), destCount(0) {} }; /** @@ -5722,11 +6136,11 @@ struct DownlinkNetworkInfo { expected_bitrate_bps(-1) {} PeerDownlinkInfo(const PeerDownlinkInfo& rhs) - : stream_type(rhs.stream_type), + : stream_type(rhs.stream_type), current_downscale_level(rhs.current_downscale_level), expected_bitrate_bps(rhs.expected_bitrate_bps) { if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5741,7 +6155,7 @@ struct DownlinkNetworkInfo { current_downscale_level = rhs.current_downscale_level; expected_bitrate_bps = rhs.expected_bitrate_bps; if (rhs.userId != OPTIONAL_NULLPTR) { - const int len = std::strlen(rhs.userId); + const size_t len = std::strlen(rhs.userId); char* buf = new char[len + 1]; std::memcpy(buf, rhs.userId, len); buf[len] = '\0'; @@ -5775,18 +6189,18 @@ struct DownlinkNetworkInfo { int total_received_video_count; DownlinkNetworkInfo() - : lastmile_buffer_delay_time_ms(-1), - bandwidth_estimation_bps(-1), - total_downscale_level_count(-1), - peer_downlink_info(OPTIONAL_NULLPTR), - total_received_video_count(-1) {} + : lastmile_buffer_delay_time_ms(-1), + bandwidth_estimation_bps(-1), + total_downscale_level_count(-1), + peer_downlink_info(OPTIONAL_NULLPTR), + total_received_video_count(-1) {} DownlinkNetworkInfo(const DownlinkNetworkInfo& info) - : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), - bandwidth_estimation_bps(info.bandwidth_estimation_bps), - total_downscale_level_count(info.total_downscale_level_count), - peer_downlink_info(OPTIONAL_NULLPTR), - total_received_video_count(info.total_received_video_count) { + : lastmile_buffer_delay_time_ms(info.lastmile_buffer_delay_time_ms), + bandwidth_estimation_bps(info.bandwidth_estimation_bps), + total_downscale_level_count(info.total_downscale_level_count), + peer_downlink_info(OPTIONAL_NULLPTR), + total_received_video_count(info.total_received_video_count) { if (total_received_video_count <= 0) return; peer_downlink_info = new PeerDownlinkInfo[total_received_video_count]; for (int i = 0; i < total_received_video_count; ++i) @@ -5840,7 +6254,8 @@ enum ENCRYPTION_MODE { * salt (`encryptionKdfSalt`). */ AES_128_GCM2 = 7, - /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt (`encryptionKdfSalt`). + /** 8: 256-bit AES encryption, GCM mode. This encryption mode requires the setting of salt + * (`encryptionKdfSalt`). */ AES_256_GCM2 = 8, /** Enumerator boundary. @@ -5858,30 +6273,31 @@ struct EncryptionConfig { /** * Encryption key in string type with unlimited length. Agora recommends using a 32-byte key. * - * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2). + * @note If you do not set an encryption key or set it as NULL, you cannot use the built-in + * encryption, and the SDK returns #ERR_INVALID_ARGUMENT (-2). */ const char* encryptionKey; /** - * Salt, 32 bytes in length. Agora recommends that you use OpenSSL to generate salt on the server side. + * Salt, 32 bytes in length. 
Agora recommends that you use OpenSSL to generate salt on the server + * side. * * @note This parameter takes effect only in `AES_128_GCM2` or `AES_256_GCM2` encrypted mode. * In this case, ensure that this parameter is not 0. */ uint8_t encryptionKdfSalt[32]; - + bool datastreamEncryptionEnabled; EncryptionConfig() - : encryptionMode(AES_128_GCM2), - encryptionKey(OPTIONAL_NULLPTR), - datastreamEncryptionEnabled(false) - { + : encryptionMode(AES_128_GCM2), + encryptionKey(OPTIONAL_NULLPTR), + datastreamEncryptionEnabled(false) { memset(encryptionKdfSalt, 0, sizeof(encryptionKdfSalt)); } /// @cond const char* getEncryptionString() const { - switch(encryptionMode) { + switch (encryptionMode) { case AES_128_XTS: return "aes-128-xts"; case AES_128_ECB: @@ -5909,30 +6325,31 @@ struct EncryptionConfig { /** Encryption error type. */ enum ENCRYPTION_ERROR_TYPE { - /** - * 0: Internal reason. - */ - ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, - /** - * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, - /** - * 2: MediaStream encryption errors. - */ - ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, - /** - * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same encryption mode and key. - */ - ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, - /** - * 4: DataStream encryption errors. - */ - ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, + /** + * 0: Internal reason. + */ + ENCRYPTION_ERROR_INTERNAL_FAILURE = 0, + /** + * 1: MediaStream decryption errors. Ensure that the receiver and the sender use the same + * encryption mode and key. + */ + ENCRYPTION_ERROR_DECRYPTION_FAILURE = 1, + /** + * 2: MediaStream encryption errors. + */ + ENCRYPTION_ERROR_ENCRYPTION_FAILURE = 2, + /** + * 3: DataStream decryption errors. Ensure that the receiver and the sender use the same + * encryption mode and key. + */ + ENCRYPTION_ERROR_DATASTREAM_DECRYPTION_FAILURE = 3, + /** + * 4: DataStream encryption errors. + */ + ENCRYPTION_ERROR_DATASTREAM_ENCRYPTION_FAILURE = 4, }; -enum UPLOAD_ERROR_REASON -{ +enum UPLOAD_ERROR_REASON { UPLOAD_SUCCESS = 0, UPLOAD_NET_ERROR = 1, UPLOAD_SERVER_ERROR = 2, @@ -5967,7 +6384,8 @@ enum STREAM_SUBSCRIBE_STATE { * - Calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending local * media stream. * - Calls `disableAudio` or `disableVideo `to disable the local audio or video module. - * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. + * - Calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or + * video capture. * - The role of the remote user is audience. * - The local user calls the following methods to stop receiving remote streams: * - Calls `muteRemoteAudioStream(true)`, `muteAllRemoteAudioStreams(true)` to stop receiving the remote audio streams. @@ -5994,9 +6412,12 @@ enum STREAM_PUBLISH_STATE { PUB_STATE_IDLE = 0, /** * 1: Fails to publish the local stream. Possible reasons: - * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop sending the local media stream. - * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video module. - * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the local audio or video capture. 
+ * - The local user calls `muteLocalAudioStream(true)` or `muteLocalVideoStream(true)` to stop + * sending the local media stream. + * - The local user calls `disableAudio` or `disableVideo` to disable the local audio or video + * module. + * - The local user calls `enableLocalAudio(false)` or `enableLocalVideo(false)` to disable the + * local audio or video capture. * - The role of the local user is audience. */ PUB_STATE_NO_PUBLISHED = 1, @@ -6022,10 +6443,15 @@ struct EchoTestConfiguration { int intervalInSeconds; EchoTestConfiguration(view_t v, bool ea, bool ev, const char* t, const char* c, const int is) - : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} + : view(v), enableAudio(ea), enableVideo(ev), token(t), channelId(c), intervalInSeconds(is) {} EchoTestConfiguration() - : view(OPTIONAL_NULLPTR), enableAudio(true), enableVideo(true), token(OPTIONAL_NULLPTR), channelId(OPTIONAL_NULLPTR), intervalInSeconds(2) {} + : view(OPTIONAL_NULLPTR), + enableAudio(true), + enableVideo(true), + token(OPTIONAL_NULLPTR), + channelId(OPTIONAL_NULLPTR), + intervalInSeconds(2) {} }; /** @@ -6041,9 +6467,7 @@ struct UserInfo { */ char userAccount[MAX_USER_ACCOUNT_LENGTH]; - UserInfo() : uid(0) { - userAccount[0] = '\0'; - } + UserInfo() : uid(0) { userAccount[0] = '\0'; } }; /** @@ -6053,21 +6477,22 @@ enum EAR_MONITORING_FILTER_TYPE { /** * 1: Do not add an audio filter to the in-ear monitor. */ - EAR_MONITORING_FILTER_NONE = (1<<0), + EAR_MONITORING_FILTER_NONE = (1 << 0), /** * 2: Enable audio filters to the in-ear monitor. If you implement functions such as voice * beautifier and audio effect, users can hear the voice after adding these effects. */ - EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1<<1), + EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS = (1 << 1), /** * 4: Enable noise suppression to the in-ear monitor. */ - EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1<<2), + EAR_MONITORING_FILTER_NOISE_SUPPRESSION = (1 << 2), /** * 32768: Enable audio filters by reuse post-processing filter to the in-ear monitor. - * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other bits will be disregarded. + * This bit is intended to be used in exclusive mode, which means, if this bit is set, all other + * bits will be disregarded. */ - EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1<<15), + EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER = (1 << 15), }; /** @@ -6139,7 +6564,7 @@ struct ScreenVideoParameters { * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). */ int frameRate = 15; - /** + /** * The video encoding bitrate (Kbps). For recommended values, see [Recommended video * profiles](https://docs.agora.io/en/Interactive%20Broadcast/game_streaming_video_profile?platform=Android#recommended-video-profiles). */ @@ -6230,7 +6655,7 @@ struct VideoRenderingTracingInfo { int elapsedTime; /** * Elapsed time from the start tracing time to the time when join channel. - * + * * **Note** * If the start tracing time is behind the time when join channel, this value will be negative. */ @@ -6241,7 +6666,7 @@ struct VideoRenderingTracingInfo { int join2JoinSuccess; /** * Elapsed time from finishing joining channel to remote user joined. - * + * * **Note** * If the start tracing time is after the time finishing join channel, this value will be * the elapsed time from the start tracing time to remote user joined. The minimum value is 0. 
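A short sketch of how the EAR_MONITORING_FILTER_TYPE bits above are meant to be combined with '|'; enableInEarMonitoring() is assumed from the IRtcEngine API and is not part of this hunk:

    // Built-in audio filters plus noise suppression: (1 << 1) | (1 << 2) == 6.
    int filters = agora::rtc::EAR_MONITORING_FILTER_BUILT_IN_AUDIO_FILTERS |
                  agora::rtc::EAR_MONITORING_FILTER_NOISE_SUPPRESSION;
    // EAR_MONITORING_FILTER_REUSE_POST_PROCESSING_FILTER is exclusive: if set, all other bits are ignored.
    // engine->enableInEarMonitoring(true, filters);   // assumed API, for illustration only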
@@ -6249,7 +6674,7 @@ struct VideoRenderingTracingInfo { int joinSuccess2RemoteJoined; /** * Elapsed time from remote user joined to set the view. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to set the view. The minimum value is 0. @@ -6257,7 +6682,7 @@ struct VideoRenderingTracingInfo { int remoteJoined2SetView; /** * Elapsed time from remote user joined to the time subscribing remote video stream. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to the time subscribing remote video stream. @@ -6266,7 +6691,7 @@ struct VideoRenderingTracingInfo { int remoteJoined2UnmuteVideo; /** * Elapsed time from remote user joined to the remote video packet received. - * + * * **Note** * If the start tracing time is after the time when remote user joined, this value will be * the elapsed time from the start tracing time to the time subscribing remote video stream. @@ -6286,7 +6711,6 @@ enum CONFIG_FETCH_TYPE { CONFIG_FETCH_TYPE_JOIN_CHANNEL = 2, }; - /** The local proxy mode type. */ enum LOCAL_PROXY_MODE { /** 0: Connect local proxy with high priority, if not connected to local proxy, fallback to sdrtn. @@ -6315,7 +6739,8 @@ struct LogUploadServerInfo { LogUploadServerInfo() : serverDomain(NULL), serverPath(NULL), serverPort(0), serverHttps(true) {} - LogUploadServerInfo(const char* domain, const char* path, int port, bool https) : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} + LogUploadServerInfo(const char* domain, const char* path, int port, bool https) + : serverDomain(domain), serverPath(path), serverPort(port), serverHttps(https) {} }; struct AdvancedConfigInfo { @@ -6337,8 +6762,9 @@ struct LocalAccessPointConfiguration { /** The number of local access point domain. */ int domainListSize; - /** Certificate domain name installed on specific local access point. pass "" means using sni domain on specific local access point - * SNI(Server Name Indication) is an extension to the TLS protocol. + /** Certificate domain name installed on specific local access point. pass "" means using sni + * domain on specific local access point SNI(Server Name Indication) is an extension to the TLS + * protocol. */ const char* verifyDomainName; /** Local proxy connection mode, connectivity first or local only. @@ -6353,23 +6779,42 @@ struct LocalAccessPointConfiguration { - false: not disable vos-aut */ bool disableAut; - LocalAccessPointConfiguration() : ipList(NULL), ipListSize(0), domainList(NULL), domainListSize(0), verifyDomainName(NULL), mode(ConnectivityFirst), disableAut(true) {} + LocalAccessPointConfiguration() + : ipList(NULL), + ipListSize(0), + domainList(NULL), + domainListSize(0), + verifyDomainName(NULL), + mode(ConnectivityFirst), + disableAut(true) {} +}; + +enum RecorderStreamType { + RTC, + PREVIEW, }; /** * The information about recorded media streams. */ struct RecorderStreamInfo { - const char* channelId; - /** - * The user ID. - */ - uid_t uid; - /** - * The channel ID of the audio/video stream needs to be recorded. - */ - RecorderStreamInfo() : channelId(NULL), uid(0) {} - RecorderStreamInfo(const char* channelId, uid_t uid) : channelId(channelId), uid(uid) {} + /** + * The channel ID of the audio/video stream needs to be recorded. + */ + const char* channelId; + /** + * The user ID. + */ + uid_t uid; + /** + * The Recoder Stream type. 
+ */ + RecorderStreamType type; + RecorderStreamInfo() : channelId(NULL), uid(0), type(RTC) {} + RecorderStreamInfo(const char* channelId, uid_t uid) + : channelId(channelId), uid(uid), type(RTC) {} + RecorderStreamInfo(const char* channelId, uid_t uid, RecorderStreamType type) + : channelId(channelId), uid(uid), type(type) {} }; } // namespace rtc @@ -6396,12 +6841,12 @@ class AParameter : public agora::util::AutoPtr { }; class LicenseCallback { - public: - virtual ~LicenseCallback() {} - virtual void onCertificateRequired() = 0; - virtual void onLicenseRequest() = 0; - virtual void onLicenseValidated() = 0; - virtual void onLicenseError(int result) = 0; + public: + virtual ~LicenseCallback() {} + virtual void onCertificateRequired() = 0; + virtual void onLicenseRequest() = 0; + virtual void onLicenseValidated() = 0; + virtual void onLicenseError(int result) = 0; }; } // namespace base @@ -6445,44 +6890,51 @@ struct SpatialAudioParams { }; /** * Layout info of video stream which compose a transcoder video stream. -*/ -struct VideoLayout -{ + */ +struct VideoLayout { /** * Channel Id from which this video stream come from. - */ + */ const char* channelId; /** * User id of video stream. - */ + */ rtc::uid_t uid; /** * User account of video stream. - */ + */ user_id_t strUid; /** * x coordinate of video stream on a transcoded video stream canvas. - */ + */ uint32_t x; /** * y coordinate of video stream on a transcoded video stream canvas. - */ + */ uint32_t y; /** * width of video stream on a transcoded video stream canvas. - */ + */ uint32_t width; /** * height of video stream on a transcoded video stream canvas. - */ + */ uint32_t height; /** * video state of video stream on a transcoded video stream canvas. * 0 for normal video , 1 for placeholder image showed , 2 for black image. - */ - uint32_t videoState; + */ + uint32_t videoState; - VideoLayout() : channelId(OPTIONAL_NULLPTR), uid(0), strUid(OPTIONAL_NULLPTR), x(0), y(0), width(0), height(0), videoState(0) {} + VideoLayout() + : channelId(OPTIONAL_NULLPTR), + uid(0), + strUid(OPTIONAL_NULLPTR), + x(0), + y(0), + width(0), + height(0), + videoState(0) {} }; } // namespace agora @@ -6509,7 +6961,7 @@ AGORA_API int AGORA_CALL setAgoraSdkExternalSymbolLoader(void* (*func)(const cha * @note For license only, everytime will generate a different credential. * So, just need to call once for a device, and then save the credential */ -AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential); +AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString& credential); /** * Verify given certificate and return the result @@ -6524,8 +6976,10 @@ AGORA_API int AGORA_CALL createAgoraCredential(agora::util::AString &credential) * @return The description of the error code. * @note For license only. */ -AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_buf, int credential_len, - const char *certificate_buf, int certificate_len); +AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char* credential_buf, + int credential_len, + const char* certificate_buf, + int certificate_len); /** * @brief Implement the agora::base::LicenseCallback, @@ -6534,7 +6988,7 @@ AGORA_API int AGORA_CALL getAgoraCertificateVerifyResult(const char *credential_ * @param [in] callback The object of agora::LiceseCallback, * set the callback to null before delete it. 
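To illustrate the RecorderStreamInfo and RecorderStreamType additions above, a minimal sketch with placeholder channel and uid values:

    // New three-argument constructor: record the PREVIEW stream of a user.
    agora::rtc::RecorderStreamInfo preview("demo_channel", /*uid=*/12345, agora::rtc::PREVIEW);
    // The existing two-argument constructor still compiles and now defaults `type` to RTC.
    agora::rtc::RecorderStreamInfo rtcStream("demo_channel", /*uid=*/12345);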
*/ -AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback *callback); +AGORA_API void setAgoraLicenseCallback(agora::base::LicenseCallback* callback); /** * @brief Get the LicenseCallback pointer if already setup, @@ -6550,18 +7004,15 @@ AGORA_API agora::base::LicenseCallback* getAgoraLicenseCallback(); * typical scenario is as follows: * * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - * | // custom audio/video base capture time, e.g. the first audio/video capture time. | - * | int64_t custom_capture_time_base; | - * | | - * | int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | - * | | - * | // offset is fixed once calculated in the begining. | - * | const int64_t offset = agora_monotonic_time - custom_capture_time_base; | - * | | - * | // realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| - * | // actual_audio/video_capture_time is the actual capture time transfered to sdk. | - * | int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | - * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | + * | // custom audio/video base capture time, e.g. the first audio/video capture time. | | int64_t + * custom_capture_time_base; | | | | + * int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs(); | + * | | | // offset is fixed once calculated in the begining. | | const int64_t offset = + * agora_monotonic_time - custom_capture_time_base; | | | | // + * realtime_custom_audio/video_capture_time is the origin capture time that customer provided.| | // + * actual_audio/video_capture_time is the actual capture time transfered to sdk. | | + * int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset; | + * | int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset; | * ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ * * @return diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h index 8120acb3f..6e7d45357 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/AgoraMediaBase.h @@ -63,8 +63,8 @@ struct ExtensionContext { /** -* Video source types definition. -**/ + * Video source types definition. + **/ enum VIDEO_SOURCE_TYPE { /** Video captured by the camera. */ @@ -115,17 +115,45 @@ enum VIDEO_SOURCE_TYPE { */ VIDEO_SOURCE_SCREEN_FOURTH = 14, /** Video for voice drive. - */ - VIDEO_SOURCE_SPEECH_DRIVEN = 15, + */ + VIDEO_SOURCE_SPEECH_DRIVEN = 15, VIDEO_SOURCE_UNKNOWN = 100 }; +/** +* Audio source types definition. +**/ +enum AUDIO_SOURCE_TYPE { + /** Audio captured by the mic. + */ + AUDIO_SOURCE_MICROPHONE = 0, + /** Not define. + */ + AUDIO_SOURCE_CUSTOM = 1, + /** Audio for media player sharing. + */ + AUDIO_SOURCE_MEDIA_PLAYER = 2, + /** Audio for screen audio. + */ + AUDIO_SOURCE_LOOPBACK_RECORDING = 3, + /** Audio captured by mixed source. + */ + AUDIO_SOURCE_MIXED_STREAM = 4, + /** Remote audio received from network. + */ + AUDIO_SOURCE_REMOTE_USER = 5, + /** Remote audio received from network by channel. + */ + AUDIO_SOURCE_REMOTE_CHANNEL = 6, + + AUDIO_SOURCE_UNKNOWN = 100 +}; + /** * Audio routes. 
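The timestamp-alignment scenario for getAgoraCurrentMonotonicTimeInMs(), whose boxed comment is reflowed above, reduces to the following sketch. Variable names are taken from that comment; the per-frame inputs are placeholders supplied by the app:

    // Offset between the SDK monotonic clock and the app's capture clock, computed once.
    int64_t custom_capture_time_base = /* first custom audio/video capture time (ms) */ 0;
    int64_t agora_monotonic_time = getAgoraCurrentMonotonicTimeInMs();
    const int64_t offset = agora_monotonic_time - custom_capture_time_base;

    // For every later frame, shift the app-provided capture time into the SDK clock.
    int64_t realtime_custom_audio_capture_time = /* app capture time for this audio frame */ 0;
    int64_t realtime_custom_video_capture_time = /* app capture time for this video frame */ 0;
    int64_t actual_audio_capture_time = realtime_custom_audio_capture_time + offset;
    int64_t actual_video_capture_time = realtime_custom_video_capture_time + offset;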
*/ -enum AudioRoute -{ +enum AudioRoute { /** * -1: The default audio route. */ @@ -191,23 +219,21 @@ struct AudioParameters { size_t channels; size_t frames_per_buffer; - AudioParameters() - : sample_rate(0), - channels(0), - frames_per_buffer(0) {} + AudioParameters() : sample_rate(0), channels(0), frames_per_buffer(0) {} }; /** * The use mode of the audio data. */ enum RAW_AUDIO_FRAME_OP_MODE_TYPE { - /** 0: Read-only mode: Users only read the data from `AudioFrame` without modifying anything. + /** 0: Read-only mode: Users only read the data from `AudioFrame` without modifying anything. * For example, when users acquire the data with the Agora SDK, then start the media push. */ RAW_AUDIO_FRAME_OP_MODE_READ_ONLY = 0, - /** 2: Read and write mode: Users read the data from `AudioFrame`, modify it, and then play it. - * For example, when users have their own audio-effect processing module and perform some voice pre-processing, such as a voice change. + /** 2: Read and write mode: Users read the data from `AudioFrame`, modify it, and then play it. + * For example, when users have their own audio-effect processing module and perform some voice + * pre-processing, such as a voice change. */ RAW_AUDIO_FRAME_OP_MODE_READ_WRITE = 2, }; @@ -215,7 +241,7 @@ enum RAW_AUDIO_FRAME_OP_MODE_TYPE { } // namespace rtc namespace media { - /** +/** * The type of media device. */ enum MEDIA_SOURCE_TYPE { @@ -290,23 +316,23 @@ enum CONTENT_INSPECT_RESULT { }; enum CONTENT_INSPECT_TYPE { -/** - * (Default) content inspect type invalid - */ -CONTENT_INSPECT_INVALID = 0, -/** - * @deprecated - * Content inspect type moderation - */ -CONTENT_INSPECT_MODERATION __deprecated = 1, -/** - * Content inspect type supervise - */ -CONTENT_INSPECT_SUPERVISION = 2, -/** - * Content inspect type image moderation - */ -CONTENT_INSPECT_IMAGE_MODERATION = 3 + /** + * (Default) content inspect type invalid + */ + CONTENT_INSPECT_INVALID = 0, + /** + * @deprecated + * Content inspect type moderation + */ + CONTENT_INSPECT_MODERATION __deprecated = 1, + /** + * Content inspect type supervise + */ + CONTENT_INSPECT_SUPERVISION = 2, + /** + * Content inspect type image moderation + */ + CONTENT_INSPECT_IMAGE_MODERATION = 3 }; struct ContentInspectModule { @@ -338,15 +364,14 @@ struct ContentInspectConfig { /**The content inspect module count. */ int moduleCount; - ContentInspectConfig& operator=(const ContentInspectConfig& rth) - { - extraInfo = rth.extraInfo; - serverConfig = rth.serverConfig; - moduleCount = rth.moduleCount; - memcpy(&modules, &rth.modules, MAX_CONTENT_INSPECT_MODULE_COUNT * sizeof(ContentInspectModule)); - return *this; - } - ContentInspectConfig() :extraInfo(NULL), serverConfig(NULL), moduleCount(0){} + ContentInspectConfig& operator=(const ContentInspectConfig& rth) { + extraInfo = rth.extraInfo; + serverConfig = rth.serverConfig; + moduleCount = rth.moduleCount; + memcpy(&modules, &rth.modules, MAX_CONTENT_INSPECT_MODULE_COUNT * sizeof(ContentInspectModule)); + return *this; + } + ContentInspectConfig() : extraInfo(NULL), serverConfig(NULL), moduleCount(0) {} }; namespace base { @@ -368,9 +393,7 @@ struct PacketOptions { uint32_t timestamp; // Audio level indication. uint8_t audioLevelIndication; - PacketOptions() - : timestamp(0), - audioLevelIndication(127) {} + PacketOptions() : timestamp(0), audioLevelIndication(127) {} }; /** @@ -386,9 +409,7 @@ struct AudioEncodedFrameInfo { * The codec of the packet. 
*/ uint8_t codec; - AudioEncodedFrameInfo() - : sendTs(0), - codec(0) {} + AudioEncodedFrameInfo() : sendTs(0), codec(0) {} }; /** @@ -398,17 +419,18 @@ struct AudioPcmFrame { /** * The buffer size of the PCM audio frame. */ - OPTIONAL_ENUM_SIZE_T { - // Stereo, 32 kHz, 60 ms (2 * 32 * 60) - /** - * The max number of the samples of the data. - * - * When the number of audio channel is two, the sample rate is 32 kHZ, - * the buffer length of the data is 60 ms, the number of the samples of the data is 3840 (2 x 32 x 60). - */ - kMaxDataSizeSamples = 3840, - /** The max number of the bytes of the data. */ - kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t), + OPTIONAL_ENUM_SIZE_T{ + // Stereo, 32 kHz, 60 ms (2 * 32 * 60) + /** + * The max number of the samples of the data. + * + * When the number of audio channel is two, the sample rate is 32 kHZ, + * the buffer length of the data is 60 ms, the number of the samples of the data is 3840 (2 x + * 32 x 60). + */ + kMaxDataSizeSamples = 3840, + /** The max number of the bytes of the data. */ + kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t), }; /** The timestamp (ms) of the audio frame. @@ -553,7 +575,8 @@ enum VIDEO_PIXEL_FORMAT { */ VIDEO_PIXEL_I422 = 16, /** - * 17: ID3D11Texture2D, only support DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_NV12 texture format + * 17: ID3D11Texture2D, only support DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, + * DXGI_FORMAT_NV12 texture format */ VIDEO_TEXTURE_ID3D11TEXTURE2D = 17, /** @@ -608,12 +631,12 @@ enum CAMERA_VIDEO_SOURCE_TYPE { * This interface provides access to metadata information. */ class IVideoFrameMetaInfo { - public: - enum META_INFO_KEY { - KEY_FACE_CAPTURE = 0, - }; - virtual ~IVideoFrameMetaInfo() {}; - virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; + public: + enum META_INFO_KEY { + KEY_FACE_CAPTURE = 0, + }; + virtual ~IVideoFrameMetaInfo(){}; + virtual const char* getMetaInfoStr(META_INFO_KEY key) const = 0; }; struct ColorSpace { @@ -829,7 +852,7 @@ struct ExternalVideoFrame { d3d11Texture2d(NULL), textureSliceIndex(0){} - /** + /** * The EGL context type. */ enum EGL_CONTEXT_TYPE { @@ -869,6 +892,7 @@ struct ExternalVideoFrame { * The pixel format: #VIDEO_PIXEL_FORMAT */ VIDEO_PIXEL_FORMAT format; + /** * The video buffer. */ @@ -903,30 +927,32 @@ struct ExternalVideoFrame { */ int cropBottom; /** - * [Raw data related parameter] The clockwise rotation information of the video frame. You can set the - * rotation angle as 0, 90, 180, or 270. The default value is 0. + * [Raw data related parameter] The clockwise rotation information of the video frame. You can set + * the rotation angle as 0, 90, 180, or 270. The default value is 0. */ int rotation; /** - * The timestamp (ms) of the incoming video frame. An incorrect timestamp results in a frame loss or - * unsynchronized audio and video. - * + * The timestamp (ms) of the incoming video frame. An incorrect timestamp results in a frame loss + * or unsynchronized audio and video. + * * Please refer to getAgoraCurrentMonotonicTimeInMs or getCurrentMonotonicTimeInMs * to determine how to fill this filed. */ long long timestamp; /** * [Texture-related parameter] - * When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set EGLContext to this field. - * When using the OpenGL interface (android.opengl.*) defined by Android, set EGLContext to this field. 
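The kMaxDataSizeSamples constant in AudioPcmFrame above encodes the worst case its comment describes; the arithmetic, spelled out:

    const size_t channels = 2;         // stereo
    const size_t samples_per_ms = 32;  // 32 kHz sample rate -> 32 samples per channel per millisecond
    const size_t duration_ms = 60;
    const size_t max_samples = channels * samples_per_ms * duration_ms;  // 2 * 32 * 60 = 3840
    const size_t max_bytes = max_samples * sizeof(int16_t);              // kMaxDataSizeBytes = 7680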
+ * When using the OpenGL interface (javax.microedition.khronos.egl.*) defined by Khronos, set + * EGLContext to this field. When using the OpenGL interface (android.opengl.*) defined by + * Android, set EGLContext to this field. */ - void *eglContext; + void* eglContext; /** * [Texture related parameter] Texture ID used by the video frame. */ EGL_CONTEXT_TYPE eglType; /** - * [Texture related parameter] Incoming 4 × 4 transformational matrix. The typical value is a unit matrix. + * [Texture related parameter] Incoming 4 × 4 transformational matrix. The typical value is + * a unit matrix. */ int textureId; /** @@ -1060,8 +1086,8 @@ struct VideoFrame { */ int rotation; /** - * The timestamp to render the video stream. Use this parameter for audio-video synchronization when - * rendering the video. + * The timestamp to render the video stream. Use this parameter for audio-video synchronization + * when rendering the video. * * @note This parameter is for rendering the video, not capturing the video. */ @@ -1089,7 +1115,8 @@ struct VideoFrame { */ int textureId; /** - * [Texture related parameter] The pointer of ID3D11Texture2D used by the video frame,for Windows only. + * [Texture related parameter] The pointer of ID3D11Texture2D used by the video frame,for Windows + * only. */ void* d3d11Texture2d; /** @@ -1117,7 +1144,8 @@ struct VideoFrame { */ void* pixelBuffer; /** - * The pointer to IVideoFrameMetaInfo, which is the interface to get metainfo contents from VideoFrame. + * The pointer to IVideoFrameMetaInfo, which is the interface to get metainfo contents from + * VideoFrame. */ IVideoFrameMetaInfo* metaInfo; @@ -1141,7 +1169,8 @@ class IVideoFrameObserver { * Occurs each time the player receives a video frame. * * After registering the video frame observer, - * the callback occurs each time the player receives a video frame to report the detailed information of the video frame. + * the callback occurs each time the player receives a video frame to report the detailed + * information of the video frame. * @param frame The detailed information of the video frame. See {@link VideoFrame}. */ virtual void onFrame(const VideoFrame* frame) = 0; @@ -1179,6 +1208,30 @@ enum VIDEO_MODULE_POSITION { } // namespace base +/** Definition of SnapshotConfig. + */ +struct SnapshotConfig { + /** + * The local path (including filename extensions) of the snapshot. For example: + * - Windows: `C:\Users\\AppData\Local\Agora\\example.jpg` + * - iOS: `/App Sandbox/Library/Caches/example.jpg` + * - macOS: `~/Library/Logs/example.jpg` + * - Android: `/storage/emulated/0/Android/data//files/example.jpg` + */ + const char* filePath; + + /** + * The position of the video observation. See VIDEO_MODULE_POSITION. + * + * Allowed values vary depending on the `uid` parameter passed in `takeSnapshot` or `takeSnapshotEx`: + * - uid = 0: Position 2, 4 and 8 are allowed. + * - uid != 0: Only position 2 is allowed. + * + */ + media::base::VIDEO_MODULE_POSITION position; + SnapshotConfig() :filePath(NULL), position(media::base::POSITION_PRE_ENCODER) {} +}; + /** * The audio frame observer. */ @@ -1238,7 +1291,7 @@ class IAudioFrameObserverBase { */ int samplesPerSec; /** - * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data + * The data buffer of the audio frame. When the audio frame uses a stereo channel, the data * buffer is interleaved. * * Buffer data size: buffer = samplesPerChannel × channels × bytesPerSample. 
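A minimal sketch of the new SnapshotConfig defined above. The file path is a placeholder, and a takeSnapshot overload accepting the config is assumed from the engine API rather than shown in this hunk:

    agora::media::SnapshotConfig snap;
    snap.filePath = "/storage/emulated/0/Android/data/<package>/files/example.jpg";  // placeholder path
    snap.position = agora::media::base::POSITION_PRE_ENCODER;  // value 4: per the note above, only valid when uid == 0
    // engine->takeSnapshot(0, snap);   // assumed overload taking a SnapshotConfig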
@@ -1247,14 +1300,14 @@ class IAudioFrameObserverBase { /** * The timestamp to render the audio data. * - * You can use this timestamp to restore the order of the captured audio frame, and synchronize - * audio and video frames in video scenarios, including scenarios where external video sources + * You can use this timestamp to restore the order of the captured audio frame, and synchronize + * audio and video frames in video scenarios, including scenarios where external video sources * are used. */ int64_t renderTimeMs; /** * A reserved parameter. - * + * * You can use this presentationMs parameter to indicate the presenation milisecond timestamp, * this will then filled into audio4 extension part, the remote side could use this pts in av * sync process with video frame. @@ -1263,11 +1316,11 @@ class IAudioFrameObserverBase { /** * The pts timestamp of this audio frame. * - * This timestamp is used to indicate the origin pts time of the frame, and sync with video frame by - * the pts time stamp + * This timestamp is used to indicate the origin pts time of the frame, and sync with video + * frame by the pts time stamp */ int64_t presentationMs; - /** + /** * The number of the audio track. */ int audioTrackNumber; @@ -1276,17 +1329,18 @@ class IAudioFrameObserverBase { */ uint32_t rtpTimestamp; - AudioFrame() : type(FRAME_TYPE_PCM16), - samplesPerChannel(0), - bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), - channels(0), - samplesPerSec(0), - buffer(NULL), - renderTimeMs(0), - avsync_type(0), - presentationMs(0), - audioTrackNumber(0), - rtpTimestamp(0) {} + AudioFrame() + : type(FRAME_TYPE_PCM16), + samplesPerChannel(0), + bytesPerSample(rtc::TWO_BYTES_PER_SAMPLE), + channels(0), + samplesPerSec(0), + buffer(NULL), + renderTimeMs(0), + avsync_type(0), + presentationMs(0), + audioTrackNumber(0), + rtpTimestamp(0) {} }; enum AUDIO_FRAME_POSITION { @@ -1335,8 +1389,17 @@ class IAudioFrameObserverBase { */ int samples_per_call; - AudioParams() : sample_rate(0), channels(0), mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), samples_per_call(0) {} - AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, int samplesPerCall) : sample_rate(samplerate), channels(channel), mode(type), samples_per_call(samplesPerCall) {} + AudioParams() + : sample_rate(0), + channels(0), + mode(rtc::RAW_AUDIO_FRAME_OP_MODE_READ_ONLY), + samples_per_call(0) {} + AudioParams(int samplerate, int channel, rtc::RAW_AUDIO_FRAME_OP_MODE_TYPE type, + int samplesPerCall) + : sample_rate(samplerate), + channels(channel), + mode(type), + samples_per_call(samplesPerCall) {} }; public: @@ -1386,10 +1449,11 @@ class IAudioFrameObserverBase { * - true: The before-mixing playback audio frame is valid and is encoded and sent. * - false: The before-mixing playback audio frame is invalid and is not encoded or sent. */ - virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, AudioFrame& audioFrame) { - (void) channelId; - (void) userId; - (void) audioFrame; + virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, base::user_id_t userId, + AudioFrame& audioFrame) { + (void)channelId; + (void)userId; + (void)audioFrame; return true; } @@ -1398,12 +1462,19 @@ class IAudioFrameObserverBase { * @return A bit mask that controls the frame position of the audio observer. * @note - Use '|' (the OR operator) to observe multiple frame positions. *
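To make the buffer-size formula and the AudioParams constructor above concrete, a sketch for 10 ms of 48 kHz mono PCM16 (illustrative values, not SDK defaults):

    // buffer size = samplesPerChannel * channels * bytesPerSample
    const int samplesPerChannel = 480;  // 48 kHz * 0.010 s
    const int channels = 1;
    const int bytesPerSample = 2;       // rtc::TWO_BYTES_PER_SAMPLE for PCM16
    const int bufferBytes = samplesPerChannel * channels * bytesPerSample;  // 960 bytes per callback

    // Matching AudioParams: 48 kHz, mono, read/write mode, 480 samples per call.
    agora::media::IAudioFrameObserverBase::AudioParams params(
        48000, 1, agora::rtc::RAW_AUDIO_FRAME_OP_MODE_READ_WRITE, 480);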

- * After you successfully register the audio observer, the SDK triggers this callback each time it receives a audio frame. You can determine which position to observe by setting the return value. - * The SDK provides 4 positions for observer. Each position corresponds to a callback function: - * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received, which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback. - * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received, which corresponds to the \ref onRecordFrame "onRecordFrame" callback. - * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which corresponds to the \ref onMixedFrame "onMixedFrame" callback. - * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing "onPlaybackFrameBeforeMixing" callback. + * After you successfully register the audio observer, the SDK triggers this callback each time it + * receives a audio frame. You can determine which position to observe by setting the return + * value. The SDK provides 4 positions for observer. Each position corresponds to a callback + * function: + * - `AUDIO_FRAME_POSITION_PLAYBACK (1 << 0)`: The position for playback audio frame is received, + * which corresponds to the \ref onPlaybackFrame "onPlaybackFrame" callback. + * - `AUDIO_FRAME_POSITION_RECORD (1 << 1)`: The position for record audio frame is received, + * which corresponds to the \ref onRecordFrame "onRecordFrame" callback. + * - `AUDIO_FRAME_POSITION_MIXED (1 << 2)`: The position for mixed audio frame is received, which + * corresponds to the \ref onMixedFrame "onMixedFrame" callback. + * - `AUDIO_FRAME_POSITION_BEFORE_MIXING (1 << 3)`: The position for playback audio frame before + * mixing is received, which corresponds to the \ref onPlaybackFrameBeforeMixing + * "onPlaybackFrameBeforeMixing" callback. * @return The bit mask that controls the audio observation positions. * See AUDIO_FRAME_POSITION. */ @@ -1475,25 +1546,25 @@ class IAudioFrameObserver : public IAudioFrameObserverBase { * - true: The before-mixing playback audio frame is valid and is encoded and sent. * - false: The before-mixing playback audio frame is invalid and is not encoded or sent. */ - virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid, AudioFrame& audioFrame) = 0; + virtual bool onPlaybackAudioFrameBeforeMixing(const char* channelId, rtc::uid_t uid, + AudioFrame& audioFrame) = 0; }; struct AudioSpectrumData { /** * The audio spectrum data of audio. */ - const float *audioSpectrumData; + const float* audioSpectrumData; /** * The data length of audio spectrum data. */ int dataLength; AudioSpectrumData() : audioSpectrumData(NULL), dataLength(0) {} - AudioSpectrumData(const float *data, int length) : - audioSpectrumData(data), dataLength(length) {} + AudioSpectrumData(const float* data, int length) : audioSpectrumData(data), dataLength(length) {} }; -struct UserAudioSpectrumInfo { +struct UserAudioSpectrumInfo { /** * User ID of the speaker. 
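The four audio observation positions listed above combine with '|'; a sketch of the mask the position-selection callback would return (bit values quoted from the comment):

    // Observe the playback frame and the per-user frame before mixing, nothing else.
    int positions = (1 << 0)    // AUDIO_FRAME_POSITION_PLAYBACK      -> onPlaybackFrame
                  | (1 << 3);   // AUDIO_FRAME_POSITION_BEFORE_MIXING -> onPlaybackFrameBeforeMixing
    // Returning `positions` from the observer's position callback limits which hooks fire.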
*/ @@ -1505,14 +1576,15 @@ struct UserAudioSpectrumInfo { UserAudioSpectrumInfo() : uid(0) {} - UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) : uid(uid), spectrumData(data, length) {} + UserAudioSpectrumInfo(agora::rtc::uid_t uid, const float* data, int length) + : uid(uid), spectrumData(data, length) {} }; /** * The IAudioSpectrumObserver class. */ class IAudioSpectrumObserver { -public: + public: virtual ~IAudioSpectrumObserver() {} /** @@ -1521,7 +1593,8 @@ class IAudioSpectrumObserver { * This callback reports the audio spectrum data of the local audio at the moment * in the channel. * - * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". + * You can set the time interval of this callback using \ref + * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". * * @param data The audio spectrum data of local audio. * - true: Processed. @@ -1534,10 +1607,12 @@ class IAudioSpectrumObserver { * This callback reports the IDs and audio spectrum data of the loudest speakers at the moment * in the channel. * - * You can set the time interval of this callback using \ref ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". + * You can set the time interval of this callback using \ref + * ILocalUser::enableAudioSpectrumMonitor "enableAudioSpectrumMonitor". * - * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo "UserAudioSpectrumInfo", which is an array containing - * the user ID and audio spectrum data for each speaker. + * @param spectrums The pointer to \ref agora::media::UserAudioSpectrumInfo + * "UserAudioSpectrumInfo", which is an array containing the user ID and audio spectrum data for + * each speaker. * - This array contains the following members: * - `uid`, which is the UID of each remote speaker * - `spectrumData`, which reports the audio spectrum of each remote speaker. @@ -1545,7 +1620,8 @@ class IAudioSpectrumObserver { * - true: Processed. * - false: Not processed. */ - virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, unsigned int spectrumNumber) = 0; + virtual bool onRemoteAudioSpectrum(const UserAudioSpectrumInfo* spectrums, + unsigned int spectrumNumber) = 0; }; /** @@ -1563,8 +1639,9 @@ class IVideoEncodedFrameObserver { * - true: Accept. * - false: Do not accept. */ - virtual bool onEncodedVideoFrameReceived(rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, - const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; + virtual bool onEncodedVideoFrameReceived( + rtc::uid_t uid, const uint8_t* imageBuffer, size_t length, + const rtc::EncodedVideoFrameInfo& videoEncodedFrameInfo) = 0; virtual ~IVideoEncodedFrameObserver() {} }; @@ -1581,16 +1658,18 @@ class IVideoFrameObserver { enum VIDEO_FRAME_PROCESS_MODE { /** * Read-only mode. - * + * * In this mode, you do not modify the video frame. The video frame observer is a renderer. */ - PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original frame. + PROCESS_MODE_READ_ONLY, // Observer works as a pure renderer and will not modify the original + // frame. /** * Read and write mode. - * + * * In this mode, you modify the video frame. The video frame observer is a video filter. */ - PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and affect the following frame processing in SDK. 
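A sketch of an IAudioSpectrumObserver implementation. onRemoteAudioSpectrum matches the signature shown above; the onLocalAudioSpectrum signature is assumed from the SDK, since its declaration sits outside this hunk:

    class SpectrumLogger : public agora::media::IAudioSpectrumObserver {
     public:
      bool onLocalAudioSpectrum(const agora::media::AudioSpectrumData& data) override {
        // data.audioSpectrumData points at data.dataLength float bins.
        return true;  // processed
      }
      bool onRemoteAudioSpectrum(const agora::media::UserAudioSpectrumInfo* spectrums,
                                 unsigned int spectrumNumber) override {
        for (unsigned int i = 0; i < spectrumNumber; ++i) {
          // spectrums[i].uid identifies the speaker; spectrums[i].spectrumData holds its bins.
        }
        return true;
      }
    };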
+ PROCESS_MODE_READ_WRITE, // Observer works as a filter that will process the video frame and + // affect the following frame processing in SDK. }; public: @@ -1599,38 +1678,43 @@ class IVideoFrameObserver { /** * Occurs each time the SDK receives a video frame captured by the local camera. * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * a video frame is received. In this callback, you can get the video data captured by the local - * camera. You can then pre-process the data according to your scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time a video frame is received. In this callback, you can get the video data captured by the + * local camera. You can then pre-process the data according to your scenarios. * * After pre-processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. * * @note - * - If you get the video data in RGBA color encoding format, Agora does not support using this callback to send the processed data in RGBA color encoding format back to the SDK. - * - The video data that this callback gets has not been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. + * - If you get the video data in RGBA color encoding format, Agora does not support using this + * callback to send the processed data in RGBA color encoding format back to the SDK. + * - The video data that this callback gets has not been pre-processed, such as watermarking, + * cropping content, rotating, or image enhancement. * * @param videoFrame A pointer to the video frame: VideoFrame * @param sourceType source type of video frame. See #VIDEO_SOURCE_TYPE. * @return Determines whether to ignore the current video frame if the pre-processing fails: * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. - */ - virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + */ + virtual bool onCaptureVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** * Occurs each time the SDK receives a video frame before encoding. * - * After you successfully register the video frame observer, the SDK triggers this callback each time - * when it receives a video frame. In this callback, you can get the video data before encoding. You can then - * process the data according to your particular scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time when it receives a video frame. In this callback, you can get the video data before + * encoding. You can then process the data according to your particular scenarios. * * After processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. * * @note - * - To get the video data captured from the second screen before encoding, you need to set (1 << 2) as a frame position through `getObservedFramePosition`. - * - The video data that this callback gets has been pre-processed, such as watermarking, cropping content, rotating, or image enhancement. + * - To get the video data captured from the second screen before encoding, you need to set (1 << + * 2) as a frame position through `getObservedFramePosition`. 
+ * - The video data that this callback gets has been pre-processed, such as watermarking, cropping + * content, rotating, or image enhancement. * - This callback does not support sending processed RGBA video data back to the SDK. * * @param videoFrame A pointer to the video frame: VideoFrame @@ -1639,7 +1723,8 @@ class IVideoFrameObserver { * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. */ - virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, VideoFrame& videoFrame) = 0; + virtual bool onPreEncodeVideoFrame(agora::rtc::VIDEO_SOURCE_TYPE sourceType, + VideoFrame& videoFrame) = 0; /** * Occurs each time the SDK receives a video frame decoded by the MediaPlayer. @@ -1650,10 +1735,13 @@ class IVideoFrameObserver { * * After pre-processing, you can send the processed video data back to the SDK by setting the * `videoFrame` parameter in this callback. - * + * * @note - * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". - * - On Android, this callback is not affected by the return value of \ref getVideoFormatPreference "getVideoFormatPreference" + * - This callback will not be affected by the return values of \ref getVideoFrameProcessMode + * "getVideoFrameProcessMode", \ref getRotationApplied "getRotationApplied", \ref getMirrorApplied + * "getMirrorApplied", \ref getObservedFramePosition "getObservedFramePosition". + * - On Android, this callback is not affected by the return value of \ref + * getVideoFormatPreference "getVideoFormatPreference" * * @param videoFrame A pointer to the video frame: VideoFrame * @param mediaPlayerId ID of the mediaPlayer. @@ -1666,13 +1754,13 @@ class IVideoFrameObserver { /** * Occurs each time the SDK receives a video frame sent by the remote user. * - * After you successfully register the video frame observer, the SDK triggers this callback each time a - * video frame is received. In this callback, you can get the video data sent by the remote user. You - * can then post-process the data according to your scenarios. + * After you successfully register the video frame observer, the SDK triggers this callback each + * time a video frame is received. In this callback, you can get the video data sent by the remote + * user. You can then post-process the data according to your scenarios. + * + * After post-processing, you can send the processed data back to the SDK by setting the + * `videoFrame` parameter in this callback. * - * After post-processing, you can send the processed data back to the SDK by setting the `videoFrame` - * parameter in this callback. - * * @note This callback does not support sending processed RGBA video data back to the SDK. * * @param channelId The channel name @@ -1682,45 +1770,48 @@ class IVideoFrameObserver { * - true: Do not ignore. * - false: Ignore, in which case this method does not sent the current video frame to the SDK. */ - virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, VideoFrame& videoFrame) = 0; + virtual bool onRenderVideoFrame(const char* channelId, rtc::uid_t remoteUid, + VideoFrame& videoFrame) = 0; virtual bool onTranscodedVideoFrame(VideoFrame& videoFrame) = 0; /** - * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the video frame. 
- * - * After you successfully register the video frame observer, the SDK triggers this callback each time it receives - * a video frame. You need to set your preferred process mode in the return value of this callback. + * Occurs each time the SDK receives a video frame and prompts you to set the process mode of the + * video frame. + * + * After you successfully register the video frame observer, the SDK triggers this callback each + * time it receives a video frame. You need to set your preferred process mode in the return value + * of this callback. * @return VIDEO_FRAME_PROCESS_MODE. */ - virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { - return PROCESS_MODE_READ_ONLY; - } + virtual VIDEO_FRAME_PROCESS_MODE getVideoFrameProcessMode() { return PROCESS_MODE_READ_ONLY; } /** * Sets the format of the raw video data output by the SDK. * - * If you want to get raw video data in a color encoding format other than YUV 420, register this callback when - * calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK triggers - * this callback each time it receives a video frame. You need to set your preferred video data in the return value - * of this callback. - * - * @note If you want the video captured by the sender to be the original format, set the original video data format - * to VIDEO_PIXEL_DEFAULT in the return value. On different platforms, the original video pixel format is also - * different, for the actual video pixel format, see `VideoFrame`. - * + * If you want to get raw video data in a color encoding format other than YUV 420, register this + * callback when calling `registerVideoFrameObserver`. After you successfully register the video + * frame observer, the SDK triggers this callback each time it receives a video frame. You need to + * set your preferred video data in the return value of this callback. + * + * @note If you want the video captured by the sender to be the original format, set the original + * video data format to VIDEO_PIXEL_DEFAULT in the return value. On different platforms, the + * original video pixel format is also different, for the actual video pixel format, see + * `VideoFrame`. + * * @return Sets the video format. See VIDEO_PIXEL_FORMAT. */ virtual base::VIDEO_PIXEL_FORMAT getVideoFormatPreference() { return base::VIDEO_PIXEL_DEFAULT; } /** - * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured video. - * - * If you want to rotate the captured video according to the rotation member in the `VideoFrame` class, register this - * callback by calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the - * SDK triggers this callback each time it receives a video frame. You need to set whether to rotate the video frame - * in the return value of this callback. - * + * Occurs each time the SDK receives a video frame, and prompts you whether to rotate the captured + * video. + * + * If you want to rotate the captured video according to the rotation member in the `VideoFrame` + * class, register this callback by calling `registerVideoFrameObserver`. After you successfully + * register the video frame observer, the SDK triggers this callback each time it receives a video + * frame. You need to set whether to rotate the video frame in the return value of this callback. + * * @note This function only supports video data in RGBA or YUV420. * * @return Determines whether to rotate. 
@@ -1730,13 +1821,15 @@ class IVideoFrameObserver { virtual bool getRotationApplied() { return false; } /** - * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the captured video. - * - * If the video data you want to obtain is a mirror image of the original video, you need to register this callback - * when calling `registerVideoFrameObserver`. After you successfully register the video frame observer, the SDK - * triggers this callback each time it receives a video frame. You need to set whether or not to mirror the video - * frame in the return value of this callback. - * + * Occurs each time the SDK receives a video frame and prompts you whether or not to mirror the + * captured video. + * + * If the video data you want to obtain is a mirror image of the original video, you need to + * register this callback when calling `registerVideoFrameObserver`. After you successfully + * register the video frame observer, the SDK triggers this callback each time it receives a video + * frame. You need to set whether or not to mirror the video frame in the return value of this + * callback. + * * @note This function only supports video data in RGBA and YUV420 formats. * * @return Determines whether to mirror. @@ -1748,19 +1841,24 @@ class IVideoFrameObserver { /** * Sets the frame position for the video observer. * - * After you successfully register the video observer, the SDK triggers this callback each time it receives - * a video frame. You can determine which position to observe by setting the return value. The SDK provides - * 3 positions for observer. Each position corresponds to a callback function: + * After you successfully register the video observer, the SDK triggers this callback each time it + * receives a video frame. You can determine which position to observe by setting the return + * value. The SDK provides 3 positions for observer. Each position corresponds to a callback + * function: * - * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds to the onCaptureVideoFrame callback. - * POSITION_PRE_RENDERER(1 << 1): The position before receiving the remote video data, which corresponds to the onRenderVideoFrame callback. - * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to the onPreEncodeVideoFrame callback. + * POSITION_POST_CAPTURER(1 << 0): The position after capturing the video data, which corresponds + * to the onCaptureVideoFrame callback. POSITION_PRE_RENDERER(1 << 1): The position before + * receiving the remote video data, which corresponds to the onRenderVideoFrame callback. + * POSITION_PRE_ENCODER(1 << 2): The position before encoding the video data, which corresponds to + * the onPreEncodeVideoFrame callback. * * To observe multiple frame positions, use '|' (the OR operator). - * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by default. - * To conserve the system consumption, you can reduce the number of frame positions that you want to observe. + * This callback observes POSITION_POST_CAPTURER(1 << 0) and POSITION_PRE_RENDERER(1 << 1) by + * default. To conserve the system consumption, you can reduce the number of frame positions that + * you want to observe. * - * @return A bit mask that controls the frame position of the video observer: VIDEO_OBSERVER_POSITION. + * @return A bit mask that controls the frame position of the video observer: + * VIDEO_OBSERVER_POSITION. 
*/ virtual uint32_t getObservedFramePosition() { return base::POSITION_POST_CAPTURER | base::POSITION_PRE_RENDERER; @@ -1854,7 +1952,8 @@ enum RecorderReasonCode { */ RECORDER_REASON_WRITE_FAILED = 1, /** - * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams are interrupted for more than five seconds during recording. + * 2: The SDK does not detect audio and video streams to be recorded, or audio and video streams + * are interrupted for more than five seconds during recording. */ RECORDER_REASON_NO_STREAM = 2, /** @@ -1882,7 +1981,8 @@ struct MediaRecorderConfiguration { */ const char* storagePath; /** - * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat "MediaRecorderContainerFormat". + * The format of the recording file. See \ref agora::rtc::MediaRecorderContainerFormat + * "MediaRecorderContainerFormat". */ MediaRecorderContainerFormat containerFormat; /** @@ -1900,23 +2000,70 @@ struct MediaRecorderConfiguration { * callback to report the updated recording information. */ int recorderInfoUpdateInterval; - - MediaRecorderConfiguration() : storagePath(NULL), containerFormat(FORMAT_MP4), streamType(STREAM_TYPE_BOTH), maxDurationMs(120000), recorderInfoUpdateInterval(0) {} - MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, MediaRecorderStreamType type, int duration, int interval) : storagePath(path), containerFormat(format), streamType(type), maxDurationMs(duration), recorderInfoUpdateInterval(interval) {} + /** + * The video width + */ + int width; + /** + * The video height + */ + int height; + /** + * The video fps + */ + int fps; + /** + * The audio sample rate + */ + int sample_rate; + /** + * The audio channel nums + */ + int channel_num; + /** + * The video source just for out channel recoder + */ + agora::rtc::VIDEO_SOURCE_TYPE videoSourceType; + + MediaRecorderConfiguration() + : storagePath(NULL), + containerFormat(FORMAT_MP4), + streamType(STREAM_TYPE_BOTH), + maxDurationMs(120000), + recorderInfoUpdateInterval(0), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} + MediaRecorderConfiguration(const char* path, MediaRecorderContainerFormat format, + MediaRecorderStreamType type, int duration, int interval) + : storagePath(path), + containerFormat(format), + streamType(type), + maxDurationMs(duration), + recorderInfoUpdateInterval(interval), + width(1280), + height(720), + fps(30), + sample_rate(48000), + channel_num(1), + videoSourceType(rtc::VIDEO_SOURCE_CAMERA_PRIMARY) {} }; class IFaceInfoObserver { -public: - /** - * Occurs when the face info is received. - * @param outFaceInfo The output face info. - * @return - * - true: The face info is valid. - * - false: The face info is invalid. + public: + /** + * Occurs when the face info is received. + * @param outFaceInfo The output face info. + * @return + * - true: The face info is valid. + * - false: The face info is invalid. 
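The new MediaRecorderConfiguration fields above default to 1280x720 at 30 fps with 48 kHz mono audio. A sketch overriding a few of them (the namespace qualification and the storage path are assumptions):

    agora::media::MediaRecorderConfiguration cfg;   // defaults: MP4, both streams, 120 s max, 1280x720@30, 48 kHz mono
    cfg.storagePath = "/sdcard/recording.mp4";      // placeholder path
    cfg.fps = 15;                                   // new field in this patch
    cfg.channel_num = 2;                            // new field: record stereo audio
    cfg.videoSourceType = agora::rtc::VIDEO_SOURCE_CAMERA_PRIMARY;  // new field, same as the constructor default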
*/ - virtual bool onFaceInfo(const char* outFaceInfo) = 0; - - virtual ~IFaceInfoObserver() {} + virtual bool onFaceInfo(const char* outFaceInfo) = 0; + + virtual ~IFaceInfoObserver() {} }; /** @@ -1939,7 +2086,8 @@ struct RecorderInfo { unsigned int fileSize; RecorderInfo() : fileName(NULL), durationMs(0), fileSize(0) {} - RecorderInfo(const char* name, unsigned int dur, unsigned int size) : fileName(name), durationMs(dur), fileSize(size) {} + RecorderInfo(const char* name, unsigned int dur, unsigned int size) + : fileName(name), durationMs(dur), fileSize(size) {} }; class IMediaRecorderObserver { @@ -1949,30 +2097,35 @@ class IMediaRecorderObserver { * * @since v4.0.0 * - * When the local audio and video recording state changes, the SDK triggers this callback to report the current - * recording state and the reason for the change. + * When the local audio and video recording state changes, the SDK triggers this callback to + * report the current recording state and the reason for the change. * * @param channelId The channel name. * @param uid ID of the user. * @param state The current recording state. See \ref agora::media::RecorderState "RecorderState". - * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode "RecorderReasonCode". + * @param reason The reason for the state change. See \ref agora::media::RecorderReasonCode + * "RecorderReasonCode". */ - virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, RecorderReasonCode reason) = 0; + virtual void onRecorderStateChanged(const char* channelId, rtc::uid_t uid, RecorderState state, + RecorderReasonCode reason) = 0; /** * Occurs when the recording information is updated. * * @since v4.0.0 * - * After you successfully register this callback and enable the local audio and video recording, the SDK periodically triggers - * the `onRecorderInfoUpdated` callback based on the set value of `recorderInfoUpdateInterval`. This callback reports the - * filename, duration, and size of the current recording file. + * After you successfully register this callback and enable the local audio and video recording, + * the SDK periodically triggers the `onRecorderInfoUpdated` callback based on the set value of + * `recorderInfoUpdateInterval`. This callback reports the filename, duration, and size of the + * current recording file. * * @param channelId The channel name. * @param uid ID of the user. - * @param info Information about the recording file. See \ref agora::media::RecorderInfo "RecorderInfo". + * @param info Information about the recording file. See \ref agora::media::RecorderInfo + * "RecorderInfo". 
* */ - virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, const RecorderInfo& info) = 0; + virtual void onRecorderInfoUpdated(const char* channelId, rtc::uid_t uid, + const RecorderInfo& info) = 0; virtual ~IMediaRecorderObserver() {} }; diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h index b3b92e9e4..44975bfe9 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaEngine.h @@ -141,6 +141,24 @@ class IMediaEngine { bool enabled, bool useTexture, EXTERNAL_VIDEO_SOURCE_TYPE sourceType = VIDEO_FRAME, rtc::SenderOptions encodedVideoOption = rtc::SenderOptions()) = 0; +#if defined(__ANDROID__) + /** + * Sets the remote eglContext. + * + * When the engine is destroyed, the SDK will automatically release the eglContext. + * + * @param eglContext. + * + * @note + * setExternalRemoteEglContext needs to be called before joining the channel. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setExternalRemoteEglContext(void* eglContext) = 0; +#endif + /** * Sets the external audio source. * diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h index 17375607c..79a8db35e 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraMediaRecorder.h @@ -7,7 +7,6 @@ #include "AgoraBase.h" #include "AgoraMediaBase.h" -#include "IAgoraRtcEngineEx.h" namespace agora { namespace rtc { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h index 5e075327f..70c87f818 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngine.h @@ -97,12 +97,14 @@ enum AUDIO_MIXING_REASON_TYPE { AUDIO_MIXING_REASON_TOO_FREQUENT_CALL = 702, /** 703: The audio mixing file playback is interrupted. */ AUDIO_MIXING_REASON_INTERRUPTED_EOF = 703, - /** 715: The audio mixing file is played once. */ + /** 721: The audio mixing file is played once. */ AUDIO_MIXING_REASON_ONE_LOOP_COMPLETED = 721, - /** 716: The audio mixing file is all played out. */ + /** 723: The audio mixing file is all played out. */ AUDIO_MIXING_REASON_ALL_LOOPS_COMPLETED = 723, - /** 716: The audio mixing file stopped by user */ + /** 724: The audio mixing file stopped by user */ AUDIO_MIXING_REASON_STOPPED_BY_USER = 724, + /** 726: The audio mixing playback has resumed by user */ + AUDIO_MIXING_REASON_RESUMED_BY_USER = 726, /** 0: The SDK can open the audio mixing file. */ AUDIO_MIXING_REASON_OK = 0, }; @@ -854,7 +856,7 @@ struct ScreenCaptureConfiguration { /** * (macOS only) The display ID of the screen. */ - uint32_t displayId; + int64_t displayId; /** * (Windows only) The relative position of the shared screen to the virtual screen. * @note This parameter takes effect only when you want to capture the screen on Windows. @@ -864,7 +866,7 @@ struct ScreenCaptureConfiguration { * (For Windows and macOS only) The window ID. 
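A sketch of the new Android-only setExternalRemoteEglContext() documented above. Obtaining the IMediaEngine pointer and the EGLContext handle happens elsewhere in the app and is assumed here:

    #if defined(__ANDROID__)
    // Must run before joinChannel(); the SDK releases the context when the engine is destroyed.
    void* eglContext = /* native handle of the app's shared EGLContext */ nullptr;
    int ret = mediaEngine->setExternalRemoteEglContext(eglContext);
    if (ret < 0) {
      // the context was not accepted
    }
    #endif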
* @note This parameter takes effect only when you want to capture the window. */ - view_t windowId; + int64_t windowId; /** * (For Windows and macOS only) The screen capture configuration. For details, see ScreenCaptureParameters. */ @@ -944,7 +946,7 @@ struct ScreenCaptureSourceInfo { /** * The window ID for a window or the display ID for a screen. */ - view_t sourceId; + int64_t sourceId; /** * The name of the window or screen. UTF-8 encoding. */ @@ -987,11 +989,11 @@ struct ScreenCaptureSourceInfo { * ID to the display monitor that has the largest area of intersection with the window, Otherwise * the return value is -2. */ - view_t sourceDisplayId; - ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(nullptr), sourceName(nullptr), - processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false), minimizeWindow(false), sourceDisplayId((view_t)-2) {} + int64_t sourceDisplayId; + ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(0), sourceName(nullptr), + processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false), minimizeWindow(false), sourceDisplayId(-2) {} #else - ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(nullptr), sourceName(nullptr), processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false) {} + ScreenCaptureSourceInfo() : type(ScreenCaptureSourceType_Unknown), sourceId(0), sourceName(nullptr), processPath(nullptr), sourceTitle(nullptr), primaryMonitor(false), isOccluded(false) {} #endif }; /** @@ -4187,6 +4189,32 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int getFaceShapeAreaOptions(agora::rtc::FaceShapeAreaOptions::FACE_SHAPE_AREA shapeArea, FaceShapeAreaOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** + * Sets filter effect options. + * + * @since v4.4.1 + * You can call this method to enable the filter effect feature and set the options of the filter effect. + * + * @note + * - Before calling this method, ensure that you have integrated the following dynamic library into your project: + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` + * - Call this method after calling the \ref IRtcEngine::enableVideo "enableVideo" method. + * - You can call this method either before or after joining a channel. + * - The filter effect feature has specific performance requirements for devices. If your device overheats after enabling the filter effect, Agora recommends disabling it entirely. + * + * @param enabled. Whether to enable filter effect: + * - `true`: Enable. + * - `false`: (Default) Disable. + * @param options. Set the filter effect options. See FilterEffectOptions. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setFilterEffectOptions(bool enabled, const FilterEffectOptions& options, agora::media::MEDIA_SOURCE_TYPE type = agora::media::PRIMARY_CAMERA_SOURCE) = 0; + /** * Sets low-light enhancement. 
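A hedged sketch of calling the setFilterEffectOptions method introduced above; the fields of FilterEffectOptions are not shown in this diff, so the default-constructed options value is a placeholder assumption.

#include "IAgoraRtcEngine.h"

// Sketch only: enable the filter effect with default options, after video has been enabled.
void enableFilterEffect(agora::rtc::IRtcEngine* engine) {
  engine->enableVideo();                    // setFilterEffectOptions must be called after enableVideo
  agora::rtc::FilterEffectOptions options;  // placeholder: fields not shown in this diff
  engine->setFilterEffectOptions(true, options);
  // Call setFilterEffectOptions(false, options) later to disable the effect, e.g. if the device overheats.
}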
* @@ -4198,9 +4226,9 @@ class IRtcEngine : public agora::base::IEngineBase { * * @note * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". * - The low-light enhancement feature has certain performance requirements on devices. If your device overheats after you enable low-light enhancement, Agora recommends modifying the low-light enhancement options to a less performance-consuming level or disabling low-light enhancement entirely. * @@ -4225,9 +4253,9 @@ class IRtcEngine : public agora::base::IEngineBase { * * @note * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". * - The video noise reduction feature has certain performance requirements on devices. If your device overheats after you enable video noise reduction, Agora recommends modifying the video noise reduction options to a less performance-consuming level or disabling video noise reduction entirely. * @@ -4252,9 +4280,9 @@ class IRtcEngine : public agora::base::IEngineBase { * * @note * - Before calling this method, ensure that you have integrated the following dynamic library into your project: - * - Android: `libagora_segmentation_extension.so` - * - iOS/macOS: `AgoraVideoSegmentationExtension.xcframework` - * - Windows: `libagora_segmentation_extension.dll` + * - Android: `libagora_clear_vision_extension.so` + * - iOS/macOS: `AgoraClearVisionExtension.xcframework` + * - Windows: `libagora_clear_vision_extension.dll` * - Call this method after \ref IRtcEngine::enableVideo "enableVideo". * - The color enhancement feature has certain performance requirements on devices. If your device overheats after you enable color enhancement, Agora recommends modifying the color enhancement options to a less performance-consuming level or disabling color enhancement entirely. * @@ -6000,7 +6028,26 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int setRemoteRenderMode(uid_t uid, media::base::RENDER_MODE_TYPE renderMode, VIDEO_MIRROR_MODE_TYPE mirrorMode) = 0; - + /** + * Sets the target frames per second (FPS) for the local render target. + * + * @param sourceType The type of video source. + * @param targetFps The target frames per second to be set. + * + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int setLocalRenderTargetFps(VIDEO_SOURCE_TYPE sourceType, int targetFps) = 0; + /** + * Sets the target frames per second (FPS) for the remote render target. + * + * @param targetFps The target frames per second to be set for the remote render target. + * @return + * - 0: Success. + * - < 0: Failure. 
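An illustrative sketch for the two render-target FPS setters documented here; the source type and frame-rate values are arbitrary examples rather than recommendations from this patch.

#include "IAgoraRtcEngine.h"

// Sketch only: cap local camera-preview rendering at 15 fps and remote rendering at 10 fps.
void limitRenderFps(agora::rtc::IRtcEngine* engine) {
  engine->setLocalRenderTargetFps(agora::rtc::VIDEO_SOURCE_CAMERA_PRIMARY, 15);
  engine->setRemoteRenderTargetFps(10);
}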
+ */ + virtual int setRemoteRenderTargetFps(int targetFps) = 0; // The following APIs are either deprecated or going to be deleted. /** @@ -7021,7 +7068,7 @@ class IRtcEngine : public agora::base::IEngineBase { - ERR_INVALID_ARGUMENT (2): The argument is invalid. - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when trying to start screen capture. */ - virtual int startScreenCaptureByDisplayId(uint32_t displayId, const Rectangle& regionRect, + virtual int startScreenCaptureByDisplayId(int64_t displayId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) = 0; #endif // __APPLE__ && TARGET_OS_MAC && !TARGET_OS_IPHONE @@ -7084,7 +7131,7 @@ class IRtcEngine : public agora::base::IEngineBase { * - ERR_INVALID_ARGUMENT (2): The argument is invalid. * - ERR_NOT_INITIALIZED (7): You have not initialized IRtcEngine when trying to start screen capture. */ - virtual int startScreenCaptureByWindowId(view_t windowId, const Rectangle& regionRect, + virtual int startScreenCaptureByWindowId(int64_t windowId, const Rectangle& regionRect, const ScreenCaptureParameters& captureParams) = 0; /** @@ -7186,6 +7233,26 @@ class IRtcEngine : public agora::base::IEngineBase { * - < 0: Failure. */ virtual int queryCameraFocalLengthCapability(agora::rtc::FocalLengthInfo* focalLengthInfos, int& size) = 0; + +#if defined(__ANDROID__) + /** + * Sets screen sharing using the Android native class MediaProjection. + * + * When screen capture stops, the SDK automatically releases the MediaProjection internally. + * + * @param mediaProjection MediaProjection is an Android class that provides access to screen capture and recording capabilities. + * + * @note + * An external MediaProjection is primarily used for specific scenarios, + * such as custom IoT devices or sub-process screen sharing. + * + * @return + * - 0: Success. + * - < 0: Failure. + * @technical preview + */ + virtual int setExternalMediaProjection(void* mediaProjection) = 0; +#endif #endif #if defined(_WIN32) || defined(__APPLE__) || defined(__ANDROID__) @@ -7346,6 +7413,40 @@ class IRtcEngine : public agora::base::IEngineBase { virtual int stopRtmpStream(const char* url) = 0; virtual int stopLocalVideoTranscoder() = 0; + + /** + * Starts the local audio mixer, which mixes the configured source streams into one audio stream. + * @param config The mixed audio stream source settings. + * @return + * - 0: Success. + * - < 0: Failure. + * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the + * stream. + */ + virtual int startLocalAudioMixer(const LocalAudioMixerConfiguration& config) = 0; + + /** + * Updates the source stream settings for the mixed audio stream. + * @param config The updated source audio stream settings. See LocalAudioMixerConfiguration. + * @return + * - 0: Success. + * - < 0: Failure. + * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the + * stream. + */ + virtual int updateLocalAudioMixerConfiguration(const LocalAudioMixerConfiguration& config) = 0; + + /** + * Stops the local audio mixer. + * + * @return + * - 0: Success. + * - < 0: Failure. + * - #ERR_NOT_INITIALIZED (7): You have not initialized the RTC engine when publishing the + * stream. + */ + virtual int stopLocalAudioMixer() = 0; + /** * Starts video capture with a camera. * @@ -8106,6 +8207,32 @@ class IRtcEngine : public agora::base::IEngineBase { */ virtual int takeSnapshot(uid_t uid, const char* filePath) = 0; + /** + * Takes a snapshot of a video stream.
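A short lifecycle sketch for the local audio mixer APIs added above (startLocalAudioMixer, updateLocalAudioMixerConfiguration, stopLocalAudioMixer). The fields of LocalAudioMixerConfiguration are not visible in this diff, so the default-constructed configuration is an assumption.

#include "IAgoraRtcEngine.h"

// Sketch only: start mixing, later adjust the source streams, then stop.
void runLocalAudioMixer(agora::rtc::IRtcEngine* engine) {
  agora::rtc::LocalAudioMixerConfiguration config;  // placeholder: fields not shown in this diff
  if (engine->startLocalAudioMixer(config) == 0) {
    // ... modify the source stream settings in `config` as needed ...
    engine->updateLocalAudioMixerConfiguration(config);
    engine->stopLocalAudioMixer();
  }
}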
+ * + * This method takes a snapshot of a video stream from the specified user, generates a JPG + * image, and saves it to the specified path. + * + * The method is asynchronous, and the SDK has not taken the snapshot when the method call + * returns. After a successful method call, the SDK triggers the `onSnapshotTaken` callback + * to report whether the snapshot is successfully taken, as well as the details for that + * snapshot. + * + * @note + * - Call this method after joining a channel. + * - This method takes a snapshot of the published video stream specified in `ChannelMediaOptions`. + * + * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. + * @param config The configuration for taking the snapshot. See SnapshotConfig. + * + * Ensure that the path you specify exists and is writable. + * @return + * - 0: Success. + * - < 0: Failure. + * - -4: Incorrect observation position. Modify the input observation position according to the requirements specified in SnapshotConfig. + */ + virtual int takeSnapshot(uid_t uid, const media::SnapshotConfig& config) = 0; + /** Enables the content inspect. @param enabled Whether to enable content inspect: - `true`: Yes. @@ -8332,6 +8459,17 @@ class IRtcEngine : public agora::base::IEngineBase { * @technical preview */ virtual int sendAudioMetadata(const char* metadata, size_t length) = 0; + + /** + * @brief Queries the HDR capability of the video module. + * @param videoModule The video module. See VIDEO_MODULE_TYPE. + * @param capability The HDR capability of the video module. See HDR_CAPABILITY. + * @return + * - 0: Success. + * - < 0: Failure. + * @technical preview + */ + virtual int queryHDRCapability(VIDEO_MODULE_TYPE videoModule, HDR_CAPABILITY& capability) = 0; }; // The following types are either deprecated or not implemented yet. @@ -8355,6 +8493,11 @@ enum MEDIA_DEVICE_STATE_TYPE { /** 2: The device is disabled. */ MEDIA_DEVICE_STATE_DISABLED = 2, + + /** 3: The device is plugged in. + */ + MEDIA_DEVICE_STATE_PLUGGED_IN = 3, + /** 4: The device is not present. */ MEDIA_DEVICE_STATE_NOT_PRESENT = 4, diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngineEx.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngineEx.h index a3d7f9858..bd0e816df 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngineEx.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/IAgoraRtcEngineEx.h @@ -1127,6 +1127,55 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int leaveChannelEx(const RtcConnection& connection, const LeaveChannelOptions& options) = 0; + /** + * Leaves a channel with the channel ID and user account. + * + * This method allows a user to leave the channel, for example, by hanging up or exiting a call. + * + * This method is an asynchronous call, which means that the result of this method returns even before + * the user has actually left the channel. Once the user successfully leaves the channel, the + * SDK triggers the \ref IRtcEngineEventHandler::onLeaveChannel "onLeaveChannel" callback. + * + * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character.
+ * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @return + * - 0: Success. + * - < 0: Failure. + */ + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount) = 0; + + /** + * Leaves a channel with the channel ID and user account and sets the options for leaving. + * + * @param channelId The channel name. The maximum length of this parameter is 64 bytes. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param userAccount The user account. The maximum length of this parameter is 255 bytes. Ensure that you set this parameter and do not set it as null. Supported character scopes are: + * - All lowercase English letters: a to z. + * - All uppercase English letters: A to Z. + * - All numeric characters: 0 to 9. + * - The space character. + * - Punctuation characters and other symbols, including: "!", "#", "$", "%", "&", "(", ")", "+", "-", ":", ";", "<", "=", ".", ">", "?", "@", "[", "]", "^", "_", " {", "}", "|", "~", ",". + * @param options The options for leaving the channel. See #LeaveChannelOptions. + * @return int + * - 0: Success. + * - < 0: Failure. + */ + virtual int leaveChannelWithUserAccountEx(const char* channelId, const char* userAccount, const LeaveChannelOptions& options) = 0; + /** * Updates the channel media options after joining the channel. * @@ -1915,6 +1964,33 @@ class IRtcEngineEx : public IRtcEngine { */ virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const char* filePath) = 0; + /** + * Takes a snapshot of a video stream. + * + * This method takes a snapshot of a video stream from the specified user, generates a JPG + * image, and saves it to the specified path. + * + * The method is asynchronous, and the SDK has not taken the snapshot when the method call + * returns. After a successful method call, the SDK triggers the `onSnapshotTaken` callback + * to report whether the snapshot is successfully taken, as well as the details for that + * snapshot. + * + * @note + * - Call this method after joining a channel. + * - This method takes a snapshot of the published video stream specified in `ChannelMediaOptions`. + * + * @param connection The RtcConnection object. + * @param uid The user ID. Set uid as 0 if you want to take a snapshot of the local user's video. + * @param config The configuration for the take snapshot. See SnapshotConfig. + * + * Ensure that the path you specify exists and is writable. + * @return + * - 0 : Success. + * - < 0: Failure. + * - -4: Incorrect observation position. 
Modify the input observation position according to the reqiurements specified in SnapshotConfig. + */ + virtual int takeSnapshotEx(const RtcConnection& connection, uid_t uid, const media::SnapshotConfig& config) = 0; + /** Enables video screenshot and upload with the connection ID. @param enabled Whether to enable video screenshot and upload: - `true`: Yes. diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/bridge.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/bridge.h similarity index 94% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/bridge.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/bridge.h index 1ad36416f..675c4f5a1 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/bridge.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/bridge.h @@ -8,7 +8,7 @@ #include "handle.h" #include "common.h" -#include "stream/stream.h" +#include "./stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_error.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_error.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_error.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_error.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_player.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_player.h similarity index 61% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_player.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_player.h index b00e5a321..b87b88af0 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_player.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_player.h @@ -20,51 +20,258 @@ typedef struct Rte Rte; typedef struct RteStream RteStream; typedef struct RtePlayerInternal RtePlayerInternal; +/** + * Player states. + * When the player state changes, the state will be notified through the PlayerObserver::onStateChanged callback interface. + * @since v4.4.0 + */ typedef enum RtePlayerState { + /** + * 0: Idle state. + */ kRtePlayerStateIdle, + /** + * 1: Opening state. This state is notified after calling rte::Player::OpenWithUrl(). + */ kRtePlayerStateOpening, + /** + * 2: Open completed state. This state is notified after successfully calling rte::Player::OpenWithUrl(). + */ kRtePlayerStateOpenCompleted, + /** + * 3: Playing state. This state is notified when audience members successfully subscribe to the broadcaster after opening an RTE URL. + */ kRtePlayerStatePlaying, + /** + * 4: Paused state. This state is notified when playback is paused. + */ kRtePlayerStatePaused, + /** + * 5: Playback completed state. This state is notified when the broadcaster stops streaming and leaves the live streaming room after playing the rte URL. + */ kRtePlayerStatePlaybackCompleted, + /** + * 6: Stopped state. This state is entered after the user calls Player::stop. + */ kRtePlayerStateStopped, + /** + * 7: Failed state. This state is entered when an internal error occurs. 
+ */ kRtePlayerStateFailed } RtePlayerState; +/** + * Player events. + * When an event occurs, it will be notified through the PlayerObserver::onEvent callback interface. + * @since v4.4.0 + */ typedef enum RtePlayerEvent { + /** + * 0: Start seeking to a specified position for playback. + */ kRtePlayerEventSeekBegin, + /** + * 1: Seeking completes. + */ kRtePlayerEventSeekComplete, + /** + * 2: An error occurs when seeking to a new playback position. + */ kRtePlayerEventSeekError, + /** + * 3: The currently buffered data is not enough to support playback. + */ kRtePlayerEventBufferLow, + /** + * 4: The currently buffered data is just enough to support playback. + */ kRtePlayerEventBufferRecover, + /** + * 5: Audio or video playback starts freezing. + */ kRtePlayerEventFreezeStart, + /** + * 6: The audio or video playback resumes without freezing. + */ kRtePlayerEventFreezeStop, + /** + * 7: One loop playback completed. + */ kRtePlayerEventOneLoopPlaybackCompleted, - kRtePlayerEventAuthenticationWillExpire + /** + * 8: URL authentication will expire. + */ + kRtePlayerEventAuthenticationWillExpire, + /** + * 9: When the fallback option is enabled, ABR revert to the audio-only layer due to poor network. + */ + kRtePlayerEventAbrFallbackToAudioOnlyLayer, + /** + * 10: ABR recovers from audio-only layer to video layer when fallback option is enabled. + */ + kRtePlayerEventAbrRecoverFromAudioOnlyLayer } RtePlayerEvent; +/** + * ABR subscription layer. + * This enumeration can be used to set the value of the abr_subscription_layer query parameter in the rte URL. + * It can also be used in the PlayerConfig::SetAbrSubscriptionLayer setting interface. + * @since v4.4.0 + */ +typedef enum RteAbrSubscriptionLayer { + /** + * 0: High-quality video stream, this layer has the highest resolution and bitrate. + */ + kRteAbrSubscriptionHigh = 0, + /** + * 1: Low-quality video stream, this layer has the lowest resolution and bitrate. + */ + kRteAbrSubscriptionLow = 1, + /** + * 2: Layer1 video stream, this layer has lower resolution and bitrate than that of the high-quality video stream. + */ + kRteAbrSubscriptionLayer1 = 2, + /** + * 3: Layer2 video stream, this layer has lower resolution and bitrate than layer1. + */ + kRteAbrSubscriptionLayer2 = 3, + /** + * 4: Layer3 video stream, this layer has lower resolution and bitrate than layer2. + */ + kRteAbrSubscriptionLayer3 = 4, + /** + * 5: Layer4 video stream, this layer has lower resolution and bitrate than layer3. + */ + kRteAbrSubscriptionLayer4 = 5, + /** + * 6: Layer5 video stream, this layer has lower resolution and bitrate than layer4. + */ + kRteAbrSubscriptionLayer5 = 6, + /** + * 7: Layer6 video stream, this layer has lower resolution and bitrate than layer5. + */ + kRteAbrSubscriptionLayer6 = 7, +} RteAbrSubscriptionLayer; + + +/** + * ABR fallback layer. + * This enumeration can be used to set the value of the abr_fallback_layer query parameter in the rte URL. + * It can also be used in the PlayerConfig::SetAbrFallbackLayer setting interface. + * @since v4.4.0 + */ +typedef enum RteAbrFallbackLayer { + /** + * 0: When the network quality is poor, it will not revert to a lower resolution stream. + * It may still revert to scalable video coding but will maintain the high-quality video resolution. + */ + kRteAbrFallbackDisabled = 0, + /** + * 1: (Default) In a poor network environment, the receiver's SDK will receive the kRteAbrSubscriptionLow layer video stream. 
+ */ + kRteAbrFallbackLow = 1, + /** + * 2: In a poor network environment, the SDK may first receive the kRteAbrSubscriptionLow layer, + * and if the relevant layer exists, it will revert to kRteAbrSubscriptionLayer1 to kRteAbrSubscriptionLayer6. + * If the network environment is too poor to play video, the SDK will only receive audio. + */ + kRteAbrFallbackAudioOnly = 2, + /** + * 3~8: If the receiving end sets the fallback option, the SDK will receive one of the layers from kRteAbrSubscriptionLayer1 to kRteAbrSubscriptionLayer6. + * The lower boundary of the fallback video stream is determined by the configured fallback option. + */ + kRteAbrFallbackLayer1 = 3, + kRteAbrFallbackLayer2 = 4, + kRteAbrFallbackLayer3 = 5, + kRteAbrFallbackLayer4 = 6, + kRteAbrFallbackLayer5 = 7, + kRteAbrFallbackLayer6 = 8, +} RteAbrFallbackLayer; + +/** + * Player information. + * When playerInfo changes, it will be notified through the PlayerObserver::onPlayerInfoUpdated callback interface. + * It can also be actively obtained through the Player::GetInfo interface. + * @since v4.4.0 + */ typedef struct RtePlayerInfo { + /** + * Current player state + */ RtePlayerState state; + /** + * Reserved parameter. + */ size_t duration; + /** + * Reserved parameter. + */ size_t stream_count; + /** + * Whether there is an audio stream. When opening an rte URL, it indicates whether the broadcaster has pushed audio. + */ bool has_audio; + /** + * Whether there is a video stream. When opening an rte URL, it indicates whether the broadcaster has pushed video. + */ bool has_video; + /** + * Whether the audio is muted. Indicates whether the audience has subscribed to the audio stream. + */ bool is_audio_muted; + /** + * Whether the video is muted. Indicates whether the audience has subscribed to the video stream. + */ bool is_video_muted; + /** + * Video resolution height + */ int video_height; + /** + * Video resolution width + */ int video_width; + /** + * The currently subscribed video layer + */ + RteAbrSubscriptionLayer abr_subscription_layer; + /** + * Audio sample rate + */ int audio_sample_rate; + /** + * Number of audio channels + */ int audio_channels; + /** + * Reserved parameter. + */ int audio_bits_per_sample; } RtePlayerInfo; - +/** + * Player statistics. + * Can be actively obtained through the Player::GetStats interface. 
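A hedged sketch of applying the new ABR options through the C configuration setters declared further below; how an RtePlayerConfig is normally initialized is not shown in this patch, so zero-initializing it and reading the current configuration first are assumptions.

#include "rte_base/c/c_player.h"

// Sketch only: subscribe to the low-quality layer and allow audio-only fallback under poor networks.
void configureAbrFallback(RtePlayer* player, RteError* err) {
  RtePlayerConfig config = {};  // zero-initialized; real initialization may differ
  if (RtePlayerGetConfigs(player, &config, err)) {
    RtePlayerConfigSetAbrSubscriptionLayer(&config, kRteAbrSubscriptionLow, err);
    RtePlayerConfigSetAbrFallbackLayer(&config, kRteAbrFallbackAudioOnly, err);
    RtePlayerSetConfigs(player, &config, err);
  }
}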
+ * @since v4.4.0 + */ typedef struct RtePlayerStats { - int video_decode_frame_rate; - int video_render_frame_rate; - int video_bitrate; - - int audio_bitrate; + /** + * Decoding frame rate + */ + int video_decode_frame_rate; + /** + * Rendering frame rate + */ + int video_render_frame_rate; + /** + * Video bitrate + */ + int video_bitrate; + + /** + * Audio bitrate + */ + int audio_bitrate; } RtePlayerStats; typedef struct RteMediaTrackInfo { @@ -135,6 +342,14 @@ typedef struct RtePlayerConfig { RteString *json_parameter; bool _json_parameter_is_set; + + // live player options + RteAbrSubscriptionLayer abr_subscription_layer; + bool _abr_subscription_layer_is_set; + + RteAbrFallbackLayer abr_fallback_layer; + bool _abr_fallback_layer_is_set; + } RtePlayerConfig; typedef struct RtePlayerCustomSourceProvider RtePlayerCustomSourceProvider; @@ -317,11 +532,27 @@ AGORA_RTE_API_C void RtePlayerConfigGetJsonParameter(RtePlayerConfig *config, RteString *json_parameter, RteError *err); +AGORA_RTE_API_C void RtePlayerConfigSetAbrSubscriptionLayer(RtePlayerConfig *config, + RteAbrSubscriptionLayer abr_subscription_layer, + RteError *err); + +AGORA_RTE_API_C void RtePlayerConfigGetAbrSubscriptionLayer(RtePlayerConfig *config, + RteAbrSubscriptionLayer *abr_subscription_layer, + RteError *err); + +AGORA_RTE_API_C void RtePlayerConfigSetAbrFallbackLayer(RtePlayerConfig *config, + RteAbrFallbackLayer abr_fallback_layer, + RteError *err); + +AGORA_RTE_API_C void RtePlayerConfigGetAbrFallbackLayer(RtePlayerConfig *config, + RteAbrFallbackLayer *abr_fallback_layer, + RteError *err); + AGORA_RTE_API_C RtePlayer RtePlayerCreate(Rte *self, RtePlayerInitialConfig *config, RteError *err); AGORA_RTE_API_C void RtePlayerDestroy(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerPreloadWithUrl(RtePlayer *self, const char *url, +AGORA_RTE_API_C bool RtePlayerPreloadWithUrl(RtePlayer *self, const char *url, RteError *err); AGORA_RTE_API_C void RtePlayerOpenWithUrl( @@ -341,26 +572,24 @@ AGORA_RTE_API_C void RtePlayerOpenWithStream(RtePlayer *self, RteStream *stream, AGORA_RTE_API_C void RtePlayerGetStats(RtePlayer *self, void (*cb)(RtePlayer *player, RtePlayerStats *stats, void *cb_data, RteError *err), void *cb_data); -AGORA_RTE_API_C void RtePlayerSetCanvas(RtePlayer *self, RteCanvas *canvas, RteError *err); +AGORA_RTE_API_C bool RtePlayerSetCanvas(RtePlayer *self, RteCanvas *canvas, RteError *err); -AGORA_RTE_API_C void RtePlayerPlay(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerStop(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerPause(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerSeek(RtePlayer *self, uint64_t new_time, +AGORA_RTE_API_C bool RtePlayerPlay(RtePlayer *self, RteError *err); +AGORA_RTE_API_C bool RtePlayerStop(RtePlayer *self, RteError *err); +AGORA_RTE_API_C bool RtePlayerPause(RtePlayer *self, RteError *err); +AGORA_RTE_API_C bool RtePlayerSeek(RtePlayer *self, uint64_t new_time, RteError *err); -AGORA_RTE_API_C void RtePlayerMuteAudio(RtePlayer *self, bool mute, RteError *err); -AGORA_RTE_API_C void RtePlayerMuteVideo(RtePlayer *self, bool mute, RteError *err); +AGORA_RTE_API_C bool RtePlayerMuteAudio(RtePlayer *self, bool mute, RteError *err); +AGORA_RTE_API_C bool RtePlayerMuteVideo(RtePlayer *self, bool mute, RteError *err); AGORA_RTE_API_C uint64_t RtePlayerGetPosition(RtePlayer *self, RteError *err); -AGORA_RTE_API_C void RtePlayerGetInfo(RtePlayer *self, RtePlayerInfo *info, RteError *err); +AGORA_RTE_API_C bool 
RtePlayerGetInfo(RtePlayer *self, RtePlayerInfo *info, RteError *err); -AGORA_RTE_API_C void RtePlayerGetConfigs(RtePlayer *self, +AGORA_RTE_API_C bool RtePlayerGetConfigs(RtePlayer *self, RtePlayerConfig *config, RteError *err); -AGORA_RTE_API_C void RtePlayerSetConfigs( - RtePlayer *self, RtePlayerConfig *config, - void (*cb)(RtePlayer *self, void *cb_data, RteError *err), void *cb_data); +AGORA_RTE_API_C bool RtePlayerSetConfigs(RtePlayer *self, RtePlayerConfig *config, RteError *err); AGORA_RTE_API_C bool RtePlayerRegisterObserver( diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_rte.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_rte.h similarity index 94% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_rte.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_rte.h index dec007f55..fd39f376f 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/c_rte.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/c_rte.h @@ -93,18 +93,15 @@ AGORA_RTE_API_C void RteConfigGetJsonParameter(RteConfig *config, RteError *err); AGORA_RTE_API_C Rte RteCreate(RteInitialConfig *config, RteError *err); -AGORA_RTE_API_C void RteDestroy(Rte *self, RteError *err); +AGORA_RTE_API_C bool RteDestroy(Rte *self, RteError *err); AGORA_RTE_API_C bool RteInitMediaEngine(Rte *self, void (*cb)(Rte *self, void *cb_data, RteError *err), void *cb_data, RteError *err); -AGORA_RTE_API_C void RteGetConfigs(Rte *self, RteConfig *config, RteError *err); -AGORA_RTE_API_C bool RteSetConfigs(Rte *self, RteConfig *config, - void (*cb)(Rte *self, void *cb_data, - RteError *err), - void *cb_data, RteError *err); +AGORA_RTE_API_C bool RteGetConfigs(Rte *self, RteConfig *config, RteError *err); +AGORA_RTE_API_C bool RteSetConfigs(Rte *self, RteConfig *config, RteError *err); AGORA_RTE_API_C void RteRelayStream(RteChannel *src_channel, RteRemoteStream *src_stream, diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/channel.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/channel.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/channel.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/channel.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/common.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/common.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/common.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/common.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/audio.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/audio.h similarity index 90% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/audio.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/audio.h index 91244f55d..8349eba52 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/audio.h +++ 
b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/audio.h @@ -6,9 +6,9 @@ */ #pragma once -#include "device/device.h" -#include "handle.h" -#include "../common.h" +#include "rte_base/c/device/device.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/audio_device_manager.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/audio_device_manager.h similarity index 96% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/audio_device_manager.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/audio_device_manager.h index 213138acb..7f5cdf03a 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/audio_device_manager.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/audio_device_manager.h @@ -8,10 +8,10 @@ #include -#include "../common.h" -#include "c_error.h" -#include "device/audio.h" -#include "handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/device/audio.h" +#include "rte_base/c/handle.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/device.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/device.h similarity index 87% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/device.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/device.h index 3ec850e9c..f85fe4d00 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/device.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/device.h @@ -6,8 +6,8 @@ */ #pragma once -#include "../common.h" -#include "utils/string.h" +#include "rte_base/c/common.h" +#include "rte_base/c/utils/string.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/video.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/video.h similarity index 89% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/video.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/video.h index 2f9d26e5a..a2650bd7c 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/video.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/video.h @@ -6,9 +6,9 @@ */ #pragma once -#include "device/device.h" -#include "../common.h" -#include "handle.h" +#include "rte_base/c/device/device.h" +#include "rte_base/c/common.h" +#include "rte_base/c/handle.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/video_device_manager.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/video_device_manager.h similarity index 94% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/video_device_manager.h rename to 
Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/video_device_manager.h index 785784bc0..e2ae19643 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/device/video_device_manager.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/device/video_device_manager.h @@ -8,10 +8,10 @@ #include -#include "c_error.h" -#include "device/video.h" -#include "handle.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/device/video.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/handle.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/handle.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/handle.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/handle.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/info.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/info.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/info.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/info.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/log.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/log.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/log.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/log.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/metadata.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/metadata.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/metadata.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/metadata.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/observer.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/observer.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/observer.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/observer.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/old.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/old.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/old.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/old.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/options.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/options.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/options.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/options.h diff --git 
a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/cdn_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/cdn_stream.h similarity index 87% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/cdn_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/cdn_stream.h index 89d836a89..21577f4b2 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/cdn_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/cdn_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_cdn_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_cdn_stream.h similarity index 94% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_cdn_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_cdn_stream.h index 7a60a9160..463c37310 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_cdn_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_cdn_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/cdn_stream.h" -#include "stream/local_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/cdn_stream.h" +#include "rte_base/c/stream/local_stream.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_realtime_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_realtime_stream.h similarity index 85% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_realtime_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_realtime_stream.h index 816a1a8f2..754a626d0 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_realtime_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_realtime_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/local_stream.h" -#include "stream/realtime_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/local_stream.h" +#include "rte_base/c/stream/realtime_stream.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_stream.h similarity index 88% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_stream.h index 55bac6ad8..c07abb643 100644 --- 
a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/local_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/local_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/realtime_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/realtime_stream.h similarity index 88% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/realtime_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/realtime_stream.h index 48afc1301..2f36bae12 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/realtime_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/realtime_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_cdn_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_cdn_stream.h similarity index 93% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_cdn_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_cdn_stream.h index e670e608b..d935f2eea 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_cdn_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_cdn_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/cdn_stream.h" -#include "stream/remote_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/cdn_stream.h" +#include "rte_base/c/stream/remote_stream.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_realtime_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_realtime_stream.h similarity index 85% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_realtime_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_realtime_stream.h index 438ecc047..0062d69e8 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_realtime_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_realtime_stream.h @@ -6,9 +6,9 @@ */ #pragma once -#include "../common.h" -#include "stream/realtime_stream.h" -#include "stream/remote_stream.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/realtime_stream.h" +#include "rte_base/c/stream/remote_stream.h" #ifdef __cplusplus extern "C" { diff --git 
a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_stream.h similarity index 88% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_stream.h index 954c77b77..3564604fe 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/remote_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/remote_stream.h @@ -8,10 +8,10 @@ #include -#include "c_error.h" -#include "../common.h" -#include "stream/stream.h" -#include "track/track.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/stream/stream.h" +#include "rte_base/c/track/track.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/stream.h similarity index 98% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/stream.h index 75464e1a8..ab1f035d4 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/stream/stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/stream/stream.h @@ -8,10 +8,10 @@ #include -#include "c_error.h" -#include "handle.h" -#include "observer.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/observer.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/camera_video_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/camera_video_track.h similarity index 87% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/camera_video_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/camera_video_track.h index 784c95357..bc96461f8 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/camera_video_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/camera_video_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/local_video_track.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_video_track.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/canvas.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/canvas.h similarity index 79% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/canvas.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/canvas.h index d119d09d6..d4358fb6e 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/canvas.h +++ 
b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/canvas.h @@ -6,10 +6,10 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/view.h" -#include "stream/stream.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/view.h" +#include "rte_base/c/stream/stream.h" #ifdef __cplusplus extern "C" { @@ -73,18 +73,14 @@ AGORA_RTE_API_C RteCanvas RteCanvasCreate(::Rte *rte, RteCanvasInitialConfig *co RteError *err); AGORA_RTE_API_C void RteCanvasDestroy(RteCanvas *self, RteError *err); -AGORA_RTE_API_C void RteCanvasGetConfigs(RteCanvas *self, +AGORA_RTE_API_C bool RteCanvasGetConfigs(RteCanvas *self, RteCanvasConfig *config, RteError *err); -AGORA_RTE_API_C void RteCanvasSetConfigs( - RteCanvas *self, RteCanvasConfig *config, - void (*cb)(RteCanvas *canvas, void *cb_data, RteError *err), void *cb_data); +AGORA_RTE_API_C bool RteCanvasSetConfigs(RteCanvas *self, RteCanvasConfig *config, RteError *err); -AGORA_RTE_API_C void RteCanvasAddView( - RteCanvas *self, RteView *view, RteViewConfig *config, - void (*cb)(RteCanvas *canvas, RteView *view, void *cb_data, RteError *err), - void *cb_data); +AGORA_RTE_API_C bool RteCanvasAddView( + RteCanvas *self, RteView *view, RteViewConfig *config, RteError *err); -AGORA_RTE_API_C void RteCanvasRemoveView(RteCanvas *self, RteView *view, RteViewConfig *config, RteError *err); +AGORA_RTE_API_C bool RteCanvasRemoveView(RteCanvas *self, RteView *view, RteViewConfig *config, RteError *err); #ifdef __cplusplus } diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/layout.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/layout.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/layout.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/layout.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_audio_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_audio_track.h similarity index 96% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_audio_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_audio_track.h index cb98f95e6..4c76891d5 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_audio_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_audio_track.h @@ -6,10 +6,10 @@ */ #pragma once -#include "../common.h" -#include "track/local_track.h" -#include "utils/frame.h" -#include "utils/string.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_track.h" +#include "rte_base/c/utils/frame.h" +#include "rte_base/c/utils/string.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_track.h similarity index 94% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_track.h index 3d5e4ecda..bbba90ee1 100644 --- 
a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "handle.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/handle.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_video_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_video_track.h similarity index 91% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_video_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_video_track.h index 0333b66b9..134347a73 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/local_video_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/local_video_track.h @@ -6,7 +6,7 @@ */ #pragma once -#include "track/local_track.h" +#include "rte_base/c/track/local_track.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/mic_audio_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/mic_audio_track.h similarity index 97% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/mic_audio_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/mic_audio_track.h index 2d08930f7..b73330fd5 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/mic_audio_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/mic_audio_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/local_audio_track.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_audio_track.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/mixed_video_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/mixed_video_track.h similarity index 88% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/mixed_video_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/mixed_video_track.h index ff0dcf20c..bcb98b9cc 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/mixed_video_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/mixed_video_track.h @@ -8,9 +8,9 @@ */ #include -#include "handle.h" -#include "../common.h" -#include "track/local_video_track.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/local_video_track.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_audio_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_audio_track.h similarity index 96% rename from 
Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_audio_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_audio_track.h index 936059f75..3f163516e 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_audio_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_audio_track.h @@ -8,9 +8,9 @@ #include -#include "../common.h" -#include "track/remote_track.h" -#include "utils/frame.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/remote_track.h" +#include "rte_base/c/utils/frame.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_track.h similarity index 90% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_track.h index 698134403..ca7f6cc47 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_track.h @@ -6,9 +6,9 @@ * Copyright (c) 2024 Agora IO. All rights reserved. * */ -#include "handle.h" -#include "../common.h" -#include "c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/c_error.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_video_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_video_track.h similarity index 88% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_video_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_video_track.h index c2f79e366..273c1c0a3 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/remote_video_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/remote_video_track.h @@ -6,7 +6,7 @@ */ #pragma once -#include "track/remote_track.h" +#include "rte_base/c/track/remote_track.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/screen_video_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/screen_video_track.h similarity index 88% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/screen_video_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/screen_video_track.h index f14505f3b..73de05e74 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/screen_video_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/screen_video_track.h @@ -6,11 +6,11 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/local_video_track.h" -#include "utils/rect.h" -#include "utils/string.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include 
"rte_base/c/track/local_video_track.h" +#include "rte_base/c/utils/rect.h" +#include "rte_base/c/utils/string.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/track.h similarity index 91% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/track.h index 1dbd4efb9..3d45a2017 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/c_error.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/video_track.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/video_track.h similarity index 90% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/video_track.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/video_track.h index 86e199501..9559651e2 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/video_track.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/video_track.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "track/track.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/track/track.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/view.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/view.h similarity index 93% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/view.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/view.h index 3db699114..7042eaa97 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/track/view.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/track/view.h @@ -6,9 +6,9 @@ */ #pragma once -#include "c_error.h" -#include "../common.h" -#include "utils/rect.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" +#include "rte_base/c/utils/rect.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/local_user.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/local_user.h similarity index 97% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/local_user.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/local_user.h index 67c0c90e7..b15c7bea6 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/local_user.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/local_user.h @@ -6,10 +6,10 @@ */ #pragma 
once -#include "handle.h" -#include "../common.h" -#include "user/user.h" -#include "utils/buf.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/user/user.h" +#include "rte_base/c/utils/buf.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/remote_user.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/remote_user.h similarity index 96% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/remote_user.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/remote_user.h index 98c90a7a5..b4a81379b 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/remote_user.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/remote_user.h @@ -6,9 +6,9 @@ */ #pragma once -#include "handle.h" -#include "../common.h" -#include "user/user.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/common.h" +#include "rte_base/c/user/user.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/user.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/user.h similarity index 94% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/user.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/user.h index c1c955e49..fe8f13202 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/user/user.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/user/user.h @@ -6,12 +6,12 @@ */ #pragma once -#include "c_error.h" -#include "handle.h" -#include "info.h" -#include "metadata.h" -#include "observer.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/info.h" +#include "rte_base/c/metadata.h" +#include "rte_base/c/observer.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/buf.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/buf.h similarity index 97% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/buf.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/buf.h index a93b9a40a..c3a6c0bb6 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/buf.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/buf.h @@ -9,8 +9,8 @@ #include #include -#include "c_error.h" -#include "../common.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/common.h" #ifdef __cplusplus extern "C" { diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/frame.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/frame.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/frame.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/frame.h diff --git 
a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/rect.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/rect.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/rect.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/rect.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/string.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/string.h similarity index 98% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/string.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/string.h index 5950b4526..3a3cbd8f1 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/string.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/string.h @@ -10,7 +10,7 @@ #include #include -#include "../common.h" +#include "rte_base/c/common.h" #define RTE_STRING_PRE_BUF_SIZE 256 diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/uuid.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/uuid.h similarity index 100% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/internal/c/utils/uuid.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/c/utils/uuid.h diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_callback_utils.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_callback_utils.h similarity index 61% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_callback_utils.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_callback_utils.h index b3e1ab9b5..27e1e36bc 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_callback_utils.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_callback_utils.h @@ -1,17 +1,34 @@ #pragma once #include #include "rte_cpp_error.h" -#include "internal/c/handle.h" +#include "rte_base/c/handle.h" +/** + * @technical preview + */ namespace rte { template class SingleUseCallback { - public: + public: using CallbackType = std::function; - SingleUseCallback(){}; + SingleUseCallback(){ + cb_ = nullptr; + cb_data_ = nullptr; + self_ = nullptr; + }; + + SingleUseCallback(SingleUseCallback&& other){ + cb_ = other.cb_; + cb_data_ = other.cb_data_; + self_ = other.self_; + + other.cb_ = nullptr; + other.cb_data_ = nullptr; + other.self_ = nullptr; + } void Store(T* self, CallbackType cb, void* cb_data){ self_ = self; @@ -33,6 +50,12 @@ class SingleUseCallback { return cb_ == nullptr; } + void Clear(){ + self_ = nullptr; + cb_ = nullptr; + cb_data_ = nullptr; + } + CallbackType cb_; void* cb_data_; T* self_; @@ -42,17 +65,30 @@ template class CallbackContext { public: + using CallbackTypeOnlyError = std::function; + using CallbackTypeOnlyErrorWithCppError = std::function; + using CallbackType = std::function; using CallbackTypeWithCppError = std::function; + CallbackContext(T* self, CallbackTypeOnlyError cb) + :self_(self), cb_only_error_(cb) {} + + CallbackContext(T* self, 
CallbackTypeOnlyErrorWithCppError cb) + :self_(self), cb_only_error_with_cpp_error_(cb) {} + CallbackContext(T* self, CallbackType cb, void* cb_data) :self_(self), cb_(cb), cb_data_(cb_data) {} CallbackContext(T* self, CallbackTypeWithCppError cb, void* cb_data) :self_(self), cb_with_cpp_error_(cb), cb_data_(cb_data) {} + CallbackTypeOnlyError cb_only_error_; + CallbackTypeOnlyErrorWithCppError cb_only_error_with_cpp_error_; + CallbackType cb_; CallbackTypeWithCppError cb_with_cpp_error_; + void* cb_data_; T* self_; }; @@ -61,6 +97,15 @@ template void CallbackFunc(FromeType* self, void* cb_data, RteError* err){ auto *ctx = static_cast*>(cb_data); + if(ctx->cb_only_error_ != nullptr){ + ctx->cb_only_error_(err); + } + + if(ctx->cb_only_error_with_cpp_error_ != nullptr){ + rte::Error cpp_err(err); + ctx->cb_only_error_with_cpp_error_(&cpp_err); + } + if(ctx->cb_with_cpp_error_ != nullptr){ rte::Error cpp_err(err); ctx->cb_with_cpp_error_( self != nullptr ? ctx->self_ : nullptr, ctx->cb_data_, &cpp_err); @@ -77,15 +122,29 @@ template class CallbackContextWithArgs { public: + + using CallbackTypeOnlyError = std::function; + using CallbackTypeOnlyErrorWithCppError = std::function; + using CallbackType = std::function; using CallbackTypeWithCppError = std::function; + CallbackContextWithArgs(T* self, CallbackTypeOnlyError cb) + :self_(self), cb_only_error_(cb) {} + + CallbackContextWithArgs(T* self, CallbackTypeOnlyErrorWithCppError cb) + :self_(self), cb_only_error_with_cpp_error_(cb) {} + CallbackContextWithArgs(T* self, CallbackType cb, void* cb_data) :self_(self), cb_(cb), cb_data_(cb_data) {} CallbackContextWithArgs(T* self, CallbackTypeWithCppError cb, void* cb_data) :self_(self), cb_with_cpp_error_(cb), cb_data_(cb_data) {} + + CallbackTypeOnlyError cb_only_error_; + CallbackTypeOnlyErrorWithCppError cb_only_error_with_cpp_error_; + CallbackType cb_; CallbackTypeWithCppError cb_with_cpp_error_; void* cb_data_; @@ -96,13 +155,22 @@ template void CallbackFuncWithArgs(FromeType* self, Args... args, void* cb_data, RteError* err){ auto *ctx = static_cast*>(cb_data); + if(ctx->cb_only_error_ != nullptr){ + ctx->cb_only_error_(args..., err); + } + + if(ctx->cb_only_error_with_cpp_error_ != nullptr){ + rte::Error cpp_err(err); + ctx->cb_only_error_with_cpp_error_(args..., &cpp_err); + } + if(ctx->cb_with_cpp_error_ != nullptr){ Error cpp_err(err); - ctx->cb_with_cpp_error_(ctx->self_, args..., ctx->cb_data_, &cpp_err); + ctx->cb_with_cpp_error_( self != nullptr ? ctx->self_ : nullptr, args..., ctx->cb_data_, &cpp_err); } if(ctx->cb_ != nullptr){ - ctx->cb_(ctx->self_, args..., ctx->cb_data_, err); + ctx->cb_(self != nullptr ? 
ctx->self_ : nullptr, args..., ctx->cb_data_, err); } delete ctx; } diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_canvas.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_canvas.h new file mode 100644 index 000000000..c35ce3df6 --- /dev/null +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_canvas.h @@ -0,0 +1,227 @@ +#pragma once + +#include "rte_base/c/c_player.h" +#include "rte_base/c/handle.h" +#include "rte_base/c/track/canvas.h" + +#include "rte_cpp_error.h" +#include "rte_cpp_rte.h" +#include "rte_cpp_callback_utils.h" + + +namespace rte { + +using VideoRenderMode = ::RteVideoRenderMode; +using VideoMirrorMode = ::RteVideoMirrorMode; +using ViewConfig = ::RteViewConfig; +using View = ::RteView; +using Rect = ::RteRect; + +/** + * The CanvasInitialConfig class is used to initialize the Canvas object. + * @since v4.4.0 + * @technical preview + */ +class CanvasInitialConfig { + public: + CanvasInitialConfig() {RteCanvasInitialConfigInit(&c_canvas_initial_config, nullptr);} + ~CanvasInitialConfig() {RteCanvasInitialConfigDeinit(&c_canvas_initial_config, nullptr);} + + private: + friend class Canvas; + ::RteCanvasInitialConfig c_canvas_initial_config; +}; + +/** + * The CanvasConfig class is used to configure the Canvas object. + * @since v4.4.0 + */ +class CanvasConfig { + public: + CanvasConfig() {RteCanvasConfigInit(&c_canvas_config, nullptr);} + ~CanvasConfig() {RteCanvasConfigDeinit(&c_canvas_config, nullptr);} + + /** + * Set the video render mode. + * @since v4.4.0 + * @param mode The render mode to set. Refer to the rte::VideoRenderMode type, default is kRteVideoRenderModeHidden. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: The mode parameter is set to an illegal value. + * @return void + */ + void SetRenderMode(VideoRenderMode mode, Error *err = nullptr) { + RteCanvasConfigSetVideoRenderMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the render mode. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return VideoRenderMode + */ + VideoRenderMode GetRenderMode(Error *err = nullptr) { + VideoRenderMode mode; + RteCanvasConfigGetVideoRenderMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); + return mode; + } + + /** + * Set the video mirror mode. + * @since v4.4.0 + * @param mode The mirror mode to set. Refer to the rte::VideoMirrorMode type, default is kRteVideoMirrorModeAuto. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: The mode parameter is set to an illegal value. + * @return void + */ + void SetMirrorMode(VideoMirrorMode mode, Error *err = nullptr) { + RteCanvasConfigSetVideoMirrorMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the video mirror mode. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return VideoMirrorMode The current video mirror mode. + */ + VideoMirrorMode GetMirrorMode(Error *err = nullptr) { + VideoMirrorMode mode; + RteCanvasConfigGetVideoMirrorMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); + return mode; + } + + /** + * Set the Crop Area. 
+ * @since v4.4.0 + * @param crop_area + * @param err + * @return void + * @technical preview + */ + void SetCropArea(RteRect &crop_area, Error *err = nullptr) { + RteCanvasConfigSetCropArea(&c_canvas_config, crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Crop Area. + * @since v4.4.0 + * @param err + * @return RteRect + * @technical preview + */ + RteRect GetCropArea(Error *err = nullptr) { + RteRect crop_area; + RteCanvasConfigGetCropArea(&c_canvas_config, &crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); + return crop_area; + } + + private: + friend class Canvas; + ::RteCanvasConfig c_canvas_config; +}; + +/** + * The Canvas class is used to render the video stream. + * @since v4.4.0 + */ +class Canvas { + public: + /** + * Construct a Canvas object. + * @since v4.4.0 + * @param rte Rte object. + * @param initial_config CanvasInitialConfig initialization configuration object. Currently, a null pointer can be passed. + */ + Canvas(Rte *rte, CanvasInitialConfig *initial_config = nullptr) { + c_canvas = ::RteCanvasCreate(&rte->c_rte, initial_config != nullptr ? &initial_config->c_canvas_initial_config : nullptr, nullptr); + }; + ~Canvas() { RteCanvasDestroy(&c_canvas, nullptr); }; + + Canvas(Canvas&& other) : c_canvas(other.c_canvas) { + other.c_canvas = {}; + } + + //@{ + Canvas(const Canvas& other) = delete; + Canvas& operator=(const Canvas& other) = delete; + Canvas& operator=(Canvas&& other) = delete; + //@} + + + /** + * Get the configuration of Canvas object. + * @since v4.4.0 + * @param config The object used to get the canvas config configuration. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Canvas object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The passed config object is null. + * @return bool Returns the result of getting the configuration information. + * - true: Successfully retrieved. + * - false: Failed to retrieve. + */ + bool GetConfigs(CanvasConfig *config, Error *err = nullptr) { + return RteCanvasGetConfigs(&c_canvas, &config->c_canvas_config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Configure the Canvas object. + * @since v4.4.0 + * @param config The object used to set the canvas config configuration. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Canvas object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The passed config object is null. + * @return bool Returns the result of setting the configuration information. + * - true: Successfully set the configuration. + * - false: Failed to set the configuration. + */ + bool SetConfigs(CanvasConfig *config, Error *err = nullptr) { + return RteCanvasSetConfigs(&c_canvas, &config->c_canvas_config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Add a rendering view. Currently, only one view is supported. + * @since v4.4.0 + * @param view Pointer to the View object. On the Windows platform, you can assign an HWND window handle to a View type variable and pass it to the interface. + * @param config View-related configuration. Currently, nullptr can be passed. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Canvas object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The passed view is null. 
+ * @return bool Returns the result of adding the View. + * - true: Successfully added the View. + * - false: Failed to add the View. + */ + bool AddView(View *view, ViewConfig *config, rte::Error *err = nullptr) { + return RteCanvasAddView(&c_canvas, view, config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Remove a rendering view. + * @since v4.4.0 + * @param view Pointer to the View object. + * @param config View-related configuration. Currently, nullptr can be passed. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Canvas object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The passed view is null. + * @return bool Returns the result of removing the View. + * - true: Successfully removed the View. + * - false: Failed to remove the View. + */ + bool RemoveView(View *view, ViewConfig *config, rte::Error *err = nullptr) { + return RteCanvasRemoveView(&c_canvas, view, config, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + private: + + friend class Player; + + ::RteCanvas c_canvas; +}; + +} // namespace rte \ No newline at end of file diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_error.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_error.h similarity index 65% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_error.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_error.h index 1584bf789..31afaae88 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_error.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_error.h @@ -9,8 +9,8 @@ #include #include -#include "internal/c/c_error.h" -#include "internal/c/utils/string.h" +#include "rte_base/c/c_error.h" +#include "rte_base/c/utils/string.h" namespace rte { @@ -22,11 +22,15 @@ class Config; class PlayerConfig; class CanvasConfig; +using ErrorCode = ::RteErrorCode; + +/** + * Error class. Used to record the execution result of an interface call. + * @since v4.4.0 + */ class Error { public: - using ErrorCode = ::RteErrorCode; - Error() : c_error(RteErrorCreate()) {} explicit Error(::RteError *error) : c_error(error), c_error_owned(false) {} @@ -44,11 +48,23 @@ class Error { // @} void Set(ErrorCode code, const char *message) { - RteErrorSet(c_error, code, "%s", message); + if(c_error != nullptr){ + RteErrorSet(c_error, code, "%s", message ? message : ""); + } } + /** + * This interface is used to get the specific error code. + * @since v4.4.0 + * @return ErrorCode Error code. Refer to the ErrorCode type for details. + */ ErrorCode Code() const { return c_error != nullptr ? c_error->code : kRteErrorDefault; } + /** + * This interface is used to get the specific error description.
+ * @since v4.4.0 + * @return const char* Error description + */ const char *Message() const { if(c_error != nullptr && c_error->message != nullptr){ return RteStringCStr(c_error->message, nullptr); diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_player.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_player.h new file mode 100644 index 000000000..213250176 --- /dev/null +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_player.h @@ -0,0 +1,1007 @@ +/** + * + * Agora Real Time Engagement + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +#pragma once +#include +#include + +#include "rte_base/c/c_rte.h" +#include "rte_base/c/c_player.h" + +#include "rte_cpp_error.h" +#include "rte_cpp_callback_utils.h" +#include "rte_cpp_canvas.h" +#include "rte_cpp_string.h" +#include "rte_cpp_stream.h" + +namespace rte { + +using PlayerState = ::RtePlayerState; +using PlayerEvent = ::RtePlayerEvent; +using PlayerMetadataType = ::RtePlayerMetadataType; +using PlayerInfo = ::RtePlayerInfo; +using PlayerStats = ::RtePlayerStats; +using PlayerCustomSourceProvider = ::RtePlayerCustomSourceProvider; +using AbrSubscriptionLayer = ::RteAbrSubscriptionLayer; +using AbrFallbackLayer = ::RteAbrFallbackLayer; + +class PlayerInitialConfig {}; + +static void onStateChanged(::RtePlayerObserver *observer, + RtePlayerState old_state, RtePlayerState new_state, + RteError *err); + +static void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, + uint64_t utc_time); + +static void onResolutionChanged(::RtePlayerObserver *observer, int width, int height); + +static void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event); + +static void onMetadata(::RtePlayerObserver *observer, ::RtePlayerMetadataType type, + const uint8_t *data, size_t length); + +static void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info); + +static void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume); + + +/** + * The PlayerObserver class is used to observe the event of Player object. + * @since v4.4.0 + */ +class PlayerObserver { + public: + PlayerObserver() : c_player_observer(::RtePlayerObserverCreate(nullptr)) { + + c_player_observer->base_observer.me_in_target_lang = this; + + c_player_observer->on_state_changed = rte::onStateChanged; + c_player_observer->on_position_changed = rte::onPositionChanged; + c_player_observer->on_resolution_changed = rte::onResolutionChanged; + c_player_observer->on_event = rte::onEvent; + c_player_observer->on_metadata = rte::onMetadata; + c_player_observer->on_player_info_updated = rte::onPlayerInfoUpdated; + c_player_observer->on_audio_volume_indication = rte::onAudioVolumeIndication; + } + virtual ~PlayerObserver(){ RtePlayerObserverDestroy(c_player_observer, nullptr); } + + // @{ + PlayerObserver(PlayerObserver &other) = delete; + PlayerObserver(PlayerObserver &&other) = delete; + PlayerObserver &operator=(const PlayerObserver &cmd) = delete; + PlayerObserver &operator=(PlayerObserver &&cmd) = delete; + // @} + + /** + * Player state callback. This function is called when the player state changes. + * @since v4.4.0 + * @param old_state The old state. + * @param new_state The new state. + * @param err Possible return values for ErrorCode. Only when the new_state value is kRtePlayerStateFailed, you need to check the value of this parameter. + * - kRteErrorDefault. 
For specific reasons, see Error.Message, including the following situations: + * - Failed to connect to the channel. + * - kRteErrorInvalidArgument. + * - Invalid appid. + * - Invalid channelid. + * - Invalid uid. + * - kRteErrorAuthenticationFailed. + * - Invalid token. + * - Token expired. + * - kRteErrorStreamNotFound. After entering the channel, no stream was received from the broadcaster for more than 10 seconds. + * @return void + */ + virtual void onStateChanged(PlayerState old_state, PlayerState new_state, + rte::Error *err) {}; + + /** + * This callback will be triggered when the playback position changed. + * @since v4.4.0 + * @param curr_time + * @param utc_time + */ + virtual void onPositionChanged(uint64_t curr_time, + uint64_t utc_time) {}; + + /** + * Video resolution change callback. + * @since v4.4.0 + * @param width The width of the video frame. + * @param height The height of the video frame. + * @return void + */ + virtual void onResolutionChanged(int width, int height) {}; + + /** + * Event callback. + * @since v4.4.0 + * @param event The event notified by the callback. Refer to the rte::PlayerEvent type. Currently, the following events can be handled accordingly: + * - kRtePlayerEventFreezeStart: Indicates that stuttering has occurred or shows a loading animation. + * - kRtePlayerEventFreezeStop: Indicates that stuttering has ended or stops the loading animation. + * - kRtePlayerEventAuthenticationWillExpire: Regenerate the token, use the new token to construct the RTE URL, and call Player::OpenWithUrl to refresh the token. + * - kRtePlayerEventAbrFallbackToAudioOnlyLayer: Indicates that due to network reasons, it has fallen back to audio-only mode. + * - kRtePlayerEventAbrRecoverFromAudioOnlyLayer: Indicates that it has recovered from audio-only mode to video mode. + * @return void + */ + virtual void onEvent(PlayerEvent event) {}; + + /** + * Metadata callback. + * @since v4.4.0 + * @param type The type of metadata. + * @param data The metadata buffer. + * @param length The length of the metadata. + * @return void + */ + virtual void onMetadata(PlayerMetadataType type, + const uint8_t *data, size_t length) {}; + + /** + * Player information update callback. This is called when fields in rte::PlayerInfo are updated. + * @since v4.4.0 + * @param info The current PlayerInfo information. + * @return void + */ + virtual void onPlayerInfoUpdated(const PlayerInfo *info) {}; + + /** + * Broadcaster audio volume update callback. + * @since v4.4.0 + * @param volume The current volume of the Broadcaster. The value range is [0, 255]. 
+ * @return void + */ + virtual void onAudioVolumeIndication(int32_t volume) {}; + + private: + friend class Player; + + ::RtePlayerObserver *c_player_observer; +}; + +void onStateChanged(::RtePlayerObserver *observer, + RtePlayerState old_state, RtePlayerState new_state, + RteError *err){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + Error cpp_err(err); + player_observer->onStateChanged(old_state, new_state, &cpp_err); + } +} +void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, + uint64_t utc_time){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onPositionChanged(curr_time, utc_time); + } +} + +void onResolutionChanged(::RtePlayerObserver *observer, int width, int height){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onResolutionChanged(width, height); + } +} + +void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onEvent(event); + } +} + +void onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, + const uint8_t *data, size_t length){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onMetadata(type, data, length); + } +} + +void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onPlayerInfoUpdated(info); + } +} + +void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume){ + auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); + if (player_observer != nullptr){ + player_observer->onAudioVolumeIndication(volume); + } +} + +/** + * The PlayerConfig class is used to configure the Player object. + * @since v4.4.0 + */ +class PlayerConfig { + public: + PlayerConfig() { RtePlayerConfigInit(&c_player_config, nullptr); } + ~PlayerConfig() { RtePlayerConfigDeinit(&c_player_config, nullptr); } + + // @{ + PlayerConfig(PlayerConfig &other) = delete; + PlayerConfig(PlayerConfig &&other) = delete; + PlayerConfig &operator=(PlayerConfig &&cmd) = delete; + + PlayerConfig &operator=(const PlayerConfig &other) { + RtePlayerConfigCopy(&c_player_config, &other.c_player_config, nullptr); + return *this; + }; + + PlayerConfig &operator=(const RtePlayerConfig* other) { + RtePlayerConfigCopy(&c_player_config, other, nullptr); + return *this; + }; + // @} + + /** + * Whether to automatically play after a successful call to Player::OpenWithUrl. + * If not set, the default value is true. + * @since v4.4.0 + * @param auto_play + * - true: Automatically start streaming and playing after a successful opening. + * - false: After a successful open with OpenWithUrl, you need to actively call Player::Play() to play the audio and video stream. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return void + */ + void SetAutoPlay(bool auto_play, Error *err = nullptr) { + RtePlayerConfigSetAutoPlay(&c_player_config, auto_play, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the auto-play setting. 
+ * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return bool Returns whether auto-play is enabled. + */ + bool GetAutoPlay(Error *err = nullptr) { + bool auto_play; + RtePlayerConfigGetAutoPlay(&c_player_config, &auto_play, + err != nullptr ? err->get_underlying_impl() : nullptr); + return auto_play; + } + + /** + * Set the playback speed parameter. + * @since v4.4.0 + * @param speed + * @param err + * @return void + * @technical preview + */ + void SetPlaybackSpeed(int32_t speed, Error *err = nullptr) { + RtePlayerConfigSetPlaybackSpeed(&c_player_config, speed, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the playback speed parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPlaybackSpeed(Error *err = nullptr) { + int32_t speed; + RtePlayerConfigGetPlaybackSpeed(&c_player_config, &speed, + err != nullptr ? err->get_underlying_impl() : nullptr); + return speed; + } + + /** + * Set the playout audio track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetPlayoutAudioTrackIdx(int idx, Error *err = nullptr) { + RtePlayerConfigSetPlayoutAudioTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the playout audio track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPlayoutAudioTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetPlayoutAudioTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the publish audio track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetPublishAudioTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetPublishAudioTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the publish audio track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPublishAudioTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetPublishAudioTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the audio track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetAudioTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetAudioTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAudioTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetAudioTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the subtitle track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetSubtitleTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetSubtitleTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the subtitle track index parameter. 
+ * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetSubtitleTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetSubtitleTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the external subtitle track index parameter. + * @since v4.4.0 + * @param idx + * @param err + * @return void + * @technical preview + */ + void SetExternalSubtitleTrackIdx(int32_t idx, Error *err = nullptr) { + RtePlayerConfigSetExternalSubtitleTrackIdx(&c_player_config, idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the external subtitle track index parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetExternalSubtitleTrackIdx(Error *err = nullptr) { + int32_t idx; + RtePlayerConfigGetExternalSubtitleTrackIdx(&c_player_config, &idx, + err != nullptr ? err->get_underlying_impl() : nullptr); + return idx; + } + + /** + * Set the audio pitch parameter. + * @since v4.4.0 + * @param audio_pitch + * @param err + * @return void + * @technical preview + */ + void SetAudioPitch(int32_t audio_pitch, Error *err = nullptr) { + RtePlayerConfigSetAudioPitch(&c_player_config, audio_pitch, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio pitch parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAudioPitch(Error *err = nullptr) { + int32_t audio_pitch; + RtePlayerConfigGetAudioPitch(&c_player_config, &audio_pitch, + err != nullptr ? err->get_underlying_impl() : nullptr); + return audio_pitch; + } + + /** + * Set the playout volume parameter. + * @since v4.4.0 + * @param volume + * @param err + * @return void + * @technical preview + */ + void SetPlayoutVolume(int32_t volume, Error *err = nullptr) { + RtePlayerConfigSetPlayoutVolume(&c_player_config, volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the playout volume parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPlayoutVolume(Error *err = nullptr) { + int32_t volume; + RtePlayerConfigGetPlayoutVolume(&c_player_config, &volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + return volume; + } + + /** + * Set the audio playback delay parameter. + * @since v4.4.0 + * @param delay + * @param err + * @return void + * @technical preview + */ + void SetAudioPlaybackDelay(int32_t delay, Error *err = nullptr) { + RtePlayerConfigSetAudioPlaybackDelay(&c_player_config, delay, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio playback delay parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAudioPlaybackDelay(Error *err = nullptr) { + int32_t delay; + RtePlayerConfigGetAudioPlaybackDelay(&c_player_config, &delay, + err != nullptr ? err->get_underlying_impl() : nullptr); + return delay; + } + + /** + * Set the audio dual mono mode parameter. + * @since v4.4.0 + * @param mode + * @param err + * @return void + * @technical preview + */ + void SetAudioDualMonoMode(RteAudioDualMonoMode mode, Error *err = nullptr) { + RtePlayerConfigSetAudioDualMonoMode(&c_player_config, mode, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the audio dual mono mode parameter.
+ * @since v4.4.0 + * @param err + * @return RteAudioDualMonoMode + * @technical preview + */ + RteAudioDualMonoMode GetAudioDualMonoMode(Error *err = nullptr) { + RteAudioDualMonoMode mode; + RtePlayerConfigGetAudioDualMonoMode(&c_player_config, &mode, + err != nullptr ? err->get_underlying_impl() : nullptr); + return mode; + } + + /** + * Set the publish volume parameter. + * @since v4.4.0 + * @param volume + * @param err + * @return void + * @technical preview + */ + void SetPublishVolume(int32_t volume, Error *err = nullptr) { + RtePlayerConfigSetPublishVolume(&c_player_config, volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the publish volume parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetPublishVolume(Error *err = nullptr) { + int32_t volume; + RtePlayerConfigGetPublishVolume(&c_player_config, &volume, + err != nullptr ? err->get_underlying_impl() : nullptr); + return volume; + } + + /** + * Set the loop count parameter. + * @since v4.4.0 + * @param count + * @param err + * @return void + * @technical preview + */ + void SetLoopCount(int32_t count, Error *err = nullptr) { + RtePlayerConfigSetLoopCount(&c_player_config, count, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the loop count parameter. + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetLoopCount(Error *err = nullptr) { + int32_t count; + RtePlayerConfigGetLoopCount(&c_player_config, &count, + err != nullptr ? err->get_underlying_impl() : nullptr); + return count; + } + + /** + * Set player private parameters. This parameter setting can be done according to actual needs, referring to the suggestions of Agora SA. + * @since v4.4.0 + * @param json_parameter JSON formatted string + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: Indicates that the json_parameter parameter is empty. + * @return void + */ + void SetJsonParameter(const char *json_parameter, Error *err = nullptr) { + String str(json_parameter); + RtePlayerConfigSetJsonParameter(&c_player_config, str.get_underlying_impl(), + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the currently configured private parameters of the player. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return std::string + */ + std::string GetJsonParameter(Error *err = nullptr) { + String str; + RtePlayerConfigGetJsonParameter(&c_player_config, str.get_underlying_impl(), + err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.Cstr()); + } + + /** + * Set the ABR subscription layer. + * If ABR is not enabled, the audience can only switch the high and low video stream in the origin channel. After enabling it, the audience can switch any layer in the abr channel. + * @since v4.4.0 + * @param abr_subscription_layer The layer to subscribe to. Refer to the rte::AbrSubscriptionLayer enumeration values for details. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: An illegal AbrSubscriptionLayer value was set. + * @return void + */ + void SetAbrSubscriptionLayer(AbrSubscriptionLayer abr_subscription_layer, Error *err = nullptr) { + RtePlayerConfigSetAbrSubscriptionLayer(&c_player_config, abr_subscription_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the ABR subscription layer. 
+ * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return AbrSubscriptionLayer The currently set subscription layer. + */ + AbrSubscriptionLayer GetAbrSubscriptionLayer(Error *err = nullptr) { + AbrSubscriptionLayer abr_subscription_layer; + RtePlayerConfigGetAbrSubscriptionLayer(&c_player_config, &abr_subscription_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + return abr_subscription_layer; + } + + /** + * Set the ABR fallback layer option. + * If ABR is not enabled, after calling this method, the audience can only set kRteAbrFallbackDisabled ~ kRteAbrFallbackAudioOnly in the original channel. + * After enabling it, the audience can switch all values of AbrFallbackLayer in the abr channel. + * @since v4.4.0 + * @param abr_fallback_layer The ABR fallback option to set. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: An illegal AbrFallbackLayer value was set. Check the value of the passed abr_fallback_layer parameter. + * @return void + */ + void SetAbrFallbackLayer(AbrFallbackLayer abr_fallback_layer, Error *err = nullptr) { + RtePlayerConfigSetAbrFallbackLayer(&c_player_config, abr_fallback_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + + /** + * Get the ABR fallback layer option. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return AbrFallbackLayer The currently set ABR fallback option. + */ + AbrFallbackLayer GetAbrFallbackLayer(Error *err = nullptr) { + AbrFallbackLayer abr_fallback_layer; + RtePlayerConfigGetAbrFallbackLayer(&c_player_config, &abr_fallback_layer, + err != nullptr ? err->get_underlying_impl() : nullptr); + return abr_fallback_layer; + } + + + ::RtePlayerConfig* get_underlying_impl() { return &c_player_config; } + + private: + friend class Player; + + ::RtePlayerConfig c_player_config; +}; + +/** + * The Player class can be used to play URL resources. + * @since v4.4.0 + */ +class Player { + public: +/** + * Construct a Player object. + * @since v4.4.0 + * @param rte Rte object. + * @param config PlayerInitialConfig initialization configuration object. Currently, a null pointer can be passed. + */ + explicit Player(Rte *self, PlayerInitialConfig *config = nullptr) + : c_player(::RtePlayerCreate(&self->c_rte, nullptr, nullptr)) {}; + ~Player() { + RtePlayerDestroy(&c_player, nullptr); + }; + + Player(Player &other) = default; + Player(Player &&other) = default; + + // @{ + Player &operator=(const Player &cmd) = delete; + Player &operator=(Player &&cmd) = delete; + // @} + + /** + * Preload URL, only valid for rte type URLs. This interface can speed up the OpenWithUrl operation. Up to 20 URLs can be preloaded. If the limit is exceeded, new preloads will replace old ones. + * @since v4.4.0 + * @param url rte type URL + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: The passed URL is empty or has an invalid format. + * @return bool Whether the preload operation was successful. + * - true: Successfully preload the Rte URL. + * - false: Failed to preload the Rte URL. + */ + static bool PreloadWithUrl(const char* url, Error *err = nullptr) { + return RtePlayerPreloadWithUrl(nullptr, url, err != nullptr ? err->get_underlying_impl() : nullptr); + }; + + /** + * Open URL resource. Currently, only rte URLs are supported, and cdn URLs and files are not supported. 
+ * This interface can also be used to refresh the token of an already opened URL. + * For URL format definition and token refresh method description, refer to the doc: + * https://doc.shengwang.cn/doc/rtc/android/best-practice/playing-url + * @since v4.4.0 + * @param url The URL resource to open + * @param start_time Start time [currently not supported] + * @param cb Callback to asynchronously notify the result of the open operation. If an error occurs during open, it will enter the kRtePlayerStateFailed state. You need to call the Stop method before calling OpenWithUrl again. + * @param err Possible return values for ErrorCode. At this time, the new_state value corresponds to kRtePlayerStateFailed. + * - kRteOk: Success + * - kRteErrorDefault: For specific reasons, see Error.Message, including the following situations: + * - Failed to connect to the channel + * - kRteErrorInvalidArgument: + * - Invalid appid + * - Invalid channelid + * - Invalid uid + * - kRteErrorAuthenticationFailed: + * - Invalid token + * - Token expired + * - kRteErrorInvalidOperation: + * - Engine not initialized + * @return void + */ + void OpenWithUrl(const char* url, uint64_t start_time, std::function cb) { + CallbackContext* callbackCtx = new CallbackContext(this, cb); + RtePlayerOpenWithUrl(&c_player, url, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); + }; + + /** + * Open a custom source provider. + * @since v4.4.0 + * @param provider + * @param start_time + * @param cb + * @return void + * @technical preview + */ + void OpenWithCustomSourceProvider(PlayerCustomSourceProvider* provider, uint64_t start_time, + std::function cb) { + CallbackContext* callbackCtx = new CallbackContext(this, cb); + RtePlayerOpenWithCustomSourceProvider(&c_player, provider, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); + }; + + /** + * Open a stream. + * @since v4.4.0 + * @param stream + * @param cb + * @return void + * @technical preview + */ + void OpenWithStream(Stream* stream, std::function cb) { + CallbackContext* callbackCtx = new CallbackContext(this, cb); + RtePlayerOpenWithStream(&c_player, stream != nullptr ? &stream->c_rte_stream : nullptr, &CallbackFunc<::RtePlayer, Player>, callbackCtx); + }; + + /** + * Get player playback statistics. + * @since v4.4.0 + * @param cb Asynchronous callback for statistical data. + * @param stats Statistical values. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * @return void + */ + void GetStats(std::function cb){ + CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb); + RtePlayerGetStats(&c_player, &CallbackFuncWithArgs<::RtePlayer, Player, rte::PlayerStats*>, ctx); + } + + /** + * Set canvas. After the stream is successfully pulled, the video frame will be rendered on the set canvas. + * @since v4.4.0 + * @param canvas The canvas object used to render video frames. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidArgument: The canvas is null. + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * @return bool The result of the SetCanvas operation. If it fails, you can check the specific error through err. + * - true: Successfully set the canvas. + * - false: Failed to set the canvas. + */ + bool SetCanvas(Canvas *canvas, Error *err = nullptr) { + return RtePlayerSetCanvas(&c_player, canvas != nullptr ? &canvas->c_canvas : nullptr, err != nullptr ? 
err->get_underlying_impl() : nullptr); + }; + + /** + * Start stream playback. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * @return bool The result of the Play operation. If it fails, you can check the specific error through err. + * - true: Successfully play. + * - false: Failed to play. + */ + bool Play(Error *err = nullptr) { + return RtePlayerPlay(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Stop playback. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * @return bool The result of the Stop operation. If it fails, you can check the specific error through err. + * - true: Successfully stop. + * - false: Failed to stop. + */ + bool Stop(Error *err = nullptr) { + return RtePlayerStop(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Pause playback. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * @return bool The result of the Pause operation. If it fails, you can check the specific error through err. + * - true: Successfully pause. + * - false: Failed to pause. + */ + bool Pause(Error *err = nullptr) { + return RtePlayerPause(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Seek the playback position. + * @since v4.4.0 + * @param new_time The new playback position to seek to. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * @return bool The result of the Seek operation. If it fails, you can check the specific error through err. + * - true: Successfully seek. + * - false: Failed to seek. + * @technical preview + */ + bool Seek(uint64_t new_time, Error *err = nullptr) { + return RtePlayerSeek(&c_player, new_time, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Mute/unmute audio separately. + * @since v4.4.0 + * @param mute Whether to mute. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * @return bool The result of the MuteAudio operation. If it fails, you can check the specific error through err. + * - true: The mute operation was successful. + * - false: The mute operation failed. + */ + bool MuteAudio(bool mute, Error *err = nullptr) { + return RtePlayerMuteAudio(&c_player, mute, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Mute/unmute video separately. + * @since v4.4.0 + * @param mute Whether to mute. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * @return bool The result of the MuteVideo operation. If it fails, you can check the specific error through err. + * - true: The mute operation was successful. + * - false: The mute operation failed. + */ + bool MuteVideo(bool mute, Error *err = nullptr) { + return RtePlayerMuteVideo(&c_player, mute, err != nullptr ?
err->get_underlying_impl() : nullptr); + } + + /** + * Get the playback position. + * @since v4.4.0 + * @param err + * @return uint64_t + * @technical preview + */ + uint64_t GetPosition(Error *err = nullptr){ + return RtePlayerGetPosition(&c_player, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get player information. + * @since v4.4.0 + * @param info The object used to receive player information. After the interface call is successful, the player information will be copied to the info object. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The info object is null. + * @return bool The result of the GetInfo operation. If it fails, you can check the specific error through err. + * - true: Successfully get the player information. + * - false: Failed to get the player information. + */ + bool GetInfo(PlayerInfo *info, Error *err = nullptr){ + return RtePlayerGetInfo(&c_player, info, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the configuration of Player object. + * @since v4.4.0 + * @param config The object used to receive PlayerConfig information. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The config object is null. + * @return bool The result of the GetConfigs operation. If it fails, you can check the specific error through err. + * - true: Successfully get the configuration. + * - false: Failed to get the configuration. + */ + bool GetConfigs(PlayerConfig* config, Error *err = nullptr) { + return RtePlayerGetConfigs(&c_player, config->get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Configure the Player object. + * @since v4.4.0 + * @param config The object used to change the player configuration. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The config object is null. + * @return bool The result of the SetConfigs operation. If it fails, you can check the specific error through err. + * - true: Successfully set the configuration. + * - false: Failed to set the configuration. + */ + bool SetConfigs(PlayerConfig* config, Error *err = nullptr) { + return RtePlayerSetConfigs(&c_player, config->get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Register player observer. + * @since v4.4.0 + * @param observer The object used to receive player-related callbacks. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The observer object is null. + * @return bool The result of the RegisterObserver operation. If it fails, you can check the specific error through err. + * - true: Successfully register the observer. + * - false: Failed to register the observer. + */ + bool RegisterObserver(PlayerObserver *observer, Error *err = nullptr) { + return RtePlayerRegisterObserver( + &c_player, observer != nullptr ? observer->c_player_observer : nullptr, err != nullptr ? 
err->get_underlying_impl() : nullptr); + } + +/** + * Unregister player observer. + * @since v4.4.0 + * @param observer The object used to receive player-related callbacks. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Player object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The observer object is null. + * @return bool The result of the UnregisterObserver operation. If it fails, you can check the specific error through err. + * - true: Successfully unregister the observer. + * - false: Failed to unregister the observer. + */ + bool UnregisterObserver(PlayerObserver *observer, Error *err = nullptr){ + return RtePlayerUnregisterObserver(&c_player, observer != nullptr ? observer->c_player_observer : nullptr, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + private: + ::RtePlayer c_player; +}; + +} // namespace rte diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_rte.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_rte.h new file mode 100644 index 000000000..7c51e346d --- /dev/null +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_rte.h @@ -0,0 +1,403 @@ +/** + * + * Agora Real Time Engagement + * Copyright (c) 2024 Agora IO. All rights reserved. + * + */ +#pragma once +#include +#include "rte_base/c/c_rte.h" +#include "rte_base/c/bridge.h" + +#include "rte_cpp_error.h" +#include "rte_cpp_callback_utils.h" +#include "rte_cpp_string.h" + +struct RteObserver; +struct RteInitialConfig; +struct RteConfig; + +namespace rte { + +class Player; + +/** + * The InitialConfig class is used to initialize the Rte object. + * @since v4.4.0 + * @technical preview + */ +class InitialConfig { + public: + InitialConfig() { RteInitialConfigInit(&c_rte_init_cfg, nullptr); } + ~InitialConfig() { RteInitialConfigDeinit(&c_rte_init_cfg, nullptr);} + + private: + friend class Rte; + ::RteInitialConfig c_rte_init_cfg; +}; + +/** + * The Observer class is used to observe the event of Rte object. + * @since v4.4.0 + * @technical preview + */ +class Observer { + public: + Observer(): c_rte_observer(::RteObserverCreate(nullptr)) { + c_rte_observer->base_observer.me_in_target_lang = this;} + ~Observer() { RteObserverDestroy(c_rte_observer, nullptr); } + + // @{ + Observer(Observer &other) = delete; + Observer(Observer &&other) = delete; + Observer &operator=(const Observer &cmd) = delete; + Observer &operator=(Observer &&cmd) = delete; + // @} + + private: + friend class Rte; + + ::RteObserver *c_rte_observer; +}; + +/** + * The Config class is used to configure the Rte object. + * @since v4.4.0 + */ +class Config { + public: + Config() {RteConfigInit(&c_rte_config, nullptr);} + ~Config() {RteConfigDeinit(&c_rte_config, nullptr);} + + // @{ + Config(Config &other) = delete; + Config(Config &&other) = delete; + Config &operator=(const Config &cmd) = delete; + Config &operator=(Config &&cmd) = delete; + // @} + + /** + * Set the App ID Parameter, which is used to initialize the engine. This field value needs to be set before calling Rte::InitMediaEngine to initialize the engine. + * If not set, the default value is an empty string. + * @since v4.4.0 + * @param app_id Your project's App ID + * @param err Possible return of the following ErrorCode + * - kRteOk: Success + * - kRteErrorInvalidArgument: Indicates that the app_id parameter is empty. 
+ * @return void + */ + void SetAppId(const char *app_id, Error *err = nullptr){ + String str(app_id); + RteConfigSetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the App ID Parameter. + * @since v4.4.0 + * @param err Possible return of the following ErrorCode + * - kRteOk: Success + * @return std::string The AppId value + */ + std::string GetAppId(Error *err = nullptr){ + String str; + RteConfigGetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.Cstr()); + } + + + /** + * Set the Log Folder Parameter + * @since v4.4.0 + * @param log_folder + * @param err + * @technical preview + */ + void SetLogFolder(const char *log_folder, Error *err = nullptr){ + String str(log_folder); + RteConfigSetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + + /** + * Get the Log Folder Parameter + * @since v4.4.0 + * @param err + * @return const char* + * @technical preview + */ + std::string GetLogFolder(Error *err = nullptr){ + String str; + RteConfigGetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.Cstr()); + } + + /** + * Set the Log File Size Parameter + * @since v4.4.0 + * @param log_file_size + * @param err + * @technical preview + */ + void SetLogFileSize(size_t log_file_size, Error *err = nullptr){ + RteConfigSetLogFileSize(&c_rte_config, log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Log File Size Parameter + * @since v4.4.0 + * @param err + * @return size_t + * @technical preview + */ + size_t GetLogFileSize(Error *err = nullptr){ + size_t log_file_size; + RteConfigGetLogFileSize(&c_rte_config, &log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); + return log_file_size; + } + + /** + * Set the Area Code Parameter + * @since v4.4.0 + * @param area_code + * @param err + * @technical preview + */ + void SetAreaCode(int32_t area_code, Error *err = nullptr){ + RteConfigSetAreaCode(&c_rte_config, area_code, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Area Code Parameter + * @since v4.4.0 + * @param err + * @return int32_t + * @technical preview + */ + int32_t GetAreaCode(Error *err = nullptr){ + int32_t area_code; + RteConfigGetAreaCode(&c_rte_config, &area_code, err != nullptr ? err->get_underlying_impl() : nullptr); + return area_code; + } + + /** + * Set the Cloud Proxy Parameter + * @since v4.4.0 + * @param cloud_proxy + * @param err + * @technical preview + */ + void SetCloudProxy(const char *cloud_proxy, Error *err = nullptr){ + String str(cloud_proxy); + RteConfigSetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the Cloud Proxy Parameter + * @since v4.4.0 + * @param err + * @return const char* + * @technical preview + */ + std::string GetCloudProxy(Error *err = nullptr){ + String str; + RteConfigGetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.Cstr()); + } + + /** + * Set Json format parameters, usually used to set some private parameters supported by rte. 
+ * @since v4.4.0 + * @param json_parameter json format parameter set + * @param err Possible return of the following ErrorCode + * - kRteOk: Success + * - kRteErrorInvalidArgument: Indicates that the json_parameter parameter is empty. + * @return void + */ + + void SetJsonParameter(const char *json_parameter, Error *err = nullptr){ + String str(json_parameter); + RteConfigSetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Get the currently configured private parameters of the Rte. + * + * @since v4.4.0 + * @param err Possible return of the following error codes: + * - kRteOk: Success + * @return std::string Returns the set JSON format parameter set. + */ + std::string GetJsonParameter(Error *err = nullptr){ + String str; + RteConfigGetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); + return std::string(str.Cstr()); + } + + private: + ::RteConfig* get_underlying_impl() { return &c_rte_config; } + + private: + friend class Rte; + ::RteConfig c_rte_config; +}; + +/** + * The Rte class, which is the base interface of the Agora Real Time Engagement SDK. + * @since v4.4.0 + */ +class Rte { + public: + + /** + * Create an Rte object from the rtc bridge. Used in scenarios where the rtc engine has already been initialized, + * which can save the operation of initializing the rte engine. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: Indicates that the rtc engine instance has not been created or the rtc engine has not been initialized. + * Unable to bridge rte from rtc engine. + * @return Rte object. If the Rte object is invalid, subsequent operations on Rte will return an error. + */ + static Rte GetFromBridge(Error* err = nullptr){ + Rte rte( RteGetFromBridge(err != nullptr ? err->get_underlying_impl() : nullptr)); + return rte; + } + + /** + * Construct an Rte object. + * @since v4.4.0 + * @param config Rte object initialization configuration object. + */ + explicit Rte(InitialConfig *config = nullptr): c_rte(::RteCreate(config != nullptr ? &config->c_rte_init_cfg : nullptr, nullptr)) {} + ~Rte(){Destroy();}; + + /** + * Construct a new Rte object. + * + * @param other + */ + Rte(Rte &&other) : c_rte(other.c_rte) { + other.c_rte = {}; + } + + // @{ + Rte(Rte &other) = delete; + Rte &operator=(const Rte &other) = delete; + Rte &operator=(Rte &&other) = delete; + // @} + + /** + * Register an RTE observer. + * @since v4.4.0 + * @param observer The object that observes RTE callback events. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal RTE object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The registered observer object is null. + * @return bool + * - true: Registration is successful. + * - false: Registration failed. + * @technical preview + */ + bool RegisterObserver(Observer *observer, Error *err = nullptr){ + return RteRegisterObserver(&c_rte, observer != nullptr ? observer->c_rte_observer : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Unregister the RTE observer object. + * @since v4.4.0 + * @param observer The object that observes RTE callback events. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal RTE object has been destroyed or is invalid. 
+ * - kRteErrorInvalidArgument: The unregistered observer object is null. + * @return bool + * - true: Unregistration is successful. + * - false: Unregistration failed. + * @technical preview + */ + bool UnregisterObserver(Observer *observer, Error *err = nullptr){ + return RteUnregisterObserver(&c_rte, observer != nullptr ? observer->c_rte_observer : nullptr, + err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Initialize the media engine. + * + * @param cb Asynchronous callback function that returns the result of engine initialization. + * - @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorDefault: Engine initialization failed, specific error reason can be obtained through Error.Message(). + * - kRteErrorInvalidOperation: Rte object created through GetFromBridge, initialization is not allowed. + * + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorDefault: Engine initialization failed, specific error description can be obtained through Error.Message(). + * - kRteErrorInvalidOperation: The corresponding internal Rte object has been destroyed or is invalid. + * @return bool Returns whether the asynchronous operation was successfully placed in the asynchronous operation queue, not whether the initialization action was successful. + * - true: Asynchronous initialization action executed normally. + * - false: Asynchronous initialization action did not execute normally. + */ + bool InitMediaEngine(std::function cb, Error *err = nullptr){ + auto* ctx = new CallbackContext(this, cb); + return RteInitMediaEngine(&c_rte, &CallbackFunc<::Rte, Rte>, ctx, err != nullptr ? err->get_underlying_impl() : nullptr); + } + +/** + * Get the configuration of Rte object. + * @since v4.4.0 + * @param config The object used to get the rte config configuration. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Rte object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The passed config object is null. + * @return bool Returns the result of getting the configuration information. + * - true: Successfully retrieved. + * - false: Failed to retrieve. + */ + bool GetConfigs(Config *config, Error *err = nullptr){ + return RteGetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Configure the Rte object. + * @since v4.4.0 + * @param config The object used to set the rte config configuration. + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Rte object has been destroyed or is invalid. + * - kRteErrorInvalidArgument: The passed config object is null. + * @return bool Returns the result of setting the configuration information. + * - true: Successfully set the configuration. + * - false: Failed to set the configuration. + */ + bool SetConfigs(Config *config, Error *err = nullptr){ + return RteSetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + /** + * Destroy the Rte object. The operation will release all resources used by the Rte object. + * @since v4.4.0 + * @param err Possible return values for ErrorCode: + * - kRteOk: Success + * - kRteErrorInvalidOperation: The corresponding internal Rte object has been destroyed or is invalid. 
+ * @return bool Returns the result of destroying the Rte object. + * - true: Successfully destroyed. + * - false: Failed to destroy. + */ + bool Destroy(Error *err = nullptr){ + return RteDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); + } + + private: + + explicit Rte(::Rte other) { c_rte = other; } + + private: + friend class Player; + friend class Canvas; + + ::Rte c_rte; +}; + +} // namespace rte diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_stream.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_stream.h similarity index 69% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_stream.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_stream.h index 703feba05..1f646ad16 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_stream.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_stream.h @@ -5,10 +5,15 @@ * */ #pragma once -#include "internal/c/stream/stream.h" +#include "rte_base/c/stream/stream.h" namespace rte { +/** + * The Stream class is used to manage the stream. + * @since v4.4.0 + * @technical preview + */ class Stream { public: diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_string.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_string.h similarity index 89% rename from Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_string.h rename to Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_string.h index 76c3ef58d..106891d62 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_string.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_base/rte_cpp_string.h @@ -5,13 +5,18 @@ * */ #pragma once -#include "internal/c/utils/string.h" +#include "rte_base/c/utils/string.h" namespace rte { class Config; class PlayerConfig; +/** + * The String class is used to manage the string. 
+ * @since v4.4.0 + * @technical preview + */ class String { public: diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp.h index c23be31a6..4ea178b95 100644 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp.h +++ b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp.h @@ -6,9 +6,9 @@ */ #pragma once -#include "rte_cpp_error.h" // IWYU pragma: export -#include "rte_cpp_player.h" // IWYU pragma: export -#include "rte_cpp_rte.h" // IWYU pragma: export -#include "rte_cpp_canvas.h" // IWYU pragma: export -#include "rte_cpp_string.h" // IWYU pragma: export -#include "rte_cpp_callback_utils.h" // IWYU pragma: export +#include "rte_base/rte_cpp_error.h" // IWYU pragma: export +#include "rte_base/rte_cpp_player.h" // IWYU pragma: export +#include "rte_base/rte_cpp_rte.h" // IWYU pragma: export +#include "rte_base/rte_cpp_canvas.h" // IWYU pragma: export +#include "rte_base/rte_cpp_string.h" // IWYU pragma: export +#include "rte_base/rte_cpp_callback_utils.h" // IWYU pragma: export diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_canvas.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_canvas.h deleted file mode 100644 index 020484fac..000000000 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_canvas.h +++ /dev/null @@ -1,109 +0,0 @@ -#pragma once - -#include "internal/c/c_player.h" -#include "internal/c/handle.h" -#include "internal/c/track/canvas.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_rte.h" -#include "rte_cpp_callback_utils.h" - - -namespace rte { - -using VideoRenderMode = ::RteVideoRenderMode; -using VideoMirrorMode = ::RteVideoMirrorMode; -using ViewConfig = ::RteViewConfig; -using View = ::RteView; -using Rect = ::RteRect; - -class CanvasInitialConfig { - public: - CanvasInitialConfig() {RteCanvasInitialConfigInit(&c_canvas_initial_config, nullptr);} - ~CanvasInitialConfig() {RteCanvasInitialConfigDeinit(&c_canvas_initial_config, nullptr);} - - private: - friend class Canvas; - ::RteCanvasInitialConfig c_canvas_initial_config; -}; - - -class CanvasConfig { - public: - CanvasConfig() {RteCanvasConfigInit(&c_canvas_config, nullptr);} - ~CanvasConfig() {RteCanvasConfigDeinit(&c_canvas_config, nullptr);} - - void SetRenderMode(VideoRenderMode mode, Error *err) { - RteCanvasConfigSetVideoRenderMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - VideoRenderMode GetRenderMode(Error *err) { - VideoRenderMode mode; - RteCanvasConfigGetVideoRenderMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetMirrorMode(VideoMirrorMode mode, Error *err) { - RteCanvasConfigSetVideoMirrorMode(&c_canvas_config, mode, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - VideoMirrorMode GetMirrorMode(Error *err) { - VideoMirrorMode mode; - RteCanvasConfigGetVideoMirrorMode(&c_canvas_config, &mode, err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetCropArea(RteRect &crop_area, Error *err) { - RteCanvasConfigSetCropArea(&c_canvas_config, crop_area, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - RteRect GetCropArea(Error *err) { - RteRect crop_area; - RteCanvasConfigGetCropArea(&c_canvas_config, &crop_area, err != nullptr ? 
err->get_underlying_impl() : nullptr); - return crop_area; - } - - private: - friend class Canvas; - ::RteCanvasConfig c_canvas_config; -}; - -class Canvas { - public: - Canvas(Rte *rte, CanvasInitialConfig *initial_config) { - c_canvas = ::RteCanvasCreate(&rte->c_rte, &initial_config->c_canvas_initial_config, nullptr); - }; - ~Canvas() { RteCanvasDestroy(&c_canvas, nullptr); }; - - void Destroy(Error *err = nullptr) { - RteCanvasDestroy(&c_canvas, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - Canvas(const Canvas& other) = delete; - Canvas(Canvas&& other) = delete; - Canvas& operator=(const Canvas& other) = delete; - Canvas& operator=(Canvas&& other) = delete; - - void GetConfigs(CanvasConfig *config, Error *err) { - RteCanvasGetConfigs(&c_canvas, &config->c_canvas_config, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void SetConfigs(CanvasConfig *config, std::function cb, void *cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RteCanvasSetConfigs(&c_canvas, &config->c_canvas_config, &CallbackFunc<::RteCanvas, Canvas>, callbackCtx); - } - - void AddView(View *view, ViewConfig *config, std::function cb, void *cb_data) { - CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb, cb_data); - RteCanvasAddView(&c_canvas, view, config, &CallbackFuncWithArgs<::RteCanvas, Canvas, View*>, ctx); - } - - private: - - friend class Player; - - ::RteCanvas c_canvas; -}; - -} // namespace rte \ No newline at end of file diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_player.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_player.h deleted file mode 100644 index c19b44265..000000000 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_player.h +++ /dev/null @@ -1,443 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. 
- * - */ -#pragma once -#include - -#include "internal/c/c_rte.h" -#include "internal/c/c_player.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_callback_utils.h" -#include "rte_cpp_canvas.h" -#include "rte_cpp_string.h" -#include "rte_cpp_stream.h" - -struct RtePlayerObserver; - -namespace rte { - - -using PlayerState = ::RtePlayerState; -using PlayerEvent = ::RtePlayerEvent; -using PlayerMetadataType = ::RtePlayerMetadataType; -using PlayerInfo = ::RtePlayerInfo; -using PlayerStats = ::RtePlayerStats; -using PlayerCustomSourceProvider = ::RtePlayerCustomSourceProvider; - -class PlayerInitialConfig {}; - -static void onStateChanged(::RtePlayerObserver *observer, - RtePlayerState old_state, RtePlayerState new_state, - RteError *err); - -static void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, - uint64_t utc_time); - -static void onResolutionChanged(::RtePlayerObserver *observer, int width, int height); - -static void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event); - -static void onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, - const uint8_t *data, size_t length); - -static void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info); - -static void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume); - - -class PlayerObserver { - public: - PlayerObserver() : c_rte_observer(::RtePlayerObserverCreate(nullptr)) { - - c_rte_observer->base_observer.me_in_target_lang = this; - - c_rte_observer->on_state_changed = rte::onStateChanged; - c_rte_observer->on_position_changed = rte::onPositionChanged; - c_rte_observer->on_resolution_changed = rte::onResolutionChanged; - c_rte_observer->on_event = rte::onEvent; - c_rte_observer->on_metadata = rte::onMetadata; - c_rte_observer->on_player_info_updated = rte::onPlayerInfoUpdated; - c_rte_observer->on_audio_volume_indication = rte::onAudioVolumeIndication; - } - virtual ~PlayerObserver(){ RtePlayerObserverDestroy(c_rte_observer, nullptr); } - - // @{ - PlayerObserver(PlayerObserver &other) = delete; - PlayerObserver(PlayerObserver &&other) = delete; - PlayerObserver &operator=(const PlayerObserver &cmd) = delete; - PlayerObserver &operator=(PlayerObserver &&cmd) = delete; - // @} - - virtual void onStateChanged(PlayerState old_state, PlayerState new_state, - rte::Error *err) = 0; - virtual void onPositionChanged(uint64_t curr_time, - uint64_t utc_time) = 0; - virtual void onResolutionChanged(int width, int height) = 0; - virtual void onEvent(PlayerEvent event) = 0; - virtual void onMetadata(PlayerMetadataType type, - const uint8_t *data, size_t length) = 0; - - virtual void onPlayerInfoUpdated(const PlayerInfo *info) = 0; - - virtual void onAudioVolumeIndication(int32_t volume) = 0; - - private: - friend class Player; - - ::RtePlayerObserver *c_rte_observer; -}; - -void onStateChanged(::RtePlayerObserver *observer, - RtePlayerState old_state, RtePlayerState new_state, - RteError *err){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - Error cpp_err(err); - player_observer->onStateChanged(old_state, new_state, &cpp_err); - } -} -void onPositionChanged(::RtePlayerObserver *observer, uint64_t curr_time, - uint64_t utc_time){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onPositionChanged(curr_time, utc_time); - } -} - -void onResolutionChanged(::RtePlayerObserver *observer, int width, int height){ 
- auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onResolutionChanged(width, height); - } -} - -void onEvent(::RtePlayerObserver *observer, RtePlayerEvent event){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onEvent(event); - } - -} - -void onMetadata(::RtePlayerObserver *observer, RtePlayerMetadataType type, - const uint8_t *data, size_t length){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onMetadata(type, data, length); - } -} - -void onPlayerInfoUpdated(::RtePlayerObserver *observer, const RtePlayerInfo *info){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onPlayerInfoUpdated(info); - } -} - -void onAudioVolumeIndication(::RtePlayerObserver *observer, int32_t volume){ - auto *player_observer = static_cast(observer->base_observer.me_in_target_lang); - if (player_observer != nullptr){ - player_observer->onAudioVolumeIndication(volume); - } -} - -class PlayerConfig { - public: - PlayerConfig() { RtePlayerConfigInit(&c_rte_player_config, nullptr); } - ~PlayerConfig() { RtePlayerConfigDeinit(&c_rte_player_config, nullptr); } - - // @{ - PlayerConfig(PlayerConfig &other) = delete; - PlayerConfig(PlayerConfig &&other) = delete; - PlayerConfig &operator=(const PlayerConfig &cmd) = delete; - PlayerConfig &operator=(PlayerConfig &&cmd) = delete; - // @} - - void SetAutoPlay(bool auto_play, Error *err) { - RtePlayerConfigSetAutoPlay(&c_rte_player_config, auto_play, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool GetAutoPlay(Error *err) { - bool auto_play; - RtePlayerConfigGetAutoPlay(&c_rte_player_config, &auto_play, - err != nullptr ? err->get_underlying_impl() : nullptr); - return auto_play; - } - - void SetPlaybackSpeed(int32_t speed, Error *err) { - RtePlayerConfigSetPlaybackSpeed(&c_rte_player_config, speed, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlaybackSpeed(Error *err) { - int32_t speed; - RtePlayerConfigGetPlaybackSpeed(&c_rte_player_config, &speed, - err != nullptr ? err->get_underlying_impl() : nullptr); - return speed; - } - - void SetPlayoutAudioTrackIdx(int idx, Error *err) { - RtePlayerConfigSetPlayoutAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlayoutAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetPlayoutAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetPublishAudioTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetPublishAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPublishAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetPublishAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetAudioTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetAudioTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetAudioTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? 
err->get_underlying_impl() : nullptr); - return idx; - } - - void SetSubtitleTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetSubtitleTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetSubtitleTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetSubtitleTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetExternalSubtitleTrackIdx(int32_t idx, Error *err) { - RtePlayerConfigSetExternalSubtitleTrackIdx(&c_rte_player_config, idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetExternalSubtitleTrackIdx(Error *err) { - int32_t idx; - RtePlayerConfigGetExternalSubtitleTrackIdx(&c_rte_player_config, &idx, - err != nullptr ? err->get_underlying_impl() : nullptr); - return idx; - } - - void SetAudioPitch(int32_t audio_pitch, Error *err) { - RtePlayerConfigSetAudioPitch(&c_rte_player_config, audio_pitch, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioPitch(Error *err) { - int32_t audio_pitch; - RtePlayerConfigGetAudioPitch(&c_rte_player_config, &audio_pitch, - err != nullptr ? err->get_underlying_impl() : nullptr); - return audio_pitch; - } - - void SetPlayoutVolume(int32_t volume, Error *err) { - RtePlayerConfigSetPlayoutVolume(&c_rte_player_config, volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPlayoutVolume(Error *err) { - int32_t volume; - RtePlayerConfigGetPlayoutVolume(&c_rte_player_config, &volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - return volume; - } - - void SetAudioPlaybackDelay(int32_t delay, Error *err) { - RtePlayerConfigSetAudioPlaybackDelay(&c_rte_player_config, delay, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAudioPlaybackDelay(Error *err) { - int32_t delay; - RtePlayerConfigGetAudioPlaybackDelay(&c_rte_player_config, &delay, - err != nullptr ? err->get_underlying_impl() : nullptr); - return delay; - } - - void SetAudioDualMonoMode(RteAudioDualMonoMode mode, Error *err) { - RtePlayerConfigSetAudioDualMonoMode(&c_rte_player_config, mode, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - RteAudioDualMonoMode GetAudioDualMonoMode(Error *err) { - RteAudioDualMonoMode mode; - RtePlayerConfigGetAudioDualMonoMode(&c_rte_player_config, &mode, - err != nullptr ? err->get_underlying_impl() : nullptr); - return mode; - } - - void SetPublishVolume(int32_t volume, Error *err) { - RtePlayerConfigSetPublishVolume(&c_rte_player_config, volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetPublishVolume(Error *err) { - int32_t volume; - RtePlayerConfigGetPublishVolume(&c_rte_player_config, &volume, - err != nullptr ? err->get_underlying_impl() : nullptr); - return volume; - } - - void SetLoopCount(int32_t count, Error *err) { - RtePlayerConfigSetLoopCount(&c_rte_player_config, count, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetLoopCount(Error *err) { - int32_t count; - RtePlayerConfigGetLoopCount(&c_rte_player_config, &count, - err != nullptr ? err->get_underlying_impl() : nullptr); - return count; - } - - void SetJsonParameter(const char *json_parameter, Error *err) { - String str(json_parameter); - RtePlayerConfigSetJsonParameter(&c_rte_player_config, str.get_underlying_impl(), - err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - const char *GetJsonParameter(Error *err) { - String str; - RtePlayerConfigGetJsonParameter(&c_rte_player_config, str.get_underlying_impl(), - err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - private: - ::RtePlayerConfig* get_underlying_impl() { return &c_rte_player_config; } - - private: - friend class Player; - - ::RtePlayerConfig c_rte_player_config; -}; - - -class Player { - public: - explicit Player(Rte *self, PlayerInitialConfig *config = nullptr) - : c_rte(::RtePlayerCreate(&self->c_rte, nullptr, nullptr)) {}; - ~Player() { RtePlayerDestroy(&c_rte, nullptr); }; - - void Destroy(Error *err = nullptr){ - RtePlayerDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); -}; - - Player(Player &other) = default; - Player(Player &&other) = default; - - // @{ - Player &operator=(const Player &cmd) = delete; - Player &operator=(Player &&cmd) = delete; - // @} - - void PreloadWithUrl(const char* url, Error* err) { - RtePlayerPreloadWithUrl(&c_rte, url, err != nullptr ? err->get_underlying_impl() : nullptr); - }; - - void OpenWithUrl(const char* url, uint64_t start_time, std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithUrl(&c_rte, url, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - void OpenWithCustomSourceProvider(PlayerCustomSourceProvider* provider, uint64_t start_time, - std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithCustomSourceProvider(&c_rte, provider, start_time, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - - void OpenWithStream(Stream* stream, std::function cb, - void* cb_data) { - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerOpenWithStream(&c_rte, stream != nullptr ? &stream->c_rte_stream : nullptr, &CallbackFunc<::RtePlayer, Player>, callbackCtx); - }; - - void GetStats(std::function cb, void *cb_data){ - CallbackContextWithArgs *ctx = new CallbackContextWithArgs(this, cb, cb_data); - RtePlayerGetStats(&c_rte, &CallbackFuncWithArgs<::RtePlayer, Player, rte::PlayerStats*>, ctx); - } - - void SetCanvas(Canvas *canvas, Error *err) { - RtePlayerSetCanvas(&c_rte, canvas != nullptr ? &canvas->c_canvas : nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); - }; - - void Play(Error* err) { - RtePlayerPlay(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Stop(Error* err) { - RtePlayerStop(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Pause(Error* err) { - RtePlayerPause(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void Seek(uint64_t new_time, Error* err) { - RtePlayerSeek(&c_rte, new_time, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void MuteAudio(bool mute, Error* err) { - RtePlayerMuteAudio(&c_rte, mute, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void MuteVideo(bool mute, Error* err) { - RtePlayerMuteVideo(&c_rte, mute, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - uint64_t GetPosition(Error *err){ - return RtePlayerGetPosition(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - void GetInfo(PlayerInfo *info, Error *err){ - RtePlayerGetInfo(&c_rte, info, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - void GetConfigs(PlayerConfig* config, Error* err) { - RtePlayerGetConfigs(&c_rte, config->get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void SetConfigs(PlayerConfig* config, std::function cb, - void* cb_data) { - - rte::CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - RtePlayerSetConfigs(&c_rte, config->get_underlying_impl(), &CallbackFunc<::RtePlayer, Player>, callbackCtx); - } - - bool RegisterObserver(PlayerObserver *observer, Error *err) { - return RtePlayerRegisterObserver( - &c_rte, observer->c_rte_observer, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - void UnregisterObserver(PlayerObserver *observer, Error *err){ - RtePlayerUnregisterObserver(&c_rte, observer->c_rte_observer, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - private: - ::RtePlayer c_rte; -}; - -} // namespace rte diff --git a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_rte.h b/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_rte.h deleted file mode 100644 index 50c49395a..000000000 --- a/Android/APIExample/agora-stream-encrypt/src/main/cpp/include/agora/rte_cpp_rte.h +++ /dev/null @@ -1,218 +0,0 @@ -/** - * - * Agora Real Time Engagement - * Copyright (c) 2024 Agora IO. All rights reserved. - * - */ -#pragma once - -#include "internal/c/c_rte.h" -#include "internal/c/bridge.h" - -#include "rte_cpp_error.h" -#include "rte_cpp_callback_utils.h" -#include "rte_cpp_string.h" - - -struct RteObserver; -struct RteInitialConfig; -struct RteConfig; - -namespace rte { - -class Player; - -class RteInitialConfig { - ::RteInitialConfig *c_rte_init_cfg; -}; - -class RteObserver { - public: - RteObserver(): c_rte_observer(::RteObserverCreate(nullptr)) { - c_rte_observer->base_observer.me_in_target_lang = this;} - ~RteObserver() { RteObserverDestroy(c_rte_observer, nullptr); } - - // @{ - RteObserver(RteObserver &other) = delete; - RteObserver(RteObserver &&other) = delete; - RteObserver &operator=(const RteObserver &cmd) = delete; - RteObserver &operator=(RteObserver &&cmd) = delete; - // @} - - private: - friend class Rte; - - ::RteObserver *c_rte_observer; -}; - -class Config { - public: - Config() {RteConfigInit(&c_rte_config, nullptr);} - ~Config() {RteConfigDeinit(&c_rte_config, nullptr);} - - // @{ - Config(Config &other) = delete; - Config(Config &&other) = delete; - Config &operator=(const Config &cmd) = delete; - Config &operator=(Config &&cmd) = delete; - // @} - - void SetAppId(const char *app_id, Error *err){ - String str(app_id); - RteConfigSetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetAppId(Error *err){ - String str; - RteConfigGetAppId(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetLogFolder(const char *log_folder, Error *err){ - String str(log_folder); - RteConfigSetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetLogFolder(Error *err){ - String str; - RteConfigGetLogFolder(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetLogFileSize(size_t log_file_size, Error *err){ - RteConfigSetLogFileSize(&c_rte_config, log_file_size, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - size_t GetLogFileSize(Error *err){ - size_t log_file_size; - RteConfigGetLogFileSize(&c_rte_config, &log_file_size, err != nullptr ? err->get_underlying_impl() : nullptr); - return log_file_size; - } - - void SetAreaCode(int32_t area_code, Error *err){ - RteConfigSetAreaCode(&c_rte_config, area_code, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - int32_t GetAreaCode(Error *err){ - int32_t area_code; - RteConfigGetAreaCode(&c_rte_config, &area_code, err != nullptr ? err->get_underlying_impl() : nullptr); - return area_code; - } - - void SetCloudProxy(const char *cloud_proxy, Error *err){ - String str(cloud_proxy); - RteConfigSetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetCloudProxy(Error *err){ - String str; - RteConfigGetCloudProxy(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - void SetJsonParameter(const char *json_parameter, Error *err){ - String str(json_parameter); - RteConfigSetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - } - - const char* GetJsonParameter(Error *err){ - String str; - RteConfigGetJsonParameter(&c_rte_config, str.get_underlying_impl(), err != nullptr ? err->get_underlying_impl() : nullptr); - return str.Cstr(); - } - - private: - ::RteConfig* get_underlying_impl() { return &c_rte_config; } - - private: - friend class Rte; - ::RteConfig c_rte_config; -}; - -class Rte { - public: - - static Rte GetFromBridge(Error* err = nullptr){ - Rte rte( RteGetFromBridge(err != nullptr ? err->get_underlying_impl() : nullptr)); - return rte; - } - - explicit Rte(::RteInitialConfig *config = nullptr): c_rte(::RteCreate(config, nullptr)) {} - ~Rte()=default; - - void Destroy(Error *err = nullptr) { - RteDestroy(&c_rte, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool RegisterObserver(RteObserver *observer, Error *err){ - return RteRegisterObserver(&c_rte, observer->c_rte_observer, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool UnregisterObserver(RteObserver *observer, Error *err){ - return RteUnregisterObserver(&c_rte, observer->c_rte_observer, - err != nullptr ? err->get_underlying_impl() : nullptr); - } - - bool InitMediaEngine(std::function cb, void *cb_data, Error *err = nullptr){ - auto* ctx = new CallbackContext(this, cb, cb_data); - return RteInitMediaEngine(&c_rte, &CallbackFunc<::Rte, Rte>, ctx, err != nullptr ? err->get_underlying_impl() : nullptr); - } - - Rte(Rte &other) = default; - Rte(Rte &&other) = default; - - // @{ - Rte &operator=(const Rte &cmd) = delete; - Rte &operator=(Rte &&cmd) = delete; - // @} - - void GetConfigs(Config *config, Error *err){ - RteGetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, err != nullptr ? err->get_underlying_impl() : nullptr); - } - bool SetConfigs(Config *config, std::function cb, void *cb_data, Error *err = nullptr){ - CallbackContext* callbackCtx = new CallbackContext(this, cb, cb_data); - return RteSetConfigs(&c_rte, config != nullptr ? config->get_underlying_impl(): nullptr, &CallbackFunc<::Rte, Rte>, callbackCtx, err != nullptr ? 
err->get_underlying_impl() : nullptr); - } - - private: - - explicit Rte(::Rte other) { c_rte = other; } - - private: - friend class Player; - friend class Canvas; - - ::Rte c_rte; - -// struct RteInitMediaEngineCtx { -// RteInitMediaEngineCtx(InitMediaEngineCb cb, void *cb_data) -// : cb(cb), cb_data(cb_data) {} - -// ~RteInitMediaEngineCtx() = default; - -// // @{ -// RteInitMediaEngineCtx(RteInitMediaEngineCtx &other) = delete; -// RteInitMediaEngineCtx(RteInitMediaEngineCtx &&other) = delete; -// RteInitMediaEngineCtx &operator=(const RteInitMediaEngineCtx &cmd) = delete; -// RteInitMediaEngineCtx &operator=(RteInitMediaEngineCtx &&cmd) = delete; -// // @} - -// InitMediaEngineCb cb; -// void *cb_data; -// }; - -// static void RteInitMediaEngineCtxProxy(::Rte *self, void *cb_data, -// ::RteError *err){ -// auto *ctx = static_cast(cb_data); - -// Rte rte; -// rte.c_rte = *self; - -// Error cpp_err(err); -// ctx->cb(&rte, ctx->cb_data, &cpp_err); - -// delete ctx; -// } -}; - -} // namespace rte diff --git a/Android/APIExample/app/.gitignore b/Android/APIExample/app/.gitignore index e881db1f4..90f560fd2 100644 --- a/Android/APIExample/app/.gitignore +++ b/Android/APIExample/app/.gitignore @@ -24,4 +24,5 @@ src/main/assets/beauty_faceunity src/main/assets/beauty_sensetime !src/main/assets/beauty_bytedance/PLACEHOLDER !src/main/assets/beauty_faceunity/PLACEHOLDER -!src/main/assets/beauty_sensetime/PLACEHOLDER \ No newline at end of file +!src/main/assets/beauty_sensetime/PLACEHOLDER +libs \ No newline at end of file
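
For reference, the sketch below shows how the relocated rte_base C++ API introduced by this patch is expected to fit together: configure the Rte object, initialize the media engine, then open a URL with Player and render it through a Canvas. It is illustrative only; the App ID and URL are placeholders, the Canvas constructor is assumed to match the new rte_cpp_canvas.h, and asynchronous sequencing and object lifetimes are simplified.

#include "rte_cpp.h"

void PlayUrlSketch() {
  rte::Error err;

  rte::Rte rte_engine;                     // or rte::Rte::GetFromBridge(&err) if an RTC engine is already initialized
  rte::Config config;
  config.SetAppId("<your-app-id>", &err);  // placeholder App ID
  rte_engine.SetConfigs(&config, &err);

  rte_engine.InitMediaEngine([](rte::Error* init_err) {
    // Initialization result arrives here; real code should check init_err and
    // only start using Player objects after this callback reports success.
  }, &err);

  rte::Player player(&rte_engine);

  rte::CanvasInitialConfig canvas_init;
  rte::Canvas canvas(&rte_engine, &canvas_init);  // constructor assumed from the new rte_cpp_canvas.h
  // Attach a platform view to the canvas via Canvas::AddView before playback (omitted here).
  player.SetCanvas(&canvas, &err);

  player.OpenWithUrl("<rte-url-with-token>", 0, [&player](rte::Error* open_err) {
    // On failure the player enters kRtePlayerStateFailed; call Stop() before retrying OpenWithUrl.
    // On success, start playback (or configure auto play via PlayerConfig::SetAutoPlay beforehand).
    player.Play();
  });
}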