[AUTO] Generate comments by iris-doc (#1185)
Co-authored-by: Nero-Hu <[email protected]>
sda-rob and Nero-Hu authored Apr 23, 2024
1 parent 1aaa454 commit 6236d44
Showing 5 changed files with 111 additions and 2,109 deletions.
20 changes: 12 additions & 8 deletions ts/Private/AgoraBase.ts
@@ -327,7 +327,7 @@ export enum ErrorCodeType {
*/
ErrInvalidUserId = 121,
/**
* @ignore
* 122: Data streams decryption fails. The user might use an incorrect password to join the channel. Check the entered password, or tell the user to try rejoining the channel.
*/
ErrDatastreamDecryptionFailed = 122,
/**
@@ -2208,7 +2208,7 @@ export enum LocalVideoStreamReason {
*/
LocalVideoStreamReasonScreenCaptureWindowRecoverFromHidden = 26,
/**
* @ignore
* 27: The window for screen capture has been restored from the minimized state.
*/
LocalVideoStreamReasonScreenCaptureWindowRecoverFromMinimized = 27,
/**
@@ -3867,7 +3867,7 @@ export enum AudioEffectPreset {
*/
RoomAcousticsVirtualSurroundSound = 0x02010900,
/**
* @ignore
* The audio effect of chorus. Agora recommends using this effect in chorus scenarios to enhance the sense of depth and dimension in the vocals.
*/
RoomAcousticsChorus = 0x02010d00,
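
The following is a minimal, hedged sketch of applying this preset from application code. It assumes an already-initialized IRtcEngine exposing the setAudioEffectPreset method documented in this SDK; the engine handle is declared structurally so the snippet stands alone, and everything outside that one call is illustrative.

```typescript
import { AudioEffectPreset } from './AgoraBase';

// `engine` stands in for an already-initialized IRtcEngine instance; only the
// method used below is declared so the sketch stays self-contained.
declare const engine: {
  setAudioEffectPreset(preset: AudioEffectPreset): number;
};

// Apply the chorus preset in a chorus/karaoke scenario to add depth to the vocals.
const ret = engine.setAudioEffectPreset(AudioEffectPreset.RoomAcousticsChorus);
if (ret !== 0) {
  console.warn(`setAudioEffectPreset returned error code ${ret}`);
}
```
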
/**
@@ -4005,9 +4005,13 @@ export enum HeadphoneEqualizerPreset {
*/
export class ScreenCaptureParameters {
/**
* The video encoding resolution of the shared screen stream. See VideoDimensions. The default value is 1920 × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to calculate the charges. If the screen dimensions are different from the value of this parameter, Agora applies the following strategies for encoding. Suppose is set to 1920 × 1080:
* The video encoding resolution of the screen sharing stream. See VideoDimensions. The default value is 1920 × 1080, that is, 2,073,600 pixels. Agora uses the value of this parameter to calculate the charges. If the screen dimensions are different from the value of this parameter, Agora applies the following strategies for encoding. Suppose dimensions is set to 1920 × 1080:
* If the value of the screen dimensions is lower than that of dimensions, for example, 1000 × 1000 pixels, the SDK uses the screen dimensions, that is, 1000 × 1000 pixels, for encoding.
* If the value of the screen dimensions is higher than that of dimensions, for example, 2000 × 1500, the SDK uses the maximum value under with the aspect ratio of the screen dimension (4:3) for encoding, that is, 1440 × 1080.
* If the value of the screen dimensions is higher than that of dimensions, for example, 2000 × 1500, the SDK uses the maximum value under dimensions with the aspect ratio of the screen dimension (4:3) for encoding, that is, 1440 × 1080. When setting the encoding resolution in the scenario of sharing documents (ScreenScenarioDocument), choose one of the following two methods:
* If you require the best image quality, it is recommended to set the encoding resolution to be the same as the capture resolution.
* If you wish to achieve a relative balance between image quality, bandwidth, and system performance, then:
* When the capture resolution is greater than 1920 × 1080, it is recommended that the encoding resolution is not less than 1920 × 1080.
* When the capture resolution is less than 1920 × 1080, it is recommended that the encoding resolution is not less than 1280 × 720.
*/
dimensions?: VideoDimensions;
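
Below is a sketch of how the encoding-resolution guidance above might be applied when sharing a document window. The selection logic mirrors the recommendations in this comment; the capture resolution, the frameRate value, and the downstream screen-capture call it alludes to are assumptions for illustration.

```typescript
import { ScreenCaptureParameters, VideoDimensions } from './AgoraBase';

// Capture resolution of the shared document window (illustrative values).
const capture: VideoDimensions = { width: 2560, height: 1440 };

// Per the guidance above for document sharing (ScreenScenarioDocument), pick an
// encoding resolution that balances quality, bandwidth, and system load:
//   capture >= 1920 x 1080  -> encode at no less than 1920 x 1080
//   capture <  1920 x 1080  -> encode at no less than 1280 x 720
const dimensions: VideoDimensions =
  (capture.width ?? 0) >= 1920 && (capture.height ?? 0) >= 1080
    ? { width: 1920, height: 1080 }
    : { width: 1280, height: 720 };

// frameRate here is illustrative; dimensions is the field documented above.
const params: ScreenCaptureParameters = { dimensions, frameRate: 15 };
// `params` would then be passed to a screen-capture call (for example a
// startScreenCaptureByWindowId-style API), which is outside this sketch.
console.log(params);
```
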
/**
@@ -4547,11 +4551,11 @@ export enum EncryptionErrorType {
*/
EncryptionErrorEncryptionFailure = 2,
/**
* @ignore
* 3: Data stream decryption error. Ensure that the receiver and the sender use the same encryption mode and key.
*/
EncryptionErrorDatastreamDecryptionFailure = 3,
/**
* @ignore
* 4: Data stream encryption error.
*/
EncryptionErrorDatastreamEncryptionFailure = 4,
}
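
The data stream decryption and encryption errors above usually stem from mismatched encryption settings between the sender and receivers. The sketch below keeps both ends consistent, assuming the enableEncryption API and EncryptionConfig shape documented for this SDK family; the key and salt values are placeholders.

```typescript
import { EncryptionConfig, EncryptionMode } from './AgoraBase';

// `engine` stands in for an initialized IRtcEngine; only the call used here is declared.
declare const engine: {
  enableEncryption(enabled: boolean, config: EncryptionConfig): number;
};

// The sender and every receiver must use exactly the same mode, key, and salt;
// otherwise decryption errors such as EncryptionErrorDatastreamDecryptionFailure (3)
// are reported on the receiving side.
const config: EncryptionConfig = {
  encryptionMode: EncryptionMode.Aes128Gcm2,
  encryptionKey: 'replace-with-your-own-key', // placeholder; distribute out of band
  encryptionKdfSalt: new Array(32).fill(1),   // placeholder; must match on all ends
};
const ret = engine.enableEncryption(true, config);
if (ret !== 0) {
  console.warn(`enableEncryption returned ${ret}`);
}
```
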
@@ -4711,7 +4715,7 @@ export enum EarMonitoringFilterType {
*/
EarMonitoringFilterNoiseSuppression = 1 << 2,
/**
* @ignore
* 1<<15: Reuse the audio filter that has been processed on the sending end for in-ear monitoring. This enumerator reduces CPU usage while increasing in-ear monitoring latency, which is suitable for latency-tolerant scenarios requiring low CPU consumption.
*/
EarMonitoringFilterReusePostProcessingFilter = 1 << 15,
}
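
A sketch of opting into this filter when enabling in-ear monitoring, assuming an enableInEarMonitoring(enabled, includeAudioFilters)-style method as found in this SDK family; the engine handle is declared structurally so the snippet stands alone.

```typescript
import { EarMonitoringFilterType } from './AgoraBase';

// `engine` stands in for an initialized IRtcEngine; the method shape mirrors the
// enableInEarMonitoring(enabled, includeAudioFilters) call found in this SDK family.
declare const engine: {
  enableInEarMonitoring(
    enabled: boolean,
    includeAudioFilters: EarMonitoringFilterType
  ): number;
};

// Reuse the sending-side audio filters for in-ear monitoring: lower CPU usage at the
// cost of extra monitoring latency, which suits latency-tolerant scenarios.
engine.enableInEarMonitoring(
  true,
  EarMonitoringFilterType.EarMonitoringFilterReusePostProcessingFilter
);
```
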
46 changes: 41 additions & 5 deletions ts/Private/AgoraMediaBase.ts
@@ -951,6 +951,8 @@ export interface IAudioFrameObserver extends IAudioFrameObserverBase {
/**
* Retrieves the audio frame of a specified user before mixing.
*
* Due to framework limitations, this callback does not support sending processed audio data back to the SDK.
*
* @param channelId The channel ID.
* @param uid The user ID of the specified user.
* @param audioFrame The raw audio data. See AudioFrame.
@@ -1060,9 +1062,7 @@ export interface IVideoFrameObserver {
/**
* Occurs each time the SDK receives a video frame captured by local devices.
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data captured by local devices. You can then pre-process the data according to your scenarios. Once the pre-processing is complete, you can directly modify videoFrame in this callback, and set the return value to true to send the modified video data to the SDK.
* The video data that this callback gets has not been pre-processed such as watermarking, cropping, and rotating.
* If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
* You can get raw video data collected by the local device through this callback.
*
* @param sourceType Video source types, including cameras, screens, or media player. See VideoSourceType.
* @param videoFrame The video frame. See VideoFrame. The default value of the video frame data format obtained through this callback is as follows:
@@ -1078,6 +1078,7 @@ export interface IVideoFrameObserver {
* Occurs each time the SDK receives a video frame before encoding.
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data before encoding and then process the data according to your particular scenarios.
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
* The video data that this callback gets has been preprocessed, with its content cropped and rotated, and the image enhanced.
*
* @param sourceType The type of the video source. See VideoSourceType.
@@ -1100,6 +1101,7 @@ export interface IVideoFrameObserver {
*
* After you successfully register the video frame observer, the SDK triggers this callback each time it receives a video frame. In this callback, you can get the video data sent from the remote end before rendering, and then process it according to the particular scenarios.
* If the video data type you get is RGBA, the SDK does not support processing the data of the alpha channel.
* Due to framework limitations, this callback does not support sending processed video data back to the SDK.
*
* @param channelId The channel ID.
* @param remoteUid The user ID of the remote user who sends the current video frame.
@@ -1232,11 +1234,45 @@ export class MediaRecorderConfiguration {
}

/**
* @ignore
* Facial information observer.
*
* You can call registerFaceInfoObserver to register the IFaceInfoObserver object, and unregisterFaceInfoObserver to unregister it.
*/
export interface IFaceInfoObserver {
/**
* @ignore
* Occurs when the facial information processed by the speech driven extension is received.
*
* @param outFaceInfo Output parameter; the JSON string of the facial information processed by the speech driven extension, including the following fields:
* faces: Object sequence. The collection of facial information, with each face corresponding to an object.
* blendshapes: Object. The collection of face capture coefficients, named according to ARKit standards, with each key-value pair representing a blendshape coefficient. The blendshape coefficient is a floating point number with a range of [0.0, 1.0].
* rotation: Object. The rotation of the head, which includes the following three key-value pairs, with values as floating point numbers ranging from -180.0 to 180.0:
* pitch: Head pitch angle. A positive value means looking down, while a negative value means looking up.
* yaw: Head yaw angle. A positive value means turning left, while a negative value means turning right.
* roll: Head roll angle. A positive value means tilting to the right, while a negative value means tilting to the left.
* timestamp: String. The timestamp of the output result, in milliseconds. Here is an example of the JSON structure:
* {
* "faces":[{
* "blendshapes":{
* "eyeBlinkLeft":0.9, "eyeLookDownLeft":0.0, "eyeLookInLeft":0.0, "eyeLookOutLeft":0.0, "eyeLookUpLeft":0.0,
* "eyeSquintLeft":0.0, "eyeWideLeft":0.0, "eyeBlinkRight":0.0, "eyeLookDownRight":0.0, "eyeLookInRight":0.0,
* "eyeLookOutRight":0.0, "eyeLookUpRight":0.0, "eyeSquintRight":0.0, "eyeWideRight":0.0, "jawForward":0.0,
* "jawLeft":0.0, "jawRight":0.0, "jawOpen":0.0, "mouthClose":0.0, "mouthFunnel":0.0, "mouthPucker":0.0,
* "mouthLeft":0.0, "mouthRight":0.0, "mouthSmileLeft":0.0, "mouthSmileRight":0.0, "mouthFrownLeft":0.0,
* "mouthFrownRight":0.0, "mouthDimpleLeft":0.0, "mouthDimpleRight":0.0, "mouthStretchLeft":0.0, "mouthStretchRight":0.0,
* "mouthRollLower":0.0, "mouthRollUpper":0.0, "mouthShrugLower":0.0, "mouthShrugUpper":0.0, "mouthPressLeft":0.0,
* "mouthPressRight":0.0, "mouthLowerDownLeft":0.0, "mouthLowerDownRight":0.0, "mouthUpperUpLeft":0.0, "mouthUpperUpRight":0.0,
* "browDownLeft":0.0, "browDownRight":0.0, "browInnerUp":0.0, "browOuterUpLeft":0.0, "browOuterUpRight":0.0,
* "cheekPuff":0.0, "cheekSquintLeft":0.0, "cheekSquintRight":0.0, "noseSneerLeft":0.0, "noseSneerRight":0.0,
* "tongueOut":0.0
* },
* "rotation":{"pitch":30.0, "yaw":25.5, "roll":-15.5},
*
* }],
* "timestamp":"654879876546"
* }
*
* @returns
* true: Facial information JSON parsing succeeds. false: Facial information JSON parsing fails.
*/
onFaceInfo?(outFaceInfo: string): void;
}
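
The video and audio observer callbacks above are effectively read-only in this framework: processed frames cannot be pushed back to the SDK. Below is a minimal sketch of registering a video frame observer purely for inspection, assuming the IMediaEngine.registerVideoFrameObserver entry point documented in IAgoraMediaEngine.ts; how the media engine handle is obtained is an assumption.

```typescript
import { IVideoFrameObserver } from './AgoraMediaBase';

// `mediaEngine` stands in for the IMediaEngine instance obtained from an initialized
// engine (for example via a getMediaEngine() accessor in this SDK family).
declare const mediaEngine: {
  registerVideoFrameObserver(observer: IVideoFrameObserver): number;
};

// Inspection-only observer: per the notes above, processed frames cannot be sent back
// to the SDK from these callbacks, so use them for metrics and diagnostics only.
const observer: IVideoFrameObserver = {
  onCaptureVideoFrame: (sourceType, videoFrame) => {
    console.log(`captured ${videoFrame.width}x${videoFrame.height} from source ${sourceType}`);
  },
  onRenderVideoFrame: (channelId, remoteUid, videoFrame) => {
    console.log(`rendering ${videoFrame.width}x${videoFrame.height} from uid ${remoteUid} in ${channelId}`);
  },
};

mediaEngine.registerVideoFrameObserver(observer);
```
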
20 changes: 18 additions & 2 deletions ts/Private/IAgoraMediaEngine.ts
@@ -100,7 +100,17 @@ export abstract class IMediaEngine {
): number;

/**
* @ignore
* Registers a facial information observer.
*
* You can call this method to register the onFaceInfo callback to receive the facial information processed by the Agora speech driven extension. When calling this method, you can register the callbacks in the IFaceInfoObserver class as needed. After the facial information observer is successfully registered, the SDK triggers the registered callbacks whenever it captures facial information converted by the speech driven extension.
* Ensure that you call this method before joining a channel.
* Before calling this method, you need to make sure that the speech driven extension has been enabled by calling enableExtension.
*
* @param observer Facial information observer, see IFaceInfoObserver.
*
* @returns
* 0: Success.
* < 0: Failure.
*/
abstract registerFaceInfoObserver(observer: IFaceInfoObserver): number;

@@ -313,7 +323,13 @@ export abstract class IMediaEngine {
): number;

/**
* @ignore
* Unregisters a facial information observer.
*
* @param observer Facial information observer, see IFaceInfoObserver.
*
* @returns
* 0: Success.
* < 0: Failure.
*/
abstract unregisterFaceInfoObserver(observer: IFaceInfoObserver): number;
}
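
A sketch tying the facial-information pieces together: enable the speech driven extension, register an IFaceInfoObserver before joining a channel, and parse the JSON payload described in onFaceInfo. The enableExtension provider and extension names are placeholders rather than the real identifiers, and the engine handles are declared structurally so the snippet stands alone.

```typescript
import { IFaceInfoObserver } from './AgoraMediaBase';

// Assumed handles: in this SDK family `engine` would be an initialized IRtcEngine and
// `mediaEngine` the result of engine.getMediaEngine(). Only the calls used are declared.
declare const engine: {
  enableExtension(provider: string, extension: string, enable: boolean): number;
};
declare const mediaEngine: {
  registerFaceInfoObserver(observer: IFaceInfoObserver): number;
  unregisterFaceInfoObserver(observer: IFaceInfoObserver): number;
};

// 1. Enable the speech driven extension first (provider/extension names are
//    placeholders, not the real identifiers).
engine.enableExtension('<speech-driven-provider>', '<speech-driven-extension>', true);

// 2. Register the facial information observer before joining a channel.
const faceInfoObserver: IFaceInfoObserver = {
  onFaceInfo: (outFaceInfo: string) => {
    // Parse the JSON payload documented in IFaceInfoObserver.onFaceInfo.
    const info = JSON.parse(outFaceInfo);
    for (const face of info.faces ?? []) {
      console.log('jawOpen:', face.blendshapes?.jawOpen, 'pitch:', face.rotation?.pitch);
    }
  },
};
mediaEngine.registerFaceInfoObserver(faceInfoObserver);

// 3. When facial information is no longer needed, unregister the same observer.
mediaEngine.unregisterFaceInfoObserver(faceInfoObserver);
```
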