Skip to content

Commit

Permalink
Voice to Content: refactor processing and error state handling, and e…
Browse files Browse the repository at this point in the history
…nsure proper error handling (#36001)

* Move processing state out of the recording hook

* Keep considering processing a state, but a transcription state

* changelog

* Save state to const so it's simpler to reuse

* Map error codes to translated error messages
  • Loading branch information
lhkowalski authored Feb 28, 2024
1 parent 002ed26 commit d7242d2
Show file tree
Hide file tree
Showing 7 changed files with 110 additions and 47 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
Significance: patch
Type: changed

AI Client: change loading and error state handling on media recording hook.
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
* External dependencies
*/
import { useCallback, useState, useRef } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
import debugFactory from 'debug';
/**
* Internal dependencies
Expand Down Expand Up @@ -30,6 +31,63 @@ export type UseAudioTranscriptionProps = {
onError?: ( error: string ) => void;
};

/**
 * The structured error response returned by the audio transcription service.
 */
type AudioTranscriptionErrorResponse = {
/**
 * The human-readable error message; used as a fallback when the code is not recognized.
 */
message: string;

/**
 * The machine-readable error code; mapped to a translated message by mapErrorResponse.
 */
code: string;
};

/**
* Map error response to a string.
* @param {Error | string | AudioTranscriptionErrorResponse} error - The error response from the audio transcription service.
* @returns {string} the translated error message
*/
/**
 * Map error response to a string.
 * @param {Error | string | AudioTranscriptionErrorResponse} error - The error response from the audio transcription service.
 * @returns {string} the translated error message
 */
const mapErrorResponse = ( error: Error | string | AudioTranscriptionErrorResponse ): string => {
	// Plain string errors are passed through untouched.
	if ( typeof error === 'string' ) {
		return error;
	}

	// Structured service errors: translate the known error codes.
	if ( 'code' in error ) {
		const messagesByCode: Record< string, string > = {
			error_quota_exceeded: __(
				'You exceeded your current quota, please check your plan details.',
				'jetpack-ai-client'
			),
			jetpack_ai_missing_audio_param: __(
				'The audio_file is required to perform a transcription.',
				'jetpack-ai-client'
			),
			jetpack_ai_service_unavailable: __(
				'The Jetpack AI service is temporarily unavailable.',
				'jetpack-ai-client'
			),
			file_size_not_supported: __( 'The provided audio file is too big.', 'jetpack-ai-client' ),
			file_type_not_supported: __(
				'The provided audio file type is not supported.',
				'jetpack-ai-client'
			),
			jetpack_ai_error: __(
				'There was an error processing the transcription request.',
				'jetpack-ai-client'
			),
		};

		// Unknown codes fall back to the raw message from the service.
		return messagesByCode[ error.code ] ?? error.message;
	}

	// Error instances (or anything message-shaped) surface their own message.
	if ( 'message' in error ) {
		return error.message;
	}

	// Last-resort generic message when the error carries no usable information.
	return __( 'There was an error processing the transcription request.', 'jetpack-ai-client' );
};

/**
* A hook to handle audio transcription.
*
Expand Down Expand Up @@ -74,7 +132,7 @@ export default function useAudioTranscription( {
.catch( error => {
if ( ! controller.signal.aborted ) {
setTranscriptionError( error.message );
onError?.( error.message );
onError?.( mapErrorResponse( error ) );
}
} )
.finally( () => setIsTranscribingAudio( false ) );
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import { useRef, useState, useEffect, useCallback } from '@wordpress/element';
/*
* Types
*/
export type RecordingState = 'inactive' | 'recording' | 'paused' | 'processing' | 'error';
export type RecordingState = 'inactive' | 'recording' | 'paused' | 'error';
type UseMediaRecordingProps = {
onDone?: ( blob: Blob ) => void;
};
Expand Down Expand Up @@ -41,11 +41,6 @@ type UseMediaRecordingReturn = {
*/
onError: ( err: string | Error ) => void;

/**
* The processing handler
*/
onProcessing: () => void;

controls: {
/**
* `start` recording handler
Expand Down Expand Up @@ -90,7 +85,7 @@ export default function useMediaRecording( {
// Reference to the media recorder instance
const mediaRecordRef = useRef( null );

// Recording state: `inactive`, `recording`, `paused`, `processing`, `error`
// Recording state: `inactive`, `recording`, `paused`, `error`
const [ state, setState ] = useState< RecordingState >( 'inactive' );

// reference to the paused state to be used in the `onDataAvailable` event listener,
Expand Down Expand Up @@ -239,11 +234,6 @@ export default function useMediaRecording( {
setState( 'error' );
}, [] );

// manually set the state to `processing` for the file upload case
const onProcessing = useCallback( () => {
setState( 'processing' );
}, [] );

/**
* `start` event listener for the media recorder instance.
*/
Expand All @@ -258,7 +248,6 @@ export default function useMediaRecording( {
* @returns {void}
*/
function onStopListener(): void {
setState( 'processing' );
const lastBlob = getBlob();
onDone?.( lastBlob );

Expand Down Expand Up @@ -326,7 +315,6 @@ export default function useMediaRecording( {
duration,
analyser: analyser.current,
onError,
onProcessing,

controls: {
start,
Expand Down
5 changes: 5 additions & 0 deletions projects/js-packages/ai-client/src/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -88,6 +88,11 @@ export type { RecordingState } from './hooks/use-media-recording/index.js';
*/
export type CancelablePromise< T = void > = Promise< T > & { canceled?: boolean };

/*
* Transcription types
*/
export type TranscriptionState = RecordingState | 'processing' | 'error';

// Connection initial state
// @todo: it should be provided by the connection package
interface JPConnectionInitialState {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
Significance: patch
Type: other

Voice to Content: change the way the processing and error states are handled, and ensure proper error handling.
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,15 @@ import Oscilloscope from './oscilloscope';
/**
* Types
*/
import type { RecordingState } from '@automattic/jetpack-ai-client';
import type { TranscriptionState } from '@automattic/jetpack-ai-client';

export default function AudioStatusPanel( {
state,
error = null,
analyser,
duration = 0,
}: {
state: RecordingState;
state: TranscriptionState;
error: string;
analyser: AnalyserNode;
duration: number;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,30 +42,31 @@ export default function VoiceToContentEdit( { clientId } ) {

const { upsertTranscription } = useTranscriptionInserter();

const { processTranscription, cancelTranscriptionProcessing } = useTranscriptionPostProcessing( {
feature: 'voice-to-content',
onReady: postProcessingResult => {
// Insert the content into the editor
upsertTranscription( postProcessingResult );
handleClose();
},
onError: error => {
// Use the transcription instead for a partial result
if ( transcription ) {
dispatch.insertBlock( createBlock( 'core/paragraph', { content: transcription } ) );
}
// eslint-disable-next-line no-console
console.log( 'Post-processing error: ', error );
handleClose();
},
onUpdate: currentPostProcessingResult => {
/*
* We can upsert partial results because the hook takes care of replacing
* the previous result with the new one.
*/
upsertTranscription( currentPostProcessingResult );
},
} );
const { processTranscription, cancelTranscriptionProcessing, isProcessingTranscription } =
useTranscriptionPostProcessing( {
feature: 'voice-to-content',
onReady: postProcessingResult => {
// Insert the content into the editor
upsertTranscription( postProcessingResult );
handleClose();
},
onError: error => {
// Use the transcription instead for a partial result
if ( transcription ) {
dispatch.insertBlock( createBlock( 'core/paragraph', { content: transcription } ) );
}
// eslint-disable-next-line no-console
console.log( 'Post-processing error: ', error );
handleClose();
},
onUpdate: currentPostProcessingResult => {
/*
* We can upsert partial results because the hook takes care of replacing
* the previous result with the new one.
*/
upsertTranscription( currentPostProcessingResult );
},
} );

const onTranscriptionReady = ( content: string ) => {
// eslint-disable-next-line no-console
Expand All @@ -78,14 +79,16 @@ export default function VoiceToContentEdit( { clientId } ) {
onError( error );
};

const { transcribeAudio, cancelTranscription }: UseAudioTranscriptionReturn =
const { transcribeAudio, cancelTranscription, isTranscribingAudio }: UseAudioTranscriptionReturn =
useAudioTranscription( {
feature: 'voice-to-content',
onReady: onTranscriptionReady,
onError: onTranscriptionError,
} );

const { state, controls, error, onError, onProcessing, duration, analyser } = useMediaRecording( {
const isCreatingTranscription = isTranscribingAudio || isProcessingTranscription;

const { state, controls, error, onError, duration, analyser } = useMediaRecording( {
onDone: lastBlob => {
// When recording is done, set the audio to be transcribed
onAudioHandler( lastBlob );
Expand All @@ -95,11 +98,10 @@ export default function VoiceToContentEdit( { clientId } ) {
const onAudioHandler = useCallback(
( audio: Blob ) => {
if ( audio ) {
onProcessing();
transcribeAudio( audio );
}
},
[ transcribeAudio, onProcessing ]
[ transcribeAudio ]
);

// Destructure controls
Expand Down Expand Up @@ -146,6 +148,8 @@ export default function VoiceToContentEdit( { clientId } ) {
// To avoid a wrong TS warning
const iconProps = { className: 'icon' };

const transcriptionState = isCreatingTranscription ? 'processing' : state;

return (
<Modal
onRequestClose={ handleClose }
Expand All @@ -163,14 +167,14 @@ export default function VoiceToContentEdit( { clientId } ) {
</span>
<div className="jetpack-ai-voice-to-content__contextual-row">
<AudioStatusPanel
state={ state }
state={ transcriptionState }
error={ error }
duration={ duration }
analyser={ analyser }
/>
</div>
<ActionButtons
state={ state }
state={ transcriptionState }
onUpload={ onUploadHandler }
onCancel={ onCancelHandler }
onRecord={ onRecordHandler }
Expand Down

0 comments on commit d7242d2

Please sign in to comment.