# This file was auto-generated by Fern from our API Definition.

import typing
from ..core.client_wrapper import SyncClientWrapper
from ..types.output_format import OutputFormat
from ..types.voice_settings import VoiceSettings
from ..types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator
from .types.body_text_to_speech_v_1_text_to_speech_voice_id_post_apply_text_normalization import (
    BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization,
)
from ..core.request_options import RequestOptions
from ..core.jsonable_encoder import jsonable_encoder
from ..core.serialization import convert_and_respect_annotation_metadata
from ..errors.unprocessable_entity_error import UnprocessableEntityError
from ..types.http_validation_error import HttpValidationError
from ..core.unchecked_base_model import construct_type
from json.decoder import JSONDecodeError
from ..core.api_error import ApiError
from .types.body_text_to_speech_with_timestamps_v_1_text_to_speech_voice_id_with_timestamps_post_apply_text_normalization import (
    BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization,
)
from ..types.audio_with_timestamps_response_model import AudioWithTimestampsResponseModel
from .types.body_text_to_speech_streaming_v_1_text_to_speech_voice_id_stream_post_apply_text_normalization import (
    BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization,
)
from .types.body_text_to_speech_streaming_with_timestamps_v_1_text_to_speech_voice_id_stream_with_timestamps_post_apply_text_normalization import (
    BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization,
)
from ..types.streaming_audio_chunk_with_timestamps_response_model import StreamingAudioChunkWithTimestampsResponseModel
import json
from ..core.client_wrapper import AsyncClientWrapper

# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
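# The Ellipsis sentinel distinguishes "parameter not provided" from an explicit
# None: fields still equal to OMIT are stripped from the request body via the
# `omit=OMIT` argument passed to the HTTP client below.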


class TextToSpeechClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def convert(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[bytes]:
        """
        Converts text into speech using a voice of your choice and returns audio.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero-retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero-retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model must support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only to the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for the generation but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for the 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size` to customize the request and response.

        Yields
        ------
        typing.Iterator[bytes]
            The generated audio file

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.text_to_speech.convert(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            output_format="mp3_44100_128",
            text="The first move is what sets everything in motion.",
            model_id="eleven_multilingual_v2",
        )
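
        The call returns a lazy iterator, so no request is made until you
        consume it. A minimal sketch of writing the audio to disk (the
        output path is illustrative):

        with open("output.mp3", "wb") as f:
            for chunk in client.text_to_speech.convert(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                output_format="mp3_44100_128",
                text="The first move is what sets everything in motion.",
                model_id="eleven_multilingual_v2",
            ):
                f.write(chunk)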
        """
        with self._client_wrapper.httpx_client.stream(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
                    _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                    for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
                        yield _chunk
                    return
                _response.read()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)

    def convert_with_timestamps(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AudioWithTimestampsResponseModel:
        """
        Generate speech from text with precise character-level timing information for audio-text synchronization.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero-retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero-retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model must support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only to the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for the generation but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for the 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AudioWithTimestampsResponseModel
            Successful Response

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.text_to_speech.convert_with_timestamps(
            voice_id="21m00Tcm4TlvDq8ikWAM",
            text="This is a test for the API of ElevenLabs.",
        )
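
        The returned model pairs the audio with character-level timings. A
        minimal decoding sketch, assuming the response exposes `audio_base64`
        and `alignment` fields as in the public API reference:

        import base64

        response = client.text_to_speech.convert_with_timestamps(
            voice_id="21m00Tcm4TlvDq8ikWAM",
            text="This is a test for the API of ElevenLabs.",
        )
        audio_bytes = base64.b64decode(response.audio_base64)  # raw audio
        alignment = response.alignment  # per-character start/end times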
        """
        _response = self._client_wrapper.httpx_client.request(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}/with-timestamps",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(
                    AudioWithTimestampsResponseModel,
                    construct_type(
                        type_=AudioWithTimestampsResponseModel,  # type: ignore
                        object_=_response.json(),
                    ),
                )
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def convert_as_stream(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[bytes]:
        """
        Converts text into speech using a voice of your choice and returns the audio as a stream.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero-retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero-retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model must support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only to the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for the generation but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for the 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size` to customize the request and response.

        Yields
        ------
        typing.Iterator[bytes]
            Streaming audio data

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        client.text_to_speech.convert_as_stream(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            output_format="mp3_44100_128",
            text="The first move is what sets everything in motion.",
            model_id="eleven_multilingual_v2",
        )
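
        The stream is lazy; iterate to receive audio. As noted for
        request_options above, `chunk_size` controls how the audio bytes are
        chunked (a sketch; the 4096 value is illustrative):

        for chunk in client.text_to_speech.convert_as_stream(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            output_format="mp3_44100_128",
            text="The first move is what sets everything in motion.",
            request_options={"chunk_size": 4096},
        ):
            pass  # feed each chunk to a player or file as it arrives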
        """
        with self._client_wrapper.httpx_client.stream(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
                    _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                    for _chunk in _response.iter_bytes(chunk_size=_chunk_size):
                        yield _chunk
                    return
                _response.read()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)

    def stream_with_timestamps(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Iterator[StreamingAudioChunkWithTimestampsResponseModel]:
        """
        Converts text into speech using a voice of your choice and returns a stream of JSON objects, each containing a base64-encoded audio chunk together with timing information for when each character is spoken.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero-retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero-retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model must support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only to the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for the generation but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for the 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Yields
        ------
        typing.Iterator[StreamingAudioChunkWithTimestampsResponseModel]
            Stream of JSON objects containing audio chunks and character timing information

        Examples
        --------
        from elevenlabs import ElevenLabs

        client = ElevenLabs(
            api_key="YOUR_API_KEY",
        )
        response = client.text_to_speech.stream_with_timestamps(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            output_format="mp3_44100_128",
            text="The first move is what sets everything in motion.",
            model_id="eleven_multilingual_v2",
        )
        for chunk in response:
            print(chunk)
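
        Each yielded chunk carries base64-encoded audio plus character timing
        data. A minimal decoding sketch, assuming the chunk model exposes an
        `audio_base64` field as in the public API reference:

        import base64

        for chunk in client.text_to_speech.stream_with_timestamps(
            voice_id="JBFqnCBsd6RMkjVDRZzb",
            text="The first move is what sets everything in motion.",
        ):
            audio_bytes = base64.b64decode(chunk.audio_base64)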
        """
        with self._client_wrapper.httpx_client.stream(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream/with-timestamps",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
                    for _text in _response.iter_lines():
                        try:
                            if len(_text) == 0:
                                continue
                            yield typing.cast(
                                StreamingAudioChunkWithTimestampsResponseModel,
                                construct_type(
                                    type_=StreamingAudioChunkWithTimestampsResponseModel,  # type: ignore
                                    object_=json.loads(_text),
                                ),
                            )
                        except Exception:
                            # Skip lines that fail to parse as a timestamp chunk.
                            # Catching Exception (rather than a bare except) avoids
                            # swallowing GeneratorExit when the consumer closes
                            # this generator mid-stream.
                            pass
                    return
                _response.read()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)


class AsyncTextToSpeechClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def convert(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[bytes]:
        """
        Converts text into speech using a voice of your choice and returns audio.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero-retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero-retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model must support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only to the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that had audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for the generation but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for the 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size` to customize the request and response.

        Yields
        ------
        typing.AsyncIterator[bytes]
            The generated audio file

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            # convert is an async generator; iterate over it instead of awaiting it
            async for chunk in client.text_to_speech.convert(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                output_format="mp3_44100_128",
                text="The first move is what sets everything in motion.",
                model_id="eleven_multilingual_v2",
            ):
                pass  # process each audio chunk as it arrives


        asyncio.run(main())
        """
        async with self._client_wrapper.httpx_client.stream(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
                    _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                    async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
                        yield _chunk
                    return
                await _response.aread()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)

    async def convert_with_timestamps(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> AudioWithTimestampsResponseModel:
        """
        Generate speech from text with precise character-level timing information for audio-text synchronization.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero-retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero-retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model must support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only to the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for generation, but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        AudioWithTimestampsResponseModel
            Successful Response

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            await client.text_to_speech.convert_with_timestamps(
                voice_id="21m00Tcm4TlvDq8ikWAM",
                text="This is a test for the API of ElevenLabs.",
            )


        asyncio.run(main())
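
        Continuing the example above, a minimal sketch of using the returned
        object. The attribute names below (audio_base_64, alignment and its
        per-character start times) are assumptions about
        AudioWithTimestampsResponseModel; check that model for the exact names:

        import base64


        async def decode(response) -> None:
            # assumed field: the full clip as a base64-encoded string
            with open("output.mp3", "wb") as f:
                f.write(base64.b64decode(response.audio_base_64))
            # assumed fields: characters paired with their start times in seconds
            for char, start in zip(
                response.alignment.characters,
                response.alignment.character_start_times_seconds,
            ):
                print(char, start)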
        """
        _response = await self._client_wrapper.httpx_client.request(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}/with-timestamps",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= _response.status_code < 300:
                return typing.cast(
                    AudioWithTimestampsResponseModel,
                    construct_type(
                        type_=AudioWithTimestampsResponseModel,  # type: ignore
                        object_=_response.json(),
                    ),
                )
            if _response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=_response.json(),
                        ),
                    )
                )
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def convert_as_stream(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[bytes]:
        """
        Converts text into speech using a voice of your choice and returns the audio as a stream.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model needs to support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only on the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for generation, but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration. You can pass in configuration such as `chunk_size` to customize the request and response.

        Yields
        ------
        typing.AsyncIterator[bytes]
            Streaming audio data

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            async for chunk in client.text_to_speech.convert_as_stream(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                output_format="mp3_44100_128",
                text="The first move is what sets everything in motion.",
                model_id="eleven_multilingual_v2",
            ):
                ...  # consume the streamed audio bytes


        asyncio.run(main())
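
        A sketch of saving the stream to a file; the chunk_size key in
        request_options is read by the streaming handler below, which defaults
        to 1024 bytes (the helper name is illustrative):

        async def save() -> None:
            with open("output.mp3", "wb") as f:
                async for chunk in client.text_to_speech.convert_as_stream(
                    voice_id="JBFqnCBsd6RMkjVDRZzb",
                    output_format="mp3_44100_128",
                    text="The first move is what sets everything in motion.",
                    model_id="eleven_multilingual_v2",
                    request_options={"chunk_size": 4096},
                ):
                    f.write(chunk)

        And a sketch of request stitching across two pieces of a longer text,
        using previous_text/next_text as described above (again, the helper
        name is illustrative):

        async def stitched() -> None:
            first = "The first move is what sets everything in motion."
            second = "But the second move is where the game is won."
            async for chunk in client.text_to_speech.convert_as_stream(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                text=first,
                next_text=second,
            ):
                ...
            async for chunk in client.text_to_speech.convert_as_stream(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                text=second,
                previous_text=first,
            ):
                ...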
        """
        async with self._client_wrapper.httpx_client.stream(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
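                    # honor a caller-supplied chunk size from request_options, defaulting to 1 KiB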
                    _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
                    async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
                        yield _chunk
                    return
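                # drain the body so the error branches below can parse it as JSON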
                await _response.aread()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)

    async def stream_with_timestamps(
        self,
        voice_id: str,
        *,
        text: str,
        enable_logging: typing.Optional[bool] = None,
        optimize_streaming_latency: typing.Optional[int] = None,
        output_format: typing.Optional[OutputFormat] = None,
        model_id: typing.Optional[str] = OMIT,
        language_code: typing.Optional[str] = OMIT,
        voice_settings: typing.Optional[VoiceSettings] = OMIT,
        pronunciation_dictionary_locators: typing.Optional[
            typing.Sequence[PronunciationDictionaryVersionLocator]
        ] = OMIT,
        seed: typing.Optional[int] = OMIT,
        previous_text: typing.Optional[str] = OMIT,
        next_text: typing.Optional[str] = OMIT,
        previous_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        next_request_ids: typing.Optional[typing.Sequence[str]] = OMIT,
        use_pvc_as_ivc: typing.Optional[bool] = OMIT,
        apply_text_normalization: typing.Optional[
            BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization
        ] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.AsyncIterator[StreamingAudioChunkWithTimestampsResponseModel]:
        """
        Converts text into speech using a voice of your choice and returns a stream of JSON objects, each containing a base64-encoded audio chunk together with timing information on when each character was spoken.

        Parameters
        ----------
        voice_id : str
            Voice ID to be used. You can use https://api.elevenlabs.io/v1/voices to list all the available voices.

        text : str
            The text that will get converted into speech.

        enable_logging : typing.Optional[bool]
            When enable_logging is set to false, zero retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers.

        optimize_streaming_latency : typing.Optional[int]
            You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
            0 - default mode (no latency optimizations)
            1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
            2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
            3 - max latency optimizations
            4 - max latency optimizations, but also with the text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).

            Defaults to None.

        output_format : typing.Optional[OutputFormat]
            The output format of the generated audio.

        model_id : typing.Optional[str]
            Identifier of the model that will be used. You can query available models using GET /v1/models. The model needs to support text to speech; you can check this using the can_do_text_to_speech property.

        language_code : typing.Optional[str]
            Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if a language code is provided.

        voice_settings : typing.Optional[VoiceSettings]
            Voice settings overriding stored settings for the given voice. They are applied only on the given request.

        pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]
            A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request.

        seed : typing.Optional[int]
            If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.

        previous_text : typing.Optional[str]
            The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.

        next_text : typing.Optional[str]
            The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating multiple generations, or to influence the continuity of the current generation.

        previous_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. If both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.

        next_request_ids : typing.Optional[typing.Sequence[str]]
            A list of request_ids of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips and want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. If both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.

        use_pvc_as_ivc : typing.Optional[bool]
            If true, we won't use the PVC version of the voice for generation, but the IVC version instead. This is a temporary workaround for higher latency in PVC versions.

        apply_text_normalization : typing.Optional[BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization]
            This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Yields
        ------
        typing.AsyncIterator[StreamingAudioChunkWithTimestampsResponseModel]
            Stream of JSON objects containing audio chunks and character timing information

        Examples
        --------
        import asyncio

        from elevenlabs import AsyncElevenLabs

        client = AsyncElevenLabs(
            api_key="YOUR_API_KEY",
        )


        async def main() -> None:
            response = client.text_to_speech.stream_with_timestamps(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                output_format="mp3_44100_128",
                text="The first move is what sets everything in motion.",
                model_id="eleven_multilingual_v2",
            )
            async for chunk in response:
                print(chunk)


        asyncio.run(main())
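
        A minimal sketch of assembling the audio from the streamed chunks. The
        attribute names below (audio_base_64 and alignment) are assumptions
        about StreamingAudioChunkWithTimestampsResponseModel; check that model
        for the exact names:

        import base64


        async def collect() -> bytes:
            audio = b""
            async for chunk in client.text_to_speech.stream_with_timestamps(
                voice_id="JBFqnCBsd6RMkjVDRZzb",
                text="The first move is what sets everything in motion.",
            ):
                # assumed field: each chunk carries a base64-encoded audio piece
                audio += base64.b64decode(chunk.audio_base_64)
                # assumed field: per-character timing, when present on the chunk
                if chunk.alignment is not None:
                    print(chunk.alignment.characters)
            return audio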
        """
        async with self._client_wrapper.httpx_client.stream(
            f"v1/text-to-speech/{jsonable_encoder(voice_id)}/stream/with-timestamps",
            method="POST",
            params={
                "enable_logging": enable_logging,
                "optimize_streaming_latency": optimize_streaming_latency,
                "output_format": output_format,
            },
            json={
                "text": text,
                "model_id": model_id,
                "language_code": language_code,
                "voice_settings": convert_and_respect_annotation_metadata(
                    object_=voice_settings, annotation=VoiceSettings, direction="write"
                ),
                "pronunciation_dictionary_locators": convert_and_respect_annotation_metadata(
                    object_=pronunciation_dictionary_locators,
                    annotation=typing.Sequence[PronunciationDictionaryVersionLocator],
                    direction="write",
                ),
                "seed": seed,
                "previous_text": previous_text,
                "next_text": next_text,
                "previous_request_ids": previous_request_ids,
                "next_request_ids": next_request_ids,
                "use_pvc_as_ivc": use_pvc_as_ivc,
                "apply_text_normalization": apply_text_normalization,
            },
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        ) as _response:
            try:
                if 200 <= _response.status_code < 300:
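                    # the endpoint streams newline-delimited JSON; parse each non-empty line into a typed chunk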
                    async for _text in _response.aiter_lines():
                        try:
                            if len(_text) == 0:
                                continue
                            yield typing.cast(
                                StreamingAudioChunkWithTimestampsResponseModel,
                                construct_type(
                                    type_=StreamingAudioChunkWithTimestampsResponseModel,  # type: ignore
                                    object_=json.loads(_text),
                                ),
                            )
                        except Exception:
                            # skip lines that cannot be parsed as a streaming chunk
                            continue
                    return
                await _response.aread()
                if _response.status_code == 422:
                    raise UnprocessableEntityError(
                        typing.cast(
                            HttpValidationError,
                            construct_type(
                                type_=HttpValidationError,  # type: ignore
                                object_=_response.json(),
                            ),
                        )
                    )
                _response_json = _response.json()
            except JSONDecodeError:
                raise ApiError(status_code=_response.status_code, body=_response.text)
            raise ApiError(status_code=_response.status_code, body=_response_json)
