Mirror of https://github.com/OMGeeky/google-apis-rs.git, synced 2026-01-04 10:32:16 +01:00

Commit: update dependencies

This commit refreshes the bundled Speech-to-Text v1 discovery document: relative documentation links become absolute `https://cloud.google.com/...` URLs, the `revision` is bumped from `20220221` to `20230119`, and the diff adds the new `ABNFGrammar` and `SpeechAdaptationInfo` schemas, new `requestId`/`speechAdaptationInfo` response fields, and descriptions for the `latest_long`/`latest_short` models.
@@ -187,7 +187,7 @@
 ],
 "parameters": {
   "parent": {
-    "description": "Required. The parent resource where this custom class will be created. Format: `projects/{project}/locations/{location}/customClasses` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
+    "description": "Required. The parent resource where this custom class will be created. Format: `projects/{project}/locations/{location}/customClasses` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
     "location": "path",
     "pattern": "^projects/[^/]+/locations/[^/]+$",
     "required": true,
@@ -215,7 +215,7 @@
 ],
 "parameters": {
   "name": {
-    "description": "Required. The name of the custom class to delete. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
+    "description": "Required. The name of the custom class to delete. Format: `projects/{project}/locations/{location}/customClasses/{custom_class}` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
     "location": "path",
     "pattern": "^projects/[^/]+/locations/[^/]+/customClasses/[^/]+$",
     "required": true,
@@ -276,7 +276,7 @@
     "type": "string"
   },
   "parent": {
-    "description": "Required. The parent, which owns this collection of custom classes. Format: `projects/{project}/locations/{location}/customClasses` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
+    "description": "Required. The parent, which owns this collection of custom classes. Format: `projects/{project}/locations/{location}/customClasses` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
     "location": "path",
     "pattern": "^projects/[^/]+/locations/[^/]+$",
     "required": true,
@@ -339,7 +339,7 @@
 ],
 "parameters": {
   "parent": {
-    "description": "Required. The parent resource where this phrase set will be created. Format: `projects/{project}/locations/{location}/phraseSets` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
+    "description": "Required. The parent resource where this phrase set will be created. Format: `projects/{project}/locations/{location}/phraseSets` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
     "location": "path",
     "pattern": "^projects/[^/]+/locations/[^/]+$",
     "required": true,
@@ -392,7 +392,7 @@
 ],
 "parameters": {
   "name": {
-    "description": "Required. The name of the phrase set to retrieve. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
+    "description": "Required. The name of the phrase set to retrieve. Format: `projects/{project}/locations/{location}/phraseSets/{phrase_set}` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
     "location": "path",
     "pattern": "^projects/[^/]+/locations/[^/]+/phraseSets/[^/]+$",
     "required": true,
@@ -428,7 +428,7 @@
     "type": "string"
   },
   "parent": {
-    "description": "Required. The parent, which owns this collection of phrase set. Format: `projects/{project}/locations/{location}` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
+    "description": "Required. The parent, which owns this collection of phrase set. Format: `projects/{project}/locations/{location}` Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
     "location": "path",
     "pattern": "^projects/[^/]+/locations/[^/]+$",
     "required": true,
@@ -524,9 +524,22 @@
       }
     }
   },
-  "revision": "20220221",
+  "revision": "20230119",
   "rootUrl": "https://speech.googleapis.com/",
   "schemas": {
+    "ABNFGrammar": {
+      "id": "ABNFGrammar",
+      "properties": {
+        "abnfStrings": {
+          "description": "All declarations and rules of an ABNF grammar broken up into multiple strings that will end up concatenated.",
+          "items": {
+            "type": "string"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
     "ClassItem": {
       "description": "An item of the class.",
       "id": "ClassItem",
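The `ABNFGrammar` schema added above carries a speech grammar as an array of strings that the service concatenates. A minimal sketch of such an object — the grammar text itself is illustrative, loosely following the W3C speech-grammar notation the schema's `SpeechAdaptation` description cites, and is not taken from the source:

```json
{
  "abnfStrings": [
    "#ABNF 1.0 UTF-8;",
    "language en-US;",
    "mode voice;",
    "root $direction;",
    "$direction = left | right | up | down;"
  ]
}
```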
@@ -547,7 +560,7 @@
     "description": "Required. The custom class to create."
   },
   "customClassId": {
-    "description": "Required. The ID to use for the custom class, which will become the final component of the custom class' resource name. This value should be 4-63 characters, and valid characters are /a-z-/.",
+    "description": "Required. The ID to use for the custom class, which will become the final component of the custom class' resource name. This value should restrict to letters, numbers, and hyphens, with the first character a letter, the last a letter or a number, and be 4-63 characters.",
     "type": "string"
   }
 },
@@ -562,7 +575,7 @@
     "description": "Required. The phrase set to create."
   },
   "phraseSetId": {
-    "description": "Required. The ID to use for the phrase set, which will become the final component of the phrase set's resource name. This value should be 4-63 characters, and valid characters are /a-z-/.",
+    "description": "Required. The ID to use for the phrase set, which will become the final component of the phrase set's resource name. This value should restrict to letters, numbers, and hyphens, with the first character a letter, the last a letter or a number, and be 4-63 characters.",
     "type": "string"
   }
 },
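Both reworded ID descriptions state the same rule: letters, numbers, and hyphens only, starting with a letter, ending with a letter or number, 4-63 characters long. A hypothetical `CreateCustomClassRequest` body that satisfies it — assuming the v1 `ClassItem` shape with a `value` string, which the diff elides:

```json
{
  "customClassId": "my-months",
  "customClass": {
    "items": [
      { "value": "january" },
      { "value": "february" }
    ]
  }
}
```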
@@ -591,7 +604,7 @@
   "type": "object"
 },
 "Empty": {
-  "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.",
+  "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }",
   "id": "Empty",
   "properties": {},
   "type": "object"
@@ -708,6 +721,11 @@
   "$ref": "Status",
   "description": "If the transcript output fails this field contains the relevant error."
 },
+"requestId": {
+  "description": "The ID associated with the request. This is a unique ID specific only to the given request.",
+  "format": "int64",
+  "type": "string"
+},
 "results": {
   "description": "Sequential list of transcription results corresponding to sequential portions of audio.",
   "items": {
@@ -715,6 +733,10 @@
   },
   "type": "array"
 },
+"speechAdaptationInfo": {
+  "$ref": "SpeechAdaptationInfo",
+  "description": "Provides information on speech adaptation behavior in response"
+},
 "totalBilledTime": {
   "description": "When available, billed audio seconds for the corresponding request.",
   "format": "google-duration",
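The two hunks above add `requestId` and `speechAdaptationInfo` to `LongRunningRecognizeResponse` (mirrored further down for `RecognizeResponse`). A sketch of a response fragment carrying the new fields — the `int64` request ID is serialized as a JSON string and `google-duration` as a `"30s"`-style string; the transcript content and the `SpeechRecognitionResult` alternatives shape are assumed for illustration:

```json
{
  "requestId": "8927374603489263881",
  "results": [
    {
      "alternatives": [
        { "transcript": "hello world", "confidence": 0.92 }
      ]
    }
  ],
  "speechAdaptationInfo": { "adaptationTimeout": false },
  "totalBilledTime": "30s"
}
```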
@@ -759,11 +781,11 @@
   "type": "object"
 },
 "Phrase": {
-  "description": "A phrases containing words and phrase \"hints\" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. See [usage limits](https://cloud.google.com/speech-to-text/quotas#content). List items can also include pre-built or custom classes containing groups of words that represent common concepts that occur in natural language. For example, rather than providing a phrase hint for every month of the year (e.g. \"i was born in january\", \"i was born in febuary\", ...), use the pre-built `$MONTH` class improves the likelihood of correctly transcribing audio that includes months (e.g. \"i was born in $month\"). To refer to pre-built classes, use the class' symbol prepended with `$` e.g. `$MONTH`. To refer to custom classes that were defined inline in the request, set the class's `custom_class_id` to a string unique to all class resources and inline classes. Then use the class' id wrapped in $`{...}` e.g. \"${my-months}\". To refer to custom classes resources, use the class' id wrapped in `${}` (e.g. `${my-months}`). Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
+  "description": "A phrases containing words and phrase \"hints\" so that the speech recognition is more likely to recognize them. This can be used to improve the accuracy for specific words and phrases, for example, if specific commands are typically spoken by the user. This can also be used to add additional words to the vocabulary of the recognizer. See [usage limits](https://cloud.google.com/speech-to-text/quotas#content). List items can also include pre-built or custom classes containing groups of words that represent common concepts that occur in natural language. For example, rather than providing a phrase hint for every month of the year (e.g. \"i was born in january\", \"i was born in febuary\", ...), use the pre-built `$MONTH` class improves the likelihood of correctly transcribing audio that includes months (e.g. \"i was born in $month\"). To refer to pre-built classes, use the class' symbol prepended with `$` e.g. `$MONTH`. To refer to custom classes that were defined inline in the request, set the class's `custom_class_id` to a string unique to all class resources and inline classes. Then use the class' id wrapped in $`{...}` e.g. \"${my-months}\". To refer to custom classes resources, use the class' id wrapped in `${}` (e.g. `${my-months}`). Speech-to-Text supports three locations: `global`, `us` (US North America), and `eu` (Europe). If you are calling the `speech.googleapis.com` endpoint, use the `global` location. To specify a region, use a [regional endpoint](https://cloud.google.com/speech-to-text/docs/endpoints) with matching `us` or `eu` location value.",
   "id": "Phrase",
   "properties": {
     "boost": {
-      "description": "Hint Boost. Overrides the boost set at the phrase set level. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Negative boost will simply be ignored. Though `boost` can accept a wide range of positive values, most use cases are best served with values between 0 and 20. We recommend using a binary search approach to finding the optimal value for your use case. Speech recognition will skip PhraseSets with a boost value of 0.",
+      "description": "Hint Boost. Overrides the boost set at the phrase set level. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Negative boost will simply be ignored. Though `boost` can accept a wide range of positive values, most use cases are best served with values between 0 and 20. We recommend using a binary search approach to finding the optimal value for your use case as well as adding phrases both with and without boost to your requests.",
       "format": "float",
       "type": "number"
     },
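The reworded `boost` guidance now recommends sending phrases both with and without boost rather than describing zero-boost skipping. A minimal `Phrase` sketch, assuming the v1 `value` field alongside the `boost` shown here, staying inside the recommended 0-20 range:

```json
{ "value": "weather forecast", "boost": 10.0 }
```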
@@ -779,7 +801,7 @@
 "id": "PhraseSet",
 "properties": {
   "boost": {
-    "description": "Hint Boost. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Negative boost values would correspond to anti-biasing. Anti-biasing is not enabled, so negative boost will simply be ignored. Though `boost` can accept a wide range of positive values, most use cases are best served with values between 0 (exclusive) and 20. We recommend using a binary search approach to finding the optimal value for your use case. Speech recognition will skip PhraseSets with a boost value of 0.",
+    "description": "Hint Boost. Positive value will increase the probability that a specific phrase will be recognized over other similar sounding phrases. The higher the boost, the higher the chance of false positive recognition as well. Negative boost values would correspond to anti-biasing. Anti-biasing is not enabled, so negative boost will simply be ignored. Though `boost` can accept a wide range of positive values, most use cases are best served with values between 0 (exclusive) and 20. We recommend using a binary search approach to finding the optimal value for your use case as well as adding phrases both with and without boost to your requests.",
     "format": "float",
     "type": "number"
   },
@@ -829,7 +851,7 @@
   "type": "array"
 },
 "audioChannelCount": {
-  "description": "The number of channels in the input audio data. ONLY set this for MULTI-CHANNEL recognition. Valid values for LINEAR16 and FLAC are `1`-`8`. Valid values for OGG_OPUS are '1'-'254'. Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to one channel (mono). Note: We only recognize the first channel by default. To perform independent recognition on each channel set `enable_separate_recognition_per_channel` to 'true'.",
+  "description": "The number of channels in the input audio data. ONLY set this for MULTI-CHANNEL recognition. Valid values for LINEAR16, OGG_OPUS and FLAC are `1`-`8`. Valid value for MULAW, AMR, AMR_WB and SPEEX_WITH_HEADER_BYTE is only `1`. If `0` or omitted, defaults to one channel (mono). Note: We only recognize the first channel by default. To perform independent recognition on each channel set `enable_separate_recognition_per_channel` to 'true'.",
   "format": "int32",
   "type": "integer"
 },
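Under the new wording, OGG_OPUS shares the `1`-`8` channel range with LINEAR16 and FLAC instead of its old `1`-`254`. A hypothetical multi-channel `RecognitionConfig` fragment, with the surrounding field names assumed from the v1 REST surface:

```json
{
  "encoding": "OGG_OPUS",
  "sampleRateHertz": 48000,
  "languageCode": "en-US",
  "audioChannelCount": 2,
  "enableSeparateRecognitionPerChannel": true
}
```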
@@ -901,7 +923,7 @@
   "description": "Metadata regarding this request."
 },
 "model": {
-  "description": "Which model to select for the given request. Select the model best suited to your domain to get best results. If a model is not explicitly specified, then we auto-select a model based on the parameters in the RecognitionConfig. *Model* *Description* command_and_search Best for short queries such as voice commands or voice search. phone_call Best for audio that originated from a phone call (typically recorded at an 8khz sampling rate). video Best for audio that originated from video or includes multiple speakers. Ideally the audio is recorded at a 16khz or greater sampling rate. This is a premium model that costs more than the standard rate. default Best for audio that is not one of the specific audio models. For example, long-form audio. Ideally the audio is high-fidelity, recorded at a 16khz or greater sampling rate. medical_conversation Best for audio that originated from a conversation between a medical provider and patient. medical_dictation Best for audio that originated from dictation notes by a medical provider. ",
+  "description": "Which model to select for the given request. Select the model best suited to your domain to get best results. If a model is not explicitly specified, then we auto-select a model based on the parameters in the RecognitionConfig. *Model* *Description* latest_long Best for long form content like media or conversation. latest_short Best for short form content like commands or single shot directed speech. command_and_search Best for short queries such as voice commands or voice search. phone_call Best for audio that originated from a phone call (typically recorded at an 8khz sampling rate). video Best for audio that originated from video or includes multiple speakers. Ideally the audio is recorded at a 16khz or greater sampling rate. This is a premium model that costs more than the standard rate. default Best for audio that is not one of the specific audio models. For example, long-form audio. Ideally the audio is high-fidelity, recorded at a 16khz or greater sampling rate. medical_conversation Best for audio that originated from a conversation between a medical provider and patient. medical_dictation Best for audio that originated from dictation notes by a medical provider. ",
   "type": "string"
 },
 "profanityFilter": {
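The updated `model` description documents the `latest_long` and `latest_short` models alongside the existing ones. Selecting a model is just a string on the config; a minimal sketch:

```json
{
  "languageCode": "en-US",
  "model": "latest_short"
}
```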
@@ -1048,6 +1070,11 @@
 "description": "The only message returned to the client by the `Recognize` method. It contains the result as zero or more sequential `SpeechRecognitionResult` messages.",
 "id": "RecognizeResponse",
 "properties": {
+  "requestId": {
+    "description": "The ID associated with the request. This is a unique ID specific only to the given request.",
+    "format": "int64",
+    "type": "string"
+  },
   "results": {
     "description": "Sequential list of transcription results corresponding to sequential portions of audio.",
     "items": {
@@ -1055,6 +1082,10 @@
   },
   "type": "array"
 },
+"speechAdaptationInfo": {
+  "$ref": "SpeechAdaptationInfo",
+  "description": "Provides information on adaptation behavior in response"
+},
 "totalBilledTime": {
   "description": "When available, billed audio seconds for the corresponding request.",
   "format": "google-duration",
@@ -1094,6 +1125,10 @@
 "description": "Speech adaptation configuration.",
 "id": "SpeechAdaptation",
 "properties": {
+  "abnfGrammar": {
+    "$ref": "ABNFGrammar",
+    "description": "Augmented Backus-Naur form (ABNF) is a standardized grammar notation comprised by a set of derivation rules. See specifications: https://www.w3.org/TR/speech-grammar"
+  },
   "customClasses": {
     "description": "A collection of custom classes. To specify the classes inline, leave the class' `name` blank and fill in the rest of its fields, giving it a unique `custom_class_id`. Refer to the inline defined class in phrase hints by its `custom_class_id`.",
     "items": {
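The new `abnfGrammar` field wires the `ABNFGrammar` schema from earlier into `SpeechAdaptation`, beside the existing `customClasses`. A hedged sketch of an adaptation block combining both, with the class defined inline via its `custom_class_id` as the neighboring description explains; the grammar and class contents are illustrative:

```json
{
  "abnfGrammar": {
    "abnfStrings": [
      "#ABNF 1.0 UTF-8;",
      "root $command;",
      "$command = lights (on | off);"
    ]
  },
  "customClasses": [
    {
      "customClassId": "my-months",
      "items": [{ "value": "january" }]
    }
  ]
}
```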
@@ -1118,6 +1153,21 @@
   },
   "type": "object"
 },
+"SpeechAdaptationInfo": {
+  "description": "Information on speech adaptation use in results",
+  "id": "SpeechAdaptationInfo",
+  "properties": {
+    "adaptationTimeout": {
+      "description": "Whether there was a timeout when applying speech adaptation. If true, adaptation had no effect in the response transcript.",
+      "type": "boolean"
+    },
+    "timeoutMessage": {
+      "description": "If set, returns a message specifying which part of the speech adaptation request timed out.",
+      "type": "string"
+    }
+  },
+  "type": "object"
+},
 "SpeechContext": {
   "description": "Provides \"hints\" to the speech recognizer to favor specific words and phrases in the results.",
   "id": "SpeechContext",
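When adaptation times out, the new `SpeechAdaptationInfo` message surfaces that in the response rather than failing the request. A sketch of the fragment a client might see, with the message text invented for illustration:

```json
{
  "speechAdaptationInfo": {
    "adaptationTimeout": true,
    "timeoutMessage": "Adaptation timed out while applying the ABNF grammar."
  }
}
```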