update API descriptions

This commit is contained in:
Sebastian Thiel
2020-07-10 09:11:32 +08:00
parent b6ee34dcff
commit 69fb05c4e1
271 changed files with 82506 additions and 23249 deletions

View File

@@ -128,7 +128,7 @@
}
}
},
"revision": "20200325",
"revision": "20200706",
"rootUrl": "https://videointelligence.googleapis.com/",
"schemas": {
"GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
@@ -169,7 +169,7 @@
"type": "number"
},
"name": {
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
"type": "string"
},
"value": {
@@ -189,7 +189,7 @@
"type": "number"
},
"name": {
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
"type": "string"
},
"point": {
@@ -204,7 +204,7 @@
"id": "GoogleCloudVideointelligenceV1_Entity",
"properties": {
"description": {
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
"type": "string"
},
"entityId": {
@@ -228,6 +228,10 @@
"$ref": "GoogleCloudVideointelligenceV1_ExplicitContentFrame"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -269,7 +273,7 @@
"id": "GoogleCloudVideointelligenceV1_LabelAnnotation",
"properties": {
"categoryEntities": {
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one category, e.g., `Terrier` could\nalso be a `pet`.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1_Entity"
},
@@ -292,6 +296,10 @@
"$ref": "GoogleCloudVideointelligenceV1_LabelSegment"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -440,6 +448,10 @@
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
"format": "int64",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -474,7 +486,7 @@
"type": "string"
},
"words": {
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1_WordInfo"
},
@@ -515,6 +527,10 @@
"text": {
"description": "The detected text.",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -623,7 +639,7 @@
"id": "GoogleCloudVideointelligenceV1_VideoAnnotationProgress",
"properties": {
"feature": {
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
"enum": [
"FEATURE_UNSPECIFIED",
"LABEL_DETECTION",
@@ -657,7 +673,7 @@
},
"segment": {
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment",
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
},
"startTime": {
"description": "Time when the request was received.",
@@ -714,14 +730,14 @@
"description": "Video segment on which the annotation is run."
},
"segmentLabelAnnotations": {
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
},
"type": "array"
},
"segmentPresenceLabelAnnotations": {
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
},
@@ -859,20 +875,20 @@
"type": "array"
},
"inputContent": {
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via `input_uri`.\nIf set, `input_uri` should be unset.",
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via the `input_uri`.\nIf set, `input_uri` must be unset.",
"format": "byte",
"type": "string"
},
"inputUri": {
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported. URIs must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nTo identify multiple videos, a video URI may include wildcards in the\n`object-id`. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` must be unset.",
"type": "string"
},
"locationId": {
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region\nis specified, a region will be determined based on video file location.",
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no\nregion is specified, the region will be determined based on video file\nlocation.",
"type": "string"
},
"outputUri": {
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported. These must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
"type": "string"
},
"videoContext": {
@@ -906,7 +922,7 @@
"type": "number"
},
"name": {
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
"type": "string"
},
"value": {
@@ -926,7 +942,7 @@
"type": "number"
},
"name": {
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
"type": "string"
},
"point": {
@@ -941,7 +957,7 @@
"id": "GoogleCloudVideointelligenceV1beta2_Entity",
"properties": {
"description": {
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
"type": "string"
},
"entityId": {
@@ -965,6 +981,10 @@
"$ref": "GoogleCloudVideointelligenceV1beta2_ExplicitContentFrame"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -1017,7 +1037,7 @@
"id": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation",
"properties": {
"categoryEntities": {
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one category, e.g., `Terrier` could\nalso be a `pet`.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1beta2_Entity"
},
@@ -1040,6 +1060,10 @@
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelSegment"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -1049,7 +1073,7 @@
"id": "GoogleCloudVideointelligenceV1beta2_LabelDetectionConfig",
"properties": {
"frameConfidenceThreshold": {
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold every time we release a new model.",
"format": "float",
"type": "number"
},
@@ -1074,11 +1098,11 @@
"type": "string"
},
"stationaryCamera": {
"description": "Whether the video has been shot from a stationary (i.e. non-moving) camera.\nWhen set to true, might improve detection accuracy for moving objects.\nShould be used with `SHOT_AND_FRAME_MODE` enabled.",
"description": "Whether the video has been shot from a stationary (i.e., non-moving)\ncamera. When set to true, might improve detection accuracy for moving\nobjects. Should be used with `SHOT_AND_FRAME_MODE` enabled.",
"type": "boolean"
},
"videoConfidenceThreshold": {
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it is set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it's set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold every time we release a new model.",
"format": "float",
"type": "number"
}
@@ -1229,6 +1253,10 @@
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
"format": "int64",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -1299,7 +1327,7 @@
"type": "string"
},
"words": {
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1beta2_WordInfo"
},
@@ -1348,7 +1376,7 @@
"type": "boolean"
},
"enableSpeakerDiarization": {
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive responses.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive response.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
"type": "boolean"
},
"enableWordConfidence": {
@@ -1392,6 +1420,10 @@
"text": {
"description": "The detected text.",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -1518,7 +1550,7 @@
"id": "GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress",
"properties": {
"feature": {
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
"enum": [
"FEATURE_UNSPECIFIED",
"LABEL_DETECTION",
@@ -1552,7 +1584,7 @@
},
"segment": {
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment",
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
},
"startTime": {
"description": "Time when the request was received.",
@@ -1609,14 +1641,14 @@
"description": "Video segment on which the annotation is run."
},
"segmentLabelAnnotations": {
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
},
"type": "array"
},
"segmentPresenceLabelAnnotations": {
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
},
@@ -1784,7 +1816,7 @@
"type": "number"
},
"name": {
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
"type": "string"
},
"value": {
@@ -1804,7 +1836,7 @@
"type": "number"
},
"name": {
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
"type": "string"
},
"point": {
@@ -1819,7 +1851,7 @@
"id": "GoogleCloudVideointelligenceV1p1beta1_Entity",
"properties": {
"description": {
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
"type": "string"
},
"entityId": {
@@ -1843,6 +1875,10 @@
"$ref": "GoogleCloudVideointelligenceV1p1beta1_ExplicitContentFrame"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -1884,7 +1920,7 @@
"id": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation",
"properties": {
"categoryEntities": {
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one category, e.g., `Terrier` could\nalso be a `pet`.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Entity"
},
@@ -1907,6 +1943,10 @@
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelSegment"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -2055,6 +2095,10 @@
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
"format": "int64",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -2089,7 +2133,7 @@
"type": "string"
},
"words": {
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p1beta1_WordInfo"
},
@@ -2130,6 +2174,10 @@
"text": {
"description": "The detected text.",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -2238,7 +2286,7 @@
"id": "GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress",
"properties": {
"feature": {
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
"enum": [
"FEATURE_UNSPECIFIED",
"LABEL_DETECTION",
@@ -2272,7 +2320,7 @@
},
"segment": {
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment",
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
},
"startTime": {
"description": "Time when the request was received.",
@@ -2329,14 +2377,14 @@
"description": "Video segment on which the annotation is run."
},
"segmentLabelAnnotations": {
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
},
"type": "array"
},
"segmentPresenceLabelAnnotations": {
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
},
@@ -2466,7 +2514,7 @@
"type": "number"
},
"name": {
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
"type": "string"
},
"value": {
@@ -2486,7 +2534,7 @@
"type": "number"
},
"name": {
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
"type": "string"
},
"point": {
@@ -2501,7 +2549,7 @@
"id": "GoogleCloudVideointelligenceV1p2beta1_Entity",
"properties": {
"description": {
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
"type": "string"
},
"entityId": {
@@ -2525,6 +2573,10 @@
"$ref": "GoogleCloudVideointelligenceV1p2beta1_ExplicitContentFrame"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -2566,7 +2618,7 @@
"id": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation",
"properties": {
"categoryEntities": {
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one category, e.g., `Terrier` could\nalso be a `pet`.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Entity"
},
@@ -2589,6 +2641,10 @@
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelSegment"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -2737,6 +2793,10 @@
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
"format": "int64",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -2771,7 +2831,7 @@
"type": "string"
},
"words": {
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p2beta1_WordInfo"
},
@@ -2812,6 +2872,10 @@
"text": {
"description": "The detected text.",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -2920,7 +2984,7 @@
"id": "GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress",
"properties": {
"feature": {
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
"enum": [
"FEATURE_UNSPECIFIED",
"LABEL_DETECTION",
@@ -2954,7 +3018,7 @@
},
"segment": {
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
},
"startTime": {
"description": "Time when the request was received.",
@@ -3011,14 +3075,14 @@
"description": "Video segment on which the annotation is run."
},
"segmentLabelAnnotations": {
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
},
"type": "array"
},
"segmentPresenceLabelAnnotations": {
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
},
@@ -3167,6 +3231,10 @@
"$ref": "GoogleCloudVideointelligenceV1p3beta1_CelebrityTrack"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -3199,7 +3267,7 @@
"type": "number"
},
"name": {
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
"type": "string"
},
"value": {
@@ -3219,7 +3287,7 @@
"type": "number"
},
"name": {
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
"type": "string"
},
"point": {
@@ -3234,7 +3302,7 @@
"id": "GoogleCloudVideointelligenceV1p3beta1_Entity",
"properties": {
"description": {
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
"type": "string"
},
"entityId": {
@@ -3258,6 +3326,10 @@
"$ref": "GoogleCloudVideointelligenceV1p3beta1_ExplicitContentFrame"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -3309,6 +3381,10 @@
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -3318,7 +3394,7 @@
"id": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation",
"properties": {
"categoryEntities": {
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one category, e.g., `Terrier` could\nalso be a `pet`.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Entity"
},
@@ -3341,6 +3417,10 @@
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelSegment"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -3489,6 +3569,10 @@
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
"format": "int64",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -3514,11 +3598,15 @@
"id": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation",
"properties": {
"tracks": {
"description": "The trackes that a person is detected.",
"description": "The detected tracks of a person.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
},
"type": "array"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -3553,7 +3641,7 @@
"type": "string"
},
"words": {
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p3beta1_WordInfo"
},
@@ -3589,7 +3677,7 @@
"description": "Streaming annotation results."
},
"annotationResultsUri": {
"description": "Cloud Storage URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
"description": "Google Cloud Storage URI that stores annotation results of one\nstreaming session in JSON format.\nIt is the annotation_result_storage_directory\nfrom the request followed by '/cloud_project_number-session_id'.",
"type": "string"
},
"error": {
@@ -3645,6 +3733,10 @@
"text": {
"description": "The detected text.",
"type": "string"
},
"version": {
"description": "Feature version.",
"type": "string"
}
},
"type": "object"
@@ -3753,7 +3845,7 @@
"id": "GoogleCloudVideointelligenceV1p3beta1_VideoAnnotationProgress",
"properties": {
"feature": {
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
"enum": [
"FEATURE_UNSPECIFIED",
"LABEL_DETECTION",
@@ -3793,7 +3885,7 @@
},
"segment": {
"$ref": "GoogleCloudVideointelligenceV1p3beta1_VideoSegment",
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
},
"startTime": {
"description": "Time when the request was received.",
@@ -3868,14 +3960,14 @@
"description": "Video segment on which the annotation is run."
},
"segmentLabelAnnotations": {
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
},
"type": "array"
},
"segmentPresenceLabelAnnotations": {
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
"items": {
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
},