mirror of
https://github.com/OMGeeky/google-apis-rs.git
synced 2026-02-23 15:49:49 +01:00
update API descriptions
This commit is contained in:
@@ -200,6 +200,39 @@
|
||||
"resources": {
|
||||
"locations": {
|
||||
"resources": {
|
||||
"corpura": {
|
||||
"resources": {
|
||||
"operations": {
|
||||
"methods": {
|
||||
"get": {
|
||||
"description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice.",
|
||||
"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/corpura/{corpuraId}/operations/{operationsId}",
|
||||
"httpMethod": "GET",
|
||||
"id": "videointelligence.projects.locations.corpura.operations.get",
|
||||
"parameterOrder": [
|
||||
"name"
|
||||
],
|
||||
"parameters": {
|
||||
"name": {
|
||||
"description": "The name of the operation resource.",
|
||||
"location": "path",
|
||||
"pattern": "^projects/[^/]+/locations/[^/]+/corpura/[^/]+/operations/[^/]+$",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"path": "v1/{+name}",
|
||||
"response": {
|
||||
"$ref": "GoogleLongrunning_Operation"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"operations": {
|
||||
"methods": {
|
||||
"cancel": {
|
||||
@@ -350,7 +383,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"revision": "20200325",
|
||||
"revision": "20200706",
|
||||
"rootUrl": "https://videointelligence.googleapis.com/",
|
||||
"schemas": {
|
||||
"GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
|
||||
@@ -399,20 +432,20 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputContent": {
|
||||
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via `input_uri`.\nIf set, `input_uri` should be unset.",
|
||||
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via the `input_uri`.\nIf set, `input_uri` must be unset.",
|
||||
"format": "byte",
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported. URIs must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nTo identify multiple videos, a video URI may include wildcards in the\n`object-id`. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` must be unset.",
|
||||
"type": "string"
|
||||
},
|
||||
"locationId": {
|
||||
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region\nis specified, a region will be determined based on video file location.",
|
||||
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no\nregion is specified, the region will be determined based on video file\nlocation.",
|
||||
"type": "string"
|
||||
},
|
||||
"outputUri": {
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported. These must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"type": "string"
|
||||
},
|
||||
"videoContext": {
|
||||
@@ -446,7 +479,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -466,7 +499,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -481,7 +514,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -505,6 +538,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -557,7 +594,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_Entity"
|
||||
},
|
||||
@@ -580,6 +617,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -589,7 +630,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_LabelDetectionConfig",
|
||||
"properties": {
|
||||
"frameConfidenceThreshold": {
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
@@ -614,11 +655,11 @@
|
||||
"type": "string"
|
||||
},
|
||||
"stationaryCamera": {
|
||||
"description": "Whether the video has been shot from a stationary (i.e. non-moving) camera.\nWhen set to true, might improve detection accuracy for moving objects.\nShould be used with `SHOT_AND_FRAME_MODE` enabled.",
|
||||
"description": "Whether the video has been shot from a stationary (i.e., non-moving)\ncamera. When set to true, might improve detection accuracy for moving\nobjects. Should be used with `SHOT_AND_FRAME_MODE` enabled.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"videoConfidenceThreshold": {
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it is set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it's set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
}
|
||||
@@ -769,6 +810,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -839,7 +884,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_WordInfo"
|
||||
},
|
||||
@@ -888,7 +933,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"enableSpeakerDiarization": {
|
||||
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive responses.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
|
||||
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive response.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"enableWordConfidence": {
|
||||
@@ -932,6 +977,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1058,7 +1107,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -1092,7 +1141,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -1149,14 +1198,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
|
||||
},
|
||||
@@ -1324,7 +1373,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -1344,7 +1393,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -1359,7 +1408,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -1383,6 +1432,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1424,7 +1477,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_Entity"
|
||||
},
|
||||
@@ -1447,6 +1500,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1595,6 +1652,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1629,7 +1690,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_WordInfo"
|
||||
},
|
||||
@@ -1670,6 +1731,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1778,7 +1843,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -1812,7 +1877,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -1869,14 +1934,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
|
||||
},
|
||||
@@ -2006,7 +2071,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -2026,7 +2091,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -2041,7 +2106,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -2065,6 +2130,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2106,7 +2175,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Entity"
|
||||
},
|
||||
@@ -2129,6 +2198,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2277,6 +2350,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2311,7 +2388,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_WordInfo"
|
||||
},
|
||||
@@ -2352,6 +2429,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2460,7 +2541,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -2494,7 +2575,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -2551,14 +2632,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
|
||||
},
|
||||
@@ -2688,7 +2769,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -2708,7 +2789,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -2723,7 +2804,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -2747,6 +2828,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2788,7 +2873,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Entity"
|
||||
},
|
||||
@@ -2811,6 +2896,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2959,6 +3048,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2993,7 +3086,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_WordInfo"
|
||||
},
|
||||
@@ -3034,6 +3127,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3142,7 +3239,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -3176,7 +3273,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -3233,14 +3330,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
|
||||
},
|
||||
@@ -3389,6 +3486,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_CelebrityTrack"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3421,7 +3522,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -3441,7 +3542,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -3456,7 +3557,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -3480,6 +3581,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3531,6 +3636,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3540,7 +3649,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Entity"
|
||||
},
|
||||
@@ -3563,6 +3672,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3711,6 +3824,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3736,11 +3853,15 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation",
|
||||
"properties": {
|
||||
"tracks": {
|
||||
"description": "The trackes that a person is detected.",
|
||||
"description": "The detected tracks of a person.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3775,7 +3896,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_WordInfo"
|
||||
},
|
||||
@@ -3811,7 +3932,7 @@
|
||||
"description": "Streaming annotation results."
|
||||
},
|
||||
"annotationResultsUri": {
|
||||
"description": "Cloud Storage URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
|
||||
"description": "Google Cloud Storage URI that stores annotation results of one\nstreaming session in JSON format.\nIt is the annotation_result_storage_directory\nfrom the request followed by '/cloud_project_number-session_id'.",
|
||||
"type": "string"
|
||||
},
|
||||
"error": {
|
||||
@@ -3867,6 +3988,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3975,7 +4100,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -4015,7 +4140,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -4090,14 +4215,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
|
||||
},
|
||||
|
||||
@@ -128,7 +128,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"revision": "20200325",
|
||||
"revision": "20200706",
|
||||
"rootUrl": "https://videointelligence.googleapis.com/",
|
||||
"schemas": {
|
||||
"GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
|
||||
@@ -169,7 +169,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -189,7 +189,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -204,7 +204,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -228,6 +228,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -269,7 +273,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_Entity"
|
||||
},
|
||||
@@ -292,6 +296,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -440,6 +448,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -474,7 +486,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_WordInfo"
|
||||
},
|
||||
@@ -515,6 +527,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -623,7 +639,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -657,7 +673,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -714,14 +730,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
|
||||
},
|
||||
@@ -859,20 +875,20 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputContent": {
|
||||
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via `input_uri`.\nIf set, `input_uri` should be unset.",
|
||||
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via the `input_uri`.\nIf set, `input_uri` must be unset.",
|
||||
"format": "byte",
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported. URIs must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nTo identify multiple videos, a video URI may include wildcards in the\n`object-id`. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` must be unset.",
|
||||
"type": "string"
|
||||
},
|
||||
"locationId": {
|
||||
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region\nis specified, a region will be determined based on video file location.",
|
||||
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no\nregion is specified, the region will be determined based on video file\nlocation.",
|
||||
"type": "string"
|
||||
},
|
||||
"outputUri": {
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported. These must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"type": "string"
|
||||
},
|
||||
"videoContext": {
|
||||
@@ -906,7 +922,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -926,7 +942,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -941,7 +957,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -965,6 +981,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1017,7 +1037,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_Entity"
|
||||
},
|
||||
@@ -1040,6 +1060,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1049,7 +1073,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_LabelDetectionConfig",
|
||||
"properties": {
|
||||
"frameConfidenceThreshold": {
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
@@ -1074,11 +1098,11 @@
|
||||
"type": "string"
|
||||
},
|
||||
"stationaryCamera": {
|
||||
"description": "Whether the video has been shot from a stationary (i.e. non-moving) camera.\nWhen set to true, might improve detection accuracy for moving objects.\nShould be used with `SHOT_AND_FRAME_MODE` enabled.",
|
||||
"description": "Whether the video has been shot from a stationary (i.e., non-moving)\ncamera. When set to true, might improve detection accuracy for moving\nobjects. Should be used with `SHOT_AND_FRAME_MODE` enabled.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"videoConfidenceThreshold": {
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it is set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it's set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
}
|
||||
@@ -1229,6 +1253,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1299,7 +1327,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_WordInfo"
|
||||
},
|
||||
@@ -1348,7 +1376,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"enableSpeakerDiarization": {
|
||||
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive responses.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
|
||||
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive response.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"enableWordConfidence": {
|
||||
@@ -1392,6 +1420,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1518,7 +1550,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -1552,7 +1584,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -1609,14 +1641,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
|
||||
},
|
||||
@@ -1784,7 +1816,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -1804,7 +1836,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -1819,7 +1851,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -1843,6 +1875,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1884,7 +1920,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Entity"
|
||||
},
|
||||
@@ -1907,6 +1943,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2055,6 +2095,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2089,7 +2133,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_WordInfo"
|
||||
},
|
||||
@@ -2130,6 +2174,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2238,7 +2286,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -2272,7 +2320,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -2329,14 +2377,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
|
||||
},
|
||||
@@ -2466,7 +2514,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -2486,7 +2534,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -2501,7 +2549,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -2525,6 +2573,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2566,7 +2618,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Entity"
|
||||
},
|
||||
@@ -2589,6 +2641,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2737,6 +2793,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2771,7 +2831,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_WordInfo"
|
||||
},
|
||||
@@ -2812,6 +2872,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2920,7 +2984,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -2954,7 +3018,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -3011,14 +3075,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
|
||||
},
|
||||
@@ -3167,6 +3231,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_CelebrityTrack"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3199,7 +3267,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -3219,7 +3287,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -3234,7 +3302,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -3258,6 +3326,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3309,6 +3381,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3318,7 +3394,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Entity"
|
||||
},
|
||||
@@ -3341,6 +3417,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3489,6 +3569,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3514,11 +3598,15 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation",
|
||||
"properties": {
|
||||
"tracks": {
|
||||
"description": "The trackes that a person is detected.",
|
||||
"description": "The detected tracks of a person.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3553,7 +3641,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_WordInfo"
|
||||
},
|
||||
@@ -3589,7 +3677,7 @@
|
||||
"description": "Streaming annotation results."
|
||||
},
|
||||
"annotationResultsUri": {
|
||||
"description": "Cloud Storage URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
|
||||
"description": "Google Cloud Storage URI that stores annotation results of one\nstreaming session in JSON format.\nIt is the annotation_result_storage_directory\nfrom the request followed by '/cloud_project_number-session_id'.",
|
||||
"type": "string"
|
||||
},
|
||||
"error": {
|
||||
@@ -3645,6 +3733,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3753,7 +3845,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -3793,7 +3885,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -3868,14 +3960,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
|
||||
},
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
},
|
||||
"id": "videointelligence:v1p1beta1",
|
||||
"kind": "discovery#restDescription",
|
||||
"mtlsRootUrl": "https://videointelligence.mtls.googleapis.com/",
|
||||
"name": "videointelligence",
|
||||
"ownerDomain": "google.com",
|
||||
"ownerName": "Google",
|
||||
@@ -128,7 +127,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"revision": "20200325",
|
||||
"revision": "20191230",
|
||||
"rootUrl": "https://videointelligence.googleapis.com/",
|
||||
"schemas": {
|
||||
"GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
|
||||
@@ -159,46 +158,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1_Entity",
|
||||
@@ -329,31 +288,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1_NormalizedBoundingBox",
|
||||
@@ -558,66 +492,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1_VideoAnnotationProgress",
|
||||
@@ -631,8 +505,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -641,13 +514,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -692,16 +564,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -841,46 +706,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_Entity",
|
||||
@@ -1011,31 +836,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox",
|
||||
@@ -1240,66 +1040,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress",
|
||||
@@ -1313,8 +1053,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -1323,13 +1062,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -1374,16 +1112,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -1522,8 +1253,7 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"items": {
|
||||
"enum": [
|
||||
@@ -1533,8 +1263,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
@@ -1546,7 +1275,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
|
||||
"description": "Input video location. Currently, only\n[Google Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](/storage/docs/reference-uris).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
|
||||
"type": "string"
|
||||
},
|
||||
"locationId": {
|
||||
@@ -1554,7 +1283,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"outputUri": {
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Google Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](/storage/docs/reference-uris).",
|
||||
"type": "string"
|
||||
},
|
||||
"videoContext": {
|
||||
@@ -1578,46 +1307,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_Entity",
|
||||
@@ -1800,31 +1489,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox",
|
||||
@@ -2135,66 +1799,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress",
|
||||
@@ -2208,8 +1812,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -2218,13 +1821,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -2269,16 +1871,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -2456,46 +2051,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_Entity",
|
||||
@@ -2626,31 +2181,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_NormalizedBoundingBox",
|
||||
@@ -2855,66 +2385,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress",
|
||||
@@ -2928,8 +2398,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -2938,13 +2407,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -2989,16 +2457,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -3209,26 +2670,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_Entity",
|
||||
@@ -3294,25 +2735,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_FaceDetectionAnnotation": {
|
||||
"description": "Face detection annotation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_FaceDetectionAnnotation",
|
||||
"properties": {
|
||||
"thumbnail": {
|
||||
"description": "The thumbnail of a person's face.",
|
||||
"format": "byte",
|
||||
"type": "string"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "The face tracks with attributes.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation": {
|
||||
"description": "Label annotation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation",
|
||||
@@ -3509,20 +2931,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation": {
|
||||
"description": "Person detection annotation per video.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation",
|
||||
"properties": {
|
||||
"tracks": {
|
||||
"description": "The trackes that a person is detected.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_RecognizedCelebrity": {
|
||||
"description": "The recognized celebrity with confidence score.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_RecognizedCelebrity",
|
||||
@@ -3589,7 +2997,7 @@
|
||||
"description": "Streaming annotation results."
|
||||
},
|
||||
"annotationResultsUri": {
|
||||
"description": "Cloud Storage URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
|
||||
"description": "GCS URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
|
||||
"type": "string"
|
||||
},
|
||||
"error": {
|
||||
@@ -3699,13 +3107,6 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
@@ -3759,31 +3160,27 @@
|
||||
"LABEL_DETECTION",
|
||||
"SHOT_CHANGE_DETECTION",
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"FACE_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION",
|
||||
"CELEBRITY_RECOGNITION",
|
||||
"PERSON_DETECTION"
|
||||
"CELEBRITY_RECOGNITION"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
"Label detection. Detect objects, such as dog or flower.",
|
||||
"Shot change detection.",
|
||||
"Explicit content detection.",
|
||||
"Human face detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition.",
|
||||
"Celebrity recognition.",
|
||||
"Person detection."
|
||||
"Celebrity recognition."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -3824,13 +3221,6 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_ExplicitContentAnnotation",
|
||||
"description": "Explicit content annotation."
|
||||
},
|
||||
"faceDetectionAnnotations": {
|
||||
"description": "Face detection annotations.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_FaceDetectionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"frameLabelAnnotations": {
|
||||
"description": "Label annotations on frame level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
@@ -3839,7 +3229,7 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
@@ -3856,13 +3246,6 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"personDetectionAnnotations": {
|
||||
"description": "Person detection annotations.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_VideoSegment",
|
||||
"description": "Video segment on which the annotation is run."
|
||||
|
||||
@@ -22,7 +22,6 @@
|
||||
},
|
||||
"id": "videointelligence:v1p2beta1",
|
||||
"kind": "discovery#restDescription",
|
||||
"mtlsRootUrl": "https://videointelligence.mtls.googleapis.com/",
|
||||
"name": "videointelligence",
|
||||
"ownerDomain": "google.com",
|
||||
"ownerName": "Google",
|
||||
@@ -128,7 +127,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"revision": "20200325",
|
||||
"revision": "20191230",
|
||||
"rootUrl": "https://videointelligence.googleapis.com/",
|
||||
"schemas": {
|
||||
"GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
|
||||
@@ -159,46 +158,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1_Entity",
|
||||
@@ -329,31 +288,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1_NormalizedBoundingBox",
|
||||
@@ -558,66 +492,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1_VideoAnnotationProgress",
|
||||
@@ -631,8 +505,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -641,13 +514,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -692,16 +564,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -841,46 +706,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_Entity",
|
||||
@@ -1011,31 +836,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox",
|
||||
@@ -1240,66 +1040,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress",
|
||||
@@ -1313,8 +1053,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -1323,13 +1062,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -1374,16 +1112,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -1523,46 +1254,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_Entity",
|
||||
@@ -1693,31 +1384,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox",
|
||||
@@ -1922,66 +1588,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress",
|
||||
@@ -1995,8 +1601,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -2005,13 +1610,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -2056,16 +1660,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -2204,8 +1801,7 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"items": {
|
||||
"enum": [
|
||||
@@ -2215,8 +1811,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
@@ -2228,7 +1823,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
|
||||
"description": "Input video location. Currently, only\n[Google Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](/storage/docs/reference-uris).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
|
||||
"type": "string"
|
||||
},
|
||||
"locationId": {
|
||||
@@ -2236,7 +1831,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"outputUri": {
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Google Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](/storage/docs/reference-uris).",
|
||||
"type": "string"
|
||||
},
|
||||
"videoContext": {
|
||||
@@ -2260,46 +1855,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute": {
|
||||
"description": "A generic detected attribute represented by name in string format.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "Detected attribute confidence. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
"description": "Text value of the detection result. For example, the value for \"HairColor\"\ncan be \"black\", \"blonde\", etc.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_Entity",
|
||||
@@ -2482,31 +2037,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_LogoRecognitionAnnotation": {
|
||||
"description": "Annotation corresponding to one detected, tracked and recognized logo class.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_LogoRecognitionAnnotation",
|
||||
"properties": {
|
||||
"entity": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Entity",
|
||||
"description": "Entity category information to specify the logo class that all the logo\ntracks within this LogoRecognitionAnnotation are recognized as."
|
||||
},
|
||||
"segments": {
|
||||
"description": "All video segments where the recognized logo appears. There might be\nmultiple instances of the same logo class appearing in one VideoSegment.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "All logo tracks where the recognized logo appears. Each track corresponds\nto one logo instance appearing in consecutive frames.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_NormalizedBoundingBox": {
|
||||
"description": "Normalized bounding box.\nThe normalized vertex coordinates are relative to the original image.\nRange: [0, 1].",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_NormalizedBoundingBox",
|
||||
@@ -2817,66 +2347,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_TimestampedObject": {
|
||||
"description": "For tracking related features.\nAn object at time_offset with attributes, and located with\nnormalized_bounding_box.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_TimestampedObject",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. The attributes of the object in the bounding box.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
},
|
||||
"timeOffset": {
|
||||
"description": "Time-offset, relative to the beginning of the video,\ncorresponding to the video frame for this object.",
|
||||
"format": "google-duration",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_Track": {
|
||||
"description": "A track of an object instance.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_Track",
|
||||
"properties": {
|
||||
"attributes": {
|
||||
"description": "Optional. Attributes in the track level.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_DetectedAttribute"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"confidence": {
|
||||
"description": "Optional. The confidence score of the tracked object.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
|
||||
"description": "Video segment of a track."
|
||||
},
|
||||
"timestampedObjects": {
|
||||
"description": "The object with timestamp and attributes per frame in the track.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_TimestampedObject"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress": {
|
||||
"description": "Annotation progress for a single video.",
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress",
|
||||
@@ -2890,8 +2360,7 @@
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION"
|
||||
"OBJECT_TRACKING"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
@@ -2900,13 +2369,12 @@
|
||||
"Explicit content detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition."
|
||||
"Object detection and tracking."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -2951,16 +2419,9 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
"description": "Annotations for list of logos detected, tracked and recognized in video.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LogoRecognitionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"objectAnnotations": {
|
||||
"description": "Annotations for list of objects detected and tracked in video.",
|
||||
"items": {
|
||||
@@ -3209,26 +2670,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_DetectedLandmark": {
|
||||
"description": "A generic detected landmark represented by name in string format and a 2D\nlocation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_DetectedLandmark",
|
||||
"properties": {
|
||||
"confidence": {
|
||||
"description": "The confidence score of the detected landmark. Range [0, 1].",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_NormalizedVertex",
|
||||
"description": "The 2D point of the detected landmark using the normalized image\ncoordindate system. The normalized coordinates have the range from 0 to 1."
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_Entity": {
|
||||
"description": "Detected entity from video analysis.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_Entity",
|
||||
@@ -3294,25 +2735,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_FaceDetectionAnnotation": {
|
||||
"description": "Face detection annotation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_FaceDetectionAnnotation",
|
||||
"properties": {
|
||||
"thumbnail": {
|
||||
"description": "The thumbnail of a person's face.",
|
||||
"format": "byte",
|
||||
"type": "string"
|
||||
},
|
||||
"tracks": {
|
||||
"description": "The face tracks with attributes.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation": {
|
||||
"description": "Label annotation.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation",
|
||||
@@ -3509,20 +2931,6 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation": {
|
||||
"description": "Person detection annotation per video.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation",
|
||||
"properties": {
|
||||
"tracks": {
|
||||
"description": "The trackes that a person is detected.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"GoogleCloudVideointelligenceV1p3beta1_RecognizedCelebrity": {
|
||||
"description": "The recognized celebrity with confidence score.",
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_RecognizedCelebrity",
|
||||
@@ -3589,7 +2997,7 @@
|
||||
"description": "Streaming annotation results."
|
||||
},
|
||||
"annotationResultsUri": {
|
||||
"description": "Cloud Storage URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
|
||||
"description": "GCS URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
|
||||
"type": "string"
|
||||
},
|
||||
"error": {
|
||||
@@ -3699,13 +3107,6 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"landmarks": {
|
||||
"description": "Optional. The detected landmarks.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_DetectedLandmark"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"normalizedBoundingBox": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_NormalizedBoundingBox",
|
||||
"description": "Normalized Bounding box in a frame, where the object is located."
|
||||
@@ -3759,31 +3160,27 @@
|
||||
"LABEL_DETECTION",
|
||||
"SHOT_CHANGE_DETECTION",
|
||||
"EXPLICIT_CONTENT_DETECTION",
|
||||
"FACE_DETECTION",
|
||||
"SPEECH_TRANSCRIPTION",
|
||||
"TEXT_DETECTION",
|
||||
"OBJECT_TRACKING",
|
||||
"LOGO_RECOGNITION",
|
||||
"CELEBRITY_RECOGNITION",
|
||||
"PERSON_DETECTION"
|
||||
"CELEBRITY_RECOGNITION"
|
||||
],
|
||||
"enumDescriptions": [
|
||||
"Unspecified.",
|
||||
"Label detection. Detect objects, such as dog or flower.",
|
||||
"Shot change detection.",
|
||||
"Explicit content detection.",
|
||||
"Human face detection.",
|
||||
"Speech transcription.",
|
||||
"OCR text detection and tracking.",
|
||||
"Object detection and tracking.",
|
||||
"Logo detection, tracking, and recognition.",
|
||||
"Celebrity recognition.",
|
||||
"Person detection."
|
||||
"Celebrity recognition."
|
||||
],
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"progressPercent": {
|
||||
@@ -3824,13 +3221,6 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_ExplicitContentAnnotation",
|
||||
"description": "Explicit content annotation."
|
||||
},
|
||||
"faceDetectionAnnotations": {
|
||||
"description": "Face detection annotations.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_FaceDetectionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"frameLabelAnnotations": {
|
||||
"description": "Label annotations on frame level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
@@ -3839,7 +3229,7 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Video file location in\n[Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"description": "Video file location in\n[Google Cloud Storage](https://cloud.google.com/storage/).",
|
||||
"type": "string"
|
||||
},
|
||||
"logoRecognitionAnnotations": {
|
||||
@@ -3856,13 +3246,6 @@
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"personDetectionAnnotations": {
|
||||
"description": "Person detection annotations.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_VideoSegment",
|
||||
"description": "Video segment on which the annotation is run."
|
||||
|
||||
@@ -128,7 +128,7 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"revision": "20200325",
|
||||
"revision": "20200615",
|
||||
"rootUrl": "https://videointelligence.googleapis.com/",
|
||||
"schemas": {
|
||||
"GoogleCloudVideointelligenceV1_AnnotateVideoProgress": {
|
||||
@@ -169,7 +169,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -189,7 +189,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -204,7 +204,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -228,6 +228,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -269,7 +273,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_Entity"
|
||||
},
|
||||
@@ -292,6 +296,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -440,6 +448,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -474,7 +486,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_WordInfo"
|
||||
},
|
||||
@@ -515,6 +527,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -623,7 +639,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -657,7 +673,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -714,14 +730,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1_LabelAnnotation"
|
||||
},
|
||||
@@ -851,7 +867,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -871,7 +887,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -886,7 +902,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -910,6 +926,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -951,7 +971,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_Entity"
|
||||
},
|
||||
@@ -974,6 +994,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1122,6 +1146,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1156,7 +1184,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_WordInfo"
|
||||
},
|
||||
@@ -1197,6 +1225,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1305,7 +1337,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1beta2_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -1339,7 +1371,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -1396,14 +1428,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1beta2_LabelAnnotation"
|
||||
},
|
||||
@@ -1533,7 +1565,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -1553,7 +1585,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -1568,7 +1600,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -1592,6 +1624,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1633,7 +1669,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_Entity"
|
||||
},
|
||||
@@ -1656,6 +1692,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1804,6 +1844,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1838,7 +1882,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_WordInfo"
|
||||
},
|
||||
@@ -1879,6 +1923,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -1987,7 +2035,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p1beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -2021,7 +2069,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -2078,14 +2126,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p1beta1_LabelAnnotation"
|
||||
},
|
||||
@@ -2215,7 +2263,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -2235,7 +2283,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -2250,7 +2298,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -2274,6 +2322,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2315,7 +2367,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one categories e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_Entity"
|
||||
},
|
||||
@@ -2338,6 +2390,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2486,6 +2542,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2520,7 +2580,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_WordInfo"
|
||||
},
|
||||
@@ -2561,6 +2621,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -2669,7 +2733,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p2beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -2703,7 +2767,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -2760,14 +2824,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p2beta1_LabelAnnotation"
|
||||
},
|
||||
@@ -2911,20 +2975,20 @@
|
||||
"type": "array"
|
||||
},
|
||||
"inputContent": {
|
||||
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via `input_uri`.\nIf set, `input_uri` should be unset.",
|
||||
"description": "The video data bytes.\nIf unset, the input video(s) should be specified via the `input_uri`.\nIf set, `input_uri` must be unset.",
|
||||
"format": "byte",
|
||||
"type": "string"
|
||||
},
|
||||
"inputUri": {
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nA video URI may include wildcards in `object-id`, and thus identify\nmultiple videos. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` should be unset.",
|
||||
"description": "Input video location. Currently, only\n[Cloud Storage](https://cloud.google.com/storage/) URIs are\nsupported. URIs must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).\nTo identify multiple videos, a video URI may include wildcards in the\n`object-id`. Supported wildcards: '*' to match 0 or more characters;\n'?' to match 1 character. If unset, the input video should be embedded\nin the request as `input_content`. If set, `input_content` must be unset.",
|
||||
"type": "string"
|
||||
},
|
||||
"locationId": {
|
||||
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region\nis specified, a region will be determined based on video file location.",
|
||||
"description": "Optional. Cloud region where annotation should take place. Supported cloud\nregions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no\nregion is specified, the region will be determined based on video file\nlocation.",
|
||||
"type": "string"
|
||||
},
|
||||
"outputUri": {
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported, which must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"description": "Optional. Location where the output (in JSON format) should be stored.\nCurrently, only [Cloud Storage](https://cloud.google.com/storage/)\nURIs are supported. These must be specified in the following format:\n`gs://bucket-id/object-id` (other URI formats return\ngoogle.rpc.Code.INVALID_ARGUMENT). For more information, see\n[Request URIs](https://cloud.google.com/storage/docs/request-endpoints).",
|
||||
"type": "string"
|
||||
},
|
||||
"videoContext": {
|
||||
@@ -2977,6 +3041,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_CelebrityTrack"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3009,7 +3077,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.\nA full list of supported type names will be provided in the document.",
|
||||
"description": "The name of the attribute, for example, glasses, dark_glasses, mouth_open.\nA full list of supported type names will be provided in the document.",
|
||||
"type": "string"
|
||||
},
|
||||
"value": {
|
||||
@@ -3029,7 +3097,7 @@
|
||||
"type": "number"
|
||||
},
|
||||
"name": {
|
||||
"description": "The name of this landmark, i.e. left_hand, right_shoulder.",
|
||||
"description": "The name of this landmark, for example, left_hand, right_shoulder.",
|
||||
"type": "string"
|
||||
},
|
||||
"point": {
|
||||
@@ -3044,7 +3112,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_Entity",
|
||||
"properties": {
|
||||
"description": {
|
||||
"description": "Textual description, e.g. `Fixed-gear bicycle`.",
|
||||
"description": "Textual description, e.g., `Fixed-gear bicycle`.",
|
||||
"type": "string"
|
||||
},
|
||||
"entityId": {
|
||||
@@ -3068,6 +3136,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_ExplicitContentFrame"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3130,6 +3202,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3139,11 +3215,11 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_FaceDetectionConfig",
|
||||
"properties": {
|
||||
"includeAttributes": {
|
||||
"description": "Whether to enable face attributes detection, such as glasses, dark_glasses,\nmouth_open etc. Ignored if 'include_bounding_boxes' is false.",
|
||||
"description": "Whether to enable face attributes detection, such as glasses, dark_glasses,\nmouth_open etc. Ignored if 'include_bounding_boxes' is set to false.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"includeBoundingBoxes": {
|
||||
"description": "Whether bounding boxes be included in the face annotation output.",
|
||||
"description": "Whether bounding boxes are included in the face annotation output.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"model": {
|
||||
@@ -3158,7 +3234,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation",
|
||||
"properties": {
|
||||
"categoryEntities": {
|
||||
"description": "Common categories for the detected entity.\nE.g. when the label is `Terrier` the category is likely `dog`. And in some\ncases there might be more than one categories e.g. `Terrier` could also be\na `pet`.",
|
||||
"description": "Common categories for the detected entity.\nFor example, when the label is `Terrier`, the category is likely `dog`. And\nin some cases there might be more than one category, e.g., `Terrier` could\nalso be a `pet`.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Entity"
|
||||
},
|
||||
@@ -3181,6 +3257,10 @@
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelSegment"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3190,7 +3270,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_LabelDetectionConfig",
|
||||
"properties": {
|
||||
"frameConfidenceThreshold": {
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nframe-level detection. If not set, it is set to 0.4 by default. The valid\nrange for this threshold is [0.1, 0.9]. Any value set outside of this\nrange will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold every time we release a new model.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
},
|
||||
@@ -3215,11 +3295,11 @@
|
||||
"type": "string"
|
||||
},
|
||||
"stationaryCamera": {
|
||||
"description": "Whether the video has been shot from a stationary (i.e. non-moving) camera.\nWhen set to true, might improve detection accuracy for moving objects.\nShould be used with `SHOT_AND_FRAME_MODE` enabled.",
|
||||
"description": "Whether the video has been shot from a stationary (i.e., non-moving)\ncamera. When set to true, might improve detection accuracy for moving\nobjects. Should be used with `SHOT_AND_FRAME_MODE` enabled.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"videoConfidenceThreshold": {
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it is set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: for best results please follow the default threshold. We will update\nthe default threshold everytime when we release a new model.",
|
||||
"description": "The confidence threshold we perform filtering on the labels from\nvideo-level and shot-level detections. If not set, it's set to 0.3 by\ndefault. The valid range for this threshold is [0.1, 0.9]. Any value set\noutside of this range will be clipped.\nNote: For best results, follow the default threshold. We will update\nthe default threshold every time we release a new model.",
|
||||
"format": "float",
|
||||
"type": "number"
|
||||
}
|
||||
@@ -3370,6 +3450,10 @@
|
||||
"description": "Streaming mode ONLY.\nIn streaming mode, we do not know the end time of a tracked object\nbefore it is completed. Hence, there is no VideoSegment info returned.\nInstead, we provide a unique identifiable integer track_id so that\nthe customers can correlate the results of the ongoing\nObjectTrackAnnotation of the same track_id over time.",
|
||||
"format": "int64",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3406,11 +3490,15 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionAnnotation",
|
||||
"properties": {
|
||||
"tracks": {
|
||||
"description": "The trackes that a person is detected.",
|
||||
"description": "The detected tracks of a person.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_Track"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3420,15 +3508,15 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_PersonDetectionConfig",
|
||||
"properties": {
|
||||
"includeAttributes": {
|
||||
"description": "Whether to enable person attributes detection, such as cloth color (black,\nblue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair\ncolor (black, blonde, etc), hair length (long, short, bald), etc.\nIgnored if 'include_bounding_boxes' is false.",
|
||||
"description": "Whether to enable person attributes detection, such as cloth color (black,\nblue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,\netc.\nIgnored if 'include_bounding_boxes' is set to false.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"includeBoundingBoxes": {
|
||||
"description": "Whether bounding boxes be included in the person detection annotation\noutput.",
|
||||
"description": "Whether bounding boxes are included in the person detection annotation\noutput.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"includePoseLandmarks": {
|
||||
"description": "Whether to enable pose landmarks detection. Ignored if\n'include_bounding_boxes' is false.",
|
||||
"description": "Whether to enable pose landmarks detection. Ignored if\n'include_bounding_boxes' is set to false.",
|
||||
"type": "boolean"
|
||||
}
|
||||
},
|
||||
@@ -3489,7 +3577,7 @@
|
||||
"type": "string"
|
||||
},
|
||||
"words": {
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is true, you will see all the words\nfrom the beginning of the audio.",
|
||||
"description": "Output only. A list of word-specific information for each recognized word.\nNote: When `enable_speaker_diarization` is set to true, you will see all\nthe words from the beginning of the audio.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_WordInfo"
|
||||
},
|
||||
@@ -3538,7 +3626,7 @@
|
||||
"type": "boolean"
|
||||
},
|
||||
"enableSpeakerDiarization": {
|
||||
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive responses.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
|
||||
"description": "Optional. If 'true', enables speaker detection for each recognized word in\nthe top alternative of the recognition result using a speaker_tag provided\nin the WordInfo.\nNote: When this is true, we send all the words from the beginning of the\naudio for the top alternative in every consecutive response.\nThis is done in order to improve our speaker tags as our models learn to\nidentify the speakers in the conversation over time.",
|
||||
"type": "boolean"
|
||||
},
|
||||
"enableWordConfidence": {
|
||||
@@ -3577,7 +3665,7 @@
|
||||
"description": "Streaming annotation results."
|
||||
},
|
||||
"annotationResultsUri": {
|
||||
"description": "Cloud Storage URI that stores annotation results of one streaming session.\nIt is a directory that can hold multiple files in JSON format.\nExample uri format:\ngs://bucket_id/object_id/cloud_project_name-session_id",
|
||||
"description": "Google Cloud Storage URI that stores annotation results of one\nstreaming session in JSON format.\nIt is the annotation_result_storage_directory\nfrom the request followed by '/cloud_project_number-session_id'.",
|
||||
"type": "string"
|
||||
},
|
||||
"error": {
|
||||
@@ -3633,6 +3721,10 @@
|
||||
"text": {
|
||||
"description": "The detected text.",
|
||||
"type": "string"
|
||||
},
|
||||
"version": {
|
||||
"description": "Feature version.",
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
@@ -3759,7 +3851,7 @@
|
||||
"id": "GoogleCloudVideointelligenceV1p3beta1_VideoAnnotationProgress",
|
||||
"properties": {
|
||||
"feature": {
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none features.",
|
||||
"description": "Specifies which feature is being tracked if the request contains more than\none feature.",
|
||||
"enum": [
|
||||
"FEATURE_UNSPECIFIED",
|
||||
"LABEL_DETECTION",
|
||||
@@ -3799,7 +3891,7 @@
|
||||
},
|
||||
"segment": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_VideoSegment",
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segments."
|
||||
"description": "Specifies which segment is being tracked if the request contains more than\none segment."
|
||||
},
|
||||
"startTime": {
|
||||
"description": "Time when the request was received.",
|
||||
@@ -3874,14 +3966,14 @@
|
||||
"description": "Video segment on which the annotation is run."
|
||||
},
|
||||
"segmentLabelAnnotations": {
|
||||
"description": "Topical label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"description": "Topical label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"segmentPresenceLabelAnnotations": {
|
||||
"description": "Presence label annotations on video level or user specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"description": "Presence label annotations on video level or user-specified segment level.\nThere is exactly one element for each unique label. Compared to the\nexisting topical `segment_label_annotations`, this field presents more\nfine-grained, segment-level labels detected in video content and is made\navailable only when the client sets `LabelDetectionConfig.model` to\n\"builtin/latest\" in the request.",
|
||||
"items": {
|
||||
"$ref": "GoogleCloudVideointelligenceV1p3beta1_LabelAnnotation"
|
||||
},
|
||||
|
||||
Reference in New Issue
Block a user