mirror of
https://github.com/OMGeeky/google-apis-rs.git
synced 2026-01-19 09:50:46 +01:00
chore(api-update): to latest
Using `make update-json`, all json descriptions have been updated. Quite interesting to see that there are plenty of new ones which are giving 404 when queried. An actual bug, or something I should look into?
This commit is contained in:
@@ -1,11 +1,11 @@
|
||||
{
|
||||
"kind": "discovery#restDescription",
|
||||
"etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/-PCQObxeMXLhOncDOKreF3FN9IU\"",
|
||||
"etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/947N7-PR3f8n42WFCweiQxxRQdA\"",
|
||||
"discoveryVersion": "v1",
|
||||
"id": "dataflow:v1b3",
|
||||
"name": "dataflow",
|
||||
"version": "v1b3",
|
||||
"revision": "20160331",
|
||||
"revision": "20160908",
|
||||
"title": "Google Dataflow API",
|
||||
"description": "Develops and executes data processing patterns like ETL, batch computation, and continuous computation.",
|
||||
"ownerDomain": "google.com",
|
||||
@@ -113,6 +113,56 @@
|
||||
}
|
||||
},
|
||||
"schemas": {
|
||||
"GetDebugConfigRequest": {
|
||||
"id": "GetDebugConfigRequest",
|
||||
"type": "object",
|
||||
"description": "Request to get updated debug configuration for component.",
|
||||
"properties": {
|
||||
"workerId": {
|
||||
"type": "string",
|
||||
"description": "The worker id, i.e., VM hostname."
|
||||
},
|
||||
"componentId": {
|
||||
"type": "string",
|
||||
"description": "The internal component id for which debug configuration is requested."
|
||||
}
|
||||
}
|
||||
},
|
||||
"GetDebugConfigResponse": {
|
||||
"id": "GetDebugConfigResponse",
|
||||
"type": "object",
|
||||
"description": "Response to a get debug configuration request.",
|
||||
"properties": {
|
||||
"config": {
|
||||
"type": "string",
|
||||
"description": "The encoded debug configuration for the requested component."
|
||||
}
|
||||
}
|
||||
},
|
||||
"SendDebugCaptureRequest": {
|
||||
"id": "SendDebugCaptureRequest",
|
||||
"type": "object",
|
||||
"description": "Request to send encoded debug information.",
|
||||
"properties": {
|
||||
"workerId": {
|
||||
"type": "string",
|
||||
"description": "The worker id, i.e., VM hostname."
|
||||
},
|
||||
"componentId": {
|
||||
"type": "string",
|
||||
"description": "The internal component id for which debug information is sent."
|
||||
},
|
||||
"data": {
|
||||
"type": "string",
|
||||
"description": "The encoded debug information."
|
||||
}
|
||||
}
|
||||
},
|
||||
"SendDebugCaptureResponse": {
|
||||
"id": "SendDebugCaptureResponse",
|
||||
"type": "object",
|
||||
"description": "Response to a send capture request. nothing"
|
||||
},
|
||||
"Job": {
|
||||
"id": "Job",
|
||||
"type": "object",
|
||||
@@ -217,6 +267,13 @@
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"labels": {
|
||||
"type": "object",
|
||||
"description": "User-defined labels for this job. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: * Keys must conform to regexp: \\p{Ll}\\p{Lo}{0,62} * Values must conform to regexp: [\\p{Ll}\\p{Lo}\\p{N}_-]{0,63} * Both keys and values are additionally constrained to be \u003c= 128 bytes in size.",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -280,7 +337,7 @@
|
||||
"description": "Experimental settings.",
|
||||
"additionalProperties": {
|
||||
"type": "any",
|
||||
"description": "Properties of the object. Contains field @ype with type URL."
|
||||
"description": "Properties of the object. Contains field @type with type URL."
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -378,7 +435,7 @@
|
||||
"description": "Extra arguments for this worker pool.",
|
||||
"additionalProperties": {
|
||||
"type": "any",
|
||||
"description": "Properties of the object. Contains field @ype with type URL."
|
||||
"description": "Properties of the object. Contains field @type with type URL."
|
||||
}
|
||||
},
|
||||
"network": {
|
||||
@@ -387,7 +444,7 @@
|
||||
},
|
||||
"subnetwork": {
|
||||
"type": "string",
|
||||
"description": "Subnetwork to which VMs will be assigned, if desired. Expected to be of the form \"zones/ZONE/subnetworks/SUBNETWORK\"."
|
||||
"description": "Subnetwork to which VMs will be assigned, if desired. Expected to be of the form \"regions/REGION/subnetworks/SUBNETWORK\"."
|
||||
},
|
||||
"workerHarnessContainerImage": {
|
||||
"type": "string",
|
||||
@@ -397,6 +454,15 @@
|
||||
"type": "integer",
|
||||
"description": "The number of threads per worker harness. If empty or unspecified, the service will choose a number of threads (according to the number of cores on the selected machine type for batch, or 1 by convention for streaming).",
|
||||
"format": "int32"
|
||||
},
|
||||
"ipConfiguration": {
|
||||
"type": "string",
|
||||
"description": "Configuration for VM IPs.",
|
||||
"enum": [
|
||||
"WORKER_IP_UNSPECIFIED",
|
||||
"WORKER_IP_PUBLIC",
|
||||
"WORKER_IP_PRIVATE"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -774,6 +840,24 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"CreateJobFromTemplateRequest": {
|
||||
"id": "CreateJobFromTemplateRequest",
|
||||
"type": "object",
|
||||
"description": "Request to create a Dataflow job.",
|
||||
"properties": {
|
||||
"gcsPath": {
|
||||
"type": "string",
|
||||
"description": "A path to the serialized JSON representation of the job."
|
||||
},
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"description": "Dynamic parameterization of the job's runtime environment.",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ReportWorkItemStatusRequest": {
|
||||
"id": "ReportWorkItemStatusRequest",
|
||||
"type": "object",
|
||||
@@ -825,9 +909,16 @@
|
||||
"$ref": "Status"
|
||||
}
|
||||
},
|
||||
"counterUpdates": {
|
||||
"type": "array",
|
||||
"description": "Worker output counters for this WorkItem.",
|
||||
"items": {
|
||||
"$ref": "CounterUpdate"
|
||||
}
|
||||
},
|
||||
"metricUpdates": {
|
||||
"type": "array",
|
||||
"description": "Worker output metrics (counters) for this WorkItem.",
|
||||
"description": "DEPRECATED in favor of counter_updates.",
|
||||
"items": {
|
||||
"$ref": "MetricUpdate"
|
||||
}
|
||||
@@ -879,12 +970,295 @@
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "any",
|
||||
"description": "Properties of the object. Contains field @ype with type URL."
|
||||
"description": "Properties of the object. Contains field @type with type URL."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"CounterUpdate": {
|
||||
"id": "CounterUpdate",
|
||||
"type": "object",
|
||||
"description": "An update to a Counter sent from a worker.",
|
||||
"properties": {
|
||||
"nameAndKind": {
|
||||
"$ref": "NameAndKind",
|
||||
"description": "Counter name and aggregation type."
|
||||
},
|
||||
"shortId": {
|
||||
"type": "string",
|
||||
"description": "The service-generated short identifier for this counter. The short_id -\u003e (name, metadata) mapping is constant for the lifetime of a job.",
|
||||
"format": "int64"
|
||||
},
|
||||
"structuredNameAndMetadata": {
|
||||
"$ref": "CounterStructuredNameAndMetadata",
|
||||
"description": "Counter structured name and metadata."
|
||||
},
|
||||
"cumulative": {
|
||||
"type": "boolean",
|
||||
"description": "True if this counter is reported as the total cumulative aggregate value accumulated since the worker started working on this WorkItem. By default this is false, indicating that this counter is reported as a delta."
|
||||
},
|
||||
"integer": {
|
||||
"$ref": "SplitInt64",
|
||||
"description": "Integer value for Sum, Max, Min."
|
||||
},
|
||||
"floatingPoint": {
|
||||
"type": "number",
|
||||
"description": "Floating point value for Sum, Max, Min.",
|
||||
"format": "double"
|
||||
},
|
||||
"boolean": {
|
||||
"type": "boolean",
|
||||
"description": "Boolean value for And, Or."
|
||||
},
|
||||
"integerMean": {
|
||||
"$ref": "IntegerMean",
|
||||
"description": "Integer mean aggregation value for Mean."
|
||||
},
|
||||
"floatingPointMean": {
|
||||
"$ref": "FloatingPointMean",
|
||||
"description": "Floating point mean aggregation value for Mean."
|
||||
},
|
||||
"integerList": {
|
||||
"$ref": "IntegerList",
|
||||
"description": "List of integers, for Set."
|
||||
},
|
||||
"floatingPointList": {
|
||||
"$ref": "FloatingPointList",
|
||||
"description": "List of floating point numbers, for Set."
|
||||
},
|
||||
"stringList": {
|
||||
"$ref": "StringList",
|
||||
"description": "List of strings, for Set."
|
||||
},
|
||||
"internal": {
|
||||
"type": "any",
|
||||
"description": "Value for internally-defined counters used by the Dataflow service."
|
||||
}
|
||||
}
|
||||
},
|
||||
"NameAndKind": {
|
||||
"id": "NameAndKind",
|
||||
"type": "object",
|
||||
"description": "Basic metadata about a counter.",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Name of the counter."
|
||||
},
|
||||
"kind": {
|
||||
"type": "string",
|
||||
"description": "Counter aggregation kind.",
|
||||
"enum": [
|
||||
"INVALID",
|
||||
"SUM",
|
||||
"MAX",
|
||||
"MIN",
|
||||
"MEAN",
|
||||
"OR",
|
||||
"AND",
|
||||
"SET"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"CounterStructuredNameAndMetadata": {
|
||||
"id": "CounterStructuredNameAndMetadata",
|
||||
"type": "object",
|
||||
"description": "A single message which encapsulates structured name and metadata for a given counter.",
|
||||
"properties": {
|
||||
"name": {
|
||||
"$ref": "CounterStructuredName",
|
||||
"description": "Structured name of the counter."
|
||||
},
|
||||
"metadata": {
|
||||
"$ref": "CounterMetadata",
|
||||
"description": "Metadata associated with a counter"
|
||||
}
|
||||
}
|
||||
},
|
||||
"CounterStructuredName": {
|
||||
"id": "CounterStructuredName",
|
||||
"type": "object",
|
||||
"description": "Identifies a counter within a per-job namespace. Counters whose structured names are the same get merged into a single value for the job.",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Counter name. Not necessarily globally-unique, but unique within the context of the other fields. Required."
|
||||
},
|
||||
"standardOrigin": {
|
||||
"type": "string",
|
||||
"description": "One of the standard Origins defined above.",
|
||||
"enum": [
|
||||
"DATAFLOW",
|
||||
"USER"
|
||||
]
|
||||
},
|
||||
"otherOrigin": {
|
||||
"type": "string",
|
||||
"description": "A string containing the origin of the counter."
|
||||
},
|
||||
"originalStepName": {
|
||||
"type": "string",
|
||||
"description": "System generated name of the original step in the user's graph, before optimization."
|
||||
},
|
||||
"componentStepName": {
|
||||
"type": "string",
|
||||
"description": "Name of the optimized step being executed by the workers."
|
||||
},
|
||||
"executionStepName": {
|
||||
"type": "string",
|
||||
"description": "Name of the stage. An execution step contains multiple component steps."
|
||||
},
|
||||
"workerId": {
|
||||
"type": "string",
|
||||
"description": "ID of a particular worker."
|
||||
},
|
||||
"portion": {
|
||||
"type": "string",
|
||||
"description": "Portion of this counter, either key or value.",
|
||||
"enum": [
|
||||
"ALL",
|
||||
"KEY",
|
||||
"VALUE"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"CounterMetadata": {
|
||||
"id": "CounterMetadata",
|
||||
"type": "object",
|
||||
"description": "CounterMetadata includes all static non-name non-value counter attributes.",
|
||||
"properties": {
|
||||
"kind": {
|
||||
"type": "string",
|
||||
"description": "Counter aggregation kind.",
|
||||
"enum": [
|
||||
"INVALID",
|
||||
"SUM",
|
||||
"MAX",
|
||||
"MIN",
|
||||
"MEAN",
|
||||
"OR",
|
||||
"AND",
|
||||
"SET"
|
||||
]
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Human-readable description of the counter semantics."
|
||||
},
|
||||
"standardUnits": {
|
||||
"type": "string",
|
||||
"description": "System defined Units, see above enum.",
|
||||
"enum": [
|
||||
"BYTES",
|
||||
"BYTES_PER_SEC",
|
||||
"MILLISECONDS",
|
||||
"MICROSECONDS",
|
||||
"NANOSECONDS",
|
||||
"TIMESTAMP_MSEC",
|
||||
"TIMESTAMP_USEC",
|
||||
"TIMESTAMP_NSEC"
|
||||
]
|
||||
},
|
||||
"otherUnits": {
|
||||
"type": "string",
|
||||
"description": "A string referring to the unit type."
|
||||
}
|
||||
}
|
||||
},
|
||||
"SplitInt64": {
|
||||
"id": "SplitInt64",
|
||||
"type": "object",
|
||||
"description": "A representation of an int64, n, that is immune to precision loss when encoded in JSON.",
|
||||
"properties": {
|
||||
"lowBits": {
|
||||
"type": "integer",
|
||||
"description": "The low order bits: n & 0xffffffff.",
|
||||
"format": "uint32"
|
||||
},
|
||||
"highBits": {
|
||||
"type": "integer",
|
||||
"description": "The high order bits, including the sign: n \u003e\u003e 32.",
|
||||
"format": "int32"
|
||||
}
|
||||
}
|
||||
},
|
||||
"IntegerMean": {
|
||||
"id": "IntegerMean",
|
||||
"type": "object",
|
||||
"description": "A representation of an integer mean metric contribution.",
|
||||
"properties": {
|
||||
"sum": {
|
||||
"$ref": "SplitInt64",
|
||||
"description": "The sum of all values being aggregated."
|
||||
},
|
||||
"count": {
|
||||
"$ref": "SplitInt64",
|
||||
"description": "The number of values being aggregated."
|
||||
}
|
||||
}
|
||||
},
|
||||
"FloatingPointMean": {
|
||||
"id": "FloatingPointMean",
|
||||
"type": "object",
|
||||
"description": "A representation of a floating point mean metric contribution.",
|
||||
"properties": {
|
||||
"sum": {
|
||||
"type": "number",
|
||||
"description": "The sum of all values being aggregated.",
|
||||
"format": "double"
|
||||
},
|
||||
"count": {
|
||||
"$ref": "SplitInt64",
|
||||
"description": "The number of values being aggregated."
|
||||
}
|
||||
}
|
||||
},
|
||||
"IntegerList": {
|
||||
"id": "IntegerList",
|
||||
"type": "object",
|
||||
"description": "A metric value representing a list of integers.",
|
||||
"properties": {
|
||||
"elements": {
|
||||
"type": "array",
|
||||
"description": "Elements of the list.",
|
||||
"items": {
|
||||
"$ref": "SplitInt64"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"FloatingPointList": {
|
||||
"id": "FloatingPointList",
|
||||
"type": "object",
|
||||
"description": "A metric value representing a list of floating point numbers.",
|
||||
"properties": {
|
||||
"elements": {
|
||||
"type": "array",
|
||||
"description": "Elements of the list.",
|
||||
"items": {
|
||||
"type": "number",
|
||||
"format": "double"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"StringList": {
|
||||
"id": "StringList",
|
||||
"type": "object",
|
||||
"description": "A metric value representing a list of strings.",
|
||||
"properties": {
|
||||
"elements": {
|
||||
"type": "array",
|
||||
"description": "Elements of the list.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ApproximateReportedProgress": {
|
||||
"id": "ApproximateReportedProgress",
|
||||
"type": "object",
|
||||
@@ -901,11 +1275,11 @@
|
||||
},
|
||||
"remainingParallelism": {
|
||||
"$ref": "ReportedParallelism",
|
||||
"description": "Total amount of parallelism in the input of this WorkItem that has not been consumed yet (i.e. can be delegated to a new WorkItem via dynamic splitting). \"Amount of parallelism\" refers to how many non-empty parts of the input can be read in parallel. This does not necessarily equal number of records. An input that can be read in parallel down to the individual records is called \"perfectly splittable\". An example of non-perfectly parallelizable input is a block-compressed file format where a block of records has to be read as a whole, but different blocks can be read in parallel. Examples: * If we have read 30 records out of 50 in a perfectly splittable 50-record input, this value should be 20. * If we are reading through block 3 in a block-compressed file consisting of 5 blocks, this value should be 2 (since blocks 4 and 5 can be processed in parallel by new work items via dynamic splitting). * If we are reading through the last block in a block-compressed file, or reading or processing the last record in a perfectly splittable input, this value should be 0, because the remainder of the work item cannot be further split."
|
||||
"description": "Total amount of parallelism in the input of this task that remains, (i.e. can be delegated to this task and any new tasks via dynamic splitting). Always at least 1 for non-finished work items and 0 for finished. \"Amount of parallelism\" refers to how many non-empty parts of the input can be read in parallel. This does not necessarily equal number of records. An input that can be read in parallel down to the individual records is called \"perfectly splittable\". An example of non-perfectly parallelizable input is a block-compressed file format where a block of records has to be read as a whole, but different blocks can be read in parallel. Examples: * If we are processing record #30 (starting at 1) out of 50 in a perfectly splittable 50-record input, this value should be 21 (20 remaining + 1 current). * If we are reading through block 3 in a block-compressed file consisting of 5 blocks, this value should be 3 (since blocks 4 and 5 can be processed in parallel by new tasks via dynamic splitting and the current task remains processing block 3). * If we are reading through the last block in a block-compressed file, or reading or processing the last record in a perfectly splittable input, this value should be 1, because apart from the current task, no additional remainder can be split off."
|
||||
},
|
||||
"consumedParallelism": {
|
||||
"$ref": "ReportedParallelism",
|
||||
"description": "Total amount of parallelism in the portion of input of this work item that has already been consumed. In the first two examples above (see remaining_parallelism), the value should be 30 or 3 respectively. The sum of remaining_parallelism and consumed_parallelism should equal the total amount of parallelism in this work item. If specified, must be finite."
|
||||
"description": "Total amount of parallelism in the portion of input of this task that has already been consumed and is no longer active. In the first two examples above (see remaining_parallelism), the value should be 29 or 2 respectively. The sum of remaining_parallelism and consumed_parallelism should equal the total amount of parallelism in this work item. If specified, must be finite."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1236,6 +1610,13 @@
|
||||
"description": "The index value to use for the next report sent by the worker. Note: If the report call fails for whatever reason, the worker should reuse this index for subsequent report attempts.",
|
||||
"format": "int64"
|
||||
},
|
||||
"metricShortId": {
|
||||
"type": "array",
|
||||
"description": "The short ids that workers should use in subsequent metric updates. Workers should strive to use short ids whenever possible, but it is ok to request the short_id again if a worker lost track of it (e.g. if the worker is recovering from a crash). NOTE: it is possible that the response may have short ids for a subset of the metrics.",
|
||||
"items": {
|
||||
"$ref": "MetricShortId"
|
||||
}
|
||||
},
|
||||
"suggestedStopPosition": {
|
||||
"$ref": "Position",
|
||||
"description": "Obsolete, always empty."
|
||||
@@ -1262,6 +1643,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"MetricShortId": {
|
||||
"id": "MetricShortId",
|
||||
"type": "object",
|
||||
"description": "The metric short id is returned to the user alongside an offset into ReportWorkItemStatusRequest",
|
||||
"properties": {
|
||||
"metricIndex": {
|
||||
"type": "integer",
|
||||
"description": "The index of the corresponding metric in the ReportWorkItemStatusRequest. Required.",
|
||||
"format": "int32"
|
||||
},
|
||||
"shortId": {
|
||||
"type": "string",
|
||||
"description": "The service-generated short identifier for the metric.",
|
||||
"format": "int64"
|
||||
}
|
||||
}
|
||||
},
|
||||
"LeaseWorkItemRequest": {
|
||||
"id": "LeaseWorkItemRequest",
|
||||
"type": "object",
|
||||
@@ -1358,6 +1756,10 @@
|
||||
"$ref": "StreamingComputationTask",
|
||||
"description": "Additional information for StreamingComputationTask WorkItems."
|
||||
},
|
||||
"streamingConfigTask": {
|
||||
"$ref": "StreamingConfigTask",
|
||||
"description": "Additional information for StreamingConfigTask WorkItems."
|
||||
},
|
||||
"reportStatusInterval": {
|
||||
"type": "string",
|
||||
"description": "Recommended reporting interval."
|
||||
@@ -1412,6 +1814,10 @@
|
||||
"type": "string",
|
||||
"description": "User-provided name of this operation."
|
||||
},
|
||||
"originalName": {
|
||||
"type": "string",
|
||||
"description": "System-defined name for the operation in the original workflow graph."
|
||||
},
|
||||
"read": {
|
||||
"$ref": "ReadInstruction",
|
||||
"description": "Additional information for Read instructions."
|
||||
@@ -1613,6 +2019,14 @@
|
||||
"items": {
|
||||
"$ref": "SideInputInfo"
|
||||
}
|
||||
},
|
||||
"originalCombineValuesStepName": {
|
||||
"type": "string",
|
||||
"description": "If this instruction includes a combining function, this is the name of the CombineValues instruction lifted into this instruction."
|
||||
},
|
||||
"originalCombineValuesInputStoreName": {
|
||||
"type": "string",
|
||||
"description": "If this instruction includes a combining function this is the name of the intermediate store between the GBK and the CombineValues."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -1643,6 +2057,10 @@
|
||||
"type": "string",
|
||||
"description": "System-defined name of this output. Unique across the workflow."
|
||||
},
|
||||
"originalName": {
|
||||
"type": "string",
|
||||
"description": "System-defined name for this output in the original workflow graph. Outputs that do not contribute to an original instruction do not set this."
|
||||
},
|
||||
"codec": {
|
||||
"type": "object",
|
||||
"description": "The codec to use to encode data being written via this output.",
|
||||
@@ -1650,6 +2068,14 @@
|
||||
"type": "any",
|
||||
"description": "Properties of the object."
|
||||
}
|
||||
},
|
||||
"onlyCountKeyBytes": {
|
||||
"type": "boolean",
|
||||
"description": "For system-generated byte and mean byte metrics, certain instructions should only report the key size."
|
||||
},
|
||||
"onlyCountValueBytes": {
|
||||
"type": "boolean",
|
||||
"description": "For system-generated byte and mean byte metrics, certain instructions should only report the value size."
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -2122,6 +2548,53 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"StreamingConfigTask": {
|
||||
"id": "StreamingConfigTask",
|
||||
"type": "object",
|
||||
"description": "A task that carries configuration information for streaming computations.",
|
||||
"properties": {
|
||||
"streamingComputationConfigs": {
|
||||
"type": "array",
|
||||
"description": "Set of computation configuration information.",
|
||||
"items": {
|
||||
"$ref": "StreamingComputationConfig"
|
||||
}
|
||||
},
|
||||
"userStepToStateFamilyNameMap": {
|
||||
"type": "object",
|
||||
"description": "Map from user step names to state families.",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"StreamingComputationConfig": {
|
||||
"id": "StreamingComputationConfig",
|
||||
"type": "object",
|
||||
"description": "Configuration information for a single streaming computation.",
|
||||
"properties": {
|
||||
"computationId": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier for this computation."
|
||||
},
|
||||
"systemName": {
|
||||
"type": "string",
|
||||
"description": "System defined name for this computation."
|
||||
},
|
||||
"stageName": {
|
||||
"type": "string",
|
||||
"description": "Stage name of this computation."
|
||||
},
|
||||
"instructions": {
|
||||
"type": "array",
|
||||
"description": "Instructions that comprise the computation.",
|
||||
"items": {
|
||||
"$ref": "ParallelInstruction"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"SendWorkerMessagesRequest": {
|
||||
"id": "SendWorkerMessagesRequest",
|
||||
"type": "object",
|
||||
@@ -2415,6 +2888,7 @@
|
||||
"type": "string",
|
||||
"description": "The kind of filter to use.",
|
||||
"enum": [
|
||||
"UNKNOWN",
|
||||
"ALL",
|
||||
"TERMINATED",
|
||||
"ACTIVE"
|
||||
@@ -2492,6 +2966,78 @@
|
||||
}
|
||||
},
|
||||
"resources": {
|
||||
"debug": {
|
||||
"methods": {
|
||||
"getConfig": {
|
||||
"id": "dataflow.projects.jobs.debug.getConfig",
|
||||
"path": "v1b3/projects/{projectId}/jobs/{jobId}/debug/getConfig",
|
||||
"httpMethod": "POST",
|
||||
"description": "Get encoded debug configuration for component. Not cacheable.",
|
||||
"parameters": {
|
||||
"projectId": {
|
||||
"type": "string",
|
||||
"description": "The project id.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
"jobId": {
|
||||
"type": "string",
|
||||
"description": "The job id.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
},
|
||||
"parameterOrder": [
|
||||
"projectId",
|
||||
"jobId"
|
||||
],
|
||||
"request": {
|
||||
"$ref": "GetDebugConfigRequest"
|
||||
},
|
||||
"response": {
|
||||
"$ref": "GetDebugConfigResponse"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email"
|
||||
]
|
||||
},
|
||||
"sendCapture": {
|
||||
"id": "dataflow.projects.jobs.debug.sendCapture",
|
||||
"path": "v1b3/projects/{projectId}/jobs/{jobId}/debug/sendCapture",
|
||||
"httpMethod": "POST",
|
||||
"description": "Send encoded debug capture data for component.",
|
||||
"parameters": {
|
||||
"projectId": {
|
||||
"type": "string",
|
||||
"description": "The project id.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
"jobId": {
|
||||
"type": "string",
|
||||
"description": "The job id.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
},
|
||||
"parameterOrder": [
|
||||
"projectId",
|
||||
"jobId"
|
||||
],
|
||||
"request": {
|
||||
"$ref": "SendDebugCaptureRequest"
|
||||
},
|
||||
"response": {
|
||||
"$ref": "SendDebugCaptureResponse"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"messages": {
|
||||
"methods": {
|
||||
"list": {
|
||||
@@ -2634,6 +3180,37 @@
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"templates": {
|
||||
"methods": {
|
||||
"create": {
|
||||
"id": "dataflow.projects.templates.create",
|
||||
"path": "v1b3/projects/{projectId}/templates",
|
||||
"httpMethod": "POST",
|
||||
"description": "Creates a dataflow job from a template.",
|
||||
"parameters": {
|
||||
"projectId": {
|
||||
"type": "string",
|
||||
"description": "The project which owns the job.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
},
|
||||
"parameterOrder": [
|
||||
"projectId"
|
||||
],
|
||||
"request": {
|
||||
"$ref": "CreateJobFromTemplateRequest"
|
||||
},
|
||||
"response": {
|
||||
"$ref": "Job"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/userinfo.email"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user