chore(api-update): to latest

Using `make update-json`, all json descriptions have been updated.
Quite interesting to see that there are plenty of new ones which
are giving 404 when queried. An actual bug, or something I should
look into ?
This commit is contained in:
Sebastian Thiel
2016-09-11 10:00:21 +02:00
parent 33771a6dc7
commit 13ed4eaecb
148 changed files with 84357 additions and 3237 deletions

View File

@@ -1,11 +1,11 @@
{
"kind": "discovery#restDescription",
"etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/sNb4igYg4wCeulufFNSiGUjvhBU\"",
"etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/VlhO-7U_LLGUoZP3BlsW4eOrFfk\"",
"discoveryVersion": "v1",
"id": "bigquery:v2",
"name": "bigquery",
"version": "v2",
"revision": "20160408",
"revision": "20160903",
"title": "BigQuery API",
"description": "A data platform for customers to create, manage, share and query data.",
"ownerDomain": "google.com",
@@ -27,11 +27,9 @@
"description": "Data format for the response.",
"default": "json",
"enum": [
"csv",
"json"
],
"enumDescriptions": [
"Responses with Content-Type of text/csv",
"Responses with Content-Type of application/json"
],
"location": "query"
@@ -122,7 +120,7 @@
},
"type": {
"type": "string",
"description": "[Optional] The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Defaut type is BYTES. 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels."
"description": "[Optional] The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels."
}
}
},
@@ -151,7 +149,7 @@
},
"type": {
"type": "string",
"description": "[Optional] The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Defaut type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it."
"description": "[Optional] The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it."
}
}
},
@@ -169,6 +167,10 @@
"ignoreUnspecifiedColumnFamilies": {
"type": "boolean",
"description": "[Optional] If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false."
},
"readRowkeyAsString": {
"type": "boolean",
"description": "[Optional] If field is true, then the rowkey column families will be read and converted to string. Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false."
}
}
},
@@ -199,9 +201,9 @@
"pattern": ".?"
},
"skipLeadingRows": {
"type": "integer",
"type": "string",
"description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.",
"format": "int32"
"format": "int64"
}
}
},
@@ -277,6 +279,13 @@
"description": "[Output-only] The resource type.",
"default": "bigquery#dataset"
},
"labels": {
"type": "object",
"description": "[Experimental] The labels associated with this dataset. You can use these to organize and group your datasets. You can set this property when inserting or updating a dataset. See Labeling Datasets for more information.",
"additionalProperties": {
"type": "string"
}
},
"lastModifiedTime": {
"type": "string",
"description": "[Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.",
@@ -318,6 +327,13 @@
"type": "string",
"description": "The resource type. This property always returns the value \"bigquery#dataset\".",
"default": "bigquery#dataset"
},
"labels": {
"type": "object",
"description": "[Experimental] The labels associated with this dataset. You can use these to organize and group your datasets.",
"additionalProperties": {
"type": "string"
}
}
}
}
@@ -492,6 +508,10 @@
"$ref": "CsvOptions",
"description": "Additional properties to set if sourceFormat is set to CSV."
},
"googleSheetsOptions": {
"$ref": "GoogleSheetsOptions",
"description": "[Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS."
},
"ignoreUnknownValues": {
"type": "boolean",
"description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored."
@@ -507,11 +527,11 @@
},
"sourceFormat": {
"type": "string",
"description": "[Required] The data format. For CSV files, specify \"CSV\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro files, specify \"AVRO\". For Google Cloud Datastore backups, specify \"DATASTORE_BACKUP\". [Experimental] For Google Cloud Bigtable, specify \"BIGTABLE\". Please note that reading from Google Cloud Bigtable is experimental and has to be enabled for your project. Please contact Google Cloud Support to enable this for your project."
"description": "[Required] The data format. For CSV files, specify \"CSV\". For Google sheets, specify \"GOOGLE_SHEETS\". For newline-delimited JSON, specify \"NEWLINE_DELIMITED_JSON\". For Avro files, specify \"AVRO\". For Google Cloud Datastore backups, specify \"DATASTORE_BACKUP\". [Experimental] For Google Cloud Bigtable, specify \"BIGTABLE\". Please note that reading from Google Cloud Bigtable is experimental and has to be enabled for your project. Please contact Google Cloud Support to enable this for your project."
},
"sourceUris": {
"type": "array",
"description": "[Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified, and it must end with '.backup_info'. Also, the '*' wildcard character is not allowed.",
"description": "[Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified, and it must end with '.backup_info'. Also, the '*' wildcard character is not allowed.",
"items": {
"type": "string"
}
@@ -550,6 +570,11 @@
"description": "The resource type of the response.",
"default": "bigquery#getQueryResultsResponse"
},
"numDmlAffectedRows": {
"type": "string",
"description": "[Output-only, Experimental] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.",
"format": "int64"
},
"pageToken": {
"type": "string",
"description": "A token used for paging results."
@@ -577,16 +602,14 @@
}
}
},
"IntervalPartitionConfiguration": {
"id": "IntervalPartitionConfiguration",
"GoogleSheetsOptions": {
"id": "GoogleSheetsOptions",
"type": "object",
"properties": {
"expirationMs": {
"skipLeadingRows": {
"type": "string",
"description": "[Optional] The number of rows at the top of a sheet that BigQuery will skip when reading the data. The default value is 0. This property is useful if you have header rows that should be skipped. When autodetect is on, behavior is the following: * skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, the row is read as data. Otherwise data is read starting from the second row. * skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. * skipLeadingRows = N \u003e 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, row N is just skipped. Otherwise row N is used to extract column names for the detected schema.",
"format": "int64"
},
"type": {
"type": "string"
}
}
},
@@ -724,6 +747,10 @@
"type": "boolean",
"description": "Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."
},
"autodetect": {
"type": "boolean",
"description": "[Experimental] Indicates if we should automatically infer the options and schema for CSV and JSON sources."
},
"createDisposition": {
"type": "string",
"description": "[Optional] Specifies whether the job is allowed to create new tables. The following values are supported: CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. The default value is CREATE_IF_NEEDED. Creation, truncation and append actions occur as one atomic update upon job completion."
@@ -774,6 +801,13 @@
"type": "string",
"description": "[Deprecated] The format of the schemaInline property."
},
"schemaUpdateOptions": {
"type": "array",
"description": "[Experimental] Allows the schema of the destination table to be updated as a side effect of the load job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.",
"items": {
"type": "string"
}
},
"skipLeadingRows": {
"type": "integer",
"description": "[Optional] The number of rows at the top of a CSV file that BigQuery will skip when loading the data. The default value is 0. This property is useful if you have header rows in the file that should be skipped.",
@@ -827,6 +861,15 @@
"default": "1",
"format": "int32"
},
"maximumBytesBilled": {
"type": "string",
"description": "[Optional] Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). If unspecified, this will be set to your project default.",
"format": "int64"
},
"parameterMode": {
"type": "string",
"description": "[Experimental] Standard SQL only. Whether to use positional (?) or named (@myparam) query parameters in this query."
},
"preserveNulls": {
"type": "boolean",
"description": "[Deprecated] This property is deprecated."
@@ -839,6 +882,20 @@
"type": "string",
"description": "[Required] BigQuery SQL query to execute."
},
"queryParameters": {
"type": "array",
"description": "[Experimental] Query parameters for Standard SQL queries.",
"items": {
"$ref": "QueryParameter"
}
},
"schemaUpdateOptions": {
"type": "array",
"description": "[Experimental] Allows the schema of the destination table to be updated as a side effect of the query job. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable.",
"items": {
"type": "string"
}
},
"tableDefinitions": {
"type": "object",
"description": "[Optional] If querying an external data source outside of BigQuery, describes the data format, location and other properties of the data source. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.",
@@ -848,7 +905,7 @@
},
"useLegacySql": {
"type": "boolean",
"description": "[Experimental] Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's updated SQL dialect with improved standards compliance. When using BigQuery's updated SQL, the values of allowLargeResults and flattenResults are ignored. Queries with useLegacySql set to false will be run as if allowLargeResults is true and flattenResults is false."
"description": "[Experimental] Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is set to false, the values of allowLargeResults and flattenResults are ignored; query will be run as if allowLargeResults is true and flattenResults is false."
},
"useQueryCache": {
"type": "boolean",
@@ -1037,9 +1094,14 @@
"type": "boolean",
"description": "[Output-only] Whether the query result was fetched from the query cache."
},
"numDmlAffectedRows": {
"type": "string",
"description": "[Output-only, Experimental] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.",
"format": "int64"
},
"queryPlan": {
"type": "array",
"description": "[Output-only, Experimental] Describes execution plan for the query as a list of stages.",
"description": "[Output-only, Experimental] Describes execution plan for the query.",
"items": {
"$ref": "ExplainQueryStage"
}
@@ -1051,6 +1113,10 @@
"$ref": "TableReference"
}
},
"schema": {
"$ref": "TableSchema",
"description": "[Output-only, Experimental] The schema of the results. Present only for successful dry run of non-legacy SQL queries."
},
"totalBytesBilled": {
"type": "string",
"description": "[Output-only] Total bytes billed for the job.",
@@ -1060,6 +1126,13 @@
"type": "string",
"description": "[Output-only] Total bytes processed for the job.",
"format": "int64"
},
"undeclaredQueryParameters": {
"type": "array",
"description": "[Output-only, Experimental] Standard SQL only: list of undeclared query parameters detected during a dry run validation.",
"items": {
"$ref": "QueryParameter"
}
}
}
},
@@ -1201,6 +1274,83 @@
}
}
},
"QueryParameter": {
"id": "QueryParameter",
"type": "object",
"properties": {
"name": {
"type": "string",
"description": "[Optional] If unset, this is a positional parameter. Otherwise, should be unique within a query."
},
"parameterType": {
"$ref": "QueryParameterType",
"description": "[Required] The type of this parameter."
},
"parameterValue": {
"$ref": "QueryParameterValue",
"description": "[Required] The value of this parameter."
}
}
},
"QueryParameterType": {
"id": "QueryParameterType",
"type": "object",
"properties": {
"arrayType": {
"$ref": "QueryParameterType",
"description": "[Optional] The type of the array's elements, if this is an array."
},
"structTypes": {
"type": "array",
"description": "[Optional] The types of the fields of this struct, in order, if this is a struct.",
"items": {
"type": "object",
"properties": {
"description": {
"type": "string",
"description": "[Optional] Human-oriented description of the field."
},
"name": {
"type": "string",
"description": "[Optional] The name of this field."
},
"type": {
"$ref": "QueryParameterType",
"description": "[Required] The type of this field."
}
}
}
},
"type": {
"type": "string",
"description": "[Required] The top level type of this field."
}
}
},
"QueryParameterValue": {
"id": "QueryParameterValue",
"type": "object",
"properties": {
"arrayValues": {
"type": "array",
"description": "[Optional] The array values, if this is an array type.",
"items": {
"$ref": "QueryParameterValue"
}
},
"structValues": {
"type": "object",
"description": "[Optional] The struct field values, in order of the struct type's declaration.",
"additionalProperties": {
"$ref": "QueryParameterValue"
}
},
"value": {
"type": "string",
"description": "[Optional] The value of this value, if a simple scalar type."
}
}
},
"QueryRequest": {
"id": "QueryRequest",
"type": "object",
@@ -1223,6 +1373,10 @@
"description": "[Optional] The maximum number of rows of data to return per page of results. Setting this flag to a small value such as 1000 and then paging through results might improve reliability when the query result set is large. In addition to this limit, responses are also limited to 10 MB. By default, there is no maximum row count, and only the byte limit applies.",
"format": "uint32"
},
"parameterMode": {
"type": "string",
"description": "[Experimental] Standard SQL only. Whether to use positional (?) or named (@myparam) query parameters in this query."
},
"preserveNulls": {
"type": "boolean",
"description": "[Deprecated] This property is deprecated."
@@ -1236,6 +1390,13 @@
]
}
},
"queryParameters": {
"type": "array",
"description": "[Experimental] Query parameters for Standard SQL queries.",
"items": {
"$ref": "QueryParameter"
}
},
"timeoutMs": {
"type": "integer",
"description": "[Optional] How long to wait for the query to complete, in milliseconds, before the request times out and returns. Note that this is only a timeout for the request, not the query. If the query takes longer to run than the timeout value, the call returns without any results and with the 'jobComplete' flag set to false. You can call GetQueryResults() to wait for the query to complete and read the results. The default value is 10000 milliseconds (10 seconds).",
@@ -1243,7 +1404,8 @@
},
"useLegacySql": {
"type": "boolean",
"description": "[Experimental] Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's updated SQL dialect with improved standards compliance. When using BigQuery's updated SQL, the values of allowLargeResults and flattenResults are ignored. Queries with useLegacySql set to false will be run as if allowLargeResults is true and flattenResults is false."
"description": "[Experimental] Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. If set to false, the query will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is set to false, the values of allowLargeResults and flattenResults are ignored; query will be run as if allowLargeResults is true and flattenResults is false.",
"default": "true"
},
"useQueryCache": {
"type": "boolean",
@@ -1280,6 +1442,11 @@
"description": "The resource type.",
"default": "bigquery#queryResponse"
},
"numDmlAffectedRows": {
"type": "string",
"description": "[Output-only, Experimental] The number of rows affected by a DML statement. Present only for DML statements INSERT, UPDATE or DELETE.",
"format": "int64"
},
"pageToken": {
"type": "string",
"description": "A token used for paging results."
@@ -1381,18 +1548,16 @@
"description": "[Output-only] The size of this table in bytes, excluding any data in the streaming buffer.",
"format": "int64"
},
"numLongTermBytes": {
"type": "string",
"description": "[Output-only] The number of bytes in the table that are considered \"long-term storage\".",
"format": "int64"
},
"numRows": {
"type": "string",
"description": "[Output-only] The number of rows of data in this table, excluding any data in the streaming buffer.",
"format": "uint64"
},
"partitionConfigurations": {
"type": "array",
"description": "[Experimental] List of partition configurations for this table. Currently only one configuration can be specified and it can only be an interval partition with type daily.",
"items": {
"$ref": "TablePartitionConfiguration"
}
},
"schema": {
"$ref": "TableSchema",
"description": "[Optional] Describes the schema of this table."
@@ -1409,6 +1574,10 @@
"$ref": "TableReference",
"description": "[Required] Reference describing the ID of this table."
},
"timePartitioning": {
"$ref": "TimePartitioning",
"description": "[Experimental] If specified, configures time-based partitioning for this table."
},
"type": {
"type": "string",
"description": "[Output-only] Describes the table type. The following values are supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL query. EXTERNAL: A table that references data stored in an external storage system, such as Google Cloud Storage. The default value is TABLE."
@@ -1614,17 +1783,6 @@
}
}
},
"TablePartitionConfiguration": {
"id": "TablePartitionConfiguration",
"type": "object",
"description": "[Required] A partition configuration. Only one type of partition should be configured.",
"properties": {
"interval": {
"$ref": "IntervalPartitionConfiguration",
"description": "[Pick one] Configures an interval partition."
}
}
},
"TableReference": {
"id": "TableReference",
"type": "object",
@@ -1684,6 +1842,21 @@
}
}
},
"TimePartitioning": {
"id": "TimePartitioning",
"type": "object",
"properties": {
"expirationMs": {
"type": "string",
"description": "[Optional] Number of milliseconds for which to keep the storage for a partition.",
"format": "int64"
},
"type": {
"type": "string",
"description": "[Required] The only type supported is DAY, which will generate one partition per day based on data loading time."
}
}
},
"UserDefinedFunctionResource": {
"id": "UserDefinedFunctionResource",
"type": "object",
@@ -1706,6 +1879,10 @@
"type": "string",
"description": "[Required] A query that BigQuery executes when the view is referenced."
},
"useLegacySql": {
"type": "boolean",
"description": "[Experimental] Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. If set to false, the view will use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-reference/ Queries and views that reference this view must use the same flag value."
},
"userDefinedFunctionResources": {
"type": "array",
"description": "[Experimental] Describes user-defined function resources used in the query.",
@@ -1822,6 +1999,11 @@
"description": "Whether to list all datasets, including hidden ones",
"location": "query"
},
"filter": {
"type": "string",
"description": "An expression for filtering the results of the request by label. The syntax is \"labels.\u003cname\u003e[:\u003cvalue\u003e]\". Multiple filters can be ANDed together by connecting with a space. Example: \"labels.department:receiving labels.active\". See Filtering datasets using labels for details.",
"location": "query"
},
"maxResults": {
"type": "integer",
"description": "The maximum number of results to return",
@@ -1926,7 +2108,7 @@
"methods": {
"cancel": {
"id": "bigquery.jobs.cancel",
"path": "project/{projectId}/jobs/{jobId}/cancel",
"path": "projects/{projectId}/jobs/{jobId}/cancel",
"httpMethod": "POST",
"description": "Requests that a job be cancelled. This call will return immediately, and the client will need to poll for the job status to see if the cancel completed successfully. Cancelled jobs may still incur costs.",
"parameters": {