Bump version to 1.0.9; update JSON schemas; add new APIs

This commit is contained in:
Sebastian Thiel
2019-07-05 11:32:35 +08:00
parent 99e97ceece
commit e42ebc0c2b
2442 changed files with 190984 additions and 71186 deletions

View File

@@ -27,7 +27,7 @@
}
},
"basePath": "/bigquery/v2/",
"baseUrl": "https://www.googleapis.com/bigquery/v2/",
"baseUrl": "https://bigquery.googleapis.com/bigquery/v2/",
"batchPath": "batch/bigquery/v2",
"description": "A data platform for customers to create, manage, share and query data.",
"discoveryVersion": "v1",
@@ -693,7 +693,7 @@
"type": "string"
},
"maxResults": {
"description": "The maximum number of results per page.",
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"format": "uint32",
"location": "query",
"type": "integer"
@@ -824,6 +824,226 @@
}
}
},
"routines": {
"methods": {
"delete": {
"description": "Deletes the routine specified by routineId from the dataset.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "DELETE",
"id": "bigquery.routines.delete",
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the routine to delete",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the routine to delete",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"routineId": {
"description": "Routine ID of the routine to delete",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"get": {
"description": "Gets the specified routine resource by routine ID.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "GET",
"id": "bigquery.routines.get",
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the requested routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"fieldMask": {
"description": "If set, only the Routine fields in the field mask are returned in the\nresponse. If unset, all Routine fields are returned.",
"format": "google-fieldmask",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Project ID of the requested routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"routineId": {
"description": "Routine ID of the requested routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"insert": {
"description": "Creates a new routine in the dataset.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines",
"httpMethod": "POST",
"id": "bigquery.routines.insert",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the new routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the new routine",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines",
"request": {
"$ref": "Routine"
},
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
},
"list": {
"description": "Lists all routines in the specified dataset. Requires the READER dataset\nrole.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines",
"httpMethod": "GET",
"id": "bigquery.routines.list",
"parameterOrder": [
"projectId",
"datasetId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the routines to list",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"maxResults": {
"description": "The maximum number of results to return in a single response page.\nLeverage the page tokens to iterate through the entire collection.",
"format": "uint32",
"location": "query",
"type": "integer"
},
"pageToken": {
"description": "Page token, returned by a previous call, to request the next page of\nresults",
"location": "query",
"type": "string"
},
"projectId": {
"description": "Project ID of the routines to list",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines",
"response": {
"$ref": "ListRoutinesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only"
]
},
"update": {
"description": "Updates information in an existing routine. The update method replaces the\nentire Routine resource.",
"flatPath": "projects/{projectsId}/datasets/{datasetsId}/routines/{routinesId}",
"httpMethod": "PUT",
"id": "bigquery.routines.update",
"parameterOrder": [
"projectId",
"datasetId",
"routineId"
],
"parameters": {
"datasetId": {
"description": "Dataset ID of the routine to update",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"projectId": {
"description": "Project ID of the routine to update",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
},
"routineId": {
"description": "Routine ID of the routine to update",
"location": "path",
"pattern": "^[^/]+$",
"required": true,
"type": "string"
}
},
"path": "projects/{+projectId}/datasets/{+datasetId}/routines/{+routineId}",
"request": {
"$ref": "Routine"
},
"response": {
"$ref": "Routine"
},
"scopes": [
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
},
"tabledata": {
"methods": {
"insertAll": {
@@ -1174,11 +1394,11 @@
}
}
},
"revision": "20190314",
"rootUrl": "https://www.googleapis.com/",
"revision": "20190630",
"rootUrl": "https://bigquery.googleapis.com/",
"schemas": {
"AggregateClassificationMetrics": {
"description": "Aggregate metrics for classification models. For multi-class models,\nthe metrics are either macro-averaged: metrics are calculated for each\nlabel and then an unweighted average is taken of those values or\nmicro-averaged: the metric is calculated globally by counting the total\nnumber of correctly predicted rows.",
"description": "Aggregate metrics for classification/classifier models. For multi-class\nmodels, the metrics are either macro-averaged or micro-averaged. When\nmacro-averaged, the metrics are calculated for each label and then an\nunweighted average is taken of those values. When micro-averaged, the\nmetric is calculated globally by counting the total number of correctly\npredicted rows.",
"id": "AggregateClassificationMetrics",
"properties": {
"accuracy": {
@@ -1219,6 +1439,51 @@
},
"type": "object"
},
"Argument": {
"description": "Input/output argument of a function or a stored procedure.",
"id": "Argument",
"properties": {
"argumentKind": {
"description": "Optional. Defaults to FIXED_TYPE.",
"enum": [
"ARGUMENT_KIND_UNSPECIFIED",
"FIXED_TYPE",
"ANY_TYPE"
],
"enumDescriptions": [
"",
"The argument is a variable with fully specified type, which can be a\nstruct or an array, but not a table.",
"The argument is any type, including struct or array, but not a table.\nTo be added: FIXED_TABLE, ANY_TABLE"
],
"type": "string"
},
"dataType": {
"$ref": "StandardSqlDataType",
"description": "Required unless argument_kind = ANY_TYPE."
},
"mode": {
"description": "Optional. Specifies whether the argument is input or output.\nCan be set for procedures only.",
"enum": [
"MODE_UNSPECIFIED",
"IN",
"OUT",
"INOUT"
],
"enumDescriptions": [
"",
"The argument is input-only.",
"The argument is output-only.",
"The argument is both an input and an output."
],
"type": "string"
},
"name": {
"description": "Optional. The name of this argument. Can be absent for function return\nargument.",
"type": "string"
}
},
"type": "object"
},
"BigQueryModelTraining": {
"id": "BigQueryModelTraining",
"properties": {
@@ -1316,7 +1581,7 @@
"type": "object"
},
"BinaryClassificationMetrics": {
"description": "Evaluation metrics for binary classification models.",
"description": "Evaluation metrics for binary classification/classifier models.",
"id": "BinaryClassificationMetrics",
"properties": {
"aggregateClassificationMetrics": {
@@ -1329,6 +1594,14 @@
"$ref": "BinaryConfusionMatrix"
},
"type": "array"
},
"negativeLabel": {
"description": "Label representing the negative class.",
"type": "string"
},
"positiveLabel": {
"description": "Label representing the positive class.",
"type": "string"
}
},
"type": "object"
@@ -1337,6 +1610,16 @@
"description": "Confusion matrix for binary classification models.",
"id": "BinaryConfusionMatrix",
"properties": {
"accuracy": {
"description": "The fraction of predictions given the correct label.",
"format": "double",
"type": "number"
},
"f1Score": {
"description": "The equally weighted average of recall and precision.",
"format": "double",
"type": "number"
},
"falseNegatives": {
"description": "Number of false samples predicted as false.",
"format": "int64",
@@ -1353,12 +1636,12 @@
"type": "number"
},
"precision": {
"description": "Aggregate precision.",
"description": "The fraction of actual positive predictions that had positive actual\nlabels.",
"format": "double",
"type": "number"
},
"recall": {
"description": "Aggregate recall.",
"description": "The fraction of actual positive labels that were given a positive\nprediction.",
"format": "double",
"type": "number"
},
@@ -1620,6 +1903,9 @@
"$ref": "DatasetReference",
"description": "[Required] A reference that identifies the dataset."
},
"defaultEncryptionConfiguration": {
"$ref": "EncryptionConfiguration"
},
"defaultPartitionExpirationMs": {
"description": "[Optional] The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an expirationMs property in the timePartitioning settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of defaultTableExpirationMs for partitioned tables: only one of defaultTableExpirationMs and defaultPartitionExpirationMs will be used for any new partitioned table. If you provide an explicit timePartitioning.expirationMs when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property.",
"format": "int64",
@@ -1824,12 +2110,12 @@
"type": "object"
},
"EvaluationMetrics": {
"description": "Evaluation metrics of a model. These are either computed on all\ntraining data or just the eval data based on whether eval data was used\nduring training.",
"description": "Evaluation metrics of a model. These are either computed on all training\ndata or just the eval data based on whether eval data was used during\ntraining. These are not present for imported models.",
"id": "EvaluationMetrics",
"properties": {
"binaryClassificationMetrics": {
"$ref": "BinaryClassificationMetrics",
"description": "Populated for binary classification models."
"description": "Populated for binary classification/classifier models."
},
"clusteringMetrics": {
"$ref": "ClusteringMetrics",
@@ -1837,7 +2123,7 @@
},
"multiClassClassificationMetrics": {
"$ref": "MultiClassClassificationMetrics",
"description": "Populated for multi-class classification models."
"description": "Populated for multi-class classification/classifier models."
},
"regressionMetrics": {
"$ref": "RegressionMetrics",
@@ -2041,9 +2327,13 @@
"description": "[Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS."
},
"hivePartitioningMode": {
"description": "[Optional, Experimental] If hive partitioning is enabled, which mode to use. Two modes are supported: - AUTO: automatically infer partition key name(s) and type(s). - STRINGS: automatic infer partition key name(s). All types are strings. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error.",
            "description": "[Optional, Trusted Tester] If hive partitioning is enabled, which mode to use. Two modes are supported: - AUTO: automatically infer partition key name(s) and type(s). - STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error. Note: this setting is in the process of being deprecated in favor of hivePartitioningOptions.",
"type": "string"
},
"hivePartitioningOptions": {
"$ref": "HivePartitioningOptions",
"description": "[Optional, Trusted Tester] Options to configure hive partitioning support."
},
"ignoreUnknownValues": {
"description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.",
"type": "boolean"
@@ -2165,6 +2455,20 @@
},
"type": "object"
},
"HivePartitioningOptions": {
"id": "HivePartitioningOptions",
"properties": {
"mode": {
"description": "[Optional, Trusted Tester] When set, what mode of hive partitioning to use when reading data. Two modes are supported. (1) AUTO: automatically infer partition key name(s) and type(s). (2) STRINGS: automatically infer partition key name(s). All types are interpreted as strings. Not all storage formats support hive partitioning. Requesting hive partitioning on an unsupported format will lead to an error. Currently supported types include: AVRO, CSV, JSON, ORC and Parquet.",
"type": "string"
},
"sourceUriPrefix": {
"description": "[Optional, Trusted Tester] When hive partition detection is requested, a common prefix for all source uris should be supplied. The prefix must end immediately before the partition key encoding begins. For example, consider files following this data layout. gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro When hive partitioning is requested with either AUTO or STRINGS detection, the common prefix can be either of gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing slash does not matter).",
"type": "string"
}
},
"type": "object"
},
"IterationResult": {
"description": "Information about a single iteration of the training run.",
"id": "IterationResult",
@@ -2386,9 +2690,13 @@
"type": "string"
},
"hivePartitioningMode": {
"description": "[Optional, Experimental] If hive partitioning is enabled, which mode to use. Two modes are supported: - AUTO: automatically infer partition key name(s) and type(s). - STRINGS: automatic infer partition key name(s). All types are strings. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error.",
            "description": "[Optional, Trusted Tester] If hive partitioning is enabled, which mode to use. Two modes are supported: - AUTO: automatically infer partition key name(s) and type(s). - STRINGS: automatically infer partition key name(s). All types are strings. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error.",
"type": "string"
},
"hivePartitioningOptions": {
"$ref": "HivePartitioningOptions",
"description": "[Optional, Trusted Tester] Options to configure hive partitioning support."
},
"ignoreUnknownValues": {
"description": "[Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names",
"type": "boolean"
@@ -2772,6 +3080,10 @@
},
"type": "array"
},
"reservation_id": {
"description": "[Output-only] Name of the primary reservation assigned to this job. Note that this could be different than reservations reported in the reservation usage field if parent reservations were used to execute this job.",
"type": "string"
},
"startTime": {
"description": "[Output-only] Start time of this job, in milliseconds since the epoch. This field will be present when the job transitions from the PENDING state to either RUNNING or DONE.",
"format": "int64",
@@ -2875,7 +3187,7 @@
"description": "[Output-only] The schema of the results. Present only for successful dry run of non-legacy SQL queries."
},
"statementType": {
"description": "The type of query statement, if valid. Possible values (new values might be added in the future): \"SELECT\": SELECT query. \"INSERT\": INSERT query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"UPDATE\": UPDATE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"DELETE\": DELETE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"MERGE\": MERGE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"CREATE_TABLE\": CREATE [OR REPLACE] TABLE without AS SELECT. \"CREATE_TABLE_AS_SELECT\": CREATE [OR REPLACE] TABLE ... AS SELECT ... . \"DROP_TABLE\": DROP TABLE query. \"CREATE_VIEW\": CREATE [OR REPLACE] VIEW ... AS SELECT ... . \"DROP_VIEW\": DROP VIEW query. \"CREATE_FUNCTION\": CREATE FUNCTION query. \"DROP_FUNCTION\" : DROP FUNCTION query. \"ALTER_TABLE\": ALTER TABLE query. \"ALTER_VIEW\": ALTER VIEW query.",
"description": "The type of query statement, if valid. Possible values (new values might be added in the future): \"SELECT\": SELECT query. \"INSERT\": INSERT query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"UPDATE\": UPDATE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"DELETE\": DELETE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"MERGE\": MERGE query; see https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language. \"ALTER_TABLE\": ALTER TABLE query. \"ALTER_VIEW\": ALTER VIEW query. \"CREATE_FUNCTION\": CREATE FUNCTION query. \"CREATE_MODEL\": CREATE [OR REPLACE] MODEL ... AS SELECT ... . \"CREATE_PROCEDURE\": CREATE PROCEDURE query. \"CREATE_TABLE\": CREATE [OR REPLACE] TABLE without AS SELECT. \"CREATE_TABLE_AS_SELECT\": CREATE [OR REPLACE] TABLE ... AS SELECT ... . \"CREATE_VIEW\": CREATE [OR REPLACE] VIEW ... AS SELECT ... . \"DROP_FUNCTION\" : DROP FUNCTION query. \"DROP_PROCEDURE\": DROP PROCEDURE query. \"DROP_TABLE\": DROP TABLE query. \"DROP_VIEW\": DROP VIEW query.",
"type": "string"
},
"timeline": {
@@ -3019,6 +3331,34 @@
},
"type": "object"
},
"ListRoutinesResponse": {
"id": "ListRoutinesResponse",
"properties": {
"nextPageToken": {
"description": "A token to request the next page of results.",
"type": "string"
},
"routines": {
"description": "Routines in the requested dataset. Only the following fields are populated:\netag, project_id, dataset_id, routine_id, routine_type, creation_time,\nlast_modified_time, language.",
"items": {
"$ref": "Routine"
},
"type": "array"
}
},
"type": "object"
},
"LocationMetadata": {
"description": "BigQuery-specific metadata about a location. This will be set on\ngoogle.cloud.location.Location.metadata in Cloud Location API\nresponses.",
"id": "LocationMetadata",
"properties": {
"legacyLocationId": {
"description": "The legacy BigQuery location ID, e.g. \u201cEU\u201d for the \u201ceurope\u201d location.\nThis is for any API consumers that need the legacy \u201cUS\u201d and \u201cEU\u201d locations.",
"type": "string"
}
},
"type": "object"
},
"MaterializedViewDefinition": {
"id": "MaterializedViewDefinition",
"properties": {
@@ -3043,7 +3383,7 @@
"type": "string"
},
"description": {
"description": "[Optional] A user-friendly description of this model.\n@mutable bigquery.models.patch",
"description": "[Optional] A user-friendly description of this model.",
"type": "string"
},
"etag": {
@@ -3051,7 +3391,7 @@
"type": "string"
},
"expirationTime": {
"description": "[Optional] The time when this model expires, in milliseconds since the\nepoch. If not present, the model will persist indefinitely. Expired models\nwill be deleted and their storage reclaimed. The defaultTableExpirationMs\nproperty of the encapsulating dataset can be used to set a default\nexpirationTime on newly created models.\n@mutable bigquery.models.patch",
"description": "[Optional] The time when this model expires, in milliseconds since the\nepoch. If not present, the model will persist indefinitely. Expired models\nwill be deleted and their storage reclaimed. The defaultTableExpirationMs\nproperty of the encapsulating dataset can be used to set a default\nexpirationTime on newly created models.",
"format": "int64",
"type": "string"
},
@@ -3063,11 +3403,11 @@
"type": "array"
},
"friendlyName": {
"description": "[Optional] A descriptive name for this model.\n@mutable bigquery.models.patch",
"description": "[Optional] A descriptive name for this model.",
"type": "string"
},
"labelColumns": {
"description": "Output only. Label columns that were used to train this model.\nThe output of the model will have a \u201cpredicted_\u201d prefix to these columns.",
"description": "Output only. Label columns that were used to train this model.\nThe output of the model will have a \"predicted_\" prefix to these columns.",
"items": {
"$ref": "StandardSqlField"
},
@@ -3077,7 +3417,7 @@
"additionalProperties": {
"type": "string"
},
"description": "[Optional] The labels associated with this model. You can use these to\norganize and group your models. Label keys and values can be no longer\nthan 63 characters, can only contain lowercase letters, numeric\ncharacters, underscores and dashes. International characters are allowed.\nLabel values are optional. Label keys must start with a letter and each\nlabel in the list must have a different key.\n@mutable bigquery.models.patch",
"description": "[Optional] The labels associated with this model. You can use these to\norganize and group your models. Label keys and values can be no longer\nthan 63 characters, can only contain lowercase letters, numeric\ncharacters, underscores and dashes. International characters are allowed.\nLabel values are optional. Label keys must start with a letter and each\nlabel in the list must have a different key.",
"type": "object"
},
"lastModifiedTime": {
@@ -3099,13 +3439,15 @@
"MODEL_TYPE_UNSPECIFIED",
"LINEAR_REGRESSION",
"LOGISTIC_REGRESSION",
"KMEANS"
"KMEANS",
"TENSORFLOW"
],
"enumDescriptions": [
"",
"Linear regression model.",
"Logistic regression model.",
"[Beta] K-means clustering model."
"Logistic regression based classification model.",
"[Beta] K-means clustering model.",
"[Beta] An imported TensorFlow model."
],
"type": "string"
},
@@ -3170,7 +3512,7 @@
"type": "object"
},
"MultiClassClassificationMetrics": {
"description": "Evaluation metrics for multi-class classification models.",
"description": "Evaluation metrics for multi-class classification/classifier models.",
"id": "MultiClassClassificationMetrics",
"properties": {
"aggregateClassificationMetrics": {
@@ -3550,6 +3892,81 @@
},
"type": "object"
},
"Routine": {
"description": "A user-defined function or a stored procedure.",
"id": "Routine",
"properties": {
"arguments": {
"description": "Optional.",
"items": {
"$ref": "Argument"
},
"type": "array"
},
"creationTime": {
"description": "Output only. The time when this routine was created, in milliseconds since\nthe epoch.",
"format": "int64",
"type": "string"
},
"definitionBody": {
"description": "Required. The body of the routine.\n\nFor functions, this is the expression in the AS clause.\n\nIf language=SQL, it is the substring inside (but excluding) the\nparentheses. For example, for the function created with the following\nstatement:\n\n`CREATE FUNCTION JoinLines(x string, y string) as (concat(x, \"\\n\", y))`\n\nThe definition_body is `concat(x, \"\\n\", y)` (\\n is not replaced with\nlinebreak).\n\nIf language=JAVASCRIPT, it is the evaluated string in the AS clause.\nFor example, for the function created with the following statement:\n\n`CREATE FUNCTION f() RETURNS STRING LANGUAGE js AS 'return \"\\n\";\\n'`\n\nThe definition_body is\n\n`return \"\\n\";\\n`\n\nNote that both \\n are replaced with linebreaks.",
"type": "string"
},
"etag": {
"description": "Output only. A hash of this resource.",
"type": "string"
},
"importedLibraries": {
"description": "Optional. If language = \"JAVASCRIPT\", this field stores the path of the\nimported JAVASCRIPT libraries.",
"items": {
"type": "string"
},
"type": "array"
},
"language": {
"description": "Optional. Defaults to \"SQL\".",
"enum": [
"LANGUAGE_UNSPECIFIED",
"SQL",
"JAVASCRIPT"
],
"enumDescriptions": [
"",
"SQL language.",
"JavaScript language."
],
"type": "string"
},
"lastModifiedTime": {
"description": "Output only. The time when this routine was last modified, in milliseconds\nsince the epoch.",
"format": "int64",
"type": "string"
},
"returnType": {
"$ref": "StandardSqlDataType",
"description": "Optional if language = \"SQL\"; required otherwise.\n\nIf absent, the return type is inferred from definition_body at query time\nin each query that references this routine. If present, then the evaluated\nresult will be cast to the specified returned type at query time.\n\nFor example, for the functions created with the following statements:\n\n* `CREATE FUNCTION Add(x FLOAT64, y FLOAT64) RETURNS FLOAT64 AS (x + y);`\n\n* `CREATE FUNCTION Increment(x FLOAT64) AS (Add(x, 1));`\n\n* `CREATE FUNCTION Decrement(x FLOAT64) RETURNS FLOAT64 AS (Add(x, -1));`\n\nThe return_type is `{type_kind: \"FLOAT64\"}` for `Add` and `Decrement`, and\nis absent for `Increment` (inferred as FLOAT64 at query time).\n\nSuppose the function `Add` is replaced by\n `CREATE OR REPLACE FUNCTION Add(x INT64, y INT64) AS (x + y);`\n\nThen the inferred return type of `Increment` is automatically changed to\nINT64 at query time, while the return type of `Decrement` remains FLOAT64."
},
"routineReference": {
"$ref": "RoutineReference",
"description": "Required. Reference describing the ID of this routine."
},
"routineType": {
"description": "Required.",
"enum": [
"ROUTINE_TYPE_UNSPECIFIED",
"SCALAR_FUNCTION",
"PROCEDURE"
],
"enumDescriptions": [
"",
"Non-builtin permanent scalar function.",
"Stored procedure."
],
"type": "string"
}
},
"type": "object"
},
"RoutineReference": {
"id": "RoutineReference",
"properties": {
@@ -4186,11 +4603,11 @@
"type": "string"
},
"earlyStop": {
"description": "Whether to stop early when the loss doesn't improve significantly\nany more (compared to min_relative_progress).",
"description": "Whether to stop early when the loss doesn't improve significantly\nany more (compared to min_relative_progress). Used only for iterative\ntraining algorithms.",
"type": "boolean"
},
"initialLearnRate": {
"description": "Specifies the initial learning rate for line search to start at.",
"description": "Specifies the initial learning rate for the line search learn rate\nstrategy.",
"format": "double",
"type": "number"
},
@@ -4216,16 +4633,16 @@
"format": "double",
"type": "number"
},
"description": "Weights associated with each label class, for rebalancing the\ntraining data.",
"description": "Weights associated with each label class, for rebalancing the\ntraining data. Only applicable for classification models.",
"type": "object"
},
"learnRate": {
"description": "Learning rate in training.",
"description": "Learning rate in training. Used only for iterative training algorithms.",
"format": "double",
"type": "number"
},
"learnRateStrategy": {
"description": "The strategy to determine learning rate.",
"description": "The strategy to determine learn rate for the current iteration.",
"enum": [
"LEARN_RATE_STRATEGY_UNSPECIFIED",
"LINE_SEARCH",
@@ -4253,20 +4670,38 @@
"type": "string"
},
"maxIterations": {
"description": "The maximum number of iterations in training.",
"description": "The maximum number of iterations in training. Used only for iterative\ntraining algorithms.",
"format": "int64",
"type": "string"
},
"minRelativeProgress": {
"description": "When early_stop is true, stops training when accuracy improvement is\nless than 'min_relative_progress'.",
"description": "When early_stop is true, stops training when accuracy improvement is\nless than 'min_relative_progress'. Used only for iterative training\nalgorithms.",
"format": "double",
"type": "number"
},
"modelUri": {
"description": "[Beta] Google Cloud Storage URI from which the model was imported. Only\napplicable for imported models.",
"type": "string"
},
"numClusters": {
"description": "[Beta] Number of clusters for clustering models.",
"format": "int64",
"type": "string"
},
"optimizationStrategy": {
"description": "Optimization strategy for training linear regression models.",
"enum": [
"OPTIMIZATION_STRATEGY_UNSPECIFIED",
"BATCH_GRADIENT_DESCENT",
"NORMAL_EQUATION"
],
"enumDescriptions": [
"",
"Uses an iterative batch gradient descent algorithm.",
"Uses a normal equation to solve linear regression problem."
],
"type": "string"
},
"warmStart": {
"description": "Whether to train a model from the last checkpoint.",
"type": "boolean"