Bump version to 1.0.9; update JSON schemas; add new APIs

This commit is contained in:
Sebastian Thiel
2019-07-05 11:32:35 +08:00
parent 99e97ceece
commit e42ebc0c2b
2442 changed files with 190984 additions and 71186 deletions

View File

@@ -104,35 +104,6 @@
},
"protocol": "rest",
"resources": {
"operations": {
"methods": {
"delete": {
"description": "Deletes a long-running operation. This method indicates that the client is\nno longer interested in the operation result. It does not cancel the\noperation. If the server doesn't support this method, it returns\n`google.rpc.Code.UNIMPLEMENTED`.",
"flatPath": "v1/operations/{operationsId}",
"httpMethod": "DELETE",
"id": "ml.operations.delete",
"parameterOrder": [
"name"
],
"parameters": {
"name": {
"description": "The name of the operation resource to be deleted.",
"location": "path",
"pattern": "^operations/.+$",
"required": true,
"type": "string"
}
},
"path": "v1/{+name}",
"response": {
"$ref": "GoogleProtobuf__Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
}
}
},
"projects": {
"methods": {
"getConfig": {
@@ -161,7 +132,7 @@
]
},
"predict": {
"description": "Performs prediction on the data in the request.\nCloud ML Engine implements a custom `predict` verb on top of an HTTP POST\nmethod. <p>For details of the request and response format, see the **guide\nto the [predict request format](/ml-engine/docs/v1/predict-request)**.",
"description": "Performs prediction on the data in the request.\nAI Platform implements a custom `predict` verb on top of an HTTP POST\nmethod. <p>For details of the request and response format, see the **guide\nto the [predict request format](/ml-engine/docs/v1/predict-request)**.",
"flatPath": "v1/projects/{projectsId}:predict",
"httpMethod": "POST",
"id": "ml.projects.predict",
@@ -308,7 +279,7 @@
],
"parameters": {
"filter": {
"description": "Optional. Specifies the subset of jobs to retrieve.\nYou can filter on the value of one or more attributes of the job object.\nFor example, retrieve jobs with a job identifier that starts with 'census':\n<p><code>gcloud ml-engine jobs list --filter='jobId:census*'</code>\n<p>List all failed jobs with names that start with 'rnn':\n<p><code>gcloud ml-engine jobs list --filter='jobId:rnn*\nAND state:FAILED'</code>\n<p>For more examples, see the guide to\n<a href=\"/ml-engine/docs/tensorflow/monitor-training\">monitoring jobs</a>.",
"description": "Optional. Specifies the subset of jobs to retrieve.\nYou can filter on the value of one or more attributes of the job object.\nFor example, retrieve jobs with a job identifier that starts with 'census':\n<p><code>gcloud ai-platform jobs list --filter='jobId:census*'</code>\n<p>List all failed jobs with names that start with 'rnn':\n<p><code>gcloud ai-platform jobs list --filter='jobId:rnn*\nAND state:FAILED'</code>\n<p>For more examples, see the guide to\n<a href=\"/ml-engine/docs/tensorflow/monitor-training\">monitoring jobs</a>.",
"location": "query",
"type": "string"
},
@@ -468,7 +439,7 @@
],
"parameters": {
"pageSize": {
"description": "Optional. The number of locations to retrieve per \"page\" of results. If there\nare more remaining results than this number, the response message will\ncontain a valid value in the `next_page_token` field.\n\nThe default value is 20, and the maximum page size is 100.",
"description": "Optional. The number of locations to retrieve per \"page\" of results. If\nthere are more remaining results than this number, the response message\nwill contain a valid value in the `next_page_token` field.\n\nThe default value is 20, and the maximum page size is 100.",
"format": "int32",
"location": "query",
"type": "integer"
@@ -1019,11 +990,11 @@
}
}
},
"revision": "20190325",
"revision": "20190621",
"rootUrl": "https://ml.googleapis.com/",
"schemas": {
"GoogleApi__HttpBody": {
"description": "Message that represents an arbitrary HTTP body. It should only be used for\npayload formats that can't be represented as JSON, such as raw binary or\nan HTML page.\n\n\nThis message can be used both in streaming and non-streaming API methods in\nthe request as well as the response.\n\nIt can be used as a top-level request field, which is convenient if one\nwants to extract parameters from either the URL or HTTP template into the\nrequest fields and also want access to the raw HTTP body.\n\nExample:\n\n message GetResourceRequest {\n // A unique request id.\n string request_id = 1;\n\n // The raw HTTP body is bound to this field.\n google.api.HttpBody http_body = 2;\n }\n\n service ResourceService {\n rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);\n rpc UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty);\n }\n\nExample with streaming methods:\n\n service CaldavService {\n rpc GetCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n rpc UpdateCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n }\n\nUse of this type only changes how the request and response bodies are\nhandled, all other features will continue to work unchanged.",
"description": "Message that represents an arbitrary HTTP body. It should only be used for\npayload formats that can't be represented as JSON, such as raw binary or\nan HTML page.\n\n\nThis message can be used both in streaming and non-streaming API methods in\nthe request as well as the response.\n\nIt can be used as a top-level request field, which is convenient if one\nwants to extract parameters from either the URL or HTTP template into the\nrequest fields and also want access to the raw HTTP body.\n\nExample:\n\n message GetResourceRequest {\n // A unique request id.\n string request_id = 1;\n\n // The raw HTTP body is bound to this field.\n google.api.HttpBody http_body = 2;\n }\n\n service ResourceService {\n rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);\n rpc UpdateResource(google.api.HttpBody) returns\n (google.protobuf.Empty);\n }\n\nExample with streaming methods:\n\n service CaldavService {\n rpc GetCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n rpc UpdateCalendar(stream google.api.HttpBody)\n returns (stream google.api.HttpBody);\n }\n\nUse of this type only changes how the request and response bodies are\nhandled, all other features will continue to work unchanged.",
"id": "GoogleApi__HttpBody",
"properties": {
"contentType": {
@@ -1083,6 +1054,7 @@
"NVIDIA_TESLA_P100",
"NVIDIA_TESLA_V100",
"NVIDIA_TESLA_P4",
"NVIDIA_TESLA_T4",
"TPU_V2"
],
"enumDescriptions": [
@@ -1091,6 +1063,7 @@
"Nvidia Tesla P100 GPU.",
"Nvidia Tesla V100 GPU.",
"Nvidia Tesla P4 GPU.",
"Nvidia Tesla T4 GPU.",
"TPU v2."
],
"type": "string"
@@ -1115,19 +1088,19 @@
"id": "GoogleCloudMlV1__BuiltInAlgorithmOutput",
"properties": {
"framework": {
"description": "Framework on which the built-in algorithm was trained on.",
"description": "Framework on which the built-in algorithm was trained.",
"type": "string"
},
"modelPath": {
"description": "Built-in algorithm's saved model path.\nOnly set for non-hptuning succeeded jobs.",
"description": "The Cloud Storage path to the `model/` directory where the training job\nsaves the trained model. Only set for successful jobs that don't use\nhyperparameter tuning.",
"type": "string"
},
"pythonVersion": {
"description": "Python version on which the built-in algorithm was trained on.",
"description": "Python version on which the built-in algorithm was trained.",
"type": "string"
},
"runtimeVersion": {
"description": "CMLE runtime version on which the built-in algorithm was trained on.",
"description": "AI Platform runtime version on which the built-in algorithm was\ntrained.",
"type": "string"
}
},
@@ -1150,6 +1123,7 @@
"Nvidia Tesla P100 GPU.",
"Nvidia Tesla V100 GPU.",
"Nvidia Tesla P4 GPU.",
"Nvidia Tesla T4 GPU.",
"TPU v2."
],
"items": {
@@ -1159,6 +1133,7 @@
"NVIDIA_TESLA_P100",
"NVIDIA_TESLA_V100",
"NVIDIA_TESLA_P4",
"NVIDIA_TESLA_T4",
"TPU_V2"
],
"type": "string"
@@ -1225,7 +1200,12 @@
},
"builtInAlgorithmOutput": {
"$ref": "GoogleCloudMlV1__BuiltInAlgorithmOutput",
"description": "Details related to built-in algorithms job.\nOnly set this for built-in algorithms jobs and for trials that succeeded."
"description": "Details related to built-in algorithms jobs.\nOnly set for trials of built-in algorithms jobs that have succeeded."
},
"endTime": {
"description": "Output only. End time for the trial.",
"format": "google-datetime",
"type": "string"
},
"finalMetric": {
"$ref": "GoogleCloudMlV1_HyperparameterOutput_HyperparameterMetric",
@@ -1242,6 +1222,35 @@
"description": "True if the trial is stopped early.",
"type": "boolean"
},
"startTime": {
"description": "Output only. Start time for the trial.",
"format": "google-datetime",
"type": "string"
},
"state": {
"description": "Output only. The detailed state of the trial.",
"enum": [
"STATE_UNSPECIFIED",
"QUEUED",
"PREPARING",
"RUNNING",
"SUCCEEDED",
"FAILED",
"CANCELLING",
"CANCELLED"
],
"enumDescriptions": [
"The job state is unspecified.",
"The job has been just created and processing has not yet begun.",
"The service is preparing to run the job.",
"The job is in progress.",
"The job completed successfully.",
"The job failed.\n`error_message` should contain the details of the failure.",
"The job is being cancelled.\n`error_message` should describe the reason for the cancellation.",
"The job has been cancelled.\n`error_message` should describe the reason for the cancellation."
],
"type": "string"
},
"trialId": {
"description": "The trial id for these results.",
"type": "string"
@@ -1254,7 +1263,7 @@
"id": "GoogleCloudMlV1__HyperparameterSpec",
"properties": {
"algorithm": {
"description": "Optional. The search algorithm specified for the hyperparameter\ntuning job.\nUses the default CloudML Engine hyperparameter tuning\nalgorithm if unspecified.",
"description": "Optional. The search algorithm specified for the hyperparameter\ntuning job.\nUses the default AI Platform hyperparameter tuning\nalgorithm if unspecified.",
"enum": [
"ALGORITHM_UNSPECIFIED",
"GRID_SEARCH",
@@ -1286,11 +1295,11 @@
"type": "string"
},
"hyperparameterMetricTag": {
"description": "Optional. The Tensorflow summary tag name to use for optimizing trials. For\ncurrent versions of Tensorflow, this tag name should exactly match what is\nshown in Tensorboard, including all scopes. For versions of Tensorflow\nprior to 0.12, this should be only the tag passed to tf.Summary.\nBy default, \"training/hptuning/metric\" will be used.",
"description": "Optional. The TensorFlow summary tag name to use for optimizing trials. For\ncurrent versions of TensorFlow, this tag name should exactly match what is\nshown in TensorBoard, including all scopes. For versions of TensorFlow\nprior to 0.12, this should be only the tag passed to tf.Summary.\nBy default, \"training/hptuning/metric\" will be used.",
"type": "string"
},
"maxFailedTrials": {
"description": "Optional. How many failed trials that need to be seen before failing the\nhyperparameter tuning job. User can specify this field to override the\ndefault failing criteria for CloudML Engine hyperparameter tuning jobs.\n\nDefaults to zero, which means to let the service decide when a\nhyperparameter job should fail.",
"description": "Optional. The number of failed trials that need to be seen before failing\nthe hyperparameter tuning job. You can specify this field to override the\ndefault failing criteria for AI Platform hyperparameter tuning jobs.\n\nDefaults to zero, which means the service decides when a hyperparameter\njob should fail.",
"format": "int32",
"type": "integer"
},
@@ -1319,7 +1328,7 @@
"type": "object"
},
"GoogleCloudMlV1__Job": {
"description": "Represents a training, prediction or explanation job.",
"description": "Represents a training or prediction job.",
"id": "GoogleCloudMlV1__Job",
"properties": {
"createTime": {
@@ -1528,15 +1537,15 @@
"type": "string"
},
"onlinePredictionConsoleLogging": {
"description": "Optional. If true, enables logging of stderr and stdout streams\nfor online prediction in Stackdriver Logging. These can be more verbose\nthan the standard access logs (see `online_prediction_logging`) and thus\ncan incur higher cost. However, they are helpful for debugging. Note that\nsince Stackdriver logs may incur a cost, particularly if the total QPS\nin your project is high, be sure to estimate your costs before enabling\nthis flag.\n\nDefault is false.",
"description": "Optional. If true, online prediction nodes send `stderr` and `stdout`\nstreams to Stackdriver Logging. These can be more verbose than the standard\naccess logs (see `onlinePredictionLogging`) and can incur higher cost.\nHowever, they are helpful for debugging. Note that\n[Stackdriver logs may incur a cost](/stackdriver/pricing), especially if\nyour project receives prediction requests at a high QPS. Estimate your\ncosts before enabling this option.\n\nDefault is false.",
"type": "boolean"
},
"onlinePredictionLogging": {
"description": "Optional. If true, online prediction access logs are sent to StackDriver\nLogging. These logs are like standard server access logs, containing\ninformation like timestamp and latency for each request. Note that\nStackdriver logs may incur a cost, particular if the total QPS in your\nproject is high.\n\nDefault is false.",
"description": "Optional. If true, online prediction access logs are sent to StackDriver\nLogging. These logs are like standard server access logs, containing\ninformation like timestamp and latency for each request. Note that\n[Stackdriver logs may incur a cost](/stackdriver/pricing), especially if\nyour project receives prediction requests at a high queries per second rate\n(QPS). Estimate your costs before enabling this option.\n\nDefault is false.",
"type": "boolean"
},
"regions": {
"description": "Optional. The list of regions where the model is going to be deployed.\nCurrently only one region per model is supported.\nDefaults to 'us-central1' if nothing is set.\nSee the <a href=\"/ml-engine/docs/tensorflow/regions\">available regions</a>\nfor ML Engine services.\nNote:\n* No matter where a model is deployed, it can always be accessed by\n users from anywhere, both for online and batch prediction.\n* The region for a batch prediction job is set by the region field when\n submitting the batch prediction job and does not take its value from\n this field.",
"description": "Optional. The list of regions where the model is going to be deployed.\nCurrently only one region per model is supported.\nDefaults to 'us-central1' if nothing is set.\nSee the <a href=\"/ml-engine/docs/tensorflow/regions\">available regions</a>\nfor AI Platform services.\nNote:\n* No matter where a model is deployed, it can always be accessed by\n users from anywhere, both for online and batch prediction.\n* The region for a batch prediction job is set by the region field when\n submitting the batch prediction job and does not take its value from\n this field.",
"items": {
"type": "string"
},
@@ -1724,7 +1733,7 @@
"type": "string"
},
"inputPaths": {
"description": "Required. The Google Cloud Storage location of the input data files.\nMay contain wildcards. See <a href=\"https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames</a>",
"description": "Required. The Cloud Storage location of the input data files. May contain\n<a href=\"/storage/docs/gsutil/addlhelp/WildcardNames\">wildcards</a>.",
"items": {
"type": "string"
},
@@ -1764,11 +1773,11 @@
"type": "string"
},
"region": {
"description": "Required. The Google Compute Engine region to run the prediction job in.\nSee the <a href=\"/ml-engine/docs/tensorflow/regions\">available regions</a>\nfor ML Engine services.",
"description": "Required. The Google Compute Engine region to run the prediction job in.\nSee the <a href=\"/ml-engine/docs/tensorflow/regions\">available regions</a>\nfor AI Platform services.",
"type": "string"
},
"runtimeVersion": {
"description": "Optional. The Cloud ML Engine runtime version to use for this batch\nprediction. If not set, Cloud ML Engine will pick the runtime version used\nduring the CreateVersion request for this model version, or choose the\nlatest stable version when model version information is not available\nsuch as when the model is specified by uri.",
"description": "Optional. The AI Platform runtime version to use for this batch\nprediction. If not set, AI Platform will pick the runtime version used\nduring the CreateVersion request for this model version, or choose the\nlatest stable version when model version information is not available\nsuch as when the model is specified by uri.",
"type": "string"
},
"signatureName": {
@@ -1823,6 +1832,10 @@
"imageUri": {
"description": "The Docker image to run on the replica. This image must be in Container\nRegistry. Learn more about [configuring custom\ncontainers](/ml-engine/docs/distributed-training-containers).",
"type": "string"
},
"tpuTfVersion": {
"description": "TensorFlow version used in the custom container. This field is required if\nthe replica is a TPU worker that uses a custom container. Otherwise, do not\nspecify this field.",
"type": "string"
}
},
"type": "object"
@@ -1860,6 +1873,11 @@
"description": "Optional. Specifies the type of virtual machine to use for your training\njob's master worker.\n\nThe following types are supported:\n\n<dl>\n  <dt>standard</dt>\n  <dd>\n  A basic machine configuration suitable for training simple models with\n  small to moderate datasets.\n  </dd>\n  <dt>large_model</dt>\n  <dd>\n  A machine with a lot of memory, specially suited for parameter servers\n  when your model is large (having many hidden layers or layers with very\n  large numbers of nodes).\n  </dd>\n  <dt>complex_model_s</dt>\n  <dd>\n  A machine suitable for the master and workers of the cluster when your\n  model requires more computation than the standard machine can handle\n  satisfactorily.\n  </dd>\n  <dt>complex_model_m</dt>\n  <dd>\n  A machine with roughly twice the number of cores and roughly double the\n  memory of <i>complex_model_s</i>.\n  </dd>\n  <dt>complex_model_l</dt>\n  <dd>\n  A machine with roughly twice the number of cores and roughly double the\n  memory of <i>complex_model_m</i>.\n  </dd>\n  <dt>standard_gpu</dt>\n  <dd>\n  A machine equivalent to <i>standard</i> that\n  also includes a single NVIDIA Tesla K80 GPU. See more about\n  <a href=\"/ml-engine/docs/tensorflow/using-gpus\">using GPUs to\n  train your model</a>.\n  </dd>\n  <dt>complex_model_m_gpu</dt>\n  <dd>\n  A machine equivalent to <i>complex_model_m</i> that also includes\n  four NVIDIA Tesla K80 GPUs.\n  </dd>\n  <dt>complex_model_l_gpu</dt>\n  <dd>\n  A machine equivalent to <i>complex_model_l</i> that also includes\n  eight NVIDIA Tesla K80 GPUs.\n  </dd>\n  <dt>standard_p100</dt>\n  <dd>\n  A machine equivalent to <i>standard</i> that\n  also includes a single NVIDIA Tesla P100 GPU.\n  </dd>\n  <dt>complex_model_m_p100</dt>\n  <dd>\n  A machine equivalent to <i>complex_model_m</i> that also includes\n  four NVIDIA Tesla P100 GPUs.\n  </dd>\n  <dt>standard_v100</dt>\n  <dd>\n  A machine equivalent to <i>standard</i> that\n  also includes a single NVIDIA Tesla V100 GPU.\n  </dd>\n  <dt>large_model_v100</dt>\n  <dd>\n  A machine equivalent to <i>large_model</i> that\n  also includes a single NVIDIA Tesla V100 GPU.\n  </dd>\n  <dt>complex_model_m_v100</dt>\n  <dd>\n  A machine equivalent to <i>complex_model_m</i> that\n  also includes four NVIDIA Tesla V100 GPUs.\n  </dd>\n  <dt>complex_model_l_v100</dt>\n  <dd>\n  A machine equivalent to <i>complex_model_l</i> that\n  also includes eight NVIDIA Tesla V100 GPUs.\n  </dd>\n  <dt>cloud_tpu</dt>\n  <dd>\n  A TPU VM including one Cloud TPU. See more about\n  <a href=\"/ml-engine/docs/tensorflow/using-tpus\">using TPUs to train\n  your model</a>.\n  </dd>\n</dl>\n\nYou may also use certain Compute Engine machine types directly in this\nfield. The following types are supported:\n\n- `n1-standard-4`\n- `n1-standard-8`\n- `n1-standard-16`\n- `n1-standard-32`\n- `n1-standard-64`\n- `n1-standard-96`\n- `n1-highmem-2`\n- `n1-highmem-4`\n- `n1-highmem-8`\n- `n1-highmem-16`\n- `n1-highmem-32`\n- `n1-highmem-64`\n- `n1-highmem-96`\n- `n1-highcpu-16`\n- `n1-highcpu-32`\n- `n1-highcpu-64`\n- `n1-highcpu-96`\n\nSee more about [using Compute Engine machine\ntypes](/ml-engine/docs/tensorflow/machine-types#compute-engine-machine-types).\n\nYou must set this value when `scaleTier` is set to `CUSTOM`.",
"type": "string"
},
"maxRunningTime": {
"description": "Optional. The maximum job running time. The default is 7 days.",
"format": "google-duration",
"type": "string"
},
"packageUris": {
"description": "Required. The Google Cloud Storage location of the packages with\nthe training program and any additional dependencies.\nThe maximum number of package URIs is 100.",
"items": {
@@ -1869,7 +1887,7 @@
},
"parameterServerConfig": {
"$ref": "GoogleCloudMlV1__ReplicaConfig",
"description": "Optional. The configuration for parameter servers.\n\nYou should only set `parameterServerConfig.acceleratorConfig` if\n`parameterServerConfigType` is set to a Compute Engine machine type. [Learn\nabout restrictions on accelerator configurations for\ntraining.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)\n\nSet `parameterServerConfig.imageUri` only if you build a custom image for\nyour parameter server. If `parameterServerConfig.imageUri` has not been\nset, Cloud ML Engine uses the value of `masterConfig.imageUri`.\nLearn more about [configuring custom\ncontainers](/ml-engine/docs/distributed-training-containers)."
"description": "Optional. The configuration for parameter servers.\n\nYou should only set `parameterServerConfig.acceleratorConfig` if\n`parameterServerConfigType` is set to a Compute Engine machine type. [Learn\nabout restrictions on accelerator configurations for\ntraining.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)\n\nSet `parameterServerConfig.imageUri` only if you build a custom image for\nyour parameter server. If `parameterServerConfig.imageUri` has not been\nset, AI Platform uses the value of `masterConfig.imageUri`.\nLearn more about [configuring custom\ncontainers](/ml-engine/docs/distributed-training-containers)."
},
"parameterServerCount": {
"description": "Optional. The number of parameter server replicas to use for the training\njob. Each replica in the cluster will be of the type specified in\n`parameter_server_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`.If you\nset this value, you must also set `parameter_server_type`.\n\nThe default value is zero.",
@@ -1877,7 +1895,7 @@
"type": "string"
},
"parameterServerType": {
"description": "Optional. Specifies the type of virtual machine to use for your training\njob's parameter server.\n\nThe supported values are the same as those described in the entry for\n`master_type`.\n\nThis value must be consistent with the category of machine type that\n`masterType` uses. In other words, both must be Cloud ML Engine machine\ntypes or both must be Compute Engine machine types.\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`parameter_server_count` is greater than zero.",
"description": "Optional. Specifies the type of virtual machine to use for your training\njob's parameter server.\n\nThe supported values are the same as those described in the entry for\n`master_type`.\n\nThis value must be consistent with the category of machine type that\n`masterType` uses. In other words, both must be AI Platform machine\ntypes or both must be Compute Engine machine types.\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`parameter_server_count` is greater than zero.",
"type": "string"
},
"pythonModule": {
@@ -1889,11 +1907,11 @@
"type": "string"
},
"region": {
"description": "Required. The Google Compute Engine region to run the training job in.\nSee the <a href=\"/ml-engine/docs/tensorflow/regions\">available regions</a>\nfor ML Engine services.",
"description": "Required. The Google Compute Engine region to run the training job in.\nSee the <a href=\"/ml-engine/docs/tensorflow/regions\">available regions</a>\nfor AI Platform services.",
"type": "string"
},
"runtimeVersion": {
"description": "Optional. The Cloud ML Engine runtime version to use for training. If not\nset, Cloud ML Engine uses the default stable version, 1.0. For more\ninformation, see the\n<a href=\"/ml-engine/docs/runtime-version-list\">runtime version list</a>\nand\n<a href=\"/ml-engine/docs/versioning\">how to manage runtime versions</a>.",
"description": "Optional. The AI Platform runtime version to use for training. If not\nset, AI Platform uses the default stable version, 1.0. For more\ninformation, see the\n<a href=\"/ml-engine/docs/runtime-version-list\">runtime version list</a>\nand\n<a href=\"/ml-engine/docs/versioning\">how to manage runtime versions</a>.",
"type": "string"
},
"scaleTier": {
@@ -1918,7 +1936,7 @@
},
"workerConfig": {
"$ref": "GoogleCloudMlV1__ReplicaConfig",
"description": "Optional. The configuration for workers.\n\nYou should only set `workerConfig.acceleratorConfig` if `workerType` is set\nto a Compute Engine machine type. [Learn about restrictions on accelerator\nconfigurations for\ntraining.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)\n\nSet `workerConfig.imageUri` only if you build a custom image for your\nworker. If `workerConfig.imageUri` has not been set, Cloud ML Engine uses\nthe value of `masterConfig.imageUri`. Learn more about\n[configuring custom\ncontainers](/ml-engine/docs/distributed-training-containers)."
"description": "Optional. The configuration for workers.\n\nYou should only set `workerConfig.acceleratorConfig` if `workerType` is set\nto a Compute Engine machine type. [Learn about restrictions on accelerator\nconfigurations for\ntraining.](/ml-engine/docs/tensorflow/using-gpus#compute-engine-machine-types-with-gpu)\n\nSet `workerConfig.imageUri` only if you build a custom image for your\nworker. If `workerConfig.imageUri` has not been set, AI Platform uses\nthe value of `masterConfig.imageUri`. Learn more about\n[configuring custom\ncontainers](/ml-engine/docs/distributed-training-containers)."
},
"workerCount": {
"description": "Optional. The number of worker replicas to use for the training job. Each\nreplica in the cluster will be of the type specified in `worker_type`.\n\nThis value can only be used when `scale_tier` is set to `CUSTOM`. If you\nset this value, you must also set `worker_type`.\n\nThe default value is zero.",
@@ -1926,7 +1944,7 @@
"type": "string"
},
"workerType": {
"description": "Optional. Specifies the type of virtual machine to use for your training\njob's worker nodes.\n\nThe supported values are the same as those described in the entry for\n`masterType`.\n\nThis value must be consistent with the category of machine type that\n`masterType` uses. In other words, both must be Cloud ML Engine machine\ntypes or both must be Compute Engine machine types.\n\nIf you use `cloud_tpu` for this value, see special instructions for\n[configuring a custom TPU\nmachine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`workerCount` is greater than zero.",
"description": "Optional. Specifies the type of virtual machine to use for your training\njob's worker nodes.\n\nThe supported values are the same as those described in the entry for\n`masterType`.\n\nThis value must be consistent with the category of machine type that\n`masterType` uses. In other words, both must be AI Platform machine\ntypes or both must be Compute Engine machine types.\n\nIf you use `cloud_tpu` for this value, see special instructions for\n[configuring a custom TPU\nmachine](/ml-engine/docs/tensorflow/using-tpus#configuring_a_custom_tpu_machine).\n\nThis value must be present when `scaleTier` is set to `CUSTOM` and\n`workerCount` is greater than zero.",
"type": "string"
}
},
@@ -1938,7 +1956,7 @@
"properties": {
"builtInAlgorithmOutput": {
"$ref": "GoogleCloudMlV1__BuiltInAlgorithmOutput",
"description": "Details related to built-in algorithms job.\nOnly set for built-in algorithms jobs."
"description": "Details related to built-in algorithms jobs.\nOnly set for built-in algorithms jobs."
},
"completedTrialCount": {
"description": "The number of hyperparameter tuning trials that completed successfully.\nOnly set for hyperparameter tuning jobs.",
@@ -1950,6 +1968,10 @@
"format": "double",
"type": "number"
},
"hyperparameterMetricTag": {
"description": "The TensorFlow summary tag name used for optimizing hyperparameter tuning\ntrials. See\n[`HyperparameterSpec.hyperparameterMetricTag`](#HyperparameterSpec.FIELDS.hyperparameter_metric_tag)\nfor more information. Only set for hyperparameter tuning jobs.",
"type": "string"
},
"isBuiltInAlgorithmJob": {
"description": "Whether this job is a built-in Algorithm job.",
"type": "boolean"
@@ -1969,7 +1991,7 @@
"type": "object"
},
"GoogleCloudMlV1__Version": {
"description": "Represents a version of the model.\n\nEach version is a trained model deployed in the cloud, ready to handle\nprediction requests. A model can have multiple versions. You can get\ninformation about all of the versions of a given model by calling\n[projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).\nNext ID: 30",
"description": "Represents a version of the model.\n\nEach version is a trained model deployed in the cloud, ready to handle\nprediction requests. A model can have multiple versions. You can get\ninformation about all of the versions of a given model by calling\n[projects.models.versions.list](/ml-engine/reference/rest/v1/projects.models.versions/list).",
"id": "GoogleCloudMlV1__Version",
"properties": {
"autoScaling": {
@@ -1982,7 +2004,7 @@
"type": "string"
},
"deploymentUri": {
"description": "Required. The Google Cloud Storage location of the trained model used to\ncreate the version. See the\n[guide to model\ndeployment](/ml-engine/docs/tensorflow/deploying-models) for more\ninformation.\n\nWhen passing Version to\n[projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create)\nthe model service uses the specified location as the source of the model.\nOnce deployed, the model version is hosted by the prediction service, so\nthis location is useful only as a historical record.\nThe total number of model files can't exceed 1000.",
"description": "Required. The Cloud Storage location of the trained model used to\ncreate the version. See the\n[guide to model\ndeployment](/ml-engine/docs/tensorflow/deploying-models) for more\ninformation.\n\nWhen passing Version to\n[projects.models.versions.create](/ml-engine/reference/rest/v1/projects.models.versions/create)\nthe model service uses the specified location as the source of the model.\nOnce deployed, the model version is hosted by the prediction service, so\nthis location is useful only as a historical record.\nThe total number of model files can't exceed 1000.",
"type": "string"
},
"description": {
@@ -1999,7 +2021,7 @@
"type": "string"
},
"framework": {
"description": "Optional. The machine learning framework Cloud ML Engine uses to train\nthis version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,\n`XGBOOST`. If you do not specify a framework, Cloud ML Engine\nwill analyze files in the deployment_uri to determine a framework. If you\nchoose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version\nof the model to 1.4 or greater.",
"description": "Optional. The machine learning framework AI Platform uses to train\nthis version of the model. Valid values are `TENSORFLOW`, `SCIKIT_LEARN`,\n`XGBOOST`. If you do not specify a framework, AI Platform\nwill analyze files in the deployment_uri to determine a framework. If you\nchoose `SCIKIT_LEARN` or `XGBOOST`, you must also set the runtime version\nof the model to 1.4 or greater.\n\nDo **not** specify a framework if you're deploying a [custom\nprediction routine](/ml-engine/docs/tensorflow/custom-prediction-routines).",
"enum": [
"FRAMEWORK_UNSPECIFIED",
"TENSORFLOW",
@@ -2042,12 +2064,27 @@
"description": "Required.The name specified for the version when it was created.\n\nThe version name must be unique within the model it is created in.",
"type": "string"
},
"packageUris": {
"description": "Optional. Cloud Storage paths (`gs://\u2026`) of packages for [custom\nprediction routines](/ml-engine/docs/tensorflow/custom-prediction-routines)\nor [scikit-learn pipelines with custom\ncode](/ml-engine/docs/scikit/exporting-for-prediction#custom-pipeline-code).\n\nFor a custom prediction routine, one of these packages must contain your\nPredictor class (see\n[`predictionClass`](#Version.FIELDS.prediction_class)). Additionally,\ninclude any dependencies used by your Predictor or scikit-learn pipeline\nuses that are not already included in your selected [runtime\nversion](/ml-engine/docs/tensorflow/runtime-version-list).\n\nIf you specify this field, you must also set\n[`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.",
"items": {
"type": "string"
},
"type": "array"
},
"predictionClass": {
"description": "Optional. The fully qualified name\n(<var>module_name</var>.<var>class_name</var>) of a class that implements\nthe Predictor interface described in this reference field. The module\ncontaining this class should be included in a package provided to the\n[`packageUris` field](#Version.FIELDS.package_uris).\n\nSpecify this field if and only if you are deploying a [custom prediction\nroutine (beta)](/ml-engine/docs/tensorflow/custom-prediction-routines).\nIf you specify this field, you must set\n[`runtimeVersion`](#Version.FIELDS.runtime_version) to 1.4 or greater.\n\nThe following code sample provides the Predictor interface:\n\n```py\nclass Predictor(object):\n\"\"\"Interface for constructing custom predictors.\"\"\"\n\ndef predict(self, instances, **kwargs):\n \"\"\"Performs custom prediction.\n\n Instances are the decoded values from the request. They have already\n been deserialized from JSON.\n\n Args:\n instances: A list of prediction input instances.\n **kwargs: A dictionary of keyword args provided as additional\n fields on the predict request body.\n\n Returns:\n A list of outputs containing the prediction results. This list must\n be JSON serializable.\n \"\"\"\n raise NotImplementedError()\n\n@classmethod\ndef from_path(cls, model_dir):\n \"\"\"Creates an instance of Predictor using the given path.\n\n Loading of the predictor should be done in this method.\n\n Args:\n model_dir: The local directory that contains the exported model\n file along with any additional files uploaded when creating the\n version resource.\n\n Returns:\n An instance implementing this Predictor class.\n \"\"\"\n raise NotImplementedError()\n```\n\nLearn more about [the Predictor interface and custom prediction\nroutines](/ml-engine/docs/tensorflow/custom-prediction-routines).",
"type": "string"
},
"pythonVersion": {
"description": "Optional. The version of Python used in prediction. If not set, the default\nversion is '2.7'. Python '3.5' is available when `runtime_version` is set\nto '1.4' and above. Python '2.7' works with all supported runtime versions.",
"type": "string"
},
"runtimeVersion": {
"description": "Optional. The Cloud ML Engine runtime version to use for this deployment.\nIf not set, Cloud ML Engine uses the default stable version, 1.0. For more\ninformation, see the\n[runtime version list](/ml-engine/docs/runtime-version-list) and\n[how to manage runtime versions](/ml-engine/docs/versioning).",
"description": "Optional. The AI Platform runtime version to use for this deployment.\nIf not set, AI Platform uses the default stable version, 1.0. For more\ninformation, see the\n[runtime version list](/ml-engine/docs/runtime-version-list) and\n[how to manage runtime versions](/ml-engine/docs/versioning).",
"type": "string"
},
"serviceAccount": {
"description": "Optional. Specifies the service account for resource access control.",
"type": "string"
},
"state": {
@@ -2127,7 +2164,7 @@
"properties": {
"condition": {
"$ref": "GoogleType__Expr",
"description": "The condition that is associated with this binding.\nNOTE: an unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently."
"description": "The condition that is associated with this binding.\nNOTE: An unsatisfied condition will not allow user access via current\nbinding. Different bindings, including their conditions, are examined\nindependently."
},
"members": {
"description": "Specifies the identities requesting access for a Cloud Platform resource.\n`members` can have the following values:\n\n* `allUsers`: A special identifier that represents anyone who is\n on the internet; with or without a Google account.\n\n* `allAuthenticatedUsers`: A special identifier that represents anyone\n who is authenticated with a Google account or a service account.\n\n* `user:{emailid}`: An email address that represents a specific Google\n account. For example, `alice@gmail.com` .\n\n\n* `serviceAccount:{emailid}`: An email address that represents a service\n account. For example, `my-other-app@appspot.gserviceaccount.com`.\n\n* `group:{emailid}`: An email address that represents a Google group.\n For example, `admins@example.com`.\n\n\n* `domain:{domain}`: The G Suite domain (primary) that represents all the\n users of that domain. For example, `google.com` or `example.com`.\n\n",
@@ -2257,7 +2294,7 @@
"type": "object"
},
"name": {
"description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should have the format of `operations/some/unique/name`.",
"description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.",
"type": "string"
},
"response": {
@@ -2278,7 +2315,7 @@
"type": "object"
},
"GoogleRpc__Status": {
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error\nmessage, and error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting.\n\n- Batch operations. 
If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.",
"description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).",
"id": "GoogleRpc__Status",
"properties": {
"code": {