chore(json): fetch latest json and re-gen all code
@@ -2,7 +2,7 @@
 // This file was generated automatically from 'src/mako/api/lib.rs.mako'
 // DO NOT EDIT !
 
-//! This documentation was generated from *dataproc* crate version *1.0.0+20160503*, where *20160503* is the exact revision of the *dataproc:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v1.0.0*.
+//! This documentation was generated from *dataproc* crate version *1.0.0+20161102*, where *20161102* is the exact revision of the *dataproc:v1* schema built by the [mako](http://www.makotemplates.org/) code generator *v1.0.0*.
 //!
 //! Everything else about the *dataproc* *v1* API can be found at the
 //! [official documentation site](https://cloud.google.com/dataproc/).
@@ -161,7 +161,7 @@ impl<'a, C, A> Dataproc<C, A>
 // ############
 // SCHEMAS ###
 // ##########
-/// A Cloud Dataproc job for running Spark SQL queries.
+/// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries.
 ///
 /// This type is not used in any activity, and only used as *part* of another schema.
 ///
@@ -189,28 +189,28 @@ pub struct SparkSqlJob {
 impl Part for SparkSqlJob {}
 
 
-/// The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.
+/// [Optional] The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.
 ///
 /// This type is not used in any activity, and only used as *part* of another schema.
 ///
 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct InstanceGroupConfig {
-    /// The number of VM instances in the instance group. For master instance groups, must be set to 1.
+    /// [Required] The number of VM instances in the instance group. For master instance groups, must be set to 1.
     #[serde(rename="numInstances")]
     pub num_instances: Option<i32>,
-    /// Specifies that this instance group contains Preemptible Instances.
+    /// [Optional] Specifies that this instance group contains preemptible instances.
     #[serde(rename="isPreemptible")]
     pub is_preemptible: Option<bool>,
     /// [Output-only] The config for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.
     #[serde(rename="managedGroupConfig")]
     pub managed_group_config: Option<ManagedGroupConfig>,
-    /// The Google Compute Engine machine type used for cluster instances. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`.
+    /// [Required] The Google Compute Engine machine type used for cluster instances. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`.
     #[serde(rename="machineTypeUri")]
     pub machine_type_uri: Option<String>,
-    /// Disk option config settings.
+    /// [Optional] Disk option config settings.
     #[serde(rename="diskConfig")]
     pub disk_config: Option<DiskConfig>,
-    /// The list of instance names. Cloud Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group if not set by user (recommended practice is to let Cloud Dataproc derive the name).
+    /// [Optional] The list of instance names. Cloud Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group if not set by user (recommended practice is to let Cloud Dataproc derive the name).
     #[serde(rename="instanceNames")]
     pub instance_names: Option<Vec<String>>,
     /// [Output-only] The Google Compute Engine image resource used for cluster instances. Inferred from `SoftwareConfig.image_version`.
@@ -221,6 +221,26 @@ pub struct InstanceGroupConfig {
 impl Part for InstanceGroupConfig {}
 
 
+/// A YARN application created by a job. Application information is a subset of org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+///
+/// This type is not used in any activity, and only used as *part* of another schema.
+///
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct YarnApplication {
+    /// [Required] The numerical progress of the application, from 1 to 100.
+    pub progress: Option<f32>,
+    /// [Required] The application state.
+    pub state: Option<String>,
+    /// [Required] The application name.
+    pub name: Option<String>,
+    /// [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.
+    #[serde(rename="trackingUrl")]
+    pub tracking_url: Option<String>,
+}
+
+impl Part for YarnApplication {}
+
+
 /// A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.
 ///
 /// # Activities
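Since the new `YarnApplication` type derives `Deserialize`, the report decodes from a job response like any other schema type. A minimal sketch of that round-trip, assuming the crate is linked as `google_dataproc1` and `serde_json` is available; the field values are invented for illustration:

extern crate google_dataproc1;
extern crate serde_json;

use google_dataproc1::YarnApplication;

fn main() {
    // A hypothetical report as it would appear in a job's `yarnApplications`
    // array; the values below are illustrative only.
    let raw = r#"{
        "name": "wordcount",
        "state": "RUNNING",
        "progress": 42.0,
        "trackingUrl": "http://cluster-m:8088/proxy/application_1/"
    }"#;
    // `trackingUrl` maps onto `tracking_url` via the #[serde(rename)] attribute.
    let app: YarnApplication = serde_json::from_str(raw).unwrap();
    assert_eq!(app.state, Some("RUNNING".to_string()));
    assert_eq!(app.progress, Some(42.0));
}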
@@ -264,7 +284,7 @@ pub struct JobReference {
     /// [Required] The ID of the Google Cloud Platform project that the job belongs to.
     #[serde(rename="projectId")]
     pub project_id: Option<String>,
-    /// [Required] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
+    /// [Optional] The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 512 characters.
     #[serde(rename="jobId")]
     pub job_id: Option<String>,
 }
@@ -278,18 +298,21 @@ impl Part for JobReference {}
 ///
 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct GceClusterConfig {
-    /// The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see https://cloud.google.com/compute/docs/subnetworks for more information). Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`.
+    /// [Optional] If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.
+    #[serde(rename="internalIpOnly")]
+    pub internal_ip_only: Option<bool>,
+    /// [Optional] The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the "default" network of the project is used, if it exists. Cannot be a "Custom Subnet Network" (see [Using Subnetworks](/compute/docs/subnetworks) for more information). Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`.
     #[serde(rename="networkUri")]
     pub network_uri: Option<String>,
-    /// The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`.
+    /// [Optional] The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`.
     #[serde(rename="subnetworkUri")]
     pub subnetwork_uri: Option<String>,
-    /// The URIs of service account scopes to be included in Google Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specfied, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
+    /// [Optional] The URIs of service account scopes to be included in Google Compute Engine instances. The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control
     #[serde(rename="serviceAccountScopes")]
     pub service_account_scopes: Option<Vec<String>>,
-    /// The Google Compute Engine tags to add to all instances.
+    /// The Google Compute Engine tags to add to all instances (see [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
     pub tags: Option<Vec<String>>,
-    /// The Google Compute Engine metadata entries to add to all instances.
+    /// The Google Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
     pub metadata: Option<HashMap<String, String>>,
     /// [Required] The zone where the Google Compute Engine cluster will be located. Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`.
     #[serde(rename="zoneUri")]
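The new `internal_ip_only` flag is set like any other optional field on this `Default`-derived struct. A minimal sketch, assuming the crate is linked as `google_dataproc1`; the project and subnetwork URIs are placeholders, and per the doc comment the restriction requires a subnetwork-enabled network:

extern crate google_dataproc1;

use google_dataproc1::GceClusterConfig;

fn main() {
    // Placeholder URIs for a hypothetical project; `internal_ip_only` only
    // works on subnetwork-enabled networks, so `subnetwork_uri` is set too.
    let gce_config = GceClusterConfig {
        internal_ip_only: Some(true),
        subnetwork_uri: Some("https://www.googleapis.com/compute/v1/projects/my-project/regions/us-east1/sub0".to_string()),
        zone_uri: Some("https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-b".to_string()),
        ..Default::default()
    };
    assert_eq!(gce_config.internal_ip_only, Some(true));
}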
@@ -314,7 +337,7 @@ pub struct ClusterConfig {
     /// [Required] The shared Google Compute Engine config settings for all instances in a cluster.
     #[serde(rename="gceClusterConfig")]
     pub gce_cluster_config: Option<GceClusterConfig>,
-    /// [Optional] Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below: ROLE=$(/usr/share/google/get_metadata_value attributes/role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
+    /// [Optional] Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's role metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi
     #[serde(rename="initializationActions")]
     pub initialization_actions: Option<Vec<NodeInitializationAction>>,
     /// [Optional] The Google Compute Engine config settings for worker instances in a cluster.
@@ -368,7 +391,7 @@ impl Part for QueryList {}
 ///
 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct DiskConfig {
-    /// [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
+    /// [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.
     #[serde(rename="numLocalSsds")]
     pub num_local_ssds: Option<i32>,
     /// [Optional] Size in GB of the boot disk (default is 500GB).
@@ -397,9 +420,8 @@ pub struct Job {
     /// Job is a Hadoop job.
     #[serde(rename="hadoopJob")]
     pub hadoop_job: Option<HadoopJob>,
-    /// Job is a Pyspark job.
-    #[serde(rename="pysparkJob")]
-    pub pyspark_job: Option<PySparkJob>,
+    /// [Optional] The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.
+    pub labels: Option<HashMap<String, String>>,
     /// [Required] Job information, including how, when, and where to run the job.
     pub placement: Option<JobPlacement>,
     /// [Optional] The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a job_id.
@@ -413,12 +435,18 @@ pub struct Job {
     /// [Output-only] A URI pointing to the location of the stdout of the job's driver program.
     #[serde(rename="driverOutputResourceUri")]
     pub driver_output_resource_uri: Option<String>,
+    /// Job is a Pyspark job.
+    #[serde(rename="pysparkJob")]
+    pub pyspark_job: Option<PySparkJob>,
     /// [Output-only] If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as `driver_output_uri`.
     #[serde(rename="driverControlFilesUri")]
     pub driver_control_files_uri: Option<String>,
     /// Job is a Spark job.
     #[serde(rename="sparkJob")]
     pub spark_job: Option<SparkJob>,
+    /// [Output-only] The collection of YARN applications spun up by this job. **Beta** Feature: This report is available for testing purposes only. It may be changed before final release.
+    #[serde(rename="yarnApplications")]
+    pub yarn_applications: Option<Vec<YarnApplication>>,
     /// [Output-only] The previous job status.
     #[serde(rename="statusHistory")]
     pub status_history: Option<Vec<JobStatus>>,
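The `labels` map added to `Job` (and to `Cluster`, further down) carries the RFC 1035 constraints quoted in its doc comment: keys of 1 to 63 characters, optionally empty values, and at most 32 labels. A sketch of attaching a label when building a request, assuming `Job` derives `Default` like the other schema types; the key/value pair is illustrative:

extern crate google_dataproc1;

use std::collections::HashMap;
use google_dataproc1::Job;

fn main() {
    // "env"/"staging" is an illustrative label; keys and values must be
    // 1-63 chars and RFC 1035 compliant, with at most 32 labels per job.
    let mut labels = HashMap::new();
    labels.insert("env".to_string(), "staging".to_string());
    let job = Job { labels: Some(labels), ..Default::default() };
    assert!(job.labels.is_some());
}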
@@ -447,7 +475,7 @@ pub struct ManagedGroupConfig {
 impl Part for ManagedGroupConfig {}
 
 
-/// A Cloud Dataproc job for running PySpark applications on YARN.
+/// A Cloud Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.
 ///
 /// This type is not used in any activity, and only used as *part* of another schema.
 ///
@@ -523,7 +551,7 @@ impl RequestValue for DiagnoseClusterRequest {}
 ///
 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct ListClustersResponse {
-    /// [Optional] This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the `page_token` in a subsequent ListClustersRequest.
+    /// [Output-only] This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the `page_token` in a subsequent ListClustersRequest.
     #[serde(rename="nextPageToken")]
     pub next_page_token: Option<String>,
     /// [Output-only] The clusters in the project.
@@ -560,12 +588,12 @@ impl ResponseResult for ListJobsResponse {}
 ///
 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct JobStatus {
-    /// [Required] A state message specifying the overall job state.
+    /// [Output-only] A state message specifying the overall job state.
     pub state: Option<String>,
     /// [Output-only] The time when this state was entered.
     #[serde(rename="stateStartTime")]
     pub state_start_time: Option<String>,
-    /// [Optional] Job state details, such as an error description if the state is ERROR.
+    /// [Output-only] Optional job state details, such as an error description if the state is ERROR.
     pub details: Option<String>,
 }
 
@@ -603,7 +631,24 @@ pub struct LoggingConfig {
 impl Part for LoggingConfig {}
 
 
-/// A Cloud Dataproc job for running Hive queries on YARN.
+/// Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+///
+/// This type is not used in any activity, and only used as *part* of another schema.
+///
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct ClusterMetrics {
+    /// The YARN metrics.
+    #[serde(rename="yarnMetrics")]
+    pub yarn_metrics: Option<HashMap<String, String>>,
+    /// The HDFS metrics.
+    #[serde(rename="hdfsMetrics")]
+    pub hdfs_metrics: Option<HashMap<String, String>>,
+}
+
+impl Part for ClusterMetrics {}
+
+
+/// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN.
 ///
 /// This type is not used in any activity, and only used as *part* of another schema.
 ///
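Both metric maps are plain `HashMap<String, String>`, so reading a value is ordinary map access. A sketch pulling one HDFS stat off a fetched `Cluster` (which gains a `metrics` field in a later hunk); the metric key is hypothetical, since the cluster daemons decide what they report, and `Cluster` is assumed to derive `Default` like the other schema types:

extern crate google_dataproc1;

use google_dataproc1::Cluster;

// `cluster` would come from a clusters.get or clusters.list response.
fn print_hdfs_stat(cluster: &Cluster) {
    if let Some(ref metrics) = cluster.metrics {
        if let Some(ref hdfs) = metrics.hdfs_metrics {
            // "dfs-capacity-used" is a hypothetical key; real keys are
            // whatever the cluster daemons report.
            if let Some(used) = hdfs.get("dfs-capacity-used") {
                println!("HDFS capacity used: {}", used);
            }
        }
    }
}

fn main() {
    print_hdfs_stat(&Cluster::default());
}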
@@ -631,7 +676,7 @@ pub struct HiveJob {
 impl Part for HiveJob {}
 
 
-/// A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN.
+/// A Cloud Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
 ///
 /// This type is not used in any activity, and only used as *part* of another schema.
 ///
@@ -701,7 +746,7 @@ pub struct ListOperationsResponse {
 impl ResponseResult for ListOperationsResponse {}
 
 
-/// A Cloud Dataproc job for running Pig queries on YARN.
+/// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN.
 ///
 /// This type is not used in any activity, and only used as *part* of another schema.
 ///
@@ -756,6 +801,10 @@ pub struct Cluster {
     /// [Required] The Google Cloud Platform project ID that the cluster belongs to.
     #[serde(rename="projectId")]
     pub project_id: Option<String>,
+    /// [Optional] The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.
+    pub labels: Option<HashMap<String, String>>,
+    /// Contains cluster daemon metrics such as HDFS and YARN stats. **Beta Feature**: This report is available for testing purposes only. It may be changed before final release.
+    pub metrics: Option<ClusterMetrics>,
     /// [Output-only] The previous cluster status.
     #[serde(rename="statusHistory")]
     pub status_history: Option<Vec<ClusterStatus>>,
@@ -767,7 +816,7 @@ impl RequestValue for Cluster {}
 impl ResponseResult for Cluster {}
 
 
-/// A Cloud Dataproc job for running Spark applications on YARN.
+/// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN.
 ///
 /// This type is not used in any activity, and only used as *part* of another schema.
 ///
@@ -806,12 +855,12 @@ impl Part for SparkJob {}
 ///
 #[derive(Default, Clone, Debug, Serialize, Deserialize)]
 pub struct ClusterStatus {
-    /// The cluster's state.
+    /// [Output-only] The cluster's state.
     pub state: Option<String>,
-    /// Time when this state was entered.
+    /// [Output-only] Time when this state was entered.
     #[serde(rename="stateStartTime")]
     pub state_start_time: Option<String>,
-    /// Optional details of cluster's state.
+    /// [Output-only] Optional details of cluster's state.
     pub detail: Option<String>,
 }
 
@@ -841,7 +890,7 @@ pub struct Operation {
     pub response: Option<HashMap<String, String>>,
     /// The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should have the format of `operations/some/unique/name`.
     pub name: Option<String>,
-    /// The error result of the operation in case of failure.
+    /// The error result of the operation in case of failure or cancellation.
    pub error: Option<Status>,
 }
 
@@ -942,6 +991,7 @@ impl<'a, C, A> ProjectMethods<'a, C, A> {
             _region: region.to_string(),
             _page_token: Default::default(),
             _page_size: Default::default(),
+            _filter: Default::default(),
             _delegate: Default::default(),
             _scopes: Default::default(),
             _additional_params: Default::default(),
@@ -964,6 +1014,7 @@ impl<'a, C, A> ProjectMethods<'a, C, A> {
             _page_token: Default::default(),
             _page_size: Default::default(),
             _job_state_matcher: Default::default(),
+            _filter: Default::default(),
             _cluster_name: Default::default(),
             _delegate: Default::default(),
             _scopes: Default::default(),
@@ -1018,7 +1069,7 @@ impl<'a, C, A> ProjectMethods<'a, C, A> {
 
     /// Create a builder to help you perform the following task:
     ///
-    /// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation.
+    /// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
     ///
     /// # Arguments
     ///
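The expanded wording pins down how cancellation surfaces: the operation survives with `Operation.error` set to a `google.rpc.Status` with code 1 (`Code.CANCELLED`). A sketch of checking for that after a later poll, assuming the generated `Status` type exposes the numeric code as `code: Option<i32>` and that `Operation` derives `Default`:

extern crate google_dataproc1;

use google_dataproc1::Operation;

// `op` would be the result of a later operations.get poll.
fn was_cancelled(op: &Operation) -> bool {
    // google.rpc.Status.code == 1 corresponds to Code.CANCELLED.
    op.error
        .as_ref()
        .and_then(|status| status.code)
        .map_or(false, |code| code == 1)
}

fn main() {
    assert!(!was_cancelled(&Operation::default()));
}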
@@ -1520,6 +1571,7 @@ impl<'a, C, A> ProjectRegionClusterGetCall<'a, C, A> where C: BorrowMut<hyper::C
 /// let result = hub.projects().regions_clusters_list("projectId", "region")
 ///              .page_token("sea")
 ///              .page_size(-90)
+///              .filter("dolores")
 ///              .doit();
 /// # }
 /// ```
@@ -1531,6 +1583,7 @@ pub struct ProjectRegionClusterListCall<'a, C, A>
     _region: String,
     _page_token: Option<String>,
     _page_size: Option<i32>,
+    _filter: Option<String>,
     _delegate: Option<&'a mut Delegate>,
     _additional_params: HashMap<String, String>,
     _scopes: BTreeMap<String, ()>
@@ -1552,7 +1605,7 @@ impl<'a, C, A> ProjectRegionClusterListCall<'a, C, A> where C: BorrowMut<hyper::
         };
         dlg.begin(MethodInfo { id: "dataproc.projects.regions.clusters.list",
                                http_method: hyper::method::Method::Get });
-        let mut params: Vec<(&str, String)> = Vec::with_capacity((6 + self._additional_params.len()));
+        let mut params: Vec<(&str, String)> = Vec::with_capacity((7 + self._additional_params.len()));
         params.push(("projectId", self._project_id.to_string()));
         params.push(("region", self._region.to_string()));
         if let Some(value) = self._page_token {
@@ -1561,7 +1614,10 @@ impl<'a, C, A> ProjectRegionClusterListCall<'a, C, A> where C: BorrowMut<hyper::
         if let Some(value) = self._page_size {
             params.push(("pageSize", value.to_string()));
         }
-        for &field in ["alt", "projectId", "region", "pageToken", "pageSize"].iter() {
+        if let Some(value) = self._filter {
+            params.push(("filter", value.to_string()));
+        }
+        for &field in ["alt", "projectId", "region", "pageToken", "pageSize", "filter"].iter() {
             if self._additional_params.contains_key(field) {
                 dlg.finished(false);
                 return Err(Error::FieldClash(field));
@@ -1696,20 +1752,27 @@ impl<'a, C, A> ProjectRegionClusterListCall<'a, C, A> where C: BorrowMut<hyper::
         self._region = new_value.to_string();
         self
     }
-    /// The standard List page token.
+    /// [Optional] The standard List page token.
     ///
     /// Sets the *page token* query property to the given value.
     pub fn page_token(mut self, new_value: &str) -> ProjectRegionClusterListCall<'a, C, A> {
         self._page_token = Some(new_value.to_string());
         self
     }
-    /// The standard List page size.
+    /// [Optional] The standard List page size.
     ///
     /// Sets the *page size* query property to the given value.
     pub fn page_size(mut self, new_value: i32) -> ProjectRegionClusterListCall<'a, C, A> {
         self._page_size = Some(new_value);
         self
     }
+    /// [Optional] A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax: field:value [field:value] ... or field = value [AND [field = value]] ... where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match all values. `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` contains the `DELETING` and `ERROR` states. `clusterName` is the name of the cluster provided at creation time. Only the logical `AND` operator is supported; space-separated items are treated as having an implicit `AND` operator. Example valid filters are: status.state:ACTIVE clusterName:mycluster labels.env:staging \ labels.starred:* and status.state = ACTIVE AND clusterName = mycluster \ AND labels.env = staging AND labels.starred = *
+    ///
+    /// Sets the *filter* query property to the given value.
+    pub fn filter(mut self, new_value: &str) -> ProjectRegionClusterListCall<'a, C, A> {
+        self._filter = Some(new_value.to_string());
+        self
+    }
     /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
     /// while executing the actual API request.
     ///
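The new `filter` setter threads straight through to the `filter` query parameter validated above. A usage sketch, with `hub` assumed to be a configured `Dataproc` instance as in the surrounding doc examples, "my-project" a placeholder, and the filter string taken from the method docs:

// `hub` is a configured Dataproc instance; "my-project" is a placeholder.
let result = hub.projects()
                .regions_clusters_list("my-project", "global")
                .filter("status.state = ACTIVE AND labels.env = staging")
                .doit();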
@@ -1795,10 +1858,11 @@ impl<'a, C, A> ProjectRegionClusterListCall<'a, C, A> where C: BorrowMut<hyper::
 /// // execute the final call using `doit()`.
 /// // Values shown here are possibly random and not representative !
 /// let result = hub.projects().regions_jobs_list("projectId", "region")
-///              .page_token("sadipscing")
-///              .page_size(-31)
-///              .job_state_matcher("ea")
-///              .cluster_name("no")
+///              .page_token("aliquyam")
+///              .page_size(-66)
+///              .job_state_matcher("no")
+///              .filter("justo")
+///              .cluster_name("justo")
 ///              .doit();
 /// # }
 /// ```
@@ -1811,6 +1875,7 @@ pub struct ProjectRegionJobListCall<'a, C, A>
     _page_token: Option<String>,
     _page_size: Option<i32>,
     _job_state_matcher: Option<String>,
+    _filter: Option<String>,
     _cluster_name: Option<String>,
     _delegate: Option<&'a mut Delegate>,
     _additional_params: HashMap<String, String>,
@@ -1833,7 +1898,7 @@ impl<'a, C, A> ProjectRegionJobListCall<'a, C, A> where C: BorrowMut<hyper::Clie
         };
         dlg.begin(MethodInfo { id: "dataproc.projects.regions.jobs.list",
                                http_method: hyper::method::Method::Get });
-        let mut params: Vec<(&str, String)> = Vec::with_capacity((8 + self._additional_params.len()));
+        let mut params: Vec<(&str, String)> = Vec::with_capacity((9 + self._additional_params.len()));
         params.push(("projectId", self._project_id.to_string()));
         params.push(("region", self._region.to_string()));
         if let Some(value) = self._page_token {
@@ -1845,10 +1910,13 @@ impl<'a, C, A> ProjectRegionJobListCall<'a, C, A> where C: BorrowMut<hyper::Clie
         if let Some(value) = self._job_state_matcher {
             params.push(("jobStateMatcher", value.to_string()));
         }
+        if let Some(value) = self._filter {
+            params.push(("filter", value.to_string()));
+        }
         if let Some(value) = self._cluster_name {
             params.push(("clusterName", value.to_string()));
         }
-        for &field in ["alt", "projectId", "region", "pageToken", "pageSize", "jobStateMatcher", "clusterName"].iter() {
+        for &field in ["alt", "projectId", "region", "pageToken", "pageSize", "jobStateMatcher", "filter", "clusterName"].iter() {
             if self._additional_params.contains_key(field) {
                 dlg.finished(false);
                 return Err(Error::FieldClash(field));
@@ -1997,13 +2065,20 @@ impl<'a, C, A> ProjectRegionJobListCall<'a, C, A> where C: BorrowMut<hyper::Clie
         self._page_size = Some(new_value);
         self
     }
-    /// [Optional] Specifies enumerated categories of jobs to list.
+    /// [Optional] Specifies enumerated categories of jobs to list (default = match ALL jobs).
     ///
     /// Sets the *job state matcher* query property to the given value.
     pub fn job_state_matcher(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> {
         self._job_state_matcher = Some(new_value.to_string());
         self
     }
+    /// [Optional] A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax: field:value] ... or [field = value] AND [field [= value]] ... where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match all values. `status.state` can be either `ACTIVE` or `INACTIVE`. Only the logical `AND` operator is supported; space-separated items are treated as having an implicit `AND` operator. Example valid filters are: status.state:ACTIVE labels.env:staging labels.starred:* and status.state = ACTIVE AND labels.env = staging AND labels.starred = *
+    ///
+    /// Sets the *filter* query property to the given value.
+    pub fn filter(mut self, new_value: &str) -> ProjectRegionJobListCall<'a, C, A> {
+        self._filter = Some(new_value.to_string());
+        self
+    }
     /// [Optional] If set, the returned jobs list includes only jobs that were submitted to the named cluster.
     ///
     /// Sets the *cluster name* query property to the given value.
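The jobs call gets the same treatment, and the new filter composes with the existing `cluster_name` restriction. A usage sketch with placeholder names, again assuming a configured `hub` as in the doc examples, and a filter drawn from the method docs:

// `hub` is a configured Dataproc instance; names are placeholders.
let result = hub.projects()
                .regions_jobs_list("my-project", "global")
                .filter("status.state = ACTIVE AND labels.env = staging")
                .cluster_name("mycluster")
                .doit();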
@@ -2102,7 +2177,7 @@ impl<'a, C, A> ProjectRegionJobListCall<'a, C, A> where C: BorrowMut<hyper::Clie
 /// // execute the final call using `doit()`.
 /// // Values shown here are possibly random and not representative !
 /// let result = hub.projects().regions_clusters_patch(req, "projectId", "region", "clusterName")
-///              .update_mask("et")
+///              .update_mask("ipsum")
 ///              .doit();
 /// # }
 /// ```
@@ -2642,7 +2717,7 @@ impl<'a, C, A> ProjectRegionJobGetCall<'a, C, A> where C: BorrowMut<hyper::Clien
 }
 
 
-/// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation.
+/// Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
 ///
 /// A builder for the *regions.operations.cancel* method supported by a *project* resource.
 /// It is not used directly, but through a `ProjectMethods` instance.
@@ -4798,9 +4873,9 @@ impl<'a, C, A> ProjectRegionOperationDeleteCall<'a, C, A> where C: BorrowMut<hyp
 /// // execute the final call using `doit()`.
 /// // Values shown here are possibly random and not representative !
 /// let result = hub.projects().regions_operations_list("name")
-///              .page_token("aliquyam")
-///              .page_size(-73)
-///              .filter("Lorem")
+///              .page_token("Lorem")
+///              .page_size(-9)
+///              .filter("et")
 ///              .doit();
 /// # }
 /// ```