chore(code-update): to latest schema version

Sebastian Thiel
2016-04-10 14:08:43 +02:00
parent 930ce6d5c2
commit e0de1b4c10
1035 changed files with 42559 additions and 13556 deletions

View File

@@ -4,7 +4,7 @@
[package]
name = "google-bigquery2"
-version = "0.1.12+20160222"
+version = "0.1.13+20160408"
authors = ["Sebastian Thiel <byronimo@gmail.com>"]
description = "A complete library to interact with bigquery (protocol v2)"
repository = "https://github.com/Byron/google-apis-rs/tree/master/gen/bigquery2"
@@ -16,14 +16,14 @@ build = "src/build.rs"
[dependencies]
-hyper = "0.7"
-mime = "0.1.0"
-serde = ">= 0.7.0"
-serde_json = ">= 0.7.0"
-yup-oauth2 = ">= 0.5.4"
+hyper = "0.8"
+mime = "0.2.0"
+serde = "0.6.0"
+serde_json = "0.6.0"
+yup-oauth2 = ">= 0.5.6"
url = ">= 0.5"
[build-dependencies]
-syntex = { version = ">= 0.23" }
-serde_codegen = { version = ">= 0.6" }
+syntex = { version = "= 0.28" }
+serde_codegen = { version = "= 0.6.13" }
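
The build dependencies are now pinned exactly (`= 0.28`, `= 0.6.13`): syntex and serde_codegen moved in lockstep at this time, and a loose version range routinely broke the build-time code generation. For context, a minimal sketch of the serde_codegen build-script pattern such pins drive, assuming the annotated source lives in src/lib.rs.in; the crate's actual src/build.rs may differ:

    extern crate syntex;
    extern crate serde_codegen;

    use std::env;
    use std::path::Path;

    // Expand #[derive(Serialize, Deserialize)] at build time: read the
    // annotated source and write the expanded code into OUT_DIR, from
    // where src/lib.rs can include!() it.
    fn main() {
        let out_dir = env::var_os("OUT_DIR").unwrap();
        let src = Path::new("src/lib.rs.in");
        let dst = Path::new(&out_dir).join("lib.rs");

        let mut registry = syntex::Registry::new();
        serde_codegen::register(&mut registry);
        registry.expand("", &src, &dst).unwrap();
    }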

View File

@@ -5,7 +5,7 @@ DO NOT EDIT !
-->
The `google-bigquery2` library allows access to all features of the *Google bigquery* service.
-This documentation was generated from *bigquery* crate version *0.1.12+20160222*, where *20160222* is the exact revision of the *bigquery:v2* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.12*.
+This documentation was generated from *bigquery* crate version *0.1.13+20160408*, where *20160408* is the exact revision of the *bigquery:v2* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.13*.
Everything else about the *bigquery* *v2* API can be found at the
[official documentation site](https://cloud.google.com/bigquery/).

View File

@@ -12,7 +12,7 @@ use mime::{Mime, TopLevel, SubLevel, Attr, Value};
use oauth2::{TokenType, Retry, self};
use hyper;
use hyper::header::{ContentType, ContentLength, Headers, UserAgent, Authorization, Header,
-HeaderFormat};
+HeaderFormat, Bearer};
use hyper::http::h1::LINE_ENDING;
use hyper::method::Method;
use hyper::status::StatusCode;
@@ -634,7 +634,7 @@ pub struct ResumableUploadHelper<'a, A: 'a> {
pub start_at: Option<u64>,
pub auth: &'a mut A,
pub user_agent: &'a str,
-pub auth_header: Authorization<oauth2::Scheme>,
+pub auth_header: Authorization<Bearer>,
pub url: &'a str,
pub reader: &'a mut ReadSeek,
pub media_type: Mime,
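
This follows the hyper 0.7 → 0.8 bump in Cargo.toml: hyper ships a typed `Bearer` scheme, so the hand-rolled `oauth2::Scheme` wrapper is no longer needed to render `Authorization: Bearer <token>`. A minimal sketch of what the new header construction produces (the token value is illustrative):

    extern crate hyper;

    use hyper::header::{Authorization, Bearer, Headers};

    fn main() {
        let mut headers = Headers::new();
        // Renders as "Authorization: Bearer ya29.example" on the wire.
        headers.set(Authorization(Bearer { token: "ya29.example".to_owned() }));
        println!("{}", headers);
    }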

View File

@@ -2,7 +2,7 @@
// This file was generated automatically from 'src/mako/api/lib.rs.mako'
// DO NOT EDIT !
-//! This documentation was generated from *bigquery* crate version *0.1.12+20160222*, where *20160222* is the exact revision of the *bigquery:v2* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.12*.
+//! This documentation was generated from *bigquery* crate version *0.1.13+20160408*, where *20160408* is the exact revision of the *bigquery:v2* schema built by the [mako](http://www.makotemplates.org/) code generator *v0.1.13*.
//!
//! Everything else about the *bigquery* *v2* API can be found at the
//! [official documentation site](https://cloud.google.com/bigquery/).

View File

@@ -157,7 +157,7 @@ impl<'a, C, A> Bigquery<C, A>
Bigquery {
client: RefCell::new(client),
auth: RefCell::new(authenticator),
-_user_agent: "google-api-rust-client/0.1.12".to_string(),
+_user_agent: "google-api-rust-client/0.1.13".to_string(),
}
}
@@ -178,7 +178,7 @@ impl<'a, C, A> Bigquery<C, A>
}
/// Set the user-agent header field to use in all requests to the server.
-/// It defaults to `google-api-rust-client/0.1.12`.
+/// It defaults to `google-api-rust-client/0.1.13`.
///
/// Returns the previously set user-agent.
pub fn user_agent(&mut self, agent_name: String) -> String {
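
A usage sketch in the style of the generated README, assuming the standard yup-oauth2 0.5 wiring (the `Authenticator` setup below is not part of this diff):

    extern crate hyper;
    extern crate yup_oauth2 as oauth2;
    extern crate google_bigquery2 as bigquery2;

    use oauth2::{Authenticator, DefaultAuthenticatorDelegate, ApplicationSecret, MemoryStorage};
    use bigquery2::Bigquery;

    fn main() {
        // A real application would read this from a client-secret JSON file.
        let secret: ApplicationSecret = Default::default();
        let auth = Authenticator::new(&secret, DefaultAuthenticatorDelegate,
                                      hyper::Client::new(),
                                      <MemoryStorage as Default>::default(), None);
        let mut hub = Bigquery::new(hyper::Client::new(), auth);

        // Returns the prior value, i.e. "google-api-rust-client/0.1.13".
        let previous = hub.user_agent("my-app/0.1".to_string());
        println!("replaced user agent: {}", previous);
    }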
@@ -364,7 +364,7 @@ pub struct TableFieldSchema {
pub fields: Option<Vec<TableFieldSchema>>,
/// [Optional] The field description. The maximum length is 16K characters.
pub description: Option<String>,
-/// [Required] The field data type. Possible values include STRING, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
+/// [Required] The field data type. Possible values include STRING, BYTES, INTEGER, FLOAT, BOOLEAN, TIMESTAMP or RECORD (where RECORD indicates that the field contains a nested schema).
#[serde(rename="type")]
pub type_: Option<String>,
/// [Optional] The field mode. Possible values include NULLABLE, REQUIRED and REPEATED. The default value is NULLABLE.
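
With BYTES now among the documented type values, a hedged sketch of a nested schema built from these generated types (assuming the crate is imported as google_bigquery2; the `name` field belongs to the generated struct even though this hunk does not show it):

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::TableFieldSchema;

    // A nested RECORD holding a single BYTES leaf field.
    fn example_schema_field() -> TableFieldSchema {
        TableFieldSchema {
            name: Some("payload".to_owned()),
            type_: Some("RECORD".to_owned()),
            mode: Some("NULLABLE".to_owned()),
            fields: Some(vec![TableFieldSchema {
                name: Some("raw".to_owned()),
                type_: Some("BYTES".to_owned()),
                ..Default::default()
            }]),
            ..Default::default()
        }
    }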
@@ -378,55 +378,71 @@ impl Part for TableFieldSchema {}
/// There is no detailed description.
///
-/// # Activities
-///
-/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
-/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
-///
-/// * [get datasets](struct.DatasetGetCall.html) (response)
-/// * [list datasets](struct.DatasetListCall.html) (none)
-/// * [patch datasets](struct.DatasetPatchCall.html) (request|response)
-/// * [update datasets](struct.DatasetUpdateCall.html) (request|response)
-/// * [delete datasets](struct.DatasetDeleteCall.html) (none)
-/// * [insert datasets](struct.DatasetInsertCall.html) (request|response)
+/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
-pub struct Dataset {
-/// [Output-only] The resource type.
-pub kind: Option<String>,
-/// [Optional] A user-friendly description of the dataset.
-pub description: Option<String>,
-/// [Required] A reference that identifies the dataset.
-#[serde(rename="datasetReference")]
-pub dataset_reference: Option<DatasetReference>,
-/// [Output-only] The time when this dataset was created, in milliseconds since the epoch.
-#[serde(rename="creationTime")]
-pub creation_time: Option<String>,
-/// [Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;
-pub access: Option<Vec<DatasetAccess>>,
-/// [Experimental] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.
-#[serde(rename="defaultTableExpirationMs")]
-pub default_table_expiration_ms: Option<String>,
-/// [Output-only] A hash of the resource.
-pub etag: Option<String>,
-/// [Experimental] The geographic location where the dataset should reside. Possible values include EU and US. The default value is US.
-pub location: Option<String>,
-/// [Optional] A descriptive name for the dataset.
-#[serde(rename="friendlyName")]
-pub friendly_name: Option<String>,
-/// [Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.
-#[serde(rename="lastModifiedTime")]
-pub last_modified_time: Option<String>,
-/// [Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field.
-pub id: Option<String>,
-/// [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
-#[serde(rename="selfLink")]
-pub self_link: Option<String>,
+pub struct BigtableColumnFamily {
+/// [Optional] The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.
+pub encoding: Option<String>,
+/// [Optional] The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.
+#[serde(rename="type")]
+pub type_: Option<String>,
+/// [Optional] If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.
+#[serde(rename="onlyReadLatest")]
+pub only_read_latest: Option<bool>,
+/// [Optional] Lists of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as .. Other columns can be accessed as a list through .Column field.
+pub columns: Option<Vec<BigtableColumn>>,
+/// Identifier of the column family.
+#[serde(rename="familyId")]
+pub family_id: Option<String>,
}
-impl RequestValue for Dataset {}
-impl Resource for Dataset {}
-impl ResponseResult for Dataset {}
+impl Part for BigtableColumnFamily {}
+/// There is no detailed description.
+///
+/// This type is not used in any activity, and only used as *part* of another schema.
+///
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct BigtableOptions {
+/// [Optional] If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.
+#[serde(rename="ignoreUnspecifiedColumnFamilies")]
+pub ignore_unspecified_column_families: Option<bool>,
+/// [Optional] List of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable.
+#[serde(rename="columnFamilies")]
+pub column_families: Option<Vec<BigtableColumnFamily>>,
+}
+impl Part for BigtableOptions {}
+/// There is no detailed description.
+///
+/// This type is not used in any activity, and only used as *part* of another schema.
+///
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct BigtableColumn {
+/// [Required] Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifier_string field. Otherwise, a base-64 encoded value must be set to qualifier_encoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.
+#[serde(rename="qualifierEncoded")]
+pub qualifier_encoded: Option<String>,
+/// [Optional] If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.
+#[serde(rename="fieldName")]
+pub field_name: Option<String>,
+/// [Optional] If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.
+#[serde(rename="onlyReadLatest")]
+pub only_read_latest: Option<bool>,
+/// [Optional] The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.
+pub encoding: Option<String>,
+/// no description provided
+#[serde(rename="qualifierString")]
+pub qualifier_string: Option<String>,
+/// [Optional] The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is BYTES. 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.
+#[serde(rename="type")]
+pub type_: Option<String>,
+}
+impl Part for BigtableColumn {}
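
How the three new Bigtable types fit together, as a hedged sketch (all identifiers and values below are illustrative):

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::{BigtableColumn, BigtableColumnFamily, BigtableOptions};

    // Expose one column family with BINARY-encoded INTEGER cells and
    // surface a single column from it as its own field.
    fn example_bigtable_options() -> BigtableOptions {
        BigtableOptions {
            ignore_unspecified_column_families: Some(true),
            column_families: Some(vec![BigtableColumnFamily {
                family_id: Some("cf".to_owned()),
                encoding: Some("BINARY".to_owned()),
                type_: Some("INTEGER".to_owned()),
                only_read_latest: Some(true),
                columns: Some(vec![BigtableColumn {
                    qualifier_string: Some("clicks".to_owned()),
                    field_name: Some("clicks".to_owned()),
                    ..Default::default()
                }]),
            }]),
        }
    }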
/// An array of errors for rows that were not inserted.
@@ -564,21 +580,17 @@ pub struct Streamingbuffer {
impl Part for Streamingbuffer {}
-/// There is no detailed description.
+/// [Required] A partition configuration. Only one type of partition should be configured.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
-pub struct UserDefinedFunctionResource {
-/// [Pick one] A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
-#[serde(rename="resourceUri")]
-pub resource_uri: Option<String>,
-/// [Pick one] An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
-#[serde(rename="inlineCode")]
-pub inline_code: Option<String>,
+pub struct TablePartitionConfiguration {
+/// [Pick one] Configures an interval partition.
+pub interval: Option<IntervalPartitionConfiguration>,
}
-impl Part for UserDefinedFunctionResource {}
+impl Part for TablePartitionConfiguration {}
/// Tables in the requested dataset.
@@ -779,7 +791,7 @@ pub struct JobConfigurationLoad {
/// [Optional] The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. The default value is a comma (',').
#[serde(rename="fieldDelimiter")]
pub field_delimiter: Option<String>,
-/// [Optional] The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". The default value is CSV.
+/// [Optional] The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". The default value is CSV.
#[serde(rename="sourceFormat")]
pub source_format: Option<String>,
/// [Optional] The maximum number of bad records that BigQuery can ignore when running the job. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid.
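
A hedged sketch of a load configuration using the newly documented AVRO value (`source_uris` is a field of the generated struct not shown in this hunk):

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::JobConfigurationLoad;

    // Load Avro files; CSV-specific options such as fieldDelimiter are
    // ignored by the service for this format.
    fn example_load_config() -> JobConfigurationLoad {
        JobConfigurationLoad {
            source_format: Some("AVRO".to_owned()),
            source_uris: Some(vec!["gs://my-bucket/events/*.avro".to_owned()]),
            max_bad_records: Some(0),
            ..Default::default()
        }
    }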
@@ -866,6 +878,23 @@ pub struct JobConfigurationExtract {
impl Part for JobConfigurationExtract {}
+/// There is no detailed description.
+///
+/// This type is not used in any activity, and only used as *part* of another schema.
+///
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct UserDefinedFunctionResource {
+/// [Pick one] A code resource to load from a Google Cloud Storage URI (gs://bucket/path).
+#[serde(rename="resourceUri")]
+pub resource_uri: Option<String>,
+/// [Pick one] An inline resource that contains code for a user-defined function (UDF). Providing an inline code resource is equivalent to providing a URI for a file containing the same code.
+#[serde(rename="inlineCode")]
+pub inline_code: Option<String>,
+}
+impl Part for UserDefinedFunctionResource {}
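
A small sketch of the re-added type; per the "[Pick one]" docs, exactly one of the two fields should be set:

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::UserDefinedFunctionResource;

    // An inline UDF; alternatively set resource_uri to a gs:// path.
    fn example_udf() -> UserDefinedFunctionResource {
        UserDefinedFunctionResource {
            inline_code: Some("function pass(row, emit) { emit(row); }".to_owned()),
            resource_uri: None,
        }
    }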
/// There is no detailed description.
///
/// # Activities
@@ -910,6 +939,26 @@ pub struct GetQueryResultsResponse {
impl ResponseResult for GetQueryResultsResponse {}
+/// There is no detailed description.
+///
+/// This type is not used in any activity, and only used as *part* of another schema.
+///
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct TableReference {
+/// [Required] The ID of the project containing this table.
+#[serde(rename="projectId")]
+pub project_id: Option<String>,
+/// [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
+#[serde(rename="tableId")]
+pub table_id: Option<String>,
+/// [Required] The ID of the dataset containing this table.
+#[serde(rename="datasetId")]
+pub dataset_id: Option<String>,
+}
+impl Part for TableReference {}
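
Constructing the relocated type is mechanical; all three IDs are required by the service, and the uniform `Option` wrapping is just how the generator emits fields:

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::TableReference;

    fn example_table_ref() -> TableReference {
        TableReference {
            project_id: Some("my-project".to_owned()),
            dataset_id: Some("my_dataset".to_owned()),
            table_id: Some("events_20160408".to_owned()),
        }
    }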
/// There is no detailed description.
///
/// This type is not used in any activity, and only used as *part* of another schema.
@@ -943,26 +992,29 @@ impl Part for CsvOptions {}
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ExternalDataConfiguration {
-/// [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Datastore backups and Avro.
+/// [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
pub compression: Option<String>,
/// Additional properties to set if sourceFormat is set to CSV.
#[serde(rename="csvOptions")]
pub csv_options: Option<CsvOptions>,
/// [Experimental] Try to detect schema and format options automatically. Any option specified explicitly will be honored.
pub autodetect: Option<bool>,
-/// [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Datastore backups and Avro.
+/// [Optional] The maximum number of bad records that BigQuery can ignore when reading data. If the number of bad records exceeds this value, an invalid error is returned in the job result. The default value is 0, which requires that all records are valid. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
#[serde(rename="maxBadRecords")]
pub max_bad_records: Option<i32>,
-/// [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
+/// [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. The sourceFormat property determines what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values that don't match any column names Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore backups: This setting is ignored. Avro: This setting is ignored.
#[serde(rename="ignoreUnknownValues")]
pub ignore_unknown_values: Option<bool>,
-/// [Required] The fully-qualified URIs that point to your data in Google Cloud Storage. Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs. For Google Cloud Datastore backups, exactly one URI can be specified, and it must end with '.backup_info'. Also, the '*' wildcard character is not allowed.
+/// [Required] The fully-qualified URIs that point to your data in Google Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard character and it must come after the 'bucket' name. Size limits related to load jobs apply to external data sources, plus an additional limit of 10 GB maximum size across all URIs. For Google Cloud Bigtable URIs: Exactly one URI can be specified and it has be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore backups, exactly one URI can be specified, and it must end with '.backup_info'. Also, the '*' wildcard character is not allowed.
#[serde(rename="sourceUris")]
pub source_uris: Option<Vec<String>>,
-/// [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Google Cloud Datastore backups, specify "DATASTORE_BACKUP". For Avro files, specify "AVRO".
+/// [Optional] Additional options if sourceFormat is set to BIGTABLE.
+#[serde(rename="bigtableOptions")]
+pub bigtable_options: Option<BigtableOptions>,
+/// [Required] The data format. For CSV files, specify "CSV". For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". For Google Cloud Datastore backups, specify "DATASTORE_BACKUP". [Experimental] For Google Cloud Bigtable, specify "BIGTABLE". Please note that reading from Google Cloud Bigtable is experimental and has to be enabled for your project. Please contact Google Cloud Support to enable this for your project.
#[serde(rename="sourceFormat")]
pub source_format: Option<String>,
-/// [Optional] The schema for the data. Schema is required for CSV and JSON formats. Schema is disallowed for Google Cloud Datastore backups and Avro.
+/// [Optional] The schema for the data. Schema is required for CSV and JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats.
pub schema: Option<TableSchema>,
}
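
A hedged sketch of an external configuration that reads Cloud Bigtable in place, per the new docs: exactly one HTTPS URI, the experimental BIGTABLE format, and no schema since it is disallowed (the URI shape below is illustrative):

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::{BigtableOptions, ExternalDataConfiguration};

    fn example_external_config(options: BigtableOptions) -> ExternalDataConfiguration {
        ExternalDataConfiguration {
            source_format: Some("BIGTABLE".to_owned()),
            source_uris: Some(vec![
                "https://googleapis.com/bigtable/projects/p/instances/i/tables/t".to_owned(),
            ]),
            bigtable_options: Some(options),
            ..Default::default()
        }
    }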
@@ -1332,19 +1384,69 @@ impl ResponseResult for JobCancelResponse {}
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
-pub struct TableReference {
-/// [Required] The ID of the project containing this table.
-#[serde(rename="projectId")]
-pub project_id: Option<String>,
-/// [Required] The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
-#[serde(rename="tableId")]
-pub table_id: Option<String>,
-/// [Required] The ID of the dataset containing this table.
-#[serde(rename="datasetId")]
-pub dataset_id: Option<String>,
+pub struct IntervalPartitionConfiguration {
+/// no description provided
+#[serde(rename="type")]
+pub type_: Option<String>,
+/// no description provided
+#[serde(rename="expirationMs")]
+pub expiration_ms: Option<String>,
}
-impl Part for TableReference {}
+impl Part for IntervalPartitionConfiguration {}
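
These two types feed the partitionConfigurations field added to `Table` further down. A hedged sketch of a daily partition with a 30-day expiration; the exact "DAY" type string is an assumption, since the docs only say "type daily":

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::{IntervalPartitionConfiguration, Table, TablePartitionConfiguration};

    fn example_partitioned_table() -> Table {
        Table {
            partition_configurations: Some(vec![TablePartitionConfiguration {
                interval: Some(IntervalPartitionConfiguration {
                    type_: Some("DAY".to_owned()),              // assumed value
                    expiration_ms: Some("2592000000".to_owned()), // 30 days
                }),
            }]),
            ..Default::default()
        }
    }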
+/// There is no detailed description.
+///
+/// # Activities
+///
+/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
+/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
+///
+/// * [get datasets](struct.DatasetGetCall.html) (response)
+/// * [list datasets](struct.DatasetListCall.html) (none)
+/// * [patch datasets](struct.DatasetPatchCall.html) (request|response)
+/// * [update datasets](struct.DatasetUpdateCall.html) (request|response)
+/// * [delete datasets](struct.DatasetDeleteCall.html) (none)
+/// * [insert datasets](struct.DatasetInsertCall.html) (request|response)
+///
+#[derive(Default, Clone, Debug, Serialize, Deserialize)]
+pub struct Dataset {
+/// [Output-only] The resource type.
+pub kind: Option<String>,
+/// [Optional] A user-friendly description of the dataset.
+pub description: Option<String>,
+/// [Required] A reference that identifies the dataset.
+#[serde(rename="datasetReference")]
+pub dataset_reference: Option<DatasetReference>,
+/// [Output-only] The time when this dataset was created, in milliseconds since the epoch.
+#[serde(rename="creationTime")]
+pub creation_time: Option<String>,
+/// [Optional] An array of objects that define dataset access for one or more entities. You can set this property when inserting or updating a dataset in order to control who is allowed to access the data. If unspecified at dataset creation time, BigQuery adds default dataset access for the following entities: access.specialGroup: projectReaders; access.role: READER; access.specialGroup: projectWriters; access.role: WRITER; access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail: [dataset creator email]; access.role: OWNER;
+pub access: Option<Vec<DatasetAccess>>,
+/// [Optional] The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an expirationTime property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the expirationTime for a given table is reached, that table will be deleted automatically. If a table's expirationTime is modified or removed before the table expires, or if you provide an explicit expirationTime when creating a table, that value takes precedence over the default expiration time indicated by this property.
+#[serde(rename="defaultTableExpirationMs")]
+pub default_table_expiration_ms: Option<String>,
+/// [Output-only] A hash of the resource.
+pub etag: Option<String>,
+/// [Experimental] The geographic location where the dataset should reside. Possible values include EU and US. The default value is US.
+pub location: Option<String>,
+/// [Optional] A descriptive name for the dataset.
+#[serde(rename="friendlyName")]
+pub friendly_name: Option<String>,
+/// [Output-only] The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.
+#[serde(rename="lastModifiedTime")]
+pub last_modified_time: Option<String>,
+/// [Output-only] The fully-qualified unique name of the dataset in the format projectId:datasetId. The dataset name without the project name is given in the datasetId field. When creating a new dataset, leave this field blank, and instead specify the datasetId field.
+pub id: Option<String>,
+/// [Output-only] A URL that can be used to access the resource again. You can use this URL in Get or Update requests to the resource.
+#[serde(rename="selfLink")]
+pub self_link: Option<String>,
+}
+impl RequestValue for Dataset {}
+impl Resource for Dataset {}
+impl ResponseResult for Dataset {}
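
A hedged sketch of a request body for the dataset activities listed above (`DatasetReference` is the generated reference type, not shown in this hunk; with a hub in scope the call is roughly `hub.datasets().insert(req, "my-project").doit()`):

    extern crate google_bigquery2 as bigquery2;

    use bigquery2::{Dataset, DatasetReference};

    fn example_dataset() -> Dataset {
        Dataset {
            dataset_reference: Some(DatasetReference {
                project_id: Some("my-project".to_owned()),
                dataset_id: Some("my_dataset".to_owned()),
            }),
            default_table_expiration_ms: Some("3600000".to_owned()), // one hour
            location: Some("EU".to_owned()),
            ..Default::default()
        }
    }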
/// There is no detailed description.
@@ -1371,6 +1473,9 @@ pub struct Table {
/// [Output-only] The size of this table in bytes, excluding any data in the streaming buffer.
#[serde(rename="numBytes")]
pub num_bytes: Option<String>,
+/// [Experimental] List of partition configurations for this table. Currently only one configuration can be specified and it can only be an interval partition with type daily.
+#[serde(rename="partitionConfigurations")]
+pub partition_configurations: Option<Vec<TablePartitionConfiguration>>,
/// [Output-only] The time when this table was last modified, in milliseconds since the epoch.
#[serde(rename="lastModifiedTime")]
pub last_modified_time: Option<String>,
@@ -2211,7 +2316,7 @@ impl<'a, C, A> TableUpdateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Table)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -2293,8 +2398,7 @@ impl<'a, C, A> TableUpdateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -2501,7 +2605,7 @@ impl<'a, C, A> TableInsertCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Table)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -2582,8 +2686,7 @@ impl<'a, C, A> TableInsertCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -2777,7 +2880,7 @@ impl<'a, C, A> TableListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oau
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, TableList)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -2853,8 +2956,7 @@ impl<'a, C, A> TableListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oau
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -3046,7 +3148,7 @@ impl<'a, C, A> TableDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<hyper::client::Response> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -3116,8 +3218,7 @@ impl<'a, C, A> TableDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url)
@@ -3295,7 +3396,7 @@ impl<'a, C, A> TableGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oaut
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Table)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -3366,8 +3467,7 @@ impl<'a, C, A> TableGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oaut
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -3562,7 +3662,7 @@ impl<'a, C, A> TablePatchCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oa
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Table)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -3644,8 +3744,7 @@ impl<'a, C, A> TablePatchCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oa
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -3852,7 +3951,7 @@ impl<'a, C, A> DatasetPatchCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Dataset)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -3933,8 +4032,7 @@ impl<'a, C, A> DatasetPatchCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -4124,7 +4222,7 @@ impl<'a, C, A> DatasetGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oa
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Dataset)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -4194,8 +4292,7 @@ impl<'a, C, A> DatasetGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oa
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -4377,7 +4474,7 @@ impl<'a, C, A> DatasetListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, DatasetList)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -4455,8 +4552,7 @@ impl<'a, C, A> DatasetListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -4651,7 +4747,7 @@ impl<'a, C, A> DatasetUpdateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Dataset)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -4732,8 +4828,7 @@ impl<'a, C, A> DatasetUpdateCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -4925,7 +5020,7 @@ impl<'a, C, A> DatasetDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<hyper::client::Response> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -4997,8 +5092,7 @@ impl<'a, C, A> DatasetDeleteCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Delete, &url)
@@ -5178,7 +5272,7 @@ impl<'a, C, A> DatasetInsertCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Dataset)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -5258,8 +5352,7 @@ impl<'a, C, A> DatasetInsertCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -5439,7 +5532,7 @@ impl<'a, C, A> JobCancelCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oau
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, JobCancelResponse)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -5509,8 +5602,7 @@ impl<'a, C, A> JobCancelCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oau
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Post, &url)
@@ -5693,7 +5785,7 @@ impl<'a, C, A> JobQueryCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oaut
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, QueryResponse)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -5773,8 +5865,7 @@ impl<'a, C, A> JobQueryCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oaut
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -5962,7 +6053,7 @@ impl<'a, C, A> JobGetQueryResultCall<'a, C, A> where C: BorrowMut<hyper::Client>
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, GetQueryResultsResponse)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -6044,8 +6135,7 @@ impl<'a, C, A> JobGetQueryResultCall<'a, C, A> where C: BorrowMut<hyper::Client>
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -6259,7 +6349,7 @@ impl<'a, C, A> JobListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, JobList)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -6345,8 +6435,7 @@ impl<'a, C, A> JobListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -6549,7 +6638,7 @@ impl<'a, C, A> JobGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, Job)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -6619,8 +6708,7 @@ impl<'a, C, A> JobGetCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oauth2
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -6805,7 +6893,7 @@ impl<'a, C, A> JobInsertCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oau
fn doit<RS>(mut self, mut reader: RS, reader_mime_type: mime::Mime, protocol: &'static str) -> Result<(hyper::client::Response, Job)>
where RS: ReadSeek {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -6896,8 +6984,7 @@ impl<'a, C, A> JobInsertCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: oau
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
if should_ask_dlg_for_url && (upload_url = dlg.upload_url()) == () && upload_url.is_some() {
@@ -7182,7 +7269,7 @@ impl<'a, C, A> TabledataInsertAllCall<'a, C, A> where C: BorrowMut<hyper::Client
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, TableDataInsertAllResponse)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -7264,8 +7351,7 @@ impl<'a, C, A> TabledataInsertAllCall<'a, C, A> where C: BorrowMut<hyper::Client
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
@@ -7472,7 +7558,7 @@ impl<'a, C, A> TabledataListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, TableDataList)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -7552,8 +7638,7 @@ impl<'a, C, A> TabledataListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A:
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)
@@ -7763,7 +7848,7 @@ impl<'a, C, A> ProjectListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
/// Perform the operation you have built so far.
pub fn doit(mut self) -> Result<(hyper::client::Response, ProjectList)> {
use std::io::{Read, Seek};
-use hyper::header::{ContentType, ContentLength, Authorization, UserAgent, Location};
+use hyper::header::{ContentType, ContentLength, Authorization, Bearer, UserAgent, Location};
let mut dd = DefaultDelegate;
let mut dlg: &mut Delegate = match self._delegate {
Some(d) => d,
@@ -7816,8 +7901,7 @@ impl<'a, C, A> ProjectListCall<'a, C, A> where C: BorrowMut<hyper::Client>, A: o
}
}
};
-let auth_header = Authorization(oauth2::Scheme { token_type: oauth2::TokenType::Bearer,
-access_token: token.access_token });
+let auth_header = Authorization(Bearer { token: token.access_token });
let mut req_result = {
let mut client = &mut *self.hub.client.borrow_mut();
let mut req = client.borrow_mut().request(hyper::method::Method::Get, &url)