Files
tarpc/examples/concurrency.rs
Tim 7aabfb3c14 Rewrite using tokio (#44)
* Rewrite tarpc on top of tokio.

* Add examples

* Move error types to their own module.

Also, cull unused error variants.

* Remove unused fn

* Remove CanonicalRpcError* types. They're 100% useless.

* Track tokio master (WIP)

* The great error revamp.

Removed the canonical rpc error type. Instead, the user declares
the error type for each rpc:

In the above example, the error type is Baz. Declaring an error is
optional; if none is specified, it defaults to Never, a convenience
struct that wraps the never type (`!`) to impl Serialize, Deserialize,
Error, etc. Also adds the convenience type StringError for easily
using a String as an error type.

* Add missing license header

* Minor cleanup

* Rename StringError => Message

* Create a sync::Connect trait.

Along with this, the existing Connect trait moves to future::Connect. The future
and sync modules are reexported from the crate root.

Additionally, the utility errors Never and Message are no longer reexported from
the crate root.

* Update readme

* Track tokio/futures master. Add a Spawn utility trait to replace the removed forget.

* Fix pre-push hook

* Add doc comment to SyncServiceExt.

* Fix up some documentation

* Track tokio-proto master

* Don't set tcp nodelay

* Make future::Connect take an associated type for the future.

* Unbox FutureClient::connect return type

* Use type alias instead of newtype struct for ClientFuture

* Fix benches/latency.rs

* Write a plugin to convert lower_snake_case idents/types to UpperCamelCase.

Use it to add associated types to FutureService instead of boxing the return futures.

* Specify plugin = true in snake_to_camel/Cargo.toml. Weird things happen otherwise.

* Add clippy.toml
2016-09-04 16:09:50 -07:00

123 lines
3.3 KiB
Rust

// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
// This file may not be copied, modified, or distributed except according to those terms.
#![feature(inclusive_range_syntax, conservative_impl_trait, plugin)]
#![plugin(snake_to_camel)]
extern crate chrono;
extern crate env_logger;
extern crate futures;
#[macro_use]
extern crate log;
#[macro_use]
extern crate tarpc;
extern crate futures_cpupool;
use futures::Future;
use futures_cpupool::{CpuFuture, CpuPool};
use std::thread;
use std::time::{Duration, Instant, SystemTime};
use tarpc::future::{Connect};
use tarpc::util::Never;
// Macro-generated RPC glue: expands to (among others) the `FutureService`
// trait implemented below and the `FutureClient` used in `run_once`/`main`.
// The service exposes a single rpc, `read`, returning a buffer of `size` bytes.
service! {
    rpc read(size: u32) -> Vec<u8>;
}
/// Server handle wrapping the `CpuPool` on which `read` responses are built,
/// keeping that work off the reactor thread. `Clone` is cheap: `CpuPool` is
/// internally reference-counted.
#[derive(Clone)]
struct Server(CpuPool);
impl Server {
    /// Creates a server backed by a thread pool sized to the number of CPUs.
    fn new() -> Self {
        Server(CpuPool::new_num_cpus())
    }
}
impl FutureService for Server {
    type Read = CpuFuture<Vec<u8>, Never>;

    /// Builds the `size`-byte response payload on the thread pool so the
    /// reactor thread never blocks on the allocation/fill work.
    fn read(&self, size: u32) -> Self::Read {
        self.0
            .spawn(futures::lazy(move || {
                let mut vec: Vec<u8> = Vec::with_capacity(size as usize);
                for i in 0..size {
                    // BUG FIX: in Rust, `%` binds tighter than `<<`, so the
                    // original `i % 1 << 8` parsed as `(i % 1) << 8`, which is
                    // 0 for every `i` — the buffer was all zeros. The intent
                    // is to truncate `i` modulo 256 before narrowing to u8.
                    vec.push((i % (1 << 8)) as u8);
                }
                futures::finished::<_, Never>(vec)
            }))
    }
}
/// Fires `concurrency` concurrent `read` requests, round-robined across
/// `clients`, waits for all of them, and (when `print` is set) reports
/// per-request latency statistics and total wall-clock time.
///
/// FIX: dropped the `let _ = env_logger::init();` that ran on every call —
/// `main` initializes the logger exactly once before any call to this
/// function, and `env_logger` is otherwise unused here.
fn run_once(clients: &[FutureClient], concurrency: u32, print: bool) {
    let start = Instant::now();
    // Kick off every request before waiting on any of them so the requests
    // are genuinely in flight concurrently.
    let futures: Vec<_> = clients.iter()
        .cycle()
        .take(concurrency as usize)
        .map(|client| {
            let start = SystemTime::now();
            // NOTE(review): `SystemTime::elapsed` returns Err (and this
            // unwrap panics) if the wall clock steps backwards mid-request;
            // acceptable for an example program.
            let future = client.read(&CHUNK_SIZE).map(move |_| start.elapsed().unwrap());
            // Give the reactor thread a chance to start sending the request.
            thread::yield_now();
            future
        })
        .collect();
    let latencies: Vec<_> = futures.into_iter()
        .map(|future| future.wait().unwrap())
        .collect();
    let total_time = start.elapsed();
    let sum_latencies = latencies.iter().fold(Duration::new(0, 0), |sum, &dur| sum + dur);
    let mean = sum_latencies / latencies.len() as u32;
    let min_latency = *latencies.iter().min().unwrap();
    let max_latency = *latencies.iter().max().unwrap();
    if print {
        println!("{} requests => Mean={}µs, Min={}µs, Max={}µs, Total={}µs",
                 latencies.len(),
                 mean.microseconds(),
                 min_latency.microseconds(),
                 max_latency.microseconds(),
                 total_time.microseconds());
    }
}
/// Conversion of a time span to a whole number of microseconds.
trait Microseconds {
    /// Total duration in microseconds, truncating sub-microsecond precision.
    fn microseconds(&self) -> i64;
}

impl Microseconds for Duration {
    fn microseconds(&self) -> i64 {
        // Computed with std alone instead of the original round-trip through
        // `chrono::Duration::from_std(..).unwrap().num_microseconds().unwrap()`,
        // which pulled in a third-party crate and carried two panic paths.
        // Identical results for any duration this benchmark can produce.
        self.as_secs() as i64 * 1_000_000 + self.subsec_nanos() as i64 / 1_000
    }
}
/// Bytes returned by each `read` call (1 KiB).
const CHUNK_SIZE: u32 = 1 << 10;
/// Highest number of simultaneous in-flight requests exercised.
const MAX_CONCURRENCY: u32 = 100;
/// Spins up a server on an ephemeral port, connects five clients, does one
/// unprinted warm-up pass at full concurrency, then measures and prints
/// latency statistics for every concurrency level from 1 to the maximum.
fn main() {
    let _ = env_logger::init();
    let server = Server::new().listen("localhost:0").unwrap();
    println!("Server listening on {}.", server.local_addr());
    // `1...5` is the pre-1.26 inclusive-range syntax this nightly requires.
    let mut clients = Vec::new();
    for id in 1...5 {
        println!("Client {} connecting...", id);
        let client = FutureClient::connect(server.local_addr()).wait().unwrap();
        clients.push(client);
    }
    println!("Starting...");
    // Warm-up: populate connections/pools without printing results.
    run_once(&clients, MAX_CONCURRENCY, false);
    for level in 1...MAX_CONCURRENCY {
        run_once(&clients, level, true);
    }
}