mirror of
https://github.com/OMGeeky/tarpc.git
synced 2026-01-24 11:40:22 +01:00
-- Connection Limits The problem with having ConnectionFilter default-enabled is elaborated on in https://github.com/google/tarpc/issues/217. The gist of it is that not all servers want a policy based on `SocketAddr`. This PR allows customizing the behavior of ConnectionFilter, at the cost of not having it enabled by default. However, enabling it is as simple as one line: incoming.max_channels_per_key(10, ip_addr) The second argument is a key function that takes the user-chosen transport and returns some hashable, equatable, cloneable key. In the above example, it returns an `IpAddr`. This also allows the `Transport` trait to have the addr fns removed, which means it has become simply an alias for `Stream + Sink`. -- Per-Channel Request Throttling With respect to Channel's throttling behavior, the same argument applies. There isn't a one-size-fits-all solution to throttling requests, and the policy applied by tarpc is just one of potentially many solutions. As such, `Channel` is now a trait that offers a few combinators, one of which is throttling: channel.max_concurrent_requests(10).respond_with(serve(Server)) This functionality is also available on the existing `Handler` trait, which applies it to all incoming channels and can be used in tandem with connection limits: incoming .max_channels_per_key(10, ip_addr) .max_concurrent_requests_per_channel(10).respond_with(serve(Server)) -- Global Request Throttling I've entirely removed the overall request limit enforced across all channels. This functionality is easily gotten back via [`StreamExt::buffer_unordered`](https://rust-lang-nursery.github.io/futures-api-docs/0.3.0-alpha.1/futures/stream/trait.StreamExt.html#method.buffer_unordered), with the difference being that the previous behavior allowed you to spawn channels onto different threads, whereas `buffer_unordered` means the `Channels` are handled on a single thread (the per-request handlers are still spawned).
Considering the existing options, I don't believe that the benefit provided by this functionality held its own.
105 lines
2.9 KiB
Rust
105 lines
2.9 KiB
Rust
// Copyright 2019 Google LLC
|
|
//
|
|
// Use of this source code is governed by an MIT-style
|
|
// license that can be found in the LICENSE file or at
|
|
// https://opensource.org/licenses/MIT.
|
|
|
|
//! Tests client/server control flow.
|
|
|
|
#![feature(test, integer_atomics, async_await)]
|
|
|
|
extern crate test;
|
|
|
|
use futures::{compat::Executor01CompatExt, prelude::*};
|
|
use test::stats::Stats;
|
|
use rpc::{
|
|
client, context,
|
|
server::{Handler, Server},
|
|
};
|
|
use std::{
|
|
io,
|
|
time::{Duration, Instant},
|
|
};
|
|
|
|
async fn bench() -> io::Result<()> {
|
|
let listener = tarpc_json_transport::listen(&"0.0.0.0:0".parse().unwrap())?
|
|
.filter_map(|r| future::ready(r.ok()));
|
|
let addr = listener.get_ref().local_addr();
|
|
|
|
tokio_executor::spawn(
|
|
Server::<u32, u32>::default()
|
|
.incoming(listener)
|
|
.take(1)
|
|
.respond_with(|_ctx, request| futures::future::ready(Ok(request)))
|
|
.unit_error()
|
|
.boxed()
|
|
.compat(),
|
|
);
|
|
|
|
let conn = tarpc_json_transport::connect(&addr).await?;
|
|
let client = &mut client::new::<u32, u32, _>(client::Config::default(), conn).await?;
|
|
|
|
let total = 10_000usize;
|
|
let mut successful = 0u32;
|
|
let mut unsuccessful = 0u32;
|
|
let mut durations = vec![];
|
|
for _ in 1..=total {
|
|
let now = Instant::now();
|
|
let response = client.call(context::current(), 0u32).await;
|
|
let elapsed = now.elapsed();
|
|
|
|
match response {
|
|
Ok(_) => successful += 1,
|
|
Err(_) => unsuccessful += 1,
|
|
};
|
|
durations.push(elapsed);
|
|
}
|
|
|
|
let durations_nanos = durations
|
|
.iter()
|
|
.map(|duration| duration.as_secs() as f64 * 1E9 + duration.subsec_nanos() as f64)
|
|
.collect::<Vec<_>>();
|
|
|
|
let (lower, median, upper) = durations_nanos.quartiles();
|
|
|
|
println!("Of {} runs:", durations_nanos.len());
|
|
println!("\tSuccessful: {}", successful);
|
|
println!("\tUnsuccessful: {}", unsuccessful);
|
|
println!(
|
|
"\tMean: {:?}",
|
|
Duration::from_nanos(durations_nanos.mean() as u64)
|
|
);
|
|
println!("\tMedian: {:?}", Duration::from_nanos(median as u64));
|
|
println!(
|
|
"\tStd Dev: {:?}",
|
|
Duration::from_nanos(durations_nanos.std_dev() as u64)
|
|
);
|
|
println!(
|
|
"\tMin: {:?}",
|
|
Duration::from_nanos(durations_nanos.min() as u64)
|
|
);
|
|
println!(
|
|
"\tMax: {:?}",
|
|
Duration::from_nanos(durations_nanos.max() as u64)
|
|
);
|
|
println!(
|
|
"\tQuartiles: ({:?}, {:?}, {:?})",
|
|
Duration::from_nanos(lower as u64),
|
|
Duration::from_nanos(median as u64),
|
|
Duration::from_nanos(upper as u64)
|
|
);
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// End-to-end latency benchmark over localhost with a tiny (`u32`) payload.
///
/// Initializes logging and the rpc executor, then drives [`bench`] to
/// completion on the tokio 0.1 runtime via the futures compat layer.
#[test]
fn bench_small_packet() -> io::Result<()> {
    env_logger::init();
    rpc::init(tokio::executor::DefaultExecutor::current().compat());

    // Surface any benchmark error as a panic so the test fails loudly.
    // Use a format string: `panic!(e.to_string())` (a non-literal single
    // argument) is deprecated and a hard error in Rust 2021.
    tokio::run(bench().map_err(|e| panic!("{}", e)).boxed().compat());
    println!("done");

    Ok(())
}
|