Feature rollup (#129)

* Create a directory for the `future::server` module, which has become quite large: `server.rs` becomes `server/mod.rs`, with new submodules for the shutdown and connection logic.

* Add `fn thread_pool(...)` to `sync::server::Options` (a usage sketch follows this list).

* Configure idle threads to expire after one minute

* Add `tarpc::util::lazy` for lazily executing functions. It is similar to `futures::lazy` but useful in different circumstances: `futures::lazy` requires a closure, whereas `util::lazy` takes the function and its argument separately (a self-contained sketch of the idea follows this list).

* Remove some unstable features, and `cfg`-gate `plugin` so it is only enabled in tests. The `unboxed_closures` and `fn_traits` features are removed by replacing manual `Fn` impls with `Stream` impls, sketched after this list. This also leads to slightly more performant code, because some `Rc`s could be removed.

* Fix tokio deprecation warnings. Update to use tokio-io in lieu of the deprecated tokio-core items, and implement `AsyncRead`'s optional `unsafe fn prepare_uninitialized_buffer` for a large performance win (a sketch follows this list).

* Add `Debug` impls to all public items and add `deny(missing_debug_implementations)` to the crate.

* Bump tokio core version.
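
A rough usage sketch of the new `thread_pool` option. The builder methods mirror the `thread_pool::Builder` calls in the diff below; the `tarpc::sync::server::Options` import path and the chosen pool sizes are assumptions, not part of this change:

```rust
extern crate tarpc;
extern crate thread_pool;

use std::time::Duration;
use tarpc::sync::server::Options;

fn main() {
    // Hypothetical pool: 4 core threads, up to 16 total, idle threads
    // expiring after one minute (the new default keep-alive).
    let pool = thread_pool::Builder::new()
        .core_pool_size(4)
        .max_pool_size(16)
        .keep_alive(Duration::from_secs(60))
        .name_prefix("request-thread-");

    // The options are then passed to the sync server's `listen` as before.
    let _options = Options::default().thread_pool(pool);
}
```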
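For the `util::lazy` bullet, here is a self-contained sketch of the idea, not tarpc's actual implementation: a future that stores a plain function and its argument and defers the call until first poll, so no capturing closure is needed.

```rust
extern crate futures;

use futures::{Future, IntoFuture, Poll};

/// Illustrative only: holds a function and its argument, calling the
/// function the first time the future is polled.
struct Lazy<F, A, R: IntoFuture> {
    deferred: Option<(F, A)>,
    future: Option<R::Future>,
}

fn lazy<F, A, R>(f: F, arg: A) -> Lazy<F, A, R>
    where F: FnOnce(A) -> R,
          R: IntoFuture
{
    Lazy { deferred: Some((f, arg)), future: None }
}

impl<F, A, R> Future for Lazy<F, A, R>
    where F: FnOnce(A) -> R,
          R: IntoFuture
{
    type Item = R::Item;
    type Error = R::Error;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        // Run the deferred function exactly once, then delegate to the
        // future it produced.
        if let Some((f, arg)) = self.deferred.take() {
            self.future = Some(f(arg).into_future());
        }
        self.future.as_mut().expect("polled after completion").poll()
    }
}

fn double(x: i32) -> Result<i32, ()> {
    Ok(x * 2)
}

fn main() {
    // No closure needed: the function and its argument travel separately.
    assert_eq!(lazy(double, 21).wait(), Ok(42));
}
```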
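The `Fn`-to-`Stream` change can be pictured with a generic sketch (the `Dispatcher` type is hypothetical and unrelated to tarpc's internals): implementing `Fn` by hand needs the unstable `unboxed_closures`/`fn_traits` features, whereas implementing `Stream` works on stable Rust and lets the event loop poll for items instead of calling into the type.

```rust
extern crate futures;

use futures::{Async, Future, Poll, Stream};

/// Hypothetical dispatcher that might otherwise have implemented
/// `Fn(Request)` via unstable features; as a `Stream` it is simply polled.
struct Dispatcher {
    pending: Vec<String>,
}

impl Stream for Dispatcher {
    type Item = String;
    type Error = ();

    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        // Yield queued requests until none remain, then end the stream.
        match self.pending.pop() {
            Some(request) => Ok(Async::Ready(Some(request))),
            None => Ok(Async::Ready(None)),
        }
    }
}

fn main() {
    let dispatcher = Dispatcher { pending: vec!["ping".to_owned()] };
    let requests = dispatcher.collect().wait().unwrap();
    assert_eq!(requests, vec!["ping".to_owned()]);
}
```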
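On the `AsyncRead` point: tokio-io's default `prepare_uninitialized_buffer` zeroes the read buffer before every read, and an implementation that never reads from the buffer it hands out can return `false` to skip that work. A minimal sketch, with a hypothetical `Transport` wrapper standing in for the crate's actual stream types:

```rust
extern crate tokio_io;

use std::io::{self, Read};
use tokio_io::AsyncRead;

/// Hypothetical wrapper around some underlying async byte stream.
struct Transport<S> {
    inner: S,
}

impl<S: Read> Read for Transport<S> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.read(buf)
    }
}

impl<S: AsyncRead> AsyncRead for Transport<S> {
    // Defer to the inner reader; if it reports that zeroing is unnecessary,
    // the (potentially large) buffer is never touched before the read.
    unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
        self.inner.prepare_uninitialized_buffer(buf)
    }
}
```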
Authored by Tim on 2017-03-31 12:16:40 -07:00, committed by GitHub
parent 15080b2889, commit 5add81b5f3
15 changed files with 1262 additions and 982 deletions


@@ -1,4 +1,3 @@
use future::client::{Client as FutureClient, ClientExt as FutureClientExt,
Options as FutureOptions};
/// Exposes a trait for connecting synchronously to servers.
@@ -29,7 +28,10 @@ impl<Req, Resp, E> Clone for Client<Req, Resp, E> {
 impl<Req, Resp, E> fmt::Debug for Client<Req, Resp, E> {
     fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
-        write!(f, "Client {{ .. }}")
+        const PROXY: &'static &'static str = &"ClientProxy { .. }";
+        f.debug_struct("Client")
+            .field("proxy", PROXY)
+            .finish()
     }
 }
@@ -40,6 +42,9 @@ impl<Req, Resp, E> Client<Req, Resp, E>
 {
     /// Drives an RPC call for the given request.
     pub fn call(&self, request: Req) -> Result<Resp, ::Error<E>> {
+        // Must call wait here to block on the response.
+        // The request handler relies on this fact to safely unwrap the
+        // oneshot send.
         self.proxy.call(request).wait()
     }
@@ -85,6 +90,19 @@ impl Options {
     }
 }
+impl fmt::Debug for Options {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        #[cfg(feature = "tls")]
+        const SOME: &'static &'static str = &"Some(_)";
+        #[cfg(feature = "tls")]
+        const NONE: &'static &'static str = &"None";
+        let mut f = f.debug_struct("Options");
+        #[cfg(feature = "tls")]
+        f.field("tls_ctx", if self.tls_ctx.is_some() { SOME } else { NONE });
+        f.finish()
+    }
+}
 impl Into<FutureOptions> for (reactor::Handle, Options) {
     #[cfg(feature = "tls")]
     fn into(self) -> FutureOptions {
@@ -180,7 +198,10 @@ impl<Req, Resp, E, S> RequestHandler<Req, Resp, E, S>
             .for_each(|(request, response_tx)| {
                 let request = client.call(request)
                     .then(move |response| {
-                        response_tx.complete(response);
+                        // Safe to unwrap because clients always block on the response future.
+                        response_tx.send(response)
+                            .map_err(|_| ())
+                            .expect("Client should block on response");
                         Ok(())
                     });
                 handle.spawn(request);


@@ -1,20 +1,41 @@
-use {bincode, future};
+use {bincode, future, num_cpus};
 use future::server::{Response, Shutdown};
-use futures::Future;
+use futures::{Future, future as futures};
+use futures::sync::oneshot;
 use serde::{Deserialize, Serialize};
 use std::io;
+use std::fmt;
 use std::net::SocketAddr;
+use std::time::Duration;
+use std::usize;
+use thread_pool::{self, Sender, Task, ThreadPool};
 use tokio_core::reactor;
-use tokio_service::NewService;
+use tokio_service::{NewService, Service};
 #[cfg(feature = "tls")]
 use native_tls_inner::TlsAcceptor;
 /// Additional options to configure how the server operates.
-#[derive(Default)]
+#[derive(Debug)]
 pub struct Options {
+    thread_pool: thread_pool::Builder,
     opts: future::server::Options,
 }
+impl Default for Options {
+    fn default() -> Self {
+        let num_cpus = num_cpus::get();
+        Options {
+            thread_pool: thread_pool::Builder::new()
+                .keep_alive(Duration::from_secs(60))
+                .max_pool_size(num_cpus * 100)
+                .core_pool_size(num_cpus)
+                .work_queue_capacity(usize::MAX)
+                .name_prefix("request-thread-"),
+            opts: future::server::Options::default(),
+        }
+    }
+}
 impl Options {
     /// Set the max payload size in bytes. The default is 2,000,000 (2 MB).
     pub fn max_payload_size(mut self, bytes: u64) -> Self {
@@ -22,6 +43,12 @@ impl Options {
         self
     }
+    /// Sets the thread pool builder to use when creating the server's thread pool.
+    pub fn thread_pool(mut self, builder: thread_pool::Builder) -> Self {
+        self.thread_pool = builder;
+        self
+    }
     /// Set the `TlsAcceptor`
     #[cfg(feature = "tls")]
     pub fn tls(mut self, tls_acceptor: TlsAcceptor) -> Self {
@@ -39,29 +66,6 @@ pub struct Handle {
 }
 impl Handle {
-    #[doc(hidden)]
-    pub fn listen<S, Req, Resp, E>(new_service: S,
-                                   addr: SocketAddr,
-                                   options: Options)
-                                   -> io::Result<Self>
-        where S: NewService<Request = Result<Req, bincode::Error>,
-                            Response = Response<Resp, E>,
-                            Error = io::Error> + 'static,
-              Req: Deserialize + 'static,
-              Resp: Serialize + 'static,
-              E: Serialize + 'static
-    {
-        let reactor = reactor::Core::new()?;
-        let (handle, server) =
-            future::server::Handle::listen(new_service, addr, &reactor.handle(), options.opts)?;
-        let server = Box::new(server);
-        Ok(Handle {
-            reactor: reactor,
-            handle: handle,
-            server: server,
-        })
-    }
     /// Runs the server on the current thread, blocking indefinitely.
     pub fn run(mut self) {
         trace!("Running...");
@@ -81,3 +85,141 @@ impl Handle {
         self.handle.addr()
     }
 }
+impl fmt::Debug for Handle {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        const CORE: &'static &'static str = &"Core { .. }";
+        const SERVER: &'static &'static str = &"Box<Future<Item = (), Error = ()>>";
+        f.debug_struct("Handle").field("reactor", CORE)
+            .field("handle", &self.handle)
+            .field("server", SERVER)
+            .finish()
+    }
+}
+#[doc(hidden)]
+pub fn listen<S, Req, Resp, E>(new_service: S, addr: SocketAddr, options: Options)
+    -> io::Result<Handle>
+    where S: NewService<Request = Result<Req, bincode::Error>,
+                        Response = Response<Resp, E>,
+                        Error = io::Error> + 'static,
+          <S::Instance as Service>::Future: Send + 'static,
+          S::Response: Send,
+          S::Error: Send,
+          Req: Deserialize + 'static,
+          Resp: Serialize + 'static,
+          E: Serialize + 'static
+{
+    let new_service = NewThreadService::new(new_service, options.thread_pool);
+    let reactor = reactor::Core::new()?;
+    let (handle, server) =
+        future::server::listen(new_service, addr, &reactor.handle(), options.opts)?;
+    let server = Box::new(server);
+    Ok(Handle {
+        reactor: reactor,
+        handle: handle,
+        server: server,
+    })
+}
+/// A service that uses a thread pool.
+struct NewThreadService<S> where S: NewService {
+    new_service: S,
+    sender: Sender<ServiceTask<<S::Instance as Service>::Future>>,
+    _pool: ThreadPool<ServiceTask<<S::Instance as Service>::Future>>,
+}
+/// A service that runs by executing request handlers in a thread pool.
+struct ThreadService<S> where S: Service {
+    service: S,
+    sender: Sender<ServiceTask<S::Future>>,
+}
+/// A task that handles a single request.
+struct ServiceTask<F> where F: Future {
+    future: F,
+    tx: oneshot::Sender<Result<F::Item, F::Error>>,
+}
+impl<S> NewThreadService<S>
+    where S: NewService,
+          <S::Instance as Service>::Future: Send + 'static,
+          S::Response: Send,
+          S::Error: Send,
+{
+    /// Create a NewThreadService by wrapping another service.
+    fn new(new_service: S, pool: thread_pool::Builder) -> Self {
+        let (sender, _pool) = pool.build();
+        NewThreadService { new_service, sender, _pool }
+    }
+}
+impl<S> NewService for NewThreadService<S>
+    where S: NewService,
+          <S::Instance as Service>::Future: Send + 'static,
+          S::Response: Send,
+          S::Error: Send,
+{
+    type Request = S::Request;
+    type Response = S::Response;
+    type Error = S::Error;
+    type Instance = ThreadService<S::Instance>;
+    fn new_service(&self) -> io::Result<Self::Instance> {
+        Ok(ThreadService {
+            service: self.new_service.new_service()?,
+            sender: self.sender.clone(),
+        })
+    }
+}
+impl<F> Task for ServiceTask<F>
+    where F: Future + Send + 'static,
+          F::Item: Send,
+          F::Error: Send,
+{
+    fn run(self) {
+        // It's fine if this send fails: it just means the receiver was
+        // dropped and the response is no longer being waited on.
+        let _ = self.tx.send(self.future.wait());
+    }
+}
+impl<S> Service for ThreadService<S>
+    where S: Service,
+          S::Future: Send + 'static,
+          S::Response: Send,
+          S::Error: Send,
+{
+    type Request = S::Request;
+    type Response = S::Response;
+    type Error = S::Error;
+    type Future =
+        futures::AndThen<
+            futures::MapErr<
+                oneshot::Receiver<Result<Self::Response, Self::Error>>,
+                fn(oneshot::Canceled) -> Self::Error>,
+            Result<Self::Response, Self::Error>,
+            fn(Result<Self::Response, Self::Error>) -> Result<Self::Response, Self::Error>>;
+    fn call(&self, request: Self::Request) -> Self::Future {
+        let (tx, rx) = oneshot::channel();
+        self.sender.send(ServiceTask {
+            future: self.service.call(request),
+            tx: tx,
+        }).unwrap();
+        // The casts coerce the helper fns to the fn-pointer types named in `Self::Future`.
+        rx.map_err(unreachable as _).and_then(ident)
+    }
+}
+fn unreachable<T, U>(t: T) -> U
+    where T: fmt::Display
+{
+    unreachable!(t)
+}
+fn ident<T>(t: T) -> T {
+    t
+}