Merge branch 'master' into server-writer-thread
@@ -11,5 +11,5 @@ bincode = "*"
 serde_macros = "*"
 log = "*"
 env_logger = "*"
-scoped-pool = "0.1.4"
+scoped-pool = "*"
 lazy_static = "*"
@@ -132,11 +132,11 @@ macro_rules! request_variant {
 ///
 /// The following items are expanded in the enclosing module:
 ///
-/// * `Service` -- the trait defining the rpc service
-/// * `Client` -- a client that makes synchronous requests to the rpc server
-/// * `AsyncClient` -- a client that makes asynchronous requests to the rpc server
-/// * `Future` -- a generic type returned by `AsyncClient`'s rpc's
-/// * `serve` -- the function that starts the rpc server
+/// * `Service` -- the trait defining the RPC service
+/// * `Client` -- a client that makes synchronous requests to the RPC server
+/// * `AsyncClient` -- a client that makes asynchronous requests to the RPC server
+/// * `Future` -- a handle for asynchronously retrieving the result of an RPC
+/// * `serve` -- the function that starts the RPC server
 ///
 /// **Warning**: In addition to the above items, there are a few expanded items that
 /// are considered implementation details. As with the above items, shadowing
@@ -154,7 +154,7 @@ macro_rules! service {
         rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ) -> $out:ty;
     )*
 ) => {
-    #[doc="The provided RPC service."]
+    #[doc="Defines the RPC service"]
     pub trait Service: Send + Sync {
        $(
            $(#[$attr])*
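Taken together, the two hunks above describe the `service!` macro's matcher (`rpc name(arg: Type) -> Out;`) and the items it expands, including the `Service` trait. Below is a minimal sketch of what defining and implementing a service might look like; the exact shape of the generated trait methods is an assumption for illustration, not something shown in this diff.

    // A hedged sketch, not part of this commit: the invocation follows the
    // `rpc $fn_name(...) -> $out;` matcher shown above, but the method
    // signatures of the generated `Service` trait are assumed here.
    service! {
        rpc hello(name: String) -> String;
    }

    struct HelloService;

    // `Service: Send + Sync` is the trait the macro expands, per the hunk above.
    impl Service for HelloService {
        fn hello(&self, name: String) -> String {
            format!("Hello, {}!", name)
        }
    }

A synchronous `Client` would then call `client.hello("Bob".into())` directly, while `AsyncClient` would return a `Future` whose result is retrieved later, as the benchmark hunk below does with `f.get()`.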
@@ -428,6 +428,12 @@ mod test {
 futures.push(client.hello("Bob".into()));
 count += 1;
 if count % concurrency == 0 {
+    // We can't block on each rpc call, otherwise we'd be
+    // benchmarking latency instead of throughput. It's also
+    // not ideal to call more than one rpc per iteration, because
+    // it makes the output of the bencher harder to parse (you have
+    // to mentally divide the number by `concurrency` to get
+    // the ns / iter for one rpc
     for f in futures.drain(..) {
         f.get().unwrap();
     }
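The comment added in this hunk describes a batching pattern for throughput benchmarks: keep `concurrency` calls in flight and only block once a whole batch has been issued. Below is a small self-contained sketch of the same pattern, using standard-library threads as stand-ins for `AsyncClient` futures; `fake_rpc` and the constants are illustrative, not from tarpc.

    use std::thread::{self, JoinHandle};

    // Stand-in for an asynchronous RPC: spawn the work and return a handle
    // that can be blocked on later, like the `Future` returned by `AsyncClient`.
    fn fake_rpc(name: String) -> JoinHandle<String> {
        thread::spawn(move || format!("Hello, {}!", name))
    }

    fn main() {
        let concurrency = 100;
        let total = 1_000;
        let mut futures = Vec::with_capacity(concurrency);
        let mut count = 0;
        for _ in 0..total {
            futures.push(fake_rpc("Bob".into()));
            count += 1;
            // Block only once a full batch is in flight, so the run measures
            // throughput rather than per-call latency.
            if count % concurrency == 0 {
                for f in futures.drain(..) {
                    f.join().unwrap();
                }
            }
        }
    }

The drain-and-block step mirrors the benchmark above: the cost of waiting is amortized over `concurrency` calls per iteration instead of being paid on every call.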