mirror of
https://github.com/OMGeeky/tarpc.git
synced 2026-02-23 15:49:54 +01:00
Compare commits
122 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d27f341bde | ||
|
|
2264ebecfc | ||
|
|
3207affb4a | ||
|
|
0602afd50c | ||
|
|
4343e12217 | ||
|
|
7fda862fb8 | ||
|
|
aa7b875b1a | ||
|
|
54d6e0e3b6 | ||
|
|
bea3b442aa | ||
|
|
954a2502e7 | ||
|
|
e3f34917c5 | ||
|
|
f65dd05949 | ||
|
|
240c436b34 | ||
|
|
c9803688cc | ||
|
|
4987094483 | ||
|
|
ff55080193 | ||
|
|
258193c932 | ||
|
|
67823ef5de | ||
|
|
a671457243 | ||
|
|
cf654549da | ||
|
|
6a01e32a2d | ||
|
|
e6597fab03 | ||
|
|
ebd245a93d | ||
|
|
3ebc3b5845 | ||
|
|
0e5973109d | ||
|
|
5f02d7383a | ||
|
|
2bae148529 | ||
|
|
42a2e03aab | ||
|
|
b566d0c646 | ||
|
|
b359f16767 | ||
|
|
f8681ab134 | ||
|
|
7e521768ab | ||
|
|
e9b1e7d101 | ||
|
|
f0322fb892 | ||
|
|
617daebb88 | ||
|
|
a11d4fff58 | ||
|
|
bf42a04d83 | ||
|
|
06528d6953 | ||
|
|
9f00395746 | ||
|
|
e0674cd57f | ||
|
|
7e49bd9ee7 | ||
|
|
8a1baa9c4e | ||
|
|
31c713d188 | ||
|
|
d905bc1591 | ||
|
|
7f946c7f83 | ||
|
|
36cfdb6c6f | ||
|
|
dbabe9774f | ||
|
|
deb041b8d3 | ||
|
|
85d49477f5 | ||
|
|
45af6ccdeb | ||
|
|
917c0c5e2d | ||
|
|
bbbd43e282 | ||
|
|
f945392b5a | ||
|
|
f4060779e4 | ||
|
|
7cc8d9640b | ||
|
|
7f871f03ef | ||
|
|
709b966150 | ||
|
|
5e19b79aa4 | ||
|
|
6eb806907a | ||
|
|
8250ca31ff | ||
|
|
7cd776143b | ||
|
|
5f6c3d7d98 | ||
|
|
915fe3ed4e | ||
|
|
d8c7b9feb2 | ||
|
|
5ab3866d96 | ||
|
|
184ea42033 | ||
|
|
014c209b8e | ||
|
|
e91005855c | ||
|
|
46bcc0f559 | ||
|
|
61322ebf41 | ||
|
|
db0c9c4182 | ||
|
|
9ee3011687 | ||
|
|
5aa4a2cef6 | ||
|
|
f38a172523 | ||
|
|
66dbca80b2 | ||
|
|
61377dd4ff | ||
|
|
cd03f3ff8c | ||
|
|
9479963773 | ||
|
|
f974533bf7 | ||
|
|
d560ac6197 | ||
|
|
1cdff15412 | ||
|
|
f8ba7d9f4e | ||
|
|
41c1aafaf7 | ||
|
|
75d1e877be | ||
|
|
88e1cf558b | ||
|
|
50879d2acb | ||
|
|
13cb14a119 | ||
|
|
22ef6b7800 | ||
|
|
e48e6dfe67 | ||
|
|
1b58914d59 | ||
|
|
2f24842b2d | ||
|
|
5c485fe608 | ||
|
|
b0319e7db9 | ||
|
|
a4d9581888 | ||
|
|
fb5022b1c0 | ||
|
|
abb0b5b3ac | ||
|
|
49f2641e3c | ||
|
|
650c60fe44 | ||
|
|
1d0bbcb36c | ||
|
|
c456ad7fa5 | ||
|
|
537446a5c9 | ||
|
|
94b5b2c431 | ||
|
|
9863433fea | ||
|
|
9a27465a25 | ||
|
|
263cfe1435 | ||
|
|
6ae5302a70 | ||
|
|
c67b7283e7 | ||
|
|
7b6e98da7b | ||
|
|
15b65fa20f | ||
|
|
372900173a | ||
|
|
1089415451 | ||
|
|
8dbeeff0eb | ||
|
|
85312d430c | ||
|
|
9843af9e00 | ||
|
|
a6bd423ef0 | ||
|
|
146496d08c | ||
|
|
b562051c38 | ||
|
|
fe164ca368 | ||
|
|
950ad5187c | ||
|
|
e6ab69c314 | ||
|
|
373dcbed57 | ||
|
|
ce9c057b1b |
66
.github/workflows/main.yml
vendored
Normal file
66
.github/workflows/main.yml
vendored
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
on: [push, pull_request]
|
||||||
|
|
||||||
|
name: Continuous integration
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check:
|
||||||
|
name: Check
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v1
|
||||||
|
- uses: actions-rs/toolchain@v1
|
||||||
|
with:
|
||||||
|
profile: minimal
|
||||||
|
toolchain: stable
|
||||||
|
override: true
|
||||||
|
- uses: actions-rs/cargo@v1
|
||||||
|
with:
|
||||||
|
command: check
|
||||||
|
args: --all-features
|
||||||
|
|
||||||
|
test:
|
||||||
|
name: Test Suite
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v1
|
||||||
|
- uses: actions-rs/toolchain@v1
|
||||||
|
with:
|
||||||
|
profile: minimal
|
||||||
|
toolchain: stable
|
||||||
|
override: true
|
||||||
|
- uses: actions-rs/cargo@v1
|
||||||
|
with:
|
||||||
|
command: test
|
||||||
|
args: --all-features
|
||||||
|
|
||||||
|
fmt:
|
||||||
|
name: Rustfmt
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v1
|
||||||
|
- uses: actions-rs/toolchain@v1
|
||||||
|
with:
|
||||||
|
profile: minimal
|
||||||
|
toolchain: stable
|
||||||
|
override: true
|
||||||
|
- run: rustup component add rustfmt
|
||||||
|
- uses: actions-rs/cargo@v1
|
||||||
|
with:
|
||||||
|
command: fmt
|
||||||
|
args: --all -- --check
|
||||||
|
|
||||||
|
clippy:
|
||||||
|
name: Clippy
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v1
|
||||||
|
- uses: actions-rs/toolchain@v1
|
||||||
|
with:
|
||||||
|
profile: minimal
|
||||||
|
toolchain: stable
|
||||||
|
override: true
|
||||||
|
- run: rustup component add clippy
|
||||||
|
- uses: actions-rs/cargo@v1
|
||||||
|
with:
|
||||||
|
command: clippy
|
||||||
|
args: --all-features -- -D warnings
|
||||||
13
.travis.yml
13
.travis.yml
@@ -1,13 +0,0 @@
|
|||||||
language: rust
|
|
||||||
rust:
|
|
||||||
- nightly
|
|
||||||
sudo: false
|
|
||||||
cache: cargo
|
|
||||||
|
|
||||||
os:
|
|
||||||
- osx
|
|
||||||
- linux
|
|
||||||
|
|
||||||
script:
|
|
||||||
- cargo test --all-targets --all-features
|
|
||||||
- cargo test --doc --all-features
|
|
||||||
@@ -2,9 +2,6 @@
|
|||||||
|
|
||||||
members = [
|
members = [
|
||||||
"example-service",
|
"example-service",
|
||||||
"rpc",
|
|
||||||
"trace",
|
|
||||||
"bincode-transport",
|
|
||||||
"tarpc",
|
"tarpc",
|
||||||
"plugins",
|
"plugins",
|
||||||
]
|
]
|
||||||
|
|||||||
141
README.md
141
README.md
@@ -1,8 +1,20 @@
|
|||||||
## tarpc: Tim & Adam's RPC lib
|
[![Crates.io][crates-badge]][crates-url]
|
||||||
[](https://travis-ci.org/google/tarpc)
|
[![MIT licensed][mit-badge]][mit-url]
|
||||||
[](LICENSE)
|
[![Build status][gh-actions-badge]][gh-actions-url]
|
||||||
[](https://crates.io/crates/tarpc)
|
[![Discord chat][discord-badge]][discord-url]
|
||||||
[](https://gitter.im/tarpc/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
|
||||||
|
[crates-badge]: https://img.shields.io/crates/v/tarpc.svg
|
||||||
|
[crates-url]: https://crates.io/crates/tarpc
|
||||||
|
[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
|
||||||
|
[mit-url]: LICENSE
|
||||||
|
[gh-actions-badge]: https://github.com/google/tarpc/workflows/Continuous%20integration/badge.svg
|
||||||
|
[gh-actions-url]: https://github.com/google/tarpc/actions?query=workflow%3A%22Continuous+integration%22
|
||||||
|
[discord-badge]: https://img.shields.io/discord/647529123996237854.svg?logo=discord&style=flat-square
|
||||||
|
[discord-url]: https://discord.gg/gXwpdSt
|
||||||
|
|
||||||
|
# tarpc
|
||||||
|
|
||||||
|
<!-- cargo-sync-readme start -->
|
||||||
|
|
||||||
*Disclaimer*: This is not an official Google product.
|
*Disclaimer*: This is not an official Google product.
|
||||||
|
|
||||||
@@ -25,34 +37,54 @@ architectures. Two well-known ones are [gRPC](http://www.grpc.io) and
|
|||||||
|
|
||||||
tarpc differentiates itself from other RPC frameworks by defining the schema in code,
|
tarpc differentiates itself from other RPC frameworks by defining the schema in code,
|
||||||
rather than in a separate language such as .proto. This means there's no separate compilation
|
rather than in a separate language such as .proto. This means there's no separate compilation
|
||||||
process, and no cognitive context switching between different languages. Additionally, it
|
process, and no context switching between different languages.
|
||||||
works with the community-backed library serde: any serde-serializable type can be used as
|
|
||||||
arguments to tarpc fns.
|
Some other features of tarpc:
|
||||||
|
- Pluggable transport: any type impling `Stream<Item = Request> + Sink<Response>` can be
|
||||||
|
used as a transport to connect the client and server.
|
||||||
|
- `Send + 'static` optional: if the transport doesn't require it, neither does tarpc!
|
||||||
|
- Cascading cancellation: dropping a request will send a cancellation message to the server.
|
||||||
|
The server will cease any unfinished work on the request, subsequently cancelling any of its
|
||||||
|
own requests, repeating for the entire chain of transitive dependencies.
|
||||||
|
- Configurable deadlines and deadline propagation: request deadlines default to 10s if
|
||||||
|
unspecified. The server will automatically cease work when the deadline has passed. Any
|
||||||
|
requests sent by the server that use the request context will propagate the request deadline.
|
||||||
|
For example, if a server is handling a request with a 10s deadline, does 2s of work, then
|
||||||
|
sends a request to another server, that server will see an 8s deadline.
|
||||||
|
- Serde serialization: enabling the `serde1` Cargo feature will make service requests and
|
||||||
|
responses `Serialize + Deserialize`. It's entirely optional, though: in-memory transports can
|
||||||
|
be used, as well, so the price of serialization doesn't have to be paid when it's not needed.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
Add to your `Cargo.toml` dependencies:
|
Add to your `Cargo.toml` dependencies:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
tarpc = "0.18.0"
|
tarpc = "0.22.0"
|
||||||
```
|
```
|
||||||
|
|
||||||
The `service!` macro expands to a collection of items that form an
|
The `tarpc::service` attribute expands to a collection of items that form an rpc service.
|
||||||
rpc service. In the above example, the macro is called within the
|
These generated types make it easy and ergonomic to write servers with less boilerplate.
|
||||||
`hello_service` module. This module will contain a `Client` stub and `Service` trait. There is
|
Simply implement the generated service trait, and you're off to the races!
|
||||||
These generated types make it easy and ergonomic to write servers without dealing with serialization
|
|
||||||
directly. Simply implement one of the generated traits, and you're off to the
|
|
||||||
races!
|
|
||||||
|
|
||||||
## Example
|
## Example
|
||||||
|
|
||||||
Here's a small service.
|
For this example, in addition to tarpc, also add two other dependencies to
|
||||||
|
your `Cargo.toml`:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
futures = "0.3"
|
||||||
|
tokio = "0.2"
|
||||||
|
```
|
||||||
|
|
||||||
|
In the following example, we use an in-process channel for communication between
|
||||||
|
client and server. In real code, you will likely communicate over the network.
|
||||||
|
For a more real-world example, see [example-service](example-service).
|
||||||
|
|
||||||
|
First, let's set up the dependencies and service definition.
|
||||||
|
|
||||||
```rust
|
```rust
|
||||||
#![feature(arbitrary_self_types, await_macro, async_await, proc_macro_hygiene)]
|
|
||||||
|
|
||||||
|
|
||||||
use futures::{
|
use futures::{
|
||||||
compat::Executor01CompatExt,
|
|
||||||
future::{self, Ready},
|
future::{self, Ready},
|
||||||
prelude::*,
|
prelude::*,
|
||||||
};
|
};
|
||||||
@@ -64,17 +96,23 @@ use std::io;
|
|||||||
|
|
||||||
// This is the service definition. It looks a lot like a trait definition.
|
// This is the service definition. It looks a lot like a trait definition.
|
||||||
// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
tarpc::service! {
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
/// Returns a greeting for name.
|
/// Returns a greeting for name.
|
||||||
rpc hello(name: String) -> String;
|
async fn hello(name: String) -> String;
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
// This is the type that implements the generated Service trait. It is the business logic
|
This service definition generates a trait called `World`. Next we need to
|
||||||
|
implement it for our Server struct.
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// This is the type that implements the generated World trait. It is the business logic
|
||||||
// and is used to start the server.
|
// and is used to start the server.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct HelloServer;
|
struct HelloServer;
|
||||||
|
|
||||||
impl Service for HelloServer {
|
impl World for HelloServer {
|
||||||
// Each defined rpc generates two items in the trait, a fn that serves the RPC, and
|
// Each defined rpc generates two items in the trait, a fn that serves the RPC, and
|
||||||
// an associated type representing the future output by the fn.
|
// an associated type representing the future output by the fn.
|
||||||
|
|
||||||
@@ -84,53 +122,46 @@ impl Service for HelloServer {
|
|||||||
future::ready(format!("Hello, {}!", name))
|
future::ready(format!("Hello, {}!", name))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
async fn run() -> io::Result<()> {
|
Lastly let's write our `main` that will start the server. While this example uses an
|
||||||
// bincode_transport is provided by the associated crate bincode-transport. It makes it easy
|
[in-process channel](rpc::transport::channel), tarpc also ships a generic [`serde_transport`]
|
||||||
// to start up a serde-powered bincode serialization strategy over TCP.
|
behind the `serde-transport` feature, with additional [TCP](serde_transport::tcp) functionality
|
||||||
let transport = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
available behind the `tcp` feature.
|
||||||
let addr = transport.local_addr();
|
|
||||||
|
```rust
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> io::Result<()> {
|
||||||
|
let (client_transport, server_transport) = tarpc::transport::channel::unbounded();
|
||||||
|
|
||||||
// The server is configured with the defaults.
|
|
||||||
let server = server::new(server::Config::default())
|
let server = server::new(server::Config::default())
|
||||||
// Server can listen on any type that implements the Transport trait.
|
// incoming() takes a stream of transports such as would be returned by
|
||||||
.incoming(transport)
|
// TcpListener::incoming (but a stream instead of an iterator).
|
||||||
// Close the stream after the client connects
|
.incoming(stream::once(future::ready(server_transport)))
|
||||||
.take(1)
|
.respond_with(HelloServer.serve());
|
||||||
// serve is generated by the service! macro. It takes as input any type implementing
|
|
||||||
// the generated Service trait.
|
|
||||||
.respond_with(serve(HelloServer));
|
|
||||||
|
|
||||||
tokio_executor::spawn(server.unit_error().boxed().compat());
|
tokio::spawn(server);
|
||||||
|
|
||||||
let transport = await!(bincode_transport::connect(&addr))?;
|
// WorldClient is generated by the macro. It has a constructor `new` that takes a config and
|
||||||
|
// any Transport as input
|
||||||
|
let mut client = WorldClient::new(client::Config::default(), client_transport).spawn()?;
|
||||||
|
|
||||||
// new_stub is generated by the service! macro. Like Server, it takes a config and any
|
// The client has an RPC method for each RPC defined in the annotated trait. It takes the same
|
||||||
// Transport as input, and returns a Client, also generated by the macro.
|
// args as defined, with the addition of a Context, which is always the first arg. The Context
|
||||||
// by the service mcro.
|
|
||||||
let mut client = await!(new_stub(client::Config::default(), transport))?;
|
|
||||||
|
|
||||||
// The client has an RPC method for each RPC defined in service!. It takes the same args
|
|
||||||
// as defined, with the addition of a Context, which is always the first arg. The Context
|
|
||||||
// specifies a deadline and trace information which can be helpful in debugging requests.
|
// specifies a deadline and trace information which can be helpful in debugging requests.
|
||||||
let hello = await!(client.hello(context::current(), "Stim".to_string()))?;
|
let hello = client.hello(context::current(), "Stim".to_string()).await?;
|
||||||
|
|
||||||
println!("{}", hello);
|
println!("{}", hello);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
tokio::run(run()
|
|
||||||
.map_err(|e| eprintln!("Oh no: {}", e))
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Service Documentation
|
## Service Documentation
|
||||||
|
|
||||||
Use `cargo doc` as you normally would to see the documentation created for all
|
Use `cargo doc` as you normally would to see the documentation created for all
|
||||||
items expanded by a `service!` invocation.
|
items expanded by a `service!` invocation.
|
||||||
|
|
||||||
|
<!-- cargo-sync-readme end -->
|
||||||
|
|
||||||
|
License: MIT
|
||||||
|
|||||||
110
RELEASES.md
110
RELEASES.md
@@ -1,6 +1,114 @@
|
|||||||
|
## 0.22.0 (2020-08-02)
|
||||||
|
|
||||||
|
This release adds some flexibility and consistency to `serde_transport`, with one new feature and
|
||||||
|
one small breaking change.
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
`serde_transport::tcp` now exposes framing configuration on `connect()` and `listen()`. This is
|
||||||
|
useful if, for instance, you want to send requests or responses that are larger than the maximum
|
||||||
|
payload allowed by default:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let mut transport = tarpc::serde_transport::tcp::connect(server_addr, Json::default);
|
||||||
|
transport.config_mut().max_frame_length(4294967296);
|
||||||
|
let mut client = MyClient::new(client::Config::default(), transport.await?).spawn()?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
The codec argument to `serde_transport::tcp::connect` changed from a Codec to impl Fn() -> Codec,
|
||||||
|
to be consistent with `serde_transport::tcp::listen`. While only one Codec is needed, more than one
|
||||||
|
person has been tripped up by the inconsistency between `connect` and `listen`. Unfortunately, the
|
||||||
|
compiler errors are not much help in this case, so it was decided to simply do the more intuitive
|
||||||
|
thing so that the compiler doesn't need to step in in the first place.
|
||||||
|
|
||||||
|
|
||||||
|
## 0.21.1 (2020-08-02)
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
#### #[tarpc::server] diagnostics
|
||||||
|
|
||||||
|
When a service impl uses #[tarpc::server], only `async fn`s are re-written. This can lead to
|
||||||
|
confusing compiler errors about missing associated types:
|
||||||
|
|
||||||
|
```
|
||||||
|
error: not all trait items implemented, missing: `HelloFut`
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:9:1
|
||||||
|
|
|
||||||
|
9 | impl World for HelloServer {
|
||||||
|
| ^^^^
|
||||||
|
```
|
||||||
|
|
||||||
|
The proc macro now provides better diagnostics for this case:
|
||||||
|
|
||||||
|
```
|
||||||
|
error: not all trait items implemented, missing: `HelloFut`
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:9:1
|
||||||
|
|
|
||||||
|
9 | impl World for HelloServer {
|
||||||
|
| ^^^^
|
||||||
|
|
||||||
|
error: hint: `#[tarpc::server]` only rewrites async fns, and `fn hello` is not async
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:10:5
|
||||||
|
|
|
||||||
|
10 | fn hello(name: String) -> String {
|
||||||
|
| ^^
|
||||||
|
```
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
#### Fixed client hanging when server shuts down
|
||||||
|
|
||||||
|
Previously, clients would ignore when the read half of the transport was closed, continuing to
|
||||||
|
write requests. This didn't make much sense, because without the ability to receive responses,
|
||||||
|
clients have no way to know if requests were actually processed by the server. It basically just
|
||||||
|
led to clients that would hang for a few seconds before shutting down. This has now been
|
||||||
|
corrected: clients will immediately shut down when the read-half of the transport is closed.
|
||||||
|
|
||||||
|
#### More docs.rs documentation
|
||||||
|
|
||||||
|
Previously, docs.rs only documented items enabled by default, notably leaving out documentation
|
||||||
|
for tokio and serde features. This has now been corrected: docs.rs should have documentation
|
||||||
|
for all optional features.
|
||||||
|
|
||||||
|
## 0.21.0 (2020-06-26)
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
A new proc macro, `#[tarpc::server]` was added! This enables service impls to elide the boilerplate
|
||||||
|
of specifying associated types for each RPC. With the ubiquity of async-await, most code won't have
|
||||||
|
nameable futures and will just be boxing the return type anyway. This macro does that for you.
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
- Enums had _non_exhaustive fields replaced with the #[non_exhaustive] attribute.
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- https://github.com/google/tarpc/issues/304
|
||||||
|
|
||||||
|
A race condition in code that limits number of connections per client caused occasional panics.
|
||||||
|
|
||||||
|
- https://github.com/google/tarpc/pull/295
|
||||||
|
|
||||||
|
Made request timeouts account for time spent in the outbound buffer. Previously, a large outbound
|
||||||
|
queue would lead to requests not timing out correctly.
|
||||||
|
|
||||||
|
## 0.20.0 (2019-12-11)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
1. tarpc has updated its tokio dependency to the latest 0.2 version.
|
||||||
|
2. The tarpc crates have been unified into just `tarpc`, with new Cargo features to enable
|
||||||
|
functionality.
|
||||||
|
- The bincode-transport and json-transport crates are deprecated and superseded by
|
||||||
|
the `serde_transport` module, which unifies much of the logic present in both crates.
|
||||||
|
|
||||||
## 0.13.0 (2018-10-16)
|
## 0.13.0 (2018-10-16)
|
||||||
|
|
||||||
### Breaking Changes
|
### Breaking Changes
|
||||||
|
|
||||||
Version 0.13 marks a significant departure from previous versions of tarpc. The
|
Version 0.13 marks a significant departure from previous versions of tarpc. The
|
||||||
API has changed significantly. The tokio-proto crate has been torn out and
|
API has changed significantly. The tokio-proto crate has been torn out and
|
||||||
|
|||||||
@@ -1,36 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "tarpc-bincode-transport"
|
|
||||||
version = "0.7.0"
|
|
||||||
authors = ["Tim Kuehn <tikue@google.com>"]
|
|
||||||
edition = '2018'
|
|
||||||
license = "MIT"
|
|
||||||
documentation = "https://docs.rs/tarpc-bincode-transport"
|
|
||||||
homepage = "https://github.com/google/tarpc"
|
|
||||||
repository = "https://github.com/google/tarpc"
|
|
||||||
keywords = ["rpc", "network", "bincode", "serde", "tarpc"]
|
|
||||||
categories = ["asynchronous", "network-programming"]
|
|
||||||
readme = "../README.md"
|
|
||||||
description = "A bincode-based transport for tarpc services."
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
bincode = "1"
|
|
||||||
futures-preview = { version = "0.3.0-alpha.16", features = ["compat"] }
|
|
||||||
futures_legacy = { version = "0.1", package = "futures" }
|
|
||||||
pin-utils = "0.1.0-alpha.4"
|
|
||||||
rpc = { package = "tarpc-lib", version = "0.6", path = "../rpc", features = ["serde1"] }
|
|
||||||
serde = "1.0"
|
|
||||||
tokio-io = "0.1"
|
|
||||||
async-bincode = "0.4"
|
|
||||||
tokio-tcp = "0.1"
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
env_logger = "0.6"
|
|
||||||
humantime = "1.0"
|
|
||||||
libtest = "0.0.1"
|
|
||||||
log = "0.4"
|
|
||||||
rand = "0.6"
|
|
||||||
tokio = "0.1"
|
|
||||||
tokio-executor = "0.1"
|
|
||||||
tokio-reactor = "0.1"
|
|
||||||
tokio-serde = "0.3"
|
|
||||||
tokio-timer = "0.2"
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
edition = "2018"
|
|
||||||
@@ -1,181 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
//! A TCP [`Transport`] that serializes as bincode.
|
|
||||||
|
|
||||||
#![feature(arbitrary_self_types, async_await)]
|
|
||||||
#![deny(missing_docs, missing_debug_implementations)]
|
|
||||||
|
|
||||||
use async_bincode::{AsyncBincodeStream, AsyncDestination};
|
|
||||||
use futures::{compat::*, prelude::*, ready};
|
|
||||||
use pin_utils::unsafe_pinned;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::{
|
|
||||||
error::Error,
|
|
||||||
io,
|
|
||||||
marker::PhantomData,
|
|
||||||
net::SocketAddr,
|
|
||||||
pin::Pin,
|
|
||||||
task::{Context, Poll},
|
|
||||||
};
|
|
||||||
use tokio_io::{AsyncRead, AsyncWrite};
|
|
||||||
use tokio_tcp::{TcpListener, TcpStream};
|
|
||||||
|
|
||||||
/// A transport that serializes to, and deserializes from, a [`TcpStream`].
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Transport<S, Item, SinkItem> {
|
|
||||||
inner: Compat01As03Sink<AsyncBincodeStream<S, Item, SinkItem, AsyncDestination>, SinkItem>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Item, SinkItem> Transport<S, Item, SinkItem> {
|
|
||||||
unsafe_pinned!(
|
|
||||||
inner: Compat01As03Sink<AsyncBincodeStream<S, Item, SinkItem, AsyncDestination>, SinkItem>
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Item, SinkItem> Stream for Transport<S, Item, SinkItem>
|
|
||||||
where
|
|
||||||
S: AsyncRead,
|
|
||||||
Item: for<'a> Deserialize<'a>,
|
|
||||||
{
|
|
||||||
type Item = io::Result<Item>;
|
|
||||||
|
|
||||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<io::Result<Item>>> {
|
|
||||||
match self.inner().poll_next(cx) {
|
|
||||||
Poll::Pending => Poll::Pending,
|
|
||||||
Poll::Ready(None) => Poll::Ready(None),
|
|
||||||
Poll::Ready(Some(Ok(next))) => Poll::Ready(Some(Ok(next))),
|
|
||||||
Poll::Ready(Some(Err(e))) => {
|
|
||||||
Poll::Ready(Some(Err(io::Error::new(io::ErrorKind::Other, e))))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Item, SinkItem> Sink<SinkItem> for Transport<S, Item, SinkItem>
|
|
||||||
where
|
|
||||||
S: AsyncWrite,
|
|
||||||
SinkItem: Serialize,
|
|
||||||
{
|
|
||||||
type SinkError = io::Error;
|
|
||||||
|
|
||||||
fn start_send(self: Pin<&mut Self>, item: SinkItem) -> io::Result<()> {
|
|
||||||
self.inner()
|
|
||||||
.start_send(item)
|
|
||||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
convert(self.inner().poll_ready(cx))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
convert(self.inner().poll_flush(cx))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
convert(self.inner().poll_close(cx))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn convert<E: Into<Box<Error + Send + Sync>>>(poll: Poll<Result<(), E>>) -> Poll<io::Result<()>> {
|
|
||||||
match poll {
|
|
||||||
Poll::Pending => Poll::Pending,
|
|
||||||
Poll::Ready(Ok(())) => Poll::Ready(Ok(())),
|
|
||||||
Poll::Ready(Err(e)) => Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, e))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Item, SinkItem> rpc::Transport for Transport<TcpStream, Item, SinkItem>
|
|
||||||
where
|
|
||||||
Item: for<'de> Deserialize<'de>,
|
|
||||||
SinkItem: Serialize,
|
|
||||||
{
|
|
||||||
type Item = Item;
|
|
||||||
type SinkItem = SinkItem;
|
|
||||||
|
|
||||||
fn peer_addr(&self) -> io::Result<SocketAddr> {
|
|
||||||
self.inner.get_ref().get_ref().peer_addr()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn local_addr(&self) -> io::Result<SocketAddr> {
|
|
||||||
self.inner.get_ref().get_ref().local_addr()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a new bincode transport that reads from and writes to `io`.
|
|
||||||
pub fn new<Item, SinkItem>(io: TcpStream) -> Transport<TcpStream, Item, SinkItem>
|
|
||||||
where
|
|
||||||
Item: for<'de> Deserialize<'de>,
|
|
||||||
SinkItem: Serialize,
|
|
||||||
{
|
|
||||||
Transport::from(io)
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Item, SinkItem> From<S> for Transport<S, Item, SinkItem> {
|
|
||||||
fn from(inner: S) -> Self {
|
|
||||||
Transport {
|
|
||||||
inner: Compat01As03Sink::new(AsyncBincodeStream::from(inner).for_async()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Connects to `addr`, wrapping the connection in a bincode transport.
|
|
||||||
pub async fn connect<Item, SinkItem>(
|
|
||||||
addr: &SocketAddr,
|
|
||||||
) -> io::Result<Transport<TcpStream, Item, SinkItem>>
|
|
||||||
where
|
|
||||||
Item: for<'de> Deserialize<'de>,
|
|
||||||
SinkItem: Serialize,
|
|
||||||
{
|
|
||||||
Ok(new(TcpStream::connect(addr).compat().await?))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Listens on `addr`, wrapping accepted connections in bincode transports.
|
|
||||||
pub fn listen<Item, SinkItem>(addr: &SocketAddr) -> io::Result<Incoming<Item, SinkItem>>
|
|
||||||
where
|
|
||||||
Item: for<'de> Deserialize<'de>,
|
|
||||||
SinkItem: Serialize,
|
|
||||||
{
|
|
||||||
let listener = TcpListener::bind(addr)?;
|
|
||||||
let local_addr = listener.local_addr()?;
|
|
||||||
let incoming = listener.incoming().compat();
|
|
||||||
Ok(Incoming {
|
|
||||||
incoming,
|
|
||||||
local_addr,
|
|
||||||
ghost: PhantomData,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A [`TcpListener`] that wraps connections in bincode transports.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Incoming<Item, SinkItem> {
|
|
||||||
incoming: Compat01As03<tokio_tcp::Incoming>,
|
|
||||||
local_addr: SocketAddr,
|
|
||||||
ghost: PhantomData<(Item, SinkItem)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Item, SinkItem> Incoming<Item, SinkItem> {
|
|
||||||
unsafe_pinned!(incoming: Compat01As03<tokio_tcp::Incoming>);
|
|
||||||
|
|
||||||
/// Returns the address being listened on.
|
|
||||||
pub fn local_addr(&self) -> SocketAddr {
|
|
||||||
self.local_addr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Item, SinkItem> Stream for Incoming<Item, SinkItem>
|
|
||||||
where
|
|
||||||
Item: for<'a> Deserialize<'a>,
|
|
||||||
SinkItem: Serialize,
|
|
||||||
{
|
|
||||||
type Item = io::Result<Transport<TcpStream, Item, SinkItem>>;
|
|
||||||
|
|
||||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
|
||||||
let next = ready!(self.incoming().poll_next(cx)?);
|
|
||||||
Poll::Ready(next.map(|conn| Ok(new(conn))))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,101 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
//! Tests client/server control flow.
|
|
||||||
|
|
||||||
#![feature(test, integer_atomics, async_await)]
|
|
||||||
|
|
||||||
use futures::{compat::Executor01CompatExt, prelude::*};
|
|
||||||
use libtest::stats::Stats;
|
|
||||||
use rpc::{
|
|
||||||
client, context,
|
|
||||||
server::{Handler, Server},
|
|
||||||
};
|
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
time::{Duration, Instant},
|
|
||||||
};
|
|
||||||
|
|
||||||
async fn bench() -> io::Result<()> {
|
|
||||||
let listener = tarpc_bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
|
||||||
let addr = listener.local_addr();
|
|
||||||
|
|
||||||
tokio_executor::spawn(
|
|
||||||
Server::<u32, u32>::default()
|
|
||||||
.incoming(listener)
|
|
||||||
.take(1)
|
|
||||||
.respond_with(|_ctx, request| futures::future::ready(Ok(request)))
|
|
||||||
.unit_error()
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let conn = tarpc_bincode_transport::connect(&addr).await?;
|
|
||||||
let client = &mut client::new::<u32, u32, _>(client::Config::default(), conn).await?;
|
|
||||||
|
|
||||||
let total = 10_000usize;
|
|
||||||
let mut successful = 0u32;
|
|
||||||
let mut unsuccessful = 0u32;
|
|
||||||
let mut durations = vec![];
|
|
||||||
for _ in 1..=total {
|
|
||||||
let now = Instant::now();
|
|
||||||
let response = client.call(context::current(), 0u32).await;
|
|
||||||
let elapsed = now.elapsed();
|
|
||||||
|
|
||||||
match response {
|
|
||||||
Ok(_) => successful += 1,
|
|
||||||
Err(_) => unsuccessful += 1,
|
|
||||||
};
|
|
||||||
durations.push(elapsed);
|
|
||||||
}
|
|
||||||
|
|
||||||
let durations_nanos = durations
|
|
||||||
.iter()
|
|
||||||
.map(|duration| duration.as_secs() as f64 * 1E9 + duration.subsec_nanos() as f64)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let (lower, median, upper) = durations_nanos.quartiles();
|
|
||||||
|
|
||||||
println!("Of {} runs:", durations_nanos.len());
|
|
||||||
println!("\tSuccessful: {}", successful);
|
|
||||||
println!("\tUnsuccessful: {}", unsuccessful);
|
|
||||||
println!(
|
|
||||||
"\tMean: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.mean() as u64)
|
|
||||||
);
|
|
||||||
println!("\tMedian: {:?}", Duration::from_nanos(median as u64));
|
|
||||||
println!(
|
|
||||||
"\tStd Dev: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.std_dev() as u64)
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"\tMin: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.min() as u64)
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"\tMax: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.max() as u64)
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"\tQuartiles: ({:?}, {:?}, {:?})",
|
|
||||||
Duration::from_nanos(lower as u64),
|
|
||||||
Duration::from_nanos(median as u64),
|
|
||||||
Duration::from_nanos(upper as u64)
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn bench_small_packet() -> io::Result<()> {
|
|
||||||
env_logger::init();
|
|
||||||
rpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
tokio::run(bench().map_err(|e| panic!(e.to_string())).boxed().compat());
|
|
||||||
println!("done");
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -1,143 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
//! Tests client/server control flow.
|
|
||||||
|
|
||||||
#![feature(async_await)]
|
|
||||||
|
|
||||||
use futures::{
|
|
||||||
compat::{Executor01CompatExt, Future01CompatExt},
|
|
||||||
prelude::*,
|
|
||||||
stream::FuturesUnordered,
|
|
||||||
};
|
|
||||||
use log::{info, trace};
|
|
||||||
use rand::distributions::{Distribution, Normal};
|
|
||||||
use rpc::{client, context, server::Server};
|
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
time::{Duration, Instant, SystemTime},
|
|
||||||
};
|
|
||||||
use tokio::timer::Delay;
|
|
||||||
|
|
||||||
pub trait AsDuration {
|
|
||||||
/// Delay of 0 if self is in the past
|
|
||||||
fn as_duration(&self) -> Duration;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsDuration for SystemTime {
|
|
||||||
fn as_duration(&self) -> Duration {
|
|
||||||
self.duration_since(SystemTime::now()).unwrap_or_default()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run() -> io::Result<()> {
|
|
||||||
let listener = tarpc_bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
|
||||||
let addr = listener.local_addr();
|
|
||||||
let server = Server::<String, String>::default()
|
|
||||||
.incoming(listener)
|
|
||||||
.take(1)
|
|
||||||
.for_each(async move |channel| {
|
|
||||||
let channel = if let Ok(channel) = channel {
|
|
||||||
channel
|
|
||||||
} else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
let client_addr = *channel.client_addr();
|
|
||||||
let handler = channel.respond_with(move |ctx, request| {
|
|
||||||
// Sleep for a time sampled from a normal distribution with:
|
|
||||||
// - mean: 1/2 the deadline.
|
|
||||||
// - std dev: 1/2 the deadline.
|
|
||||||
let deadline: Duration = ctx.deadline.as_duration();
|
|
||||||
let deadline_millis = deadline.as_secs() * 1000 + deadline.subsec_millis() as u64;
|
|
||||||
let distribution =
|
|
||||||
Normal::new(deadline_millis as f64 / 2., deadline_millis as f64 / 2.);
|
|
||||||
let delay_millis = distribution.sample(&mut rand::thread_rng()).max(0.);
|
|
||||||
let delay = Duration::from_millis(delay_millis as u64);
|
|
||||||
|
|
||||||
trace!(
|
|
||||||
"[{}/{}] Responding to request in {:?}.",
|
|
||||||
ctx.trace_id(),
|
|
||||||
client_addr,
|
|
||||||
delay,
|
|
||||||
);
|
|
||||||
|
|
||||||
let wait = Delay::new(Instant::now() + delay).compat();
|
|
||||||
async move {
|
|
||||||
wait.await.unwrap();
|
|
||||||
Ok(request)
|
|
||||||
}
|
|
||||||
});
|
|
||||||
tokio_executor::spawn(handler.unit_error().boxed().compat());
|
|
||||||
});
|
|
||||||
|
|
||||||
tokio_executor::spawn(server.unit_error().boxed().compat());
|
|
||||||
|
|
||||||
let conn = tarpc_bincode_transport::connect(&addr).await?;
|
|
||||||
let client = client::new::<String, String, _>(client::Config::default(), conn).await?;
|
|
||||||
|
|
||||||
// Proxy service
|
|
||||||
let listener = tarpc_bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
|
||||||
let addr = listener.local_addr();
|
|
||||||
let proxy_server = Server::<String, String>::default()
|
|
||||||
.incoming(listener)
|
|
||||||
.take(1)
|
|
||||||
.for_each(move |channel| {
|
|
||||||
let client = client.clone();
|
|
||||||
async move {
|
|
||||||
let channel = if let Ok(channel) = channel {
|
|
||||||
channel
|
|
||||||
} else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
let client_addr = *channel.client_addr();
|
|
||||||
let handler = channel.respond_with(move |ctx, request| {
|
|
||||||
trace!("[{}/{}] Proxying request.", ctx.trace_id(), client_addr);
|
|
||||||
let mut client = client.clone();
|
|
||||||
async move { client.call(ctx, request).await }
|
|
||||||
});
|
|
||||||
tokio_executor::spawn(handler.unit_error().boxed().compat());
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
tokio_executor::spawn(proxy_server.unit_error().boxed().compat());
|
|
||||||
|
|
||||||
let mut config = client::Config::default();
|
|
||||||
config.max_in_flight_requests = 10;
|
|
||||||
config.pending_request_buffer = 10;
|
|
||||||
|
|
||||||
let client =
|
|
||||||
client::new::<String, String, _>(config, tarpc_bincode_transport::connect(&addr).await?)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
// Make 3 speculative requests, returning only the quickest.
|
|
||||||
let mut clients: Vec<_> = (1..=3u32).map(|_| client.clone()).collect();
|
|
||||||
let mut requests = vec![];
|
|
||||||
for client in &mut clients {
|
|
||||||
let mut ctx = context::current();
|
|
||||||
ctx.deadline = SystemTime::now() + Duration::from_millis(200);
|
|
||||||
let trace_id = *ctx.trace_id();
|
|
||||||
let response = client.call(ctx, "ping".into());
|
|
||||||
requests.push(response.map(move |r| (trace_id, r)));
|
|
||||||
}
|
|
||||||
let (fastest_response, _) = requests
|
|
||||||
.into_iter()
|
|
||||||
.collect::<FuturesUnordered<_>>()
|
|
||||||
.into_future()
|
|
||||||
.await;
|
|
||||||
let (trace_id, resp) = fastest_response.unwrap();
|
|
||||||
info!("[{}] fastest_response = {:?}", trace_id, resp);
|
|
||||||
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn cancel_slower() -> io::Result<()> {
|
|
||||||
env_logger::init();
|
|
||||||
rpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
tokio::run(run().boxed().map_err(|e| panic!(e)).compat());
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -1,119 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
//! Tests client/server control flow.
|
|
||||||
|
|
||||||
#![feature(async_await)]
|
|
||||||
|
|
||||||
use futures::{
|
|
||||||
compat::{Executor01CompatExt, Future01CompatExt},
|
|
||||||
prelude::*,
|
|
||||||
};
|
|
||||||
use log::{error, info, trace};
|
|
||||||
use rand::distributions::{Distribution, Normal};
|
|
||||||
use rpc::{client, context, server::Server};
|
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
time::{Duration, Instant, SystemTime},
|
|
||||||
};
|
|
||||||
use tokio::timer::Delay;
|
|
||||||
|
|
||||||
pub trait AsDuration {
|
|
||||||
/// Delay of 0 if self is in the past
|
|
||||||
fn as_duration(&self) -> Duration;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsDuration for SystemTime {
|
|
||||||
fn as_duration(&self) -> Duration {
|
|
||||||
self.duration_since(SystemTime::now()).unwrap_or_default()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run() -> io::Result<()> {
|
|
||||||
let listener = tarpc_bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
|
||||||
let addr = listener.local_addr();
|
|
||||||
let server = Server::<String, String>::default()
|
|
||||||
.incoming(listener)
|
|
||||||
.take(1)
|
|
||||||
.for_each(async move |channel| {
|
|
||||||
let channel = if let Ok(channel) = channel {
|
|
||||||
channel
|
|
||||||
} else {
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
let client_addr = *channel.client_addr();
|
|
||||||
let handler = channel.respond_with(move |ctx, request| {
|
|
||||||
// Sleep for a time sampled from a normal distribution with:
|
|
||||||
// - mean: 1/2 the deadline.
|
|
||||||
// - std dev: 1/2 the deadline.
|
|
||||||
let deadline: Duration = ctx.deadline.as_duration();
|
|
||||||
let deadline_millis = deadline.as_secs() * 1000 + deadline.subsec_millis() as u64;
|
|
||||||
let distribution =
|
|
||||||
Normal::new(deadline_millis as f64 / 2., deadline_millis as f64 / 2.);
|
|
||||||
let delay_millis = distribution.sample(&mut rand::thread_rng()).max(0.);
|
|
||||||
let delay = Duration::from_millis(delay_millis as u64);
|
|
||||||
|
|
||||||
trace!(
|
|
||||||
"[{}/{}] Responding to request in {:?}.",
|
|
||||||
ctx.trace_id(),
|
|
||||||
client_addr,
|
|
||||||
delay,
|
|
||||||
);
|
|
||||||
|
|
||||||
let sleep = Delay::new(Instant::now() + delay).compat();
|
|
||||||
async {
|
|
||||||
sleep.await.unwrap();
|
|
||||||
Ok(request)
|
|
||||||
}
|
|
||||||
});
|
|
||||||
tokio_executor::spawn(handler.unit_error().boxed().compat());
|
|
||||||
});
|
|
||||||
|
|
||||||
tokio_executor::spawn(server.unit_error().boxed().compat());
|
|
||||||
|
|
||||||
let mut config = client::Config::default();
|
|
||||||
config.max_in_flight_requests = 10;
|
|
||||||
config.pending_request_buffer = 10;
|
|
||||||
|
|
||||||
let conn = tarpc_bincode_transport::connect(&addr).await?;
|
|
||||||
let client = client::new::<String, String, _>(config, conn).await?;
|
|
||||||
|
|
||||||
let clients = (1..=100u32).map(|_| client.clone()).collect::<Vec<_>>();
|
|
||||||
for mut client in clients {
|
|
||||||
let ctx = context::current();
|
|
||||||
tokio_executor::spawn(
|
|
||||||
async move {
|
|
||||||
let trace_id = *ctx.trace_id();
|
|
||||||
let response = client.call(ctx, "ping".into());
|
|
||||||
match response.await {
|
|
||||||
Ok(response) => info!("[{}] response: {}", trace_id, response),
|
|
||||||
Err(e) => error!("[{}] request error: {:?}: {}", trace_id, e.kind(), e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.unit_error()
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn ping_pong() -> io::Result<()> {
|
|
||||||
env_logger::init();
|
|
||||||
rpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
tokio::run(
|
|
||||||
run()
|
|
||||||
.map_ok(|_| println!("done"))
|
|
||||||
.map_err(|e| panic!(e.to_string()))
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
@@ -13,13 +13,14 @@ readme = "../README.md"
|
|||||||
description = "An example server built on tarpc."
|
description = "An example server built on tarpc."
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
bincode-transport = { package = "tarpc-bincode-transport", version = "0.7", path = "../bincode-transport" }
|
|
||||||
clap = "2.0"
|
clap = "2.0"
|
||||||
futures-preview = { version = "0.3.0-alpha.16", features = ["compat"] }
|
futures = "0.3"
|
||||||
serde = { version = "1.0" }
|
serde = { version = "1.0" }
|
||||||
tarpc = { version = "0.18", path = "../tarpc", features = ["serde1"] }
|
tarpc = { version = "0.22", path = "../tarpc", features = ["full"] }
|
||||||
tokio = "0.1"
|
tokio = { version = "0.2", features = ["full"] }
|
||||||
tokio-executor = "0.1"
|
tokio-serde = { version = "0.6", features = ["json"] }
|
||||||
|
tokio-util = { version = "0.3", features = ["codec"] }
|
||||||
|
env_logger = "0.6"
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
name = "service"
|
name = "service"
|
||||||
|
|||||||
@@ -4,32 +4,15 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![feature(arbitrary_self_types, async_await)]
|
|
||||||
|
|
||||||
use clap::{App, Arg};
|
use clap::{App, Arg};
|
||||||
use futures::{compat::Executor01CompatExt, prelude::*};
|
|
||||||
use std::{io, net::SocketAddr};
|
use std::{io, net::SocketAddr};
|
||||||
use tarpc::{client, context};
|
use tarpc::{client, context};
|
||||||
|
use tokio_serde::formats::Json;
|
||||||
|
|
||||||
async fn run(server_addr: SocketAddr, name: String) -> io::Result<()> {
|
#[tokio::main]
|
||||||
let transport = bincode_transport::connect(&server_addr).await?;
|
async fn main() -> io::Result<()> {
|
||||||
|
env_logger::init();
|
||||||
|
|
||||||
// new_stub is generated by the service! macro. Like Server, it takes a config and any
|
|
||||||
// Transport as input, and returns a Client, also generated by the macro.
|
|
||||||
// by the service mcro.
|
|
||||||
let mut client = service::new_stub(client::Config::default(), transport).await?;
|
|
||||||
|
|
||||||
// The client has an RPC method for each RPC defined in service!. It takes the same args
|
|
||||||
// as defined, with the addition of a Context, which is always the first arg. The Context
|
|
||||||
// specifies a deadline and trace information which can be helpful in debugging requests.
|
|
||||||
let hello = client.hello(context::current(), name).await?;
|
|
||||||
|
|
||||||
println!("{}", hello);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let flags = App::new("Hello Client")
|
let flags = App::new("Hello Client")
|
||||||
.version("0.1")
|
.version("0.1")
|
||||||
.author("Tim <tikue@google.com>")
|
.author("Tim <tikue@google.com>")
|
||||||
@@ -53,21 +36,27 @@ fn main() {
|
|||||||
)
|
)
|
||||||
.get_matches();
|
.get_matches();
|
||||||
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
let server_addr = flags.value_of("server_addr").unwrap();
|
let server_addr = flags.value_of("server_addr").unwrap();
|
||||||
let server_addr = server_addr
|
let server_addr = server_addr
|
||||||
.parse()
|
.parse::<SocketAddr>()
|
||||||
.unwrap_or_else(|e| panic!(r#"--server_addr value "{}" invalid: {}"#, server_addr, e));
|
.unwrap_or_else(|e| panic!(r#"--server_addr value "{}" invalid: {}"#, server_addr, e));
|
||||||
|
|
||||||
let name = flags.value_of("name").unwrap();
|
let name = flags.value_of("name").unwrap().into();
|
||||||
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
let mut transport = tarpc::serde_transport::tcp::connect(server_addr, Json::default);
|
||||||
|
transport.config_mut().max_frame_length(4294967296);
|
||||||
|
|
||||||
tokio::run(
|
// WorldClient is generated by the service attribute. It has a constructor `new` that takes a
|
||||||
run(server_addr, name.into())
|
// config and any Transport as input.
|
||||||
.map_err(|e| eprintln!("Oh no: {}", e))
|
let mut client =
|
||||||
.boxed()
|
service::WorldClient::new(client::Config::default(), transport.await?).spawn()?;
|
||||||
.compat(),
|
|
||||||
);
|
// The client has an RPC method for each RPC defined in the annotated trait. It takes the same
|
||||||
|
// args as defined, with the addition of a Context, which is always the first arg. The Context
|
||||||
|
// specifies a deadline and trace information which can be helpful in debugging requests.
|
||||||
|
let hello = client.hello(context::current(), name).await?;
|
||||||
|
|
||||||
|
println!("{}", hello);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,11 +4,10 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![feature(arbitrary_self_types, async_await, proc_macro_hygiene)]
|
/// This is the service definition. It looks a lot like a trait definition.
|
||||||
|
/// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
// This is the service definition. It looks a lot like a trait definition.
|
#[tarpc::service]
|
||||||
// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
pub trait World {
|
||||||
tarpc::service! {
|
|
||||||
/// Returns a greeting for name.
|
/// Returns a greeting for name.
|
||||||
rpc hello(name: String) -> String;
|
async fn hello(name: String) -> String;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,55 +4,35 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![feature(arbitrary_self_types, async_await)]
|
|
||||||
|
|
||||||
use clap::{App, Arg};
|
use clap::{App, Arg};
|
||||||
use futures::{
|
use futures::{future, prelude::*};
|
||||||
compat::Executor01CompatExt,
|
use service::World;
|
||||||
future::{self, Ready},
|
use std::{
|
||||||
prelude::*,
|
io,
|
||||||
|
net::{IpAddr, SocketAddr},
|
||||||
};
|
};
|
||||||
use std::{io, net::SocketAddr};
|
|
||||||
use tarpc::{
|
use tarpc::{
|
||||||
context,
|
context,
|
||||||
server::{Handler, Server},
|
server::{self, Channel, Handler},
|
||||||
};
|
};
|
||||||
|
use tokio_serde::formats::Json;
|
||||||
|
|
||||||
// This is the type that implements the generated Service trait. It is the business logic
|
// This is the type that implements the generated World trait. It is the business logic
|
||||||
// and is used to start the server.
|
// and is used to start the server.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct HelloServer;
|
struct HelloServer(SocketAddr);
|
||||||
|
|
||||||
impl service::Service for HelloServer {
|
#[tarpc::server]
|
||||||
// Each defined rpc generates two items in the trait, a fn that serves the RPC, and
|
impl World for HelloServer {
|
||||||
// an associated type representing the future output by the fn.
|
async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
format!("Hello, {}! You are connected from {:?}.", name, self.0)
|
||||||
type HelloFut = Ready<String>;
|
|
||||||
|
|
||||||
fn hello(self, _: context::Context, name: String) -> Self::HelloFut {
|
|
||||||
future::ready(format!("Hello, {}!", name))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run(server_addr: SocketAddr) -> io::Result<()> {
|
#[tokio::main]
|
||||||
// bincode_transport is provided by the associated crate bincode-transport. It makes it easy
|
async fn main() -> io::Result<()> {
|
||||||
// to start up a serde-powered bincode serialization strategy over TCP.
|
env_logger::init();
|
||||||
let transport = bincode_transport::listen(&server_addr)?;
|
|
||||||
|
|
||||||
// The server is configured with the defaults.
|
|
||||||
let server = Server::default()
|
|
||||||
// Server can listen on any type that implements the Transport trait.
|
|
||||||
.incoming(transport)
|
|
||||||
// serve is generated by the service! macro. It takes as input any type implementing
|
|
||||||
// the generated Service trait.
|
|
||||||
.respond_with(service::serve(HelloServer));
|
|
||||||
|
|
||||||
server.await;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let flags = App::new("Hello Server")
|
let flags = App::new("Hello Server")
|
||||||
.version("0.1")
|
.version("0.1")
|
||||||
.author("Tim <tikue@google.com>")
|
.author("Tim <tikue@google.com>")
|
||||||
@@ -73,12 +53,28 @@ fn main() {
|
|||||||
.parse()
|
.parse()
|
||||||
.unwrap_or_else(|e| panic!(r#"--port value "{}" invalid: {}"#, port, e));
|
.unwrap_or_else(|e| panic!(r#"--port value "{}" invalid: {}"#, port, e));
|
||||||
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
let server_addr = (IpAddr::from([0, 0, 0, 0]), port);
|
||||||
|
|
||||||
tokio::run(
|
// JSON transport is provided by the json_transport tarpc module. It makes it easy
|
||||||
run(([0, 0, 0, 0], port).into())
|
// to start up a serde-powered json serialization strategy over TCP.
|
||||||
.map_err(|e| eprintln!("Oh no: {}", e))
|
let mut listener = tarpc::serde_transport::tcp::listen(&server_addr, Json::default).await?;
|
||||||
.boxed()
|
listener.config_mut().max_frame_length(4294967296);
|
||||||
.compat(),
|
listener
|
||||||
);
|
// Ignore accept errors.
|
||||||
|
.filter_map(|r| future::ready(r.ok()))
|
||||||
|
.map(server::BaseChannel::with_defaults)
|
||||||
|
// Limit channels to 1 per IP.
|
||||||
|
.max_channels_per_key(1, |t| t.as_ref().peer_addr().unwrap().ip())
|
||||||
|
// serve is generated by the service attribute. It takes as input any type implementing
|
||||||
|
// the generated World trait.
|
||||||
|
.map(|channel| {
|
||||||
|
let server = HelloServer(channel.as_ref().as_ref().peer_addr().unwrap());
|
||||||
|
channel.respond_with(server.serve()).execute()
|
||||||
|
})
|
||||||
|
// Max 10 channels.
|
||||||
|
.buffer_unordered(10)
|
||||||
|
.for_each(|_| async {})
|
||||||
|
.await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -93,7 +93,7 @@ diff=""
|
|||||||
for file in $(git diff --name-only --cached);
|
for file in $(git diff --name-only --cached);
|
||||||
do
|
do
|
||||||
if [ ${file: -3} == ".rs" ]; then
|
if [ ${file: -3} == ".rs" ]; then
|
||||||
diff="$diff$(cargo fmt -- --skip-children --write-mode=diff $file)"
|
diff="$diff$(cargo fmt -- --unstable-features --skip-children --check $file)"
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
if grep --quiet "^[-+]" <<< "$diff"; then
|
if grep --quiet "^[-+]" <<< "$diff"; then
|
||||||
|
|||||||
@@ -89,9 +89,13 @@ if [ "$?" == 0 ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
try_run "Building ... " cargo build --color=always
|
try_run "Building ... " cargo +stable build --color=always
|
||||||
try_run "Testing ... " cargo test --color=always
|
try_run "Testing ... " cargo +stable test --color=always
|
||||||
try_run "Doc Test ... " cargo clean && cargo build --tests && rustdoc --test README.md --edition 2018 -L target/debug/deps -Z unstable-options
|
try_run "Testing with all features enabled ... " cargo +stable test --all-features --color=always
|
||||||
|
for EXAMPLE in $(cargo +stable run --example 2>&1 | grep ' ' | awk '{print $1}')
|
||||||
|
do
|
||||||
|
try_run "Running example \"$EXAMPLE\" ... " cargo +stable run --example $EXAMPLE
|
||||||
|
done
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "tarpc-plugins"
|
name = "tarpc-plugins"
|
||||||
version = "0.5.1"
|
version = "0.8.0"
|
||||||
authors = ["Adam Wright <adam.austin.wright@gmail.com>", "Tim Kuehn <timothy.j.kuehn@gmail.com>"]
|
authors = ["Adam Wright <adam.austin.wright@gmail.com>", "Tim Kuehn <timothy.j.kuehn@gmail.com>"]
|
||||||
|
edition = "2018"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
documentation = "https://docs.rs/tarpc-plugins"
|
documentation = "https://docs.rs/tarpc-plugins"
|
||||||
homepage = "https://github.com/google/tarpc"
|
homepage = "https://github.com/google/tarpc"
|
||||||
@@ -11,14 +12,22 @@ categories = ["asynchronous", "network-programming"]
|
|||||||
readme = "../README.md"
|
readme = "../README.md"
|
||||||
description = "Proc macros for tarpc."
|
description = "Proc macros for tarpc."
|
||||||
|
|
||||||
|
[features]
|
||||||
|
serde1 = []
|
||||||
|
|
||||||
[badges]
|
[badges]
|
||||||
travis-ci = { repository = "google/tarpc" }
|
travis-ci = { repository = "google/tarpc" }
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
itertools = "0.8"
|
syn = { version = "1.0.11", features = ["full"] }
|
||||||
syn = { version = "0.15", features = ["full", "extra-traits"] }
|
quote = "1.0.2"
|
||||||
quote = "0.6"
|
proc-macro2 = "1.0.6"
|
||||||
proc-macro2 = "0.4"
|
|
||||||
|
|
||||||
[lib]
|
[lib]
|
||||||
proc-macro = true
|
proc-macro = true
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
futures = "0.3"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
tarpc = { path = "../tarpc" }
|
||||||
|
assert-type-eq = "0.1.0"
|
||||||
|
|||||||
@@ -4,87 +4,801 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
extern crate itertools;
|
#![recursion_limit = "512"]
|
||||||
|
|
||||||
extern crate proc_macro;
|
extern crate proc_macro;
|
||||||
extern crate proc_macro2;
|
extern crate proc_macro2;
|
||||||
extern crate quote;
|
extern crate quote;
|
||||||
extern crate syn;
|
extern crate syn;
|
||||||
|
|
||||||
use proc_macro::TokenStream;
|
use proc_macro::TokenStream;
|
||||||
|
use proc_macro2::{Span, TokenStream as TokenStream2};
|
||||||
|
use quote::{format_ident, quote, ToTokens};
|
||||||
|
use syn::{
|
||||||
|
braced,
|
||||||
|
ext::IdentExt,
|
||||||
|
parenthesized,
|
||||||
|
parse::{Parse, ParseStream},
|
||||||
|
parse_macro_input, parse_quote, parse_str,
|
||||||
|
spanned::Spanned,
|
||||||
|
token::Comma,
|
||||||
|
Attribute, FnArg, Ident, ImplItem, ImplItemMethod, ImplItemType, ItemImpl, Lit, LitBool,
|
||||||
|
MetaNameValue, Pat, PatType, ReturnType, Token, Type, Visibility,
|
||||||
|
};
|
||||||
|
|
||||||
use itertools::Itertools;
|
/// Accumulates multiple errors into a result.
|
||||||
use proc_macro2::Span;
|
/// Only use this for recoverable errors, i.e. non-parse errors. Fatal errors should early exit to
|
||||||
use quote::ToTokens;
|
/// avoid further complications.
|
||||||
use std::str::FromStr;
|
macro_rules! extend_errors {
|
||||||
use syn::{parse, Ident, TraitItemType, TypePath};
|
($errors: ident, $e: expr) => {
|
||||||
|
match $errors {
|
||||||
|
Ok(_) => $errors = Err($e),
|
||||||
|
Err(ref mut errors) => errors.extend($e),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
#[proc_macro]
|
struct Service {
|
||||||
pub fn snake_to_camel(input: TokenStream) -> TokenStream {
|
attrs: Vec<Attribute>,
|
||||||
let i = input.clone();
|
vis: Visibility,
|
||||||
let mut assoc_type = parse::<TraitItemType>(input)
|
ident: Ident,
|
||||||
.unwrap_or_else(|_| panic!("Could not parse trait item from:\n{}", i));
|
rpcs: Vec<RpcMethod>,
|
||||||
|
}
|
||||||
|
|
||||||
let old_ident = convert(&mut assoc_type.ident);
|
struct RpcMethod {
|
||||||
|
attrs: Vec<Attribute>,
|
||||||
|
ident: Ident,
|
||||||
|
args: Vec<PatType>,
|
||||||
|
output: ReturnType,
|
||||||
|
}
|
||||||
|
|
||||||
for mut attr in &mut assoc_type.attrs {
|
impl Parse for Service {
|
||||||
if let Some(pair) = attr.path.segments.first() {
|
fn parse(input: ParseStream) -> syn::Result<Self> {
|
||||||
if pair.value().ident == "doc" {
|
let attrs = input.call(Attribute::parse_outer)?;
|
||||||
attr.tts = proc_macro2::TokenStream::from_str(
|
let vis = input.parse()?;
|
||||||
&attr.tts.to_string().replace("{}", &old_ident),
|
input.parse::<Token![trait]>()?;
|
||||||
)
|
let ident: Ident = input.parse()?;
|
||||||
.unwrap();
|
let content;
|
||||||
|
braced!(content in input);
|
||||||
|
let mut rpcs = Vec::<RpcMethod>::new();
|
||||||
|
while !content.is_empty() {
|
||||||
|
rpcs.push(content.parse()?);
|
||||||
|
}
|
||||||
|
let mut ident_errors = Ok(());
|
||||||
|
for rpc in &rpcs {
|
||||||
|
if rpc.ident == "new" {
|
||||||
|
extend_errors!(
|
||||||
|
ident_errors,
|
||||||
|
syn::Error::new(
|
||||||
|
rpc.ident.span(),
|
||||||
|
format!(
|
||||||
|
"method name conflicts with generated fn `{}Client::new`",
|
||||||
|
ident.unraw()
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
if rpc.ident == "serve" {
|
||||||
|
extend_errors!(
|
||||||
|
ident_errors,
|
||||||
|
syn::Error::new(
|
||||||
|
rpc.ident.span(),
|
||||||
|
format!("method name conflicts with generated fn `{}::serve`", ident)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ident_errors?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
attrs,
|
||||||
|
vis,
|
||||||
|
ident,
|
||||||
|
rpcs,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Parse for RpcMethod {
|
||||||
|
fn parse(input: ParseStream) -> syn::Result<Self> {
|
||||||
|
let attrs = input.call(Attribute::parse_outer)?;
|
||||||
|
input.parse::<Token![async]>()?;
|
||||||
|
input.parse::<Token![fn]>()?;
|
||||||
|
let ident = input.parse()?;
|
||||||
|
let content;
|
||||||
|
parenthesized!(content in input);
|
||||||
|
let mut args = Vec::new();
|
||||||
|
let mut errors = Ok(());
|
||||||
|
for arg in content.parse_terminated::<FnArg, Comma>(FnArg::parse)? {
|
||||||
|
match arg {
|
||||||
|
FnArg::Typed(captured) if matches!(&*captured.pat, Pat::Ident(_)) => {
|
||||||
|
args.push(captured);
|
||||||
|
}
|
||||||
|
FnArg::Typed(captured) => {
|
||||||
|
extend_errors!(
|
||||||
|
errors,
|
||||||
|
syn::Error::new(captured.pat.span(), "patterns aren't allowed in RPC args")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
FnArg::Receiver(_) => {
|
||||||
|
extend_errors!(
|
||||||
|
errors,
|
||||||
|
syn::Error::new(arg.span(), "method args cannot start with self")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
errors?;
|
||||||
|
let output = input.parse()?;
|
||||||
|
input.parse::<Token![;]>()?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
attrs,
|
||||||
|
ident,
|
||||||
|
args,
|
||||||
|
output,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If `derive_serde` meta item is not present, defaults to cfg!(feature = "serde1").
|
||||||
|
// `derive_serde` can only be true when serde1 is enabled.
|
||||||
|
struct DeriveSerde(bool);
|
||||||
|
|
||||||
|
impl Parse for DeriveSerde {
|
||||||
|
fn parse(input: ParseStream) -> syn::Result<Self> {
|
||||||
|
let mut result = Ok(None);
|
||||||
|
let mut derive_serde = Vec::new();
|
||||||
|
let meta_items = input.parse_terminated::<MetaNameValue, Comma>(MetaNameValue::parse)?;
|
||||||
|
for meta in meta_items {
|
||||||
|
if meta.path.segments.len() != 1 {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.span(),
|
||||||
|
"tarpc::service does not support this meta item"
|
||||||
|
)
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let segment = meta.path.segments.first().unwrap();
|
||||||
|
if segment.ident != "derive_serde" {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.span(),
|
||||||
|
"tarpc::service does not support this meta item"
|
||||||
|
)
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
match meta.lit {
|
||||||
|
Lit::Bool(LitBool { value: true, .. }) if cfg!(feature = "serde1") => {
|
||||||
|
result = result.and(Ok(Some(true)))
|
||||||
|
}
|
||||||
|
Lit::Bool(LitBool { value: true, .. }) => {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.span(),
|
||||||
|
"To enable serde, first enable the `serde1` feature of tarpc"
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Lit::Bool(LitBool { value: false, .. }) => result = result.and(Ok(Some(false))),
|
||||||
|
_ => extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.lit.span(),
|
||||||
|
"`derive_serde` expects a value of type `bool`"
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
derive_serde.push(meta);
|
||||||
|
}
|
||||||
|
if derive_serde.len() > 1 {
|
||||||
|
for (i, derive_serde) in derive_serde.iter().enumerate() {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
derive_serde.span(),
|
||||||
|
format!(
|
||||||
|
"`derive_serde` appears more than once (occurrence #{})",
|
||||||
|
i + 1
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let derive_serde = result?.unwrap_or(cfg!(feature = "serde1"));
|
||||||
|
Ok(Self(derive_serde))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generates:
|
||||||
|
/// - service trait
|
||||||
|
/// - serve fn
|
||||||
|
/// - client stub struct
|
||||||
|
/// - new_stub client factory fn
|
||||||
|
/// - Request and Response enums
|
||||||
|
/// - ResponseFut Future
|
||||||
|
#[proc_macro_attribute]
|
||||||
|
pub fn service(attr: TokenStream, input: TokenStream) -> TokenStream {
|
||||||
|
let derive_serde = parse_macro_input!(attr as DeriveSerde);
|
||||||
|
let unit_type: &Type = &parse_quote!(());
|
||||||
|
let Service {
|
||||||
|
ref attrs,
|
||||||
|
ref vis,
|
||||||
|
ref ident,
|
||||||
|
ref rpcs,
|
||||||
|
} = parse_macro_input!(input as Service);
|
||||||
|
|
||||||
|
let camel_case_fn_names: &Vec<_> = &rpcs
|
||||||
|
.iter()
|
||||||
|
.map(|rpc| snake_to_camel(&rpc.ident.unraw().to_string()))
|
||||||
|
.collect();
|
||||||
|
let args: &[&[PatType]] = &rpcs.iter().map(|rpc| &*rpc.args).collect::<Vec<_>>();
|
||||||
|
let response_fut_name = &format!("{}ResponseFut", ident.unraw());
|
||||||
|
let derive_serialize = if derive_serde.0 {
|
||||||
|
Some(quote!(#[derive(serde::Serialize, serde::Deserialize)]))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
ServiceGenerator {
|
||||||
|
response_fut_name,
|
||||||
|
service_ident: ident,
|
||||||
|
server_ident: &format_ident!("Serve{}", ident),
|
||||||
|
response_fut_ident: &Ident::new(&response_fut_name, ident.span()),
|
||||||
|
client_ident: &format_ident!("{}Client", ident),
|
||||||
|
request_ident: &format_ident!("{}Request", ident),
|
||||||
|
response_ident: &format_ident!("{}Response", ident),
|
||||||
|
vis,
|
||||||
|
args,
|
||||||
|
method_attrs: &rpcs.iter().map(|rpc| &*rpc.attrs).collect::<Vec<_>>(),
|
||||||
|
method_idents: &rpcs.iter().map(|rpc| &rpc.ident).collect::<Vec<_>>(),
|
||||||
|
attrs,
|
||||||
|
rpcs,
|
||||||
|
return_types: &rpcs
|
||||||
|
.iter()
|
||||||
|
.map(|rpc| match rpc.output {
|
||||||
|
ReturnType::Type(_, ref ty) => ty,
|
||||||
|
ReturnType::Default => unit_type,
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
arg_pats: &args
|
||||||
|
.iter()
|
||||||
|
.map(|args| args.iter().map(|arg| &*arg.pat).collect())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
camel_case_idents: &rpcs
|
||||||
|
.iter()
|
||||||
|
.zip(camel_case_fn_names.iter())
|
||||||
|
.map(|(rpc, name)| Ident::new(name, rpc.ident.span()))
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
future_types: &camel_case_fn_names
|
||||||
|
.iter()
|
||||||
|
.map(|name| parse_str(&format!("{}Fut", name)).unwrap())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
derive_serialize: derive_serialize.as_ref(),
|
||||||
|
}
|
||||||
|
.into_token_stream()
|
||||||
|
.into()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// generate an identifier consisting of the method name to CamelCase with
|
||||||
|
/// Fut appended to it.
|
||||||
|
fn associated_type_for_rpc(method: &ImplItemMethod) -> String {
|
||||||
|
snake_to_camel(&method.sig.ident.unraw().to_string()) + "Fut"
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Transforms an async function into a sync one, returning a type declaration
|
||||||
|
/// for the return type (a future).
|
||||||
|
fn transform_method(method: &mut ImplItemMethod) -> ImplItemType {
|
||||||
|
method.sig.asyncness = None;
|
||||||
|
|
||||||
|
// get either the return type or ().
|
||||||
|
let ret = match &method.sig.output {
|
||||||
|
ReturnType::Default => quote!(()),
|
||||||
|
ReturnType::Type(_, ret) => quote!(#ret),
|
||||||
|
};
|
||||||
|
|
||||||
|
let fut_name = associated_type_for_rpc(method);
|
||||||
|
let fut_name_ident = Ident::new(&fut_name, method.sig.ident.span());
|
||||||
|
|
||||||
|
// generate the updated return signature.
|
||||||
|
method.sig.output = parse_quote! {
|
||||||
|
-> ::core::pin::Pin<Box<
|
||||||
|
dyn ::core::future::Future<Output = #ret> + ::core::marker::Send
|
||||||
|
>>
|
||||||
|
};
|
||||||
|
|
||||||
|
// transform the body of the method into Box::pin(async move { body }).
|
||||||
|
let block = method.block.clone();
|
||||||
|
method.block = parse_quote! [{
|
||||||
|
Box::pin(async move
|
||||||
|
#block
|
||||||
|
)
|
||||||
|
}];
|
||||||
|
|
||||||
|
// generate and return type declaration for return type.
|
||||||
|
let t: ImplItemType = parse_quote! {
|
||||||
|
type #fut_name_ident = ::core::pin::Pin<Box<dyn ::core::future::Future<Output = #ret> + ::core::marker::Send>>;
|
||||||
|
};
|
||||||
|
|
||||||
|
t
|
||||||
|
}
|
||||||
|
|
||||||
|
#[proc_macro_attribute]
|
||||||
|
pub fn server(_attr: TokenStream, input: TokenStream) -> TokenStream {
|
||||||
|
let mut item = syn::parse_macro_input!(input as ItemImpl);
|
||||||
|
let span = item.span();
|
||||||
|
|
||||||
|
// the generated type declarations
|
||||||
|
let mut types: Vec<ImplItemType> = Vec::new();
|
||||||
|
let mut expected_non_async_types: Vec<(&ImplItemMethod, String)> = Vec::new();
|
||||||
|
let mut found_non_async_types: Vec<&ImplItemType> = Vec::new();
|
||||||
|
|
||||||
|
for inner in &mut item.items {
|
||||||
|
match inner {
|
||||||
|
ImplItem::Method(method) => {
|
||||||
|
if method.sig.asyncness.is_some() {
|
||||||
|
// if this function is declared async, transform it into a regular function
|
||||||
|
let typedecl = transform_method(method);
|
||||||
|
types.push(typedecl);
|
||||||
|
} else {
|
||||||
|
// If it's not async, keep track of all required associated types for better
|
||||||
|
// error reporting.
|
||||||
|
expected_non_async_types.push((method, associated_type_for_rpc(method)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ImplItem::Type(typedecl) => found_non_async_types.push(typedecl),
|
||||||
|
_ => {}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
assoc_type.into_token_stream().into()
|
if let Err(e) =
|
||||||
}
|
verify_types_were_provided(span, &expected_non_async_types, &found_non_async_types)
|
||||||
|
|
||||||
#[proc_macro]
|
|
||||||
pub fn ty_snake_to_camel(input: TokenStream) -> TokenStream {
|
|
||||||
let mut path = parse::<TypePath>(input).unwrap();
|
|
||||||
|
|
||||||
// Only capitalize the final segment
|
|
||||||
convert(&mut path.path.segments.last_mut().unwrap().into_value().ident);
|
|
||||||
|
|
||||||
path.into_token_stream().into()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Converts an ident in-place to CamelCase and returns the previous ident.
|
|
||||||
fn convert(ident: &mut Ident) -> String {
|
|
||||||
let ident_str = ident.to_string();
|
|
||||||
let mut camel_ty = String::new();
|
|
||||||
|
|
||||||
{
|
{
|
||||||
// Find the first non-underscore and add it capitalized.
|
return TokenStream::from(e.to_compile_error());
|
||||||
let mut chars = ident_str.chars();
|
}
|
||||||
|
|
||||||
// Find the first non-underscore char, uppercase it, and append it.
|
// add the type declarations into the impl block
|
||||||
// Guaranteed to succeed because all idents must have at least one non-underscore char.
|
for t in types.into_iter() {
|
||||||
camel_ty.extend(chars.find(|&c| c != '_').unwrap().to_uppercase());
|
item.items.push(syn::ImplItem::Type(t));
|
||||||
|
}
|
||||||
|
|
||||||
// When we find an underscore, we remove it and capitalize the next char. To do this,
|
TokenStream::from(quote!(#item))
|
||||||
// we need to ensure the next char is not another underscore.
|
}
|
||||||
let mut chars = chars.coalesce(|c1, c2| {
|
|
||||||
if c1 == '_' && c2 == '_' {
|
fn verify_types_were_provided(
|
||||||
Ok(c1)
|
span: Span,
|
||||||
} else {
|
expected: &[(&ImplItemMethod, String)],
|
||||||
Err((c1, c2))
|
provided: &[&ImplItemType],
|
||||||
|
) -> syn::Result<()> {
|
||||||
|
let mut result = Ok(());
|
||||||
|
for (method, expected) in expected {
|
||||||
|
if provided
|
||||||
|
.iter()
|
||||||
|
.find(|typedecl| typedecl.ident == expected)
|
||||||
|
.is_none()
|
||||||
|
{
|
||||||
|
let mut e = syn::Error::new(
|
||||||
|
span,
|
||||||
|
format!("not all trait items implemented, missing: `{}`", expected),
|
||||||
|
);
|
||||||
|
let fn_span = method.sig.fn_token.span();
|
||||||
|
e.extend(syn::Error::new(
|
||||||
|
fn_span.join(method.sig.ident.span()).unwrap_or(fn_span),
|
||||||
|
format!(
|
||||||
|
"hint: `#[tarpc::server]` only rewrites async fns, and `fn {}` is not async",
|
||||||
|
method.sig.ident
|
||||||
|
),
|
||||||
|
));
|
||||||
|
match result {
|
||||||
|
Ok(_) => result = Err(e),
|
||||||
|
Err(ref mut error) => error.extend(Some(e)),
|
||||||
}
|
}
|
||||||
});
|
}
|
||||||
|
}
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
while let Some(c) = chars.next() {
|
// Things needed to generate the service items: trait, serve impl, request/response enums, and
|
||||||
if c != '_' {
|
// the client stub.
|
||||||
camel_ty.push(c);
|
struct ServiceGenerator<'a> {
|
||||||
} else if let Some(c) = chars.next() {
|
service_ident: &'a Ident,
|
||||||
camel_ty.extend(c.to_uppercase());
|
server_ident: &'a Ident,
|
||||||
|
response_fut_ident: &'a Ident,
|
||||||
|
response_fut_name: &'a str,
|
||||||
|
client_ident: &'a Ident,
|
||||||
|
request_ident: &'a Ident,
|
||||||
|
response_ident: &'a Ident,
|
||||||
|
vis: &'a Visibility,
|
||||||
|
attrs: &'a [Attribute],
|
||||||
|
rpcs: &'a [RpcMethod],
|
||||||
|
camel_case_idents: &'a [Ident],
|
||||||
|
future_types: &'a [Type],
|
||||||
|
method_idents: &'a [&'a Ident],
|
||||||
|
method_attrs: &'a [&'a [Attribute]],
|
||||||
|
args: &'a [&'a [PatType]],
|
||||||
|
return_types: &'a [&'a Type],
|
||||||
|
arg_pats: &'a [Vec<&'a Pat>],
|
||||||
|
derive_serialize: Option<&'a TokenStream2>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> ServiceGenerator<'a> {
|
||||||
|
fn trait_service(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
attrs,
|
||||||
|
rpcs,
|
||||||
|
vis,
|
||||||
|
future_types,
|
||||||
|
return_types,
|
||||||
|
service_ident,
|
||||||
|
server_ident,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
let types_and_fns = rpcs
|
||||||
|
.iter()
|
||||||
|
.zip(future_types.iter())
|
||||||
|
.zip(return_types.iter())
|
||||||
|
.map(
|
||||||
|
|(
|
||||||
|
(
|
||||||
|
RpcMethod {
|
||||||
|
attrs, ident, args, ..
|
||||||
|
},
|
||||||
|
future_type,
|
||||||
|
),
|
||||||
|
output,
|
||||||
|
)| {
|
||||||
|
let ty_doc = format!("The response future returned by {}.", ident);
|
||||||
|
quote! {
|
||||||
|
#[doc = #ty_doc]
|
||||||
|
type #future_type: std::future::Future<Output = #output>;
|
||||||
|
|
||||||
|
#( #attrs )*
|
||||||
|
fn #ident(self, context: tarpc::context::Context, #( #args ),*) -> Self::#future_type;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
#( #attrs )*
|
||||||
|
#vis trait #service_ident: Clone {
|
||||||
|
#( #types_and_fns )*
|
||||||
|
|
||||||
|
/// Returns a serving function to use with [tarpc::server::Channel::respond_with].
|
||||||
|
fn serve(self) -> #server_ident<Self> {
|
||||||
|
#server_ident { service: self }
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The Fut suffix is hardcoded right now; this macro isn't really meant to be general-purpose.
|
fn struct_server(&self) -> TokenStream2 {
|
||||||
camel_ty.push_str("Fut");
|
let &Self {
|
||||||
|
vis, server_ident, ..
|
||||||
|
} = self;
|
||||||
|
|
||||||
*ident = Ident::new(&camel_ty, Span::call_site());
|
quote! {
|
||||||
ident_str
|
/// A serving function to use with [tarpc::server::Channel::respond_with].
|
||||||
|
#[derive(Clone)]
|
||||||
|
#vis struct #server_ident<S> {
|
||||||
|
service: S,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_serve_for_server(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
request_ident,
|
||||||
|
server_ident,
|
||||||
|
service_ident,
|
||||||
|
response_ident,
|
||||||
|
response_fut_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
arg_pats,
|
||||||
|
method_idents,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl<S> tarpc::server::Serve<#request_ident> for #server_ident<S>
|
||||||
|
where S: #service_ident
|
||||||
|
{
|
||||||
|
type Resp = #response_ident;
|
||||||
|
type Fut = #response_fut_ident<S>;
|
||||||
|
|
||||||
|
fn serve(self, ctx: tarpc::context::Context, req: #request_ident) -> Self::Fut {
|
||||||
|
match req {
|
||||||
|
#(
|
||||||
|
#request_ident::#camel_case_idents{ #( #arg_pats ),* } => {
|
||||||
|
#response_fut_ident::#camel_case_idents(
|
||||||
|
#service_ident::#method_idents(
|
||||||
|
self.service, ctx, #( #arg_pats ),*
|
||||||
|
)
|
||||||
|
)
|
||||||
|
}
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn enum_request(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
derive_serialize,
|
||||||
|
vis,
|
||||||
|
request_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
args,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
/// The request sent over the wire from the client to the server.
|
||||||
|
#[derive(Debug)]
|
||||||
|
#derive_serialize
|
||||||
|
#vis enum #request_ident {
|
||||||
|
#( #camel_case_idents{ #( #args ),* } ),*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn enum_response(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
derive_serialize,
|
||||||
|
vis,
|
||||||
|
response_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
return_types,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
/// The response sent over the wire from the server to the client.
|
||||||
|
#[derive(Debug)]
|
||||||
|
#derive_serialize
|
||||||
|
#vis enum #response_ident {
|
||||||
|
#( #camel_case_idents(#return_types) ),*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn enum_response_future(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
vis,
|
||||||
|
service_ident,
|
||||||
|
response_fut_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
future_types,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
/// A future resolving to a server response.
|
||||||
|
#vis enum #response_fut_ident<S: #service_ident> {
|
||||||
|
#( #camel_case_idents(<S as #service_ident>::#future_types) ),*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_debug_for_response_future(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
service_ident,
|
||||||
|
response_fut_ident,
|
||||||
|
response_fut_name,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl<S: #service_ident> std::fmt::Debug for #response_fut_ident<S> {
|
||||||
|
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||||
|
fmt.debug_struct(#response_fut_name).finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_future_for_response_future(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
service_ident,
|
||||||
|
response_fut_ident,
|
||||||
|
response_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl<S: #service_ident> std::future::Future for #response_fut_ident<S> {
|
||||||
|
type Output = #response_ident;
|
||||||
|
|
||||||
|
fn poll(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>)
|
||||||
|
-> std::task::Poll<#response_ident>
|
||||||
|
{
|
||||||
|
unsafe {
|
||||||
|
match std::pin::Pin::get_unchecked_mut(self) {
|
||||||
|
#(
|
||||||
|
#response_fut_ident::#camel_case_idents(resp) =>
|
||||||
|
std::pin::Pin::new_unchecked(resp)
|
||||||
|
.poll(cx)
|
||||||
|
.map(#response_ident::#camel_case_idents),
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn struct_client(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
vis,
|
||||||
|
client_ident,
|
||||||
|
request_ident,
|
||||||
|
response_ident,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
#[allow(unused)]
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
/// The client stub that makes RPC calls to the server. Exposes a Future interface.
|
||||||
|
#vis struct #client_ident<C = tarpc::client::Channel<#request_ident, #response_ident>>(C);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_from_for_client(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
client_ident,
|
||||||
|
request_ident,
|
||||||
|
response_ident,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl<C> From<C> for #client_ident<C>
|
||||||
|
where for <'a> C: tarpc::Client<'a, #request_ident, Response = #response_ident>
|
||||||
|
{
|
||||||
|
fn from(client: C) -> Self {
|
||||||
|
#client_ident(client)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_client_new(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
client_ident,
|
||||||
|
vis,
|
||||||
|
request_ident,
|
||||||
|
response_ident,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl #client_ident {
|
||||||
|
/// Returns a new client stub that sends requests over the given transport.
|
||||||
|
#vis fn new<T>(config: tarpc::client::Config, transport: T)
|
||||||
|
-> tarpc::client::NewClient<
|
||||||
|
Self,
|
||||||
|
tarpc::client::channel::RequestDispatch<#request_ident, #response_ident, T>
|
||||||
|
>
|
||||||
|
where
|
||||||
|
T: tarpc::Transport<tarpc::ClientMessage<#request_ident>, tarpc::Response<#response_ident>>
|
||||||
|
{
|
||||||
|
let new_client = tarpc::client::new(config, transport);
|
||||||
|
tarpc::client::NewClient {
|
||||||
|
client: #client_ident(new_client.client),
|
||||||
|
dispatch: new_client.dispatch,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generates one client stub method per RPC, each building the request variant,
/// dispatching it over the channel, and unwrapping the matching response variant.
fn impl_client_rpc_methods(&self) -> TokenStream2 {
    let &Self {
        client_ident,
        request_ident,
        response_ident,
        method_attrs,
        vis,
        method_idents,
        args,
        return_types,
        arg_pats,
        camel_case_idents,
        ..
    } = self;

    quote! {
        impl<C> #client_ident<C>
            where for<'a> C: tarpc::Client<'a, #request_ident, Response = #response_ident>
        {
            #(
                #[allow(unused)]
                #( #method_attrs )*
                #vis fn #method_idents(&mut self, ctx: tarpc::context::Context, #( #args ),*)
                    -> impl std::future::Future<Output = std::io::Result<#return_types>> + '_ {
                    let request = #request_ident::#camel_case_idents { #( #arg_pats ),* };
                    let resp = tarpc::Client::call(&mut self.0, ctx, request);
                    async move {
                        match resp.await? {
                            // The server always replies with the variant matching the request.
                            #response_ident::#camel_case_idents(msg) => std::result::Result::Ok(msg),
                            _ => unreachable!(),
                        }
                    }
                }
            )*
        }
    }
}
}
|
||||||
|
|
||||||
|
impl<'a> ToTokens for ServiceGenerator<'a> {
|
||||||
|
fn to_tokens(&self, output: &mut TokenStream2) {
|
||||||
|
output.extend(vec![
|
||||||
|
self.trait_service(),
|
||||||
|
self.struct_server(),
|
||||||
|
self.impl_serve_for_server(),
|
||||||
|
self.enum_request(),
|
||||||
|
self.enum_response(),
|
||||||
|
self.enum_response_future(),
|
||||||
|
self.impl_debug_for_response_future(),
|
||||||
|
self.impl_future_for_response_future(),
|
||||||
|
self.struct_client(),
|
||||||
|
self.impl_from_for_client(),
|
||||||
|
self.impl_client_new(),
|
||||||
|
self.impl_client_rpc_methods(),
|
||||||
|
])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts a snake_case identifier string to CamelCase, dropping underscores
/// and lowercasing all characters except those that start a new word.
fn snake_to_camel(ident_str: &str) -> String {
    let mut camel_ty = String::with_capacity(ident_str.len());

    // Treat the start of the string like a word boundary so the first char is uppercased.
    let mut last_char_was_underscore = true;
    for c in ident_str.chars() {
        match c {
            '_' => last_char_was_underscore = true,
            c if last_char_was_underscore => {
                camel_ty.extend(c.to_uppercase());
                last_char_was_underscore = false;
            }
            c => camel_ty.extend(c.to_lowercase()),
        }
    }

    // Dropped underscores usually leave excess capacity.
    camel_ty.shrink_to_fit();
    camel_ty
}
|
||||||
|
|
||||||
|
#[test]
fn snake_to_camel_basic() {
    assert_eq!(snake_to_camel("abc_def"), "AbcDef");
}

#[test]
fn snake_to_camel_underscore_suffix() {
    assert_eq!(snake_to_camel("abc_def_"), "AbcDef");
}

#[test]
fn snake_to_camel_underscore_prefix() {
    assert_eq!(snake_to_camel("_abc_def"), "AbcDef");
}

#[test]
fn snake_to_camel_underscore_consecutive() {
    assert_eq!(snake_to_camel("abc__def"), "AbcDef");
}

#[test]
fn snake_to_camel_capital_in_middle() {
    assert_eq!(snake_to_camel("aBc_dEf"), "AbcDef");
}
|
||||||
|
|||||||
144
plugins/tests/server.rs
Normal file
144
plugins/tests/server.rs
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
use assert_type_eq::assert_type_eq;
|
||||||
|
use futures::Future;
|
||||||
|
use std::pin::Pin;
|
||||||
|
use tarpc::context;
|
||||||
|
|
||||||
|
// these need to be out here rather than inside the function so that the
|
||||||
|
// assert_type_eq macro can pick them up.
|
||||||
|
#[tarpc::service]
|
||||||
|
trait Foo {
|
||||||
|
async fn two_part(s: String, i: i32) -> (String, i32);
|
||||||
|
async fn bar(s: String) -> String;
|
||||||
|
async fn baz();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Verifies that `#[tarpc::server]` rewrites async fns into the expected
/// `Pin<Box<dyn Future …>>` associated types.
#[test]
fn type_generation_works() {
    #[tarpc::server]
    impl Foo for () {
        async fn two_part(self, _: context::Context, s: String, i: i32) -> (String, i32) {
            (s, i)
        }

        async fn bar(self, _: context::Context, s: String) -> String {
            s
        }

        async fn baz(self, _: context::Context) {}
    }

    // the assert_type_eq macro can only be used once per block.
    {
        assert_type_eq!(
            <() as Foo>::TwoPartFut,
            Pin<Box<dyn Future<Output = (String, i32)> + Send>>
        );
    }
    {
        assert_type_eq!(
            <() as Foo>::BarFut,
            Pin<Box<dyn Future<Output = String> + Send>>
        );
    }
    {
        assert_type_eq!(
            <() as Foo>::BazFut,
            Pin<Box<dyn Future<Output = ()> + Send>>
        );
    }
}
|
||||||
|
|
||||||
|
/// Verifies that raw identifiers (`r#…`) are accepted in service and server macros.
#[allow(non_camel_case_types)]
#[test]
fn raw_idents_work() {
    type r#yield = String;

    #[tarpc::service]
    trait r#trait {
        async fn r#await(r#struct: r#yield, r#enum: i32) -> (r#yield, i32);
        async fn r#fn(r#impl: r#yield) -> r#yield;
        async fn r#async();
    }

    #[tarpc::server]
    impl r#trait for () {
        async fn r#await(
            self,
            _: context::Context,
            r#struct: r#yield,
            r#enum: i32,
        ) -> (r#yield, i32) {
            (r#struct, r#enum)
        }

        async fn r#fn(self, _: context::Context, r#impl: r#yield) -> r#yield {
            r#impl
        }

        async fn r#async(self, _: context::Context) {}
    }
}
|
||||||
|
|
||||||
|
/// Exercises the full range of accepted service/server syntax: attributes,
/// implicit and explicit unit returns, and varying argument counts.
#[test]
fn syntax() {
    #[tarpc::service]
    trait Syntax {
        #[deny(warnings)]
        #[allow(non_snake_case)]
        async fn TestCamelCaseDoesntConflict();
        async fn hello() -> String;
        #[doc = "attr"]
        async fn attr(s: String) -> String;
        async fn no_args_no_return();
        async fn no_args() -> ();
        async fn one_arg(one: String) -> i32;
        async fn two_args_no_return(one: String, two: u64);
        async fn two_args(one: String, two: u64) -> String;
        async fn no_args_ret_error() -> i32;
        async fn one_arg_ret_error(one: String) -> String;
        async fn no_arg_implicit_return_error();
        #[doc = "attr"]
        async fn one_arg_implicit_return_error(one: String);
    }

    #[tarpc::server]
    impl Syntax for () {
        #[deny(warnings)]
        #[allow(non_snake_case)]
        async fn TestCamelCaseDoesntConflict(self, _: context::Context) {}

        async fn hello(self, _: context::Context) -> String {
            String::new()
        }

        async fn attr(self, _: context::Context, _s: String) -> String {
            String::new()
        }

        async fn no_args_no_return(self, _: context::Context) {}

        async fn no_args(self, _: context::Context) -> () {}

        async fn one_arg(self, _: context::Context, _one: String) -> i32 {
            0
        }

        async fn two_args_no_return(self, _: context::Context, _one: String, _two: u64) {}

        async fn two_args(self, _: context::Context, _one: String, _two: u64) -> String {
            String::new()
        }

        async fn no_args_ret_error(self, _: context::Context) -> i32 {
            0
        }

        async fn one_arg_ret_error(self, _: context::Context, _one: String) -> String {
            String::new()
        }

        async fn no_arg_implicit_return_error(self, _: context::Context) {}

        async fn one_arg_implicit_return_error(self, _: context::Context, _one: String) {}
    }
}
|
||||||
85
plugins/tests/service.rs
Normal file
85
plugins/tests/service.rs
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
use tarpc::context;
|
||||||
|
|
||||||
|
/// Verifies that a hand-written impl of a `#[tarpc::service]` trait compiles,
/// providing the associated future types explicitly.
#[test]
fn att_service_trait() {
    use futures::future::{ready, Ready};

    #[tarpc::service]
    trait Foo {
        async fn two_part(s: String, i: i32) -> (String, i32);
        async fn bar(s: String) -> String;
        async fn baz();
    }

    impl Foo for () {
        type TwoPartFut = Ready<(String, i32)>;
        fn two_part(self, _: context::Context, s: String, i: i32) -> Self::TwoPartFut {
            ready((s, i))
        }

        type BarFut = Ready<String>;
        fn bar(self, _: context::Context, s: String) -> Self::BarFut {
            ready(s)
        }

        type BazFut = Ready<()>;
        fn baz(self, _: context::Context) -> Self::BazFut {
            ready(())
        }
    }
}
|
||||||
|
|
||||||
|
/// Verifies that raw identifiers work with a manual (non-macro) trait impl.
#[allow(non_camel_case_types)]
#[test]
fn raw_idents() {
    use futures::future::{ready, Ready};

    type r#yield = String;

    #[tarpc::service]
    trait r#trait {
        async fn r#await(r#struct: r#yield, r#enum: i32) -> (r#yield, i32);
        async fn r#fn(r#impl: r#yield) -> r#yield;
        async fn r#async();
    }

    impl r#trait for () {
        type AwaitFut = Ready<(r#yield, i32)>;
        fn r#await(self, _: context::Context, r#struct: r#yield, r#enum: i32) -> Self::AwaitFut {
            ready((r#struct, r#enum))
        }

        type FnFut = Ready<r#yield>;
        fn r#fn(self, _: context::Context, r#impl: r#yield) -> Self::FnFut {
            ready(r#impl)
        }

        type AsyncFut = Ready<()>;
        fn r#async(self, _: context::Context) -> Self::AsyncFut {
            ready(())
        }
    }
}
|
||||||
|
|
||||||
|
/// Exercises the accepted `#[tarpc::service]` trait syntax without providing an impl.
#[test]
fn syntax() {
    #[tarpc::service]
    trait Syntax {
        #[deny(warnings)]
        #[allow(non_snake_case)]
        async fn TestCamelCaseDoesntConflict();
        async fn hello() -> String;
        #[doc = "attr"]
        async fn attr(s: String) -> String;
        async fn no_args_no_return();
        async fn no_args() -> ();
        async fn one_arg(one: String) -> i32;
        async fn two_args_no_return(one: String, two: u64);
        async fn two_args(one: String, two: u64) -> String;
        async fn no_args_ret_error() -> i32;
        async fn one_arg_ret_error(one: String) -> String;
        async fn no_arg_implicit_return_error();
        #[doc = "attr"]
        async fn one_arg_implicit_return_error(one: String);
    }
}
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
[package]
name = "tarpc-lib"
version = "0.6.0"
authors = ["Tim Kuehn <tikue@google.com>"]
edition = '2018'
license = "MIT"
documentation = "https://docs.rs/tarpc-lib"
homepage = "https://github.com/google/tarpc"
repository = "https://github.com/google/tarpc"
keywords = ["rpc", "network", "server", "api", "microservices"]
categories = ["asynchronous", "network-programming"]
readme = "../README.md"
description = "An RPC framework for Rust with a focus on ease of use."

[features]
default = []
# serde1 enables serialization support throughout the crate and its trace dependency.
serde1 = ["trace/serde", "serde", "serde/derive"]

[dependencies]
fnv = "1.0"
futures-preview = { version = "0.3.0-alpha.16", features = ["compat"] }
humantime = "1.0"
log = "0.4"
pin-utils = "0.1.0-alpha.4"
rand = "0.6"
tokio-timer = "0.2"
trace = { package = "tarpc-trace", version = "0.2", path = "../trace" }
serde = { optional = true, version = "1.0" }

[dev-dependencies]
futures-test-preview = { version = "0.3.0-alpha.16" }
env_logger = "0.6"
tokio = "0.1"
||||||
@@ -1 +0,0 @@
|
|||||||
edition = "2018"
|
|
||||||
@@ -1,266 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
server::{Channel, Config},
|
|
||||||
util::Compact,
|
|
||||||
ClientMessage, PollIo, Response, Transport,
|
|
||||||
};
|
|
||||||
use fnv::FnvHashMap;
|
|
||||||
use futures::{
|
|
||||||
channel::mpsc,
|
|
||||||
prelude::*,
|
|
||||||
ready,
|
|
||||||
stream::Fuse,
|
|
||||||
task::{Context, Poll},
|
|
||||||
};
|
|
||||||
use log::{debug, error, info, trace, warn};
|
|
||||||
use pin_utils::unsafe_pinned;
|
|
||||||
use std::{
|
|
||||||
collections::hash_map::Entry,
|
|
||||||
io,
|
|
||||||
marker::PhantomData,
|
|
||||||
net::{IpAddr, SocketAddr},
|
|
||||||
ops::Try,
|
|
||||||
option::NoneError,
|
|
||||||
pin::Pin,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Drops connections under configurable conditions:
|
|
||||||
///
|
|
||||||
/// 1. If the max number of connections is reached.
|
|
||||||
/// 2. If the max number of connections for a single IP is reached.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct ConnectionFilter<S, Req, Resp> {
|
|
||||||
listener: Fuse<S>,
|
|
||||||
closed_connections: mpsc::UnboundedSender<SocketAddr>,
|
|
||||||
closed_connections_rx: mpsc::UnboundedReceiver<SocketAddr>,
|
|
||||||
config: Config,
|
|
||||||
connections_per_ip: FnvHashMap<IpAddr, usize>,
|
|
||||||
open_connections: usize,
|
|
||||||
ghost: PhantomData<(Req, Resp)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
enum NewConnection<Req, Resp, C> {
|
|
||||||
Filtered,
|
|
||||||
Accepted(Channel<Req, Resp, C>),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, C> Try for NewConnection<Req, Resp, C> {
|
|
||||||
type Ok = Channel<Req, Resp, C>;
|
|
||||||
type Error = NoneError;
|
|
||||||
|
|
||||||
fn into_result(self) -> Result<Channel<Req, Resp, C>, NoneError> {
|
|
||||||
match self {
|
|
||||||
NewConnection::Filtered => Err(NoneError),
|
|
||||||
NewConnection::Accepted(channel) => Ok(channel),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn from_error(_: NoneError) -> Self {
|
|
||||||
NewConnection::Filtered
|
|
||||||
}
|
|
||||||
|
|
||||||
fn from_ok(channel: Channel<Req, Resp, C>) -> Self {
|
|
||||||
NewConnection::Accepted(channel)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Req, Resp> ConnectionFilter<S, Req, Resp> {
|
|
||||||
unsafe_pinned!(open_connections: usize);
|
|
||||||
unsafe_pinned!(config: Config);
|
|
||||||
unsafe_pinned!(connections_per_ip: FnvHashMap<IpAddr, usize>);
|
|
||||||
unsafe_pinned!(closed_connections_rx: mpsc::UnboundedReceiver<SocketAddr>);
|
|
||||||
unsafe_pinned!(listener: Fuse<S>);
|
|
||||||
|
|
||||||
/// Sheds new connections to stay under configured limits.
|
|
||||||
pub fn filter<C>(listener: S, config: Config) -> Self
|
|
||||||
where
|
|
||||||
S: Stream<Item = Result<C, io::Error>>,
|
|
||||||
C: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
{
|
|
||||||
let (closed_connections, closed_connections_rx) = mpsc::unbounded();
|
|
||||||
|
|
||||||
ConnectionFilter {
|
|
||||||
listener: listener.fuse(),
|
|
||||||
closed_connections,
|
|
||||||
closed_connections_rx,
|
|
||||||
config,
|
|
||||||
connections_per_ip: FnvHashMap::default(),
|
|
||||||
open_connections: 0,
|
|
||||||
ghost: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_new_connection<C>(self: &mut Pin<&mut Self>, stream: C) -> NewConnection<Req, Resp, C>
|
|
||||||
where
|
|
||||||
C: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
{
|
|
||||||
let peer = match stream.peer_addr() {
|
|
||||||
Ok(peer) => peer,
|
|
||||||
Err(e) => {
|
|
||||||
warn!("Could not get peer_addr of new connection: {}", e);
|
|
||||||
return NewConnection::Filtered;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let open_connections = *self.as_mut().open_connections();
|
|
||||||
if open_connections >= self.as_mut().config().max_connections {
|
|
||||||
warn!(
|
|
||||||
"[{}] Shedding connection because the maximum open connections \
|
|
||||||
limit is reached ({}/{}).",
|
|
||||||
peer,
|
|
||||||
open_connections,
|
|
||||||
self.as_mut().config().max_connections
|
|
||||||
);
|
|
||||||
return NewConnection::Filtered;
|
|
||||||
}
|
|
||||||
|
|
||||||
let config = self.config.clone();
|
|
||||||
let open_connections_for_ip = self.increment_connections_for_ip(&peer)?;
|
|
||||||
*self.as_mut().open_connections() += 1;
|
|
||||||
|
|
||||||
debug!(
|
|
||||||
"[{}] Opening channel ({}/{} connections for IP, {} total).",
|
|
||||||
peer,
|
|
||||||
open_connections_for_ip,
|
|
||||||
config.max_connections_per_ip,
|
|
||||||
self.as_mut().open_connections(),
|
|
||||||
);
|
|
||||||
|
|
||||||
NewConnection::Accepted(Channel {
|
|
||||||
client_addr: peer,
|
|
||||||
closed_connections: self.closed_connections.clone(),
|
|
||||||
transport: stream.fuse(),
|
|
||||||
config,
|
|
||||||
ghost: PhantomData,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_closed_connection(self: &mut Pin<&mut Self>, addr: &SocketAddr) {
|
|
||||||
*self.as_mut().open_connections() -= 1;
|
|
||||||
debug!(
|
|
||||||
"[{}] Closing channel. {} open connections remaining.",
|
|
||||||
addr, self.open_connections
|
|
||||||
);
|
|
||||||
self.decrement_connections_for_ip(&addr);
|
|
||||||
self.as_mut().connections_per_ip().compact(0.1);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn increment_connections_for_ip(self: &mut Pin<&mut Self>, peer: &SocketAddr) -> Option<usize> {
|
|
||||||
let max_connections_per_ip = self.as_mut().config().max_connections_per_ip;
|
|
||||||
let mut occupied;
|
|
||||||
let mut connections_per_ip = self.as_mut().connections_per_ip();
|
|
||||||
let occupied = match connections_per_ip.entry(peer.ip()) {
|
|
||||||
Entry::Vacant(vacant) => vacant.insert(0),
|
|
||||||
Entry::Occupied(o) => {
|
|
||||||
if *o.get() < max_connections_per_ip {
|
|
||||||
// Store the reference outside the block to extend the lifetime.
|
|
||||||
occupied = o;
|
|
||||||
occupied.get_mut()
|
|
||||||
} else {
|
|
||||||
info!(
|
|
||||||
"[{}] Opened max connections from IP ({}/{}).",
|
|
||||||
peer,
|
|
||||||
o.get(),
|
|
||||||
max_connections_per_ip
|
|
||||||
);
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
*occupied += 1;
|
|
||||||
Some(*occupied)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn decrement_connections_for_ip(self: &mut Pin<&mut Self>, addr: &SocketAddr) {
|
|
||||||
let should_compact = match self.as_mut().connections_per_ip().entry(addr.ip()) {
|
|
||||||
Entry::Vacant(_) => {
|
|
||||||
error!("[{}] Got vacant entry when closing connection.", addr);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
Entry::Occupied(mut occupied) => {
|
|
||||||
*occupied.get_mut() -= 1;
|
|
||||||
if *occupied.get() == 0 {
|
|
||||||
occupied.remove();
|
|
||||||
true
|
|
||||||
} else {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
if should_compact {
|
|
||||||
self.as_mut().connections_per_ip().compact(0.1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_listener<C>(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
) -> PollIo<NewConnection<Req, Resp, C>>
|
|
||||||
where
|
|
||||||
S: Stream<Item = Result<C, io::Error>>,
|
|
||||||
C: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
{
|
|
||||||
match ready!(self.as_mut().listener().poll_next_unpin(cx)?) {
|
|
||||||
Some(codec) => Poll::Ready(Some(Ok(self.handle_new_connection(codec)))),
|
|
||||||
None => Poll::Ready(None),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_closed_connections(
|
|
||||||
self: &mut Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
) -> Poll<io::Result<()>> {
|
|
||||||
match ready!(self.as_mut().closed_connections_rx().poll_next_unpin(cx)) {
|
|
||||||
Some(addr) => {
|
|
||||||
self.handle_closed_connection(&addr);
|
|
||||||
Poll::Ready(Ok(()))
|
|
||||||
}
|
|
||||||
None => unreachable!("Holding a copy of closed_connections and didn't close it."),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Req, Resp, T> Stream for ConnectionFilter<S, Req, Resp>
|
|
||||||
where
|
|
||||||
S: Stream<Item = Result<T, io::Error>>,
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
{
|
|
||||||
type Item = io::Result<Channel<Req, Resp, T>>;
|
|
||||||
|
|
||||||
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> PollIo<Channel<Req, Resp, T>> {
|
|
||||||
loop {
|
|
||||||
match (
|
|
||||||
self.as_mut().poll_listener(cx)?,
|
|
||||||
self.poll_closed_connections(cx)?,
|
|
||||||
) {
|
|
||||||
(Poll::Ready(Some(NewConnection::Accepted(channel))), _) => {
|
|
||||||
return Poll::Ready(Some(Ok(channel)));
|
|
||||||
}
|
|
||||||
(Poll::Ready(Some(NewConnection::Filtered)), _) | (_, Poll::Ready(())) => {
|
|
||||||
trace!(
|
|
||||||
"Filtered a connection; {} open.",
|
|
||||||
self.as_mut().open_connections()
|
|
||||||
);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
(Poll::Pending, Poll::Pending) => return Poll::Pending,
|
|
||||||
(Poll::Ready(None), Poll::Pending) => {
|
|
||||||
if *self.as_mut().open_connections() > 0 {
|
|
||||||
trace!(
|
|
||||||
"Listener closed; {} open connections.",
|
|
||||||
self.as_mut().open_connections()
|
|
||||||
);
|
|
||||||
return Poll::Pending;
|
|
||||||
}
|
|
||||||
trace!("Shutting down listener: all connections closed, and no more coming.");
|
|
||||||
return Poll::Ready(None);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,633 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
//! Provides a server that concurrently handles many connections sending multiplexed requests.
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
context, util::deadline_compat, util::AsDuration, util::Compact, ClientMessage,
|
|
||||||
ClientMessageKind, PollIo, Request, Response, ServerError, Transport,
|
|
||||||
};
|
|
||||||
use fnv::FnvHashMap;
|
|
||||||
use futures::{
|
|
||||||
channel::mpsc,
|
|
||||||
future::{abortable, AbortHandle},
|
|
||||||
prelude::*,
|
|
||||||
ready,
|
|
||||||
stream::Fuse,
|
|
||||||
task::{Context, Poll},
|
|
||||||
try_ready,
|
|
||||||
};
|
|
||||||
use humantime::format_rfc3339;
|
|
||||||
use log::{debug, error, info, trace, warn};
|
|
||||||
use pin_utils::{unsafe_pinned, unsafe_unpinned};
|
|
||||||
use std::{
|
|
||||||
error::Error as StdError,
|
|
||||||
io,
|
|
||||||
marker::PhantomData,
|
|
||||||
net::SocketAddr,
|
|
||||||
pin::Pin,
|
|
||||||
time::{Instant, SystemTime},
|
|
||||||
};
|
|
||||||
use tokio_timer::timeout;
|
|
||||||
use trace::{self, TraceId};
|
|
||||||
|
|
||||||
mod filter;
|
|
||||||
|
|
||||||
/// Manages clients, serving multiplexed requests over each connection.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Server<Req, Resp> {
|
|
||||||
config: Config,
|
|
||||||
ghost: PhantomData<(Req, Resp)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp> Default for Server<Req, Resp> {
|
|
||||||
fn default() -> Self {
|
|
||||||
new(Config::default())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Settings that control the behavior of the server.
|
|
||||||
#[non_exhaustive]
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct Config {
|
|
||||||
/// The maximum number of clients that can be connected to the server at once. When at the
|
|
||||||
/// limit, existing connections are honored and new connections are rejected.
|
|
||||||
pub max_connections: usize,
|
|
||||||
/// The maximum number of clients per IP address that can be connected to the server at once.
|
|
||||||
/// When an IP is at the limit, existing connections are honored and new connections on that IP
|
|
||||||
/// address are rejected.
|
|
||||||
pub max_connections_per_ip: usize,
|
|
||||||
/// The maximum number of requests that can be in flight for each client. When a client is at
|
|
||||||
/// the in-flight request limit, existing requests are fulfilled and new requests are rejected.
|
|
||||||
/// Rejected requests are sent a response error.
|
|
||||||
pub max_in_flight_requests_per_connection: usize,
|
|
||||||
/// The number of responses per client that can be buffered server-side before being sent.
|
|
||||||
/// `pending_response_buffer` controls the buffer size of the channel that a server's
|
|
||||||
/// response tasks use to send responses to the client handler task.
|
|
||||||
pub pending_response_buffer: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Config {
|
|
||||||
fn default() -> Self {
|
|
||||||
Config {
|
|
||||||
max_connections: 1_000_000,
|
|
||||||
max_connections_per_ip: 1_000,
|
|
||||||
max_in_flight_requests_per_connection: 1_000,
|
|
||||||
pending_response_buffer: 100,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a new server with configuration specified `config`.
|
|
||||||
pub fn new<Req, Resp>(config: Config) -> Server<Req, Resp> {
|
|
||||||
Server {
|
|
||||||
config,
|
|
||||||
ghost: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp> Server<Req, Resp> {
|
|
||||||
/// Returns the config for this server.
|
|
||||||
pub fn config(&self) -> &Config {
|
|
||||||
&self.config
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a stream of the incoming connections to the server.
|
|
||||||
pub fn incoming<S, T>(
|
|
||||||
self,
|
|
||||||
listener: S,
|
|
||||||
) -> impl Stream<Item = io::Result<Channel<Req, Resp, T>>>
|
|
||||||
where
|
|
||||||
Req: Send,
|
|
||||||
Resp: Send,
|
|
||||||
S: Stream<Item = io::Result<T>>,
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
{
|
|
||||||
self::filter::ConnectionFilter::filter(listener, self.config.clone())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The future driving the server.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Running<S, F> {
|
|
||||||
incoming: S,
|
|
||||||
request_handler: F,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, F> Running<S, F> {
|
|
||||||
unsafe_pinned!(incoming: S);
|
|
||||||
unsafe_unpinned!(request_handler: F);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, T, Req, Resp, F, Fut> Future for Running<S, F>
|
|
||||||
where
|
|
||||||
S: Sized + Stream<Item = io::Result<Channel<Req, Resp, T>>>,
|
|
||||||
Req: Send + 'static,
|
|
||||||
Resp: Send + 'static,
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send + 'static,
|
|
||||||
F: FnOnce(context::Context, Req) -> Fut + Send + 'static + Clone,
|
|
||||||
Fut: Future<Output = io::Result<Resp>> + Send + 'static,
|
|
||||||
{
|
|
||||||
type Output = ();
|
|
||||||
|
|
||||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
|
|
||||||
while let Some(channel) = ready!(self.as_mut().incoming().poll_next(cx)) {
|
|
||||||
match channel {
|
|
||||||
Ok(channel) => {
|
|
||||||
let peer = channel.client_addr;
|
|
||||||
if let Err(e) =
|
|
||||||
crate::spawn(channel.respond_with(self.as_mut().request_handler().clone()))
|
|
||||||
{
|
|
||||||
warn!("[{}] Failed to spawn connection handler: {:?}", peer, e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
warn!("Incoming connection error: {}", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
info!("Server shutting down.");
|
|
||||||
Poll::Ready(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A utility trait enabling a stream to fluently chain a request handler.
|
|
||||||
pub trait Handler<T, Req, Resp>
|
|
||||||
where
|
|
||||||
Self: Sized + Stream<Item = io::Result<Channel<Req, Resp, T>>>,
|
|
||||||
Req: Send,
|
|
||||||
Resp: Send,
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
{
|
|
||||||
/// Responds to all requests with `request_handler`.
|
|
||||||
fn respond_with<F, Fut>(self, request_handler: F) -> Running<Self, F>
|
|
||||||
where
|
|
||||||
F: FnOnce(context::Context, Req) -> Fut + Send + 'static + Clone,
|
|
||||||
Fut: Future<Output = io::Result<Resp>> + Send + 'static,
|
|
||||||
{
|
|
||||||
Running {
|
|
||||||
incoming: self,
|
|
||||||
request_handler,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T, Req, Resp, S> Handler<T, Req, Resp> for S
|
|
||||||
where
|
|
||||||
S: Sized + Stream<Item = io::Result<Channel<Req, Resp, T>>>,
|
|
||||||
Req: Send,
|
|
||||||
Resp: Send,
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Responds to all requests with `request_handler`.
|
|
||||||
/// The server end of an open connection with a client.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Channel<Req, Resp, T> {
|
|
||||||
/// Writes responses to the wire and reads requests off the wire.
|
|
||||||
transport: Fuse<T>,
|
|
||||||
/// Signals the connection is closed when `Channel` is dropped.
|
|
||||||
closed_connections: mpsc::UnboundedSender<SocketAddr>,
|
|
||||||
/// Channel limits to prevent unlimited resource usage.
|
|
||||||
config: Config,
|
|
||||||
/// The address of the server connected to.
|
|
||||||
client_addr: SocketAddr,
|
|
||||||
/// Types the request and response.
|
|
||||||
ghost: PhantomData<(Req, Resp)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, T> Drop for Channel<Req, Resp, T> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
trace!("[{}] Closing channel.", self.client_addr);
|
|
||||||
|
|
||||||
// Even in a bounded channel, each connection would have a guaranteed slot, so using
|
|
||||||
// an unbounded sender is actually no different. And, the bound is on the maximum number
|
|
||||||
// of open connections.
|
|
||||||
if self
|
|
||||||
.closed_connections
|
|
||||||
.unbounded_send(self.client_addr)
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
warn!(
|
|
||||||
"[{}] Failed to send closed connection message.",
|
|
||||||
self.client_addr
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, T> Channel<Req, Resp, T> {
|
|
||||||
unsafe_pinned!(transport: Fuse<T>);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, T> Channel<Req, Resp, T>
|
|
||||||
where
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
Req: Send,
|
|
||||||
Resp: Send,
|
|
||||||
{
|
|
||||||
pub(crate) fn start_send(mut self: Pin<&mut Self>, response: Response<Resp>) -> io::Result<()> {
|
|
||||||
self.as_mut().transport().start_send(response)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn poll_ready(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
) -> Poll<io::Result<()>> {
|
|
||||||
self.as_mut().transport().poll_ready(cx)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn poll_flush(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
) -> Poll<io::Result<()>> {
|
|
||||||
self.as_mut().transport().poll_flush(cx)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn poll_next(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
) -> PollIo<ClientMessage<Req>> {
|
|
||||||
self.as_mut().transport().poll_next(cx)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the address of the client connected to the channel.
|
|
||||||
pub fn client_addr(&self) -> &SocketAddr {
|
|
||||||
&self.client_addr
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Respond to requests coming over the channel with `f`. Returns a future that drives the
|
|
||||||
/// responses and resolves when the connection is closed.
|
|
||||||
pub fn respond_with<F, Fut>(self, f: F) -> impl Future<Output = ()>
|
|
||||||
where
|
|
||||||
F: FnOnce(context::Context, Req) -> Fut + Send + 'static + Clone,
|
|
||||||
Fut: Future<Output = io::Result<Resp>> + Send + 'static,
|
|
||||||
Req: 'static,
|
|
||||||
Resp: 'static,
|
|
||||||
{
|
|
||||||
let (responses_tx, responses) = mpsc::channel(self.config.pending_response_buffer);
|
|
||||||
let responses = responses.fuse();
|
|
||||||
let peer = self.client_addr;
|
|
||||||
|
|
||||||
ClientHandler {
|
|
||||||
channel: self,
|
|
||||||
f,
|
|
||||||
pending_responses: responses,
|
|
||||||
responses_tx,
|
|
||||||
in_flight_requests: FnvHashMap::default(),
|
|
||||||
}
|
|
||||||
.unwrap_or_else(move |e| {
|
|
||||||
info!("[{}] ClientHandler errored out: {}", peer, e);
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
struct ClientHandler<Req, Resp, T, F> {
|
|
||||||
channel: Channel<Req, Resp, T>,
|
|
||||||
/// Responses waiting to be written to the wire.
|
|
||||||
pending_responses: Fuse<mpsc::Receiver<(context::Context, Response<Resp>)>>,
|
|
||||||
/// Handed out to request handlers to fan in responses.
|
|
||||||
responses_tx: mpsc::Sender<(context::Context, Response<Resp>)>,
|
|
||||||
/// Number of requests currently being responded to.
|
|
||||||
in_flight_requests: FnvHashMap<u64, AbortHandle>,
|
|
||||||
/// Request handler.
|
|
||||||
f: F,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, T, F> ClientHandler<Req, Resp, T, F> {
|
|
||||||
unsafe_pinned!(channel: Channel<Req, Resp, T>);
|
|
||||||
unsafe_pinned!(in_flight_requests: FnvHashMap<u64, AbortHandle>);
|
|
||||||
unsafe_pinned!(pending_responses: Fuse<mpsc::Receiver<(context::Context, Response<Resp>)>>);
|
|
||||||
unsafe_pinned!(responses_tx: mpsc::Sender<(context::Context, Response<Resp>)>);
|
|
||||||
// For this to be safe, field f must be private, and code in this module must never
|
|
||||||
// construct PinMut<F>.
|
|
||||||
unsafe_unpinned!(f: F);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, T, F, Fut> ClientHandler<Req, Resp, T, F>
|
|
||||||
where
|
|
||||||
Req: Send + 'static,
|
|
||||||
Resp: Send + 'static,
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
F: FnOnce(context::Context, Req) -> Fut + Send + 'static + Clone,
|
|
||||||
Fut: Future<Output = io::Result<Resp>> + Send + 'static,
|
|
||||||
{
|
|
||||||
/// If at max in-flight requests, check that there's room to immediately write a throttled
|
|
||||||
/// response.
|
|
||||||
fn poll_ready_if_throttling(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
) -> Poll<io::Result<()>> {
|
|
||||||
if self.in_flight_requests.len()
|
|
||||||
>= self.channel.config.max_in_flight_requests_per_connection
|
|
||||||
{
|
|
||||||
let peer = self.as_mut().channel().client_addr;
|
|
||||||
|
|
||||||
while let Poll::Pending = self.as_mut().channel().poll_ready(cx)? {
|
|
||||||
info!(
|
|
||||||
"[{}] In-flight requests at max ({}), and transport is not ready.",
|
|
||||||
peer,
|
|
||||||
self.as_mut().in_flight_requests().len(),
|
|
||||||
);
|
|
||||||
try_ready!(self.as_mut().channel().poll_flush(cx));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Poll::Ready(Ok(()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pump_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> PollIo<()> {
|
|
||||||
ready!(self.as_mut().poll_ready_if_throttling(cx)?);
|
|
||||||
|
|
||||||
Poll::Ready(match ready!(self.as_mut().channel().poll_next(cx)?) {
|
|
||||||
Some(message) => {
|
|
||||||
match message.message {
|
|
||||||
ClientMessageKind::Request(request) => {
|
|
||||||
self.handle_request(message.trace_context, request)?;
|
|
||||||
}
|
|
||||||
ClientMessageKind::Cancel { request_id } => {
|
|
||||||
self.cancel_request(&message.trace_context, request_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Some(Ok(()))
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
trace!("[{}] Read half closed", self.channel.client_addr);
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pump_write(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
read_half_closed: bool,
|
|
||||||
) -> PollIo<()> {
|
|
||||||
match self.as_mut().poll_next_response(cx)? {
|
|
||||||
Poll::Ready(Some((_, response))) => {
|
|
||||||
self.as_mut().channel().start_send(response)?;
|
|
||||||
Poll::Ready(Some(Ok(())))
|
|
||||||
}
|
|
||||||
Poll::Ready(None) => {
|
|
||||||
// Shutdown can't be done before we finish pumping out remaining responses.
|
|
||||||
ready!(self.as_mut().channel().poll_flush(cx)?);
|
|
||||||
Poll::Ready(None)
|
|
||||||
}
|
|
||||||
Poll::Pending => {
|
|
||||||
// No more requests to process, so flush any requests buffered in the transport.
|
|
||||||
ready!(self.as_mut().channel().poll_flush(cx)?);
|
|
||||||
|
|
||||||
// Being here means there are no staged requests and all written responses are
|
|
||||||
// fully flushed. So, if the read half is closed and there are no in-flight
|
|
||||||
// requests, then we can close the write half.
|
|
||||||
if read_half_closed && self.as_mut().in_flight_requests().is_empty() {
|
|
||||||
Poll::Ready(None)
|
|
||||||
} else {
|
|
||||||
Poll::Pending
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_next_response(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
cx: &mut Context<'_>,
|
|
||||||
) -> PollIo<(context::Context, Response<Resp>)> {
|
|
||||||
// Ensure there's room to write a response.
|
|
||||||
while let Poll::Pending = self.as_mut().channel().poll_ready(cx)? {
|
|
||||||
ready!(self.as_mut().channel().poll_flush(cx)?);
|
|
||||||
}
|
|
||||||
|
|
||||||
let peer = self.as_mut().channel().client_addr;
|
|
||||||
|
|
||||||
match ready!(self.as_mut().pending_responses().poll_next(cx)) {
|
|
||||||
Some((ctx, response)) => {
|
|
||||||
if self
|
|
||||||
.as_mut()
|
|
||||||
.in_flight_requests()
|
|
||||||
.remove(&response.request_id)
|
|
||||||
.is_some()
|
|
||||||
{
|
|
||||||
self.as_mut().in_flight_requests().compact(0.1);
|
|
||||||
}
|
|
||||||
trace!(
|
|
||||||
"[{}/{}] Staging response. In-flight requests = {}.",
|
|
||||||
ctx.trace_id(),
|
|
||||||
peer,
|
|
||||||
self.as_mut().in_flight_requests().len(),
|
|
||||||
);
|
|
||||||
Poll::Ready(Some(Ok((ctx, response))))
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
// This branch likely won't happen, since the ClientHandler is holding a Sender.
|
|
||||||
trace!("[{}] No new responses.", peer);
|
|
||||||
Poll::Ready(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_request(
|
|
||||||
mut self: Pin<&mut Self>,
|
|
||||||
trace_context: trace::Context,
|
|
||||||
request: Request<Req>,
|
|
||||||
) -> io::Result<()> {
|
|
||||||
let request_id = request.id;
|
|
||||||
let peer = self.as_mut().channel().client_addr;
|
|
||||||
let ctx = context::Context {
|
|
||||||
deadline: request.deadline,
|
|
||||||
trace_context,
|
|
||||||
};
|
|
||||||
let request = request.message;
|
|
||||||
|
|
||||||
if self.as_mut().in_flight_requests().len()
|
|
||||||
>= self
|
|
||||||
.as_mut()
|
|
||||||
.channel()
|
|
||||||
.config
|
|
||||||
.max_in_flight_requests_per_connection
|
|
||||||
{
|
|
||||||
debug!(
|
|
||||||
"[{}/{}] Client has reached in-flight request limit ({}/{}).",
|
|
||||||
ctx.trace_id(),
|
|
||||||
peer,
|
|
||||||
self.as_mut().in_flight_requests().len(),
|
|
||||||
self.as_mut()
|
|
||||||
.channel()
|
|
||||||
.config
|
|
||||||
.max_in_flight_requests_per_connection
|
|
||||||
);
|
|
||||||
|
|
||||||
self.as_mut().channel().start_send(Response {
|
|
||||||
request_id,
|
|
||||||
message: Err(ServerError {
|
|
||||||
kind: io::ErrorKind::WouldBlock,
|
|
||||||
detail: Some("Server throttled the request.".into()),
|
|
||||||
}),
|
|
||||||
})?;
|
|
||||||
return Ok(());
|
|
||||||
}
|
|
||||||
|
|
||||||
let deadline = ctx.deadline;
|
|
||||||
let timeout = deadline.as_duration();
|
|
||||||
trace!(
|
|
||||||
"[{}/{}] Received request with deadline {} (timeout {:?}).",
|
|
||||||
ctx.trace_id(),
|
|
||||||
peer,
|
|
||||||
format_rfc3339(deadline),
|
|
||||||
timeout,
|
|
||||||
);
|
|
||||||
let mut response_tx = self.as_mut().responses_tx().clone();
|
|
||||||
|
|
||||||
let trace_id = *ctx.trace_id();
|
|
||||||
let response = self.as_mut().f().clone()(ctx, request);
|
|
||||||
let response = deadline_compat::Deadline::new(response, Instant::now() + timeout).then(
|
|
||||||
async move |result| {
|
|
||||||
let response = Response {
|
|
||||||
request_id,
|
|
||||||
message: match result {
|
|
||||||
Ok(message) => Ok(message),
|
|
||||||
Err(e) => Err(make_server_error(e, trace_id, peer, deadline)),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
trace!("[{}/{}] Sending response.", trace_id, peer);
|
|
||||||
response_tx
|
|
||||||
.send((ctx, response))
|
|
||||||
.unwrap_or_else(|_| ())
|
|
||||||
.await;
|
|
||||||
},
|
|
||||||
);
|
|
||||||
let (abortable_response, abort_handle) = abortable(response);
|
|
||||||
crate::spawn(abortable_response.map(|_| ())).map_err(|e| {
|
|
||||||
io::Error::new(
|
|
||||||
io::ErrorKind::Other,
|
|
||||||
format!(
|
|
||||||
"Could not spawn response task. Is shutdown: {}",
|
|
||||||
e.is_shutdown()
|
|
||||||
),
|
|
||||||
)
|
|
||||||
})?;
|
|
||||||
self.as_mut()
|
|
||||||
.in_flight_requests()
|
|
||||||
.insert(request_id, abort_handle);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cancel_request(mut self: Pin<&mut Self>, trace_context: &trace::Context, request_id: u64) {
|
|
||||||
// It's possible the request was already completed, so it's fine
|
|
||||||
// if this is None.
|
|
||||||
if let Some(cancel_handle) = self.as_mut().in_flight_requests().remove(&request_id) {
|
|
||||||
self.as_mut().in_flight_requests().compact(0.1);
|
|
||||||
|
|
||||||
cancel_handle.abort();
|
|
||||||
let remaining = self.as_mut().in_flight_requests().len();
|
|
||||||
trace!(
|
|
||||||
"[{}/{}] Request canceled. In-flight requests = {}",
|
|
||||||
trace_context.trace_id,
|
|
||||||
self.channel.client_addr,
|
|
||||||
remaining,
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
trace!(
|
|
||||||
"[{}/{}] Received cancellation, but response handler \
|
|
||||||
is already complete.",
|
|
||||||
trace_context.trace_id,
|
|
||||||
self.channel.client_addr
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, T, F, Fut> Future for ClientHandler<Req, Resp, T, F>
|
|
||||||
where
|
|
||||||
Req: Send + 'static,
|
|
||||||
Resp: Send + 'static,
|
|
||||||
T: Transport<Item = ClientMessage<Req>, SinkItem = Response<Resp>> + Send,
|
|
||||||
F: FnOnce(context::Context, Req) -> Fut + Send + 'static + Clone,
|
|
||||||
Fut: Future<Output = io::Result<Resp>> + Send + 'static,
|
|
||||||
{
|
|
||||||
type Output = io::Result<()>;
|
|
||||||
|
|
||||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
trace!("[{}] ClientHandler::poll", self.channel.client_addr);
|
|
||||||
loop {
|
|
||||||
let read = self.as_mut().pump_read(cx)?;
|
|
||||||
match (
|
|
||||||
read,
|
|
||||||
self.as_mut().pump_write(cx, read == Poll::Ready(None))?,
|
|
||||||
) {
|
|
||||||
(Poll::Ready(None), Poll::Ready(None)) => {
|
|
||||||
info!("[{}] Client disconnected.", self.channel.client_addr);
|
|
||||||
return Poll::Ready(Ok(()));
|
|
||||||
}
|
|
||||||
(read @ Poll::Ready(Some(())), write) | (read, write @ Poll::Ready(Some(()))) => {
|
|
||||||
trace!(
|
|
||||||
"[{}] read: {:?}, write: {:?}.",
|
|
||||||
self.channel.client_addr,
|
|
||||||
read,
|
|
||||||
write
|
|
||||||
)
|
|
||||||
}
|
|
||||||
(read, write) => {
|
|
||||||
trace!(
|
|
||||||
"[{}] read: {:?}, write: {:?} (not ready).",
|
|
||||||
self.channel.client_addr,
|
|
||||||
read,
|
|
||||||
write,
|
|
||||||
);
|
|
||||||
return Poll::Pending;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn make_server_error(
|
|
||||||
e: timeout::Error<io::Error>,
|
|
||||||
trace_id: TraceId,
|
|
||||||
peer: SocketAddr,
|
|
||||||
deadline: SystemTime,
|
|
||||||
) -> ServerError {
|
|
||||||
if e.is_elapsed() {
|
|
||||||
debug!(
|
|
||||||
"[{}/{}] Response did not complete before deadline of {}s.",
|
|
||||||
trace_id,
|
|
||||||
peer,
|
|
||||||
format_rfc3339(deadline)
|
|
||||||
);
|
|
||||||
// No point in responding, since the client will have dropped the request.
|
|
||||||
ServerError {
|
|
||||||
kind: io::ErrorKind::TimedOut,
|
|
||||||
detail: Some(format!(
|
|
||||||
"Response did not complete before deadline of {}s.",
|
|
||||||
format_rfc3339(deadline)
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
} else if e.is_timer() {
|
|
||||||
error!(
|
|
||||||
"[{}/{}] Response failed because of an issue with a timer: {}",
|
|
||||||
trace_id, peer, e
|
|
||||||
);
|
|
||||||
|
|
||||||
ServerError {
|
|
||||||
kind: io::ErrorKind::Other,
|
|
||||||
detail: Some(format!("{}", e)),
|
|
||||||
}
|
|
||||||
} else if e.is_inner() {
|
|
||||||
let e = e.into_inner().unwrap();
|
|
||||||
ServerError {
|
|
||||||
kind: e.kind(),
|
|
||||||
detail: Some(e.description().into()),
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
error!("[{}/{}] Unexpected response failure: {}", trace_id, peer, e);
|
|
||||||
|
|
||||||
ServerError {
|
|
||||||
kind: io::ErrorKind::Other,
|
|
||||||
detail: Some(format!("Server unexpectedly failed to respond: {}", e)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,161 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
//! Transports backed by in-memory channels.
|
|
||||||
|
|
||||||
use crate::{PollIo, Transport};
|
|
||||||
use futures::{channel::mpsc, task::Context, Poll, Sink, Stream};
|
|
||||||
use pin_utils::unsafe_pinned;
|
|
||||||
use std::pin::Pin;
|
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
net::{IpAddr, Ipv4Addr, SocketAddr},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Returns two unbounded channel peers. Each [`Stream`] yields items sent through the other's
|
|
||||||
/// [`Sink`].
|
|
||||||
pub fn unbounded<SinkItem, Item>() -> (
|
|
||||||
UnboundedChannel<SinkItem, Item>,
|
|
||||||
UnboundedChannel<Item, SinkItem>,
|
|
||||||
) {
|
|
||||||
let (tx1, rx2) = mpsc::unbounded();
|
|
||||||
let (tx2, rx1) = mpsc::unbounded();
|
|
||||||
(
|
|
||||||
UnboundedChannel { tx: tx1, rx: rx1 },
|
|
||||||
UnboundedChannel { tx: tx2, rx: rx2 },
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A bi-directional channel backed by an [`UnboundedSender`](mpsc::UnboundedSender)
|
|
||||||
/// and [`UnboundedReceiver`](mpsc::UnboundedReceiver).
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct UnboundedChannel<Item, SinkItem> {
|
|
||||||
rx: mpsc::UnboundedReceiver<Item>,
|
|
||||||
tx: mpsc::UnboundedSender<SinkItem>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Item, SinkItem> UnboundedChannel<Item, SinkItem> {
|
|
||||||
unsafe_pinned!(rx: mpsc::UnboundedReceiver<Item>);
|
|
||||||
unsafe_pinned!(tx: mpsc::UnboundedSender<SinkItem>);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Item, SinkItem> Stream for UnboundedChannel<Item, SinkItem> {
|
|
||||||
type Item = Result<Item, io::Error>;
|
|
||||||
|
|
||||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> PollIo<Item> {
|
|
||||||
self.rx().poll_next(cx).map(|option| option.map(Ok))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Item, SinkItem> Sink<SinkItem> for UnboundedChannel<Item, SinkItem> {
|
|
||||||
type SinkError = io::Error;
|
|
||||||
|
|
||||||
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
self.tx()
|
|
||||||
.poll_ready(cx)
|
|
||||||
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn start_send(self: Pin<&mut Self>, item: SinkItem) -> io::Result<()> {
|
|
||||||
self.tx()
|
|
||||||
.start_send(item)
|
|
||||||
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::SinkError>> {
|
|
||||||
self.tx()
|
|
||||||
.poll_flush(cx)
|
|
||||||
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
|
||||||
self.tx()
|
|
||||||
.poll_close(cx)
|
|
||||||
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Item, SinkItem> Transport for UnboundedChannel<Item, SinkItem> {
|
|
||||||
type SinkItem = SinkItem;
|
|
||||||
type Item = Item;
|
|
||||||
|
|
||||||
fn peer_addr(&self) -> io::Result<SocketAddr> {
|
|
||||||
Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn local_addr(&self) -> io::Result<SocketAddr> {
|
|
||||||
Ok(SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use crate::{
|
|
||||||
client, context,
|
|
||||||
server::{Handler, Server},
|
|
||||||
transport,
|
|
||||||
};
|
|
||||||
use futures::compat::Executor01CompatExt;
|
|
||||||
use futures::{prelude::*, stream};
|
|
||||||
use log::trace;
|
|
||||||
use std::io;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn integration() {
|
|
||||||
let _ = env_logger::try_init();
|
|
||||||
crate::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
let (client_channel, server_channel) = transport::channel::unbounded();
|
|
||||||
let server = Server::<String, u64>::default()
|
|
||||||
.incoming(stream::once(future::ready(Ok(server_channel))))
|
|
||||||
.respond_with(|_ctx, request| {
|
|
||||||
future::ready(request.parse::<u64>().map_err(|_| {
|
|
||||||
io::Error::new(
|
|
||||||
io::ErrorKind::InvalidInput,
|
|
||||||
format!("{:?} is not an int", request),
|
|
||||||
)
|
|
||||||
}))
|
|
||||||
});
|
|
||||||
|
|
||||||
let responses = async {
|
|
||||||
let mut client = client::new(client::Config::default(), client_channel).await?;
|
|
||||||
|
|
||||||
let response1 = client.call(context::current(), "123".into()).await;
|
|
||||||
let response2 = client.call(context::current(), "abc".into()).await;
|
|
||||||
|
|
||||||
Ok::<_, io::Error>((response1, response2))
|
|
||||||
};
|
|
||||||
|
|
||||||
let (response1, response2) = run_future(future::join(
|
|
||||||
server,
|
|
||||||
responses.unwrap_or_else(|e| panic!(e)),
|
|
||||||
))
|
|
||||||
.1;
|
|
||||||
|
|
||||||
trace!("response1: {:?}, response2: {:?}", response1, response2);
|
|
||||||
|
|
||||||
assert!(response1.is_ok());
|
|
||||||
assert_eq!(response1.ok().unwrap(), 123);
|
|
||||||
|
|
||||||
assert!(response2.is_err());
|
|
||||||
assert_eq!(response2.err().unwrap().kind(), io::ErrorKind::InvalidInput);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn run_future<F>(f: F) -> F::Output
|
|
||||||
where
|
|
||||||
F: Future + Send + 'static,
|
|
||||||
F::Output: Send + 'static,
|
|
||||||
{
|
|
||||||
let (tx, rx) = futures::channel::oneshot::channel();
|
|
||||||
tokio::run(
|
|
||||||
f.map(|result| tx.send(result).unwrap_or_else(|_| unreachable!()))
|
|
||||||
.boxed()
|
|
||||||
.unit_error()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
futures::executor::block_on(rx).unwrap()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,123 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
//! Provides a [`Transport`] trait as well as implementations.
|
|
||||||
//!
|
|
||||||
//! The rpc crate is transport- and protocol-agnostic. Any transport that impls [`Transport`]
|
|
||||||
//! can be plugged in, using whatever protocol it wants.
|
|
||||||
|
|
||||||
use futures::prelude::*;
|
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
marker::PhantomData,
|
|
||||||
net::SocketAddr,
|
|
||||||
pin::Pin,
|
|
||||||
task::{Context, Poll},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub mod channel;
|
|
||||||
|
|
||||||
/// A bidirectional stream ([`Sink`] + [`Stream`]) of messages.
|
|
||||||
pub trait Transport
|
|
||||||
where
|
|
||||||
Self: Stream<Item = io::Result<<Self as Transport>::Item>>,
|
|
||||||
Self: Sink<<Self as Transport>::SinkItem, SinkError = io::Error>,
|
|
||||||
{
|
|
||||||
/// The type read off the transport.
|
|
||||||
type Item;
|
|
||||||
/// The type written to the transport.
|
|
||||||
type SinkItem;
|
|
||||||
|
|
||||||
/// The address of the remote peer this transport is in communication with.
|
|
||||||
fn peer_addr(&self) -> io::Result<SocketAddr>;
|
|
||||||
/// The address of the local half of this transport.
|
|
||||||
fn local_addr(&self) -> io::Result<SocketAddr>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a new Transport backed by the given Stream + Sink and connecting addresses.
|
|
||||||
pub fn new<S, SinkItem, Item>(
|
|
||||||
inner: S,
|
|
||||||
peer_addr: SocketAddr,
|
|
||||||
local_addr: SocketAddr,
|
|
||||||
) -> impl Transport<Item = Item, SinkItem = SinkItem>
|
|
||||||
where
|
|
||||||
S: Stream<Item = io::Result<Item>>,
|
|
||||||
S: Sink<SinkItem, SinkError = io::Error>,
|
|
||||||
{
|
|
||||||
TransportShim {
|
|
||||||
inner,
|
|
||||||
peer_addr,
|
|
||||||
local_addr,
|
|
||||||
_marker: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A transport created by adding peers to a Stream + Sink.
|
|
||||||
#[derive(Debug)]
|
|
||||||
struct TransportShim<S, SinkItem> {
|
|
||||||
peer_addr: SocketAddr,
|
|
||||||
local_addr: SocketAddr,
|
|
||||||
inner: S,
|
|
||||||
_marker: PhantomData<SinkItem>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, SinkItem> TransportShim<S, SinkItem> {
|
|
||||||
pin_utils::unsafe_pinned!(inner: S);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, SinkItem> Stream for TransportShim<S, SinkItem>
|
|
||||||
where
|
|
||||||
S: Stream,
|
|
||||||
{
|
|
||||||
type Item = S::Item;
|
|
||||||
|
|
||||||
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
|
|
||||||
self.inner().poll_next(cx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Item> Sink<Item> for TransportShim<S, Item>
|
|
||||||
where
|
|
||||||
S: Sink<Item>,
|
|
||||||
{
|
|
||||||
type SinkError = S::SinkError;
|
|
||||||
|
|
||||||
fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), S::SinkError> {
|
|
||||||
self.inner().start_send(item)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), S::SinkError>> {
|
|
||||||
self.inner().poll_ready(cx)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), S::SinkError>> {
|
|
||||||
self.inner().poll_flush(cx)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), S::SinkError>> {
|
|
||||||
self.inner().poll_close(cx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, SinkItem, Item> Transport for TransportShim<S, SinkItem>
|
|
||||||
where
|
|
||||||
S: Stream + Sink<SinkItem>,
|
|
||||||
Self: Stream<Item = io::Result<Item>>,
|
|
||||||
Self: Sink<SinkItem, SinkError = io::Error>,
|
|
||||||
{
|
|
||||||
type Item = Item;
|
|
||||||
type SinkItem = SinkItem;
|
|
||||||
|
|
||||||
/// The address of the remote peer this transport is in communication with.
|
|
||||||
fn peer_addr(&self) -> io::Result<SocketAddr> {
|
|
||||||
Ok(self.peer_addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The address of the local half of this transport.
|
|
||||||
fn local_addr(&self) -> io::Result<SocketAddr> {
|
|
||||||
Ok(self.local_addr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,69 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
use futures::{
|
|
||||||
compat::*,
|
|
||||||
prelude::*,
|
|
||||||
ready,
|
|
||||||
task::{Context, Poll},
|
|
||||||
};
|
|
||||||
use pin_utils::unsafe_pinned;
|
|
||||||
use std::pin::Pin;
|
|
||||||
use std::time::Instant;
|
|
||||||
use tokio_timer::{timeout, Delay};
|
|
||||||
|
|
||||||
#[must_use = "futures do nothing unless polled"]
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Deadline<T> {
|
|
||||||
future: T,
|
|
||||||
delay: Compat01As03<Delay>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Deadline<T> {
|
|
||||||
unsafe_pinned!(future: T);
|
|
||||||
unsafe_pinned!(delay: Compat01As03<Delay>);
|
|
||||||
|
|
||||||
/// Create a new `Deadline` that completes when `future` completes or when
|
|
||||||
/// `deadline` is reached.
|
|
||||||
pub fn new(future: T, deadline: Instant) -> Deadline<T> {
|
|
||||||
Deadline::new_with_delay(future, Delay::new(deadline))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn new_with_delay(future: T, delay: Delay) -> Deadline<T> {
|
|
||||||
Deadline {
|
|
||||||
future,
|
|
||||||
delay: delay.compat(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Gets a mutable reference to the underlying future in this deadline.
|
|
||||||
pub fn get_mut(&mut self) -> &mut T {
|
|
||||||
&mut self.future
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T> Future for Deadline<T>
|
|
||||||
where
|
|
||||||
T: TryFuture,
|
|
||||||
{
|
|
||||||
type Output = Result<T::Ok, timeout::Error<T::Error>>;
|
|
||||||
|
|
||||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
|
||||||
// First, try polling the future
|
|
||||||
match self.as_mut().future().try_poll(cx) {
|
|
||||||
Poll::Ready(Ok(v)) => return Poll::Ready(Ok(v)),
|
|
||||||
Poll::Pending => {}
|
|
||||||
Poll::Ready(Err(e)) => return Poll::Ready(Err(timeout::Error::inner(e))),
|
|
||||||
}
|
|
||||||
|
|
||||||
let delay = self.delay().poll_unpin(cx);
|
|
||||||
|
|
||||||
// Now check the timer
|
|
||||||
match ready!(delay) {
|
|
||||||
Ok(_) => Poll::Ready(Err(timeout::Error::elapsed())),
|
|
||||||
Err(e) => Poll::Ready(Err(timeout::Error::timer(e))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "tarpc"
|
name = "tarpc"
|
||||||
version = "0.18.0"
|
version = "0.22.0"
|
||||||
authors = ["Adam Wright <adam.austin.wright@gmail.com>", "Tim Kuehn <timothy.j.kuehn@gmail.com>"]
|
authors = ["Adam Wright <adam.austin.wright@gmail.com>", "Tim Kuehn <timothy.j.kuehn@gmail.com>"]
|
||||||
edition = "2018"
|
edition = "2018"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
@@ -13,26 +13,60 @@ readme = "../README.md"
|
|||||||
description = "An RPC framework for Rust with a focus on ease of use."
|
description = "An RPC framework for Rust with a focus on ease of use."
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
serde1 = ["rpc/serde1", "serde", "serde/derive"]
|
default = []
|
||||||
|
|
||||||
|
serde1 = ["tarpc-plugins/serde1", "serde", "serde/derive"]
|
||||||
|
tokio1 = []
|
||||||
|
serde-transport = ["tokio-serde", "tokio-util/codec"]
|
||||||
|
tcp = ["tokio/net", "tokio/stream"]
|
||||||
|
|
||||||
|
full = ["serde1", "tokio1", "serde-transport", "tcp"]
|
||||||
|
|
||||||
[badges]
|
[badges]
|
||||||
travis-ci = { repository = "google/tarpc" }
|
travis-ci = { repository = "google/tarpc" }
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
futures-preview = { version = "0.3.0-alpha.16", features = ["compat"] }
|
anyhow = "1.0"
|
||||||
|
fnv = "1.0"
|
||||||
|
futures = "0.3"
|
||||||
|
humantime = "1.0"
|
||||||
log = "0.4"
|
log = "0.4"
|
||||||
serde = { optional = true, version = "1.0" }
|
pin-project = "0.4.17"
|
||||||
rpc = { package = "tarpc-lib", path = "../rpc", version = "0.6" }
|
rand = "0.7"
|
||||||
tarpc-plugins = { path = "../plugins", version = "0.5.0" }
|
tokio = { version = "0.2", features = ["time"] }
|
||||||
|
serde = { optional = true, version = "1.0", features = ["derive"] }
|
||||||
|
static_assertions = "1.1.0"
|
||||||
|
tarpc-plugins = { path = "../plugins", version = "0.8" }
|
||||||
|
tokio-util = { optional = true, version = "0.3" }
|
||||||
|
tokio-serde = { optional = true, version = "0.6" }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
bincode = "1"
|
assert_matches = "1.0"
|
||||||
bytes = { version = "0.4", features = ["serde"] }
|
bincode = "1.3"
|
||||||
humantime = "1.0"
|
bytes = { version = "0.5", features = ["serde"] }
|
||||||
bincode-transport = { package = "tarpc-bincode-transport", version = "0.7", path = "../bincode-transport" }
|
|
||||||
env_logger = "0.6"
|
env_logger = "0.6"
|
||||||
libtest = "0.0.1"
|
flate2 = "1.0.16"
|
||||||
tokio = "0.1"
|
futures = "0.3"
|
||||||
tokio-executor = "0.1"
|
humantime = "1.0"
|
||||||
tokio-tcp = "0.1"
|
log = "0.4"
|
||||||
pin-utils = "0.1.0-alpha.4"
|
pin-utils = "0.1.0-alpha"
|
||||||
|
serde_bytes = "0.11"
|
||||||
|
tokio = { version = "0.2", features = ["full"] }
|
||||||
|
tokio-serde = { version = "0.6", features = ["json", "bincode"] }
|
||||||
|
trybuild = "1.0"
|
||||||
|
|
||||||
|
[package.metadata.docs.rs]
|
||||||
|
all-features = true
|
||||||
|
rustdoc-args = ["--cfg", "docsrs"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "server_calling_server"
|
||||||
|
required-features = ["full"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "readme"
|
||||||
|
required-features = ["full"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "pubsub"
|
||||||
|
required-features = ["full"]
|
||||||
|
|||||||
130
tarpc/examples/compression.rs
Normal file
130
tarpc/examples/compression.rs
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
use flate2::{read::DeflateDecoder, write::DeflateEncoder, Compression};
|
||||||
|
use futures::{Sink, SinkExt, Stream, StreamExt, TryStreamExt};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_bytes::ByteBuf;
|
||||||
|
use std::{io, io::Read, io::Write};
|
||||||
|
use tarpc::{
|
||||||
|
client, context,
|
||||||
|
serde_transport::tcp,
|
||||||
|
server::{BaseChannel, Channel},
|
||||||
|
};
|
||||||
|
use tokio_serde::formats::Bincode;
|
||||||
|
|
||||||
|
/// Type of compression that should be enabled on the request. The transport is free to ignore this.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize)]
|
||||||
|
pub enum CompressionAlgorithm {
|
||||||
|
Deflate,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Deserialize, Serialize)]
|
||||||
|
pub enum CompressedMessage<T> {
|
||||||
|
Uncompressed(T),
|
||||||
|
Compressed {
|
||||||
|
algorithm: CompressionAlgorithm,
|
||||||
|
payload: ByteBuf,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
enum CompressionType {
|
||||||
|
Uncompressed,
|
||||||
|
Compressed,
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn compress<T>(message: T) -> io::Result<CompressedMessage<T>>
|
||||||
|
where
|
||||||
|
T: Serialize,
|
||||||
|
{
|
||||||
|
let message = serialize(message)?;
|
||||||
|
let mut encoder = DeflateEncoder::new(Vec::new(), Compression::default());
|
||||||
|
encoder.write_all(&message).unwrap();
|
||||||
|
let compressed = encoder.finish()?;
|
||||||
|
Ok(CompressedMessage::Compressed {
|
||||||
|
algorithm: CompressionAlgorithm::Deflate,
|
||||||
|
payload: ByteBuf::from(compressed),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn decompress<T>(message: CompressedMessage<T>) -> io::Result<T>
|
||||||
|
where
|
||||||
|
for<'a> T: Deserialize<'a>,
|
||||||
|
{
|
||||||
|
match message {
|
||||||
|
CompressedMessage::Compressed { algorithm, payload } => {
|
||||||
|
if algorithm != CompressionAlgorithm::Deflate {
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::InvalidData,
|
||||||
|
format!("Compression algorithm {:?} not supported", algorithm),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let mut deflater = DeflateDecoder::new(payload.as_slice());
|
||||||
|
let mut payload = ByteBuf::new();
|
||||||
|
deflater.read_to_end(&mut payload)?;
|
||||||
|
let message = deserialize(payload)?;
|
||||||
|
Ok(message)
|
||||||
|
}
|
||||||
|
CompressedMessage::Uncompressed(message) => Ok(message),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn serialize<T: Serialize>(t: T) -> io::Result<ByteBuf> {
|
||||||
|
bincode::serialize(&t)
|
||||||
|
.map(ByteBuf::from)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn deserialize<D>(message: ByteBuf) -> io::Result<D>
|
||||||
|
where
|
||||||
|
for<'a> D: Deserialize<'a>,
|
||||||
|
{
|
||||||
|
bincode::deserialize(message.as_ref()).map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_compression<In, Out>(
|
||||||
|
transport: impl Stream<Item = io::Result<CompressedMessage<In>>>
|
||||||
|
+ Sink<CompressedMessage<Out>, Error = io::Error>,
|
||||||
|
) -> impl Stream<Item = io::Result<In>> + Sink<Out, Error = io::Error>
|
||||||
|
where
|
||||||
|
Out: Serialize,
|
||||||
|
for<'a> In: Deserialize<'a>,
|
||||||
|
{
|
||||||
|
transport.with(compress).and_then(decompress)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait World {
|
||||||
|
async fn hello(name: String) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
struct HelloServer;
|
||||||
|
|
||||||
|
#[tarpc::server]
|
||||||
|
impl World for HelloServer {
|
||||||
|
async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
format!("Hey, {}!", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let mut incoming = tcp::listen("localhost:0", Bincode::default).await?;
|
||||||
|
let addr = incoming.local_addr();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let transport = incoming.next().await.unwrap().unwrap();
|
||||||
|
BaseChannel::with_defaults(add_compression(transport))
|
||||||
|
.respond_with(HelloServer.serve())
|
||||||
|
.execute()
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
|
||||||
|
let transport = tcp::connect(addr, Bincode::default).await?;
|
||||||
|
let mut client =
|
||||||
|
WorldClient::new(client::Config::default(), add_compression(transport)).spawn()?;
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"{}",
|
||||||
|
client.hello(context::current(), "friend".into()).await?
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -4,186 +4,341 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![feature(
|
/// - The PubSub server sets up TCP listeners on 2 ports, the "subscriber" port and the "publisher"
|
||||||
arbitrary_self_types,
|
/// port. Because both publishers and subscribers initiate their connections to the PubSub
|
||||||
async_await,
|
/// server, the server requires no prior knowledge of either publishers or subscribers.
|
||||||
existential_type,
|
///
|
||||||
proc_macro_hygiene
|
/// - Subscribers connect to the server on the server's "subscriber" port. Once a connection is
|
||||||
)]
|
/// established, the server acts as the client of the Subscriber service, initially requesting
|
||||||
|
/// the topics the subscriber is interested in, and subsequently sending topical messages to the
|
||||||
|
/// subscriber.
|
||||||
|
///
|
||||||
|
/// - Publishers connect to the server on the "publisher" port and, once connected, they send
|
||||||
|
/// topical messages via Publisher service to the server. The server then broadcasts each
|
||||||
|
/// messages to all clients subscribed to the topic of that message.
|
||||||
|
///
|
||||||
|
/// Subscriber Publisher PubSub Server
|
||||||
|
/// T1 | | |
|
||||||
|
/// T2 |-----Connect------------------------------------------------------>|
|
||||||
|
/// T3 | | |
|
||||||
|
/// T2 |<-------------------------------------------------------Topics-----|
|
||||||
|
/// T2 |-----(OK) Topics-------------------------------------------------->|
|
||||||
|
/// T3 | | |
|
||||||
|
/// T4 | |-----Connect-------------------->|
|
||||||
|
/// T5 | | |
|
||||||
|
/// T6 | |-----Publish-------------------->|
|
||||||
|
/// T7 | | |
|
||||||
|
/// T8 |<------------------------------------------------------Receive-----|
|
||||||
|
/// T9 |-----(OK) Receive------------------------------------------------->|
|
||||||
|
/// T10 | | |
|
||||||
|
/// T11 | |<--------------(OK) Publish------|
|
||||||
|
use anyhow::anyhow;
|
||||||
use futures::{
|
use futures::{
|
||||||
future::{self, Ready},
|
channel::oneshot,
|
||||||
|
future::{self, AbortHandle},
|
||||||
prelude::*,
|
prelude::*,
|
||||||
Future,
|
|
||||||
};
|
|
||||||
use rpc::{
|
|
||||||
client, context,
|
|
||||||
server::{self, Handler, Server},
|
|
||||||
};
|
};
|
||||||
|
use log::info;
|
||||||
|
use publisher::Publisher as _;
|
||||||
use std::{
|
use std::{
|
||||||
collections::HashMap,
|
collections::HashMap,
|
||||||
io,
|
io,
|
||||||
net::SocketAddr,
|
net::SocketAddr,
|
||||||
sync::{Arc, Mutex},
|
sync::{Arc, Mutex, RwLock},
|
||||||
thread,
|
|
||||||
time::Duration,
|
|
||||||
};
|
};
|
||||||
|
use subscriber::Subscriber as _;
|
||||||
|
use tarpc::{
|
||||||
|
client, context,
|
||||||
|
serde_transport::tcp,
|
||||||
|
server::{self, Channel},
|
||||||
|
};
|
||||||
|
use tokio::net::ToSocketAddrs;
|
||||||
|
use tokio_serde::formats::Json;
|
||||||
|
|
||||||
pub mod subscriber {
|
pub mod subscriber {
|
||||||
tarpc::service! {
|
#[tarpc::service]
|
||||||
rpc receive(message: String);
|
pub trait Subscriber {
|
||||||
|
async fn topics() -> Vec<String>;
|
||||||
|
async fn receive(topic: String, message: String);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub mod publisher {
|
pub mod publisher {
|
||||||
use std::net::SocketAddr;
|
#[tarpc::service]
|
||||||
tarpc::service! {
|
pub trait Publisher {
|
||||||
rpc broadcast(message: String);
|
async fn publish(topic: String, message: String);
|
||||||
rpc subscribe(id: u32, address: SocketAddr) -> Result<(), String>;
|
|
||||||
rpc unsubscribe(id: u32);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
struct Subscriber {
|
struct Subscriber {
|
||||||
id: u32,
|
local_addr: SocketAddr,
|
||||||
|
topics: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl subscriber::Service for Subscriber {
|
#[tarpc::server]
|
||||||
type ReceiveFut = Ready<()>;
|
impl subscriber::Subscriber for Subscriber {
|
||||||
|
async fn topics(self, _: context::Context) -> Vec<String> {
|
||||||
|
self.topics.clone()
|
||||||
|
}
|
||||||
|
|
||||||
fn receive(self, _: context::Context, message: String) -> Self::ReceiveFut {
|
async fn receive(self, _: context::Context, topic: String, message: String) {
|
||||||
println!("{} received message: {}", self.id, message);
|
info!(
|
||||||
future::ready(())
|
"[{}] received message on topic '{}': {}",
|
||||||
|
self.local_addr, topic, message
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct SubscriberHandle(AbortHandle);
|
||||||
|
|
||||||
|
impl Drop for SubscriberHandle {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.0.abort();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Subscriber {
|
impl Subscriber {
|
||||||
async fn listen(id: u32, config: server::Config) -> io::Result<SocketAddr> {
|
async fn connect(
|
||||||
let incoming = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
publisher_addr: impl ToSocketAddrs,
|
||||||
let addr = incoming.local_addr();
|
topics: Vec<String>,
|
||||||
tokio_executor::spawn(
|
) -> anyhow::Result<SubscriberHandle> {
|
||||||
server::new(config)
|
let publisher = tcp::connect(publisher_addr, Json::default).await?;
|
||||||
.incoming(incoming)
|
let local_addr = publisher.local_addr()?;
|
||||||
.take(1)
|
let mut handler = server::BaseChannel::with_defaults(publisher)
|
||||||
.respond_with(subscriber::serve(Subscriber { id }))
|
.respond_with(Subscriber { local_addr, topics }.serve());
|
||||||
.unit_error()
|
// The first request is for the topics being subscriibed to.
|
||||||
.boxed()
|
match handler.next().await {
|
||||||
.compat(),
|
Some(init_topics) => init_topics?.await,
|
||||||
);
|
None => {
|
||||||
Ok(addr)
|
return Err(anyhow!(
|
||||||
|
"[{}] Server never initialized the subscriber.",
|
||||||
|
local_addr
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let (handler, abort_handle) = future::abortable(handler.execute());
|
||||||
|
tokio::spawn(async move {
|
||||||
|
match handler.await {
|
||||||
|
Ok(()) | Err(future::Aborted) => info!("[{}] subscriber shutdown.", local_addr),
|
||||||
|
}
|
||||||
|
});
|
||||||
|
Ok(SubscriberHandle(abort_handle))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct Subscription {
|
||||||
|
subscriber: subscriber::SubscriberClient,
|
||||||
|
topics: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
struct Publisher {
|
struct Publisher {
|
||||||
clients: Arc<Mutex<HashMap<u32, subscriber::Client>>>,
|
clients: Arc<Mutex<HashMap<SocketAddr, Subscription>>>,
|
||||||
|
subscriptions: Arc<RwLock<HashMap<String, HashMap<SocketAddr, subscriber::SubscriberClient>>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PublisherAddrs {
|
||||||
|
publisher: SocketAddr,
|
||||||
|
subscriptions: SocketAddr,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Publisher {
|
impl Publisher {
|
||||||
fn new() -> Publisher {
|
async fn start(self) -> io::Result<PublisherAddrs> {
|
||||||
Publisher {
|
let mut connecting_publishers = tcp::listen("localhost:0", Json::default).await?;
|
||||||
clients: Arc::new(Mutex::new(HashMap::new())),
|
|
||||||
}
|
let publisher_addrs = PublisherAddrs {
|
||||||
|
publisher: connecting_publishers.local_addr(),
|
||||||
|
subscriptions: self.clone().start_subscription_manager().await?,
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("[{}] listening for publishers.", publisher_addrs.publisher);
|
||||||
|
tokio::spawn(async move {
|
||||||
|
// Because this is just an example, we know there will only be one publisher. In more
|
||||||
|
// realistic code, this would be a loop to continually accept new publisher
|
||||||
|
// connections.
|
||||||
|
let publisher = connecting_publishers.next().await.unwrap().unwrap();
|
||||||
|
info!("[{}] publisher connected.", publisher.peer_addr().unwrap());
|
||||||
|
|
||||||
|
server::BaseChannel::with_defaults(publisher)
|
||||||
|
.respond_with(self.serve())
|
||||||
|
.execute()
|
||||||
|
.await
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(publisher_addrs)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
impl publisher::Service for Publisher {
|
async fn start_subscription_manager(mut self) -> io::Result<SocketAddr> {
|
||||||
existential type BroadcastFut: Future<Output = ()>;
|
let mut connecting_subscribers = tcp::listen("localhost:0", Json::default)
|
||||||
|
.await?
|
||||||
|
.filter_map(|r| future::ready(r.ok()));
|
||||||
|
let new_subscriber_addr = connecting_subscribers.get_ref().local_addr();
|
||||||
|
info!("[{}] listening for subscribers.", new_subscriber_addr);
|
||||||
|
|
||||||
fn broadcast(self, _: context::Context, message: String) -> Self::BroadcastFut {
|
tokio::spawn(async move {
|
||||||
async fn broadcast(clients: Arc<Mutex<HashMap<u32, subscriber::Client>>>, message: String) {
|
while let Some(conn) = connecting_subscribers.next().await {
|
||||||
let mut clients = clients.lock().unwrap().clone();
|
let subscriber_addr = conn.peer_addr().unwrap();
|
||||||
for client in clients.values_mut() {
|
|
||||||
// Ignore failing subscribers. In a real pubsub,
|
let tarpc::client::NewClient {
|
||||||
// you'd want to continually retry until subscribers
|
client: subscriber,
|
||||||
// ack.
|
dispatch,
|
||||||
let _ = client.receive(context::current(), message.clone()).await;
|
} = subscriber::SubscriberClient::new(client::Config::default(), conn);
|
||||||
|
let (ready_tx, ready) = oneshot::channel();
|
||||||
|
self.clone()
|
||||||
|
.start_subscriber_gc(subscriber_addr, dispatch, ready);
|
||||||
|
|
||||||
|
// Populate the topics
|
||||||
|
self.initialize_subscription(subscriber_addr, subscriber)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// Signal that initialization is done.
|
||||||
|
ready_tx.send(()).unwrap();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(new_subscriber_addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn initialize_subscription(
|
||||||
|
&mut self,
|
||||||
|
subscriber_addr: SocketAddr,
|
||||||
|
mut subscriber: subscriber::SubscriberClient,
|
||||||
|
) {
|
||||||
|
// Populate the topics
|
||||||
|
if let Ok(topics) = subscriber.topics(context::current()).await {
|
||||||
|
self.clients.lock().unwrap().insert(
|
||||||
|
subscriber_addr,
|
||||||
|
Subscription {
|
||||||
|
subscriber: subscriber.clone(),
|
||||||
|
topics: topics.clone(),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
info!("[{}] subscribed to topics: {:?}", subscriber_addr, topics);
|
||||||
|
let mut subscriptions = self.subscriptions.write().unwrap();
|
||||||
|
for topic in topics {
|
||||||
|
subscriptions
|
||||||
|
.entry(topic)
|
||||||
|
.or_insert_with(HashMap::new)
|
||||||
|
.insert(subscriber_addr, subscriber.clone());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
broadcast(self.clients.clone(), message)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
existential type SubscribeFut: Future<Output = Result<(), String>>;
|
fn start_subscriber_gc(
|
||||||
|
self,
|
||||||
fn subscribe(self, _: context::Context, id: u32, addr: SocketAddr) -> Self::SubscribeFut {
|
subscriber_addr: SocketAddr,
|
||||||
async fn subscribe(
|
client_dispatch: impl Future<Output = anyhow::Result<()>> + Send + 'static,
|
||||||
clients: Arc<Mutex<HashMap<u32, subscriber::Client>>>,
|
subscriber_ready: oneshot::Receiver<()>,
|
||||||
id: u32,
|
) {
|
||||||
addr: SocketAddr,
|
tokio::spawn(async move {
|
||||||
) -> io::Result<()> {
|
if let Err(e) = client_dispatch.await {
|
||||||
let conn = bincode_transport::connect(&addr).await?;
|
info!(
|
||||||
let subscriber = subscriber::new_stub(client::Config::default(), conn).await?;
|
"[{}] subscriber connection broken: {:?}",
|
||||||
println!("Subscribing {}.", id);
|
subscriber_addr, e
|
||||||
clients.lock().unwrap().insert(id, subscriber);
|
)
|
||||||
Ok(())
|
}
|
||||||
}
|
// Don't clean up the subscriber until initialization is done.
|
||||||
|
let _ = subscriber_ready.await;
|
||||||
subscribe(Arc::clone(&self.clients), id, addr).map_err(|e| e.to_string())
|
if let Some(subscription) = self.clients.lock().unwrap().remove(&subscriber_addr) {
|
||||||
}
|
info!(
|
||||||
|
"[{} unsubscribing from topics: {:?}",
|
||||||
existential type UnsubscribeFut: Future<Output = ()>;
|
subscriber_addr, subscription.topics
|
||||||
|
);
|
||||||
fn unsubscribe(self, _: context::Context, id: u32) -> Self::UnsubscribeFut {
|
let mut subscriptions = self.subscriptions.write().unwrap();
|
||||||
println!("Unsubscribing {}", id);
|
for topic in subscription.topics {
|
||||||
let mut clients = self.clients.lock().unwrap();
|
let subscribers = subscriptions.get_mut(&topic).unwrap();
|
||||||
if let None = clients.remove(&id) {
|
subscribers.remove(&subscriber_addr);
|
||||||
eprintln!(
|
if subscribers.is_empty() {
|
||||||
"Client {} not found. Existings clients: {:?}",
|
subscriptions.remove(&topic);
|
||||||
id, &*clients
|
}
|
||||||
);
|
}
|
||||||
}
|
}
|
||||||
future::ready(())
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> io::Result<()> {
|
#[tarpc::server]
|
||||||
|
impl publisher::Publisher for Publisher {
|
||||||
|
async fn publish(self, _: context::Context, topic: String, message: String) {
|
||||||
|
info!("received message to publish.");
|
||||||
|
let mut subscribers = match self.subscriptions.read().unwrap().get(&topic) {
|
||||||
|
None => return,
|
||||||
|
Some(subscriptions) => subscriptions.clone(),
|
||||||
|
};
|
||||||
|
let mut publications = Vec::new();
|
||||||
|
for client in subscribers.values_mut() {
|
||||||
|
publications.push(client.receive(context::current(), topic.clone(), message.clone()));
|
||||||
|
}
|
||||||
|
// Ignore failing subscribers. In a real pubsub, you'd want to continually retry until
|
||||||
|
// subscribers ack. Of course, a lot would be different in a real pubsub :)
|
||||||
|
for response in future::join_all(publications).await {
|
||||||
|
if let Err(e) = response {
|
||||||
|
info!("failed to broadcast to subscriber: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
env_logger::init();
|
env_logger::init();
|
||||||
let transport = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
|
||||||
let publisher_addr = transport.local_addr();
|
|
||||||
tokio_executor::spawn(
|
|
||||||
Server::default()
|
|
||||||
.incoming(transport)
|
|
||||||
.take(1)
|
|
||||||
.respond_with(publisher::serve(Publisher::new()))
|
|
||||||
.unit_error()
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let subscriber1 = Subscriber::listen(0, server::Config::default()).await?;
|
let clients = Arc::new(Mutex::new(HashMap::new()));
|
||||||
let subscriber2 = Subscriber::listen(1, server::Config::default()).await?;
|
let addrs = Publisher {
|
||||||
|
clients,
|
||||||
let publisher_conn = bincode_transport::connect(&publisher_addr);
|
subscriptions: Arc::new(RwLock::new(HashMap::new())),
|
||||||
let publisher_conn = publisher_conn.await?;
|
|
||||||
let mut publisher = publisher::new_stub(client::Config::default(), publisher_conn).await?;
|
|
||||||
|
|
||||||
if let Err(e) = publisher
|
|
||||||
.subscribe(context::current(), 0, subscriber1)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
eprintln!("Couldn't subscribe subscriber 0: {}", e);
|
|
||||||
}
|
|
||||||
if let Err(e) = publisher
|
|
||||||
.subscribe(context::current(), 1, subscriber2)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
eprintln!("Couldn't subscribe subscriber 1: {}", e);
|
|
||||||
}
|
}
|
||||||
|
.start()
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let _subscriber0 = Subscriber::connect(
|
||||||
|
addrs.subscriptions,
|
||||||
|
vec!["calculus".into(), "cool shorts".into()],
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let _subscriber1 = Subscriber::connect(
|
||||||
|
addrs.subscriptions,
|
||||||
|
vec!["cool shorts".into(), "history".into()],
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut publisher = publisher::PublisherClient::new(
|
||||||
|
client::Config::default(),
|
||||||
|
tcp::connect(addrs.publisher, Json::default).await?,
|
||||||
|
)
|
||||||
|
.spawn()?;
|
||||||
|
|
||||||
println!("Broadcasting...");
|
|
||||||
publisher
|
publisher
|
||||||
.broadcast(context::current(), "hello to all".to_string())
|
.publish(context::current(), "calculus".into(), "sqrt(2)".into())
|
||||||
.await?;
|
.await?;
|
||||||
publisher.unsubscribe(context::current(), 1).await?;
|
|
||||||
publisher
|
publisher
|
||||||
.broadcast(context::current(), "hi again".to_string())
|
.publish(
|
||||||
|
context::current(),
|
||||||
|
"cool shorts".into(),
|
||||||
|
"hello to all".into(),
|
||||||
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
publisher
|
||||||
|
.publish(context::current(), "history".into(), "napoleon".to_string())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
drop(_subscriber0);
|
||||||
|
|
||||||
|
publisher
|
||||||
|
.publish(
|
||||||
|
context::current(),
|
||||||
|
"cool shorts".into(),
|
||||||
|
"hello to who?".into(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
info!("done.");
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
|
||||||
tokio::run(run().boxed().map_err(|e| panic!(e)).boxed().compat());
|
|
||||||
thread::sleep(Duration::from_millis(100));
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -4,32 +4,30 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![feature(arbitrary_self_types, async_await, proc_macro_hygiene)]
|
|
||||||
|
|
||||||
use futures::{
|
use futures::{
|
||||||
compat::Executor01CompatExt,
|
|
||||||
future::{self, Ready},
|
future::{self, Ready},
|
||||||
prelude::*,
|
prelude::*,
|
||||||
};
|
};
|
||||||
use rpc::{
|
|
||||||
client, context,
|
|
||||||
server::{Handler, Server},
|
|
||||||
};
|
|
||||||
use std::io;
|
use std::io;
|
||||||
|
use tarpc::{
|
||||||
|
client, context,
|
||||||
|
server::{BaseChannel, Channel},
|
||||||
|
};
|
||||||
|
use tokio_serde::formats::Json;
|
||||||
|
|
||||||
// This is the service definition. It looks a lot like a trait definition.
|
/// This is the service definition. It looks a lot like a trait definition.
|
||||||
// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
/// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
#[tarpc::service]
|
||||||
tarpc::service! {
|
pub trait World {
|
||||||
rpc hello(name: String) -> String;
|
async fn hello(name: String) -> String;
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is the type that implements the generated Service trait. It is the business logic
|
/// This is the type that implements the generated World trait. It is the business logic
|
||||||
// and is used to start the server.
|
/// and is used to start the server.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct HelloServer;
|
struct HelloServer;
|
||||||
|
|
||||||
impl Service for HelloServer {
|
impl World for HelloServer {
|
||||||
// Each defined rpc generates two items in the trait, a fn that serves the RPC, and
|
// Each defined rpc generates two items in the trait, a fn that serves the RPC, and
|
||||||
// an associated type representing the future output by the fn.
|
// an associated type representing the future output by the fn.
|
||||||
|
|
||||||
@@ -40,48 +38,42 @@ impl Service for HelloServer {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> io::Result<()> {
|
#[tokio::main]
|
||||||
// bincode_transport is provided by the associated crate bincode-transport. It makes it easy
|
async fn main() -> io::Result<()> {
|
||||||
// to start up a serde-powered bincode serialization strategy over TCP.
|
// tarpc_json_transport is provided by the associated crate json_transport. It makes it
|
||||||
let transport = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
// easy to start up a serde-powered JSON serialization strategy over TCP.
|
||||||
|
let mut transport = tarpc::serde_transport::tcp::listen("localhost:0", Json::default).await?;
|
||||||
let addr = transport.local_addr();
|
let addr = transport.local_addr();
|
||||||
|
|
||||||
// The server is configured with the defaults.
|
let server = async move {
|
||||||
let server = Server::default()
|
// For this example, we're just going to wait for one connection.
|
||||||
// Server can listen on any type that implements the Transport trait.
|
let client = transport.next().await.unwrap().unwrap();
|
||||||
.incoming(transport)
|
|
||||||
// Close the stream after the client connects
|
|
||||||
.take(1)
|
|
||||||
// serve is generated by the tarpc::service! macro. It takes as input any type implementing
|
|
||||||
// the generated Service trait.
|
|
||||||
.respond_with(serve(HelloServer));
|
|
||||||
|
|
||||||
tokio_executor::spawn(server.unit_error().boxed().compat());
|
// `Channel` is a trait representing a server-side connection. It is a trait to allow
|
||||||
|
// for some channels to be instrumented: for example, to track the number of open connections.
|
||||||
|
// BaseChannel is the most basic channel, simply wrapping a transport with no added
|
||||||
|
// functionality.
|
||||||
|
BaseChannel::with_defaults(client)
|
||||||
|
// serve_world is generated by the tarpc::service attribute. It takes as input any type
|
||||||
|
// implementing the generated World trait.
|
||||||
|
.respond_with(HelloServer.serve())
|
||||||
|
.execute()
|
||||||
|
.await;
|
||||||
|
};
|
||||||
|
tokio::spawn(server);
|
||||||
|
|
||||||
let transport = bincode_transport::connect(&addr).await?;
|
let transport = tarpc::serde_transport::tcp::connect(addr, Json::default).await?;
|
||||||
|
|
||||||
// new_stub is generated by the tarpc::service! macro. Like Server, it takes a config and any
|
// WorldClient is generated by the tarpc::service attribute. It has a constructor `new` that
|
||||||
// Transport as input, and returns a Client, also generated by the macro.
|
// takes a config and any Transport as input.
|
||||||
// by the service mcro.
|
let mut client = WorldClient::new(client::Config::default(), transport).spawn()?;
|
||||||
let mut client = new_stub(client::Config::default(), transport).await?;
|
|
||||||
|
|
||||||
// The client has an RPC method for each RPC defined in tarpc::service!. It takes the same args
|
// The client has an RPC method for each RPC defined in the annotated trait. It takes the same
|
||||||
// as defined, with the addition of a Context, which is always the first arg. The Context
|
// args as defined, with the addition of a Context, which is always the first arg. The Context
|
||||||
// specifies a deadline and trace information which can be helpful in debugging requests.
|
// specifies a deadline and trace information which can be helpful in debugging requests.
|
||||||
let hello = client.hello(context::current(), "Stim".to_string()).await?;
|
let hello = client.hello(context::current(), "Stim".to_string()).await?;
|
||||||
|
|
||||||
println!("{}", hello);
|
eprintln!("{}", hello);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
tokio::run(
|
|
||||||
run()
|
|
||||||
.map_err(|e| eprintln!("Oh no: {}", e))
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -4,101 +4,89 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![feature(
|
use crate::{add::Add as AddService, double::Double as DoubleService};
|
||||||
existential_type,
|
use futures::{future, prelude::*};
|
||||||
arbitrary_self_types,
|
use std::io;
|
||||||
async_await,
|
use tarpc::{
|
||||||
proc_macro_hygiene
|
|
||||||
)]
|
|
||||||
|
|
||||||
use crate::{add::Service as AddService, double::Service as DoubleService};
|
|
||||||
use futures::{
|
|
||||||
compat::Executor01CompatExt,
|
|
||||||
future::{self, Ready},
|
|
||||||
prelude::*,
|
|
||||||
};
|
|
||||||
use rpc::{
|
|
||||||
client, context,
|
client, context,
|
||||||
server::{Handler, Server},
|
server::{Handler, Server},
|
||||||
};
|
};
|
||||||
use std::io;
|
use tokio_serde::formats::Json;
|
||||||
|
|
||||||
pub mod add {
|
pub mod add {
|
||||||
tarpc::service! {
|
#[tarpc::service]
|
||||||
|
pub trait Add {
|
||||||
/// Add two ints together.
|
/// Add two ints together.
|
||||||
rpc add(x: i32, y: i32) -> i32;
|
async fn add(x: i32, y: i32) -> i32;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub mod double {
|
pub mod double {
|
||||||
tarpc::service! {
|
#[tarpc::service]
|
||||||
|
pub trait Double {
|
||||||
/// 2 * x
|
/// 2 * x
|
||||||
rpc double(x: i32) -> Result<i32, String>;
|
async fn double(x: i32) -> Result<i32, String>;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct AddServer;
|
struct AddServer;
|
||||||
|
|
||||||
|
#[tarpc::server]
|
||||||
impl AddService for AddServer {
|
impl AddService for AddServer {
|
||||||
type AddFut = Ready<i32>;
|
async fn add(self, _: context::Context, x: i32, y: i32) -> i32 {
|
||||||
|
x + y
|
||||||
fn add(self, _: context::Context, x: i32, y: i32) -> Self::AddFut {
|
|
||||||
future::ready(x + y)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct DoubleServer {
|
struct DoubleServer {
|
||||||
add_client: add::Client,
|
add_client: add::AddClient,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tarpc::server]
|
||||||
impl DoubleService for DoubleServer {
|
impl DoubleService for DoubleServer {
|
||||||
existential type DoubleFut: Future<Output = Result<i32, String>> + Send;
|
async fn double(mut self, _: context::Context, x: i32) -> Result<i32, String> {
|
||||||
|
self.add_client
|
||||||
fn double(self, _: context::Context, x: i32) -> Self::DoubleFut {
|
.add(context::current(), x, x)
|
||||||
async fn double(mut client: add::Client, x: i32) -> Result<i32, String> {
|
.await
|
||||||
client
|
.map_err(|e| e.to_string())
|
||||||
.add(context::current(), x, x)
|
|
||||||
.await
|
|
||||||
.map_err(|e| e.to_string())
|
|
||||||
}
|
|
||||||
|
|
||||||
double(self.add_client.clone(), x)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn run() -> io::Result<()> {
|
#[tokio::main]
|
||||||
let add_listener = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
async fn main() -> io::Result<()> {
|
||||||
let addr = add_listener.local_addr();
|
env_logger::init();
|
||||||
|
|
||||||
|
let add_listener = tarpc::serde_transport::tcp::listen("localhost:0", Json::default)
|
||||||
|
.await?
|
||||||
|
.filter_map(|r| future::ready(r.ok()));
|
||||||
|
let addr = add_listener.get_ref().local_addr();
|
||||||
let add_server = Server::default()
|
let add_server = Server::default()
|
||||||
.incoming(add_listener)
|
.incoming(add_listener)
|
||||||
.take(1)
|
.take(1)
|
||||||
.respond_with(add::serve(AddServer));
|
.respond_with(AddServer.serve());
|
||||||
tokio_executor::spawn(add_server.unit_error().boxed().compat());
|
tokio::spawn(add_server);
|
||||||
|
|
||||||
let to_add_server = bincode_transport::connect(&addr).await?;
|
let to_add_server = tarpc::serde_transport::tcp::connect(addr, Json::default).await?;
|
||||||
let add_client = add::new_stub(client::Config::default(), to_add_server).await?;
|
let add_client = add::AddClient::new(client::Config::default(), to_add_server).spawn()?;
|
||||||
|
|
||||||
let double_listener = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
let double_listener = tarpc::serde_transport::tcp::listen("localhost:0", Json::default)
|
||||||
let addr = double_listener.local_addr();
|
.await?
|
||||||
let double_server = rpc::Server::default()
|
.filter_map(|r| future::ready(r.ok()));
|
||||||
|
let addr = double_listener.get_ref().local_addr();
|
||||||
|
let double_server = tarpc::Server::default()
|
||||||
.incoming(double_listener)
|
.incoming(double_listener)
|
||||||
.take(1)
|
.take(1)
|
||||||
.respond_with(double::serve(DoubleServer { add_client }));
|
.respond_with(DoubleServer { add_client }.serve());
|
||||||
tokio_executor::spawn(double_server.unit_error().boxed().compat());
|
tokio::spawn(double_server);
|
||||||
|
|
||||||
let to_double_server = bincode_transport::connect(&addr).await?;
|
let to_double_server = tarpc::serde_transport::tcp::connect(addr, Json::default).await?;
|
||||||
let mut double_client = double::new_stub(client::Config::default(), to_double_server).await?;
|
let mut double_client =
|
||||||
|
double::DoubleClient::new(client::Config::default(), to_double_server).spawn()?;
|
||||||
|
|
||||||
for i in 1..=5 {
|
for i in 1..=5 {
|
||||||
println!("{:?}", double_client.double(context::current(), i).await?);
|
eprintln!("{:?}", double_client.double(context::current(), i).await?);
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
|
||||||
env_logger::init();
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
tokio::run(run().map_err(|e| panic!(e)).boxed().compat());
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,416 +0,0 @@
|
|||||||
#![feature(
|
|
||||||
async_await,
|
|
||||||
arbitrary_self_types,
|
|
||||||
proc_macro_hygiene,
|
|
||||||
impl_trait_in_bindings
|
|
||||||
)]
|
|
||||||
|
|
||||||
mod registry {
|
|
||||||
use bytes::Bytes;
|
|
||||||
use futures::{
|
|
||||||
future::{ready, Ready},
|
|
||||||
prelude::*,
|
|
||||||
};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
pin::Pin,
|
|
||||||
sync::Arc,
|
|
||||||
task::{Context, Poll},
|
|
||||||
};
|
|
||||||
use tarpc::{
|
|
||||||
client::{self, Client},
|
|
||||||
context,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// A request to a named service.
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
pub struct ServiceRequest {
|
|
||||||
service_name: String,
|
|
||||||
request: Bytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A response from a named service.
|
|
||||||
#[derive(Serialize, Deserialize)]
|
|
||||||
pub struct ServiceResponse {
|
|
||||||
response: Bytes,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A list of registered services.
|
|
||||||
pub struct Registry<Services> {
|
|
||||||
registrations: Services,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Registry<Nil> {
|
|
||||||
fn default() -> Self {
|
|
||||||
Registry { registrations: Nil }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Services: MaybeServe + Sync> Registry<Services> {
|
|
||||||
/// Returns a function that serves requests for the registered services.
|
|
||||||
pub fn serve(
|
|
||||||
self,
|
|
||||||
) -> impl FnOnce(
|
|
||||||
context::Context,
|
|
||||||
ServiceRequest,
|
|
||||||
) -> Either<Services::Future, Ready<io::Result<ServiceResponse>>>
|
|
||||||
+ Clone {
|
|
||||||
let registrations = Arc::new(self.registrations);
|
|
||||||
move |cx, req: ServiceRequest| match registrations.serve(cx, &req) {
|
|
||||||
Some(serve) => Either::Left(serve),
|
|
||||||
None => Either::Right(ready(Err(io::Error::new(
|
|
||||||
io::ErrorKind::NotFound,
|
|
||||||
format!("Service '{}' not registered", req.service_name),
|
|
||||||
)))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Registers `serve` with the given `name` using the given serialization scheme.
|
|
||||||
pub fn register<S, Req, Resp, RespFut, Ser, De>(
|
|
||||||
self,
|
|
||||||
name: String,
|
|
||||||
serve: S,
|
|
||||||
deserialize: De,
|
|
||||||
serialize: Ser,
|
|
||||||
) -> Registry<Registration<impl Serve + Send + 'static, Services>>
|
|
||||||
where
|
|
||||||
Req: Send,
|
|
||||||
S: FnOnce(context::Context, Req) -> RespFut + Send + 'static + Clone,
|
|
||||||
RespFut: Future<Output = io::Result<Resp>> + Send + 'static,
|
|
||||||
De: FnOnce(Bytes) -> io::Result<Req> + Send + 'static + Clone,
|
|
||||||
Ser: FnOnce(Resp) -> io::Result<Bytes> + Send + 'static + Clone,
|
|
||||||
{
|
|
||||||
let registrations = Registration {
|
|
||||||
name: name,
|
|
||||||
serve: move |cx, req: Bytes| {
|
|
||||||
async move {
|
|
||||||
let req = deserialize.clone()(req)?;
|
|
||||||
let response = serve.clone()(cx, req).await?;
|
|
||||||
let response = serialize.clone()(response)?;
|
|
||||||
Ok(ServiceResponse { response })
|
|
||||||
}
|
|
||||||
},
|
|
||||||
rest: self.registrations,
|
|
||||||
};
|
|
||||||
Registry { registrations }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Creates a client that sends requests to a service
|
|
||||||
/// named `service_name`, over the given channel, using
|
|
||||||
/// the specified serialization scheme.
|
|
||||||
pub fn new_client<Req, Resp, Ser, De>(
|
|
||||||
service_name: String,
|
|
||||||
channel: &client::Channel<ServiceRequest, ServiceResponse>,
|
|
||||||
mut serialize: Ser,
|
|
||||||
mut deserialize: De,
|
|
||||||
) -> client::MapResponse<
|
|
||||||
client::WithRequest<
|
|
||||||
client::Channel<ServiceRequest, ServiceResponse>,
|
|
||||||
impl FnMut(Req) -> ServiceRequest,
|
|
||||||
>,
|
|
||||||
impl FnMut(ServiceResponse) -> Resp,
|
|
||||||
>
|
|
||||||
where
|
|
||||||
Req: Send + 'static,
|
|
||||||
Resp: Send + 'static,
|
|
||||||
De: FnMut(Bytes) -> io::Result<Resp> + Clone + Send + 'static,
|
|
||||||
Ser: FnMut(Req) -> io::Result<Bytes> + Clone + Send + 'static,
|
|
||||||
{
|
|
||||||
channel
|
|
||||||
.clone()
|
|
||||||
.with_request(move |req| {
|
|
||||||
ServiceRequest {
|
|
||||||
service_name: service_name.clone(),
|
|
||||||
// TODO: shouldn't need to unwrap here. Maybe with_request should allow for
|
|
||||||
// returning Result.
|
|
||||||
request: serialize(req).unwrap(),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
// TODO: same thing. Maybe this should be more like and_then rather than map.
|
|
||||||
.map_response(move |resp| deserialize(resp.response).unwrap())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Serves a request.
|
|
||||||
///
|
|
||||||
/// This trait is mostly an implementation detail that isn't used outside of the registry
|
|
||||||
/// internals.
|
|
||||||
pub trait Serve: Clone + Send + 'static {
|
|
||||||
type Response: Future<Output = io::Result<ServiceResponse>> + Send + 'static;
|
|
||||||
fn serve(self, cx: context::Context, request: Bytes) -> Self::Response;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Serves a request if the request is for a registered service.
|
|
||||||
///
|
|
||||||
/// This trait is mostly an implementation detail that isn't used outside of the registry
|
|
||||||
/// internals.
|
|
||||||
pub trait MaybeServe: Send + 'static {
|
|
||||||
type Future: Future<Output = io::Result<ServiceResponse>> + Send + 'static;
|
|
||||||
|
|
||||||
fn serve(&self, cx: context::Context, request: &ServiceRequest) -> Option<Self::Future>;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A registry starting with service S, followed by Rest.
|
|
||||||
///
|
|
||||||
/// This type is mostly an implementation detail that is not used directly
|
|
||||||
/// outside of the registry internals.
|
|
||||||
pub struct Registration<S, Rest> {
|
|
||||||
/// The registered service's name. Must be unique across all registered services.
|
|
||||||
name: String,
|
|
||||||
/// The registered service.
|
|
||||||
serve: S,
|
|
||||||
/// Any remaining registered services.
|
|
||||||
rest: Rest,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// An empty registry.
|
|
||||||
///
|
|
||||||
/// This type is mostly an implementation detail that is not used directly
|
|
||||||
/// outside of the registry internals.
|
|
||||||
pub struct Nil;
|
|
||||||
|
|
||||||
impl MaybeServe for Nil {
|
|
||||||
type Future = futures::future::Ready<io::Result<ServiceResponse>>;
|
|
||||||
|
|
||||||
fn serve(&self, _: context::Context, _: &ServiceRequest) -> Option<Self::Future> {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Rest> MaybeServe for Registration<S, Rest>
|
|
||||||
where
|
|
||||||
S: Serve,
|
|
||||||
Rest: MaybeServe,
|
|
||||||
{
|
|
||||||
type Future = Either<S::Response, Rest::Future>;
|
|
||||||
|
|
||||||
fn serve(&self, cx: context::Context, request: &ServiceRequest) -> Option<Self::Future> {
|
|
||||||
if self.name == request.service_name {
|
|
||||||
Some(Either::Left(
|
|
||||||
self.serve.clone().serve(cx, request.request.clone()),
|
|
||||||
))
|
|
||||||
} else {
|
|
||||||
self.rest.serve(cx, request).map(Either::Right)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Wraps either of two future types that both resolve to the same output type.
|
|
||||||
#[derive(Debug)]
|
|
||||||
#[must_use = "futures do nothing unless polled"]
|
|
||||||
pub enum Either<Left, Right> {
|
|
||||||
Left(Left),
|
|
||||||
Right(Right),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Output, Left, Right> Future for Either<Left, Right>
|
|
||||||
where
|
|
||||||
Left: Future<Output = Output>,
|
|
||||||
Right: Future<Output = Output>,
|
|
||||||
{
|
|
||||||
type Output = Output;
|
|
||||||
|
|
||||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Output> {
|
|
||||||
unsafe {
|
|
||||||
match Pin::get_unchecked_mut(self) {
|
|
||||||
Either::Left(car) => Pin::new_unchecked(car).poll(cx),
|
|
||||||
Either::Right(cdr) => Pin::new_unchecked(cdr).poll(cx),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Resp, F> Serve for F
|
|
||||||
where
|
|
||||||
F: FnOnce(context::Context, Bytes) -> Resp + Clone + Send + 'static,
|
|
||||||
Resp: Future<Output = io::Result<ServiceResponse>> + Send + 'static,
|
|
||||||
{
|
|
||||||
type Response = Resp;
|
|
||||||
|
|
||||||
fn serve(self, cx: context::Context, request: Bytes) -> Resp {
|
|
||||||
self(cx, request)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Example
|
|
||||||
use bytes::Bytes;
|
|
||||||
use futures::{
|
|
||||||
compat::Executor01CompatExt,
|
|
||||||
future::{ready, Ready},
|
|
||||||
prelude::*,
|
|
||||||
};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::{
|
|
||||||
collections::HashMap,
|
|
||||||
io,
|
|
||||||
sync::{Arc, RwLock},
|
|
||||||
};
|
|
||||||
use tarpc::{client, context, server::Handler};
|
|
||||||
|
|
||||||
fn deserialize<Req>(req: Bytes) -> io::Result<Req>
|
|
||||||
where
|
|
||||||
Req: for<'a> Deserialize<'a> + Send,
|
|
||||||
{
|
|
||||||
bincode::deserialize(req.as_ref()).map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn serialize<Resp>(resp: Resp) -> io::Result<Bytes>
|
|
||||||
where
|
|
||||||
Resp: Serialize,
|
|
||||||
{
|
|
||||||
Ok(bincode::serialize(&resp)
|
|
||||||
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
mod write_service {
|
|
||||||
tarpc::service! {
|
|
||||||
rpc write(key: String, value: String);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mod read_service {
|
|
||||||
tarpc::service! {
|
|
||||||
rpc read(key: String) -> Option<String>;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Default, Clone)]
|
|
||||||
struct Server {
|
|
||||||
data: Arc<RwLock<HashMap<String, String>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl write_service::Service for Server {
|
|
||||||
type WriteFut = Ready<()>;
|
|
||||||
|
|
||||||
fn write(self, _: context::Context, key: String, value: String) -> Self::WriteFut {
|
|
||||||
self.data.write().unwrap().insert(key, value);
|
|
||||||
ready(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl read_service::Service for Server {
|
|
||||||
type ReadFut = Ready<Option<String>>;
|
|
||||||
|
|
||||||
fn read(self, _: context::Context, key: String) -> Self::ReadFut {
|
|
||||||
ready(self.data.read().unwrap().get(&key).cloned())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
trait DefaultSpawn {
|
|
||||||
fn spawn(self);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<F> DefaultSpawn for F
|
|
||||||
where
|
|
||||||
F: Future<Output = ()> + Send + 'static,
|
|
||||||
{
|
|
||||||
fn spawn(self) {
|
|
||||||
tokio_executor::spawn(self.unit_error().boxed().compat())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct BincodeRegistry<Services> {
|
|
||||||
registry: registry::Registry<Services>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for BincodeRegistry<registry::Nil> {
|
|
||||||
fn default() -> Self {
|
|
||||||
BincodeRegistry {
|
|
||||||
registry: registry::Registry::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Services: registry::MaybeServe + Sync> BincodeRegistry<Services> {
|
|
||||||
fn serve(
|
|
||||||
self,
|
|
||||||
) -> impl FnOnce(
|
|
||||||
context::Context,
|
|
||||||
registry::ServiceRequest,
|
|
||||||
) -> registry::Either<
|
|
||||||
Services::Future,
|
|
||||||
Ready<io::Result<registry::ServiceResponse>>,
|
|
||||||
> + Clone {
|
|
||||||
self.registry.serve()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn register<S, Req, Resp, RespFut>(
|
|
||||||
self,
|
|
||||||
name: String,
|
|
||||||
serve: S,
|
|
||||||
) -> BincodeRegistry<registry::Registration<impl registry::Serve + Send + 'static, Services>>
|
|
||||||
where
|
|
||||||
Req: for<'a> Deserialize<'a> + Send + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
S: FnOnce(context::Context, Req) -> RespFut + Send + 'static + Clone,
|
|
||||||
RespFut: Future<Output = io::Result<Resp>> + Send + 'static,
|
|
||||||
{
|
|
||||||
let registry = self.registry.register(name, serve, deserialize, serialize);
|
|
||||||
BincodeRegistry { registry }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn new_client<Req, Resp>(
|
|
||||||
service_name: String,
|
|
||||||
channel: &client::Channel<registry::ServiceRequest, registry::ServiceResponse>,
|
|
||||||
) -> client::MapResponse<
|
|
||||||
client::WithRequest<
|
|
||||||
client::Channel<registry::ServiceRequest, registry::ServiceResponse>,
|
|
||||||
impl FnMut(Req) -> registry::ServiceRequest,
|
|
||||||
>,
|
|
||||||
impl FnMut(registry::ServiceResponse) -> Resp,
|
|
||||||
>
|
|
||||||
where
|
|
||||||
Req: Serialize + Send + 'static,
|
|
||||||
Resp: for<'a> Deserialize<'a> + Send + 'static,
|
|
||||||
{
|
|
||||||
registry::new_client(service_name, channel, serialize, deserialize)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn run() -> io::Result<()> {
|
|
||||||
let server = Server::default();
|
|
||||||
let registry = BincodeRegistry::default()
|
|
||||||
.register(
|
|
||||||
"WriteService".to_string(),
|
|
||||||
write_service::serve(server.clone()),
|
|
||||||
)
|
|
||||||
.register(
|
|
||||||
"ReadService".to_string(),
|
|
||||||
read_service::serve(server.clone()),
|
|
||||||
);
|
|
||||||
|
|
||||||
let listener = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
|
||||||
let server_addr = listener.local_addr();
|
|
||||||
let server = tarpc::Server::default()
|
|
||||||
.incoming(listener)
|
|
||||||
.take(1)
|
|
||||||
.respond_with(registry.serve());
|
|
||||||
tokio_executor::spawn(server.unit_error().boxed().compat());
|
|
||||||
|
|
||||||
let transport = bincode_transport::connect(&server_addr).await?;
|
|
||||||
let channel = client::new(client::Config::default(), transport).await?;
|
|
||||||
|
|
||||||
let write_client = new_client("WriteService".to_string(), &channel);
|
|
||||||
let mut write_client = write_service::Client::from(write_client);
|
|
||||||
|
|
||||||
let read_client = new_client("ReadService".to_string(), &channel);
|
|
||||||
let mut read_client = read_service::Client::from(read_client);
|
|
||||||
|
|
||||||
write_client
|
|
||||||
.write(context::current(), "key".to_string(), "val".to_string())
|
|
||||||
.await?;
|
|
||||||
let val = read_client
|
|
||||||
.read(context::current(), "key".to_string())
|
|
||||||
.await?;
|
|
||||||
println!("{:?}", val);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
tokio::run(run().boxed().map_err(|e| panic!(e)).boxed().compat());
|
|
||||||
}
|
|
||||||
295
tarpc/src/lib.rs
295
tarpc/src/lib.rs
@@ -4,20 +4,287 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![doc(include = "../README.md")]
|
//! *Disclaimer*: This is not an official Google product.
|
||||||
#![deny(missing_docs, missing_debug_implementations)]
|
//!
|
||||||
#![feature(async_await, external_doc)]
|
//! tarpc is an RPC framework for rust with a focus on ease of use. Defining a
|
||||||
#![cfg_attr(test, feature(proc_macro_hygiene, arbitrary_self_types))]
|
//! service can be done in just a few lines of code, and most of the boilerplate of
|
||||||
|
//! writing a server is taken care of for you.
|
||||||
|
//!
|
||||||
|
//! [Documentation](https://docs.rs/crate/tarpc/)
|
||||||
|
//!
|
||||||
|
//! ## What is an RPC framework?
|
||||||
|
//! "RPC" stands for "Remote Procedure Call," a function call where the work of
|
||||||
|
//! producing the return value is being done somewhere else. When an rpc function is
|
||||||
|
//! invoked, behind the scenes the function contacts some other process somewhere
|
||||||
|
//! and asks them to evaluate the function instead. The original function then
|
||||||
|
//! returns the value produced by the other process.
|
||||||
|
//!
|
||||||
|
//! RPC frameworks are a fundamental building block of most microservices-oriented
|
||||||
|
//! architectures. Two well-known ones are [gRPC](http://www.grpc.io) and
|
||||||
|
//! [Cap'n Proto](https://capnproto.org/).
|
||||||
|
//!
|
||||||
|
//! tarpc differentiates itself from other RPC frameworks by defining the schema in code,
|
||||||
|
//! rather than in a separate language such as .proto. This means there's no separate compilation
|
||||||
|
//! process, and no context switching between different languages.
|
||||||
|
//!
|
||||||
|
//! Some other features of tarpc:
|
||||||
|
//! - Pluggable transport: any type impling `Stream<Item = Request> + Sink<Response>` can be
|
||||||
|
//! used as a transport to connect the client and server.
|
||||||
|
//! - `Send + 'static` optional: if the transport doesn't require it, neither does tarpc!
|
||||||
|
//! - Cascading cancellation: dropping a request will send a cancellation message to the server.
|
||||||
|
//! The server will cease any unfinished work on the request, subsequently cancelling any of its
|
||||||
|
//! own requests, repeating for the entire chain of transitive dependencies.
|
||||||
|
//! - Configurable deadlines and deadline propagation: request deadlines default to 10s if
|
||||||
|
//! unspecified. The server will automatically cease work when the deadline has passed. Any
|
||||||
|
//! requests sent by the server that use the request context will propagate the request deadline.
|
||||||
|
//! For example, if a server is handling a request with a 10s deadline, does 2s of work, then
|
||||||
|
//! sends a request to another server, that server will see an 8s deadline.
|
||||||
|
//! - Serde serialization: enabling the `serde1` Cargo feature will make service requests and
|
||||||
|
//! responses `Serialize + Deserialize`. It's entirely optional, though: in-memory transports can
|
||||||
|
//! be used, as well, so the price of serialization doesn't have to be paid when it's not needed.
|
||||||
|
//!
|
||||||
|
//! ## Usage
|
||||||
|
//! Add to your `Cargo.toml` dependencies:
|
||||||
|
//!
|
||||||
|
//! ```toml
|
||||||
|
//! tarpc = "0.22.0"
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! The `tarpc::service` attribute expands to a collection of items that form an rpc service.
|
||||||
|
//! These generated types make it easy and ergonomic to write servers with less boilerplate.
|
||||||
|
//! Simply implement the generated service trait, and you're off to the races!
|
||||||
|
//!
|
||||||
|
//! ## Example
|
||||||
|
//!
|
||||||
|
//! For this example, in addition to tarpc, also add two other dependencies to
|
||||||
|
//! your `Cargo.toml`:
|
||||||
|
//!
|
||||||
|
//! ```toml
|
||||||
|
//! futures = "0.3"
|
||||||
|
//! tokio = "0.2"
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! In the following example, we use an in-process channel for communication between
|
||||||
|
//! client and server. In real code, you will likely communicate over the network.
|
||||||
|
//! For a more real-world example, see [example-service](example-service).
|
||||||
|
//!
|
||||||
|
//! First, let's set up the dependencies and service definition.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! # extern crate futures;
|
||||||
|
//!
|
||||||
|
//! use futures::{
|
||||||
|
//! future::{self, Ready},
|
||||||
|
//! prelude::*,
|
||||||
|
//! };
|
||||||
|
//! use tarpc::{
|
||||||
|
//! client, context,
|
||||||
|
//! server::{self, Handler},
|
||||||
|
//! };
|
||||||
|
//! use std::io;
|
||||||
|
//!
|
||||||
|
//! // This is the service definition. It looks a lot like a trait definition.
|
||||||
|
//! // It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
//! #[tarpc::service]
|
||||||
|
//! trait World {
|
||||||
|
//! /// Returns a greeting for name.
|
||||||
|
//! async fn hello(name: String) -> String;
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! This service definition generates a trait called `World`. Next we need to
|
||||||
|
//! implement it for our Server struct.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! # extern crate futures;
|
||||||
|
//! # use futures::{
|
||||||
|
//! # future::{self, Ready},
|
||||||
|
//! # prelude::*,
|
||||||
|
//! # };
|
||||||
|
//! # use tarpc::{
|
||||||
|
//! # client, context,
|
||||||
|
//! # server::{self, Handler},
|
||||||
|
//! # };
|
||||||
|
//! # use std::io;
|
||||||
|
//! # // This is the service definition. It looks a lot like a trait definition.
|
||||||
|
//! # // It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
//! # #[tarpc::service]
|
||||||
|
//! # trait World {
|
||||||
|
//! # /// Returns a greeting for name.
|
||||||
|
//! # async fn hello(name: String) -> String;
|
||||||
|
//! # }
|
||||||
|
//! // This is the type that implements the generated World trait. It is the business logic
|
||||||
|
//! // and is used to start the server.
|
||||||
|
//! #[derive(Clone)]
|
||||||
|
//! struct HelloServer;
|
||||||
|
//!
|
||||||
|
//! impl World for HelloServer {
|
||||||
|
//! // Each defined rpc generates two items in the trait, a fn that serves the RPC, and
|
||||||
|
//! // an associated type representing the future output by the fn.
|
||||||
|
//!
|
||||||
|
//! type HelloFut = Ready<String>;
|
||||||
|
//!
|
||||||
|
//! fn hello(self, _: context::Context, name: String) -> Self::HelloFut {
|
||||||
|
//! future::ready(format!("Hello, {}!", name))
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Lastly let's write our `main` that will start the server. While this example uses an
|
||||||
|
//! [in-process channel](rpc::transport::channel), tarpc also ships a generic [`serde_transport`]
|
||||||
|
//! behind the `serde-transport` feature, with additional [TCP](serde_transport::tcp) functionality
|
||||||
|
//! available behind the `tcp` feature.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! # extern crate futures;
|
||||||
|
//! # use futures::{
|
||||||
|
//! # future::{self, Ready},
|
||||||
|
//! # prelude::*,
|
||||||
|
//! # };
|
||||||
|
//! # use tarpc::{
|
||||||
|
//! # client, context,
|
||||||
|
//! # server::{self, Handler},
|
||||||
|
//! # };
|
||||||
|
//! # use std::io;
|
||||||
|
//! # // This is the service definition. It looks a lot like a trait definition.
|
||||||
|
//! # // It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
//! # #[tarpc::service]
|
||||||
|
//! # trait World {
|
||||||
|
//! # /// Returns a greeting for name.
|
||||||
|
//! # async fn hello(name: String) -> String;
|
||||||
|
//! # }
|
||||||
|
//! # // This is the type that implements the generated World trait. It is the business logic
|
||||||
|
//! # // and is used to start the server.
|
||||||
|
//! # #[derive(Clone)]
|
||||||
|
//! # struct HelloServer;
|
||||||
|
//! # impl World for HelloServer {
|
||||||
|
//! # // Each defined rpc generates two items in the trait, a fn that serves the RPC, and
|
||||||
|
//! # // an associated type representing the future output by the fn.
|
||||||
|
//! # type HelloFut = Ready<String>;
|
||||||
|
//! # fn hello(self, _: context::Context, name: String) -> Self::HelloFut {
|
||||||
|
//! # future::ready(format!("Hello, {}!", name))
|
||||||
|
//! # }
|
||||||
|
//! # }
|
||||||
|
//! #[tokio::main]
|
||||||
|
//! async fn main() -> io::Result<()> {
|
||||||
|
//! let (client_transport, server_transport) = tarpc::transport::channel::unbounded();
|
||||||
|
//!
|
||||||
|
//! let server = server::new(server::Config::default())
|
||||||
|
//! // incoming() takes a stream of transports such as would be returned by
|
||||||
|
//! // TcpListener::incoming (but a stream instead of an iterator).
|
||||||
|
//! .incoming(stream::once(future::ready(server_transport)))
|
||||||
|
//! .respond_with(HelloServer.serve());
|
||||||
|
//!
|
||||||
|
//! tokio::spawn(server);
|
||||||
|
//!
|
||||||
|
//! // WorldClient is generated by the macro. It has a constructor `new` that takes a config and
|
||||||
|
//! // any Transport as input
|
||||||
|
//! let mut client = WorldClient::new(client::Config::default(), client_transport).spawn()?;
|
||||||
|
//!
|
||||||
|
//! // The client has an RPC method for each RPC defined in the annotated trait. It takes the same
|
||||||
|
//! // args as defined, with the addition of a Context, which is always the first arg. The Context
|
||||||
|
//! // specifies a deadline and trace information which can be helpful in debugging requests.
|
||||||
|
//! let hello = client.hello(context::current(), "Stim".to_string()).await?;
|
||||||
|
//!
|
||||||
|
//! println!("{}", hello);
|
||||||
|
//!
|
||||||
|
//! Ok(())
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! ## Service Documentation
|
||||||
|
//!
|
||||||
|
//! Use `cargo doc` as you normally would to see the documentation created for all
|
||||||
|
//! items expanded by a `service!` invocation.
|
||||||
|
#![deny(missing_docs)]
|
||||||
|
#![allow(clippy::type_complexity)]
|
||||||
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
|
|
||||||
#[doc(hidden)]
|
pub mod rpc;
|
||||||
pub use futures;
|
|
||||||
pub use rpc::*;
|
pub use rpc::*;
|
||||||
#[cfg(feature = "serde")]
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub use serde;
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub use tarpc_plugins::*;
|
|
||||||
|
|
||||||
/// Provides the macro used for constructing rpc services and client stubs.
|
#[cfg(feature = "serde-transport")]
|
||||||
#[macro_use]
|
#[cfg_attr(docsrs, doc(cfg(feature = "serde-transport")))]
|
||||||
mod macros;
|
pub mod serde_transport;
|
||||||
|
|
||||||
|
pub mod trace;
|
||||||
|
|
||||||
|
/// The main macro that creates RPC services.
|
||||||
|
///
|
||||||
|
/// Rpc methods are specified, mirroring trait syntax:
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// #[tarpc::service]
|
||||||
|
/// trait Service {
|
||||||
|
/// /// Say hello
|
||||||
|
/// async fn hello(name: String) -> String;
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Attributes can be attached to each rpc. These attributes
|
||||||
|
/// will then be attached to the generated service traits'
|
||||||
|
/// corresponding `fn`s, as well as to the client stubs' RPCs.
|
||||||
|
///
|
||||||
|
/// The following items are expanded in the enclosing module:
|
||||||
|
///
|
||||||
|
/// * `trait Service` -- defines the RPC service.
|
||||||
|
/// * `fn serve` -- turns a service impl into a request handler.
|
||||||
|
/// * `Client` -- a client stub with a fn for each RPC.
|
||||||
|
/// * `fn new_stub` -- creates a new Client stub.
|
||||||
|
pub use tarpc_plugins::service;
|
||||||
|
|
||||||
|
/// A utility macro that can be used for RPC server implementations.
|
||||||
|
///
|
||||||
|
/// Syntactic sugar to make using async functions in the server implementation
|
||||||
|
/// easier. It does this by rewriting code like this, which would normally not
|
||||||
|
/// compile because async functions are disallowed in trait implementations:
|
||||||
|
///
|
||||||
|
/// ```rust
|
||||||
|
/// # use tarpc::context;
|
||||||
|
/// # use std::net::SocketAddr;
|
||||||
|
/// #[tarpc::service]
|
||||||
|
/// trait World {
|
||||||
|
/// async fn hello(name: String) -> String;
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// #[derive(Clone)]
|
||||||
|
/// struct HelloServer(SocketAddr);
|
||||||
|
///
|
||||||
|
/// #[tarpc::server]
|
||||||
|
/// impl World for HelloServer {
|
||||||
|
/// async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
/// format!("Hello, {}! You are connected from {:?}.", name, self.0)
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Into code like this, which matches the service trait definition:
|
||||||
|
///
|
||||||
|
/// ```rust
|
||||||
|
/// # use tarpc::context;
|
||||||
|
/// # use std::pin::Pin;
|
||||||
|
/// # use futures::Future;
|
||||||
|
/// # use std::net::SocketAddr;
|
||||||
|
/// #[derive(Clone)]
|
||||||
|
/// struct HelloServer(SocketAddr);
|
||||||
|
///
|
||||||
|
/// #[tarpc::service]
|
||||||
|
/// trait World {
|
||||||
|
/// async fn hello(name: String) -> String;
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl World for HelloServer {
|
||||||
|
/// type HelloFut = Pin<Box<dyn Future<Output = String> + Send>>;
|
||||||
|
///
|
||||||
|
/// fn hello(self, _: context::Context, name: String) -> Pin<Box<dyn Future<Output = String>
|
||||||
|
/// + Send>> {
|
||||||
|
/// Box::pin(async move {
|
||||||
|
/// format!("Hello, {}! You are connected from {:?}.", name, self.0)
|
||||||
|
/// })
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Note that this won't touch functions unless they have been annotated with
|
||||||
|
/// `async`, meaning that this should not break existing code.
|
||||||
|
pub use tarpc_plugins::server;
|
||||||
|
|||||||
@@ -1,377 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
#[cfg(feature = "serde")]
|
|
||||||
#[doc(hidden)]
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! add_serde_if_enabled {
|
|
||||||
($(#[$attr:meta])* -- $i:item) => {
|
|
||||||
$(#[$attr])*
|
|
||||||
#[derive($crate::serde::Serialize, $crate::serde::Deserialize)]
|
|
||||||
$i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(feature = "serde"))]
|
|
||||||
#[doc(hidden)]
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! add_serde_if_enabled {
|
|
||||||
($(#[$attr:meta])* -- $i:item) => {
|
|
||||||
$(#[$attr])*
|
|
||||||
$i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The main macro that creates RPC services.
|
|
||||||
///
|
|
||||||
/// Rpc methods are specified, mirroring trait syntax:
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// # #![feature(arbitrary_self_types, async_await, proc_macro_hygiene)]
|
|
||||||
/// # fn main() {}
|
|
||||||
/// # tarpc::service! {
|
|
||||||
/// /// Say hello
|
|
||||||
/// rpc hello(name: String) -> String;
|
|
||||||
/// # }
|
|
||||||
/// ```
|
|
||||||
///
|
|
||||||
/// Attributes can be attached to each rpc. These attributes
|
|
||||||
/// will then be attached to the generated service traits'
|
|
||||||
/// corresponding `fn`s, as well as to the client stubs' RPCs.
|
|
||||||
///
|
|
||||||
/// The following items are expanded in the enclosing module:
|
|
||||||
///
|
|
||||||
/// * `trait Service` -- defines the RPC service.
|
|
||||||
/// * `fn serve` -- turns a service impl into a request handler.
|
|
||||||
/// * `Client` -- a client stub with a fn for each RPC.
|
|
||||||
/// * `fn new_stub` -- creates a new Client stub.
|
|
||||||
///
|
|
||||||
#[macro_export]
|
|
||||||
macro_rules! service {
|
|
||||||
() => {
|
|
||||||
compile_error!("Must define at least one RPC method.");
|
|
||||||
};
|
|
||||||
// Entry point
|
|
||||||
(
|
|
||||||
$(
|
|
||||||
$(#[$attr:meta])*
|
|
||||||
rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ) $(-> $out:ty)*;
|
|
||||||
)*
|
|
||||||
) => {
|
|
||||||
$crate::service! {{
|
|
||||||
$(
|
|
||||||
$(#[$attr])*
|
|
||||||
rpc $fn_name( $( $arg : $in_ ),* ) $(-> $out)*;
|
|
||||||
)*
|
|
||||||
}}
|
|
||||||
};
|
|
||||||
// Pattern for when the next rpc has an implicit unit return type.
|
|
||||||
(
|
|
||||||
{
|
|
||||||
$(#[$attr:meta])*
|
|
||||||
rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* );
|
|
||||||
|
|
||||||
$( $unexpanded:tt )*
|
|
||||||
}
|
|
||||||
$( $expanded:tt )*
|
|
||||||
) => {
|
|
||||||
$crate::service! {
|
|
||||||
{ $( $unexpanded )* }
|
|
||||||
|
|
||||||
$( $expanded )*
|
|
||||||
|
|
||||||
$(#[$attr])*
|
|
||||||
rpc $fn_name( $( $arg : $in_ ),* ) -> ();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
// Pattern for when the next rpc has an explicit return type.
|
|
||||||
(
|
|
||||||
{
|
|
||||||
$(#[$attr:meta])*
|
|
||||||
rpc $fn_name:ident( $( $arg:ident : $in_:ty ),* ) -> $out:ty;
|
|
||||||
|
|
||||||
$( $unexpanded:tt )*
|
|
||||||
}
|
|
||||||
$( $expanded:tt )*
|
|
||||||
) => {
|
|
||||||
$crate::service! {
|
|
||||||
{ $( $unexpanded )* }
|
|
||||||
|
|
||||||
$( $expanded )*
|
|
||||||
|
|
||||||
$(#[$attr])*
|
|
||||||
rpc $fn_name( $( $arg : $in_ ),* ) -> $out;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
// Pattern for when all return types have been expanded
|
|
||||||
(
|
|
||||||
{ } // none left to expand
|
|
||||||
$(
|
|
||||||
$(#[$attr:meta])*
|
|
||||||
rpc $fn_name:ident ( $( $arg:ident : $in_:ty ),* ) -> $out:ty;
|
|
||||||
)*
|
|
||||||
) => {
|
|
||||||
$crate::add_serde_if_enabled! {
|
|
||||||
/// The request sent over the wire from the client to the server.
|
|
||||||
#[derive(Debug)]
|
|
||||||
#[allow(non_camel_case_types, unused)]
|
|
||||||
--
|
|
||||||
pub enum Request {
|
|
||||||
$(
|
|
||||||
$(#[$attr])*
|
|
||||||
$fn_name{ $($arg: $in_,)* }
|
|
||||||
),*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
$crate::add_serde_if_enabled! {
|
|
||||||
/// The response sent over the wire from the server to the client.
|
|
||||||
#[derive(Debug)]
|
|
||||||
#[allow(non_camel_case_types, unused)]
|
|
||||||
--
|
|
||||||
pub enum Response {
|
|
||||||
$(
|
|
||||||
$(#[$attr])*
|
|
||||||
$fn_name($out)
|
|
||||||
),*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: proc_macro can't currently parse $crate, so this needs to be imported for the
|
|
||||||
// usage of snake_to_camel! to work.
|
|
||||||
use $crate::futures::Future as Future__;
|
|
||||||
|
|
||||||
/// Defines the RPC service. The additional trait bounds are required so that services can
|
|
||||||
/// multiplex requests across multiple tasks, potentially on multiple threads.
|
|
||||||
pub trait Service: Clone + Send + 'static {
|
|
||||||
$(
|
|
||||||
$crate::snake_to_camel! {
|
|
||||||
/// The type of future returned by `{}`.
|
|
||||||
type $fn_name: Future__<Output = $out> + Send;
|
|
||||||
}
|
|
||||||
|
|
||||||
$(#[$attr])*
|
|
||||||
fn $fn_name(self, ctx: $crate::context::Context, $($arg:$in_),*) -> $crate::ty_snake_to_camel!(Self::$fn_name);
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: use an existential type instead of this when existential types work.
|
|
||||||
/// A future resolving to a server [`Response`].
|
|
||||||
#[allow(non_camel_case_types)]
|
|
||||||
pub enum ResponseFut<S: Service> {
|
|
||||||
$(
|
|
||||||
$(#[$attr])*
|
|
||||||
$fn_name($crate::ty_snake_to_camel!(<S as Service>::$fn_name)),
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: Service> ::std::fmt::Debug for ResponseFut<S> {
|
|
||||||
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
|
||||||
fmt.debug_struct("Response").finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: Service> ::std::future::Future for ResponseFut<S> {
|
|
||||||
type Output = ::std::io::Result<Response>;
|
|
||||||
|
|
||||||
fn poll(self: ::std::pin::Pin<&mut Self>, cx: &mut ::std::task::Context<'_>)
|
|
||||||
-> ::std::task::Poll<::std::io::Result<Response>>
|
|
||||||
{
|
|
||||||
unsafe {
|
|
||||||
match ::std::pin::Pin::get_unchecked_mut(self) {
|
|
||||||
$(
|
|
||||||
ResponseFut::$fn_name(resp) =>
|
|
||||||
::std::pin::Pin::new_unchecked(resp)
|
|
||||||
.poll(cx)
|
|
||||||
.map(Response::$fn_name)
|
|
||||||
.map(Ok),
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a serving function to use with rpc::server::Server.
|
|
||||||
pub fn serve<S: Service>(service: S)
|
|
||||||
-> impl FnOnce($crate::context::Context, Request) -> ResponseFut<S> + Send + 'static + Clone {
|
|
||||||
move |ctx, req| {
|
|
||||||
match req {
|
|
||||||
$(
|
|
||||||
Request::$fn_name{ $($arg,)* } => {
|
|
||||||
let resp = Service::$fn_name(service.clone(), ctx, $($arg),*);
|
|
||||||
ResponseFut::$fn_name(resp)
|
|
||||||
}
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(unused)]
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
/// The client stub that makes RPC calls to the server. Exposes a Future interface.
|
|
||||||
pub struct Client<C = $crate::client::Channel<Request, Response>>(C);
|
|
||||||
|
|
||||||
/// Returns a new client stub that sends requests over the given transport.
|
|
||||||
pub async fn new_stub<T>(config: $crate::client::Config, transport: T)
|
|
||||||
-> ::std::io::Result<Client>
|
|
||||||
where
|
|
||||||
T: $crate::Transport<
|
|
||||||
Item = $crate::Response<Response>,
|
|
||||||
SinkItem = $crate::ClientMessage<Request>> + Send + 'static,
|
|
||||||
{
|
|
||||||
Ok(Client($crate::client::new(config, transport).await?))
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<C> From<C> for Client<C>
|
|
||||||
where for <'a> C: $crate::Client<'a, Request, Response = Response>
|
|
||||||
{
|
|
||||||
fn from(client: C) -> Self {
|
|
||||||
Client(client)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<C> Client<C>
|
|
||||||
where for<'a> C: $crate::Client<'a, Request, Response = Response>
|
|
||||||
{
|
|
||||||
$(
|
|
||||||
#[allow(unused)]
|
|
||||||
$(#[$attr])*
|
|
||||||
pub fn $fn_name(&mut self, ctx: $crate::context::Context, $($arg: $in_),*)
|
|
||||||
-> impl ::std::future::Future<Output = ::std::io::Result<$out>> + '_ {
|
|
||||||
let request__ = Request::$fn_name { $($arg,)* };
|
|
||||||
let resp = $crate::Client::call(&mut self.0, ctx, request__);
|
|
||||||
async move {
|
|
||||||
match resp.await? {
|
|
||||||
Response::$fn_name(msg__) => ::std::result::Result::Ok(msg__),
|
|
||||||
_ => unreachable!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
)*
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// allow dead code; we're just testing that the macro expansion compiles
|
|
||||||
#[allow(dead_code)]
|
|
||||||
#[cfg(test)]
|
|
||||||
mod syntax_test {
|
|
||||||
service! {
|
|
||||||
#[deny(warnings)]
|
|
||||||
#[allow(non_snake_case)]
|
|
||||||
rpc TestCamelCaseDoesntConflict();
|
|
||||||
rpc hello() -> String;
|
|
||||||
#[doc="attr"]
|
|
||||||
rpc attr(s: String) -> String;
|
|
||||||
rpc no_args_no_return();
|
|
||||||
rpc no_args() -> ();
|
|
||||||
rpc one_arg(foo: String) -> i32;
|
|
||||||
rpc two_args_no_return(bar: String, baz: u64);
|
|
||||||
rpc two_args(bar: String, baz: u64) -> String;
|
|
||||||
rpc no_args_ret_error() -> i32;
|
|
||||||
rpc one_arg_ret_error(foo: String) -> String;
|
|
||||||
rpc no_arg_implicit_return_error();
|
|
||||||
#[doc="attr"]
|
|
||||||
rpc one_arg_implicit_return_error(foo: String);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod functional_test {
|
|
||||||
use futures::{
|
|
||||||
compat::Executor01CompatExt,
|
|
||||||
future::{ready, Ready},
|
|
||||||
prelude::*,
|
|
||||||
};
|
|
||||||
use rpc::{client, context, server::Handler, transport::channel};
|
|
||||||
use std::io;
|
|
||||||
use tokio::runtime::current_thread;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc add(x: i32, y: i32) -> i32;
|
|
||||||
rpc hey(name: String) -> String;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct Server;
|
|
||||||
|
|
||||||
impl Service for Server {
|
|
||||||
type AddFut = Ready<i32>;
|
|
||||||
|
|
||||||
fn add(self, _: context::Context, x: i32, y: i32) -> Self::AddFut {
|
|
||||||
ready(x + y)
|
|
||||||
}
|
|
||||||
|
|
||||||
type HeyFut = Ready<String>;
|
|
||||||
|
|
||||||
fn hey(self, _: context::Context, name: String) -> Self::HeyFut {
|
|
||||||
ready(format!("Hey, {}.", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn sequential() {
|
|
||||||
let _ = env_logger::try_init();
|
|
||||||
rpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
let test = async {
|
|
||||||
let (tx, rx) = channel::unbounded();
|
|
||||||
tokio_executor::spawn(
|
|
||||||
crate::Server::default()
|
|
||||||
.incoming(stream::once(ready(Ok(rx))))
|
|
||||||
.respond_with(serve(Server))
|
|
||||||
.unit_error()
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut client = new_stub(client::Config::default(), tx).await?;
|
|
||||||
assert_eq!(3, client.add(context::current(), 1, 2).await?);
|
|
||||||
assert_eq!(
|
|
||||||
"Hey, Tim.",
|
|
||||||
client.hey(context::current(), "Tim".to_string()).await?
|
|
||||||
);
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
}
|
|
||||||
.map_err(|e| panic!(e.to_string()));
|
|
||||||
|
|
||||||
current_thread::block_on_all(test.boxed().compat()).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn concurrent() {
|
|
||||||
let _ = env_logger::try_init();
|
|
||||||
rpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
let test = async {
|
|
||||||
let (tx, rx) = channel::unbounded();
|
|
||||||
tokio_executor::spawn(
|
|
||||||
rpc::Server::default()
|
|
||||||
.incoming(stream::once(ready(Ok(rx))))
|
|
||||||
.respond_with(serve(Server))
|
|
||||||
.unit_error()
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let client = new_stub(client::Config::default(), tx).await?;
|
|
||||||
let mut c = client.clone();
|
|
||||||
let req1 = c.add(context::current(), 1, 2);
|
|
||||||
let mut c = client.clone();
|
|
||||||
let req2 = c.add(context::current(), 3, 4);
|
|
||||||
let mut c = client.clone();
|
|
||||||
let req3 = c.hey(context::current(), "Tim".to_string());
|
|
||||||
|
|
||||||
assert_eq!(3, req1.await?);
|
|
||||||
assert_eq!(7, req2.await?);
|
|
||||||
assert_eq!("Hey, Tim.", req3.await?);
|
|
||||||
Ok::<_, io::Error>(())
|
|
||||||
}
|
|
||||||
.map_err(|e| panic!("test failed: {}", e));
|
|
||||||
|
|
||||||
current_thread::block_on_all(test.boxed().compat()).unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -4,13 +4,6 @@
|
|||||||
// license that can be found in the LICENSE file or at
|
// license that can be found in the LICENSE file or at
|
||||||
// https://opensource.org/licenses/MIT.
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
#![feature(
|
|
||||||
non_exhaustive,
|
|
||||||
integer_atomics,
|
|
||||||
try_trait,
|
|
||||||
arbitrary_self_types,
|
|
||||||
async_await
|
|
||||||
)]
|
|
||||||
#![deny(missing_docs, missing_debug_implementations)]
|
#![deny(missing_docs, missing_debug_implementations)]
|
||||||
|
|
||||||
//! An RPC framework providing client and server.
|
//! An RPC framework providing client and server.
|
||||||
@@ -37,31 +30,17 @@ pub mod server;
|
|||||||
pub mod transport;
|
pub mod transport;
|
||||||
pub(crate) mod util;
|
pub(crate) mod util;
|
||||||
|
|
||||||
pub use crate::{client::Client, server::Server, transport::Transport};
|
pub use crate::{client::Client, server::Server, trace, transport::sealed::Transport};
|
||||||
|
|
||||||
use futures::{
|
use anyhow::Context as _;
|
||||||
task::{Poll, Spawn, SpawnError, SpawnExt},
|
use futures::task::*;
|
||||||
Future,
|
use std::{fmt::Display, io, time::SystemTime};
|
||||||
};
|
|
||||||
use std::{cell::RefCell, io, sync::Once, time::SystemTime};
|
|
||||||
|
|
||||||
/// A message from a client to a server.
|
/// A message from a client to a server.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
||||||
#[non_exhaustive]
|
#[non_exhaustive]
|
||||||
pub struct ClientMessage<T> {
|
pub enum ClientMessage<T> {
|
||||||
/// The trace context associates the message with a specific chain of causally-related actions,
|
|
||||||
/// possibly orchestrated across many distributed systems.
|
|
||||||
pub trace_context: trace::Context,
|
|
||||||
/// The message payload.
|
|
||||||
pub message: ClientMessageKind<T>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Different messages that can be sent from a client to a server.
|
|
||||||
#[derive(Debug)]
|
|
||||||
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
|
||||||
#[non_exhaustive]
|
|
||||||
pub enum ClientMessageKind<T> {
|
|
||||||
/// A request initiated by a user. The server responds to a request by invoking a
|
/// A request initiated by a user. The server responds to a request by invoking a
|
||||||
/// service-provided request handler. The handler completes with a [`response`](Response), which
|
/// service-provided request handler. The handler completes with a [`response`](Response), which
|
||||||
/// the server sends back to the client.
|
/// the server sends back to the client.
|
||||||
@@ -74,37 +53,32 @@ pub enum ClientMessageKind<T> {
|
|||||||
/// not be canceled, because the framework layer does not
|
/// not be canceled, because the framework layer does not
|
||||||
/// know about them.
|
/// know about them.
|
||||||
Cancel {
|
Cancel {
|
||||||
|
/// The trace context associates the message with a specific chain of causally-related actions,
|
||||||
|
/// possibly orchestrated across many distributed systems.
|
||||||
|
#[cfg_attr(feature = "serde", serde(default))]
|
||||||
|
trace_context: trace::Context,
|
||||||
/// The ID of the request to cancel.
|
/// The ID of the request to cancel.
|
||||||
request_id: u64,
|
request_id: u64,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A request from a client to a server.
|
/// A request from a client to a server.
|
||||||
#[derive(Debug)]
|
#[derive(Clone, Copy, Debug)]
|
||||||
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
|
||||||
#[non_exhaustive]
|
#[non_exhaustive]
|
||||||
|
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub struct Request<T> {
|
pub struct Request<T> {
|
||||||
|
/// Trace context, deadline, and other cross-cutting concerns.
|
||||||
|
pub context: context::Context,
|
||||||
/// Uniquely identifies the request across all requests sent over a single channel.
|
/// Uniquely identifies the request across all requests sent over a single channel.
|
||||||
pub id: u64,
|
pub id: u64,
|
||||||
/// The request body.
|
/// The request body.
|
||||||
pub message: T,
|
pub message: T,
|
||||||
/// When the client expects the request to be complete by. The server will cancel the request
|
|
||||||
/// if it is not complete by this time.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "serde1",
|
|
||||||
serde(serialize_with = "util::serde::serialize_epoch_secs")
|
|
||||||
)]
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "serde1",
|
|
||||||
serde(deserialize_with = "util::serde::deserialize_epoch_secs")
|
|
||||||
)]
|
|
||||||
pub deadline: SystemTime,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A response from a server to a client.
|
/// A response from a server to a client.
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
|
||||||
#[non_exhaustive]
|
#[non_exhaustive]
|
||||||
|
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub struct Response<T> {
|
pub struct Response<T> {
|
||||||
/// The ID of the request being responded to.
|
/// The ID of the request being responded to.
|
||||||
pub request_id: u64,
|
pub request_id: u64,
|
||||||
@@ -113,9 +87,9 @@ pub struct Response<T> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// An error response from a server to a client.
|
/// An error response from a server to a client.
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
|
||||||
#[non_exhaustive]
|
#[non_exhaustive]
|
||||||
|
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub struct ServerError {
|
pub struct ServerError {
|
||||||
#[cfg_attr(
|
#[cfg_attr(
|
||||||
feature = "serde1",
|
feature = "serde1",
|
||||||
@@ -140,48 +114,35 @@ impl From<ServerError> for io::Error {
|
|||||||
impl<T> Request<T> {
|
impl<T> Request<T> {
|
||||||
/// Returns the deadline for this request.
|
/// Returns the deadline for this request.
|
||||||
pub fn deadline(&self) -> &SystemTime {
|
pub fn deadline(&self) -> &SystemTime {
|
||||||
&self.deadline
|
&self.context.deadline
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) type PollIo<T> = Poll<Option<io::Result<T>>>;
|
pub(crate) type PollIo<T> = Poll<Option<io::Result<T>>>;
|
||||||
|
pub(crate) trait PollContext<T> {
|
||||||
|
fn context<C>(self, context: C) -> Poll<Option<anyhow::Result<T>>>
|
||||||
|
where
|
||||||
|
C: Display + Send + Sync + 'static;
|
||||||
|
|
||||||
static INIT: Once = Once::new();
|
fn with_context<C, F>(self, f: F) -> Poll<Option<anyhow::Result<T>>>
|
||||||
static mut SEED_SPAWN: Option<Box<dyn CloneSpawn>> = None;
|
where
|
||||||
thread_local! {
|
C: Display + Send + Sync + 'static,
|
||||||
static SPAWN: RefCell<Box<dyn CloneSpawn>> = {
|
F: FnOnce() -> C;
|
||||||
unsafe {
|
|
||||||
// INIT must always be called before accessing SPAWN.
|
|
||||||
// Otherwise, accessing SPAWN can trigger undefined behavior due to race conditions.
|
|
||||||
INIT.call_once(|| {});
|
|
||||||
RefCell::new(SEED_SPAWN.as_ref().expect("init() must be called.").box_clone())
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Initializes the RPC library with a mechanism to spawn futures on the user's runtime.
|
impl<T> PollContext<T> for PollIo<T> {
|
||||||
/// Client stubs and servers both use the initialized spawn.
|
fn context<C>(self, context: C) -> Poll<Option<anyhow::Result<T>>>
|
||||||
///
|
where
|
||||||
/// Init only has an effect the first time it is called. If called previously, successive calls to
|
C: Display + Send + Sync + 'static,
|
||||||
/// init are noops.
|
{
|
||||||
pub fn init(spawn: impl Spawn + Clone + 'static) {
|
self.map(|o| o.map(|r| r.context(context)))
|
||||||
unsafe {
|
}
|
||||||
INIT.call_once(|| {
|
|
||||||
SEED_SPAWN = Some(Box::new(spawn));
|
fn with_context<C, F>(self, f: F) -> Poll<Option<anyhow::Result<T>>>
|
||||||
});
|
where
|
||||||
}
|
C: Display + Send + Sync + 'static,
|
||||||
}
|
F: FnOnce() -> C,
|
||||||
|
{
|
||||||
pub(crate) fn spawn(future: impl Future<Output = ()> + Send + 'static) -> Result<(), SpawnError> {
|
self.map(|o| o.map(|r| r.with_context(f)))
|
||||||
SPAWN.with(|spawn| spawn.borrow_mut().spawn(future))
|
|
||||||
}
|
|
||||||
|
|
||||||
trait CloneSpawn: Spawn {
|
|
||||||
fn box_clone(&self) -> Box<dyn CloneSpawn>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: Spawn + Clone + 'static> CloneSpawn for S {
|
|
||||||
fn box_clone(&self) -> Box<dyn CloneSpawn> {
|
|
||||||
Box::new(self.clone())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -6,17 +6,13 @@
|
|||||||
|
|
||||||
//! Provides a client that connects to a server and sends multiplexed requests.
|
//! Provides a client that connects to a server and sends multiplexed requests.
|
||||||
|
|
||||||
use crate::{context, ClientMessage, Response, Transport};
|
use crate::context;
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use log::warn;
|
use std::io;
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
net::{Ipv4Addr, SocketAddr},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Provides a [`Client`] backed by a transport.
|
/// Provides a [`Client`] backed by a transport.
|
||||||
pub mod channel;
|
pub mod channel;
|
||||||
pub use self::channel::Channel;
|
pub use channel::{new, Channel};
|
||||||
|
|
||||||
/// Sends multiplexed requests to, and receives responses from, a server.
|
/// Sends multiplexed requests to, and receives responses from, a server.
|
||||||
pub trait Client<'a, Req> {
|
pub trait Client<'a, Req> {
|
||||||
@@ -107,8 +103,8 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Settings that control the behavior of the client.
|
/// Settings that control the behavior of the client.
|
||||||
#[non_exhaustive]
|
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
|
#[non_exhaustive]
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
/// The number of requests that can be in flight at once.
|
/// The number of requests that can be in flight at once.
|
||||||
/// `max_in_flight_requests` controls the size of the map used by the client
|
/// `max_in_flight_requests` controls the size of the map used by the client
|
||||||
@@ -129,23 +125,31 @@ impl Default for Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a new Client by wrapping a [`Transport`] and spawning a dispatch task
|
/// A channel and dispatch pair. The dispatch drives the sending and receiving of requests
|
||||||
/// that manages the lifecycle of requests.
|
/// and must be polled continuously or spawned.
|
||||||
///
|
#[derive(Debug)]
|
||||||
/// Must only be called from on an executor.
|
pub struct NewClient<C, D> {
|
||||||
pub async fn new<Req, Resp, T>(config: Config, transport: T) -> io::Result<Channel<Req, Resp>>
|
/// The new client.
|
||||||
where
|
pub client: C,
|
||||||
Req: Send + 'static,
|
/// The client's dispatch.
|
||||||
Resp: Send + 'static,
|
pub dispatch: D,
|
||||||
T: Transport<Item = Response<Resp>, SinkItem = ClientMessage<Req>> + Send + 'static,
|
}
|
||||||
{
|
|
||||||
let server_addr = transport.peer_addr().unwrap_or_else(|e| {
|
impl<C, D, E> NewClient<C, D>
|
||||||
warn!(
|
where
|
||||||
"Setting peer to unspecified because peer could not be determined: {}",
|
D: Future<Output = Result<(), E>> + Send + 'static,
|
||||||
e
|
E: std::fmt::Display,
|
||||||
);
|
{
|
||||||
SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0)
|
/// Helper method to spawn the dispatch on the default executor.
|
||||||
});
|
#[cfg(feature = "tokio1")]
|
||||||
|
#[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))]
|
||||||
Ok(channel::spawn(config, transport, server_addr).await?)
|
pub fn spawn(self) -> io::Result<C> {
|
||||||
|
use log::error;
|
||||||
|
|
||||||
|
let dispatch = self
|
||||||
|
.dispatch
|
||||||
|
.unwrap_or_else(move |e| error!("Connection broken: {}", e));
|
||||||
|
tokio::spawn(dispatch);
|
||||||
|
Ok(self.client)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -7,8 +7,9 @@
|
|||||||
//! Provides a request context that carries a deadline and trace context. This context is sent from
|
//! Provides a request context that carries a deadline and trace context. This context is sent from
|
||||||
//! client to server and is used by the server to enforce response deadlines.
|
//! client to server and is used by the server to enforce response deadlines.
|
||||||
|
|
||||||
|
use crate::trace::{self, TraceId};
|
||||||
|
use static_assertions::assert_impl_all;
|
||||||
use std::time::{Duration, SystemTime};
|
use std::time::{Duration, SystemTime};
|
||||||
use trace::{self, TraceId};
|
|
||||||
|
|
||||||
/// A request context that carries request-scoped information like deadlines and trace information.
|
/// A request context that carries request-scoped information like deadlines and trace information.
|
||||||
/// It is sent from client to server and is used by the server to enforce response deadlines.
|
/// It is sent from client to server and is used by the server to enforce response deadlines.
|
||||||
@@ -17,9 +18,19 @@ use trace::{self, TraceId};
|
|||||||
/// be different for each request in scope.
|
/// be different for each request in scope.
|
||||||
#[derive(Clone, Copy, Debug)]
|
#[derive(Clone, Copy, Debug)]
|
||||||
#[non_exhaustive]
|
#[non_exhaustive]
|
||||||
|
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub struct Context {
|
pub struct Context {
|
||||||
/// When the client expects the request to be complete by. The server should cancel the request
|
/// When the client expects the request to be complete by. The server should cancel the request
|
||||||
/// if it is not complete by this time.
|
/// if it is not complete by this time.
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "serde1",
|
||||||
|
serde(serialize_with = "crate::util::serde::serialize_epoch_secs")
|
||||||
|
)]
|
||||||
|
#[cfg_attr(
|
||||||
|
feature = "serde1",
|
||||||
|
serde(deserialize_with = "crate::util::serde::deserialize_epoch_secs")
|
||||||
|
)]
|
||||||
|
#[cfg_attr(feature = "serde1", serde(default = "ten_seconds_from_now"))]
|
||||||
pub deadline: SystemTime,
|
pub deadline: SystemTime,
|
||||||
/// Uniquely identifies requests originating from the same source.
|
/// Uniquely identifies requests originating from the same source.
|
||||||
/// When a service handles a request by making requests itself, those requests should
|
/// When a service handles a request by making requests itself, those requests should
|
||||||
@@ -28,6 +39,13 @@ pub struct Context {
|
|||||||
pub trace_context: trace::Context,
|
pub trace_context: trace::Context,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
assert_impl_all!(Context: Send, Sync);
|
||||||
|
|
||||||
|
#[cfg(feature = "serde1")]
|
||||||
|
fn ten_seconds_from_now() -> SystemTime {
|
||||||
|
SystemTime::now() + Duration::from_secs(10)
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns the context for the current request, or a default Context if no request is active.
|
/// Returns the context for the current request, or a default Context if no request is active.
|
||||||
// TODO: populate Context with request-scoped data, with default fallbacks.
|
// TODO: populate Context with request-scoped data, with default fallbacks.
|
||||||
pub fn current() -> Context {
|
pub fn current() -> Context {
|
||||||
707
tarpc/src/rpc/server.rs
Normal file
707
tarpc/src/rpc/server.rs
Normal file
@@ -0,0 +1,707 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Provides a server that concurrently handles many connections sending multiplexed requests.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
context, trace, util::Compact, util::TimeUntil, ClientMessage, PollIo, Request, Response,
|
||||||
|
ServerError, Transport,
|
||||||
|
};
|
||||||
|
use fnv::FnvHashMap;
|
||||||
|
use futures::{
|
||||||
|
channel::mpsc,
|
||||||
|
future::{AbortHandle, AbortRegistration, Abortable},
|
||||||
|
prelude::*,
|
||||||
|
ready,
|
||||||
|
stream::Fuse,
|
||||||
|
task::*,
|
||||||
|
};
|
||||||
|
use humantime::format_rfc3339;
|
||||||
|
use log::{debug, trace};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::{fmt, hash::Hash, io, marker::PhantomData, pin::Pin, time::SystemTime};
|
||||||
|
use tokio::time::Timeout;
|
||||||
|
|
||||||
|
mod filter;
|
||||||
|
#[cfg(test)]
|
||||||
|
mod testing;
|
||||||
|
mod throttle;
|
||||||
|
|
||||||
|
pub use self::{
|
||||||
|
filter::ChannelFilter,
|
||||||
|
throttle::{Throttler, ThrottlerStream},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Manages clients, serving multiplexed requests over each connection.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Server<Req, Resp> {
|
||||||
|
config: Config,
|
||||||
|
ghost: PhantomData<(Req, Resp)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp> Default for Server<Req, Resp> {
|
||||||
|
fn default() -> Self {
|
||||||
|
new(Config::default())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Settings that control the behavior of the server.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct Config {
|
||||||
|
/// The number of responses per client that can be buffered server-side before being sent.
|
||||||
|
/// `pending_response_buffer` controls the buffer size of the channel that a server's
|
||||||
|
/// response tasks use to send responses to the client handler task.
|
||||||
|
pub pending_response_buffer: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Config {
|
||||||
|
fn default() -> Self {
|
||||||
|
Config {
|
||||||
|
pending_response_buffer: 100,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Config {
|
||||||
|
/// Returns a channel backed by `transport` and configured with `self`.
|
||||||
|
pub fn channel<Req, Resp, T>(self, transport: T) -> BaseChannel<Req, Resp, T>
|
||||||
|
where
|
||||||
|
T: Transport<Response<Resp>, ClientMessage<Req>>,
|
||||||
|
{
|
||||||
|
BaseChannel::new(self, transport)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a new server with configuration specified `config`.
|
||||||
|
pub fn new<Req, Resp>(config: Config) -> Server<Req, Resp> {
|
||||||
|
Server {
|
||||||
|
config,
|
||||||
|
ghost: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp> Server<Req, Resp> {
|
||||||
|
/// Returns the config for this server.
|
||||||
|
pub fn config(&self) -> &Config {
|
||||||
|
&self.config
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a stream of server channels.
|
||||||
|
pub fn incoming<S, T>(self, listener: S) -> impl Stream<Item = BaseChannel<Req, Resp, T>>
|
||||||
|
where
|
||||||
|
S: Stream<Item = T>,
|
||||||
|
T: Transport<Response<Resp>, ClientMessage<Req>>,
|
||||||
|
{
|
||||||
|
listener.map(move |t| BaseChannel::new(self.config.clone(), t))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Basically a Fn(Req) -> impl Future<Output = Resp>;
|
||||||
|
pub trait Serve<Req>: Sized + Clone {
|
||||||
|
/// Type of response.
|
||||||
|
type Resp;
|
||||||
|
|
||||||
|
/// Type of response future.
|
||||||
|
type Fut: Future<Output = Self::Resp>;
|
||||||
|
|
||||||
|
/// Responds to a single request.
|
||||||
|
fn serve(self, ctx: context::Context, req: Req) -> Self::Fut;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp, Fut, F> Serve<Req> for F
|
||||||
|
where
|
||||||
|
F: FnOnce(context::Context, Req) -> Fut + Clone,
|
||||||
|
Fut: Future<Output = Resp>,
|
||||||
|
{
|
||||||
|
type Resp = Resp;
|
||||||
|
type Fut = Fut;
|
||||||
|
|
||||||
|
fn serve(self, ctx: context::Context, req: Req) -> Self::Fut {
|
||||||
|
self(ctx, req)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A utility trait enabling a stream to fluently chain a request handler.
|
||||||
|
pub trait Handler<C>
|
||||||
|
where
|
||||||
|
Self: Sized + Stream<Item = C>,
|
||||||
|
C: Channel,
|
||||||
|
{
|
||||||
|
/// Enforces channel per-key limits.
|
||||||
|
fn max_channels_per_key<K, KF>(self, n: u32, keymaker: KF) -> filter::ChannelFilter<Self, K, KF>
|
||||||
|
where
|
||||||
|
K: fmt::Display + Eq + Hash + Clone + Unpin,
|
||||||
|
KF: Fn(&C) -> K,
|
||||||
|
{
|
||||||
|
ChannelFilter::new(self, n, keymaker)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Caps the number of concurrent requests per channel.
|
||||||
|
fn max_concurrent_requests_per_channel(self, n: usize) -> ThrottlerStream<Self> {
|
||||||
|
ThrottlerStream::new(self, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Responds to all requests with [`server::serve`](Serve).
|
||||||
|
#[cfg(feature = "tokio1")]
|
||||||
|
#[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))]
|
||||||
|
fn respond_with<S>(self, server: S) -> Running<Self, S>
|
||||||
|
where
|
||||||
|
S: Serve<C::Req, Resp = C::Resp>,
|
||||||
|
{
|
||||||
|
Running {
|
||||||
|
incoming: self,
|
||||||
|
server,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, C> Handler<C> for S
|
||||||
|
where
|
||||||
|
S: Sized + Stream<Item = C>,
|
||||||
|
C: Channel,
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
/// BaseChannel lifts a Transport to a Channel by tracking in-flight requests.
|
||||||
|
#[pin_project]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct BaseChannel<Req, Resp, T> {
|
||||||
|
config: Config,
|
||||||
|
/// Writes responses to the wire and reads requests off the wire.
|
||||||
|
#[pin]
|
||||||
|
transport: Fuse<T>,
|
||||||
|
/// Number of requests currently being responded to.
|
||||||
|
in_flight_requests: FnvHashMap<u64, AbortHandle>,
|
||||||
|
/// Types the request and response.
|
||||||
|
ghost: PhantomData<(Req, Resp)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp, T> BaseChannel<Req, Resp, T>
|
||||||
|
where
|
||||||
|
T: Transport<Response<Resp>, ClientMessage<Req>>,
|
||||||
|
{
|
||||||
|
/// Creates a new channel backed by `transport` and configured with `config`.
|
||||||
|
pub fn new(config: Config, transport: T) -> Self {
|
||||||
|
BaseChannel {
|
||||||
|
config,
|
||||||
|
transport: transport.fuse(),
|
||||||
|
in_flight_requests: FnvHashMap::default(),
|
||||||
|
ghost: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a new channel backed by `transport` and configured with the defaults.
|
||||||
|
pub fn with_defaults(transport: T) -> Self {
|
||||||
|
Self::new(Config::default(), transport)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the inner transport over which messages are sent and received.
|
||||||
|
pub fn get_ref(&self) -> &T {
|
||||||
|
self.transport.get_ref()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the inner transport over which messages are sent and received.
|
||||||
|
pub fn get_pin_ref(self: Pin<&mut Self>) -> Pin<&mut T> {
|
||||||
|
self.project().transport.get_pin_mut()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn cancel_request(mut self: Pin<&mut Self>, trace_context: &trace::Context, request_id: u64) {
|
||||||
|
// It's possible the request was already completed, so it's fine
|
||||||
|
// if this is None.
|
||||||
|
if let Some(cancel_handle) = self
|
||||||
|
.as_mut()
|
||||||
|
.project()
|
||||||
|
.in_flight_requests
|
||||||
|
.remove(&request_id)
|
||||||
|
{
|
||||||
|
self.as_mut().project().in_flight_requests.compact(0.1);
|
||||||
|
|
||||||
|
cancel_handle.abort();
|
||||||
|
let remaining = self.as_mut().project().in_flight_requests.len();
|
||||||
|
trace!(
|
||||||
|
"[{}] Request canceled. In-flight requests = {}",
|
||||||
|
trace_context.trace_id,
|
||||||
|
remaining,
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
trace!(
|
||||||
|
"[{}] Received cancellation, but response handler \
|
||||||
|
is already complete.",
|
||||||
|
trace_context.trace_id,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The server end of an open connection with a client, streaming in requests from, and sinking
|
||||||
|
/// responses to, the client.
|
||||||
|
///
|
||||||
|
/// Channels are free to somewhat rely on the assumption that all in-flight requests are eventually
|
||||||
|
/// either [cancelled](BaseChannel::cancel_request) or [responded to](Sink::start_send). Safety cannot
|
||||||
|
/// rely on this assumption, but it is best for `Channel` users to always account for all outstanding
|
||||||
|
/// requests.
|
||||||
|
pub trait Channel
|
||||||
|
where
|
||||||
|
Self: Transport<Response<<Self as Channel>::Resp>, Request<<Self as Channel>::Req>>,
|
||||||
|
{
|
||||||
|
/// Type of request item.
|
||||||
|
type Req;
|
||||||
|
|
||||||
|
/// Type of response sink item.
|
||||||
|
type Resp;
|
||||||
|
|
||||||
|
/// Configuration of the channel.
|
||||||
|
fn config(&self) -> &Config;
|
||||||
|
|
||||||
|
/// Returns the number of in-flight requests over this channel.
|
||||||
|
fn in_flight_requests(self: Pin<&mut Self>) -> usize;
|
||||||
|
|
||||||
|
/// Caps the number of concurrent requests.
|
||||||
|
fn max_concurrent_requests(self, n: usize) -> Throttler<Self>
|
||||||
|
where
|
||||||
|
Self: Sized,
|
||||||
|
{
|
||||||
|
Throttler::new(self, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tells the Channel that request with ID `request_id` is being handled.
|
||||||
|
/// The request will be tracked until a response with the same ID is sent
|
||||||
|
/// to the Channel.
|
||||||
|
fn start_request(self: Pin<&mut Self>, request_id: u64) -> AbortRegistration;
|
||||||
|
|
||||||
|
/// Respond to requests coming over the channel with `f`. Returns a future that drives the
|
||||||
|
/// responses and resolves when the connection is closed.
|
||||||
|
fn respond_with<S>(self, server: S) -> ClientHandler<Self, S>
|
||||||
|
where
|
||||||
|
S: Serve<Self::Req, Resp = Self::Resp>,
|
||||||
|
Self: Sized,
|
||||||
|
{
|
||||||
|
let (responses_tx, responses) = mpsc::channel(self.config().pending_response_buffer);
|
||||||
|
let responses = responses.fuse();
|
||||||
|
|
||||||
|
ClientHandler {
|
||||||
|
channel: self,
|
||||||
|
server,
|
||||||
|
pending_responses: responses,
|
||||||
|
responses_tx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp, T> Stream for BaseChannel<Req, Resp, T>
|
||||||
|
where
|
||||||
|
T: Transport<Response<Resp>, ClientMessage<Req>>,
|
||||||
|
{
|
||||||
|
type Item = io::Result<Request<Req>>;
|
||||||
|
|
||||||
|
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||||
|
loop {
|
||||||
|
match ready!(self.as_mut().project().transport.poll_next(cx)?) {
|
||||||
|
Some(message) => match message {
|
||||||
|
ClientMessage::Request(request) => {
|
||||||
|
return Poll::Ready(Some(Ok(request)));
|
||||||
|
}
|
||||||
|
ClientMessage::Cancel {
|
||||||
|
trace_context,
|
||||||
|
request_id,
|
||||||
|
} => {
|
||||||
|
self.as_mut().cancel_request(&trace_context, request_id);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
None => return Poll::Ready(None),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp, T> Sink<Response<Resp>> for BaseChannel<Req, Resp, T>
|
||||||
|
where
|
||||||
|
T: Transport<Response<Resp>, ClientMessage<Req>>,
|
||||||
|
{
|
||||||
|
type Error = io::Error;
|
||||||
|
|
||||||
|
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
|
||||||
|
self.project().transport.poll_ready(cx)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_send(mut self: Pin<&mut Self>, response: Response<Resp>) -> Result<(), Self::Error> {
|
||||||
|
if self
|
||||||
|
.as_mut()
|
||||||
|
.project()
|
||||||
|
.in_flight_requests
|
||||||
|
.remove(&response.request_id)
|
||||||
|
.is_some()
|
||||||
|
{
|
||||||
|
self.as_mut().project().in_flight_requests.compact(0.1);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.project().transport.start_send(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
|
||||||
|
self.project().transport.poll_flush(cx)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
|
||||||
|
self.project().transport.poll_close(cx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp, T> AsRef<T> for BaseChannel<Req, Resp, T> {
|
||||||
|
fn as_ref(&self) -> &T {
|
||||||
|
self.transport.get_ref()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp, T> Channel for BaseChannel<Req, Resp, T>
|
||||||
|
where
|
||||||
|
T: Transport<Response<Resp>, ClientMessage<Req>>,
|
||||||
|
{
|
||||||
|
type Req = Req;
|
||||||
|
type Resp = Resp;
|
||||||
|
|
||||||
|
fn config(&self) -> &Config {
|
||||||
|
&self.config
|
||||||
|
}
|
||||||
|
|
||||||
|
fn in_flight_requests(mut self: Pin<&mut Self>) -> usize {
|
||||||
|
self.as_mut().project().in_flight_requests.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_request(self: Pin<&mut Self>, request_id: u64) -> AbortRegistration {
|
||||||
|
let (abort_handle, abort_registration) = AbortHandle::new_pair();
|
||||||
|
assert!(self
|
||||||
|
.project()
|
||||||
|
.in_flight_requests
|
||||||
|
.insert(request_id, abort_handle)
|
||||||
|
.is_none());
|
||||||
|
abort_registration
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A running handler serving all requests coming over a channel.
|
||||||
|
#[pin_project]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct ClientHandler<C, S>
|
||||||
|
where
|
||||||
|
C: Channel,
|
||||||
|
{
|
||||||
|
#[pin]
|
||||||
|
channel: C,
|
||||||
|
/// Responses waiting to be written to the wire.
|
||||||
|
#[pin]
|
||||||
|
pending_responses: Fuse<mpsc::Receiver<(context::Context, Response<C::Resp>)>>,
|
||||||
|
/// Handed out to request handlers to fan in responses.
|
||||||
|
#[pin]
|
||||||
|
responses_tx: mpsc::Sender<(context::Context, Response<C::Resp>)>,
|
||||||
|
/// Server
|
||||||
|
server: S,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<C, S> ClientHandler<C, S>
|
||||||
|
where
|
||||||
|
C: Channel,
|
||||||
|
S: Serve<C::Req, Resp = C::Resp>,
|
||||||
|
{
|
||||||
|
/// Returns the inner channel over which messages are sent and received.
|
||||||
|
pub fn get_pin_channel(self: Pin<&mut Self>) -> Pin<&mut C> {
|
||||||
|
self.project().channel
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pump_read(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> PollIo<RequestHandler<S::Fut, C::Resp>> {
|
||||||
|
match ready!(self.as_mut().project().channel.poll_next(cx)?) {
|
||||||
|
Some(request) => Poll::Ready(Some(Ok(self.handle_request(request)))),
|
||||||
|
None => Poll::Ready(None),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pump_write(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
read_half_closed: bool,
|
||||||
|
) -> PollIo<()> {
|
||||||
|
match self.as_mut().poll_next_response(cx)? {
|
||||||
|
Poll::Ready(Some((ctx, response))) => {
|
||||||
|
trace!(
|
||||||
|
"[{}] Staging response. In-flight requests = {}.",
|
||||||
|
ctx.trace_id(),
|
||||||
|
self.as_mut().project().channel.in_flight_requests(),
|
||||||
|
);
|
||||||
|
self.as_mut().project().channel.start_send(response)?;
|
||||||
|
Poll::Ready(Some(Ok(())))
|
||||||
|
}
|
||||||
|
Poll::Ready(None) => {
|
||||||
|
// Shutdown can't be done before we finish pumping out remaining responses.
|
||||||
|
ready!(self.as_mut().project().channel.poll_flush(cx)?);
|
||||||
|
Poll::Ready(None)
|
||||||
|
}
|
||||||
|
Poll::Pending => {
|
||||||
|
// No more requests to process, so flush any requests buffered in the transport.
|
||||||
|
ready!(self.as_mut().project().channel.poll_flush(cx)?);
|
||||||
|
|
||||||
|
// Being here means there are no staged requests and all written responses are
|
||||||
|
// fully flushed. So, if the read half is closed and there are no in-flight
|
||||||
|
// requests, then we can close the write half.
|
||||||
|
if read_half_closed && self.as_mut().project().channel.in_flight_requests() == 0 {
|
||||||
|
Poll::Ready(None)
|
||||||
|
} else {
|
||||||
|
Poll::Pending
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_next_response(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> PollIo<(context::Context, Response<C::Resp>)> {
|
||||||
|
// Ensure there's room to write a response.
|
||||||
|
while let Poll::Pending = self.as_mut().project().channel.poll_ready(cx)? {
|
||||||
|
ready!(self.as_mut().project().channel.poll_flush(cx)?);
|
||||||
|
}
|
||||||
|
|
||||||
|
match ready!(self.as_mut().project().pending_responses.poll_next(cx)) {
|
||||||
|
Some((ctx, response)) => Poll::Ready(Some(Ok((ctx, response)))),
|
||||||
|
None => {
|
||||||
|
// This branch likely won't happen, since the ClientHandler is holding a Sender.
|
||||||
|
Poll::Ready(None)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Builds a [`RequestHandler`] future for a single incoming request.
///
/// The server's `serve` future is raced against the request deadline via
/// `tokio::time::timeout`; the eventual response flows back to the channel
/// through the cloned `responses_tx` sender. The request is also registered
/// with the channel so it can be aborted via the returned registration.
fn handle_request(
    mut self: Pin<&mut Self>,
    request: Request<C::Req>,
) -> RequestHandler<S::Fut, C::Resp> {
    let request_id = request.id;
    let deadline = request.context.deadline;
    // Convert the absolute deadline into a relative timeout for the timer.
    let timeout = deadline.time_until();
    trace!(
        "[{}] Received request with deadline {} (timeout {:?}).",
        request.context.trace_id(),
        format_rfc3339(deadline),
        timeout,
    );
    let ctx = request.context;
    let request = request.message;

    let response = self.as_mut().project().server.clone().serve(ctx, request);
    let response = Resp {
        state: RespState::PollResp,
        request_id,
        ctx,
        deadline,
        f: tokio::time::timeout(timeout, response),
        response: None,
        response_tx: self.as_mut().project().responses_tx.clone(),
    };
    // Registering the request hands the channel the ability to abort it.
    let abort_registration = self.as_mut().project().channel.start_request(request_id);
    RequestHandler {
        resp: Abortable::new(response, abort_registration),
    }
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A future fulfilling a single client request.
#[pin_project]
#[derive(Debug)]
pub struct RequestHandler<F, R> {
    // The response-producing state machine, abortable via the registration
    // obtained from `Channel::start_request`.
    #[pin]
    resp: Abortable<Resp<F, R>>,
}
|
||||||
|
|
||||||
|
impl<F, R> Future for RequestHandler<F, R>
|
||||||
|
where
|
||||||
|
F: Future<Output = R>,
|
||||||
|
{
|
||||||
|
type Output = ();
|
||||||
|
|
||||||
|
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
|
||||||
|
let _ = ready!(self.project().resp.poll(cx));
|
||||||
|
Poll::Ready(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[pin_project]
#[derive(Debug)]
struct Resp<F, R> {
    // Which stage of the respond sequence we're in: poll the serve future,
    // then reserve a sender slot, then flush.
    state: RespState,
    request_id: u64,
    ctx: context::Context,
    // Absolute deadline, kept for log/error messages after the timeout fires.
    deadline: SystemTime,
    // The serve future raced against the request deadline.
    #[pin]
    f: Timeout<F>,
    // The completed response, buffered between PollResp and PollReady.
    response: Option<Response<R>>,
    #[pin]
    response_tx: mpsc::Sender<(context::Context, Response<R>)>,
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
#[allow(clippy::enum_variant_names)]
enum RespState {
    // Waiting for the serve future (or its timeout) to complete.
    PollResp,
    // Waiting for the response channel to accept a send.
    PollReady,
    // Waiting for the response channel to flush.
    PollFlush,
}
|
||||||
|
|
||||||
|
impl<F, R> Future for Resp<F, R>
|
||||||
|
where
|
||||||
|
F: Future<Output = R>,
|
||||||
|
{
|
||||||
|
type Output = ();
|
||||||
|
|
||||||
|
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
|
||||||
|
loop {
|
||||||
|
match self.as_mut().project().state {
|
||||||
|
RespState::PollResp => {
|
||||||
|
let result = ready!(self.as_mut().project().f.poll(cx));
|
||||||
|
*self.as_mut().project().response = Some(Response {
|
||||||
|
request_id: self.request_id,
|
||||||
|
message: match result {
|
||||||
|
Ok(message) => Ok(message),
|
||||||
|
Err(tokio::time::Elapsed { .. }) => {
|
||||||
|
debug!(
|
||||||
|
"[{}] Response did not complete before deadline of {}s.",
|
||||||
|
self.ctx.trace_id(),
|
||||||
|
format_rfc3339(self.deadline)
|
||||||
|
);
|
||||||
|
// No point in responding, since the client will have dropped the
|
||||||
|
// request.
|
||||||
|
Err(ServerError {
|
||||||
|
kind: io::ErrorKind::TimedOut,
|
||||||
|
detail: Some(format!(
|
||||||
|
"Response did not complete before deadline of {}s.",
|
||||||
|
format_rfc3339(self.deadline)
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
},
|
||||||
|
});
|
||||||
|
*self.as_mut().project().state = RespState::PollReady;
|
||||||
|
}
|
||||||
|
RespState::PollReady => {
|
||||||
|
let ready = ready!(self.as_mut().project().response_tx.poll_ready(cx));
|
||||||
|
if ready.is_err() {
|
||||||
|
return Poll::Ready(());
|
||||||
|
}
|
||||||
|
let resp = (self.ctx, self.as_mut().project().response.take().unwrap());
|
||||||
|
if self
|
||||||
|
.as_mut()
|
||||||
|
.project()
|
||||||
|
.response_tx
|
||||||
|
.start_send(resp)
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return Poll::Ready(());
|
||||||
|
}
|
||||||
|
*self.as_mut().project().state = RespState::PollFlush;
|
||||||
|
}
|
||||||
|
RespState::PollFlush => {
|
||||||
|
let ready = ready!(self.as_mut().project().response_tx.poll_flush(cx));
|
||||||
|
if ready.is_err() {
|
||||||
|
return Poll::Ready(());
|
||||||
|
}
|
||||||
|
return Poll::Ready(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<C, S> Stream for ClientHandler<C, S>
|
||||||
|
where
|
||||||
|
C: Channel,
|
||||||
|
S: Serve<C::Req, Resp = C::Resp>,
|
||||||
|
{
|
||||||
|
type Item = io::Result<RequestHandler<S::Fut, C::Resp>>;
|
||||||
|
|
||||||
|
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||||
|
loop {
|
||||||
|
let read = self.as_mut().pump_read(cx)?;
|
||||||
|
let read_closed = if let Poll::Ready(None) = read {
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
};
|
||||||
|
match (read, self.as_mut().pump_write(cx, read_closed)?) {
|
||||||
|
(Poll::Ready(None), Poll::Ready(None)) => {
|
||||||
|
return Poll::Ready(None);
|
||||||
|
}
|
||||||
|
(Poll::Ready(Some(request_handler)), _) => {
|
||||||
|
return Poll::Ready(Some(Ok(request_handler)));
|
||||||
|
}
|
||||||
|
(_, Poll::Ready(Some(()))) => {}
|
||||||
|
_ => {
|
||||||
|
return Poll::Pending;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send + 'static execution helper methods.

impl<C, S> ClientHandler<C, S>
where
    C: Channel + 'static,
    C::Req: Send + 'static,
    C::Resp: Send + 'static,
    S: Serve<C::Req, Resp = C::Resp> + Send + 'static,
    S::Fut: Send + 'static,
{
    /// Runs the client handler until completion by [spawning](tokio::spawn) each
    /// request handler onto the default executor.
    #[cfg(feature = "tokio1")]
    #[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))]
    pub fn execute(self) -> impl Future<Output = ()> {
        self.try_for_each(|request_handler| async {
            // Each request is handled concurrently on its own task.
            tokio::spawn(request_handler);
            Ok(())
        })
        // Whether the handler finished cleanly or errored, log and swallow the
        // result so the returned future's output is plain ().
        .map_ok(|()| log::info!("ClientHandler finished."))
        .unwrap_or_else(|e| log::info!("ClientHandler errored out: {}", e))
    }
}
|
||||||
|
|
||||||
|
/// A future that drives the server by [spawning](tokio::spawn) channels and request handlers on the default
/// executor.
#[pin_project]
#[derive(Debug)]
#[cfg(feature = "tokio1")]
#[cfg_attr(docsrs, doc(cfg(feature = "tokio1")))]
pub struct Running<St, Se> {
    // Stream of incoming client channels.
    #[pin]
    incoming: St,
    // The serve implementation, cloned into each spawned channel task.
    server: Se,
}
|
||||||
|
|
||||||
|
#[cfg(feature = "tokio1")]
impl<St, C, Se> Future for Running<St, Se>
where
    St: Sized + Stream<Item = C>,
    C: Channel + Send + 'static,
    C::Req: Send + 'static,
    C::Resp: Send + 'static,
    Se: Serve<C::Req, Resp = C::Resp> + Send + 'static + Clone,
    Se::Fut: Send + 'static,
{
    type Output = ();

    /// Accepts channels until the incoming stream terminates, spawning one
    /// task per channel that serves its requests to completion.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        while let Some(channel) = ready!(self.as_mut().project().incoming.poll_next(cx)) {
            tokio::spawn(
                channel
                    .respond_with(self.as_mut().project().server.clone())
                    .execute(),
            );
        }
        log::info!("Server shutting down.");
        Poll::Ready(())
    }
}
|
||||||
471
tarpc/src/rpc/server/filter.rs
Normal file
471
tarpc/src/rpc/server/filter.rs
Normal file
@@ -0,0 +1,471 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
server::{self, Channel},
|
||||||
|
util::Compact,
|
||||||
|
};
|
||||||
|
use fnv::FnvHashMap;
|
||||||
|
use futures::{channel::mpsc, future::AbortRegistration, prelude::*, ready, stream::Fuse, task::*};
|
||||||
|
use log::{debug, info, trace};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
use std::{
|
||||||
|
collections::hash_map::Entry, convert::TryInto, fmt, hash::Hash, marker::Unpin, pin::Pin,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// A single-threaded filter that drops channels based on per-key limits.
#[pin_project]
#[derive(Debug)]
pub struct ChannelFilter<S, K, F>
where
    K: Eq + Hash,
{
    // The underlying stream of channels, fused so it is safe to keep polling
    // after it ends.
    #[pin]
    listener: Fuse<S>,
    // Maximum number of concurrently open channels per key.
    channels_per_key: u32,
    // Notifications that all channels for a key have been dropped.
    #[pin]
    dropped_keys: mpsc::UnboundedReceiver<K>,
    // Sender cloned into each Tracker so drops can report their key.
    #[pin]
    dropped_keys_tx: mpsc::UnboundedSender<K>,
    // Per-key trackers; the Weak upgrades while any channel for the key lives.
    key_counts: FnvHashMap<K, Weak<Tracker<K>>>,
    // Derives the limiting key from an incoming channel.
    keymaker: F,
}
|
||||||
|
|
||||||
|
/// A channel that is tracked by a ChannelFilter.
|
||||||
|
#[pin_project]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct TrackedChannel<C, K> {
|
||||||
|
#[pin]
|
||||||
|
inner: C,
|
||||||
|
tracker: Arc<Tracker<K>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
struct Tracker<K> {
    // Taken (and sent) exactly once, in Drop.
    key: Option<K>,
    dropped_keys: mpsc::UnboundedSender<K>,
}
|
||||||
|
|
||||||
|
impl<K> Drop for Tracker<K> {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
// Don't care if the listener is dropped.
|
||||||
|
let _ = self.dropped_keys.unbounded_send(self.key.take().unwrap());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<C, K> Stream for TrackedChannel<C, K>
where
    C: Stream,
{
    type Item = <C as Stream>::Item;

    // Pure delegation to the inner channel.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        self.channel().poll_next(cx)
    }
}
|
||||||
|
|
||||||
|
impl<C, I, K> Sink<I> for TrackedChannel<C, K>
where
    C: Sink<I>,
{
    type Error = C::Error;

    // All Sink operations delegate directly to the inner channel; tracking is
    // purely a Drop-time concern of the tracker.
    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.channel().poll_ready(cx)
    }

    fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
        self.channel().start_send(item)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.channel().poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.channel().poll_close(cx)
    }
}
|
||||||
|
|
||||||
|
impl<C, K> AsRef<C> for TrackedChannel<C, K> {
    // Borrows the wrapped channel.
    fn as_ref(&self) -> &C {
        &self.inner
    }
}
|
||||||
|
|
||||||
|
impl<C, K> Channel for TrackedChannel<C, K>
where
    C: Channel,
{
    type Req = C::Req;
    type Resp = C::Resp;

    // Channel operations delegate to the wrapped channel unchanged.
    fn config(&self) -> &server::Config {
        self.inner.config()
    }

    fn in_flight_requests(self: Pin<&mut Self>) -> usize {
        self.project().inner.in_flight_requests()
    }

    fn start_request(self: Pin<&mut Self>, request_id: u64) -> AbortRegistration {
        self.project().inner.start_request(request_id)
    }
}
|
||||||
|
|
||||||
|
impl<C, K> TrackedChannel<C, K> {
|
||||||
|
/// Returns the inner channel.
|
||||||
|
pub fn get_ref(&self) -> &C {
|
||||||
|
&self.inner
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the pinned inner channel.
|
||||||
|
fn channel<'a>(self: Pin<&'a mut Self>) -> Pin<&'a mut C> {
|
||||||
|
self.project().inner
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, K, F> ChannelFilter<S, K, F>
|
||||||
|
where
|
||||||
|
K: Eq + Hash,
|
||||||
|
S: Stream,
|
||||||
|
F: Fn(&S::Item) -> K,
|
||||||
|
{
|
||||||
|
/// Sheds new channels to stay under configured limits.
|
||||||
|
pub(crate) fn new(listener: S, channels_per_key: u32, keymaker: F) -> Self {
|
||||||
|
let (dropped_keys_tx, dropped_keys) = mpsc::unbounded();
|
||||||
|
ChannelFilter {
|
||||||
|
listener: listener.fuse(),
|
||||||
|
channels_per_key,
|
||||||
|
dropped_keys,
|
||||||
|
dropped_keys_tx,
|
||||||
|
key_counts: FnvHashMap::default(),
|
||||||
|
keymaker,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, K, F> ChannelFilter<S, K, F>
where
    S: Stream,
    K: fmt::Display + Eq + Hash + Clone + Unpin,
    F: Fn(&S::Item) -> K,
{
    /// Admits or rejects a newly-accepted channel. On success returns the
    /// channel wrapped with its key's tracker; on failure returns the key.
    fn handle_new_channel(
        mut self: Pin<&mut Self>,
        stream: S::Item,
    ) -> Result<TrackedChannel<S::Item, K>, K> {
        let key = (self.as_mut().keymaker)(&stream);
        let tracker = self.as_mut().increment_channels_for_key(key.clone())?;

        trace!(
            "[{}] Opening channel ({}/{}) channels for key.",
            key,
            Arc::strong_count(&tracker),
            self.as_mut().project().channels_per_key
        );

        Ok(TrackedChannel {
            tracker,
            inner: stream,
        })
    }

    /// Returns a tracker for `key`, creating one if none is live; errs with
    /// the key when the per-key channel limit has been reached. The tracker
    /// Arc's strong count is the number of live channels for the key.
    fn increment_channels_for_key(mut self: Pin<&mut Self>, key: K) -> Result<Arc<Tracker<K>>, K> {
        let channels_per_key = self.channels_per_key;
        let dropped_keys = self.dropped_keys_tx.clone();
        let key_counts = &mut self.as_mut().project().key_counts;
        match key_counts.entry(key.clone()) {
            Entry::Vacant(vacant) => {
                // First channel for this key: store a Weak so the entry doesn't
                // keep the tracker alive on its own.
                let tracker = Arc::new(Tracker {
                    key: Some(key),
                    dropped_keys,
                });

                vacant.insert(Arc::downgrade(&tracker));
                Ok(tracker)
            }
            Entry::Occupied(mut o) => {
                let count = o.get().strong_count();
                if count >= channels_per_key.try_into().unwrap() {
                    info!(
                        "[{}] Opened max channels from key ({}/{}).",
                        key, count, channels_per_key
                    );
                    Err(key)
                } else {
                    // The Weak may be dead if the drop notification hasn't been
                    // processed yet; start a fresh tracker in that case.
                    Ok(o.get().upgrade().unwrap_or_else(|| {
                        let tracker = Arc::new(Tracker {
                            key: Some(key),
                            dropped_keys,
                        });

                        *o.get_mut() = Arc::downgrade(&tracker);
                        tracker
                    }))
                }
            }
        }
    }

    /// Polls the underlying listener for the next channel and runs it through
    /// the per-key admission check.
    fn poll_listener(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<TrackedChannel<S::Item, K>, K>>> {
        match ready!(self.as_mut().project().listener.poll_next_unpin(cx)) {
            Some(codec) => Poll::Ready(Some(self.handle_new_channel(codec))),
            None => Poll::Ready(None),
        }
    }

    /// Processes one dropped-key notification, pruning that key's entry.
    fn poll_closed_channels(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        match ready!(self.as_mut().project().dropped_keys.poll_next_unpin(cx)) {
            Some(key) => {
                debug!("All channels dropped for key [{}]", key);
                self.as_mut().project().key_counts.remove(&key);
                // Shrink the backing map once it's mostly empty.
                self.as_mut().project().key_counts.compact(0.1);
                Poll::Ready(())
            }
            None => unreachable!("Holding a copy of closed_channels and didn't close it."),
        }
    }
}
|
||||||
|
|
||||||
|
impl<S, K, F> Stream for ChannelFilter<S, K, F>
where
    S: Stream,
    K: fmt::Display + Eq + Hash + Clone + Unpin,
    F: Fn(&S::Item) -> K,
{
    type Item = TrackedChannel<S::Item, K>;

    // Yields admitted channels; channels rejected for exceeding the per-key
    // limit are dropped and polling continues.
    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<TrackedChannel<S::Item, K>>> {
        loop {
            match (
                self.as_mut().poll_listener(cx),
                self.as_mut().poll_closed_channels(cx),
            ) {
                (Poll::Ready(Some(Ok(channel))), _) => {
                    return Poll::Ready(Some(channel));
                }
                (Poll::Ready(Some(Err(_))), _) => {
                    // Channel was shed; look for the next one.
                    continue;
                }
                // A key was cleaned up; re-poll both sides.
                (_, Poll::Ready(())) => continue,
                (Poll::Pending, Poll::Pending) => return Poll::Pending,
                (Poll::Ready(None), Poll::Pending) => {
                    trace!("Shutting down listener.");
                    return Poll::Ready(None);
                }
            }
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
fn ctx() -> Context<'static> {
    use futures::task::*;

    // noop_waker_ref already returns a &'static Waker, so taking another
    // reference was redundant (clippy::needless_borrow).
    Context::from_waker(noop_waker_ref())
}
|
||||||
|
|
||||||
|
#[test]
fn tracker_drop() {
    use assert_matches::assert_matches;

    // Dropping the Tracker (immediately, as a temporary) must send its key.
    let (tx, mut rx) = mpsc::unbounded();
    Tracker {
        key: Some(1),
        dropped_keys: tx,
    };
    assert_matches!(rx.try_next(), Ok(Some(1)));
}
|
||||||
|
|
||||||
|
#[test]
fn tracked_channel_stream() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    // Items sent into the inner channel come back out of the TrackedChannel.
    let (chan_tx, chan) = mpsc::unbounded();
    let (dropped_keys, _) = mpsc::unbounded();
    let channel = TrackedChannel {
        inner: chan,
        tracker: Arc::new(Tracker {
            key: Some(1),
            dropped_keys,
        }),
    };

    chan_tx.unbounded_send("test").unwrap();
    pin_mut!(channel);
    assert_matches!(channel.poll_next(&mut ctx()), Poll::Ready(Some("test")));
}
|
||||||
|
|
||||||
|
#[test]
fn tracked_channel_sink() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    // Items sunk into the TrackedChannel arrive at the inner channel.
    let (chan, mut chan_rx) = mpsc::unbounded();
    let (dropped_keys, _) = mpsc::unbounded();
    let channel = TrackedChannel {
        inner: chan,
        tracker: Arc::new(Tracker {
            key: Some(1),
            dropped_keys,
        }),
    };

    pin_mut!(channel);
    assert_matches!(channel.as_mut().poll_ready(&mut ctx()), Poll::Ready(Ok(())));
    assert_matches!(channel.as_mut().start_send("test"), Ok(()));
    assert_matches!(channel.as_mut().poll_flush(&mut ctx()), Poll::Ready(Ok(())));
    assert_matches!(chan_rx.try_next(), Ok(Some("test")));
}
|
||||||
|
|
||||||
|
#[test]
fn channel_filter_increment_channels_for_key() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    struct TestChannel {
        key: &'static str,
    }
    let (_, listener) = mpsc::unbounded();
    let filter = ChannelFilter::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);
    // The tracker's strong count mirrors the number of live channels per key.
    let tracker1 = filter.as_mut().increment_channels_for_key("key").unwrap();
    assert_eq!(Arc::strong_count(&tracker1), 1);
    let tracker2 = filter.as_mut().increment_channels_for_key("key").unwrap();
    assert_eq!(Arc::strong_count(&tracker1), 2);
    // A third channel exceeds the limit of 2 and is refused.
    assert_matches!(filter.increment_channels_for_key("key"), Err("key"));
    drop(tracker2);
    assert_eq!(Arc::strong_count(&tracker1), 1);
}
|
||||||
|
|
||||||
|
#[test]
fn channel_filter_handle_new_channel() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (_, listener) = mpsc::unbounded();
    let filter = ChannelFilter::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);
    // Admitting channels for the same key shares one tracker.
    let channel1 = filter
        .as_mut()
        .handle_new_channel(TestChannel { key: "key" })
        .unwrap();
    assert_eq!(Arc::strong_count(&channel1.tracker), 1);

    let channel2 = filter
        .as_mut()
        .handle_new_channel(TestChannel { key: "key" })
        .unwrap();
    assert_eq!(Arc::strong_count(&channel1.tracker), 2);

    // The third channel for the key is rejected, returning the key.
    assert_matches!(
        filter.handle_new_channel(TestChannel { key: "key" }),
        Err("key")
    );
    drop(channel2);
    assert_eq!(Arc::strong_count(&channel1.tracker), 1);
}
|
||||||
|
|
||||||
|
#[test]
fn channel_filter_poll_listener() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (new_channels, listener) = mpsc::unbounded();
    let filter = ChannelFilter::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);

    // First two channels for the key are admitted.
    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let channel1 =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c);
    assert_eq!(Arc::strong_count(&channel1.tracker), 1);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let _channel2 =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c);
    assert_eq!(Arc::strong_count(&channel1.tracker), 2);

    // The third is rejected with its key, leaving the count unchanged.
    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let key =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Err(k))) => k);
    assert_eq!(key, "key");
    assert_eq!(Arc::strong_count(&channel1.tracker), 2);
}
|
||||||
|
|
||||||
|
#[test]
fn channel_filter_poll_closed_channels() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (new_channels, listener) = mpsc::unbounded();
    let filter = ChannelFilter::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let channel =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c);
    assert_eq!(filter.key_counts.len(), 1);

    // Dropping the only channel for the key triggers a cleanup notification.
    drop(channel);
    assert_matches!(
        filter.as_mut().poll_closed_channels(&mut ctx()),
        Poll::Ready(())
    );
    assert!(filter.key_counts.is_empty());
}
|
||||||
|
|
||||||
|
#[test]
fn channel_filter_stream() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (new_channels, listener) = mpsc::unbounded();
    let filter = ChannelFilter::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let channel = assert_matches!(filter.as_mut().poll_next(&mut ctx()), Poll::Ready(Some(c)) => c);
    assert_eq!(filter.key_counts.len(), 1);

    // The Stream impl also drains drop notifications: after the channel drops,
    // a subsequent poll cleans up the key's entry.
    drop(channel);
    assert_matches!(filter.as_mut().poll_next(&mut ctx()), Poll::Pending);
    assert!(filter.key_counts.is_empty());
}
|
||||||
129
tarpc/src/rpc/server/testing.rs
Normal file
129
tarpc/src/rpc/server/testing.rs
Normal file
@@ -0,0 +1,129 @@
|
|||||||
|
// Copyright 2020 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use crate::server::{Channel, Config};
|
||||||
|
use crate::{context, Request, Response};
|
||||||
|
use fnv::FnvHashSet;
|
||||||
|
use futures::{
|
||||||
|
future::{AbortHandle, AbortRegistration},
|
||||||
|
task::*,
|
||||||
|
Sink, Stream,
|
||||||
|
};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::collections::VecDeque;
|
||||||
|
use std::io;
|
||||||
|
use std::pin::Pin;
|
||||||
|
use std::time::SystemTime;
|
||||||
|
|
||||||
|
// In-memory Channel double for tests: requests are popped from `stream` and
// responses accumulate in `sink`.
#[pin_project]
pub(crate) struct FakeChannel<In, Out> {
    #[pin]
    pub stream: VecDeque<In>,
    #[pin]
    pub sink: VecDeque<Out>,
    pub config: Config,
    // Ids registered via start_request and not yet responded to.
    pub in_flight_requests: FnvHashSet<u64>,
}
|
||||||
|
|
||||||
|
impl<In, Out> Stream for FakeChannel<In, Out>
where
    In: Unpin,
{
    type Item = In;

    // Pops pre-loaded items; yields Ready(None) once the queue is empty.
    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
        Poll::Ready(self.project().stream.pop_front())
    }
}
|
||||||
|
|
||||||
|
impl<In, Resp> Sink<Response<Resp>> for FakeChannel<In, Response<Resp>> {
    type Error = io::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        // The VecDeque sink's error type is uninhabited; `match e {}` proves
        // no error can actually occur while satisfying the io::Error signature.
        self.project().sink.poll_ready(cx).map_err(|e| match e {})
    }

    fn start_send(mut self: Pin<&mut Self>, response: Response<Resp>) -> Result<(), Self::Error> {
        // Sending a response marks its request as no longer in flight.
        self.as_mut()
            .project()
            .in_flight_requests
            .remove(&response.request_id);
        self.project()
            .sink
            .start_send(response)
            .map_err(|e| match e {})
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().sink.poll_flush(cx).map_err(|e| match e {})
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().sink.poll_close(cx).map_err(|e| match e {})
    }
}
|
||||||
|
|
||||||
|
impl<Req, Resp> Channel for FakeChannel<io::Result<Request<Req>>, Response<Resp>>
where
    Req: Unpin,
{
    type Req = Req;
    type Resp = Resp;

    fn config(&self) -> &Config {
        &self.config
    }

    fn in_flight_requests(self: Pin<&mut Self>) -> usize {
        self.in_flight_requests.len()
    }

    // Records the request id; the paired AbortHandle is discarded, so the
    // returned registration is never explicitly aborted.
    fn start_request(self: Pin<&mut Self>, id: u64) -> AbortRegistration {
        self.project().in_flight_requests.insert(id);
        AbortHandle::new_pair().1
    }
}
|
||||||
|
|
||||||
|
impl<Req, Resp> FakeChannel<io::Result<Request<Req>>, Response<Resp>> {
    /// Queues a request with the given id and payload, using a UNIX-epoch
    /// deadline and a default trace context.
    pub fn push_req(&mut self, id: u64, message: Req) {
        self.stream.push_back(Ok(Request {
            context: context::Context {
                deadline: SystemTime::UNIX_EPOCH,
                trace_context: Default::default(),
            },
            id,
            message,
        }));
    }
}
|
||||||
|
|
||||||
|
impl FakeChannel<(), ()> {
    /// Builds an empty FakeChannel for request/response tests.
    pub fn default<Req, Resp>() -> FakeChannel<io::Result<Request<Req>>, Response<Resp>> {
        FakeChannel {
            stream: Default::default(),
            sink: Default::default(),
            config: Default::default(),
            in_flight_requests: Default::default(),
        }
    }
}
|
||||||
|
|
||||||
|
/// Test-only convenience for inspecting stream-style poll results.
pub trait PollExt {
    /// Returns true iff the poll indicates a terminated stream
    /// (`Poll::Ready(None)`).
    fn is_done(&self) -> bool;
}

impl<T> PollExt for Poll<Option<T>> {
    fn is_done(&self) -> bool {
        // `match … { Ready(None) => true, _ => false }` condensed to the
        // idiomatic matches! form (clippy::match_like_matches_macro).
        matches!(self, Poll::Ready(None))
    }
}
|
||||||
|
|
||||||
|
pub fn cx() -> Context<'static> {
|
||||||
|
Context::from_waker(&noop_waker_ref())
|
||||||
|
}
|
||||||
328
tarpc/src/rpc/server/throttle.rs
Normal file
328
tarpc/src/rpc/server/throttle.rs
Normal file
@@ -0,0 +1,328 @@
|
|||||||
|
// Copyright 2020 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use super::{Channel, Config};
|
||||||
|
use crate::{Response, ServerError};
|
||||||
|
use futures::{future::AbortRegistration, prelude::*, ready, task::*};
|
||||||
|
use log::debug;
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::{io, pin::Pin};
|
||||||
|
|
||||||
|
/// A [`Channel`] that limits the number of concurrent
/// requests by throttling.
#[pin_project]
#[derive(Debug)]
pub struct Throttler<C> {
    // Requests arriving while this many are in flight are rejected with a
    // WouldBlock ServerError instead of being yielded.
    max_in_flight_requests: usize,
    #[pin]
    inner: C,
}
|
||||||
|
|
||||||
|
impl<C> Throttler<C> {
    /// Returns the inner channel.
    pub fn get_ref(&self) -> &C {
        &self.inner
    }
}
|
||||||
|
|
||||||
|
impl<C> Throttler<C>
|
||||||
|
where
|
||||||
|
C: Channel,
|
||||||
|
{
|
||||||
|
/// Returns a new `Throttler` that wraps the given channel and limits concurrent requests to
|
||||||
|
/// `max_in_flight_requests`.
|
||||||
|
pub fn new(inner: C, max_in_flight_requests: usize) -> Self {
|
||||||
|
Throttler {
|
||||||
|
inner,
|
||||||
|
max_in_flight_requests,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<C> Stream for Throttler<C>
where
    C: Channel,
{
    type Item = <C as Stream>::Item;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        // While at or over the limit, drain incoming requests and reject each
        // one with a throttle error instead of yielding it.
        while self.as_mut().in_flight_requests() >= *self.as_mut().project().max_in_flight_requests
        {
            // Make sure the sink can accept the rejection response first.
            ready!(self.as_mut().project().inner.poll_ready(cx)?);

            match ready!(self.as_mut().project().inner.poll_next(cx)?) {
                Some(request) => {
                    debug!(
                        "[{}] Client has reached in-flight request limit ({}/{}).",
                        request.context.trace_id(),
                        self.as_mut().in_flight_requests(),
                        self.as_mut().project().max_in_flight_requests,
                    );

                    self.as_mut().start_send(Response {
                        request_id: request.id,
                        message: Err(ServerError {
                            kind: io::ErrorKind::WouldBlock,
                            detail: Some("Server throttled the request.".into()),
                        }),
                    })?;
                }
                None => return Poll::Ready(None),
            }
        }
        // Below the limit: pass requests through untouched.
        self.project().inner.poll_next(cx)
    }
}
|
||||||
|
|
||||||
|
impl<C> Sink<Response<<C as Channel>::Resp>> for Throttler<C>
where
    C: Channel,
{
    type Error = io::Error;

    // All Sink operations delegate directly to the wrapped channel.
    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_ready(cx)
    }

    fn start_send(self: Pin<&mut Self>, item: Response<<C as Channel>::Resp>) -> io::Result<()> {
        self.project().inner.start_send(item)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        self.project().inner.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<io::Result<()>> {
        self.project().inner.poll_close(cx)
    }
}
|
||||||
|
|
||||||
|
impl<C> AsRef<C> for Throttler<C> {
    // Borrows the wrapped channel.
    fn as_ref(&self) -> &C {
        &self.inner
    }
}
|
||||||
|
|
||||||
|
impl<C> Channel for Throttler<C>
where
    C: Channel,
{
    type Req = <C as Channel>::Req;
    type Resp = <C as Channel>::Resp;

    // Channel operations delegate to the wrapped channel; throttling happens
    // only in the Stream impl.
    fn in_flight_requests(self: Pin<&mut Self>) -> usize {
        self.project().inner.in_flight_requests()
    }

    fn config(&self) -> &Config {
        self.inner.config()
    }

    fn start_request(self: Pin<&mut Self>, request_id: u64) -> AbortRegistration {
        self.project().inner.start_request(request_id)
    }
}
|
||||||
|
|
||||||
|
/// A stream of throttling channels.
|
||||||
|
#[pin_project]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct ThrottlerStream<S> {
|
||||||
|
#[pin]
|
||||||
|
inner: S,
|
||||||
|
max_in_flight_requests: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> ThrottlerStream<S>
|
||||||
|
where
|
||||||
|
S: Stream,
|
||||||
|
<S as Stream>::Item: Channel,
|
||||||
|
{
|
||||||
|
pub(crate) fn new(inner: S, max_in_flight_requests: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
inner,
|
||||||
|
max_in_flight_requests,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> Stream for ThrottlerStream<S>
|
||||||
|
where
|
||||||
|
S: Stream,
|
||||||
|
<S as Stream>::Item: Channel,
|
||||||
|
{
|
||||||
|
type Item = Throttler<<S as Stream>::Item>;
|
||||||
|
|
||||||
|
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
|
||||||
|
match ready!(self.as_mut().project().inner.poll_next(cx)) {
|
||||||
|
Some(channel) => Poll::Ready(Some(Throttler::new(
|
||||||
|
channel,
|
||||||
|
*self.project().max_in_flight_requests,
|
||||||
|
))),
|
||||||
|
None => Poll::Ready(None),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
use super::testing::{self, FakeChannel, PollExt};
|
||||||
|
#[cfg(test)]
|
||||||
|
use crate::Request;
|
||||||
|
#[cfg(test)]
|
||||||
|
use pin_utils::pin_mut;
|
||||||
|
#[cfg(test)]
|
||||||
|
use std::marker::PhantomData;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn throttler_in_flight_requests() {
|
||||||
|
let throttler = Throttler {
|
||||||
|
max_in_flight_requests: 0,
|
||||||
|
inner: FakeChannel::default::<isize, isize>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pin_mut!(throttler);
|
||||||
|
for i in 0..5 {
|
||||||
|
throttler.inner.in_flight_requests.insert(i);
|
||||||
|
}
|
||||||
|
assert_eq!(throttler.as_mut().in_flight_requests(), 5);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn throttler_start_request() {
|
||||||
|
let throttler = Throttler {
|
||||||
|
max_in_flight_requests: 0,
|
||||||
|
inner: FakeChannel::default::<isize, isize>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pin_mut!(throttler);
|
||||||
|
throttler.as_mut().start_request(1);
|
||||||
|
assert_eq!(throttler.inner.in_flight_requests.len(), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn throttler_poll_next_done() {
|
||||||
|
let throttler = Throttler {
|
||||||
|
max_in_flight_requests: 0,
|
||||||
|
inner: FakeChannel::default::<isize, isize>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pin_mut!(throttler);
|
||||||
|
assert!(throttler.as_mut().poll_next(&mut testing::cx()).is_done());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn throttler_poll_next_some() -> io::Result<()> {
|
||||||
|
let throttler = Throttler {
|
||||||
|
max_in_flight_requests: 1,
|
||||||
|
inner: FakeChannel::default::<isize, isize>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pin_mut!(throttler);
|
||||||
|
throttler.inner.push_req(0, 1);
|
||||||
|
assert!(throttler.as_mut().poll_ready(&mut testing::cx()).is_ready());
|
||||||
|
assert_eq!(
|
||||||
|
throttler
|
||||||
|
.as_mut()
|
||||||
|
.poll_next(&mut testing::cx())?
|
||||||
|
.map(|r| r.map(|r| (r.id, r.message))),
|
||||||
|
Poll::Ready(Some((0, 1)))
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn throttler_poll_next_throttled() {
|
||||||
|
let throttler = Throttler {
|
||||||
|
max_in_flight_requests: 0,
|
||||||
|
inner: FakeChannel::default::<isize, isize>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pin_mut!(throttler);
|
||||||
|
throttler.inner.push_req(1, 1);
|
||||||
|
assert!(throttler.as_mut().poll_next(&mut testing::cx()).is_done());
|
||||||
|
assert_eq!(throttler.inner.sink.len(), 1);
|
||||||
|
let resp = throttler.inner.sink.get(0).unwrap();
|
||||||
|
assert_eq!(resp.request_id, 1);
|
||||||
|
assert!(resp.message.is_err());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn throttler_poll_next_throttled_sink_not_ready() {
|
||||||
|
let throttler = Throttler {
|
||||||
|
max_in_flight_requests: 0,
|
||||||
|
inner: PendingSink::default::<isize, isize>(),
|
||||||
|
};
|
||||||
|
pin_mut!(throttler);
|
||||||
|
assert!(throttler.poll_next(&mut testing::cx()).is_pending());
|
||||||
|
|
||||||
|
struct PendingSink<In, Out> {
|
||||||
|
ghost: PhantomData<fn(Out) -> In>,
|
||||||
|
}
|
||||||
|
impl PendingSink<(), ()> {
|
||||||
|
pub fn default<Req, Resp>() -> PendingSink<io::Result<Request<Req>>, Response<Resp>> {
|
||||||
|
PendingSink { ghost: PhantomData }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<In, Out> Stream for PendingSink<In, Out> {
|
||||||
|
type Item = In;
|
||||||
|
fn poll_next(self: Pin<&mut Self>, _: &mut Context) -> Poll<Option<Self::Item>> {
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<In, Out> Sink<Out> for PendingSink<In, Out> {
|
||||||
|
type Error = io::Error;
|
||||||
|
fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll<Result<(), Self::Error>> {
|
||||||
|
Poll::Pending
|
||||||
|
}
|
||||||
|
fn start_send(self: Pin<&mut Self>, _: Out) -> Result<(), Self::Error> {
|
||||||
|
Err(io::Error::from(io::ErrorKind::WouldBlock))
|
||||||
|
}
|
||||||
|
fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll<Result<(), Self::Error>> {
|
||||||
|
Poll::Pending
|
||||||
|
}
|
||||||
|
fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll<Result<(), Self::Error>> {
|
||||||
|
Poll::Pending
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<Req, Resp> Channel for PendingSink<io::Result<Request<Req>>, Response<Resp>> {
|
||||||
|
type Req = Req;
|
||||||
|
type Resp = Resp;
|
||||||
|
fn config(&self) -> &Config {
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
fn in_flight_requests(self: Pin<&mut Self>) -> usize {
|
||||||
|
0
|
||||||
|
}
|
||||||
|
fn start_request(self: Pin<&mut Self>, _: u64) -> AbortRegistration {
|
||||||
|
unimplemented!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn throttler_start_send() {
|
||||||
|
let throttler = Throttler {
|
||||||
|
max_in_flight_requests: 0,
|
||||||
|
inner: FakeChannel::default::<isize, isize>(),
|
||||||
|
};
|
||||||
|
|
||||||
|
pin_mut!(throttler);
|
||||||
|
throttler.inner.in_flight_requests.insert(0);
|
||||||
|
throttler
|
||||||
|
.as_mut()
|
||||||
|
.start_send(Response {
|
||||||
|
request_id: 0,
|
||||||
|
message: Ok(1),
|
||||||
|
})
|
||||||
|
.unwrap();
|
||||||
|
assert!(throttler.inner.in_flight_requests.is_empty());
|
||||||
|
assert_eq!(
|
||||||
|
throttler.inner.sink.get(0),
|
||||||
|
Some(&Response {
|
||||||
|
request_id: 0,
|
||||||
|
message: Ok(1),
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
30
tarpc/src/rpc/transport.rs
Normal file
30
tarpc/src/rpc/transport.rs
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Provides a [`Transport`](sealed::Transport) trait as well as implementations.
|
||||||
|
//!
|
||||||
|
//! The rpc crate is transport- and protocol-agnostic. Any transport that impls [`Transport`](sealed::Transport)
|
||||||
|
//! can be plugged in, using whatever protocol it wants.
|
||||||
|
|
||||||
|
use futures::prelude::*;
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
pub mod channel;
|
||||||
|
|
||||||
|
pub(crate) mod sealed {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
/// A bidirectional stream ([`Sink`] + [`Stream`]) of messages.
|
||||||
|
pub trait Transport<SinkItem, Item>:
|
||||||
|
Stream<Item = io::Result<Item>> + Sink<SinkItem, Error = io::Error>
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T, SinkItem, Item> Transport<SinkItem, Item> for T where
|
||||||
|
T: Stream<Item = io::Result<Item>> + Sink<SinkItem, Error = io::Error> + ?Sized
|
||||||
|
{
|
||||||
|
}
|
||||||
|
}
|
||||||
123
tarpc/src/rpc/transport/channel.rs
Normal file
123
tarpc/src/rpc/transport/channel.rs
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Transports backed by in-memory channels.
|
||||||
|
|
||||||
|
use crate::PollIo;
|
||||||
|
use futures::{channel::mpsc, task::*, Sink, Stream};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::io;
|
||||||
|
use std::pin::Pin;
|
||||||
|
|
||||||
|
/// Returns two unbounded channel peers. Each [`Stream`] yields items sent through the other's
|
||||||
|
/// [`Sink`].
|
||||||
|
pub fn unbounded<SinkItem, Item>() -> (
|
||||||
|
UnboundedChannel<SinkItem, Item>,
|
||||||
|
UnboundedChannel<Item, SinkItem>,
|
||||||
|
) {
|
||||||
|
let (tx1, rx2) = mpsc::unbounded();
|
||||||
|
let (tx2, rx1) = mpsc::unbounded();
|
||||||
|
(
|
||||||
|
UnboundedChannel { tx: tx1, rx: rx1 },
|
||||||
|
UnboundedChannel { tx: tx2, rx: rx2 },
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A bi-directional channel backed by an [`UnboundedSender`](mpsc::UnboundedSender)
|
||||||
|
/// and [`UnboundedReceiver`](mpsc::UnboundedReceiver).
|
||||||
|
#[pin_project]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct UnboundedChannel<Item, SinkItem> {
|
||||||
|
#[pin]
|
||||||
|
rx: mpsc::UnboundedReceiver<Item>,
|
||||||
|
#[pin]
|
||||||
|
tx: mpsc::UnboundedSender<SinkItem>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem> Stream for UnboundedChannel<Item, SinkItem> {
|
||||||
|
type Item = Result<Item, io::Error>;
|
||||||
|
|
||||||
|
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> PollIo<Item> {
|
||||||
|
self.project().rx.poll_next(cx).map(|option| option.map(Ok))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem> Sink<SinkItem> for UnboundedChannel<Item, SinkItem> {
|
||||||
|
type Error = io::Error;
|
||||||
|
|
||||||
|
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
self.project()
|
||||||
|
.tx
|
||||||
|
.poll_ready(cx)
|
||||||
|
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_send(self: Pin<&mut Self>, item: SinkItem) -> io::Result<()> {
|
||||||
|
self.project()
|
||||||
|
.tx
|
||||||
|
.start_send(item)
|
||||||
|
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||||
|
self.project()
|
||||||
|
.tx
|
||||||
|
.poll_flush(cx)
|
||||||
|
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
self.project()
|
||||||
|
.tx
|
||||||
|
.poll_close(cx)
|
||||||
|
.map_err(|_| io::Error::from(io::ErrorKind::NotConnected))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use crate::{
|
||||||
|
client, context,
|
||||||
|
server::{Handler, Server},
|
||||||
|
transport,
|
||||||
|
};
|
||||||
|
use assert_matches::assert_matches;
|
||||||
|
use futures::{prelude::*, stream};
|
||||||
|
use log::trace;
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
#[cfg(feature = "tokio1")]
|
||||||
|
#[tokio::test(threaded_scheduler)]
|
||||||
|
async fn integration() -> io::Result<()> {
|
||||||
|
let _ = env_logger::try_init();
|
||||||
|
|
||||||
|
let (client_channel, server_channel) = transport::channel::unbounded();
|
||||||
|
tokio::spawn(
|
||||||
|
Server::default()
|
||||||
|
.incoming(stream::once(future::ready(server_channel)))
|
||||||
|
.respond_with(|_ctx, request: String| {
|
||||||
|
future::ready(request.parse::<u64>().map_err(|_| {
|
||||||
|
io::Error::new(
|
||||||
|
io::ErrorKind::InvalidInput,
|
||||||
|
format!("{:?} is not an int", request),
|
||||||
|
)
|
||||||
|
}))
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut client = client::new(client::Config::default(), client_channel).spawn()?;
|
||||||
|
|
||||||
|
let response1 = client.call(context::current(), "123".into()).await?;
|
||||||
|
let response2 = client.call(context::current(), "abc".into()).await?;
|
||||||
|
|
||||||
|
trace!("response1: {:?}, response2: {:?}", response1, response2);
|
||||||
|
|
||||||
|
assert_matches!(response1, Ok(123));
|
||||||
|
assert_matches!(response2, Err(ref e) if e.kind() == io::ErrorKind::InvalidInput);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -10,18 +10,18 @@ use std::{
|
|||||||
time::{Duration, SystemTime},
|
time::{Duration, SystemTime},
|
||||||
};
|
};
|
||||||
|
|
||||||
pub mod deadline_compat;
|
|
||||||
#[cfg(feature = "serde")]
|
#[cfg(feature = "serde")]
|
||||||
|
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
|
||||||
pub mod serde;
|
pub mod serde;
|
||||||
|
|
||||||
/// Types that can be represented by a [`Duration`].
|
/// Extension trait for [SystemTimes](SystemTime) in the future, i.e. deadlines.
|
||||||
pub trait AsDuration {
|
pub trait TimeUntil {
|
||||||
fn as_duration(&self) -> Duration;
|
/// How much time from now until this time is reached.
|
||||||
|
fn time_until(&self) -> Duration;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AsDuration for SystemTime {
|
impl TimeUntil for SystemTime {
|
||||||
/// Duration of 0 if self is earlier than [`SystemTime::now`].
|
fn time_until(&self) -> Duration {
|
||||||
fn as_duration(&self) -> Duration {
|
|
||||||
self.duration_since(SystemTime::now()).unwrap_or_default()
|
self.duration_since(SystemTime::now()).unwrap_or_default()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -38,9 +38,11 @@ where
|
|||||||
H: BuildHasher,
|
H: BuildHasher,
|
||||||
{
|
{
|
||||||
fn compact(&mut self, usage_ratio_threshold: f64) {
|
fn compact(&mut self, usage_ratio_threshold: f64) {
|
||||||
let usage_ratio = self.len() as f64 / self.capacity() as f64;
|
if self.capacity() > 1000 {
|
||||||
if usage_ratio < usage_ratio_threshold {
|
let usage_ratio = self.len() as f64 / self.capacity() as f64;
|
||||||
self.shrink_to_fit();
|
if usage_ratio < usage_ratio_threshold {
|
||||||
|
self.shrink_to_fit();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -15,9 +15,10 @@ pub fn serialize_epoch_secs<S>(system_time: &SystemTime, serializer: S) -> Resul
|
|||||||
where
|
where
|
||||||
S: Serializer,
|
S: Serializer,
|
||||||
{
|
{
|
||||||
|
const ZERO_SECS: Duration = Duration::from_secs(0);
|
||||||
system_time
|
system_time
|
||||||
.duration_since(SystemTime::UNIX_EPOCH)
|
.duration_since(SystemTime::UNIX_EPOCH)
|
||||||
.unwrap_or(Duration::from_secs(0))
|
.unwrap_or(ZERO_SECS)
|
||||||
.as_secs() // Only care about second precision
|
.as_secs() // Only care about second precision
|
||||||
.serialize(serializer)
|
.serialize(serializer)
|
||||||
}
|
}
|
||||||
393
tarpc/src/serde_transport.rs
Normal file
393
tarpc/src/serde_transport.rs
Normal file
@@ -0,0 +1,393 @@
|
|||||||
|
// Copyright 2019 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! A generic Serde-based `Transport` that can serialize anything supported by `tokio-serde` via any medium that implements `AsyncRead` and `AsyncWrite`.
|
||||||
|
|
||||||
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
|
use futures::{prelude::*, task::*};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::{error::Error, io, pin::Pin};
|
||||||
|
use tokio::io::{AsyncRead, AsyncWrite};
|
||||||
|
use tokio_serde::{Framed as SerdeFramed, *};
|
||||||
|
use tokio_util::codec::{
|
||||||
|
length_delimited::{self, LengthDelimitedCodec},
|
||||||
|
Framed,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// A transport that serializes to, and deserializes from, a byte stream.
|
||||||
|
#[pin_project]
|
||||||
|
pub struct Transport<S, Item, SinkItem, Codec> {
|
||||||
|
#[pin]
|
||||||
|
inner: SerdeFramed<Framed<S, LengthDelimitedCodec>, Item, SinkItem, Codec>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec> Transport<S, Item, SinkItem, Codec> {
|
||||||
|
/// Returns the inner transport over which messages are sent and received.
|
||||||
|
pub fn get_ref(&self) -> &S {
|
||||||
|
self.inner.get_ref().get_ref()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec, CodecError> Stream for Transport<S, Item, SinkItem, Codec>
|
||||||
|
where
|
||||||
|
S: AsyncWrite + AsyncRead,
|
||||||
|
Item: for<'a> Deserialize<'a>,
|
||||||
|
Codec: Deserializer<Item>,
|
||||||
|
CodecError: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||||
|
SerdeFramed<Framed<S, LengthDelimitedCodec>, Item, SinkItem, Codec>:
|
||||||
|
Stream<Item = Result<Item, CodecError>>,
|
||||||
|
{
|
||||||
|
type Item = io::Result<Item>;
|
||||||
|
|
||||||
|
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<io::Result<Item>>> {
|
||||||
|
match self.project().inner.poll_next(cx) {
|
||||||
|
Poll::Pending => Poll::Pending,
|
||||||
|
Poll::Ready(None) => Poll::Ready(None),
|
||||||
|
Poll::Ready(Some(Ok::<_, CodecError>(next))) => Poll::Ready(Some(Ok(next))),
|
||||||
|
Poll::Ready(Some(Err::<_, CodecError>(e))) => {
|
||||||
|
Poll::Ready(Some(Err(io::Error::new(io::ErrorKind::Other, e))))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec, CodecError> Sink<SinkItem> for Transport<S, Item, SinkItem, Codec>
|
||||||
|
where
|
||||||
|
S: AsyncWrite,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem>,
|
||||||
|
CodecError: Into<Box<dyn Error + Send + Sync>>,
|
||||||
|
SerdeFramed<Framed<S, LengthDelimitedCodec>, Item, SinkItem, Codec>:
|
||||||
|
Sink<SinkItem, Error = CodecError>,
|
||||||
|
{
|
||||||
|
type Error = io::Error;
|
||||||
|
|
||||||
|
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
convert(self.project().inner.poll_ready(cx))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_send(self: Pin<&mut Self>, item: SinkItem) -> io::Result<()> {
|
||||||
|
self.project()
|
||||||
|
.inner
|
||||||
|
.start_send(item)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
convert(self.project().inner.poll_flush(cx))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
convert(self.project().inner.poll_close(cx))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn convert<E: Into<Box<dyn Error + Send + Sync>>>(
|
||||||
|
poll: Poll<Result<(), E>>,
|
||||||
|
) -> Poll<io::Result<()>> {
|
||||||
|
poll.map(|ready| ready.map_err(|e| io::Error::new(io::ErrorKind::Other, e)))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Constructs a new transport from a framed transport and a serialization codec.
|
||||||
|
pub fn new<S, Item, SinkItem, Codec>(
|
||||||
|
framed_io: Framed<S, LengthDelimitedCodec>,
|
||||||
|
codec: Codec,
|
||||||
|
) -> Transport<S, Item, SinkItem, Codec>
|
||||||
|
where
|
||||||
|
S: AsyncWrite + AsyncRead,
|
||||||
|
Item: for<'de> Deserialize<'de>,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem> + Deserializer<Item>,
|
||||||
|
{
|
||||||
|
Transport {
|
||||||
|
inner: SerdeFramed::new(framed_io, codec),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec> From<(S, Codec)> for Transport<S, Item, SinkItem, Codec>
|
||||||
|
where
|
||||||
|
S: AsyncWrite + AsyncRead,
|
||||||
|
Item: for<'de> Deserialize<'de>,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem> + Deserializer<Item>,
|
||||||
|
{
|
||||||
|
fn from((io, codec): (S, Codec)) -> Self {
|
||||||
|
new(Framed::new(io, LengthDelimitedCodec::new()), codec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "tcp")]
|
||||||
|
#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
|
||||||
|
/// TCP support for generic transport using Tokio.
|
||||||
|
pub mod tcp {
|
||||||
|
use {
|
||||||
|
super::*,
|
||||||
|
futures::ready,
|
||||||
|
std::{marker::PhantomData, net::SocketAddr},
|
||||||
|
tokio::net::{TcpListener, TcpStream, ToSocketAddrs},
|
||||||
|
};
|
||||||
|
|
||||||
|
mod private {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
pub trait Sealed {}
|
||||||
|
|
||||||
|
impl<Item, SinkItem, Codec> Sealed for Transport<TcpStream, Item, SinkItem, Codec> {}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem, Codec> Transport<TcpStream, Item, SinkItem, Codec> {
|
||||||
|
/// Returns the peer address of the underlying TcpStream.
|
||||||
|
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
|
||||||
|
self.inner.get_ref().get_ref().peer_addr()
|
||||||
|
}
|
||||||
|
/// Returns the local address of the underlying TcpStream.
|
||||||
|
pub fn local_addr(&self) -> io::Result<SocketAddr> {
|
||||||
|
self.inner.get_ref().get_ref().local_addr()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A connection Future that also exposes the length-delimited framing config.
|
||||||
|
#[pin_project]
|
||||||
|
pub struct Connect<T, Item, SinkItem, CodecFn> {
|
||||||
|
#[pin]
|
||||||
|
inner: T,
|
||||||
|
codec_fn: CodecFn,
|
||||||
|
config: length_delimited::Builder,
|
||||||
|
ghost: PhantomData<(fn(SinkItem), fn() -> Item)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T, Item, SinkItem, Codec, CodecFn> Future for Connect<T, Item, SinkItem, CodecFn>
|
||||||
|
where
|
||||||
|
T: Future<Output = io::Result<TcpStream>>,
|
||||||
|
Item: for<'de> Deserialize<'de>,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem> + Deserializer<Item>,
|
||||||
|
CodecFn: Fn() -> Codec,
|
||||||
|
{
|
||||||
|
type Output = io::Result<Transport<TcpStream, Item, SinkItem, Codec>>;
|
||||||
|
|
||||||
|
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
|
||||||
|
let io = ready!(self.as_mut().project().inner.poll(cx))?;
|
||||||
|
Poll::Ready(Ok(new(self.config.new_framed(io), (self.codec_fn)())))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T, Item, SinkItem, CodecFn> Connect<T, Item, SinkItem, CodecFn> {
|
||||||
|
/// Returns an immutable reference to the length-delimited codec's config.
|
||||||
|
pub fn config(&self) -> &length_delimited::Builder {
|
||||||
|
&self.config
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a mutable reference to the length-delimited codec's config.
|
||||||
|
pub fn config_mut(&mut self) -> &mut length_delimited::Builder {
|
||||||
|
&mut self.config
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Connects to `addr`, wrapping the connection in a TCP transport.
|
||||||
|
pub fn connect<A, Item, SinkItem, Codec, CodecFn>(
|
||||||
|
addr: A,
|
||||||
|
codec_fn: CodecFn,
|
||||||
|
) -> Connect<impl Future<Output = io::Result<TcpStream>>, Item, SinkItem, CodecFn>
|
||||||
|
where
|
||||||
|
A: ToSocketAddrs,
|
||||||
|
Item: for<'de> Deserialize<'de>,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem> + Deserializer<Item>,
|
||||||
|
CodecFn: Fn() -> Codec,
|
||||||
|
{
|
||||||
|
Connect {
|
||||||
|
inner: TcpStream::connect(addr),
|
||||||
|
codec_fn,
|
||||||
|
config: LengthDelimitedCodec::builder(),
|
||||||
|
ghost: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Listens on `addr`, wrapping accepted connections in TCP transports.
|
||||||
|
pub async fn listen<A, Item, SinkItem, Codec, CodecFn>(
|
||||||
|
addr: A,
|
||||||
|
codec_fn: CodecFn,
|
||||||
|
) -> io::Result<Incoming<Item, SinkItem, Codec, CodecFn>>
|
||||||
|
where
|
||||||
|
A: ToSocketAddrs,
|
||||||
|
Item: for<'de> Deserialize<'de>,
|
||||||
|
Codec: Serializer<SinkItem> + Deserializer<Item>,
|
||||||
|
CodecFn: Fn() -> Codec,
|
||||||
|
{
|
||||||
|
let listener = TcpListener::bind(addr).await?;
|
||||||
|
let local_addr = listener.local_addr()?;
|
||||||
|
Ok(Incoming {
|
||||||
|
listener,
|
||||||
|
codec_fn,
|
||||||
|
local_addr,
|
||||||
|
config: LengthDelimitedCodec::builder(),
|
||||||
|
ghost: PhantomData,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A [`TcpListener`] that wraps connections in [transports](Transport).
|
||||||
|
#[pin_project]
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Incoming<Item, SinkItem, Codec, CodecFn> {
|
||||||
|
listener: TcpListener,
|
||||||
|
local_addr: SocketAddr,
|
||||||
|
codec_fn: CodecFn,
|
||||||
|
config: length_delimited::Builder,
|
||||||
|
ghost: PhantomData<(fn() -> Item, fn(SinkItem), Codec)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem, Codec, CodecFn> Incoming<Item, SinkItem, Codec, CodecFn> {
|
||||||
|
/// Returns the address being listened on.
|
||||||
|
pub fn local_addr(&self) -> SocketAddr {
|
||||||
|
self.local_addr
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an immutable reference to the length-delimited codec's config.
|
||||||
|
pub fn config(&self) -> &length_delimited::Builder {
|
||||||
|
&self.config
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a mutable reference to the length-delimited codec's config.
|
||||||
|
pub fn config_mut(&mut self) -> &mut length_delimited::Builder {
|
||||||
|
&mut self.config
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem, Codec, CodecFn> Stream for Incoming<Item, SinkItem, Codec, CodecFn>
|
||||||
|
where
|
||||||
|
Item: for<'de> Deserialize<'de>,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem> + Deserializer<Item>,
|
||||||
|
CodecFn: Fn() -> Codec,
|
||||||
|
{
|
||||||
|
type Item = io::Result<Transport<TcpStream, Item, SinkItem, Codec>>;
|
||||||
|
|
||||||
|
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
|
||||||
|
let next =
|
||||||
|
ready!(Pin::new(&mut self.as_mut().project().listener.incoming()).poll_next(cx)?);
|
||||||
|
Poll::Ready(next.map(|conn| Ok(new(self.config.new_framed(conn), (self.codec_fn)()))))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::Transport;
|
||||||
|
use assert_matches::assert_matches;
|
||||||
|
use futures::{task::*, Sink, Stream};
|
||||||
|
use pin_utils::pin_mut;
|
||||||
|
use std::{
|
||||||
|
io::{self, Cursor},
|
||||||
|
pin::Pin,
|
||||||
|
};
|
||||||
|
use tokio::io::{AsyncRead, AsyncWrite};
|
||||||
|
use tokio_serde::formats::SymmetricalJson;
|
||||||
|
|
||||||
|
fn ctx() -> Context<'static> {
|
||||||
|
Context::from_waker(&noop_waker_ref())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_stream() {
|
||||||
|
struct TestIo(Cursor<&'static [u8]>);
|
||||||
|
|
||||||
|
impl AsyncRead for TestIo {
|
||||||
|
fn poll_read(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
buf: &mut [u8],
|
||||||
|
) -> Poll<io::Result<usize>> {
|
||||||
|
AsyncRead::poll_read(Pin::new(self.0.get_mut()), cx, buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AsyncWrite for TestIo {
|
||||||
|
fn poll_write(
|
||||||
|
self: Pin<&mut Self>,
|
||||||
|
_cx: &mut Context<'_>,
|
||||||
|
_buf: &[u8],
|
||||||
|
) -> Poll<io::Result<usize>> {
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let data = b"\x00\x00\x00\x18\"Test one, check check.\"";
|
||||||
|
let transport = Transport::from((
|
||||||
|
TestIo(Cursor::new(data)),
|
||||||
|
SymmetricalJson::<String>::default(),
|
||||||
|
));
|
||||||
|
pin_mut!(transport);
|
||||||
|
|
||||||
|
assert_matches!(
|
||||||
|
transport.poll_next(&mut ctx()),
|
||||||
|
Poll::Ready(Some(Ok(ref s))) if s == "Test one, check check.");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_sink() {
|
||||||
|
struct TestIo<'a>(&'a mut Vec<u8>);
|
||||||
|
|
||||||
|
impl<'a> AsyncRead for TestIo<'a> {
|
||||||
|
fn poll_read(
|
||||||
|
self: Pin<&mut Self>,
|
||||||
|
_cx: &mut Context<'_>,
|
||||||
|
_buf: &mut [u8],
|
||||||
|
) -> Poll<io::Result<usize>> {
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> AsyncWrite for TestIo<'a> {
|
||||||
|
fn poll_write(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
buf: &[u8],
|
||||||
|
) -> Poll<io::Result<usize>> {
|
||||||
|
AsyncWrite::poll_write(Pin::new(&mut *self.0), cx, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
AsyncWrite::poll_flush(Pin::new(&mut *self.0), cx)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_shutdown(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> Poll<io::Result<()>> {
|
||||||
|
AsyncWrite::poll_shutdown(Pin::new(&mut *self.0), cx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut writer = vec![];
|
||||||
|
let transport =
|
||||||
|
Transport::from((TestIo(&mut writer), SymmetricalJson::<String>::default()));
|
||||||
|
pin_mut!(transport);
|
||||||
|
|
||||||
|
assert_matches!(
|
||||||
|
transport.as_mut().poll_ready(&mut ctx()),
|
||||||
|
Poll::Ready(Ok(()))
|
||||||
|
);
|
||||||
|
assert_matches!(
|
||||||
|
transport
|
||||||
|
.as_mut()
|
||||||
|
.start_send("Test one, check check.".into()),
|
||||||
|
Ok(())
|
||||||
|
);
|
||||||
|
assert_matches!(transport.poll_flush(&mut ctx()), Poll::Ready(Ok(())));
|
||||||
|
assert_eq!(writer, b"\x00\x00\x00\x18\"Test one, check check.\"");
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -26,7 +26,7 @@ use std::{
|
|||||||
///
|
///
|
||||||
/// Consists of a span identifying an event, an optional parent span identifying a causal event
|
/// Consists of a span identifying an event, an optional parent span identifying a causal event
|
||||||
/// that triggered the current span, and a trace with which all related spans are associated.
|
/// that triggered the current span, and a trace with which all related spans are associated.
|
||||||
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
|
#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Copy)]
|
||||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub struct Context {
|
pub struct Context {
|
||||||
/// An identifier of the trace associated with the current context. A trace ID is typically
|
/// An identifier of the trace associated with the current context. A trace ID is typically
|
||||||
@@ -46,12 +46,12 @@ pub struct Context {
|
|||||||
|
|
||||||
/// A 128-bit UUID identifying a trace. All spans caused by the same originating span share the
|
/// A 128-bit UUID identifying a trace. All spans caused by the same originating span share the
|
||||||
/// same trace ID.
|
/// same trace ID.
|
||||||
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
|
#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Copy)]
|
||||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub struct TraceId(u128);
|
pub struct TraceId(u128);
|
||||||
|
|
||||||
/// A 64-bit identifier of a span within a trace. The identifier is unique within the span's trace.
|
/// A 64-bit identifier of a span within a trace. The identifier is unique within the span's trace.
|
||||||
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
|
#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Copy)]
|
||||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||||
pub struct SpanId(u64);
|
pub struct SpanId(u64);
|
||||||
|
|
||||||
5
tarpc/tests/compile_fail.rs
Normal file
5
tarpc/tests/compile_fail.rs
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
#[test]
|
||||||
|
fn ui() {
|
||||||
|
let t = trybuild::TestCases::new();
|
||||||
|
t.compile_fail("tests/compile_fail/*.rs");
|
||||||
|
}
|
||||||
15
tarpc/tests/compile_fail/tarpc_server_missing_async.rs
Normal file
15
tarpc/tests/compile_fail/tarpc_server_missing_async.rs
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn hello(name: String) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct HelloServer;
|
||||||
|
|
||||||
|
#[tarpc::server]
|
||||||
|
impl World for HelloServer {
|
||||||
|
fn hello(name: String) -> String {
|
||||||
|
format!("Hello, {}!", name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {}
|
||||||
19
tarpc/tests/compile_fail/tarpc_server_missing_async.stderr
Normal file
19
tarpc/tests/compile_fail/tarpc_server_missing_async.stderr
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
error: not all trait items implemented, missing: `HelloFut`
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:9:1
|
||||||
|
|
|
||||||
|
9 | impl World for HelloServer {
|
||||||
|
| ^^^^
|
||||||
|
|
||||||
|
error: hint: `#[tarpc::server]` only rewrites async fns, and `fn hello` is not async
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:10:5
|
||||||
|
|
|
||||||
|
10 | fn hello(name: String) -> String {
|
||||||
|
| ^^
|
||||||
|
|
||||||
|
error[E0433]: failed to resolve: use of undeclared type or module `serde`
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:1:1
|
||||||
|
|
|
||||||
|
1 | #[tarpc::service]
|
||||||
|
| ^^^^^^^^^^^^^^^^^ use of undeclared type or module `serde`
|
||||||
|
|
|
||||||
|
= note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info)
|
||||||
6
tarpc/tests/compile_fail/tarpc_service_arg_pat.rs
Normal file
6
tarpc/tests/compile_fail/tarpc_service_arg_pat.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn pat((a, b): (u8, u32));
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {}
|
||||||
5
tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr
Normal file
5
tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
error: patterns aren't allowed in RPC args
|
||||||
|
--> $DIR/tarpc_service_arg_pat.rs:3:18
|
||||||
|
|
|
||||||
|
3 | async fn pat((a, b): (u8, u32));
|
||||||
|
| ^^^^^^
|
||||||
6
tarpc/tests/compile_fail/tarpc_service_fn_new.rs
Normal file
6
tarpc/tests/compile_fail/tarpc_service_fn_new.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn new();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {}
|
||||||
5
tarpc/tests/compile_fail/tarpc_service_fn_new.stderr
Normal file
5
tarpc/tests/compile_fail/tarpc_service_fn_new.stderr
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
error: method name conflicts with generated fn `WorldClient::new`
|
||||||
|
--> $DIR/tarpc_service_fn_new.rs:3:14
|
||||||
|
|
|
||||||
|
3 | async fn new();
|
||||||
|
| ^^^
|
||||||
6
tarpc/tests/compile_fail/tarpc_service_fn_serve.rs
Normal file
6
tarpc/tests/compile_fail/tarpc_service_fn_serve.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn serve();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {}
|
||||||
5
tarpc/tests/compile_fail/tarpc_service_fn_serve.stderr
Normal file
5
tarpc/tests/compile_fail/tarpc_service_fn_serve.stderr
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
error: method name conflicts with generated fn `World::serve`
|
||||||
|
--> $DIR/tarpc_service_fn_serve.rs:3:14
|
||||||
|
|
|
||||||
|
3 | async fn serve();
|
||||||
|
| ^^^^^
|
||||||
@@ -1,120 +0,0 @@
|
|||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Use of this source code is governed by an MIT-style
|
|
||||||
// license that can be found in the LICENSE file or at
|
|
||||||
// https://opensource.org/licenses/MIT.
|
|
||||||
|
|
||||||
#![feature(
|
|
||||||
test,
|
|
||||||
arbitrary_self_types,
|
|
||||||
integer_atomics,
|
|
||||||
async_await,
|
|
||||||
proc_macro_hygiene
|
|
||||||
)]
|
|
||||||
|
|
||||||
use futures::{compat::Executor01CompatExt, future, prelude::*};
|
|
||||||
use libtest::stats::Stats;
|
|
||||||
use rpc::{
|
|
||||||
client, context,
|
|
||||||
server::{Handler, Server},
|
|
||||||
};
|
|
||||||
use std::{
|
|
||||||
io,
|
|
||||||
time::{Duration, Instant},
|
|
||||||
};
|
|
||||||
|
|
||||||
mod ack {
|
|
||||||
tarpc::service! {
|
|
||||||
rpc ack();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct Serve;
|
|
||||||
|
|
||||||
impl ack::Service for Serve {
|
|
||||||
type AckFut = future::Ready<()>;
|
|
||||||
|
|
||||||
fn ack(self, _: context::Context) -> Self::AckFut {
|
|
||||||
future::ready(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn bench() -> io::Result<()> {
|
|
||||||
let listener = bincode_transport::listen(&"0.0.0.0:0".parse().unwrap())?;
|
|
||||||
let addr = listener.local_addr();
|
|
||||||
|
|
||||||
tokio_executor::spawn(
|
|
||||||
Server::default()
|
|
||||||
.incoming(listener)
|
|
||||||
.take(1)
|
|
||||||
.respond_with(ack::serve(Serve))
|
|
||||||
.unit_error()
|
|
||||||
.boxed()
|
|
||||||
.compat(),
|
|
||||||
);
|
|
||||||
|
|
||||||
let conn = bincode_transport::connect(&addr).await?;
|
|
||||||
let mut client = ack::new_stub(client::Config::default(), conn).await?;
|
|
||||||
|
|
||||||
let total = 10_000usize;
|
|
||||||
let mut successful = 0u32;
|
|
||||||
let mut unsuccessful = 0u32;
|
|
||||||
let mut durations = vec![];
|
|
||||||
for _ in 1..=total {
|
|
||||||
let now = Instant::now();
|
|
||||||
let response = client.ack(context::current()).await;
|
|
||||||
let elapsed = now.elapsed();
|
|
||||||
|
|
||||||
match response {
|
|
||||||
Ok(_) => successful += 1,
|
|
||||||
Err(_) => unsuccessful += 1,
|
|
||||||
};
|
|
||||||
durations.push(elapsed);
|
|
||||||
}
|
|
||||||
|
|
||||||
let durations_nanos = durations
|
|
||||||
.iter()
|
|
||||||
.map(|duration| duration.as_secs() as f64 * 1E9 + duration.subsec_nanos() as f64)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let (lower, median, upper) = durations_nanos.quartiles();
|
|
||||||
|
|
||||||
println!("Of {:?} runs:", durations_nanos.len());
|
|
||||||
println!("\tSuccessful: {:?}", successful);
|
|
||||||
println!("\tUnsuccessful: {:?}", unsuccessful);
|
|
||||||
println!(
|
|
||||||
"\tMean: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.mean() as u64)
|
|
||||||
);
|
|
||||||
println!("\tMedian: {:?}", Duration::from_nanos(median as u64));
|
|
||||||
println!(
|
|
||||||
"\tStd Dev: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.std_dev() as u64)
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"\tMin: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.min() as u64)
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"\tMax: {:?}",
|
|
||||||
Duration::from_nanos(durations_nanos.max() as u64)
|
|
||||||
);
|
|
||||||
println!(
|
|
||||||
"\tQuartiles: ({:?}, {:?}, {:?})",
|
|
||||||
Duration::from_nanos(lower as u64),
|
|
||||||
Duration::from_nanos(median as u64),
|
|
||||||
Duration::from_nanos(upper as u64)
|
|
||||||
);
|
|
||||||
|
|
||||||
println!("done");
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn bench_small_packet() {
|
|
||||||
env_logger::init();
|
|
||||||
tarpc::init(tokio::executor::DefaultExecutor::current().compat());
|
|
||||||
|
|
||||||
tokio::run(bench().map_err(|e| panic!(e.to_string())).boxed().compat())
|
|
||||||
}
|
|
||||||
169
tarpc/tests/service_functional.rs
Normal file
169
tarpc/tests/service_functional.rs
Normal file
@@ -0,0 +1,169 @@
|
|||||||
|
use assert_matches::assert_matches;
|
||||||
|
use futures::{
|
||||||
|
future::{join_all, ready, Ready},
|
||||||
|
prelude::*,
|
||||||
|
};
|
||||||
|
use std::io;
|
||||||
|
use tarpc::{
|
||||||
|
client::{self},
|
||||||
|
context, serde_transport,
|
||||||
|
server::{self, BaseChannel, Channel, Handler},
|
||||||
|
transport::channel,
|
||||||
|
};
|
||||||
|
use tokio::join;
|
||||||
|
use tokio_serde::formats::Json;
|
||||||
|
|
||||||
|
#[tarpc_plugins::service]
|
||||||
|
trait Service {
|
||||||
|
async fn add(x: i32, y: i32) -> i32;
|
||||||
|
async fn hey(name: String) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct Server;
|
||||||
|
|
||||||
|
impl Service for Server {
|
||||||
|
type AddFut = Ready<i32>;
|
||||||
|
|
||||||
|
fn add(self, _: context::Context, x: i32, y: i32) -> Self::AddFut {
|
||||||
|
ready(x + y)
|
||||||
|
}
|
||||||
|
|
||||||
|
type HeyFut = Ready<String>;
|
||||||
|
|
||||||
|
fn hey(self, _: context::Context, name: String) -> Self::HeyFut {
|
||||||
|
ready(format!("Hey, {}.", name))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(threaded_scheduler)]
|
||||||
|
async fn sequential() -> io::Result<()> {
|
||||||
|
let _ = env_logger::try_init();
|
||||||
|
|
||||||
|
let (tx, rx) = channel::unbounded();
|
||||||
|
|
||||||
|
tokio::spawn(
|
||||||
|
BaseChannel::new(server::Config::default(), rx)
|
||||||
|
.respond_with(Server.serve())
|
||||||
|
.execute(),
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut client = ServiceClient::new(client::Config::default(), tx).spawn()?;
|
||||||
|
|
||||||
|
assert_matches!(client.add(context::current(), 1, 2).await, Ok(3));
|
||||||
|
assert_matches!(
|
||||||
|
client.hey(context::current(), "Tim".into()).await,
|
||||||
|
Ok(ref s) if s == "Hey, Tim.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "serde1")]
|
||||||
|
#[tokio::test(threaded_scheduler)]
|
||||||
|
async fn serde() -> io::Result<()> {
|
||||||
|
let _ = env_logger::try_init();
|
||||||
|
|
||||||
|
let transport = serde_transport::tcp::listen("localhost:56789", Json::default).await?;
|
||||||
|
let addr = transport.local_addr();
|
||||||
|
tokio::spawn(
|
||||||
|
tarpc::Server::default()
|
||||||
|
.incoming(transport.take(1).filter_map(|r| async { r.ok() }))
|
||||||
|
.respond_with(Server.serve()),
|
||||||
|
);
|
||||||
|
|
||||||
|
let transport = serde_transport::tcp::connect(addr, Json::default).await?;
|
||||||
|
let mut client = ServiceClient::new(client::Config::default(), transport).spawn()?;
|
||||||
|
|
||||||
|
assert_matches!(client.add(context::current(), 1, 2).await, Ok(3));
|
||||||
|
assert_matches!(
|
||||||
|
client.hey(context::current(), "Tim".to_string()).await,
|
||||||
|
Ok(ref s) if s == "Hey, Tim."
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(threaded_scheduler)]
|
||||||
|
async fn concurrent() -> io::Result<()> {
|
||||||
|
let _ = env_logger::try_init();
|
||||||
|
|
||||||
|
let (tx, rx) = channel::unbounded();
|
||||||
|
tokio::spawn(
|
||||||
|
tarpc::Server::default()
|
||||||
|
.incoming(stream::once(ready(rx)))
|
||||||
|
.respond_with(Server.serve()),
|
||||||
|
);
|
||||||
|
|
||||||
|
let client = ServiceClient::new(client::Config::default(), tx).spawn()?;
|
||||||
|
|
||||||
|
let mut c = client.clone();
|
||||||
|
let req1 = c.add(context::current(), 1, 2);
|
||||||
|
|
||||||
|
let mut c = client.clone();
|
||||||
|
let req2 = c.add(context::current(), 3, 4);
|
||||||
|
|
||||||
|
let mut c = client.clone();
|
||||||
|
let req3 = c.hey(context::current(), "Tim".to_string());
|
||||||
|
|
||||||
|
assert_matches!(req1.await, Ok(3));
|
||||||
|
assert_matches!(req2.await, Ok(7));
|
||||||
|
assert_matches!(req3.await, Ok(ref s) if s == "Hey, Tim.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(threaded_scheduler)]
|
||||||
|
async fn concurrent_join() -> io::Result<()> {
|
||||||
|
let _ = env_logger::try_init();
|
||||||
|
|
||||||
|
let (tx, rx) = channel::unbounded();
|
||||||
|
tokio::spawn(
|
||||||
|
tarpc::Server::default()
|
||||||
|
.incoming(stream::once(ready(rx)))
|
||||||
|
.respond_with(Server.serve()),
|
||||||
|
);
|
||||||
|
|
||||||
|
let client = ServiceClient::new(client::Config::default(), tx).spawn()?;
|
||||||
|
|
||||||
|
let mut c = client.clone();
|
||||||
|
let req1 = c.add(context::current(), 1, 2);
|
||||||
|
|
||||||
|
let mut c = client.clone();
|
||||||
|
let req2 = c.add(context::current(), 3, 4);
|
||||||
|
|
||||||
|
let mut c = client.clone();
|
||||||
|
let req3 = c.hey(context::current(), "Tim".to_string());
|
||||||
|
|
||||||
|
let (resp1, resp2, resp3) = join!(req1, req2, req3);
|
||||||
|
assert_matches!(resp1, Ok(3));
|
||||||
|
assert_matches!(resp2, Ok(7));
|
||||||
|
assert_matches!(resp3, Ok(ref s) if s == "Hey, Tim.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(threaded_scheduler)]
|
||||||
|
async fn concurrent_join_all() -> io::Result<()> {
|
||||||
|
let _ = env_logger::try_init();
|
||||||
|
|
||||||
|
let (tx, rx) = channel::unbounded();
|
||||||
|
tokio::spawn(
|
||||||
|
tarpc::Server::default()
|
||||||
|
.incoming(stream::once(ready(rx)))
|
||||||
|
.respond_with(Server.serve()),
|
||||||
|
);
|
||||||
|
|
||||||
|
let client = ServiceClient::new(client::Config::default(), tx).spawn()?;
|
||||||
|
|
||||||
|
let mut c1 = client.clone();
|
||||||
|
let mut c2 = client.clone();
|
||||||
|
|
||||||
|
let req1 = c1.add(context::current(), 1, 2);
|
||||||
|
let req2 = c2.add(context::current(), 3, 4);
|
||||||
|
|
||||||
|
let responses = join_all(vec![req1, req2]).await;
|
||||||
|
assert_matches!(responses[0], Ok(3));
|
||||||
|
assert_matches!(responses[1], Ok(7));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "tarpc-trace"
|
|
||||||
version = "0.2.0"
|
|
||||||
authors = ["tikue <tikue@google.com>"]
|
|
||||||
edition = '2018'
|
|
||||||
license = "MIT"
|
|
||||||
documentation = "https://docs.rs/tarpc-trace"
|
|
||||||
homepage = "https://github.com/google/tarpc"
|
|
||||||
repository = "https://github.com/google/tarpc"
|
|
||||||
keywords = ["rpc", "network", "server", "api", "tls"]
|
|
||||||
categories = ["asynchronous", "network-programming"]
|
|
||||||
readme = "../README.md"
|
|
||||||
description = "foundations for tracing in tarpc"
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
rand = "0.6"
|
|
||||||
|
|
||||||
[dependencies.serde]
|
|
||||||
version = "1.0"
|
|
||||||
optional = true
|
|
||||||
features = ["derive"]
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
edition = "2018"
|
|
||||||
Reference in New Issue
Block a user