mirror of
https://github.com/OMGeeky/tarpc.git
synced 2026-02-23 15:49:54 +01:00
Compare commits
345 Commits
v0.7.2
...
request-ho
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a6758fd1f9 | ||
|
|
2c241cc809 | ||
|
|
263ef8a897 | ||
|
|
d50290a21c | ||
|
|
26988cb833 | ||
|
|
6cf18a1caf | ||
|
|
84932df9b4 | ||
|
|
8dc3711a80 | ||
|
|
7c5afa97bb | ||
|
|
324df5cd15 | ||
|
|
3264979993 | ||
|
|
dd63fb59bf | ||
|
|
f4db8cc5b4 | ||
|
|
e9ba350496 | ||
|
|
e6d779e70b | ||
|
|
ce5f8cfb0c | ||
|
|
4b69dc8db5 | ||
|
|
866db2a2cd | ||
|
|
bed85e2827 | ||
|
|
93f3880025 | ||
|
|
878f594d5b | ||
|
|
aa9bbad109 | ||
|
|
7e872ce925 | ||
|
|
62541b709d | ||
|
|
8c43f94fb6 | ||
|
|
7fa4e5064d | ||
|
|
94db7610bb | ||
|
|
0c08d5e8ca | ||
|
|
75b15fe2aa | ||
|
|
863a08d87e | ||
|
|
49ba8f8b1b | ||
|
|
d832209da3 | ||
|
|
584426d414 | ||
|
|
50eb80c883 | ||
|
|
1f0c80d8c9 | ||
|
|
99bf3e62a3 | ||
|
|
68863e3db0 | ||
|
|
453ba1c074 | ||
|
|
e3eac1b4f5 | ||
|
|
0e102288a5 | ||
|
|
4c8ba41b2f | ||
|
|
946c627579 | ||
|
|
104dd71bba | ||
|
|
012c481861 | ||
|
|
dc12bd09aa | ||
|
|
2594ea8ce9 | ||
|
|
839b87e394 | ||
|
|
57d0638a99 | ||
|
|
a3a6404a30 | ||
|
|
b36eac80b1 | ||
|
|
d7070e4bc3 | ||
|
|
b5d1828308 | ||
|
|
92cfe63c4f | ||
|
|
839a2f067c | ||
|
|
b5d593488c | ||
|
|
eea38b8bf4 | ||
|
|
70493c15f4 | ||
|
|
f7c5d6a7c3 | ||
|
|
98c5d2a18b | ||
|
|
46b534f7c6 | ||
|
|
42b4fc52b1 | ||
|
|
350dbcdad0 | ||
|
|
b1b4461d89 | ||
|
|
f694b7573a | ||
|
|
1e680e3a5a | ||
|
|
2591d21e94 | ||
|
|
6632f68d95 | ||
|
|
25985ad56a | ||
|
|
d6a24e9420 | ||
|
|
281a78f3c7 | ||
|
|
a0787d0091 | ||
|
|
d2acba0e8a | ||
|
|
ea7b6763c4 | ||
|
|
eb67c540b9 | ||
|
|
4151d0abd3 | ||
|
|
d0c11a6efa | ||
|
|
82c4da1743 | ||
|
|
0a15e0b75c | ||
|
|
0b315c29bf | ||
|
|
56f09bf61f | ||
|
|
6d82e82419 | ||
|
|
9bebaf814a | ||
|
|
5f4d6e6008 | ||
|
|
07d07d7ba3 | ||
|
|
a41bbf65b2 | ||
|
|
21e2f7ca62 | ||
|
|
7b7c182411 | ||
|
|
db0c778ead | ||
|
|
c3efb83ac1 | ||
|
|
3d7b0171fe | ||
|
|
c191ff5b2e | ||
|
|
90bc7f741d | ||
|
|
d3f6c01df2 | ||
|
|
c6450521e6 | ||
|
|
1da6bcec57 | ||
|
|
75a5591158 | ||
|
|
9462aad3bf | ||
|
|
0964fc51ff | ||
|
|
27aacab432 | ||
|
|
3feb465ad3 | ||
|
|
66cdc99ae0 | ||
|
|
66419db6fd | ||
|
|
72d5dbba89 | ||
|
|
e75193c191 | ||
|
|
ce4fd49161 | ||
|
|
3c978c5bf6 | ||
|
|
6f419e9a9a | ||
|
|
b3eb8d0b7a | ||
|
|
3b422eb179 | ||
|
|
4b513bad73 | ||
|
|
e71e17866d | ||
|
|
7e3fbec077 | ||
|
|
e4bc5e8e32 | ||
|
|
bc982c5584 | ||
|
|
d440e12c19 | ||
|
|
bc8128af69 | ||
|
|
1d87c14262 | ||
|
|
ca929c2178 | ||
|
|
569039734b | ||
|
|
3d43310e6a | ||
|
|
d21cbddb0d | ||
|
|
25aa857edf | ||
|
|
0bb2e2bbbe | ||
|
|
dc376343d6 | ||
|
|
2e7d1f8a88 | ||
|
|
6314591c65 | ||
|
|
7dd7494420 | ||
|
|
6c10e3649f | ||
|
|
4c6dee13d2 | ||
|
|
e45abe953a | ||
|
|
dec3e491b5 | ||
|
|
6ce341cf79 | ||
|
|
b9868250f8 | ||
|
|
a3f1064efe | ||
|
|
026083d653 | ||
|
|
d27f341bde | ||
|
|
2264ebecfc | ||
|
|
3207affb4a | ||
|
|
0602afd50c | ||
|
|
4343e12217 | ||
|
|
7fda862fb8 | ||
|
|
aa7b875b1a | ||
|
|
54d6e0e3b6 | ||
|
|
bea3b442aa | ||
|
|
954a2502e7 | ||
|
|
e3f34917c5 | ||
|
|
f65dd05949 | ||
|
|
240c436b34 | ||
|
|
c9803688cc | ||
|
|
4987094483 | ||
|
|
ff55080193 | ||
|
|
258193c932 | ||
|
|
67823ef5de | ||
|
|
a671457243 | ||
|
|
cf654549da | ||
|
|
6a01e32a2d | ||
|
|
e6597fab03 | ||
|
|
ebd245a93d | ||
|
|
3ebc3b5845 | ||
|
|
0e5973109d | ||
|
|
5f02d7383a | ||
|
|
2bae148529 | ||
|
|
42a2e03aab | ||
|
|
b566d0c646 | ||
|
|
b359f16767 | ||
|
|
f8681ab134 | ||
|
|
7e521768ab | ||
|
|
e9b1e7d101 | ||
|
|
f0322fb892 | ||
|
|
617daebb88 | ||
|
|
a11d4fff58 | ||
|
|
bf42a04d83 | ||
|
|
06528d6953 | ||
|
|
9f00395746 | ||
|
|
e0674cd57f | ||
|
|
7e49bd9ee7 | ||
|
|
8a1baa9c4e | ||
|
|
31c713d188 | ||
|
|
d905bc1591 | ||
|
|
7f946c7f83 | ||
|
|
36cfdb6c6f | ||
|
|
dbabe9774f | ||
|
|
deb041b8d3 | ||
|
|
85d49477f5 | ||
|
|
45af6ccdeb | ||
|
|
917c0c5e2d | ||
|
|
bbbd43e282 | ||
|
|
f945392b5a | ||
|
|
f4060779e4 | ||
|
|
7cc8d9640b | ||
|
|
7f871f03ef | ||
|
|
709b966150 | ||
|
|
5e19b79aa4 | ||
|
|
6eb806907a | ||
|
|
8250ca31ff | ||
|
|
7cd776143b | ||
|
|
5f6c3d7d98 | ||
|
|
915fe3ed4e | ||
|
|
d8c7b9feb2 | ||
|
|
5ab3866d96 | ||
|
|
184ea42033 | ||
|
|
014c209b8e | ||
|
|
e91005855c | ||
|
|
46bcc0f559 | ||
|
|
61322ebf41 | ||
|
|
db0c9c4182 | ||
|
|
9ee3011687 | ||
|
|
5aa4a2cef6 | ||
|
|
f38a172523 | ||
|
|
66dbca80b2 | ||
|
|
61377dd4ff | ||
|
|
cd03f3ff8c | ||
|
|
9479963773 | ||
|
|
f974533bf7 | ||
|
|
d560ac6197 | ||
|
|
1cdff15412 | ||
|
|
f8ba7d9f4e | ||
|
|
41c1aafaf7 | ||
|
|
75d1e877be | ||
|
|
88e1cf558b | ||
|
|
50879d2acb | ||
|
|
13cb14a119 | ||
|
|
22ef6b7800 | ||
|
|
e48e6dfe67 | ||
|
|
1b58914d59 | ||
|
|
2f24842b2d | ||
|
|
5c485fe608 | ||
|
|
b0319e7db9 | ||
|
|
a4d9581888 | ||
|
|
fb5022b1c0 | ||
|
|
abb0b5b3ac | ||
|
|
49f2641e3c | ||
|
|
650c60fe44 | ||
|
|
1d0bbcb36c | ||
|
|
c456ad7fa5 | ||
|
|
537446a5c9 | ||
|
|
94b5b2c431 | ||
|
|
9863433fea | ||
|
|
9a27465a25 | ||
|
|
263cfe1435 | ||
|
|
6ae5302a70 | ||
|
|
c67b7283e7 | ||
|
|
7b6e98da7b | ||
|
|
15b65fa20f | ||
|
|
372900173a | ||
|
|
1089415451 | ||
|
|
8dbeeff0eb | ||
|
|
85312d430c | ||
|
|
9843af9e00 | ||
|
|
a6bd423ef0 | ||
|
|
146496d08c | ||
|
|
b562051c38 | ||
|
|
fe164ca368 | ||
|
|
950ad5187c | ||
|
|
e6ab69c314 | ||
|
|
373dcbed57 | ||
|
|
ce9c057b1b | ||
|
|
6745cee72c | ||
|
|
31abea18b3 | ||
|
|
593ac135ce | ||
|
|
05a924d27f | ||
|
|
af9d71ed0d | ||
|
|
9b90f6ae51 | ||
|
|
bbfc8ac352 | ||
|
|
ad86a967ba | ||
|
|
58a0eced19 | ||
|
|
46fffd13e7 | ||
|
|
6c8d4be462 | ||
|
|
e3a517bf0d | ||
|
|
f4e22bdc2e | ||
|
|
46f56fbdc0 | ||
|
|
8665655592 | ||
|
|
4569d26d81 | ||
|
|
b8b92ddb5f | ||
|
|
8dd3390876 | ||
|
|
06c420b60c | ||
|
|
a7fb4d22cc | ||
|
|
b1cd5f34e5 | ||
|
|
088e5f8f2c | ||
|
|
4e0be5b626 | ||
|
|
5516034bbc | ||
|
|
06544faa5a | ||
|
|
39737b720a | ||
|
|
0f36985440 | ||
|
|
959bb691cd | ||
|
|
2a3162c5fa | ||
|
|
0cc976b729 | ||
|
|
4d2d3f24c6 | ||
|
|
2c7c64841f | ||
|
|
4ea142d0f3 | ||
|
|
00751d2518 | ||
|
|
4394a52b65 | ||
|
|
70938501d7 | ||
|
|
d5f5cf4300 | ||
|
|
e2c4164d8c | ||
|
|
78124ef7a8 | ||
|
|
096d354b7e | ||
|
|
7ad0e4b070 | ||
|
|
64755d5329 | ||
|
|
3071422132 | ||
|
|
8847330dbe | ||
|
|
6d396520f4 | ||
|
|
79a2f7fe2f | ||
|
|
af66841f68 | ||
|
|
1ab4cfdff9 | ||
|
|
f7e03eeeb7 | ||
|
|
29067b7773 | ||
|
|
905e5be8bb | ||
|
|
5e4b97e589 | ||
|
|
9bd66b7e49 | ||
|
|
0ecc7a80c1 | ||
|
|
92f157206d | ||
|
|
b093db63a3 | ||
|
|
8c3e3df47f | ||
|
|
6907c6e0a3 | ||
|
|
4b5273127d | ||
|
|
4b763e9f52 | ||
|
|
848eb00bea | ||
|
|
44ec68c002 | ||
|
|
b2282f9d7a | ||
|
|
326f0270b9 | ||
|
|
fd47a6c038 | ||
|
|
77cfffaaed | ||
|
|
118893678b | ||
|
|
ae3985de46 | ||
|
|
49f36e0b2b | ||
|
|
4a7082b27c | ||
|
|
3aa53a06fb | ||
|
|
a0afbefef4 | ||
|
|
5b554f7062 | ||
|
|
0411a90be9 | ||
|
|
9ce7938fdc | ||
|
|
650dc88da5 | ||
|
|
3601763442 | ||
|
|
4aaaea1e04 | ||
|
|
2e214c85d3 | ||
|
|
0676ab67df | ||
|
|
0b843512dd | ||
|
|
85d9416750 | ||
|
|
5e3cf3c807 | ||
|
|
4dfb3a48c3 | ||
|
|
21e8883877 | ||
|
|
7dbfe07c97 | ||
|
|
8bc01a993b | ||
|
|
e2728d84f3 |
67
.github/workflows/main.yml
vendored
Normal file
67
.github/workflows/main.yml
vendored
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
|
||||||
|
name: Continuous integration
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
check:
|
||||||
|
name: Check
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous
|
||||||
|
uses: styfle/cancel-workflow-action@0.10.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- uses: dtolnay/rust-toolchain@stable
|
||||||
|
- run: cargo check --all-features
|
||||||
|
|
||||||
|
test:
|
||||||
|
name: Test Suite
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous
|
||||||
|
uses: styfle/cancel-workflow-action@0.10.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- uses: dtolnay/rust-toolchain@stable
|
||||||
|
- run: cargo test
|
||||||
|
- run: cargo test --manifest-path tarpc/Cargo.toml --features serde1
|
||||||
|
- run: cargo test --manifest-path tarpc/Cargo.toml --features tokio1
|
||||||
|
- run: cargo test --manifest-path tarpc/Cargo.toml --features serde-transport
|
||||||
|
- run: cargo test --manifest-path tarpc/Cargo.toml --features tcp
|
||||||
|
- run: cargo test --all-features
|
||||||
|
|
||||||
|
fmt:
|
||||||
|
name: Rustfmt
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous
|
||||||
|
uses: styfle/cancel-workflow-action@0.10.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- uses: dtolnay/rust-toolchain@stable
|
||||||
|
with:
|
||||||
|
components: rustfmt
|
||||||
|
- run: cargo fmt --all -- --check
|
||||||
|
|
||||||
|
clippy:
|
||||||
|
name: Clippy
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Cancel previous
|
||||||
|
uses: styfle/cancel-workflow-action@0.10.0
|
||||||
|
with:
|
||||||
|
access_token: ${{ github.token }}
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- uses: dtolnay/rust-toolchain@stable
|
||||||
|
with:
|
||||||
|
components: clippy
|
||||||
|
- run: cargo clippy --all-features -- -D warnings
|
||||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -3,3 +3,5 @@ Cargo.lock
|
|||||||
.cargo
|
.cargo
|
||||||
*.swp
|
*.swp
|
||||||
*.bk
|
*.bk
|
||||||
|
tarpc.iml
|
||||||
|
.idea
|
||||||
|
|||||||
34
.travis.yml
34
.travis.yml
@@ -1,34 +0,0 @@
|
|||||||
language: rust
|
|
||||||
sudo: false
|
|
||||||
|
|
||||||
rust:
|
|
||||||
- nightly
|
|
||||||
|
|
||||||
os:
|
|
||||||
- linux
|
|
||||||
|
|
||||||
addons:
|
|
||||||
apt:
|
|
||||||
packages:
|
|
||||||
- libcurl4-openssl-dev
|
|
||||||
- libelf-dev
|
|
||||||
- libdw-dev
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- |
|
|
||||||
pip install 'travis-cargo<0.2' --user &&
|
|
||||||
export PATH=$HOME/.local/bin:$PATH
|
|
||||||
|
|
||||||
script:
|
|
||||||
- |
|
|
||||||
travis-cargo build -- --features tls && travis-cargo test -- --features tls && travis-cargo bench -- --features tls &&
|
|
||||||
rustdoc --test README.md -L target/debug/deps -L target/debug &&
|
|
||||||
travis-cargo build && travis-cargo test && travis-cargo bench
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- travis-cargo coveralls --no-sudo
|
|
||||||
|
|
||||||
env:
|
|
||||||
global:
|
|
||||||
# override the default `--features unstable` used for the nightly branch
|
|
||||||
- TRAVIS_CARGO_NIGHTLY_FEATURE=""
|
|
||||||
63
Cargo.toml
63
Cargo.toml
@@ -1,54 +1,11 @@
|
|||||||
[package]
|
|
||||||
name = "tarpc"
|
|
||||||
version = "0.7.2"
|
|
||||||
authors = ["Adam Wright <adam.austin.wright@gmail.com>", "Tim Kuehn <timothy.j.kuehn@gmail.com>"]
|
|
||||||
license = "MIT"
|
|
||||||
documentation = "https://docs.rs/tarpc"
|
|
||||||
homepage = "https://github.com/google/tarpc"
|
|
||||||
repository = "https://github.com/google/tarpc"
|
|
||||||
keywords = ["rpc", "network", "server", "api", "tls"]
|
|
||||||
categories = ["asynchronous", "network-programming"]
|
|
||||||
readme = "README.md"
|
|
||||||
description = "An RPC framework for Rust with a focus on ease of use."
|
|
||||||
|
|
||||||
[badges]
|
|
||||||
travis-ci = { repository = "google/tarpc" }
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
bincode = "1.0.0-alpha6"
|
|
||||||
byteorder = "1.0"
|
|
||||||
bytes = "0.4"
|
|
||||||
cfg-if = "0.1.0"
|
|
||||||
futures = "0.1.11"
|
|
||||||
lazy_static = "0.2"
|
|
||||||
log = "0.3"
|
|
||||||
net2 = "0.2"
|
|
||||||
num_cpus = "1.0"
|
|
||||||
serde = "0.9"
|
|
||||||
serde_derive = "0.9"
|
|
||||||
tarpc-plugins = { path = "src/plugins", version = "0.1.1" }
|
|
||||||
thread-pool = "0.1.1"
|
|
||||||
tokio-core = "0.1.6"
|
|
||||||
tokio-io = "0.1"
|
|
||||||
tokio-proto = "0.1.1"
|
|
||||||
tokio-service = "0.1"
|
|
||||||
|
|
||||||
# Optional dependencies
|
|
||||||
native-tls = { version = "0.1.1", optional = true }
|
|
||||||
tokio-tls = { version = "0.1", optional = true }
|
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
chrono = "0.3"
|
|
||||||
env_logger = "0.3"
|
|
||||||
futures-cpupool = "0.1"
|
|
||||||
clap = "2.0"
|
|
||||||
|
|
||||||
[target.'cfg(target_os = "macos")'.dev-dependencies]
|
|
||||||
security-framework = "0.1"
|
|
||||||
|
|
||||||
[features]
|
|
||||||
default = []
|
|
||||||
tls = ["tokio-tls", "native-tls"]
|
|
||||||
unstable = ["serde/unstable"]
|
|
||||||
|
|
||||||
[workspace]
|
[workspace]
|
||||||
|
resolver = "2"
|
||||||
|
|
||||||
|
members = [
|
||||||
|
"example-service",
|
||||||
|
"tarpc",
|
||||||
|
"plugins",
|
||||||
|
]
|
||||||
|
|
||||||
|
[profile.dev]
|
||||||
|
split-debuginfo = "unpacked"
|
||||||
|
|||||||
371
README.md
371
README.md
@@ -1,9 +1,20 @@
|
|||||||
## tarpc: Tim & Adam's RPC lib
|
[![Crates.io][crates-badge]][crates-url]
|
||||||
[](https://travis-ci.org/google/tarpc)
|
[![MIT licensed][mit-badge]][mit-url]
|
||||||
[](https://coveralls.io/github/google/tarpc?branch=master)
|
[![Build status][gh-actions-badge]][gh-actions-url]
|
||||||
[](LICENSE)
|
[![Discord chat][discord-badge]][discord-url]
|
||||||
[](https://crates.io/crates/tarpc)
|
|
||||||
[](https://gitter.im/tarpc/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
[crates-badge]: https://img.shields.io/crates/v/tarpc.svg
|
||||||
|
[crates-url]: https://crates.io/crates/tarpc
|
||||||
|
[mit-badge]: https://img.shields.io/badge/license-MIT-blue.svg
|
||||||
|
[mit-url]: LICENSE
|
||||||
|
[gh-actions-badge]: https://github.com/google/tarpc/workflows/Continuous%20integration/badge.svg
|
||||||
|
[gh-actions-url]: https://github.com/google/tarpc/actions?query=workflow%3A%22Continuous+integration%22
|
||||||
|
[discord-badge]: https://img.shields.io/discord/647529123996237854.svg?logo=discord&style=flat-square
|
||||||
|
[discord-url]: https://discord.gg/gXwpdSt
|
||||||
|
|
||||||
|
# tarpc
|
||||||
|
|
||||||
|
<!-- cargo-sync-readme start -->
|
||||||
|
|
||||||
*Disclaimer*: This is not an official Google product.
|
*Disclaimer*: This is not an official Google product.
|
||||||
|
|
||||||
@@ -11,7 +22,7 @@ tarpc is an RPC framework for rust with a focus on ease of use. Defining a
|
|||||||
service can be done in just a few lines of code, and most of the boilerplate of
|
service can be done in just a few lines of code, and most of the boilerplate of
|
||||||
writing a server is taken care of for you.
|
writing a server is taken care of for you.
|
||||||
|
|
||||||
[Documentation](https://docs.rs/tarpc)
|
[Documentation](https://docs.rs/crate/tarpc/)
|
||||||
|
|
||||||
## What is an RPC framework?
|
## What is an RPC framework?
|
||||||
"RPC" stands for "Remote Procedure Call," a function call where the work of
|
"RPC" stands for "Remote Procedure Call," a function call where the work of
|
||||||
@@ -26,286 +37,126 @@ architectures. Two well-known ones are [gRPC](http://www.grpc.io) and
|
|||||||
|
|
||||||
tarpc differentiates itself from other RPC frameworks by defining the schema in code,
|
tarpc differentiates itself from other RPC frameworks by defining the schema in code,
|
||||||
rather than in a separate language such as .proto. This means there's no separate compilation
|
rather than in a separate language such as .proto. This means there's no separate compilation
|
||||||
process, and no cognitive context switching between different languages. Additionally, it
|
process, and no context switching between different languages.
|
||||||
works with the community-backed library serde: any serde-serializable type can be used as
|
|
||||||
arguments to tarpc fns.
|
Some other features of tarpc:
|
||||||
|
- Pluggable transport: any type implementing `Stream<Item = Request> + Sink<Response>` can be
|
||||||
|
used as a transport to connect the client and server.
|
||||||
|
- `Send + 'static` optional: if the transport doesn't require it, neither does tarpc!
|
||||||
|
- Cascading cancellation: dropping a request will send a cancellation message to the server.
|
||||||
|
The server will cease any unfinished work on the request, subsequently cancelling any of its
|
||||||
|
own requests, repeating for the entire chain of transitive dependencies.
|
||||||
|
- Configurable deadlines and deadline propagation: request deadlines default to 10s if
|
||||||
|
unspecified. The server will automatically cease work when the deadline has passed. Any
|
||||||
|
requests sent by the server that use the request context will propagate the request deadline.
|
||||||
|
For example, if a server is handling a request with a 10s deadline, does 2s of work, then
|
||||||
|
sends a request to another server, that server will see an 8s deadline.
|
||||||
|
- Distributed tracing: tarpc is instrumented with
|
||||||
|
[tracing](https://github.com/tokio-rs/tracing) primitives extended with
|
||||||
|
[OpenTelemetry](https://opentelemetry.io/) traces. Using a compatible tracing subscriber like
|
||||||
|
[Jaeger](https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger),
|
||||||
|
each RPC can be traced through the client, server, and other dependencies downstream of the
|
||||||
|
server. Even for applications not connected to a distributed tracing collector, the
|
||||||
|
instrumentation can also be ingested by regular loggers like
|
||||||
|
[env_logger](https://github.com/env-logger-rs/env_logger/).
|
||||||
|
- Serde serialization: enabling the `serde1` Cargo feature will make service requests and
|
||||||
|
responses `Serialize + Deserialize`. It's entirely optional, though: in-memory transports can
|
||||||
|
be used, as well, so the price of serialization doesn't have to be paid when it's not needed.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
**NB**: *this example is for master. Are you looking for other
|
|
||||||
[versions](https://docs.rs/tarpc)?*
|
|
||||||
|
|
||||||
Add to your `Cargo.toml` dependencies:
|
Add to your `Cargo.toml` dependencies:
|
||||||
|
|
||||||
```toml
|
```toml
|
||||||
tarpc = "0.7.2"
|
tarpc = "0.34"
|
||||||
tarpc-plugins = "0.1.1"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Example: Sync
|
The `tarpc::service` attribute expands to a collection of items that form an rpc service.
|
||||||
|
These generated types make it easy and ergonomic to write servers with less boilerplate.
|
||||||
|
Simply implement the generated service trait, and you're off to the races!
|
||||||
|
|
||||||
tarpc has two APIs: `sync` for blocking code and `future` for asynchronous
|
## Example
|
||||||
code. Here's how to use the sync api.
|
|
||||||
|
This example uses [tokio](https://tokio.rs), so add the following dependencies to
|
||||||
|
your `Cargo.toml`:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
anyhow = "1.0"
|
||||||
|
futures = "0.3"
|
||||||
|
tarpc = { version = "0.31", features = ["tokio1"] }
|
||||||
|
tokio = { version = "1.0", features = ["rt-multi-thread", "macros"] }
|
||||||
|
```
|
||||||
|
|
||||||
|
In the following example, we use an in-process channel for communication between
|
||||||
|
client and server. In real code, you will likely communicate over the network.
|
||||||
|
For a more real-world example, see [example-service](example-service).
|
||||||
|
|
||||||
|
First, let's set up the dependencies and service definition.
|
||||||
|
|
||||||
```rust
|
```rust
|
||||||
#![feature(plugin)]
|
use futures::future::{self, Ready};
|
||||||
#![plugin(tarpc_plugins)]
|
use tarpc::{
|
||||||
|
client, context,
|
||||||
|
server::{self, Channel},
|
||||||
|
};
|
||||||
|
|
||||||
#[macro_use]
|
// This is the service definition. It looks a lot like a trait definition.
|
||||||
extern crate tarpc;
|
// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
#[tarpc::service]
|
||||||
use std::sync::mpsc;
|
trait World {
|
||||||
use std::thread;
|
/// Returns a greeting for name.
|
||||||
use tarpc::sync::{client, server};
|
async fn hello(name: String) -> String;
|
||||||
use tarpc::sync::client::ClientExt;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc hello(name: String) -> String;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct HelloServer;
|
|
||||||
|
|
||||||
impl SyncService for HelloServer {
|
|
||||||
fn hello(&self, name: String) -> Result<String, Never> {
|
|
||||||
Ok(format!("Hello, {}!", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let mut handle = HelloServer.listen("localhost:0", server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
tx.send(handle.addr()).unwrap();
|
|
||||||
handle.run();
|
|
||||||
});
|
|
||||||
let client = SyncClient::connect(rx.recv().unwrap(), client::Options::default()).unwrap();
|
|
||||||
println!("{}", client.hello("Mom".to_string()).unwrap());
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
The `service!` macro expands to a collection of items that form an
|
This service definition generates a trait called `World`. Next we need to
|
||||||
rpc service. In the above example, the macro is called within the
|
implement it for our Server struct.
|
||||||
`hello_service` module. This module will contain `SyncClient`, `AsyncClient`,
|
|
||||||
and `FutureClient` types, and `SyncService` and `AsyncService` traits. There is
|
|
||||||
also a `ServiceExt` trait that provides starter `fn`s for services, with an
|
|
||||||
umbrella impl for all services. These generated types make it easy and
|
|
||||||
ergonomic to write servers without dealing with sockets or serialization
|
|
||||||
directly. Simply implement one of the generated traits, and you're off to the
|
|
||||||
races! See the `tarpc_examples` package for more examples.
|
|
||||||
|
|
||||||
## Example: Futures
|
|
||||||
|
|
||||||
Here's the same service, implemented using futures.
|
|
||||||
|
|
||||||
```rust
|
```rust
|
||||||
#![feature(plugin)]
|
// This is the type that implements the generated World trait. It is the business logic
|
||||||
#![plugin(tarpc_plugins)]
|
// and is used to start the server.
|
||||||
|
|
||||||
extern crate futures;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use futures::Future;
|
|
||||||
use tarpc::future::{client, server};
|
|
||||||
use tarpc::future::client::ClientExt;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc hello(name: String) -> String;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct HelloServer;
|
struct HelloServer;
|
||||||
|
|
||||||
impl FutureService for HelloServer {
|
impl World for HelloServer {
|
||||||
type HelloFut = Result<String, Never>;
|
async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
format!("Hello, {name}!")
|
||||||
fn hello(&self, name: String) -> Self::HelloFut {
|
|
||||||
Ok(format!("Hello, {}!", name))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
fn main() {
|
Lastly let's write our `main` that will start the server. While this example uses an
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
[in-process channel](transport::channel), tarpc also ships a generic [`serde_transport`]
|
||||||
let (handle, server) = HelloServer.listen("localhost:10000".first_socket_addr(),
|
behind the `serde-transport` feature, with additional [TCP](serde_transport::tcp) functionality
|
||||||
&reactor.handle(),
|
available behind the `tcp` feature.
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
```rust
|
||||||
reactor.handle().spawn(server);
|
#[tokio::main]
|
||||||
let options = client::Options::default().handle(reactor.handle());
|
async fn main() -> anyhow::Result<()> {
|
||||||
reactor.run(FutureClient::connect(handle.addr(), options)
|
let (client_transport, server_transport) = tarpc::transport::channel::unbounded();
|
||||||
.map_err(tarpc::Error::from)
|
|
||||||
.and_then(|client| client.hello("Mom".to_string()))
|
let server = server::BaseChannel::with_defaults(server_transport);
|
||||||
.map(|resp| println!("{}", resp)))
|
tokio::spawn(server.execute(HelloServer.serve()));
|
||||||
.unwrap();
|
|
||||||
|
// WorldClient is generated by the #[tarpc::service] attribute. It has a constructor `new`
|
||||||
|
// that takes a config and any Transport as input.
|
||||||
|
let client = WorldClient::new(client::Config::default(), client_transport).spawn();
|
||||||
|
|
||||||
|
// The client has an RPC method for each RPC defined in the annotated trait. It takes the same
|
||||||
|
// args as defined, with the addition of a Context, which is always the first arg. The Context
|
||||||
|
// specifies a deadline and trace information which can be helpful in debugging requests.
|
||||||
|
let hello = client.hello(context::current(), "Stim".to_string()).await?;
|
||||||
|
|
||||||
|
println!("{hello}");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Example: Futures + TLS
|
## Service Documentation
|
||||||
|
|
||||||
By default, tarpc internally uses a [`TcpStream`] for communication between your clients and
|
|
||||||
servers. However, TCP by itself has no encryption. As a result, your communication will be sent in
|
|
||||||
the clear. If you want your RPC communications to be encrypted, you can choose to use [TLS]. TLS
|
|
||||||
operates as an encryption layer on top of TCP. When using TLS, your communication will occur over a
|
|
||||||
[`TlsStream<TcpStream>`]. You can add the ability to make TLS clients and servers by adding `tarpc`
|
|
||||||
with the `tls` feature flag enabled.
|
|
||||||
|
|
||||||
When using TLS, some additional information is required. You will need to make [`TlsAcceptor`] and
|
|
||||||
`client::tls::Context` structs; `client::tls::Context` requires a [`TlsConnector`]. The
|
|
||||||
[`TlsAcceptor`] and [`TlsConnector`] types are defined in the [native-tls]. tarpc re-exports
|
|
||||||
external TLS-related types in its `native_tls` module (`tarpc::native_tls`).
|
|
||||||
|
|
||||||
[TLS]: https://en.wikipedia.org/wiki/Transport_Layer_Security
|
|
||||||
[`TcpStream`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpStream.html
|
|
||||||
[`TlsStream<TcpStream>`]: https://docs.rs/native-tls/0.1/native_tls/struct.TlsStream.html
|
|
||||||
[`TlsAcceptor`]: https://docs.rs/native-tls/0.1/native_tls/struct.TlsAcceptor.html
|
|
||||||
[`TlsConnector`]: https://docs.rs/native-tls/0.1/native_tls/struct.TlsConnector.html
|
|
||||||
[native-tls]: https://github.com/sfackler/rust-native-tls
|
|
||||||
|
|
||||||
Both TLS streams and TCP streams are supported in the same binary when the `tls` feature is enabled.
|
|
||||||
However, if you are working with both stream types, ensure that you use the TLS clients with TLS
|
|
||||||
servers and TCP clients with TCP servers.
|
|
||||||
|
|
||||||
```rust,no_run
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate futures;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use futures::Future;
|
|
||||||
use tarpc::future::{client, server};
|
|
||||||
use tarpc::future::client::ClientExt;
|
|
||||||
use tarpc::tls;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
use tarpc::native_tls::{Pkcs12, TlsAcceptor};
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc hello(name: String) -> String;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct HelloServer;
|
|
||||||
|
|
||||||
impl FutureService for HelloServer {
|
|
||||||
type HelloFut = Result<String, Never>;
|
|
||||||
|
|
||||||
fn hello(&self, name: String) -> Self::HelloFut {
|
|
||||||
Ok(format!("Hello, {}!", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_acceptor() -> TlsAcceptor {
|
|
||||||
let buf = include_bytes!("test/identity.p12");
|
|
||||||
let pkcs12 = Pkcs12::from_der(buf, "password").unwrap();
|
|
||||||
TlsAcceptor::builder(pkcs12).unwrap().build().unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let acceptor = get_acceptor();
|
|
||||||
let (handle, server) = HelloServer.listen("localhost:10000".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default().tls(acceptor)).unwrap();
|
|
||||||
reactor.handle().spawn(server);
|
|
||||||
let options = client::Options::default()
|
|
||||||
.handle(reactor.handle())
|
|
||||||
.tls(tls::client::Context::new("foobar.com").unwrap());
|
|
||||||
reactor.run(FutureClient::connect(handle.addr(), options)
|
|
||||||
.map_err(tarpc::Error::from)
|
|
||||||
.and_then(|client| client.hello("Mom".to_string()))
|
|
||||||
.map(|resp| println!("{}", resp)))
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Tips
|
|
||||||
|
|
||||||
### Sync vs Futures
|
|
||||||
|
|
||||||
A single `service!` invocation generates code for both synchronous and future-based applications.
|
|
||||||
It's up to the user whether they want to implement the sync API or the futures API. The sync API has
|
|
||||||
the simplest programming model, at the cost of some overhead - each RPC is handled in its own
|
|
||||||
thread. The futures API is based on tokio and can run on any tokio-compatible executor. This mean a
|
|
||||||
service that implements the futures API for a tarpc service can run on a single thread, avoiding
|
|
||||||
context switches and the memory overhead of having a thread per RPC.
|
|
||||||
|
|
||||||
### Errors
|
|
||||||
|
|
||||||
All generated tarpc RPC methods return either `tarpc::Result<T, E>` or something like `Future<T,
|
|
||||||
E>`. The error type defaults to `tarpc::util::Never` (a wrapper for `!` which implements
|
|
||||||
`std::error::Error`) if no error type is explicitly specified in the `service!` macro invocation. An
|
|
||||||
error type can be specified like so:
|
|
||||||
|
|
||||||
```rust,ignore
|
|
||||||
use tarpc::util::Message;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc hello(name: String) -> String | Message
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
`tarpc::util::Message` is just a wrapper around string that implements `std::error::Error` provided
|
|
||||||
for service implementations that don't require complex error handling. The pipe is used as syntax
|
|
||||||
for specifying the error type in a way that's agnostic of whether the service implementation is
|
|
||||||
synchronous or future-based. Note that in the simpler examples in the readme, no pipe is used, and
|
|
||||||
the macro automatically chooses `tarpc::util::Never` as the error type.
|
|
||||||
|
|
||||||
The above declaration would produce the following synchronous service trait:
|
|
||||||
|
|
||||||
```rust,ignore
|
|
||||||
trait SyncService {
|
|
||||||
fn hello(&self, name: String) -> Result<String, Message>;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
and the following future-based trait:
|
|
||||||
|
|
||||||
```rust,ignore
|
|
||||||
trait FutureService {
|
|
||||||
type HelloFut: IntoFuture<String, Message>;
|
|
||||||
|
|
||||||
fn hello(&mut self, name: String) -> Self::HelloFut;
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Documentation
|
|
||||||
|
|
||||||
Use `cargo doc` as you normally would to see the documentation created for all
|
Use `cargo doc` as you normally would to see the documentation created for all
|
||||||
items expanded by a `service!` invocation.
|
items expanded by a `service!` invocation.
|
||||||
|
|
||||||
## Additional Features
|
<!-- cargo-sync-readme end -->
|
||||||
|
|
||||||
- Concurrent requests from a single client.
|
License: MIT
|
||||||
- Compatible with tokio services.
|
|
||||||
- Run any number of clients and services on a single event loop.
|
|
||||||
- Any type that `impl`s `serde`'s `Serialize` and `Deserialize` can be used in
|
|
||||||
rpc signatures.
|
|
||||||
- Attributes can be specified on rpc methods. These will be included on both the
|
|
||||||
services' trait methods as well as on the clients' stub methods.
|
|
||||||
|
|
||||||
## Gaps/Potential Improvements (not necessarily actively being worked on)
|
|
||||||
|
|
||||||
- Configurable server rate limiting.
|
|
||||||
- Automatic client retries with exponential backoff when server is busy.
|
|
||||||
- Load balancing
|
|
||||||
- Service discovery
|
|
||||||
- Automatically reconnect on the client side when the connection cuts out.
|
|
||||||
- Support generic serialization protocols.
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
To contribute to tarpc, please see [CONTRIBUTING](CONTRIBUTING.md).
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
tarpc is distributed under the terms of the MIT license.
|
|
||||||
|
|
||||||
See [LICENSE](LICENSE) for details.
|
|
||||||
|
|||||||
467
RELEASES.md
467
RELEASES.md
@@ -1,6 +1,471 @@
|
|||||||
|
## 0.34.0 (2023-12-29)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
- `#[tarpc::server]` is no more! Service traits now use async fns.
|
||||||
|
- `Channel::execute` no longer spawns request handlers. Async-fn-in-traits makes it impossible to
|
||||||
|
add a Send bound to the future returned by `Serve::serve`. Instead, `Channel::execute` returns a
|
||||||
|
stream of futures, where each future is a request handler. To achieve the former behavior:
|
||||||
|
```rust
|
||||||
|
channel.execute(server.serve())
|
||||||
|
.for_each(|rpc| { tokio::spawn(rpc); })
|
||||||
|
```
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
- Request hooks are added to the serve trait, so that it's easy to hook in cross-cutting
|
||||||
|
functionality like throttling, authorization, etc.
|
||||||
|
- The Client trait is back! This makes it possible to hook in generic client functionality like load
|
||||||
|
balancing, retries, etc.
|
||||||
|
|
||||||
|
## 0.33.0 (2023-04-01)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
Opentelemetry dependency version increased to 0.18.
|
||||||
|
|
||||||
|
## 0.32.0 (2023-03-24)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
- As part of a fix to return more channel errors in RPC results, a few error types have changed:
|
||||||
|
|
||||||
|
0. `client::RpcError::Disconnected` was split into the following errors:
|
||||||
|
- Shutdown: the client was shutdown, either intentionally or due to an error. If due to an
|
||||||
|
error, pending RPCs should see the more specific errors below.
|
||||||
|
- Send: an RPC message failed to send over the transport. Only the RPC that failed to be sent
|
||||||
|
will see this error.
|
||||||
|
- Receive: a fatal error occurred while receiving from the transport. All in-flight RPCs will
|
||||||
|
receive this error.
|
||||||
|
0. `client::ChannelError` and `server::ChannelError` are unified in `tarpc::ChannelError`.
|
||||||
|
Previously, server transport errors would not indicate during which activity the transport
|
||||||
|
error occurred. Now, just like the client already was, it will be specific: reading, readying,
|
||||||
|
sending, flushing, or closing.
|
||||||
|
|
||||||
|
## 0.31.0 (2022-11-03)
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
This release adds Unix Domain Sockets to the `serde_transport` module.
|
||||||
|
To use it, enable the "unix" feature. See the docs for more information.
|
||||||
|
|
||||||
|
## 0.30.0 (2022-08-12)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
- Some types that impl Future are now annotated with `#[must_use]`. Code that previously created
|
||||||
|
these types but did not use them will now receive a warning. Code that disallows warnings will
|
||||||
|
receive a compilation error.
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
|
||||||
|
- Servers will more reliably clean up request state for requests with long deadlines when response
|
||||||
|
processing is aborted without sending a response.
|
||||||
|
|
||||||
|
### Other Changes
|
||||||
|
|
||||||
|
- `TrackedRequest` now contains a response guard that can be used to ensure state cleanup for
|
||||||
|
aborted requests. (This was already handled automatically by `InFlightRequests`).
|
||||||
|
- When the feature serde-transport is enabled, the crate tokio_serde is now re-exported.
|
||||||
|
|
||||||
|
## 0.29.0 (2022-05-26)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
`Context.deadline` is now serialized as a Duration. This prevents clock skew from affecting deadline
|
||||||
|
behavior. For more details see https://github.com/google/tarpc/pull/367 and its [related
|
||||||
|
issue](https://github.com/google/tarpc/issues/366).
|
||||||
|
|
||||||
|
## 0.28.0 (2022-04-06)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
- The minimum supported Rust version has increased to 1.58.0.
|
||||||
|
- The version of opentelemetry depended on by tarpc has increased to 0.17.0.
|
||||||
|
|
||||||
|
## 0.27.2 (2021-10-08)
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
|
||||||
|
Clients will now close their transport before dropping it. An attempt at a clean shutdown can help
|
||||||
|
the server drop its connections more quickly.
|
||||||
|
|
||||||
|
## 0.27.1 (2021-09-22)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
#### RPC error type is changing
|
||||||
|
|
||||||
|
RPC return types are changing from `Result<Response, io::Error>` to `Result<Response,
|
||||||
|
tarpc::client::RpcError>`.
|
||||||
|
|
||||||
|
Becaue tarpc is a library, not an application, it should strive to
|
||||||
|
use structured errors in its API so that users have maximal flexibility
|
||||||
|
in how they handle errors. io::Error makes that hard, because it is a
|
||||||
|
kitchen-sink error type.
|
||||||
|
|
||||||
|
RPCs in particular only have 3 classes of errors:
|
||||||
|
|
||||||
|
- The connection breaks.
|
||||||
|
- The request expires.
|
||||||
|
- The server decides not to process the request.
|
||||||
|
|
||||||
|
RPC responses can also contain application-specific errors, but from the
|
||||||
|
perspective of the RPC library, those are opaque to the framework, classified
|
||||||
|
as successful responsees.
|
||||||
|
|
||||||
|
### Open Telemetry
|
||||||
|
|
||||||
|
The Opentelemetry dependency is updated to version 0.16.x.
|
||||||
|
|
||||||
|
## 0.27.0 (2021-09-22)
|
||||||
|
|
||||||
|
This version was yanked due to tarpc-plugins version mismatches.
|
||||||
|
|
||||||
|
|
||||||
|
## 0.26.0 (2021-04-14)
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
#### Tracing
|
||||||
|
|
||||||
|
tarpc is now instrumented with tracing primitives extended with
|
||||||
|
OpenTelemetry traces. Using a compatible tracing-opentelemetry
|
||||||
|
subscriber like Jaeger, each RPC can be traced through the client,
|
||||||
|
server, amd other dependencies downstream of the server. Even for
|
||||||
|
applications not connected to a distributed tracing collector, the
|
||||||
|
instrumentation can also be ingested by regular loggers like env_logger.
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
#### Logging
|
||||||
|
|
||||||
|
Logged events are now structured using tracing. For applications using a
|
||||||
|
logger and not a tracing subscriber, these logs may look different or
|
||||||
|
contain information in a less consumable manner. The easiest solution is
|
||||||
|
to add a tracing subscriber that logs to stdout, such as
|
||||||
|
tracing_subscriber::fmt.
|
||||||
|
|
||||||
|
#### Context
|
||||||
|
|
||||||
|
- Context no longer has parent_span, which was actually never needed,
|
||||||
|
because the context sent in an RPC is inherently the parent context.
|
||||||
|
For purposes of distributed tracing, the client side of the RPC has all
|
||||||
|
necessary information to link the span to its parent; the server side
|
||||||
|
need do nothing more than export the (trace ID, span ID) tuple.
|
||||||
|
- Context has a new field, SamplingDecision, which has two variants,
|
||||||
|
Sampled and Unsampled. This field can be used by downstream systems to
|
||||||
|
determine whether a trace needs to be exported. If the parent span is
|
||||||
|
sampled, the expectation is that all child spans be exported, as well;
|
||||||
|
to do otherwise could result in lossy traces being exported. Note that
|
||||||
|
if an Openetelemetry tracing subscriber is not installed, the fallback
|
||||||
|
context will still be used, but the Context's sampling decision will
|
||||||
|
always be inherited by the parent Context's sampling decision.
|
||||||
|
- Context::scope has been removed. Context propagation is now done via
|
||||||
|
tracing's task-local spans. Spans can be propagated across tasks via
|
||||||
|
Span::in_scope. When a service receives a request, it attaches an
|
||||||
|
Opentelemetry context to the local Span created before request handling,
|
||||||
|
and this context contains the request deadline. This span-local deadline
|
||||||
|
is retrieved by Context::current, but it cannot be modified so that
|
||||||
|
future Context::current calls contain a different deadline. However, the
|
||||||
|
deadline in the context passed into an RPC call will override it, so
|
||||||
|
users can retrieve the current context and then modify the deadline
|
||||||
|
field, as has been historically possible.
|
||||||
|
- Context propgation precedence changes: when an RPC is initiated, the
|
||||||
|
current Span's Opentelemetry context takes precedence over the trace
|
||||||
|
context passed into the RPC method. If there is no current Span, then
|
||||||
|
the trace context argument is used as it has been historically. Note
|
||||||
|
that Opentelemetry context propagation requires an Opentelemetry
|
||||||
|
tracing subscriber to be installed.
|
||||||
|
|
||||||
|
#### Server
|
||||||
|
|
||||||
|
- The server::Channel trait now has an additional required associated
|
||||||
|
type and method which returns the underlying transport. This makes it
|
||||||
|
more ergonomic for users to retrieve transport-specific information,
|
||||||
|
like IP Address. BaseChannel implements Channel::transport by returning
|
||||||
|
the underlying transport, and channel decorators like Throttler just
|
||||||
|
delegate to the Channel::transport method of the wrapped channel.
|
||||||
|
|
||||||
|
#### Client
|
||||||
|
|
||||||
|
- NewClient::spawn no longer returns a result, as spawn can't fail.
|
||||||
|
|
||||||
|
### References
|
||||||
|
|
||||||
|
1. https://github.com/tokio-rs/tracing
|
||||||
|
2. https://opentelemetry.io
|
||||||
|
3. https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger
|
||||||
|
4. https://github.com/env-logger-rs/env_logger
|
||||||
|
|
||||||
|
## 0.25.0 (2021-03-10)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
#### Major server module refactoring
|
||||||
|
|
||||||
|
1. Renames
|
||||||
|
|
||||||
|
Some of the items in this module were renamed to be less generic:
|
||||||
|
|
||||||
|
- Handler => Incoming
|
||||||
|
- ClientHandler => Requests
|
||||||
|
- ResponseHandler => InFlightRequest
|
||||||
|
- Channel::{respond_with => requests}
|
||||||
|
|
||||||
|
In the case of Handler: handler of *what*? Now it's a bit clearer that this is a stream of Channels
|
||||||
|
(aka *incoming* connections).
|
||||||
|
|
||||||
|
Similarly, ClientHandler was a stream of requests over a single connection. Hopefully Requests
|
||||||
|
better reflects that.
|
||||||
|
|
||||||
|
ResponseHandler was renamed InFlightRequest because it no longer contains the serving function.
|
||||||
|
Instead, it is just the request, plus the response channel and an abort hook. As a result of this,
|
||||||
|
Channel::respond_with underwent a big change: it used to take the serving function and return a
|
||||||
|
ClientHandler; now it has been renamed Channel::requests and does not take any args.
|
||||||
|
|
||||||
|
2. Execute methods
|
||||||
|
|
||||||
|
All methods thats actually result in responses being generated have been consolidated into methods
|
||||||
|
named `execute`:
|
||||||
|
|
||||||
|
- InFlightRequest::execute returns a future that completes when a response has been generated and
|
||||||
|
sent to the server Channel.
|
||||||
|
- Requests::execute automatically spawns response handlers for all requests over a single channel.
|
||||||
|
- Channel::execute is a convenience for `channel.requests().execute()`.
|
||||||
|
- Incoming::execute automatically spawns response handlers for all requests over all channels.
|
||||||
|
|
||||||
|
3. Removal of Server.
|
||||||
|
|
||||||
|
server::Server was removed, as it provided no value over the Incoming/Channel abstractions.
|
||||||
|
Additionally, server::new was removed, since it just returned a Server.
|
||||||
|
|
||||||
|
#### Client RPC methods now take &self
|
||||||
|
|
||||||
|
This required the breaking change of removing the Client trait. The intent of the Client trait was
|
||||||
|
to facilitate the decorator pattern by allowing users to create their own Clients that added
|
||||||
|
behavior on top of the base client. Unfortunately, this trait had become a maintenance burden,
|
||||||
|
consistently causing issues with lifetimes and the lack of generic associated types. Specifically,
|
||||||
|
it meant that Client impls could not use async fns, which is no longer tenable today, with channel
|
||||||
|
libraries moving to async fns.
|
||||||
|
|
||||||
|
#### Servers no longer send deadline-exceed responses.
|
||||||
|
|
||||||
|
The deadline-exceeded response was largely redundant, because the client
|
||||||
|
shouldn't normally be waiting for such a response, anyway -- the normal
|
||||||
|
client will automatically remove the in-flight request when it reaches
|
||||||
|
the deadline.
|
||||||
|
|
||||||
|
This also allows for internalizing the expiration+cleanup logic entirely
|
||||||
|
within BaseChannel, without having it leak into the Channel trait and
|
||||||
|
requiring action taken by the Requests struct.
|
||||||
|
|
||||||
|
#### Clients no longer send cancel messages when the request deadline is exceeded.
|
||||||
|
|
||||||
|
The server already knows when the request deadline was exceeded, so the client didn't need to inform
|
||||||
|
it.
|
||||||
|
|
||||||
|
### Fixes
|
||||||
|
|
||||||
|
- When a channel is dropped, all in-flight requests for that channel are now aborted.
|
||||||
|
|
||||||
|
## 0.24.1 (2020-12-28)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
Upgrades tokio to 1.0.
|
||||||
|
|
||||||
|
## 0.24.0 (2020-12-28)
|
||||||
|
|
||||||
|
This release was yanked.
|
||||||
|
|
||||||
|
## 0.23.0 (2020-10-19)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
Upgrades tokio to 0.3.
|
||||||
|
|
||||||
|
## 0.22.0 (2020-08-02)
|
||||||
|
|
||||||
|
This release adds some flexibility and consistency to `serde_transport`, with one new feature and
|
||||||
|
one small breaking change.
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
`serde_transport::tcp` now exposes framing configuration on `connect()` and `listen()`. This is
|
||||||
|
useful if, for instance, you want to send requests or responses that are larger than the maximum
|
||||||
|
payload allowed by default:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
let mut transport = tarpc::serde_transport::tcp::connect(server_addr, Json::default);
|
||||||
|
transport.config_mut().max_frame_length(4294967296);
|
||||||
|
let mut client = MyClient::new(client::Config::default(), transport.await?).spawn()?;
|
||||||
|
```
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
The codec argument to `serde_transport::tcp::connect` changed from a Codec to impl Fn() -> Codec,
|
||||||
|
to be consistent with `serde_transport::tcp::listen`. While only one Codec is needed, more than one
|
||||||
|
person has been tripped up by the inconsistency between `connect` and `listen`. Unfortunately, the
|
||||||
|
compiler errors are not much help in this case, so it was decided to simply do the more intuitive
|
||||||
|
thing so that the compiler doesn't need to step in in the first place.
|
||||||
|
|
||||||
|
|
||||||
|
## 0.21.1 (2020-08-02)
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
#### #[tarpc::server] diagnostics
|
||||||
|
|
||||||
|
When a service impl uses #[tarpc::server], only `async fn`s are re-written. This can lead to
|
||||||
|
confusing compiler errors about missing associated types:
|
||||||
|
|
||||||
|
```
|
||||||
|
error: not all trait items implemented, missing: `HelloFut`
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:9:1
|
||||||
|
|
|
||||||
|
9 | impl World for HelloServer {
|
||||||
|
| ^^^^
|
||||||
|
```
|
||||||
|
|
||||||
|
The proc macro now provides better diagnostics for this case:
|
||||||
|
|
||||||
|
```
|
||||||
|
error: not all trait items implemented, missing: `HelloFut`
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:9:1
|
||||||
|
|
|
||||||
|
9 | impl World for HelloServer {
|
||||||
|
| ^^^^
|
||||||
|
|
||||||
|
error: hint: `#[tarpc::server]` only rewrites async fns, and `fn hello` is not async
|
||||||
|
--> $DIR/tarpc_server_missing_async.rs:10:5
|
||||||
|
|
|
||||||
|
10 | fn hello(name: String) -> String {
|
||||||
|
| ^^
|
||||||
|
```
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
#### Fixed client hanging when server shuts down
|
||||||
|
|
||||||
|
Previously, clients would ignore when the read half of the transport was closed, continuing to
|
||||||
|
write requests. This didn't make much sense, because without the ability to receive responses,
|
||||||
|
clients have no way to know if requests were actually processed by the server. It basically just
|
||||||
|
led to clients that would hang for a few seconds before shutting down. This has now been
|
||||||
|
corrected: clients will immediately shut down when the read-half of the transport is closed.
|
||||||
|
|
||||||
|
#### More docs.rs documentation
|
||||||
|
|
||||||
|
Previously, docs.rs only documented items enabled by default, notably leaving out documentation
|
||||||
|
for tokio and serde features. This has now been corrected: docs.rs should have documentation
|
||||||
|
for all optional features.
|
||||||
|
|
||||||
|
## 0.21.0 (2020-06-26)
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
|
||||||
|
A new proc macro, `#[tarpc::server]` was added! This enables service impls to elide the boilerplate
|
||||||
|
of specifying associated types for each RPC. With the ubiquity of async-await, most code won't have
|
||||||
|
nameable futures and will just be boxing the return type anyway. This macro does that for you.
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
- Enums had `_non_exhaustive` fields replaced with the #[non_exhaustive] attribute.
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- https://github.com/google/tarpc/issues/304
|
||||||
|
|
||||||
|
A race condition in code that limits number of connections per client caused occasional panics.
|
||||||
|
|
||||||
|
- https://github.com/google/tarpc/pull/295
|
||||||
|
|
||||||
|
Made request timeouts account for time spent in the outbound buffer. Previously, a large outbound
|
||||||
|
queue would lead to requests not timing out correctly.
|
||||||
|
|
||||||
|
## 0.20.0 (2019-12-11)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
1. tarpc has updated its tokio dependency to the latest 0.2 version.
|
||||||
|
2. The tarpc crates have been unified into just `tarpc`, with new Cargo features to enable
|
||||||
|
functionality.
|
||||||
|
- The bincode-transport and json-transport crates are deprecated and superseded by
|
||||||
|
the `serde_transport` module, which unifies much of the logic present in both crates.
|
||||||
|
|
||||||
|
## 0.13.0 (2018-10-16)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
|
||||||
|
Version 0.13 marks a significant departure from previous versions of tarpc. The
|
||||||
|
API has changed significantly. The tokio-proto crate has been torn out and
|
||||||
|
replaced with a homegrown rpc framework. Additionally, the crate has been
|
||||||
|
modularized, so that the tarpc crate itself contains only the macro code.
|
||||||
|
|
||||||
|
### New Crates
|
||||||
|
|
||||||
|
- crate rpc contains the core client/server request-response framework, as well as a transport trait.
|
||||||
|
- crate bincode-transport implements a transport that works almost exactly as tarpc works today (not to say it's wire-compatible).
|
||||||
|
- crate trace has some foundational types for tracing. This isn't really fleshed out yet, but it's useful for in-process log tracing, at least.
|
||||||
|
|
||||||
|
All crates are now at the top level. e.g. tarpc-plugins is now tarpc/plugins rather than tarpc/src/plugins. tarpc itself is now a *very* small code surface, as most functionality has been moved into the other more granular crates.
|
||||||
|
|
||||||
|
### New Features
|
||||||
|
- deadlines: all requests specify a deadline, and a server will stop processing a response when past its deadline.
|
||||||
|
- client cancellation propagation: when a client drops a request, the client sends a message to the server informing it to cancel its response. This means cancellations can propagate across multiple server hops.
|
||||||
|
- trace context stuff as mentioned above
|
||||||
|
- more server configuration for total connection limits, per-connection request limits, etc.
|
||||||
|
|
||||||
|
### Removals
|
||||||
|
- no more shutdown handle. I left it out for now because of time and not being sure what the right solution is.
|
||||||
|
- all async now, no blocking stub or server interface. This helps with maintainability, and async/await makes async code much more usable. The service trait is thusly renamed Service, and the client is renamed Client.
|
||||||
|
- no built-in transport. Tarpc is now transport agnostic (see bincode-transport for transitioning existing uses).
|
||||||
|
- going along with the previous bullet, no preferred transport means no TLS support at this time. We could make a tls transport or make bincode-transport compatible with TLS.
|
||||||
|
- a lot of examples were removed because I couldn't keep up with maintaining all of them. Hopefully the ones I kept are still illustrative.
|
||||||
|
- no more plugins!
|
||||||
|
|
||||||
|
## 0.10.0 (2018-04-08)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
Fixed rustc breakage in tarpc-plugins. These changes require a recent version of rustc.
|
||||||
|
|
||||||
|
## 0.10.0 (2018-03-26)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
Updates bincode to version 1.0.
|
||||||
|
|
||||||
|
## 0.9.0 (2017-09-17)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
Updates tarpc to use tarpc-plugins 0.2.
|
||||||
|
|
||||||
|
## 0.8.0 (2017-05-05)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
This release updates tarpc to use serde 1.0.
|
||||||
|
As such, users must also update to use serde 1.0.
|
||||||
|
The serde 1.0 [release notes](https://github.com/serde-rs/serde/releases/tag/v1.0.0)
|
||||||
|
detail migration paths.
|
||||||
|
|
||||||
|
## 0.7.3 (2017-04-26)
|
||||||
|
|
||||||
|
This release removes the `Sync` bound on RPC args for both sync and future
|
||||||
|
clients. No breaking changes.
|
||||||
|
|
||||||
|
## 0.7.2 (2017-04-22)
|
||||||
|
|
||||||
|
### Breaking Changes
|
||||||
|
This release updates tarpc-plugins to work with rustc master. Thus, older
|
||||||
|
versions of rustc are no longer supported. We chose a minor version bump
|
||||||
|
because it is still source-compatible with existing code using tarpc.
|
||||||
|
|
||||||
|
## 0.7.1 (2017-03-31)
|
||||||
|
|
||||||
|
This release was purely doc fixes. No breaking changes.
|
||||||
|
|
||||||
## 0.7 (2017-03-31)
|
## 0.7 (2017-03-31)
|
||||||
|
|
||||||
## Breaking Changes
|
### Breaking Changes
|
||||||
This release is a complete overhaul to build tarpc on top of the tokio stack.
|
This release is a complete overhaul to build tarpc on top of the tokio stack.
|
||||||
It's safe to assume that everything broke with this release.
|
It's safe to assume that everything broke with this release.
|
||||||
|
|
||||||
|
|||||||
@@ -1,53 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin, test)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
#[cfg(test)]
|
|
||||||
extern crate test;
|
|
||||||
extern crate env_logger;
|
|
||||||
extern crate futures;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use tarpc::future::{client, server};
|
|
||||||
use tarpc::future::client::ClientExt;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
#[cfg(test)]
|
|
||||||
use test::Bencher;
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc ack();
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct Server;
|
|
||||||
|
|
||||||
impl FutureService for Server {
|
|
||||||
type AckFut = futures::Finished<(), Never>;
|
|
||||||
fn ack(&self) -> Self::AckFut {
|
|
||||||
futures::finished(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
#[bench]
|
|
||||||
fn latency(bencher: &mut Bencher) {
|
|
||||||
let _ = env_logger::init();
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (handle, server) = Server.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
reactor.handle().spawn(server);
|
|
||||||
let client = FutureClient::connect(handle.addr(),
|
|
||||||
client::Options::default().handle(reactor.handle()));
|
|
||||||
let client = reactor.run(client).unwrap();
|
|
||||||
|
|
||||||
bencher.iter(|| reactor.run(client.ack()).unwrap());
|
|
||||||
}
|
|
||||||
41
example-service/Cargo.toml
Normal file
41
example-service/Cargo.toml
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
[package]
|
||||||
|
name = "tarpc-example-service"
|
||||||
|
version = "0.15.0"
|
||||||
|
rust-version = "1.56"
|
||||||
|
authors = ["Tim Kuehn <tikue@google.com>"]
|
||||||
|
edition = "2021"
|
||||||
|
license = "MIT"
|
||||||
|
documentation = "https://docs.rs/tarpc-example-service"
|
||||||
|
homepage = "https://github.com/google/tarpc"
|
||||||
|
repository = "https://github.com/google/tarpc"
|
||||||
|
keywords = ["rpc", "network", "server", "microservices", "example"]
|
||||||
|
categories = ["asynchronous", "network-programming"]
|
||||||
|
readme = "../README.md"
|
||||||
|
description = "An example server built on tarpc."
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow = "1.0"
|
||||||
|
clap = { version = "3.0.0-rc.9", features = ["derive"] }
|
||||||
|
log = "0.4"
|
||||||
|
futures = "0.3"
|
||||||
|
opentelemetry = { version = "0.21.0" }
|
||||||
|
opentelemetry-jaeger = { version = "0.20.0", features = ["rt-tokio"] }
|
||||||
|
rand = "0.8"
|
||||||
|
tarpc = { version = "0.34", path = "../tarpc", features = ["full"] }
|
||||||
|
tokio = { version = "1", features = ["macros", "net", "rt-multi-thread"] }
|
||||||
|
tracing = { version = "0.1" }
|
||||||
|
tracing-opentelemetry = "0.22.0"
|
||||||
|
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
|
||||||
|
opentelemetry_sdk = "0.21.1"
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
name = "service"
|
||||||
|
path = "src/lib.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "server"
|
||||||
|
path = "src/server.rs"
|
||||||
|
|
||||||
|
[[bin]]
|
||||||
|
name = "client"
|
||||||
|
path = "src/client.rs"
|
||||||
15
example-service/README.md
Normal file
15
example-service/README.md
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
# Example
|
||||||
|
|
||||||
|
Example service to demonstrate how to set up `tarpc` with [Jaeger](https://www.jaegertracing.io). To see traces Jaeger, run the following with `RUST_LOG=trace`.
|
||||||
|
|
||||||
|
## Server
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo run --bin server -- --port 50051
|
||||||
|
```
|
||||||
|
|
||||||
|
## Client
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo run --bin client -- --server-addr "[::1]:50051" --name "Bob"
|
||||||
|
```
|
||||||
56
example-service/src/client.rs
Normal file
56
example-service/src/client.rs
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use service::{init_tracing, WorldClient};
|
||||||
|
use std::{net::SocketAddr, time::Duration};
|
||||||
|
use tarpc::{client, context, tokio_serde::formats::Json};
|
||||||
|
use tokio::time::sleep;
|
||||||
|
use tracing::Instrument;
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
struct Flags {
|
||||||
|
/// Sets the server address to connect to.
|
||||||
|
#[clap(long)]
|
||||||
|
server_addr: SocketAddr,
|
||||||
|
/// Sets the name to say hello to.
|
||||||
|
#[clap(long)]
|
||||||
|
name: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let flags = Flags::parse();
|
||||||
|
init_tracing("Tarpc Example Client")?;
|
||||||
|
|
||||||
|
let mut transport = tarpc::serde_transport::tcp::connect(flags.server_addr, Json::default);
|
||||||
|
transport.config_mut().max_frame_length(usize::MAX);
|
||||||
|
|
||||||
|
// WorldClient is generated by the service attribute. It has a constructor `new` that takes a
|
||||||
|
// config and any Transport as input.
|
||||||
|
let client = WorldClient::new(client::Config::default(), transport.await?).spawn();
|
||||||
|
|
||||||
|
let hello = async move {
|
||||||
|
// Send the request twice, just to be safe! ;)
|
||||||
|
tokio::select! {
|
||||||
|
hello1 = client.hello(context::current(), format!("{}1", flags.name)) => { hello1 }
|
||||||
|
hello2 = client.hello(context::current(), format!("{}2", flags.name)) => { hello2 }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.instrument(tracing::info_span!("Two Hellos"))
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match hello {
|
||||||
|
Ok(hello) => tracing::info!("{hello:?}"),
|
||||||
|
Err(e) => tracing::warn!("{:?}", anyhow::Error::from(e)),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Let the background span processor finish.
|
||||||
|
sleep(Duration::from_micros(1)).await;
|
||||||
|
opentelemetry::global::shutdown_tracer_provider();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
34
example-service/src/lib.rs
Normal file
34
example-service/src/lib.rs
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use std::env;
|
||||||
|
use tracing_subscriber::{fmt::format::FmtSpan, prelude::*};
|
||||||
|
|
||||||
|
/// This is the service definition. It looks a lot like a trait definition.
|
||||||
|
/// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait World {
|
||||||
|
/// Returns a greeting for name.
|
||||||
|
async fn hello(name: String) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initializes an OpenTelemetry tracing subscriber with a Jaeger backend.
|
||||||
|
pub fn init_tracing(service_name: &str) -> anyhow::Result<()> {
|
||||||
|
env::set_var("OTEL_BSP_MAX_EXPORT_BATCH_SIZE", "12");
|
||||||
|
|
||||||
|
let tracer = opentelemetry_jaeger::new_agent_pipeline()
|
||||||
|
.with_service_name(service_name)
|
||||||
|
.with_max_packet_size(2usize.pow(13))
|
||||||
|
.install_batch(opentelemetry_sdk::runtime::Tokio)?;
|
||||||
|
|
||||||
|
tracing_subscriber::registry()
|
||||||
|
.with(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.with(tracing_subscriber::fmt::layer().with_span_events(FmtSpan::NEW | FmtSpan::CLOSE))
|
||||||
|
.with(tracing_opentelemetry::layer().with_tracer(tracer))
|
||||||
|
.try_init()?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
80
example-service/src/server.rs
Normal file
80
example-service/src/server.rs
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use futures::{future, prelude::*};
|
||||||
|
use rand::{
|
||||||
|
distributions::{Distribution, Uniform},
|
||||||
|
thread_rng,
|
||||||
|
};
|
||||||
|
use service::{init_tracing, World};
|
||||||
|
use std::{
|
||||||
|
net::{IpAddr, Ipv6Addr, SocketAddr},
|
||||||
|
time::Duration,
|
||||||
|
};
|
||||||
|
use tarpc::{
|
||||||
|
context,
|
||||||
|
server::{self, incoming::Incoming, Channel},
|
||||||
|
tokio_serde::formats::Json,
|
||||||
|
};
|
||||||
|
use tokio::time;
|
||||||
|
|
||||||
|
#[derive(Parser)]
|
||||||
|
struct Flags {
|
||||||
|
/// Sets the port number to listen on.
|
||||||
|
#[clap(long)]
|
||||||
|
port: u16,
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is the type that implements the generated World trait. It is the business logic
|
||||||
|
// and is used to start the server.
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct HelloServer(SocketAddr);
|
||||||
|
|
||||||
|
impl World for HelloServer {
|
||||||
|
async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
let sleep_time =
|
||||||
|
Duration::from_millis(Uniform::new_inclusive(1, 10).sample(&mut thread_rng()));
|
||||||
|
time::sleep(sleep_time).await;
|
||||||
|
format!("Hello, {name}! You are connected from {}", self.0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn spawn(fut: impl Future<Output = ()> + Send + 'static) {
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let flags = Flags::parse();
|
||||||
|
init_tracing("Tarpc Example Server")?;
|
||||||
|
|
||||||
|
let server_addr = (IpAddr::V6(Ipv6Addr::LOCALHOST), flags.port);
|
||||||
|
|
||||||
|
// JSON transport is provided by the json_transport tarpc module. It makes it easy
|
||||||
|
// to start up a serde-powered json serialization strategy over TCP.
|
||||||
|
let mut listener = tarpc::serde_transport::tcp::listen(&server_addr, Json::default).await?;
|
||||||
|
tracing::info!("Listening on port {}", listener.local_addr().port());
|
||||||
|
listener.config_mut().max_frame_length(usize::MAX);
|
||||||
|
listener
|
||||||
|
// Ignore accept errors.
|
||||||
|
.filter_map(|r| future::ready(r.ok()))
|
||||||
|
.map(server::BaseChannel::with_defaults)
|
||||||
|
// Limit channels to 1 per IP.
|
||||||
|
.max_channels_per_key(1, |t| t.transport().peer_addr().unwrap().ip())
|
||||||
|
// serve is generated by the service attribute. It takes as input any type implementing
|
||||||
|
// the generated World trait.
|
||||||
|
.map(|channel| {
|
||||||
|
let server = HelloServer(channel.transport().peer_addr().unwrap());
|
||||||
|
channel.execute(server.serve()).for_each(spawn)
|
||||||
|
})
|
||||||
|
// Max 10 channels.
|
||||||
|
.buffer_unordered(10)
|
||||||
|
.for_each(|_| async {})
|
||||||
|
.await;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@@ -1,192 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(inclusive_range_syntax, conservative_impl_trait, plugin, never_type)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate chrono;
|
|
||||||
extern crate clap;
|
|
||||||
extern crate env_logger;
|
|
||||||
extern crate futures;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate log;
|
|
||||||
extern crate serde;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate tokio_core;
|
|
||||||
extern crate futures_cpupool;
|
|
||||||
|
|
||||||
use clap::{Arg, App};
|
|
||||||
use futures::{Future, Stream};
|
|
||||||
use futures_cpupool::{CpuFuture, CpuPool};
|
|
||||||
use std::{cmp, thread};
|
|
||||||
use std::sync::{Arc, mpsc};
|
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
||||||
use std::time::{Duration, Instant};
|
|
||||||
use tarpc::future::{client, server};
|
|
||||||
use tarpc::future::client::ClientExt;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc read(size: u32) -> serde::bytes::ByteBuf;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct Server {
|
|
||||||
pool: CpuPool,
|
|
||||||
request_count: Arc<AtomicUsize>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Server {
|
|
||||||
fn new() -> Self {
|
|
||||||
Server {
|
|
||||||
pool: CpuPool::new_num_cpus(),
|
|
||||||
request_count: Arc::new(AtomicUsize::new(1)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl FutureService for Server {
|
|
||||||
type ReadFut = CpuFuture<serde::bytes::ByteBuf, Never>;
|
|
||||||
|
|
||||||
fn read(&self, size: u32) -> Self::ReadFut {
|
|
||||||
let request_number = self.request_count.fetch_add(1, Ordering::SeqCst);
|
|
||||||
debug!("Server received read({}) no. {}", size, request_number);
|
|
||||||
self.pool
|
|
||||||
.spawn(futures::lazy(move || {
|
|
||||||
let mut vec = Vec::with_capacity(size as usize);
|
|
||||||
for i in 0..size {
|
|
||||||
vec.push(((i % 2) << 8) as u8);
|
|
||||||
}
|
|
||||||
debug!("Server sending response no. {}", request_number);
|
|
||||||
Ok(vec.into())
|
|
||||||
}))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const CHUNK_SIZE: u32 = 1 << 10;
|
|
||||||
|
|
||||||
trait Microseconds {
|
|
||||||
fn microseconds(&self) -> i64;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Microseconds for Duration {
|
|
||||||
fn microseconds(&self) -> i64 {
|
|
||||||
chrono::Duration::from_std(*self)
|
|
||||||
.unwrap()
|
|
||||||
.num_microseconds()
|
|
||||||
.unwrap()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
struct Stats {
|
|
||||||
sum: Duration,
|
|
||||||
count: u64,
|
|
||||||
min: Option<Duration>,
|
|
||||||
max: Option<Duration>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Spawns a `reactor::Core` running forever on a new thread.
|
|
||||||
fn spawn_core() -> reactor::Remote {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let mut core = reactor::Core::new().unwrap();
|
|
||||||
tx.send(core.handle().remote().clone()).unwrap();
|
|
||||||
|
|
||||||
// Run forever
|
|
||||||
core.run(futures::empty::<(), !>()).unwrap();
|
|
||||||
});
|
|
||||||
rx.recv().unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn run_once(clients: Vec<FutureClient>,
|
|
||||||
concurrency: u32)
|
|
||||||
-> impl Future<Item = (), Error = ()> + 'static {
|
|
||||||
let start = Instant::now();
|
|
||||||
futures::stream::futures_unordered((0..concurrency as usize)
|
|
||||||
.zip(clients.iter().enumerate().cycle())
|
|
||||||
.map(|(iteration, (client_idx, client))| {
|
|
||||||
let start = Instant::now();
|
|
||||||
debug!("Client {} reading (iteration {})...", client_idx, iteration);
|
|
||||||
client.read(CHUNK_SIZE)
|
|
||||||
.map(move |_| (client_idx, iteration, start))
|
|
||||||
}))
|
|
||||||
.map(|(client_idx, iteration, start)| {
|
|
||||||
let elapsed = start.elapsed();
|
|
||||||
debug!("Client {} received reply (iteration {}).",
|
|
||||||
client_idx,
|
|
||||||
iteration);
|
|
||||||
elapsed
|
|
||||||
})
|
|
||||||
.map_err(|e| panic!(e))
|
|
||||||
.fold(Stats::default(), move |mut stats, elapsed| {
|
|
||||||
stats.sum += elapsed;
|
|
||||||
stats.count += 1;
|
|
||||||
stats.min = Some(cmp::min(stats.min.unwrap_or(elapsed), elapsed));
|
|
||||||
stats.max = Some(cmp::max(stats.max.unwrap_or(elapsed), elapsed));
|
|
||||||
Ok(stats)
|
|
||||||
})
|
|
||||||
.map(move |stats| {
|
|
||||||
info!("{} requests => Mean={}µs, Min={}µs, Max={}µs, Total={}µs",
|
|
||||||
stats.count,
|
|
||||||
stats.sum.microseconds() as f64 / stats.count as f64,
|
|
||||||
stats.min.unwrap().microseconds(),
|
|
||||||
stats.max.unwrap().microseconds(),
|
|
||||||
start.elapsed().microseconds());
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let _ = env_logger::init();
|
|
||||||
let matches = App::new("Tarpc Concurrency")
|
|
||||||
.about("Demonstrates making concurrent requests to a tarpc service.")
|
|
||||||
.arg(Arg::with_name("concurrency")
|
|
||||||
.short("c")
|
|
||||||
.long("concurrency")
|
|
||||||
.value_name("LEVEL")
|
|
||||||
.help("Sets a custom concurrency level")
|
|
||||||
.takes_value(true))
|
|
||||||
.arg(Arg::with_name("clients")
|
|
||||||
.short("n")
|
|
||||||
.long("num_clients")
|
|
||||||
.value_name("AMOUNT")
|
|
||||||
.help("How many clients to distribute requests between")
|
|
||||||
.takes_value(true))
|
|
||||||
.get_matches();
|
|
||||||
let concurrency = matches.value_of("concurrency")
|
|
||||||
.map(&str::parse)
|
|
||||||
.map(Result::unwrap)
|
|
||||||
.unwrap_or(10);
|
|
||||||
let num_clients = matches.value_of("clients")
|
|
||||||
.map(&str::parse)
|
|
||||||
.map(Result::unwrap)
|
|
||||||
.unwrap_or(4);
|
|
||||||
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (handle, server) = Server::new()
|
|
||||||
.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
reactor.handle().spawn(server);
|
|
||||||
info!("Server listening on {}.", handle.addr());
|
|
||||||
|
|
||||||
let clients = (0..num_clients)
|
|
||||||
// Spin up a couple threads to drive the clients.
|
|
||||||
.map(|i| (i, spawn_core()))
|
|
||||||
.map(|(i, remote)| {
|
|
||||||
info!("Client {} connecting...", i);
|
|
||||||
FutureClient::connect(handle.addr(), client::Options::default().remote(remote))
|
|
||||||
.map_err(|e| panic!(e))
|
|
||||||
});
|
|
||||||
|
|
||||||
let run = futures::collect(clients).and_then(|clients| run_once(clients, concurrency));
|
|
||||||
|
|
||||||
info!("Starting...");
|
|
||||||
|
|
||||||
reactor.run(run).unwrap();
|
|
||||||
}
|
|
||||||
@@ -1,151 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate env_logger;
|
|
||||||
extern crate futures;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use futures::{Future, future};
|
|
||||||
use publisher::FutureServiceExt as PublisherExt;
|
|
||||||
use std::cell::RefCell;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::rc::Rc;
|
|
||||||
use std::thread;
|
|
||||||
use std::time::Duration;
|
|
||||||
use subscriber::FutureServiceExt as SubscriberExt;
|
|
||||||
use tarpc::future::{client, server};
|
|
||||||
use tarpc::future::client::ClientExt;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Message, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
pub mod subscriber {
|
|
||||||
service! {
|
|
||||||
rpc receive(message: String);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub mod publisher {
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use tarpc::util::Message;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc broadcast(message: String);
|
|
||||||
rpc subscribe(id: u32, address: SocketAddr) | Message;
|
|
||||||
rpc unsubscribe(id: u32);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
struct Subscriber {
|
|
||||||
id: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl subscriber::FutureService for Subscriber {
|
|
||||||
type ReceiveFut = Result<(), Never>;
|
|
||||||
|
|
||||||
fn receive(&self, message: String) -> Self::ReceiveFut {
|
|
||||||
println!("{} received message: {}", self.id, message);
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Subscriber {
|
|
||||||
fn listen(id: u32,
|
|
||||||
handle: &reactor::Handle,
|
|
||||||
options: server::Options)
|
|
||||||
-> server::Handle {
|
|
||||||
let (server_handle, server) = Subscriber { id: id }
|
|
||||||
.listen("localhost:0".first_socket_addr(), handle, options)
|
|
||||||
.unwrap();
|
|
||||||
handle.spawn(server);
|
|
||||||
server_handle
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
struct Publisher {
|
|
||||||
clients: Rc<RefCell<HashMap<u32, subscriber::FutureClient>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Publisher {
|
|
||||||
fn new() -> Publisher {
|
|
||||||
Publisher { clients: Rc::new(RefCell::new(HashMap::new())) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl publisher::FutureService for Publisher {
|
|
||||||
type BroadcastFut = Box<Future<Item = (), Error = Never>>;
|
|
||||||
|
|
||||||
fn broadcast(&self, message: String) -> Self::BroadcastFut {
|
|
||||||
let acks = self.clients
|
|
||||||
.borrow()
|
|
||||||
.values()
|
|
||||||
.map(move |client| client.receive(message.clone())
|
|
||||||
// Ignore failing subscribers. In a real pubsub,
|
|
||||||
// you'd want to continually retry until subscribers
|
|
||||||
// ack.
|
|
||||||
.then(|_| Ok(())))
|
|
||||||
// Collect to a vec to end the borrow on `self.clients`.
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
Box::new(future::join_all(acks).map(|_| ()))
|
|
||||||
}
|
|
||||||
|
|
||||||
type SubscribeFut = Box<Future<Item = (), Error = Message>>;
|
|
||||||
|
|
||||||
fn subscribe(&self, id: u32, address: SocketAddr) -> Self::SubscribeFut {
|
|
||||||
let clients = self.clients.clone();
|
|
||||||
Box::new(subscriber::FutureClient::connect(address, client::Options::default())
|
|
||||||
.map(move |subscriber| {
|
|
||||||
println!("Subscribing {}.", id);
|
|
||||||
clients.borrow_mut().insert(id, subscriber);
|
|
||||||
()
|
|
||||||
})
|
|
||||||
.map_err(|e| e.to_string().into()))
|
|
||||||
}
|
|
||||||
|
|
||||||
type UnsubscribeFut = Box<Future<Item = (), Error = Never>>;
|
|
||||||
|
|
||||||
fn unsubscribe(&self, id: u32) -> Self::UnsubscribeFut {
|
|
||||||
println!("Unsubscribing {}", id);
|
|
||||||
self.clients.borrow_mut().remove(&id).unwrap();
|
|
||||||
futures::finished(()).boxed()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let _ = env_logger::init();
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (publisher_handle, server) = Publisher::new()
|
|
||||||
.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
reactor.handle().spawn(server);
|
|
||||||
|
|
||||||
let subscriber1 = Subscriber::listen(0, &reactor.handle(), server::Options::default());
|
|
||||||
let subscriber2 = Subscriber::listen(1, &reactor.handle(), server::Options::default());
|
|
||||||
|
|
||||||
let publisher =
|
|
||||||
reactor.run(publisher::FutureClient::connect(publisher_handle.addr(),
|
|
||||||
client::Options::default()))
|
|
||||||
.unwrap();
|
|
||||||
reactor.run(publisher.subscribe(0, subscriber1.addr())
|
|
||||||
.and_then(|_| publisher.subscribe(1, subscriber2.addr()))
|
|
||||||
.map_err(|e| panic!(e))
|
|
||||||
.and_then(|_| {
|
|
||||||
println!("Broadcasting...");
|
|
||||||
publisher.broadcast("hello to all".to_string())
|
|
||||||
})
|
|
||||||
.and_then(|_| publisher.unsubscribe(1))
|
|
||||||
.and_then(|_| publisher.broadcast("hi again".to_string())))
|
|
||||||
.unwrap();
|
|
||||||
thread::sleep(Duration::from_millis(300));
|
|
||||||
}
|
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate futures;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate serde_derive;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use std::error::Error;
|
|
||||||
use std::fmt;
|
|
||||||
use std::sync::mpsc;
|
|
||||||
use std::thread;
|
|
||||||
use tarpc::sync::{client, server};
|
|
||||||
use tarpc::sync::client::ClientExt;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc hello(name: String) -> String | NoNameGiven;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, Deserialize, Serialize)]
|
|
||||||
pub struct NoNameGiven;
|
|
||||||
|
|
||||||
impl fmt::Display for NoNameGiven {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
write!(f, "{}", self.description())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Error for NoNameGiven {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
r#"The empty String, "", is not a valid argument to rpc `hello`."#
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct HelloServer;
|
|
||||||
|
|
||||||
impl SyncService for HelloServer {
|
|
||||||
fn hello(&self, name: String) -> Result<String, NoNameGiven> {
|
|
||||||
if name == "" {
|
|
||||||
Err(NoNameGiven)
|
|
||||||
} else {
|
|
||||||
Ok(format!("Hello, {}!", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let handle = HelloServer.listen("localhost:10000", server::Options::default()).unwrap();
|
|
||||||
tx.send(handle.addr()).unwrap();
|
|
||||||
handle.run();
|
|
||||||
});
|
|
||||||
let client = SyncClient::connect(rx.recv().unwrap(), client::Options::default()).unwrap();
|
|
||||||
println!("{}", client.hello("Mom".to_string()).unwrap());
|
|
||||||
println!("{}", client.hello("".to_string()).unwrap_err());
|
|
||||||
}
|
|
||||||
@@ -1,49 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate futures;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use futures::Future;
|
|
||||||
use tarpc::future::{client, server};
|
|
||||||
use tarpc::future::client::ClientExt;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc hello(name: String) -> String;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct HelloServer;
|
|
||||||
|
|
||||||
impl FutureService for HelloServer {
|
|
||||||
type HelloFut = Result<String, Never>;
|
|
||||||
|
|
||||||
fn hello(&self, name: String) -> Self::HelloFut {
|
|
||||||
Ok(format!("Hello, {}!", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (handle, server) = HelloServer.listen("localhost:10000".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
reactor.handle().spawn(server);
|
|
||||||
|
|
||||||
let options = client::Options::default().handle(reactor.handle());
|
|
||||||
reactor.run(FutureClient::connect(handle.addr(), options)
|
|
||||||
.map_err(tarpc::Error::from)
|
|
||||||
.and_then(|client| client.hello("Mom".to_string()))
|
|
||||||
.map(|resp| println!("{}", resp)))
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
@@ -1,43 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
// required by `FutureClient` (not used directly in this example)
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate futures;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use std::sync::mpsc;
|
|
||||||
use std::thread;
|
|
||||||
use tarpc::sync::{client, server};
|
|
||||||
use tarpc::sync::client::ClientExt;
|
|
||||||
use tarpc::util::Never;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc hello(name: String) -> String;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct HelloServer;
|
|
||||||
|
|
||||||
impl SyncService for HelloServer {
|
|
||||||
fn hello(&self, name: String) -> Result<String, Never> {
|
|
||||||
Ok(format!("Hello from thread {}, {}!", thread::current().name().unwrap(), name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let handle = HelloServer.listen("localhost:0", server::Options::default()).unwrap();
|
|
||||||
tx.send(handle.addr()).unwrap();
|
|
||||||
handle.run();
|
|
||||||
});
|
|
||||||
let client = SyncClient::connect(rx.recv().unwrap(), client::Options::default()).unwrap();
|
|
||||||
println!("{}", client.hello("Mom".to_string()).unwrap());
|
|
||||||
}
|
|
||||||
@@ -1,101 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate env_logger;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate futures;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use add::{FutureService as AddFutureService, FutureServiceExt as AddExt};
|
|
||||||
use double::{FutureService as DoubleFutureService, FutureServiceExt as DoubleExt};
|
|
||||||
use futures::{BoxFuture, Future, Stream};
|
|
||||||
use tarpc::future::{client, server};
|
|
||||||
use tarpc::future::client::ClientExt as Fc;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Message, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
pub mod add {
|
|
||||||
service! {
|
|
||||||
/// Add two ints together.
|
|
||||||
rpc add(x: i32, y: i32) -> i32;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub mod double {
|
|
||||||
use tarpc::util::Message;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
/// 2 * x
|
|
||||||
rpc double(x: i32) -> i32 | Message;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct AddServer;
|
|
||||||
|
|
||||||
impl AddFutureService for AddServer {
|
|
||||||
type AddFut = Result<i32, Never>;
|
|
||||||
|
|
||||||
fn add(&self, x: i32, y: i32) -> Self::AddFut {
|
|
||||||
Ok(x + y)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct DoubleServer {
|
|
||||||
client: add::FutureClient,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DoubleServer {
|
|
||||||
fn new(client: add::FutureClient) -> Self {
|
|
||||||
DoubleServer { client: client }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DoubleFutureService for DoubleServer {
|
|
||||||
type DoubleFut = BoxFuture<i32, Message>;
|
|
||||||
|
|
||||||
fn double(&self, x: i32) -> Self::DoubleFut {
|
|
||||||
self.client
|
|
||||||
.add(x, x)
|
|
||||||
.map_err(|e| e.to_string().into())
|
|
||||||
.boxed()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let _ = env_logger::init();
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (add, server) = AddServer.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
reactor.handle().spawn(server);
|
|
||||||
|
|
||||||
let options = client::Options::default().handle(reactor.handle());
|
|
||||||
let add_client = reactor.run(add::FutureClient::connect(add.addr(), options)).unwrap();
|
|
||||||
|
|
||||||
let (double, server) = DoubleServer::new(add_client)
|
|
||||||
.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
reactor.handle().spawn(server);
|
|
||||||
|
|
||||||
let double_client =
|
|
||||||
reactor.run(double::FutureClient::connect(double.addr(), client::Options::default()))
|
|
||||||
.unwrap();
|
|
||||||
reactor.run(futures::stream::futures_unordered((0..5).map(|i| double_client.double(i)))
|
|
||||||
.map_err(|e| println!("{}", e))
|
|
||||||
.for_each(|i| {
|
|
||||||
println!("{:?}", i);
|
|
||||||
Ok(())
|
|
||||||
}))
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
@@ -1,97 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
extern crate env_logger;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate futures;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use add::{SyncService as AddSyncService, SyncServiceExt as AddExt};
|
|
||||||
use double::{SyncService as DoubleSyncService, SyncServiceExt as DoubleExt};
|
|
||||||
use std::sync::mpsc;
|
|
||||||
use std::thread;
|
|
||||||
use tarpc::sync::{client, server};
|
|
||||||
use tarpc::sync::client::ClientExt as Fc;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Message, Never};
|
|
||||||
|
|
||||||
pub mod add {
|
|
||||||
service! {
|
|
||||||
/// Add two ints together.
|
|
||||||
rpc add(x: i32, y: i32) -> i32;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub mod double {
|
|
||||||
use tarpc::util::Message;
|
|
||||||
|
|
||||||
service! {
|
|
||||||
/// 2 * x
|
|
||||||
rpc double(x: i32) -> i32 | Message;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct AddServer;
|
|
||||||
|
|
||||||
impl AddSyncService for AddServer {
|
|
||||||
fn add(&self, x: i32, y: i32) -> Result<i32, Never> {
|
|
||||||
Ok(x + y)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct DoubleServer {
|
|
||||||
client: add::SyncClient,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DoubleServer {
|
|
||||||
fn new(client: add::SyncClient) -> Self {
|
|
||||||
DoubleServer { client: client }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DoubleSyncService for DoubleServer {
|
|
||||||
fn double(&self, x: i32) -> Result<i32, Message> {
|
|
||||||
self.client
|
|
||||||
.add(x, x)
|
|
||||||
.map_err(|e| e.to_string().into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let _ = env_logger::init();
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let handle = AddServer.listen("localhost:0".first_socket_addr(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
tx.send(handle.addr()).unwrap();
|
|
||||||
handle.run();
|
|
||||||
});
|
|
||||||
|
|
||||||
|
|
||||||
let add = rx.recv().unwrap();
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let add_client = add::SyncClient::connect(add, client::Options::default()).unwrap();
|
|
||||||
let handle = DoubleServer::new(add_client)
|
|
||||||
.listen("localhost:0".first_socket_addr(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
tx.send(handle.addr()).unwrap();
|
|
||||||
handle.run();
|
|
||||||
});
|
|
||||||
let double = rx.recv().unwrap();
|
|
||||||
|
|
||||||
let double_client = double::SyncClient::connect(double, client::Options::default()).unwrap();
|
|
||||||
for i in 0..5 {
|
|
||||||
let doubled = double_client.double(i).unwrap();
|
|
||||||
println!("{:?}", doubled);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,114 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
#[macro_use]
|
|
||||||
extern crate lazy_static;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate env_logger;
|
|
||||||
extern crate futures;
|
|
||||||
extern crate serde;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use std::io::{Read, Write, stdout};
|
|
||||||
use std::net;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::sync::mpsc;
|
|
||||||
use std::thread;
|
|
||||||
use std::time;
|
|
||||||
use tarpc::future::server;
|
|
||||||
use tarpc::sync::client::{self, ClientExt};
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
lazy_static! {
|
|
||||||
static ref BUF: Arc<serde::bytes::ByteBuf> = Arc::new(gen_vec(CHUNK_SIZE as usize).into());
|
|
||||||
}
|
|
||||||
|
|
||||||
fn gen_vec(size: usize) -> Vec<u8> {
|
|
||||||
let mut vec: Vec<u8> = Vec::with_capacity(size);
|
|
||||||
for i in 0..size {
|
|
||||||
vec.push(((i % 2) << 8) as u8);
|
|
||||||
}
|
|
||||||
vec
|
|
||||||
}
|
|
||||||
|
|
||||||
service! {
|
|
||||||
rpc read() -> Arc<serde::bytes::ByteBuf>;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct Server;
|
|
||||||
|
|
||||||
impl FutureService for Server {
|
|
||||||
type ReadFut = Result<Arc<serde::bytes::ByteBuf>, Never>;
|
|
||||||
|
|
||||||
fn read(&self) -> Self::ReadFut {
|
|
||||||
Ok(BUF.clone())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const CHUNK_SIZE: u32 = 1 << 19;
|
|
||||||
|
|
||||||
fn bench_tarpc(target: u64) {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (addr, server) = Server.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
tx.send(addr).unwrap();
|
|
||||||
reactor.run(server).unwrap();
|
|
||||||
});
|
|
||||||
let client = SyncClient::connect(rx.recv().unwrap().addr(), client::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
let start = time::Instant::now();
|
|
||||||
let mut nread = 0;
|
|
||||||
while nread < target {
|
|
||||||
nread += client.read().unwrap().len() as u64;
|
|
||||||
print!(".");
|
|
||||||
stdout().flush().unwrap();
|
|
||||||
}
|
|
||||||
println!("done");
|
|
||||||
let duration = time::Instant::now() - start;
|
|
||||||
println!("TARPC: {}MB/s",
|
|
||||||
(target as f64 / (1024f64 * 1024f64)) /
|
|
||||||
(duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 10E9));
|
|
||||||
}
|
|
||||||
|
|
||||||
fn bench_tcp(target: u64) {
|
|
||||||
let l = net::TcpListener::bind("localhost:0").unwrap();
|
|
||||||
let addr = l.local_addr().unwrap();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let (mut stream, _) = l.accept().unwrap();
|
|
||||||
while let Ok(_) = stream.write_all(&*BUF) {}
|
|
||||||
});
|
|
||||||
let mut stream = net::TcpStream::connect(&addr).unwrap();
|
|
||||||
let mut buf = vec![0; CHUNK_SIZE as usize];
|
|
||||||
let start = time::Instant::now();
|
|
||||||
let mut nread = 0;
|
|
||||||
while nread < target {
|
|
||||||
stream.read_exact(&mut buf[..]).unwrap();
|
|
||||||
nread += CHUNK_SIZE as u64;
|
|
||||||
print!(".");
|
|
||||||
stdout().flush().unwrap();
|
|
||||||
}
|
|
||||||
println!("done");
|
|
||||||
let duration = time::Instant::now() - start;
|
|
||||||
println!("TCP: {}MB/s",
|
|
||||||
(target as f64 / (1024f64 * 1024f64)) /
|
|
||||||
(duration.as_secs() as f64 + duration.subsec_nanos() as f64 / 10E9));
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let _ = env_logger::init();
|
|
||||||
let _ = *BUF; // To non-lazily initialize it.
|
|
||||||
bench_tcp(256 << 20);
|
|
||||||
bench_tarpc(256 << 20);
|
|
||||||
}
|
|
||||||
@@ -1,109 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
#![feature(plugin)]
|
|
||||||
#![plugin(tarpc_plugins)]
|
|
||||||
|
|
||||||
#[macro_use]
|
|
||||||
extern crate log;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate tarpc;
|
|
||||||
extern crate bincode;
|
|
||||||
extern crate env_logger;
|
|
||||||
extern crate futures;
|
|
||||||
extern crate tokio_core;
|
|
||||||
|
|
||||||
use bar::FutureServiceExt as BarExt;
|
|
||||||
use baz::FutureServiceExt as BazExt;
|
|
||||||
use std::sync::mpsc;
|
|
||||||
use std::thread;
|
|
||||||
use tarpc::future::server;
|
|
||||||
use tarpc::sync::client;
|
|
||||||
use tarpc::sync::client::ClientExt;
|
|
||||||
use tarpc::util::{FirstSocketAddr, Never};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
mod bar {
|
|
||||||
service! {
|
|
||||||
rpc bar(i: i32) -> i32;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct Bar;
|
|
||||||
impl bar::FutureService for Bar {
|
|
||||||
type BarFut = Result<i32, Never>;
|
|
||||||
|
|
||||||
fn bar(&self, i: i32) -> Self::BarFut {
|
|
||||||
Ok(i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mod baz {
|
|
||||||
service! {
|
|
||||||
rpc baz(s: String) -> String;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone)]
|
|
||||||
struct Baz;
|
|
||||||
impl baz::FutureService for Baz {
|
|
||||||
type BazFut = Result<String, Never>;
|
|
||||||
|
|
||||||
fn baz(&self, s: String) -> Self::BazFut {
|
|
||||||
Ok(format!("Hello, {}!", s))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
macro_rules! pos {
|
|
||||||
() => (concat!(file!(), ":", line!()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn main() {
|
|
||||||
let _ = env_logger::init();
|
|
||||||
let bar_client = {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (handle, server) = Bar.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
tx.send(handle).unwrap();
|
|
||||||
reactor.run(server).unwrap();
|
|
||||||
});
|
|
||||||
let handle = rx.recv().unwrap();
|
|
||||||
bar::SyncClient::connect(handle.addr(), client::Options::default()).unwrap()
|
|
||||||
};
|
|
||||||
|
|
||||||
let baz_client = {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let mut reactor = reactor::Core::new().unwrap();
|
|
||||||
let (handle, server) = Baz.listen("localhost:0".first_socket_addr(),
|
|
||||||
&reactor.handle(),
|
|
||||||
server::Options::default())
|
|
||||||
.unwrap();
|
|
||||||
tx.send(handle).unwrap();
|
|
||||||
reactor.run(server).unwrap();
|
|
||||||
});
|
|
||||||
let handle = rx.recv().unwrap();
|
|
||||||
baz::SyncClient::connect(handle.addr(), client::Options::default()).unwrap()
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
info!("Result: {:?}", bar_client.bar(17));
|
|
||||||
|
|
||||||
let total = 20;
|
|
||||||
for i in 1..(total + 1) {
|
|
||||||
if i % 2 == 0 {
|
|
||||||
info!("Result 1: {:?}", bar_client.bar(i));
|
|
||||||
} else {
|
|
||||||
info!("Result 2: {:?}", baz_client.baz(i.to_string()));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("Done.");
|
|
||||||
}
|
|
||||||
@@ -93,19 +93,19 @@ diff=""
|
|||||||
for file in $(git diff --name-only --cached);
|
for file in $(git diff --name-only --cached);
|
||||||
do
|
do
|
||||||
if [ ${file: -3} == ".rs" ]; then
|
if [ ${file: -3} == ".rs" ]; then
|
||||||
diff="$diff$(rustfmt --skip-children --write-mode=diff $file)"
|
diff="$diff$(rustfmt --edition 2018 --check $file)"
|
||||||
|
if [ $? != 0 ]; then
|
||||||
|
FMTRESULT=1
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
if grep --quiet "^Diff at line" <<< "$diff"; then
|
|
||||||
FMTRESULT=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "${TARPC_SKIP_RUSTFMT}" == 1 ]; then
|
if [ "${TARPC_SKIP_RUSTFMT}" == 1 ]; then
|
||||||
printf "${SKIPPED}\n"$?
|
printf "${SKIPPED}\n"$?
|
||||||
elif [ ${FMTRESULT} != 0 ]; then
|
elif [ ${FMTRESULT} != 0 ]; then
|
||||||
FAILED=1
|
FAILED=1
|
||||||
printf "${FAILURE}\n"
|
printf "${FAILURE}\n"
|
||||||
echo "$diff" | sed 's/Using rustfmt config file.*$/d/'
|
echo "$diff"
|
||||||
else
|
else
|
||||||
printf "${SUCCESS}\n"
|
printf "${SUCCESS}\n"
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -84,18 +84,20 @@ command -v rustup &>/dev/null
|
|||||||
if [ "$?" == 0 ]; then
|
if [ "$?" == 0 ]; then
|
||||||
printf "${SUCCESS}\n"
|
printf "${SUCCESS}\n"
|
||||||
|
|
||||||
|
try_run "Building ... " cargo +stable build --color=always
|
||||||
|
try_run "Testing ... " cargo +stable test --color=always
|
||||||
|
try_run "Testing with all features enabled ... " cargo +stable test --all-features --color=always
|
||||||
|
for EXAMPLE in $(cargo +stable run --example 2>&1 | grep ' ' | awk '{print $1}')
|
||||||
|
do
|
||||||
|
try_run "Running example \"$EXAMPLE\" ... " cargo +stable run --example $EXAMPLE
|
||||||
|
done
|
||||||
|
|
||||||
check_toolchain nightly
|
check_toolchain nightly
|
||||||
if [ ${TOOLCHAIN_RESULT} == 1 ]; then
|
if [ ${TOOLCHAIN_RESULT} != 1 ]; then
|
||||||
exit 1
|
try_run "Running clippy ... " cargo +nightly clippy --color=always -Z unstable-options -- --deny warnings
|
||||||
fi
|
fi
|
||||||
|
|
||||||
try_run "Building ... " cargo build --color=always
|
|
||||||
try_run "Testing ... " cargo test --color=always
|
|
||||||
try_run "Benching ... " cargo bench --color=always
|
|
||||||
|
|
||||||
try_run "Building with tls ... " cargo build --color=always --features tls
|
|
||||||
try_run "Testing with tls ... " cargo test --color=always --features tls
|
|
||||||
try_run "Benching with tls ... " cargo bench --color=always --features tls
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
exit $PREPUSH_RESULT
|
exit $PREPUSH_RESULT
|
||||||
|
|||||||
34
plugins/Cargo.toml
Normal file
34
plugins/Cargo.toml
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
[package]
|
||||||
|
name = "tarpc-plugins"
|
||||||
|
version = "0.13.0"
|
||||||
|
rust-version = "1.56"
|
||||||
|
authors = ["Adam Wright <adam.austin.wright@gmail.com>", "Tim Kuehn <timothy.j.kuehn@gmail.com>"]
|
||||||
|
edition = "2021"
|
||||||
|
license = "MIT"
|
||||||
|
documentation = "https://docs.rs/tarpc-plugins"
|
||||||
|
homepage = "https://github.com/google/tarpc"
|
||||||
|
repository = "https://github.com/google/tarpc"
|
||||||
|
keywords = ["rpc", "network", "server", "api", "microservices"]
|
||||||
|
categories = ["asynchronous", "network-programming"]
|
||||||
|
readme = "../README.md"
|
||||||
|
description = "Proc macros for tarpc."
|
||||||
|
|
||||||
|
[features]
|
||||||
|
serde1 = []
|
||||||
|
|
||||||
|
[badges]
|
||||||
|
travis-ci = { repository = "google/tarpc" }
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
proc-macro2 = "1.0"
|
||||||
|
quote = "1.0"
|
||||||
|
syn = { version = "1.0", features = ["full"] }
|
||||||
|
|
||||||
|
[lib]
|
||||||
|
proc-macro = true
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
assert-type-eq = "0.1.0"
|
||||||
|
futures = "0.3"
|
||||||
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
|
tarpc = { path = "../tarpc", features = ["serde1"] }
|
||||||
9
plugins/LICENSE
Normal file
9
plugins/LICENSE
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright 2016 Google Inc. All Rights Reserved.
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
1
plugins/rustfmt.toml
Normal file
1
plugins/rustfmt.toml
Normal file
@@ -0,0 +1 @@
|
|||||||
|
edition = "2018"
|
||||||
654
plugins/src/lib.rs
Normal file
654
plugins/src/lib.rs
Normal file
@@ -0,0 +1,654 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
#![recursion_limit = "512"]
|
||||||
|
|
||||||
|
extern crate proc_macro;
|
||||||
|
extern crate proc_macro2;
|
||||||
|
extern crate quote;
|
||||||
|
extern crate syn;
|
||||||
|
|
||||||
|
use proc_macro::TokenStream;
|
||||||
|
use proc_macro2::TokenStream as TokenStream2;
|
||||||
|
use quote::{format_ident, quote, ToTokens};
|
||||||
|
use syn::{
|
||||||
|
braced,
|
||||||
|
ext::IdentExt,
|
||||||
|
parenthesized,
|
||||||
|
parse::{Parse, ParseStream},
|
||||||
|
parse_macro_input, parse_quote,
|
||||||
|
spanned::Spanned,
|
||||||
|
token::Comma,
|
||||||
|
Attribute, FnArg, Ident, Lit, LitBool, MetaNameValue, Pat, PatType, ReturnType, Token, Type,
|
||||||
|
Visibility,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Accumulates multiple errors into a result.
|
||||||
|
/// Only use this for recoverable errors, i.e. non-parse errors. Fatal errors should early exit to
|
||||||
|
/// avoid further complications.
|
||||||
|
macro_rules! extend_errors {
|
||||||
|
($errors: ident, $e: expr) => {
|
||||||
|
match $errors {
|
||||||
|
Ok(_) => $errors = Err($e),
|
||||||
|
Err(ref mut errors) => errors.extend($e),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
struct Service {
|
||||||
|
attrs: Vec<Attribute>,
|
||||||
|
vis: Visibility,
|
||||||
|
ident: Ident,
|
||||||
|
rpcs: Vec<RpcMethod>,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct RpcMethod {
|
||||||
|
attrs: Vec<Attribute>,
|
||||||
|
ident: Ident,
|
||||||
|
args: Vec<PatType>,
|
||||||
|
output: ReturnType,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Parse for Service {
|
||||||
|
fn parse(input: ParseStream) -> syn::Result<Self> {
|
||||||
|
let attrs = input.call(Attribute::parse_outer)?;
|
||||||
|
let vis = input.parse()?;
|
||||||
|
input.parse::<Token![trait]>()?;
|
||||||
|
let ident: Ident = input.parse()?;
|
||||||
|
let content;
|
||||||
|
braced!(content in input);
|
||||||
|
let mut rpcs = Vec::<RpcMethod>::new();
|
||||||
|
while !content.is_empty() {
|
||||||
|
rpcs.push(content.parse()?);
|
||||||
|
}
|
||||||
|
let mut ident_errors = Ok(());
|
||||||
|
for rpc in &rpcs {
|
||||||
|
if rpc.ident == "new" {
|
||||||
|
extend_errors!(
|
||||||
|
ident_errors,
|
||||||
|
syn::Error::new(
|
||||||
|
rpc.ident.span(),
|
||||||
|
format!(
|
||||||
|
"method name conflicts with generated fn `{}Client::new`",
|
||||||
|
ident.unraw()
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
if rpc.ident == "serve" {
|
||||||
|
extend_errors!(
|
||||||
|
ident_errors,
|
||||||
|
syn::Error::new(
|
||||||
|
rpc.ident.span(),
|
||||||
|
format!("method name conflicts with generated fn `{ident}::serve`")
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ident_errors?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
attrs,
|
||||||
|
vis,
|
||||||
|
ident,
|
||||||
|
rpcs,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Parse for RpcMethod {
|
||||||
|
fn parse(input: ParseStream) -> syn::Result<Self> {
|
||||||
|
let attrs = input.call(Attribute::parse_outer)?;
|
||||||
|
input.parse::<Token![async]>()?;
|
||||||
|
input.parse::<Token![fn]>()?;
|
||||||
|
let ident = input.parse()?;
|
||||||
|
let content;
|
||||||
|
parenthesized!(content in input);
|
||||||
|
let mut args = Vec::new();
|
||||||
|
let mut errors = Ok(());
|
||||||
|
for arg in content.parse_terminated::<FnArg, Comma>(FnArg::parse)? {
|
||||||
|
match arg {
|
||||||
|
FnArg::Typed(captured) if matches!(&*captured.pat, Pat::Ident(_)) => {
|
||||||
|
args.push(captured);
|
||||||
|
}
|
||||||
|
FnArg::Typed(captured) => {
|
||||||
|
extend_errors!(
|
||||||
|
errors,
|
||||||
|
syn::Error::new(captured.pat.span(), "patterns aren't allowed in RPC args")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
FnArg::Receiver(_) => {
|
||||||
|
extend_errors!(
|
||||||
|
errors,
|
||||||
|
syn::Error::new(arg.span(), "method args cannot start with self")
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
errors?;
|
||||||
|
let output = input.parse()?;
|
||||||
|
input.parse::<Token![;]>()?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
attrs,
|
||||||
|
ident,
|
||||||
|
args,
|
||||||
|
output,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If `derive_serde` meta item is not present, defaults to cfg!(feature = "serde1").
|
||||||
|
// `derive_serde` can only be true when serde1 is enabled.
|
||||||
|
struct DeriveSerde(bool);
|
||||||
|
|
||||||
|
impl Parse for DeriveSerde {
|
||||||
|
fn parse(input: ParseStream) -> syn::Result<Self> {
|
||||||
|
let mut result = Ok(None);
|
||||||
|
let mut derive_serde = Vec::new();
|
||||||
|
let meta_items = input.parse_terminated::<MetaNameValue, Comma>(MetaNameValue::parse)?;
|
||||||
|
for meta in meta_items {
|
||||||
|
if meta.path.segments.len() != 1 {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.span(),
|
||||||
|
"tarpc::service does not support this meta item"
|
||||||
|
)
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let segment = meta.path.segments.first().unwrap();
|
||||||
|
if segment.ident != "derive_serde" {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.span(),
|
||||||
|
"tarpc::service does not support this meta item"
|
||||||
|
)
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
match meta.lit {
|
||||||
|
Lit::Bool(LitBool { value: true, .. }) if cfg!(feature = "serde1") => {
|
||||||
|
result = result.and(Ok(Some(true)))
|
||||||
|
}
|
||||||
|
Lit::Bool(LitBool { value: true, .. }) => {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.span(),
|
||||||
|
"To enable serde, first enable the `serde1` feature of tarpc"
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Lit::Bool(LitBool { value: false, .. }) => result = result.and(Ok(Some(false))),
|
||||||
|
_ => extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
meta.lit.span(),
|
||||||
|
"`derive_serde` expects a value of type `bool`"
|
||||||
|
)
|
||||||
|
),
|
||||||
|
}
|
||||||
|
derive_serde.push(meta);
|
||||||
|
}
|
||||||
|
if derive_serde.len() > 1 {
|
||||||
|
for (i, derive_serde) in derive_serde.iter().enumerate() {
|
||||||
|
extend_errors!(
|
||||||
|
result,
|
||||||
|
syn::Error::new(
|
||||||
|
derive_serde.span(),
|
||||||
|
format!(
|
||||||
|
"`derive_serde` appears more than once (occurrence #{})",
|
||||||
|
i + 1
|
||||||
|
)
|
||||||
|
)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
let derive_serde = result?.unwrap_or(cfg!(feature = "serde1"));
|
||||||
|
Ok(Self(derive_serde))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A helper attribute to avoid a direct dependency on Serde.
|
||||||
|
///
|
||||||
|
/// Adds the following annotations to the annotated item:
|
||||||
|
///
|
||||||
|
/// ```rust
|
||||||
|
/// #[derive(tarpc::serde::Serialize, tarpc::serde::Deserialize)]
|
||||||
|
/// #[serde(crate = "tarpc::serde")]
|
||||||
|
/// # struct Foo;
|
||||||
|
/// ```
|
||||||
|
#[proc_macro_attribute]
|
||||||
|
pub fn derive_serde(_attr: TokenStream, item: TokenStream) -> TokenStream {
|
||||||
|
let mut gen: proc_macro2::TokenStream = quote! {
|
||||||
|
#[derive(tarpc::serde::Serialize, tarpc::serde::Deserialize)]
|
||||||
|
#[serde(crate = "tarpc::serde")]
|
||||||
|
};
|
||||||
|
gen.extend(proc_macro2::TokenStream::from(item));
|
||||||
|
proc_macro::TokenStream::from(gen)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generates:
|
||||||
|
/// - service trait
|
||||||
|
/// - serve fn
|
||||||
|
/// - client stub struct
|
||||||
|
/// - new_stub client factory fn
|
||||||
|
/// - Request and Response enums
|
||||||
|
/// - ResponseFut Future
|
||||||
|
#[proc_macro_attribute]
|
||||||
|
pub fn service(attr: TokenStream, input: TokenStream) -> TokenStream {
|
||||||
|
let derive_serde = parse_macro_input!(attr as DeriveSerde);
|
||||||
|
let unit_type: &Type = &parse_quote!(());
|
||||||
|
let Service {
|
||||||
|
ref attrs,
|
||||||
|
ref vis,
|
||||||
|
ref ident,
|
||||||
|
ref rpcs,
|
||||||
|
} = parse_macro_input!(input as Service);
|
||||||
|
|
||||||
|
let camel_case_fn_names: &Vec<_> = &rpcs
|
||||||
|
.iter()
|
||||||
|
.map(|rpc| snake_to_camel(&rpc.ident.unraw().to_string()))
|
||||||
|
.collect();
|
||||||
|
let args: &[&[PatType]] = &rpcs.iter().map(|rpc| &*rpc.args).collect::<Vec<_>>();
|
||||||
|
let derive_serialize = if derive_serde.0 {
|
||||||
|
Some(
|
||||||
|
quote! {#[derive(tarpc::serde::Serialize, tarpc::serde::Deserialize)]
|
||||||
|
#[serde(crate = "tarpc::serde")]},
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let methods = rpcs.iter().map(|rpc| &rpc.ident).collect::<Vec<_>>();
|
||||||
|
let request_names = methods
|
||||||
|
.iter()
|
||||||
|
.map(|m| format!("{ident}.{m}"))
|
||||||
|
.collect::<Vec<_>>();
|
||||||
|
|
||||||
|
ServiceGenerator {
|
||||||
|
service_ident: ident,
|
||||||
|
client_stub_ident: &format_ident!("{}Stub", ident),
|
||||||
|
server_ident: &format_ident!("Serve{}", ident),
|
||||||
|
client_ident: &format_ident!("{}Client", ident),
|
||||||
|
request_ident: &format_ident!("{}Request", ident),
|
||||||
|
response_ident: &format_ident!("{}Response", ident),
|
||||||
|
vis,
|
||||||
|
args,
|
||||||
|
method_attrs: &rpcs.iter().map(|rpc| &*rpc.attrs).collect::<Vec<_>>(),
|
||||||
|
method_idents: &methods,
|
||||||
|
request_names: &request_names,
|
||||||
|
attrs,
|
||||||
|
rpcs,
|
||||||
|
return_types: &rpcs
|
||||||
|
.iter()
|
||||||
|
.map(|rpc| match rpc.output {
|
||||||
|
ReturnType::Type(_, ref ty) => ty,
|
||||||
|
ReturnType::Default => unit_type,
|
||||||
|
})
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
arg_pats: &args
|
||||||
|
.iter()
|
||||||
|
.map(|args| args.iter().map(|arg| &*arg.pat).collect())
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
camel_case_idents: &rpcs
|
||||||
|
.iter()
|
||||||
|
.zip(camel_case_fn_names.iter())
|
||||||
|
.map(|(rpc, name)| Ident::new(name, rpc.ident.span()))
|
||||||
|
.collect::<Vec<_>>(),
|
||||||
|
derive_serialize: derive_serialize.as_ref(),
|
||||||
|
}
|
||||||
|
.into_token_stream()
|
||||||
|
.into()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Things needed to generate the service items: trait, serve impl, request/response enums, and
|
||||||
|
// the client stub.
|
||||||
|
struct ServiceGenerator<'a> {
|
||||||
|
service_ident: &'a Ident,
|
||||||
|
client_stub_ident: &'a Ident,
|
||||||
|
server_ident: &'a Ident,
|
||||||
|
client_ident: &'a Ident,
|
||||||
|
request_ident: &'a Ident,
|
||||||
|
response_ident: &'a Ident,
|
||||||
|
vis: &'a Visibility,
|
||||||
|
attrs: &'a [Attribute],
|
||||||
|
rpcs: &'a [RpcMethod],
|
||||||
|
camel_case_idents: &'a [Ident],
|
||||||
|
method_idents: &'a [&'a Ident],
|
||||||
|
request_names: &'a [String],
|
||||||
|
method_attrs: &'a [&'a [Attribute]],
|
||||||
|
args: &'a [&'a [PatType]],
|
||||||
|
return_types: &'a [&'a Type],
|
||||||
|
arg_pats: &'a [Vec<&'a Pat>],
|
||||||
|
derive_serialize: Option<&'a TokenStream2>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> ServiceGenerator<'a> {
|
||||||
|
fn trait_service(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
attrs,
|
||||||
|
rpcs,
|
||||||
|
vis,
|
||||||
|
return_types,
|
||||||
|
service_ident,
|
||||||
|
client_stub_ident,
|
||||||
|
request_ident,
|
||||||
|
response_ident,
|
||||||
|
server_ident,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
let rpc_fns = rpcs
|
||||||
|
.iter()
|
||||||
|
.zip(return_types.iter())
|
||||||
|
.map(
|
||||||
|
|(
|
||||||
|
RpcMethod {
|
||||||
|
attrs, ident, args, ..
|
||||||
|
},
|
||||||
|
output,
|
||||||
|
)| {
|
||||||
|
quote! {
|
||||||
|
#( #attrs )*
|
||||||
|
async fn #ident(self, context: tarpc::context::Context, #( #args ),*) -> #output;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
let stub_doc = format!("The stub trait for service [`{service_ident}`].");
|
||||||
|
quote! {
|
||||||
|
#( #attrs )*
|
||||||
|
#vis trait #service_ident: Sized {
|
||||||
|
#( #rpc_fns )*
|
||||||
|
|
||||||
|
/// Returns a serving function to use with
|
||||||
|
/// [InFlightRequest::execute](tarpc::server::InFlightRequest::execute).
|
||||||
|
fn serve(self) -> #server_ident<Self> {
|
||||||
|
#server_ident { service: self }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[doc = #stub_doc]
|
||||||
|
#vis trait #client_stub_ident: tarpc::client::stub::Stub<Req = #request_ident, Resp = #response_ident> {
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S> #client_stub_ident for S
|
||||||
|
where S: tarpc::client::stub::Stub<Req = #request_ident, Resp = #response_ident>
|
||||||
|
{
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn struct_server(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
vis, server_ident, ..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
/// A serving function to use with [tarpc::server::InFlightRequest::execute].
|
||||||
|
#[derive(Clone)]
|
||||||
|
#vis struct #server_ident<S> {
|
||||||
|
service: S,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_serve_for_server(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
request_ident,
|
||||||
|
server_ident,
|
||||||
|
service_ident,
|
||||||
|
response_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
arg_pats,
|
||||||
|
method_idents,
|
||||||
|
request_names,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl<S> tarpc::server::Serve for #server_ident<S>
|
||||||
|
where S: #service_ident
|
||||||
|
{
|
||||||
|
type Req = #request_ident;
|
||||||
|
type Resp = #response_ident;
|
||||||
|
|
||||||
|
fn method(&self, req: &#request_ident) -> Option<&'static str> {
|
||||||
|
Some(match req {
|
||||||
|
#(
|
||||||
|
#request_ident::#camel_case_idents{..} => {
|
||||||
|
#request_names
|
||||||
|
}
|
||||||
|
)*
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn serve(self, ctx: tarpc::context::Context, req: #request_ident)
|
||||||
|
-> Result<#response_ident, tarpc::ServerError> {
|
||||||
|
match req {
|
||||||
|
#(
|
||||||
|
#request_ident::#camel_case_idents{ #( #arg_pats ),* } => {
|
||||||
|
Ok(#response_ident::#camel_case_idents(
|
||||||
|
#service_ident::#method_idents(
|
||||||
|
self.service, ctx, #( #arg_pats ),*
|
||||||
|
).await
|
||||||
|
))
|
||||||
|
}
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn enum_request(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
derive_serialize,
|
||||||
|
vis,
|
||||||
|
request_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
args,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
/// The request sent over the wire from the client to the server.
|
||||||
|
#[allow(missing_docs)]
|
||||||
|
#[derive(Debug)]
|
||||||
|
#derive_serialize
|
||||||
|
#vis enum #request_ident {
|
||||||
|
#( #camel_case_idents{ #( #args ),* } ),*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn enum_response(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
derive_serialize,
|
||||||
|
vis,
|
||||||
|
response_ident,
|
||||||
|
camel_case_idents,
|
||||||
|
return_types,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
/// The response sent over the wire from the server to the client.
|
||||||
|
#[allow(missing_docs)]
|
||||||
|
#[derive(Debug)]
|
||||||
|
#derive_serialize
|
||||||
|
#vis enum #response_ident {
|
||||||
|
#( #camel_case_idents(#return_types) ),*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn struct_client(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
vis,
|
||||||
|
client_ident,
|
||||||
|
request_ident,
|
||||||
|
response_ident,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
#[allow(unused)]
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
/// The client stub that makes RPC calls to the server. All request methods return
|
||||||
|
/// [Futures](std::future::Future).
|
||||||
|
#vis struct #client_ident<
|
||||||
|
Stub = tarpc::client::Channel<#request_ident, #response_ident>
|
||||||
|
>(Stub);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_client_new(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
client_ident,
|
||||||
|
vis,
|
||||||
|
request_ident,
|
||||||
|
response_ident,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl #client_ident {
|
||||||
|
/// Returns a new client stub that sends requests over the given transport.
|
||||||
|
#vis fn new<T>(config: tarpc::client::Config, transport: T)
|
||||||
|
-> tarpc::client::NewClient<
|
||||||
|
Self,
|
||||||
|
tarpc::client::RequestDispatch<#request_ident, #response_ident, T>
|
||||||
|
>
|
||||||
|
where
|
||||||
|
T: tarpc::Transport<tarpc::ClientMessage<#request_ident>, tarpc::Response<#response_ident>>
|
||||||
|
{
|
||||||
|
let new_client = tarpc::client::new(config, transport);
|
||||||
|
tarpc::client::NewClient {
|
||||||
|
client: #client_ident(new_client.client),
|
||||||
|
dispatch: new_client.dispatch,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Stub> From<Stub> for #client_ident<Stub>
|
||||||
|
where Stub: tarpc::client::stub::Stub<
|
||||||
|
Req = #request_ident,
|
||||||
|
Resp = #response_ident>
|
||||||
|
{
|
||||||
|
/// Returns a new client stub that sends requests over the given transport.
|
||||||
|
fn from(stub: Stub) -> Self {
|
||||||
|
#client_ident(stub)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn impl_client_rpc_methods(&self) -> TokenStream2 {
|
||||||
|
let &Self {
|
||||||
|
client_ident,
|
||||||
|
request_ident,
|
||||||
|
response_ident,
|
||||||
|
method_attrs,
|
||||||
|
vis,
|
||||||
|
method_idents,
|
||||||
|
request_names,
|
||||||
|
args,
|
||||||
|
return_types,
|
||||||
|
arg_pats,
|
||||||
|
camel_case_idents,
|
||||||
|
..
|
||||||
|
} = self;
|
||||||
|
|
||||||
|
quote! {
|
||||||
|
impl<Stub> #client_ident<Stub>
|
||||||
|
where Stub: tarpc::client::stub::Stub<
|
||||||
|
Req = #request_ident,
|
||||||
|
Resp = #response_ident>
|
||||||
|
{
|
||||||
|
#(
|
||||||
|
#[allow(unused)]
|
||||||
|
#( #method_attrs )*
|
||||||
|
#vis fn #method_idents(&self, ctx: tarpc::context::Context, #( #args ),*)
|
||||||
|
-> impl std::future::Future<Output = Result<#return_types, tarpc::client::RpcError>> + '_ {
|
||||||
|
let request = #request_ident::#camel_case_idents { #( #arg_pats ),* };
|
||||||
|
let resp = self.0.call(ctx, #request_names, request);
|
||||||
|
async move {
|
||||||
|
match resp.await? {
|
||||||
|
#response_ident::#camel_case_idents(msg) => std::result::Result::Ok(msg),
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
)*
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> ToTokens for ServiceGenerator<'a> {
|
||||||
|
fn to_tokens(&self, output: &mut TokenStream2) {
|
||||||
|
output.extend(vec![
|
||||||
|
self.trait_service(),
|
||||||
|
self.struct_server(),
|
||||||
|
self.impl_serve_for_server(),
|
||||||
|
self.enum_request(),
|
||||||
|
self.enum_response(),
|
||||||
|
self.struct_client(),
|
||||||
|
self.impl_client_new(),
|
||||||
|
self.impl_client_rpc_methods(),
|
||||||
|
])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn snake_to_camel(ident_str: &str) -> String {
|
||||||
|
let mut camel_ty = String::with_capacity(ident_str.len());
|
||||||
|
|
||||||
|
let mut last_char_was_underscore = true;
|
||||||
|
for c in ident_str.chars() {
|
||||||
|
match c {
|
||||||
|
'_' => last_char_was_underscore = true,
|
||||||
|
c if last_char_was_underscore => {
|
||||||
|
camel_ty.extend(c.to_uppercase());
|
||||||
|
last_char_was_underscore = false;
|
||||||
|
}
|
||||||
|
c => camel_ty.extend(c.to_lowercase()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
camel_ty.shrink_to_fit();
|
||||||
|
camel_ty
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn snake_to_camel_basic() {
|
||||||
|
assert_eq!(snake_to_camel("abc_def"), "AbcDef");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn snake_to_camel_underscore_suffix() {
|
||||||
|
assert_eq!(snake_to_camel("abc_def_"), "AbcDef");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn snake_to_camel_underscore_prefix() {
|
||||||
|
assert_eq!(snake_to_camel("_abc_def"), "AbcDef");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn snake_to_camel_underscore_consecutive() {
|
||||||
|
assert_eq!(snake_to_camel("abc__def"), "AbcDef");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn snake_to_camel_capital_in_middle() {
|
||||||
|
assert_eq!(snake_to_camel("aBc_dEf"), "AbcDef");
|
||||||
|
}
|
||||||
44
plugins/tests/server.rs
Normal file
44
plugins/tests/server.rs
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
// these need to be out here rather than inside the function so that the
|
||||||
|
// assert_type_eq macro can pick them up.
|
||||||
|
#[tarpc::service]
|
||||||
|
trait Foo {
|
||||||
|
async fn two_part(s: String, i: i32) -> (String, i32);
|
||||||
|
async fn bar(s: String) -> String;
|
||||||
|
async fn baz();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[test]
|
||||||
|
fn raw_idents_work() {
|
||||||
|
type r#yield = String;
|
||||||
|
|
||||||
|
#[tarpc::service]
|
||||||
|
trait r#trait {
|
||||||
|
async fn r#await(r#struct: r#yield, r#enum: i32) -> (r#yield, i32);
|
||||||
|
async fn r#fn(r#impl: r#yield) -> r#yield;
|
||||||
|
async fn r#async();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn syntax() {
|
||||||
|
#[tarpc::service]
|
||||||
|
trait Syntax {
|
||||||
|
#[deny(warnings)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
async fn TestCamelCaseDoesntConflict();
|
||||||
|
async fn hello() -> String;
|
||||||
|
#[doc = "attr"]
|
||||||
|
async fn attr(s: String) -> String;
|
||||||
|
async fn no_args_no_return();
|
||||||
|
async fn no_args() -> ();
|
||||||
|
async fn one_arg(one: String) -> i32;
|
||||||
|
async fn two_args_no_return(one: String, two: u64);
|
||||||
|
async fn two_args(one: String, two: u64) -> String;
|
||||||
|
async fn no_args_ret_error() -> i32;
|
||||||
|
async fn one_arg_ret_error(one: String) -> String;
|
||||||
|
async fn no_arg_implicit_return_error();
|
||||||
|
#[doc = "attr"]
|
||||||
|
async fn one_arg_implicit_return_error(one: String);
|
||||||
|
}
|
||||||
|
}
|
||||||
80
plugins/tests/service.rs
Normal file
80
plugins/tests/service.rs
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
use tarpc::context;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn att_service_trait() {
|
||||||
|
#[tarpc::service]
|
||||||
|
trait Foo {
|
||||||
|
async fn two_part(s: String, i: i32) -> (String, i32);
|
||||||
|
async fn bar(s: String) -> String;
|
||||||
|
async fn baz();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Foo for () {
|
||||||
|
async fn two_part(self, _: context::Context, s: String, i: i32) -> (String, i32) {
|
||||||
|
(s, i)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn bar(self, _: context::Context, s: String) -> String {
|
||||||
|
s
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn baz(self, _: context::Context) {
|
||||||
|
()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(non_camel_case_types)]
|
||||||
|
#[test]
|
||||||
|
fn raw_idents() {
|
||||||
|
type r#yield = String;
|
||||||
|
|
||||||
|
#[tarpc::service]
|
||||||
|
trait r#trait {
|
||||||
|
async fn r#await(r#struct: r#yield, r#enum: i32) -> (r#yield, i32);
|
||||||
|
async fn r#fn(r#impl: r#yield) -> r#yield;
|
||||||
|
async fn r#async();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl r#trait for () {
|
||||||
|
async fn r#await(
|
||||||
|
self,
|
||||||
|
_: context::Context,
|
||||||
|
r#struct: r#yield,
|
||||||
|
r#enum: i32,
|
||||||
|
) -> (r#yield, i32) {
|
||||||
|
(r#struct, r#enum)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn r#fn(self, _: context::Context, r#impl: r#yield) -> r#yield {
|
||||||
|
r#impl
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn r#async(self, _: context::Context) {
|
||||||
|
()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn syntax() {
|
||||||
|
#[tarpc::service]
|
||||||
|
trait Syntax {
|
||||||
|
#[deny(warnings)]
|
||||||
|
#[allow(non_snake_case)]
|
||||||
|
async fn TestCamelCaseDoesntConflict();
|
||||||
|
async fn hello() -> String;
|
||||||
|
#[doc = "attr"]
|
||||||
|
async fn attr(s: String) -> String;
|
||||||
|
async fn no_args_no_return();
|
||||||
|
async fn no_args() -> ();
|
||||||
|
async fn one_arg(one: String) -> i32;
|
||||||
|
async fn two_args_no_return(one: String, two: u64);
|
||||||
|
async fn two_args(one: String, two: u64) -> String;
|
||||||
|
async fn no_args_ret_error() -> i32;
|
||||||
|
async fn one_arg_ret_error(one: String) -> String;
|
||||||
|
async fn no_arg_implicit_return_error();
|
||||||
|
#[doc = "attr"]
|
||||||
|
async fn one_arg_implicit_return_error(one: String);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
ideal_width = 100
|
|
||||||
reorder_imports = true
|
|
||||||
@@ -1,92 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::{fmt, io};
|
|
||||||
use std::error::Error as StdError;
|
|
||||||
|
|
||||||
/// All errors that can occur during the use of tarpc.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum Error<E> {
|
|
||||||
/// Any IO error.
|
|
||||||
Io(io::Error),
|
|
||||||
/// Error deserializing the server response.
|
|
||||||
///
|
|
||||||
/// Typically this indicates a faulty implementation of `serde::Serialize` or
|
|
||||||
/// `serde::Deserialize`.
|
|
||||||
ResponseDeserialize(::bincode::Error),
|
|
||||||
/// Error deserializing the client request.
|
|
||||||
///
|
|
||||||
/// Typically this indicates a faulty implementation of `serde::Serialize` or
|
|
||||||
/// `serde::Deserialize`.
|
|
||||||
RequestDeserialize(String),
|
|
||||||
/// The server was unable to reply to the rpc for some reason.
|
|
||||||
///
|
|
||||||
/// This is a service-specific error. Its type is individually specified in the
|
|
||||||
/// `service!` macro for each rpc.
|
|
||||||
App(E),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<E: StdError + Deserialize + Serialize + Send + 'static> fmt::Display for Error<E> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
match *self {
|
|
||||||
Error::ResponseDeserialize(ref e) => write!(f, r#"{}: "{}""#, self.description(), e),
|
|
||||||
Error::RequestDeserialize(ref e) => write!(f, r#"{}: "{}""#, self.description(), e),
|
|
||||||
Error::App(ref e) => fmt::Display::fmt(e, f),
|
|
||||||
Error::Io(ref e) => fmt::Display::fmt(e, f),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<E: StdError + Deserialize + Serialize + Send + 'static> StdError for Error<E> {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
match *self {
|
|
||||||
Error::ResponseDeserialize(_) => "The client failed to deserialize the response.",
|
|
||||||
Error::RequestDeserialize(_) => "The server failed to deserialize the request.",
|
|
||||||
Error::App(ref e) => e.description(),
|
|
||||||
Error::Io(ref e) => e.description(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn cause(&self) -> Option<&StdError> {
|
|
||||||
match *self {
|
|
||||||
Error::ResponseDeserialize(ref e) => e.cause(),
|
|
||||||
Error::RequestDeserialize(_) |
|
|
||||||
Error::App(_) => None,
|
|
||||||
Error::Io(ref e) => e.cause(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<E> From<io::Error> for Error<E> {
|
|
||||||
fn from(err: io::Error) -> Self {
|
|
||||||
Error::Io(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<E> From<WireError<E>> for Error<E> {
|
|
||||||
fn from(err: WireError<E>) -> Self {
|
|
||||||
match err {
|
|
||||||
WireError::RequestDeserialize(s) => Error::RequestDeserialize(s),
|
|
||||||
WireError::App(e) => Error::App(e),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A serializable, server-supplied error.
|
|
||||||
#[doc(hidden)]
|
|
||||||
#[derive(Deserialize, Serialize, Clone, Debug)]
|
|
||||||
pub enum WireError<E> {
|
|
||||||
/// Server-side error in deserializing the client request.
|
|
||||||
RequestDeserialize(String),
|
|
||||||
/// The server was unable to reply to the rpc for some reason.
|
|
||||||
App(E),
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Convert `native_tls::Error` to `std::io::Error`
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
pub fn native_to_io(e: ::native_tls::Error) -> io::Error {
|
|
||||||
io::Error::new(io::ErrorKind::Other, e)
|
|
||||||
}
|
|
||||||
@@ -1,262 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
use {REMOTE, bincode};
|
|
||||||
use future::server::Response;
|
|
||||||
use futures::{self, Future, future};
|
|
||||||
use protocol::Proto;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::fmt;
|
|
||||||
use std::io;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use stream_type::StreamType;
|
|
||||||
use tokio_core::net::TcpStream;
|
|
||||||
use tokio_core::reactor;
|
|
||||||
use tokio_proto::BindClient as ProtoBindClient;
|
|
||||||
use tokio_proto::multiplex::ClientService;
|
|
||||||
use tokio_service::Service;
|
|
||||||
|
|
||||||
cfg_if! {
|
|
||||||
if #[cfg(feature = "tls")] {
|
|
||||||
use errors::native_to_io;
|
|
||||||
use tls::client::Context;
|
|
||||||
use tokio_tls::TlsConnectorExt;
|
|
||||||
} else {}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Additional options to configure how the client connects and operates.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Options {
|
|
||||||
/// Max packet size in bytes.
|
|
||||||
max_payload_size: u64,
|
|
||||||
reactor: Option<Reactor>,
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
tls_ctx: Option<Context>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Options {
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
fn default() -> Self {
|
|
||||||
Options {
|
|
||||||
max_payload_size: 2 << 20,
|
|
||||||
reactor: None,
|
|
||||||
tls_ctx: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
fn default() -> Self {
|
|
||||||
Options {
|
|
||||||
max_payload_size: 2 << 20,
|
|
||||||
reactor: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Options {
|
|
||||||
/// Set the max payload size in bytes. The default is 2 << 20 (2 MiB).
|
|
||||||
pub fn max_payload_size(mut self, bytes: u64) -> Self {
|
|
||||||
self.max_payload_size = bytes;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Drive using the given reactor handle.
|
|
||||||
pub fn handle(mut self, handle: reactor::Handle) -> Self {
|
|
||||||
self.reactor = Some(Reactor::Handle(handle));
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Drive using the given reactor remote.
|
|
||||||
pub fn remote(mut self, remote: reactor::Remote) -> Self {
|
|
||||||
self.reactor = Some(Reactor::Remote(remote));
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Connect using the given `Context`
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
pub fn tls(mut self, tls_ctx: Context) -> Self {
|
|
||||||
self.tls_ctx = Some(tls_ctx);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
enum Reactor {
|
|
||||||
Handle(reactor::Handle),
|
|
||||||
Remote(reactor::Remote),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Reactor {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
const HANDLE: &'static &'static str = &"Reactor::Handle";
|
|
||||||
const HANDLE_INNER: &'static &'static str = &"Handle { .. }";
|
|
||||||
const REMOTE: &'static &'static str = &"Reactor::Remote";
|
|
||||||
const REMOTE_INNER: &'static &'static str = &"Remote { .. }";
|
|
||||||
|
|
||||||
match *self {
|
|
||||||
Reactor::Handle(_) => f.debug_tuple(HANDLE).field(HANDLE_INNER).finish(),
|
|
||||||
Reactor::Remote(_) => f.debug_tuple(REMOTE).field(REMOTE_INNER).finish(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub struct Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + 'static,
|
|
||||||
Resp: Deserialize + 'static,
|
|
||||||
E: Deserialize + 'static
|
|
||||||
{
|
|
||||||
inner: ClientService<StreamType, Proto<Req, Response<Resp, E>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> Clone for Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + 'static,
|
|
||||||
Resp: Deserialize + 'static,
|
|
||||||
E: Deserialize + 'static
|
|
||||||
{
|
|
||||||
fn clone(&self) -> Self {
|
|
||||||
Client { inner: self.inner.clone() }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> Service for Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + Sync + Send + 'static,
|
|
||||||
Resp: Deserialize + Sync + Send + 'static,
|
|
||||||
E: Deserialize + Sync + Send + 'static
|
|
||||||
{
|
|
||||||
type Request = Req;
|
|
||||||
type Response = Resp;
|
|
||||||
type Error = ::Error<E>;
|
|
||||||
type Future = ResponseFuture<Req, Resp, E>;
|
|
||||||
|
|
||||||
fn call(&self, request: Self::Request) -> Self::Future {
|
|
||||||
fn identity<T>(t: T) -> T {
|
|
||||||
t
|
|
||||||
}
|
|
||||||
self.inner
|
|
||||||
.call(request)
|
|
||||||
.map(Self::map_err as _)
|
|
||||||
.map_err(::Error::from as _)
|
|
||||||
.and_then(identity as _)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + 'static,
|
|
||||||
Resp: Deserialize + 'static,
|
|
||||||
E: Deserialize + 'static
|
|
||||||
{
|
|
||||||
fn bind(handle: &reactor::Handle, tcp: StreamType, max_payload_size: u64) -> Self
|
|
||||||
where Req: Serialize + Sync + Send + 'static,
|
|
||||||
Resp: Deserialize + Sync + Send + 'static,
|
|
||||||
E: Deserialize + Sync + Send + 'static
|
|
||||||
{
|
|
||||||
let inner = Proto::new(max_payload_size).bind_client(&handle, tcp);
|
|
||||||
Client { inner }
|
|
||||||
}
|
|
||||||
|
|
||||||
fn map_err(resp: WireResponse<Resp, E>) -> Result<Resp, ::Error<E>> {
|
|
||||||
resp.map(|r| r.map_err(::Error::from))
|
|
||||||
.map_err(::Error::ResponseDeserialize)
|
|
||||||
.and_then(|r| r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> fmt::Debug for Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + 'static,
|
|
||||||
Resp: Deserialize + 'static,
|
|
||||||
E: Deserialize + 'static
|
|
||||||
{
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
write!(f, "Client {{ .. }}")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Extension methods for clients.
|
|
||||||
pub trait ClientExt: Sized {
|
|
||||||
/// The type of the future returned when calling `connect`.
|
|
||||||
type ConnectFut: Future<Item = Self, Error = io::Error>;
|
|
||||||
|
|
||||||
/// Connects to a server located at the given address, using the given options.
|
|
||||||
fn connect(addr: SocketAddr, options: Options) -> Self::ConnectFut;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A future that resolves to a `Client` or an `io::Error`.
|
|
||||||
pub type ConnectFuture<Req, Resp, E> =
|
|
||||||
futures::Flatten<futures::MapErr<futures::Oneshot<io::Result<Client<Req, Resp, E>>>,
|
|
||||||
fn(futures::Canceled) -> io::Error>>;
|
|
||||||
|
|
||||||
impl<Req, Resp, E> ClientExt for Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + Sync + Send + 'static,
|
|
||||||
Resp: Deserialize + Sync + Send + 'static,
|
|
||||||
E: Deserialize + Sync + Send + 'static
|
|
||||||
{
|
|
||||||
type ConnectFut = ConnectFuture<Req, Resp, E>;
|
|
||||||
|
|
||||||
fn connect(addr: SocketAddr, options: Options) -> Self::ConnectFut {
|
|
||||||
// we need to do this for tls because we need to avoid moving the entire `Options`
|
|
||||||
// struct into the `setup` closure, since `Reactor` is not `Send`.
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
let mut options = options;
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
let tls_ctx = options.tls_ctx.take();
|
|
||||||
|
|
||||||
let max_payload_size = options.max_payload_size;
|
|
||||||
|
|
||||||
let connect = move |handle: &reactor::Handle| {
|
|
||||||
let handle2 = handle.clone();
|
|
||||||
TcpStream::connect(&addr, handle)
|
|
||||||
.and_then(move |socket| {
|
|
||||||
// TODO(https://github.com/tokio-rs/tokio-proto/issues/132): move this into the
|
|
||||||
// ServerProto impl
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
match tls_ctx {
|
|
||||||
Some(tls_ctx) => {
|
|
||||||
future::Either::A(tls_ctx.tls_connector
|
|
||||||
.connect_async(&tls_ctx.domain, socket)
|
|
||||||
.map(StreamType::Tls)
|
|
||||||
.map_err(native_to_io))
|
|
||||||
}
|
|
||||||
None => future::Either::B(future::ok(StreamType::Tcp(socket))),
|
|
||||||
}
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
future::ok(StreamType::Tcp(socket))
|
|
||||||
})
|
|
||||||
.map(move |tcp| Client::bind(&handle2, tcp, max_payload_size))
|
|
||||||
};
|
|
||||||
let (tx, rx) = futures::oneshot();
|
|
||||||
let setup = move |handle: &reactor::Handle| {
|
|
||||||
connect(handle).then(move |result| {
|
|
||||||
// If send fails it means the client no longer cared about connecting.
|
|
||||||
let _ = tx.send(result);
|
|
||||||
Ok(())
|
|
||||||
})
|
|
||||||
};
|
|
||||||
|
|
||||||
match options.reactor {
|
|
||||||
Some(Reactor::Handle(handle)) => {
|
|
||||||
handle.spawn(setup(&handle));
|
|
||||||
}
|
|
||||||
Some(Reactor::Remote(remote)) => {
|
|
||||||
remote.spawn(setup);
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
REMOTE.spawn(setup);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fn panic(canceled: futures::Canceled) -> io::Error {
|
|
||||||
unreachable!(canceled)
|
|
||||||
}
|
|
||||||
rx.map_err(panic as _).flatten()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ResponseFuture<Req, Resp, E> =
|
|
||||||
futures::AndThen<futures::MapErr<
|
|
||||||
futures::Map<<ClientService<StreamType, Proto<Req, Response<Resp, E>>> as Service>::Future,
|
|
||||||
fn(WireResponse<Resp, E>) -> Result<Resp, ::Error<E>>>,
|
|
||||||
fn(io::Error) -> ::Error<E>>,
|
|
||||||
Result<Resp, ::Error<E>>,
|
|
||||||
fn(Result<Resp, ::Error<E>>) -> Result<Resp, ::Error<E>>>;
|
|
||||||
|
|
||||||
type WireResponse<R, E> = Result<Response<R, E>, bincode::Error>;
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
/// Provides the base client stubs used by the service macro.
|
|
||||||
pub mod client;
|
|
||||||
/// Provides the base server boilerplate used by service implementations.
|
|
||||||
pub mod server;
|
|
||||||
@@ -1,76 +0,0 @@
|
|||||||
use futures::unsync;
|
|
||||||
use std::io;
|
|
||||||
use tokio_service::{NewService, Service};
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum Action {
|
|
||||||
Increment,
|
|
||||||
Decrement,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct Tracker {
|
|
||||||
pub tx: unsync::mpsc::UnboundedSender<Action>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Tracker {
|
|
||||||
pub fn pair() -> (Self, unsync::mpsc::UnboundedReceiver<Action>) {
|
|
||||||
let (tx, rx) = unsync::mpsc::unbounded();
|
|
||||||
(Self { tx }, rx)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn increment(&self) {
|
|
||||||
let _ = self.tx.send(Action::Increment);
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn decrement(&self) {
|
|
||||||
debug!("Closing connection");
|
|
||||||
let _ = self.tx.send(Action::Decrement);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct TrackingService<S> {
|
|
||||||
pub service: S,
|
|
||||||
pub tracker: Tracker,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct TrackingNewService<S> {
|
|
||||||
pub new_service: S,
|
|
||||||
pub connection_tracker: Tracker,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: Service> Service for TrackingService<S> {
|
|
||||||
type Request = S::Request;
|
|
||||||
type Response = S::Response;
|
|
||||||
type Error = S::Error;
|
|
||||||
type Future = S::Future;
|
|
||||||
|
|
||||||
fn call(&self, req: Self::Request) -> Self::Future {
|
|
||||||
trace!("Calling service.");
|
|
||||||
self.service.call(req)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S> Drop for TrackingService<S> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
debug!("Dropping ConnnectionTrackingService.");
|
|
||||||
self.tracker.decrement();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: NewService> NewService for TrackingNewService<S> {
|
|
||||||
type Request = S::Request;
|
|
||||||
type Response = S::Response;
|
|
||||||
type Error = S::Error;
|
|
||||||
type Instance = TrackingService<S::Instance>;
|
|
||||||
|
|
||||||
fn new_service(&self) -> io::Result<Self::Instance> {
|
|
||||||
self.connection_tracker.increment();
|
|
||||||
Ok(TrackingService {
|
|
||||||
service: self.new_service.new_service()?,
|
|
||||||
tracker: self.connection_tracker.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,448 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
use {bincode, net2};
|
|
||||||
use errors::WireError;
|
|
||||||
use futures::{Async, Future, Poll, Stream, future as futures};
|
|
||||||
use protocol::Proto;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::fmt;
|
|
||||||
use std::io;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use stream_type::StreamType;
|
|
||||||
use tokio_io::{AsyncRead, AsyncWrite};
|
|
||||||
use tokio_core::net::{Incoming, TcpListener, TcpStream};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
use tokio_proto::BindServer;
|
|
||||||
use tokio_service::NewService;
|
|
||||||
|
|
||||||
mod connection;
|
|
||||||
mod shutdown;
|
|
||||||
|
|
||||||
cfg_if! {
|
|
||||||
if #[cfg(feature = "tls")] {
|
|
||||||
use native_tls::{self, TlsAcceptor};
|
|
||||||
use tokio_tls::{AcceptAsync, TlsAcceptorExt, TlsStream};
|
|
||||||
use errors::native_to_io;
|
|
||||||
} else {}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub use self::shutdown::{Shutdown, ShutdownFuture};
|
|
||||||
|
|
||||||
/// A handle to a bound server.
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct Handle {
|
|
||||||
addr: SocketAddr,
|
|
||||||
shutdown: Shutdown,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Handle {
|
|
||||||
/// Returns a hook for shutting down the server.
|
|
||||||
pub fn shutdown(&self) -> &Shutdown {
|
|
||||||
&self.shutdown
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The socket address the server is bound to.
|
|
||||||
pub fn addr(&self) -> SocketAddr {
|
|
||||||
self.addr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
enum Acceptor {
|
|
||||||
Tcp,
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
Tls(TlsAcceptor),
|
|
||||||
}
|
|
||||||
|
|
||||||
struct Accept {
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
inner: futures::Either<futures::MapErr<futures::Map<AcceptAsync<TcpStream>,
|
|
||||||
fn(TlsStream<TcpStream>) -> StreamType>,
|
|
||||||
fn(native_tls::Error) -> io::Error>,
|
|
||||||
futures::FutureResult<StreamType, io::Error>>,
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
inner: futures::FutureResult<StreamType, io::Error>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Future for Accept {
|
|
||||||
type Item = StreamType;
|
|
||||||
type Error = io::Error;
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
|
||||||
self.inner.poll()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Acceptor {
|
|
||||||
// TODO(https://github.com/tokio-rs/tokio-proto/issues/132): move this into the ServerProto impl
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
fn accept(&self, socket: TcpStream) -> Accept {
|
|
||||||
Accept {
|
|
||||||
inner: match *self {
|
|
||||||
Acceptor::Tls(ref tls_acceptor) => {
|
|
||||||
futures::Either::A(tls_acceptor.accept_async(socket)
|
|
||||||
.map(StreamType::Tls as _)
|
|
||||||
.map_err(native_to_io))
|
|
||||||
}
|
|
||||||
Acceptor::Tcp => futures::Either::B(futures::ok(StreamType::Tcp(socket))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
fn accept(&self, socket: TcpStream) -> Accept {
|
|
||||||
Accept {
|
|
||||||
inner: futures::ok(StreamType::Tcp(socket))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
impl From<Options> for Acceptor {
|
|
||||||
fn from(options: Options) -> Self {
|
|
||||||
match options.tls_acceptor {
|
|
||||||
Some(tls_acceptor) => Acceptor::Tls(tls_acceptor),
|
|
||||||
None => Acceptor::Tcp,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
impl From<Options> for Acceptor {
|
|
||||||
fn from(_: Options) -> Self {
|
|
||||||
Acceptor::Tcp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Acceptor {
|
|
||||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
use self::Acceptor::*;
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
const TLS: &'static &'static str = &"TlsAcceptor { .. }";
|
|
||||||
|
|
||||||
match *self {
|
|
||||||
Tcp => fmt.debug_tuple("Acceptor::Tcp").finish(),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
Tls(_) => fmt.debug_tuple("Acceptlr::Tls").field(TLS).finish(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Accept {
|
|
||||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
fmt.debug_struct("Accept").finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
struct AcceptStream<S> {
|
|
||||||
stream: S,
|
|
||||||
acceptor: Acceptor,
|
|
||||||
future: Option<Accept>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S> Stream for AcceptStream<S>
|
|
||||||
where S: Stream<Item=(TcpStream, SocketAddr), Error = io::Error>,
|
|
||||||
{
|
|
||||||
type Item = <Accept as Future>::Item;
|
|
||||||
type Error = io::Error;
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<Option<Self::Item>, io::Error> {
|
|
||||||
if self.future.is_none() {
|
|
||||||
let stream = match try_ready!(self.stream.poll()) {
|
|
||||||
None => return Ok(Async::Ready(None)),
|
|
||||||
Some((stream, _)) => stream,
|
|
||||||
};
|
|
||||||
self.future = Some(self.acceptor.accept(stream));
|
|
||||||
}
|
|
||||||
assert!(self.future.is_some());
|
|
||||||
match self.future.as_mut().unwrap().poll() {
|
|
||||||
Ok(Async::Ready(e)) => {
|
|
||||||
self.future = None;
|
|
||||||
Ok(Async::Ready(Some(e)))
|
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
self.future = None;
|
|
||||||
Err(e)
|
|
||||||
}
|
|
||||||
Ok(Async::NotReady) => Ok(Async::NotReady)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Additional options to configure how the server operates.
|
|
||||||
pub struct Options {
|
|
||||||
/// Max packet size in bytes.
|
|
||||||
max_payload_size: u64,
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
tls_acceptor: Option<TlsAcceptor>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Options {
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
fn default() -> Self {
|
|
||||||
Options {
|
|
||||||
max_payload_size: 2 << 20,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
fn default() -> Self {
|
|
||||||
Options {
|
|
||||||
max_payload_size: 2 << 20,
|
|
||||||
tls_acceptor: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Options {
|
|
||||||
/// Set the max payload size in bytes. The default is 2 << 20 (2 MiB).
|
|
||||||
pub fn max_payload_size(mut self, bytes: u64) -> Self {
|
|
||||||
self.max_payload_size = bytes;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Sets the `TlsAcceptor`
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
pub fn tls(mut self, tls_acceptor: TlsAcceptor) -> Self {
|
|
||||||
self.tls_acceptor = Some(tls_acceptor);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Options {
|
|
||||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
const SOME: &'static &'static str = &"Some(_)";
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
const NONE: &'static &'static str = &"None";
|
|
||||||
|
|
||||||
let mut debug_struct = fmt.debug_struct("Options");
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
debug_struct.field("tls_acceptor", if self.tls_acceptor.is_some() { SOME } else { NONE });
|
|
||||||
debug_struct.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A message from server to client.
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub type Response<T, E> = Result<T, WireError<E>>;
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub fn listen<S, Req, Resp, E>(new_service: S,
|
|
||||||
addr: SocketAddr,
|
|
||||||
handle: &reactor::Handle,
|
|
||||||
options: Options)
|
|
||||||
-> io::Result<(Handle, Listen<S, Req, Resp, E>)>
|
|
||||||
where S: NewService<Request = Result<Req, bincode::Error>,
|
|
||||||
Response = Response<Resp, E>,
|
|
||||||
Error = io::Error> + 'static,
|
|
||||||
Req: Deserialize + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
E: Serialize + 'static
|
|
||||||
{
|
|
||||||
let (addr, shutdown, server) = listen_with(
|
|
||||||
new_service, addr, handle, options.max_payload_size, Acceptor::from(options))?;
|
|
||||||
Ok((Handle {
|
|
||||||
addr: addr,
|
|
||||||
shutdown: shutdown,
|
|
||||||
},
|
|
||||||
server))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Spawns a service that binds to the given address using the given handle.
|
|
||||||
fn listen_with<S, Req, Resp, E>(new_service: S,
|
|
||||||
addr: SocketAddr,
|
|
||||||
handle: &reactor::Handle,
|
|
||||||
max_payload_size: u64,
|
|
||||||
acceptor: Acceptor)
|
|
||||||
-> io::Result<(SocketAddr, Shutdown, Listen<S, Req, Resp, E>)>
|
|
||||||
where S: NewService<Request = Result<Req, bincode::Error>,
|
|
||||||
Response = Response<Resp, E>,
|
|
||||||
Error = io::Error> + 'static,
|
|
||||||
Req: Deserialize + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
E: Serialize + 'static
|
|
||||||
{
|
|
||||||
let listener = listener(&addr, handle)?;
|
|
||||||
let addr = listener.local_addr()?;
|
|
||||||
debug!("Listening on {}.", addr);
|
|
||||||
|
|
||||||
let handle = handle.clone();
|
|
||||||
let (connection_tracker, shutdown, shutdown_future) = shutdown::Watcher::triple();
|
|
||||||
let server = BindStream {
|
|
||||||
handle: handle,
|
|
||||||
new_service: connection::TrackingNewService {
|
|
||||||
connection_tracker: connection_tracker,
|
|
||||||
new_service: new_service,
|
|
||||||
},
|
|
||||||
stream: AcceptStream {
|
|
||||||
stream: listener.incoming(),
|
|
||||||
acceptor: acceptor,
|
|
||||||
future: None,
|
|
||||||
},
|
|
||||||
max_payload_size: max_payload_size,
|
|
||||||
};
|
|
||||||
|
|
||||||
let server = AlwaysOkUnit(server.select(shutdown_future));
|
|
||||||
Ok((addr, shutdown, Listen { inner: server }))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn listener(addr: &SocketAddr, handle: &reactor::Handle) -> io::Result<TcpListener> {
|
|
||||||
const PENDING_CONNECTION_BACKLOG: i32 = 1024;
|
|
||||||
|
|
||||||
let builder = match *addr {
|
|
||||||
SocketAddr::V4(_) => net2::TcpBuilder::new_v4(),
|
|
||||||
SocketAddr::V6(_) => net2::TcpBuilder::new_v6(),
|
|
||||||
}?;
|
|
||||||
configure_tcp(&builder)?;
|
|
||||||
builder.reuse_address(true)?;
|
|
||||||
builder.bind(addr)?
|
|
||||||
.listen(PENDING_CONNECTION_BACKLOG)
|
|
||||||
.and_then(|l| TcpListener::from_listener(l, addr, handle))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(unix)]
|
|
||||||
fn configure_tcp(tcp: &net2::TcpBuilder) -> io::Result<()> {
|
|
||||||
use net2::unix::UnixTcpBuilderExt;
|
|
||||||
tcp.reuse_port(true)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(windows)]
|
|
||||||
fn configure_tcp(_tcp: &net2::TcpBuilder) -> io::Result<()> {
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
struct BindStream<S, St> {
|
|
||||||
handle: reactor::Handle,
|
|
||||||
new_service: connection::TrackingNewService<S>,
|
|
||||||
stream: St,
|
|
||||||
max_payload_size: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, St> fmt::Debug for BindStream<S, St>
|
|
||||||
where S: fmt::Debug,
|
|
||||||
St: fmt::Debug,
|
|
||||||
{
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
const HANDLE: &'static &'static str = &"Handle { .. }";
|
|
||||||
f.debug_struct("BindStream")
|
|
||||||
.field("handle", HANDLE)
|
|
||||||
.field("new_service", &self.new_service)
|
|
||||||
.field("stream", &self.stream)
|
|
||||||
.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Req, Resp, E, I, St> BindStream<S, St>
|
|
||||||
where S: NewService<Request = Result<Req, bincode::Error>,
|
|
||||||
Response = Response<Resp, E>,
|
|
||||||
Error = io::Error> + 'static,
|
|
||||||
Req: Deserialize + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
E: Serialize + 'static,
|
|
||||||
I: AsyncRead + AsyncWrite + 'static,
|
|
||||||
St: Stream<Item=I, Error=io::Error>,
|
|
||||||
{
|
|
||||||
fn bind_each(&mut self) -> Poll<(), io::Error> {
|
|
||||||
loop {
|
|
||||||
match try!(self.stream.poll()) {
|
|
||||||
Async::Ready(Some(socket)) => {
|
|
||||||
Proto::new(self.max_payload_size)
|
|
||||||
.bind_server(&self.handle, socket, self.new_service.new_service()?);
|
|
||||||
}
|
|
||||||
Async::Ready(None) => return Ok(Async::Ready(())),
|
|
||||||
Async::NotReady => return Ok(Async::NotReady),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Req, Resp, E, I, St> Future for BindStream<S, St>
|
|
||||||
where S: NewService<Request = Result<Req, bincode::Error>,
|
|
||||||
Response = Response<Resp, E>,
|
|
||||||
Error = io::Error> + 'static,
|
|
||||||
Req: Deserialize + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
E: Serialize + 'static,
|
|
||||||
I: AsyncRead + AsyncWrite + 'static,
|
|
||||||
St: Stream<Item=I, Error=io::Error>,
|
|
||||||
{
|
|
||||||
type Item = ();
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
|
||||||
match self.bind_each() {
|
|
||||||
Ok(Async::Ready(())) => Ok(Async::Ready(())),
|
|
||||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
|
||||||
Err(e) => {
|
|
||||||
error!("While processing incoming connections: {}", e);
|
|
||||||
Err(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The future representing a running server.
///
/// Resolves when either the accept loop completes or a requested shutdown
/// finishes, whichever happens first.
#[doc(hidden)]
pub struct Listen<S, Req, Resp, E>
    where S: NewService<Request = Result<Req, bincode::Error>,
                        Response = Response<Resp, E>,
                        Error = io::Error> + 'static,
          Req: Deserialize + 'static,
          Resp: Serialize + 'static,
          E: Serialize + 'static
{
    // The accept loop raced against the shutdown watcher; any outcome from
    // either side is collapsed into successful completion.
    inner: AlwaysOkUnit<futures::Select<BindStream<S, AcceptStream<Incoming>>,
                                        shutdown::Watcher>>,
}
|
|
||||||
|
|
||||||
impl<S, Req, Resp, E> Future for Listen<S, Req, Resp, E>
|
|
||||||
where S: NewService<Request = Result<Req, bincode::Error>,
|
|
||||||
Response = Response<Resp, E>,
|
|
||||||
Error = io::Error> + 'static,
|
|
||||||
Req: Deserialize + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
E: Serialize + 'static
|
|
||||||
{
|
|
||||||
type Item = ();
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<(), ()> {
|
|
||||||
self.inner.poll()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S, Req, Resp, E> fmt::Debug for Listen<S, Req, Resp, E>
|
|
||||||
where S: NewService<Request = Result<Req, bincode::Error>,
|
|
||||||
Response = Response<Resp, E>,
|
|
||||||
Error = io::Error> + 'static,
|
|
||||||
Req: Deserialize + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
E: Serialize + 'static,
|
|
||||||
{
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
f.debug_struct("Listen").finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Future combinator that maps any outcome of `F` — success or failure —
/// to `Ok(())`.
#[derive(Debug)]
struct AlwaysOkUnit<F>(F);
|
|
||||||
|
|
||||||
impl<F> Future for AlwaysOkUnit<F>
|
|
||||||
where F: Future,
|
|
||||||
{
|
|
||||||
type Item = ();
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<(), ()> {
|
|
||||||
match self.0.poll() {
|
|
||||||
Ok(Async::Ready(_)) | Err(_) => Ok(Async::Ready(())),
|
|
||||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,181 +0,0 @@
|
|||||||
use futures::{Async, Future, Poll, Stream, future as futures, stream};
|
|
||||||
use futures::sync::{mpsc, oneshot};
|
|
||||||
use futures::unsync;
|
|
||||||
|
|
||||||
use super::{AlwaysOkUnit, connection};
|
|
||||||
|
|
||||||
/// A hook to shut down a running server.
///
/// Cloneable; any clone may trigger shutdown. Once shutdown has been
/// initiated the channel closes, and later requests resolve immediately.
#[derive(Clone, Debug)]
pub struct Shutdown {
    // Delivers the requester's completion-notification channel to the
    // server's shutdown watcher.
    tx: mpsc::UnboundedSender<oneshot::Sender<()>>,
}
|
|
||||||
|
|
||||||
/// A future that resolves when server shutdown completes.
#[derive(Debug)]
pub struct ShutdownFuture {
    // Either an immediately-ready future (shutdown was already in progress
    // when requested) or a receiver that fires once the watcher finishes
    // shutting down.
    inner: futures::Either<futures::FutureResult<(), ()>,
                           AlwaysOkUnit<oneshot::Receiver<()>>>,
}
|
|
||||||
|
|
||||||
impl Future for ShutdownFuture {
|
|
||||||
type Item = ();
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<(), ()> {
|
|
||||||
self.inner.poll()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Shutdown {
|
|
||||||
/// Initiates an orderly server shutdown.
|
|
||||||
///
|
|
||||||
/// First, the server enters lameduck mode, in which
|
|
||||||
/// existing connections are honored but no new connections are accepted. Then, once all
|
|
||||||
/// connections are closed, it initates total shutdown.
|
|
||||||
///
|
|
||||||
/// The returned future resolves when the server is completely shut down.
|
|
||||||
pub fn shutdown(&self) -> ShutdownFuture {
|
|
||||||
let (tx, rx) = oneshot::channel();
|
|
||||||
let inner = if let Err(_) = self.tx.send(tx) {
|
|
||||||
trace!("Server already initiated shutdown.");
|
|
||||||
futures::Either::A(futures::ok(()))
|
|
||||||
} else {
|
|
||||||
futures::Either::B(AlwaysOkUnit(rx))
|
|
||||||
};
|
|
||||||
ShutdownFuture { inner: inner }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Server-side state machine that tracks open connections and coordinates
/// lameduck shutdown.
#[derive(Debug)]
pub struct Watcher {
    // Incoming shutdown requests; only the first is honored (`take(1)`).
    shutdown_rx: stream::Take<mpsc::UnboundedReceiver<oneshot::Sender<()>>>,
    // Connection open/close notifications used to maintain `num_connections`.
    connections: unsync::mpsc::UnboundedReceiver<connection::Action>,
    // An error observed while a shutdown event was being reported, deferred
    // so it can be surfaced on the next poll.
    queued_error: Option<()>,
    // Pending notification channel for the caller awaiting shutdown.
    shutdown: Option<oneshot::Sender<()>>,
    // Set once shutdown has completed; the watcher then stays terminated.
    done: bool,
    // Count of currently open connections.
    num_connections: u64,
}
|
|
||||||
|
|
||||||
impl Watcher {
    /// Creates the connection tracker, the public shutdown hook, and the
    /// watcher future that ties them together.
    pub fn triple() -> (connection::Tracker, Shutdown, Self) {
        let (connection_tx, connections) = connection::Tracker::pair();
        let (shutdown_tx, shutdown_rx) = mpsc::unbounded();
        (connection_tx,
         Shutdown { tx: shutdown_tx },
         Watcher {
             // Only the first shutdown request matters; drop the rest.
             shutdown_rx: shutdown_rx.take(1),
             connections: connections,
             queued_error: None,
             shutdown: None,
             done: false,
             num_connections: 0,
         })
    }

    /// Applies a single connection-count adjustment.
    fn process_connection(&mut self, action: connection::Action) {
        match action {
            connection::Action::Increment => self.num_connections += 1,
            connection::Action::Decrement => self.num_connections -= 1,
        }
    }

    /// Polls for a shutdown request, stashing the requester's completion
    /// channel when one arrives. Yields `None` once the request stream ends.
    fn poll_shutdown_requests(&mut self) -> Poll<Option<()>, ()> {
        Ok(Async::Ready(match try_ready!(self.shutdown_rx.poll()) {
            Some(tx) => {
                debug!("Received shutdown request.");
                self.shutdown = Some(tx);
                Some(())
            }
            None => None,
        }))
    }

    /// Polls for a connection-count change and applies it. Yields `None`
    /// once the notification stream ends.
    fn poll_connections(&mut self) -> Poll<Option<()>, ()> {
        Ok(Async::Ready(match try_ready!(self.connections.poll()) {
            Some(action) => {
                self.process_connection(action);
                Some(())
            }
            None => None,
        }))
    }

    /// Merges the two event sources into a single stream-style result:
    /// `Some(())` when either produced an event, `None` when both are
    /// exhausted, `NotReady` when neither is ready.
    fn poll_shutdown_requests_and_connections(&mut self) -> Poll<Option<()>, ()> {
        // Surface an error deferred from a previous call (see below).
        if let Some(e) = self.queued_error.take() {
            return Err(e)
        }

        match try!(self.poll_shutdown_requests()) {
            Async::NotReady => {
                match try_ready!(self.poll_connections()) {
                    Some(()) => Ok(Async::Ready(Some(()))),
                    // Connections exhausted, but a shutdown request may
                    // still arrive later.
                    None => Ok(Async::NotReady),
                }
            }
            Async::Ready(None) => {
                match try_ready!(self.poll_connections()) {
                    Some(()) => Ok(Async::Ready(Some(()))),
                    None => Ok(Async::Ready(None)),
                }
            }
            Async::Ready(Some(())) => {
                match self.poll_connections() {
                    Err(e) => {
                        // Don't lose the shutdown event: report it now and
                        // queue the error for the next poll.
                        self.queued_error = Some(e);
                        Ok(Async::Ready(Some(())))
                    }
                    Ok(Async::NotReady) | Ok(Async::Ready(None)) | Ok(Async::Ready(Some(()))) => {
                        Ok(Async::Ready(Some(())))
                    }
                }
            }
        }
    }

    /// Returns whether the watcher should keep running. When a shutdown has
    /// been requested and no connections remain, notifies the requester and
    /// returns `false`.
    fn should_continue(&mut self) -> bool {
        match self.shutdown.take() {
            Some(shutdown) => {
                debug!("Lameduck mode: {} open connections", self.num_connections);
                if self.num_connections == 0 {
                    debug!("Shutting down.");
                    // Not required for the shutdown future to be waited on, so this
                    // can fail (which is fine).
                    let _ = shutdown.send(());
                    false
                } else {
                    // Still draining connections: restore the channel.
                    self.shutdown = Some(shutdown);
                    true
                }
            }
            None => true,
        }
    }

    /// One step of the watcher loop: terminate if shutdown has completed,
    /// otherwise process the next event.
    fn process_request(&mut self) -> Poll<Option<()>, ()> {
        if self.done {
            return Ok(Async::Ready(None));
        }
        if self.should_continue() {
            self.poll_shutdown_requests_and_connections()
        } else {
            self.done = true;
            Ok(Async::Ready(None))
        }
    }
}
|
|
||||||
|
|
||||||
impl Future for Watcher {
|
|
||||||
type Item = ();
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<(), ()> {
|
|
||||||
loop {
|
|
||||||
match try!(self.process_request()) {
|
|
||||||
Async::Ready(Some(())) => continue,
|
|
||||||
Async::Ready(None) => return Ok(Async::Ready(())),
|
|
||||||
Async::NotReady => return Ok(Async::NotReady),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
211
src/lib.rs
211
src/lib.rs
@@ -1,211 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
//! tarpc is an RPC framework for rust with a focus on ease of use. Defining a
|
|
||||||
//! service can be done in just a few lines of code, and most of the boilerplate of
|
|
||||||
//! writing a server is taken care of for you.
|
|
||||||
//!
|
|
||||||
//! ## What is an RPC framework?
|
|
||||||
//! "RPC" stands for "Remote Procedure Call," a function call where the work of
|
|
||||||
//! producing the return value is being done somewhere else. When an rpc function is
|
|
||||||
//! invoked, behind the scenes the function contacts some other process somewhere
|
|
||||||
//! and asks them to evaluate the function instead. The original function then
|
|
||||||
//! returns the value produced by the other process.
|
|
||||||
//!
|
|
||||||
//! RPC frameworks are a fundamental building block of most microservices-oriented
|
|
||||||
//! architectures. Two well-known ones are [gRPC](http://www.grpc.io) and
|
|
||||||
//! [Cap'n Proto](https://capnproto.org/).
|
|
||||||
//!
|
|
||||||
//! tarpc differentiates itself from other RPC frameworks by defining the schema in code,
|
|
||||||
//! rather than in a separate language such as .proto. This means there's no separate compilation
|
|
||||||
//! process, and no cognitive context switching between different languages. Additionally, it
|
|
||||||
//! works with the community-backed library serde: any serde-serializable type can be used as
|
|
||||||
//! arguments to tarpc fns.
|
|
||||||
//!
|
|
||||||
//! Example usage:
|
|
||||||
//!
|
|
||||||
//! ```
|
|
||||||
//! #![feature(plugin)]
|
|
||||||
//! #![plugin(tarpc_plugins)]
|
|
||||||
//!
|
|
||||||
//! #[macro_use]
|
|
||||||
//! extern crate tarpc;
|
|
||||||
//! extern crate tokio_core;
|
|
||||||
//!
|
|
||||||
//! use tarpc::sync::{client, server};
|
|
||||||
//! use tarpc::sync::client::ClientExt;
|
|
||||||
//! use tarpc::util::Never;
|
|
||||||
//! use tokio_core::reactor;
|
|
||||||
//! use std::sync::mpsc;
|
|
||||||
//! use std::thread;
|
|
||||||
//!
|
|
||||||
//! service! {
|
|
||||||
//! rpc hello(name: String) -> String;
|
|
||||||
//! }
|
|
||||||
//!
|
|
||||||
//! #[derive(Clone)]
|
|
||||||
//! struct HelloServer;
|
|
||||||
//!
|
|
||||||
//! impl SyncService for HelloServer {
|
|
||||||
//! fn hello(&self, name: String) -> Result<String, Never> {
|
|
||||||
//! Ok(format!("Hello, {}!", name))
|
|
||||||
//! }
|
|
||||||
//! }
|
|
||||||
//!
|
|
||||||
//! fn main() {
|
|
||||||
//! let (tx, rx) = mpsc::channel();
|
|
||||||
//! thread::spawn(move || {
|
|
||||||
//! let mut handle = HelloServer.listen("localhost:10000",
|
|
||||||
//! server::Options::default()).unwrap();
|
|
||||||
//! tx.send(handle.addr()).unwrap();
|
|
||||||
//! handle.run();
|
|
||||||
//! });
|
|
||||||
//! let addr = rx.recv().unwrap();
|
|
||||||
//! let client = SyncClient::connect(addr, client::Options::default()).unwrap();
|
|
||||||
//! println!("{}", client.hello("Mom".to_string()).unwrap());
|
|
||||||
//! }
|
|
||||||
//! ```
|
|
||||||
//!
|
|
||||||
//! Example usage with TLS:
|
|
||||||
//!
|
|
||||||
//! ```no_run
|
|
||||||
//! #![feature(plugin)]
|
|
||||||
//! #![plugin(tarpc_plugins)]
|
|
||||||
//!
|
|
||||||
//! #[macro_use]
|
|
||||||
//! extern crate tarpc;
|
|
||||||
//!
|
|
||||||
//! use tarpc::sync::{client, server};
|
|
||||||
//! use tarpc::sync::client::ClientExt;
|
|
||||||
//! use tarpc::tls;
|
|
||||||
//! use tarpc::util::Never;
|
|
||||||
//! use tarpc::native_tls::{TlsAcceptor, Pkcs12};
|
|
||||||
//!
|
|
||||||
//! service! {
|
|
||||||
//! rpc hello(name: String) -> String;
|
|
||||||
//! }
|
|
||||||
//!
|
|
||||||
//! #[derive(Clone)]
|
|
||||||
//! struct HelloServer;
|
|
||||||
//!
|
|
||||||
//! impl SyncService for HelloServer {
|
|
||||||
//! fn hello(&self, name: String) -> Result<String, Never> {
|
|
||||||
//! Ok(format!("Hello, {}!", name))
|
|
||||||
//! }
|
|
||||||
//! }
|
|
||||||
//!
|
|
||||||
//! fn get_acceptor() -> TlsAcceptor {
|
|
||||||
//! let buf = include_bytes!("test/identity.p12");
|
|
||||||
//! let pkcs12 = Pkcs12::from_der(buf, "password").unwrap();
|
|
||||||
//! TlsAcceptor::builder(pkcs12).unwrap().build().unwrap()
|
|
||||||
//! }
|
|
||||||
//!
|
|
||||||
//! fn main() {
|
|
||||||
//! let addr = "localhost:10000";
|
|
||||||
//! let acceptor = get_acceptor();
|
|
||||||
//! let _server = HelloServer.listen(addr, server::Options::default().tls(acceptor));
|
|
||||||
//! let client = SyncClient::connect(addr,
|
|
||||||
//! client::Options::default()
|
|
||||||
//! .tls(tls::client::Context::new("foobar.com").unwrap()))
|
|
||||||
//! .unwrap();
|
|
||||||
//! println!("{}", client.hello("Mom".to_string()).unwrap());
|
|
||||||
//! }
|
|
||||||
//! ```
|
|
||||||
|
|
||||||
#![deny(missing_docs, missing_debug_implementations)]
|
|
||||||
#![feature(never_type)]
|
|
||||||
#![cfg_attr(test, feature(plugin))]
|
|
||||||
#![cfg_attr(test, plugin(tarpc_plugins))]
|
|
||||||
|
|
||||||
extern crate byteorder;
|
|
||||||
extern crate bytes;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate cfg_if;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate lazy_static;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate log;
|
|
||||||
extern crate net2;
|
|
||||||
extern crate num_cpus;
|
|
||||||
#[macro_use]
|
|
||||||
extern crate serde_derive;
|
|
||||||
extern crate thread_pool;
|
|
||||||
extern crate tokio_io;
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub extern crate bincode;
|
|
||||||
#[doc(hidden)]
|
|
||||||
#[macro_use]
|
|
||||||
pub extern crate futures;
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub extern crate serde;
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub extern crate tokio_core;
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub extern crate tokio_proto;
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub extern crate tokio_service;
|
|
||||||
|
|
||||||
pub use errors::Error;
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub use errors::WireError;
|
|
||||||
|
|
||||||
/// Provides some utility error types, as well as a trait for spawning futures on the default event
|
|
||||||
/// loop.
|
|
||||||
pub mod util;
|
|
||||||
|
|
||||||
/// Provides the macro used for constructing rpc services and client stubs.
|
|
||||||
#[macro_use]
|
|
||||||
mod macros;
|
|
||||||
/// Synchronous version of the tarpc API
|
|
||||||
pub mod sync;
|
|
||||||
/// Futures-based version of the tarpc API.
|
|
||||||
pub mod future;
|
|
||||||
/// TLS-specific functionality.
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
pub mod tls;
|
|
||||||
/// Provides implementations of `ClientProto` and `ServerProto` that implement the tarpc protocol.
|
|
||||||
/// The tarpc protocol is a length-delimited, bincode-serialized payload.
|
|
||||||
mod protocol;
|
|
||||||
/// Provides a few different error types.
|
|
||||||
mod errors;
|
|
||||||
/// Provides an abstraction over TLS and TCP streams.
|
|
||||||
mod stream_type;
|
|
||||||
|
|
||||||
use std::sync::mpsc;
|
|
||||||
use std::thread;
|
|
||||||
use tokio_core::reactor;
|
|
||||||
|
|
||||||
lazy_static! {
    /// The `Remote` for the default reactor core.
    // Lazily initialized on first access by spawning a dedicated reactor
    // thread (see `spawn_core`).
    static ref REMOTE: reactor::Remote = {
        spawn_core()
    };
}
|
|
||||||
|
|
||||||
/// Spawns a `reactor::Core` running forever on a new thread.
|
|
||||||
fn spawn_core() -> reactor::Remote {
|
|
||||||
let (tx, rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
let mut core = reactor::Core::new().unwrap();
|
|
||||||
tx.send(core.handle().remote().clone()).unwrap();
|
|
||||||
|
|
||||||
// Run forever
|
|
||||||
core.run(futures::empty::<(), !>()).unwrap();
|
|
||||||
});
|
|
||||||
rx.recv().unwrap()
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg_if! {
|
|
||||||
if #[cfg(feature = "tls")] {
|
|
||||||
extern crate tokio_tls;
|
|
||||||
extern crate native_tls as native_tls_inner;
|
|
||||||
|
|
||||||
/// Re-exported TLS-related types from the `native_tls` crate.
|
|
||||||
pub mod native_tls {
|
|
||||||
pub use native_tls_inner::{Error, Pkcs12, TlsAcceptor, TlsConnector};
|
|
||||||
}
|
|
||||||
} else {}
|
|
||||||
}
|
|
||||||
1319
src/macros.rs
1319
src/macros.rs
File diff suppressed because it is too large
Load Diff
@@ -1,21 +0,0 @@
|
|||||||
[package]
|
|
||||||
name = "tarpc-plugins"
|
|
||||||
version = "0.1.1"
|
|
||||||
authors = ["Adam Wright <adam.austin.wright@gmail.com>", "Tim Kuehn <timothy.j.kuehn@gmail.com>"]
|
|
||||||
license = "MIT"
|
|
||||||
documentation = "https://docs.rs/tarpc"
|
|
||||||
homepage = "https://github.com/google/tarpc"
|
|
||||||
repository = "https://github.com/google/tarpc"
|
|
||||||
keywords = ["rpc", "network", "server", "api", "tls"]
|
|
||||||
categories = ["asynchronous", "network-programming"]
|
|
||||||
readme = "../../README.md"
|
|
||||||
description = "Plugins for tarpc, an RPC framework for Rust with a focus on ease of use."
|
|
||||||
|
|
||||||
[badges]
|
|
||||||
travis-ci = { repository = "google/tarpc" }
|
|
||||||
|
|
||||||
[dependencies]
|
|
||||||
itertools = "0.5"
|
|
||||||
|
|
||||||
[lib]
|
|
||||||
plugin = true
|
|
||||||
@@ -1,198 +0,0 @@
|
|||||||
#![feature(plugin_registrar, rustc_private)]
|
|
||||||
|
|
||||||
extern crate itertools;
|
|
||||||
extern crate rustc_plugin;
|
|
||||||
extern crate syntax;
|
|
||||||
|
|
||||||
use itertools::Itertools;
|
|
||||||
use rustc_plugin::Registry;
|
|
||||||
use syntax::ast::{self, Ident, TraitRef, Ty, TyKind};
|
|
||||||
use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager};
|
|
||||||
use syntax::ext::quote::rt::Span;
|
|
||||||
use syntax::parse::{self, token, str_lit, PResult};
|
|
||||||
use syntax::parse::parser::{Parser, PathStyle};
|
|
||||||
use syntax::symbol::Symbol;
|
|
||||||
use syntax::ptr::P;
|
|
||||||
use syntax::tokenstream::{TokenTree, TokenStream};
|
|
||||||
use syntax::util::small_vector::SmallVector;
|
|
||||||
|
|
||||||
/// Compiler-plugin macro body: parses a single trait item, converts its
/// snake_case ident to CamelCase (via `convert`, which also appends `Fut`),
/// and rewrites `{}` placeholders in its doc comments to the original ident.
fn snake_to_camel(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult + 'static> {
    let mut parser = parse::new_parser_from_tts(cx.parse_sess(), tts.into());

    // Parse exactly one trait item from the macro's token input; on parse
    // failure, emit the diagnostic and expand to a dummy result.
    let mut item = match parser.parse_trait_item(&mut false) {
        Ok(s) => s,
        Err(mut diagnostic) => {
            diagnostic.emit();
            return DummyResult::any(sp);
        }
    };

    // The input must contain nothing after the single item.
    if let Err(mut diagnostic) = parser.expect(&token::Eof) {
        diagnostic.emit();
        return DummyResult::any(sp);
    }

    // Rename in place; keep the original (snake_case) name for doc rewriting.
    let old_ident = convert(&mut item.ident);

    // As far as I know, it's not possible in macro_rules! to reference an $ident in a doc string,
    // so this is the hacky workaround.
    //
    // This code looks intimidating, but it's just iterating through the trait item's attributes
    // copying non-doc attributes, and modifying doc attributes such that replacing any {} in the
    // doc string instead holds the original, snake_case ident.
    let attrs: Vec<_> = item.attrs
        .drain(..)
        .map(|mut attr| {
            if !attr.is_sugared_doc {
                return attr;
            }

            // Getting at the underlying doc comment is surprisingly painful.
            // The call-chain goes something like:
            //
            // - https://github.com/rust-lang/rust/blob/9c15de4fd59bee290848b5443c7e194fd5afb02c/src/libsyntax/attr.rs#L283
            // - https://github.com/rust-lang/rust/blob/9c15de4fd59bee290848b5443c7e194fd5afb02c/src/libsyntax/attr.rs#L1067
            // - https://github.com/rust-lang/rust/blob/9c15de4fd59bee290848b5443c7e194fd5afb02c/src/libsyntax/attr.rs#L1196
            // - https://github.com/rust-lang/rust/blob/9c15de4fd59bee290848b5443c7e194fd5afb02c/src/libsyntax/parse/mod.rs#L399
            // - https://github.com/rust-lang/rust/blob/9c15de4fd59bee290848b5443c7e194fd5afb02c/src/libsyntax/parse/mod.rs#L268
            //
            // Note that a docstring (i.e., something with is_sugared_doc) *always* has exactly two
            // tokens: an Eq followed by a Literal, where the Literal contains a Str_. We therefore
            // match against that, modifying the inner Str with our modified Symbol.
            let mut tokens = attr.tokens.clone().into_trees();
            if let Some(tt @ TokenTree::Token(_, token::Eq)) = tokens.next() {
                let mut docstr = tokens.next().expect("Docstrings must have literal docstring");
                if let TokenTree::Token(_, token::Literal(token::Str_(ref mut doc), _)) = docstr {
                    // Substitute the original name into the doc text.
                    *doc = Symbol::intern(&str_lit(&doc.as_str()).replace("{}", &old_ident));
                } else {
                    unreachable!();
                }
                attr.tokens = TokenStream::concat(vec![tt.into(), docstr.into()]);
            } else {
                unreachable!();
            }

            attr
        })
        .collect();
    // `drain` emptied the list; reattach the (possibly rewritten) attributes.
    item.attrs.extend(attrs.into_iter());

    MacEager::trait_items(SmallVector::one(item))
}
|
|
||||||
|
|
||||||
fn impl_snake_to_camel(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult + 'static> {
|
|
||||||
let mut parser = parse::new_parser_from_tts(cx.parse_sess(), tts.into());
|
|
||||||
// The `expand_expr` method is called so that any macro calls in the
|
|
||||||
// parsed expression are expanded.
|
|
||||||
|
|
||||||
let mut item = match parser.parse_impl_item(&mut false) {
|
|
||||||
Ok(s) => s,
|
|
||||||
Err(mut diagnostic) => {
|
|
||||||
diagnostic.emit();
|
|
||||||
return DummyResult::any(sp);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Err(mut diagnostic) = parser.expect(&token::Eof) {
|
|
||||||
diagnostic.emit();
|
|
||||||
return DummyResult::any(sp);
|
|
||||||
}
|
|
||||||
|
|
||||||
convert(&mut item.ident);
|
|
||||||
MacEager::impl_items(SmallVector::one(item))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compiler-plugin macro body: parses a type path and converts the final
/// path segment's snake_case ident to CamelCase (with the `Fut` suffix
/// added by `convert`), yielding the resulting type.
fn ty_snake_to_camel(cx: &mut ExtCtxt, sp: Span, tts: &[TokenTree]) -> Box<MacResult + 'static> {
    let mut parser = parse::new_parser_from_tts(cx.parse_sess(), tts.into());

    // Parse a type-style path; on failure, emit the diagnostic and expand
    // to a dummy result.
    let mut path = match parser.parse_path(PathStyle::Type) {
        Ok(s) => s,
        Err(mut diagnostic) => {
            diagnostic.emit();
            return DummyResult::any(sp);
        }
    };

    // Nothing may follow the path in the macro input.
    if let Err(mut diagnostic) = parser.expect(&token::Eof) {
        diagnostic.emit();
        return DummyResult::any(sp);
    }

    // Only capitalize the final segment
    convert(&mut path.segments
        .last_mut()
        .unwrap()
        .identifier);
    // Wrap the rewritten path back up as a type node.
    MacEager::ty(P(Ty {
        id: ast::DUMMY_NODE_ID,
        node: TyKind::Path(None, path),
        span: sp,
    }))
}
|
|
||||||
|
|
||||||
/// Converts an ident in-place to CamelCase and returns the previous ident.
///
/// Leading underscores are stripped, each underscore-separated word is
/// capitalized, runs of underscores collapse to one boundary, and the
/// hardcoded suffix `Fut` is appended.
fn convert(ident: &mut Ident) -> String {
    let ident_str = ident.to_string();
    let mut camel_ty = String::new();

    {
        // Find the first non-underscore and add it capitalized.
        let mut chars = ident_str.chars();

        // Find the first non-underscore char, uppercase it, and append it.
        // Guaranteed to succeed because all idents must have at least one non-underscore char.
        camel_ty.extend(chars.find(|&c| c != '_').unwrap().to_uppercase());

        // When we find an underscore, we remove it and capitalize the next char. To do this,
        // we need to ensure the next char is not another underscore.
        let mut chars = chars.coalesce(|c1, c2| {
            if c1 == '_' && c2 == '_' {
                Ok(c1)
            } else {
                Err((c1, c2))
            }
        });

        while let Some(c) = chars.next() {
            if c != '_' {
                camel_ty.push(c);
            } else {
                // Underscore marks a word boundary: drop it and uppercase
                // the following char (if any).
                if let Some(c) = chars.next() {
                    camel_ty.extend(c.to_uppercase());
                }
            }
        }
    }

    // The Fut suffix is hardcoded right now; this macro isn't really meant to be general-purpose.
    camel_ty.push_str("Fut");

    // Replace the ident in place; the caller gets the original name back.
    *ident = Ident::with_empty_ctxt(Symbol::intern(&camel_ty));
    ident_str
}
|
|
||||||
|
|
||||||
/// Extension trait adding trait-reference parsing to the libsyntax parser.
trait ParseTraitRef {
    // Parses a trait reference (e.g. `a::B<String, i32>`) from the current
    // parser position.
    fn parse_trait_ref(&mut self) -> PResult<TraitRef>;
}
|
|
||||||
|
|
||||||
impl<'a> ParseTraitRef for Parser<'a> {
|
|
||||||
/// Parse a::B<String,i32>
|
|
||||||
fn parse_trait_ref(&mut self) -> PResult<TraitRef> {
|
|
||||||
Ok(TraitRef {
|
|
||||||
path: self.parse_path(PathStyle::Type)?,
|
|
||||||
ref_id: ast::DUMMY_NODE_ID,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Entry point invoked by rustc when the plugin is loaded: registers each
// expansion routine under the name the `service!` macro machinery uses.
#[plugin_registrar]
#[doc(hidden)]
pub fn plugin_registrar(reg: &mut Registry) {
    reg.register_macro("snake_to_camel", snake_to_camel);
    reg.register_macro("impl_snake_to_camel", impl_snake_to_camel);
    reg.register_macro("ty_snake_to_camel", ty_snake_to_camel);
}
|
|
||||||
219
src/protocol.rs
219
src/protocol.rs
@@ -1,219 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
use serde;
|
|
||||||
use bincode::{self, Infinite};
|
|
||||||
use byteorder::{BigEndian, ReadBytesExt};
|
|
||||||
use bytes::BytesMut;
|
|
||||||
use bytes::buf::BufMut;
|
|
||||||
use std::io::{self, Cursor};
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
use std::mem;
|
|
||||||
use tokio_io::{AsyncRead, AsyncWrite};
|
|
||||||
use tokio_io::codec::{Encoder, Decoder, Framed};
|
|
||||||
use tokio_proto::multiplex::{ClientProto, ServerProto};
|
|
||||||
use tokio_proto::streaming::multiplex::RequestId;
|
|
||||||
|
|
||||||
// `Encode` is the type that `Codec` encodes. `Decode` is the type it decodes.
/// Length-delimited, bincode-based codec for multiplexed tarpc frames.
#[derive(Debug)]
pub struct Codec<Encode, Decode> {
    // Frames with payloads larger than this are rejected in both directions.
    max_payload_size: u64,
    // Decoder position within the current frame (id → len → payload).
    state: CodecState,
    // Zero-sized marker tying the codec to its message types.
    _phantom_data: PhantomData<(Encode, Decode)>,
}
|
|
||||||
|
|
||||||
/// Decoder progress through a single frame: the 8-byte request id, then the
/// 8-byte payload length, then the payload itself.
#[derive(Debug)]
enum CodecState {
    // Waiting for the request id.
    Id,
    // Have the id; waiting for the payload length.
    Len { id: u64 },
    // Have id and length; waiting for `len` bytes of payload.
    Payload { id: u64, len: u64 },
}
|
|
||||||
|
|
||||||
impl<Encode, Decode> Codec<Encode, Decode> {
|
|
||||||
fn new(max_payload_size: u64) -> Self {
|
|
||||||
Codec {
|
|
||||||
max_payload_size: max_payload_size,
|
|
||||||
state: CodecState::Id,
|
|
||||||
_phantom_data: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn too_big(payload_size: u64, max_payload_size: u64) -> io::Error {
|
|
||||||
warn!("Not sending too-big packet of size {} (max is {})",
|
|
||||||
payload_size, max_payload_size);
|
|
||||||
io::Error::new(io::ErrorKind::InvalidData,
|
|
||||||
format!("Maximum payload size is {} bytes but got a payload of {}",
|
|
||||||
max_payload_size, payload_size))
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Encode, Decode> Encoder for Codec<Encode, Decode>
    where Encode: serde::Serialize,
          Decode: serde::Deserialize
{
    type Item = (RequestId, Encode);
    type Error = io::Error;

    /// Writes one frame: 8-byte big-endian request id, 8-byte big-endian
    /// payload length, then the bincode-serialized payload. Returns an
    /// `InvalidData` error (without writing) if the payload exceeds the
    /// configured maximum.
    fn encode(&mut self, (id, message): Self::Item, buf: &mut BytesMut) -> io::Result<()> {
        let payload_size = bincode::serialized_size(&message);
        if payload_size > self.max_payload_size {
            return Err(too_big(payload_size, self.max_payload_size));
        }
        // Reserve room for both 8-byte headers plus the payload up front.
        let message_size = 2 * mem::size_of::<u64>() + payload_size as usize;
        buf.reserve(message_size);
        buf.put_u64::<BigEndian>(id);
        trace!("Encoded request id = {} as {:?}", id, buf);
        buf.put_u64::<BigEndian>(payload_size);
        // Serialize directly into the buffer; size was already checked above.
        bincode::serialize_into(&mut buf.writer(),
                                &message,
                                Infinite)
            .map_err(|serialize_err| io::Error::new(io::ErrorKind::Other, serialize_err))?;
        trace!("Encoded buffer: {:?}", buf);
        Ok(())
    }
}
|
|
||||||
|
|
||||||
impl<Encode, Decode> Decoder for Codec<Encode, Decode>
    where Decode: serde::Deserialize
{
    type Item = (RequestId, Result<Decode, bincode::Error>);
    type Error = io::Error;

    /// Incrementally parses one frame (id, length, payload) from `buf`,
    /// persisting progress in `self.state` across calls. Returns `Ok(None)`
    /// until a complete frame is buffered; deserialization failures are
    /// returned inside the item so the caller can respond per-request.
    fn decode(&mut self, buf: &mut BytesMut) -> io::Result<Option<Self::Item>> {
        use self::CodecState::*;
        trace!("Codec::decode: {:?}", buf);

        loop {
            match self.state {
                // Not enough bytes yet for the 8-byte request id.
                Id if buf.len() < mem::size_of::<u64>() => {
                    trace!("--> Buf len is {}; waiting for 8 to parse id.", buf.len());
                    return Ok(None);
                }
                Id => {
                    let mut id_buf = buf.split_to(mem::size_of::<u64>());
                    let id = Cursor::new(&mut id_buf).read_u64::<BigEndian>()?;
                    trace!("--> Parsed id = {} from {:?}", id, id_buf);
                    self.state = Len { id: id };
                }
                // Not enough bytes yet for the 8-byte payload length.
                Len { .. } if buf.len() < mem::size_of::<u64>() => {
                    trace!("--> Buf len is {}; waiting for 8 to parse packet length.",
                           buf.len());
                    return Ok(None);
                }
                Len { id } => {
                    let len_buf = buf.split_to(mem::size_of::<u64>());
                    let len = Cursor::new(len_buf).read_u64::<BigEndian>()?;
                    trace!("--> Parsed payload length = {}, remaining buffer length = {}",
                           len,
                           buf.len());
                    // Reject oversized frames before buffering the payload.
                    if len > self.max_payload_size {
                        return Err(too_big(len, self.max_payload_size));
                    }
                    self.state = Payload { id: id, len: len };
                }
                // Payload not fully buffered yet.
                Payload { len, .. } if buf.len() < len as usize => {
                    trace!("--> Buf len is {}; waiting for {} to parse payload.",
                           buf.len(),
                           len);
                    return Ok(None);
                }
                Payload { id, len } => {
                    let payload = buf.split_to(len as usize);
                    let result = bincode::deserialize_from(&mut Cursor::new(payload),
                                                           Infinite);
                    // Reset the state machine because, either way, we're done processing this
                    // message.
                    self.state = Id;

                    return Ok(Some((id, result)));
                }
            }
        }
    }
}
|
|
||||||
|
|
||||||
/// Implements the `multiplex::ServerProto` trait.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Proto<Encode, Decode> {
|
|
||||||
max_payload_size: u64,
|
|
||||||
_phantom_data: PhantomData<(Encode, Decode)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Encode, Decode> Proto<Encode, Decode> {
|
|
||||||
/// Returns a new `Proto`.
|
|
||||||
pub fn new(max_payload_size: u64) -> Self {
|
|
||||||
Proto {
|
|
||||||
max_payload_size: max_payload_size,
|
|
||||||
_phantom_data: PhantomData
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T, Encode, Decode> ServerProto<T> for Proto<Encode, Decode>
|
|
||||||
where T: AsyncRead + AsyncWrite + 'static,
|
|
||||||
Encode: serde::Serialize + 'static,
|
|
||||||
Decode: serde::Deserialize + 'static
|
|
||||||
{
|
|
||||||
type Response = Encode;
|
|
||||||
type Request = Result<Decode, bincode::Error>;
|
|
||||||
type Transport = Framed<T, Codec<Encode, Decode>>;
|
|
||||||
type BindTransport = Result<Self::Transport, io::Error>;
|
|
||||||
|
|
||||||
fn bind_transport(&self, io: T) -> Self::BindTransport {
|
|
||||||
Ok(io.framed(Codec::new(self.max_payload_size)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T, Encode, Decode> ClientProto<T> for Proto<Encode, Decode>
|
|
||||||
where T: AsyncRead + AsyncWrite + 'static,
|
|
||||||
Encode: serde::Serialize + 'static,
|
|
||||||
Decode: serde::Deserialize + 'static
|
|
||||||
{
|
|
||||||
type Response = Result<Decode, bincode::Error>;
|
|
||||||
type Request = Encode;
|
|
||||||
type Transport = Framed<T, Codec<Encode, Decode>>;
|
|
||||||
type BindTransport = Result<Self::Transport, io::Error>;
|
|
||||||
|
|
||||||
fn bind_transport(&self, io: T) -> Self::BindTransport {
|
|
||||||
Ok(io.framed(Codec::new(self.max_payload_size)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn serialize() {
|
|
||||||
const MSG: (u64, (char, char, char)) = (4, ('a', 'b', 'c'));
|
|
||||||
let mut buf = BytesMut::with_capacity(10);
|
|
||||||
|
|
||||||
// Serialize twice to check for idempotence.
|
|
||||||
for _ in 0..2 {
|
|
||||||
let mut codec: Codec<(char, char, char), (char, char, char)> = Codec::new(2_000_000);
|
|
||||||
codec.encode(MSG, &mut buf).unwrap();
|
|
||||||
let actual: Result<Option<(u64, Result<(char, char, char), bincode::Error>)>, io::Error> =
|
|
||||||
codec.decode(&mut buf);
|
|
||||||
|
|
||||||
match actual {
|
|
||||||
Ok(Some((id, ref v))) if id == MSG.0 && *v.as_ref().unwrap() == MSG.1 => {}
|
|
||||||
bad => panic!("Expected {:?}, but got {:?}", Some(MSG), bad),
|
|
||||||
}
|
|
||||||
|
|
||||||
assert!(buf.is_empty(), "Expected empty buf but got {:?}", buf);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn deserialize_big() {
|
|
||||||
let mut codec: Codec<Vec<u8>, Vec<u8>> = Codec::new(24);
|
|
||||||
|
|
||||||
let mut buf = BytesMut::with_capacity(40);
|
|
||||||
assert_eq!(codec.encode((0, vec![0; 24]), &mut buf).err().unwrap().kind(),
|
|
||||||
io::ErrorKind::InvalidData);
|
|
||||||
|
|
||||||
// Header
|
|
||||||
buf.put_slice(&mut [0u8; 8]);
|
|
||||||
// Len
|
|
||||||
buf.put_slice(&mut [0u8, 0, 0, 0, 0, 0, 0, 25]);
|
|
||||||
assert_eq!(codec.decode(&mut buf).err().unwrap().kind(),
|
|
||||||
io::ErrorKind::InvalidData);
|
|
||||||
}
|
|
||||||
@@ -1,94 +0,0 @@
|
|||||||
use bytes::{Buf, BufMut};
|
|
||||||
use futures::Poll;
|
|
||||||
use std::io;
|
|
||||||
use tokio_core::net::TcpStream;
|
|
||||||
use tokio_io::{AsyncRead, AsyncWrite};
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
use tokio_tls::TlsStream;
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub enum StreamType {
|
|
||||||
Tcp(TcpStream),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
Tls(TlsStream<TcpStream>),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<TcpStream> for StreamType {
|
|
||||||
fn from(stream: TcpStream) -> Self {
|
|
||||||
StreamType::Tcp(stream)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
impl From<TlsStream<TcpStream>> for StreamType {
|
|
||||||
fn from(stream: TlsStream<TcpStream>) -> Self {
|
|
||||||
StreamType::Tls(stream)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl io::Read for StreamType {
|
|
||||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
|
||||||
match *self {
|
|
||||||
StreamType::Tcp(ref mut stream) => stream.read(buf),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
StreamType::Tls(ref mut stream) => stream.read(buf),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl io::Write for StreamType {
|
|
||||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
|
||||||
match *self {
|
|
||||||
StreamType::Tcp(ref mut stream) => stream.write(buf),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
StreamType::Tls(ref mut stream) => stream.write(buf),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn flush(&mut self) -> io::Result<()> {
|
|
||||||
match *self {
|
|
||||||
StreamType::Tcp(ref mut stream) => stream.flush(),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
StreamType::Tls(ref mut stream) => stream.flush(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsyncRead for StreamType {
|
|
||||||
// By overriding this fn, `StreamType` is obliged to never read the uninitialized buffer.
|
|
||||||
// Most sane implementations would never have a reason to, and `StreamType` does not, so
|
|
||||||
// this is safe.
|
|
||||||
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
|
|
||||||
match *self {
|
|
||||||
StreamType::Tcp(ref stream) => stream.prepare_uninitialized_buffer(buf),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
StreamType::Tls(ref stream) => stream.prepare_uninitialized_buffer(buf),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn read_buf<B: BufMut>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
|
|
||||||
match *self {
|
|
||||||
StreamType::Tcp(ref mut stream) => stream.read_buf(buf),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
StreamType::Tls(ref mut stream) => stream.read_buf(buf),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AsyncWrite for StreamType {
|
|
||||||
fn shutdown(&mut self) -> Poll<(), io::Error> {
|
|
||||||
match *self {
|
|
||||||
StreamType::Tcp(ref mut stream) => stream.shutdown(),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
StreamType::Tls(ref mut stream) => stream.shutdown(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn write_buf<B: Buf>(&mut self, buf: &mut B) -> Poll<usize, io::Error> {
|
|
||||||
match *self {
|
|
||||||
StreamType::Tcp(ref mut stream) => stream.write_buf(buf),
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
StreamType::Tls(ref mut stream) => stream.write_buf(buf),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,237 +0,0 @@
|
|||||||
use future::client::{Client as FutureClient, ClientExt as FutureClientExt,
|
|
||||||
Options as FutureOptions};
|
|
||||||
/// Exposes a trait for connecting synchronously to servers.
|
|
||||||
use futures::{Future, Stream};
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::fmt;
|
|
||||||
use std::io;
|
|
||||||
use std::net::{SocketAddr, ToSocketAddrs};
|
|
||||||
use std::sync::mpsc;
|
|
||||||
use std::thread;
|
|
||||||
use tokio_core::reactor;
|
|
||||||
use tokio_proto::util::client_proxy::{ClientProxy, Receiver, pair};
|
|
||||||
use tokio_service::Service;
|
|
||||||
use util::FirstSocketAddr;
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
use tls::client::Context;
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub struct Client<Req, Resp, E> {
|
|
||||||
proxy: ClientProxy<Req, Resp, ::Error<E>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> Clone for Client<Req, Resp, E> {
|
|
||||||
fn clone(&self) -> Self {
|
|
||||||
Client { proxy: self.proxy.clone() }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> fmt::Debug for Client<Req, Resp, E> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
const PROXY: &'static &'static str = &"ClientProxy { .. }";
|
|
||||||
f.debug_struct("Client")
|
|
||||||
.field("proxy", PROXY)
|
|
||||||
.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + Sync + Send + 'static,
|
|
||||||
Resp: Deserialize + Sync + Send + 'static,
|
|
||||||
E: Deserialize + Sync + Send + 'static
|
|
||||||
{
|
|
||||||
/// Drives an RPC call for the given request.
|
|
||||||
pub fn call(&self, request: Req) -> Result<Resp, ::Error<E>> {
|
|
||||||
// Must call wait here to block on the response.
|
|
||||||
// The request handler relies on this fact to safely unwrap the
|
|
||||||
// oneshot send.
|
|
||||||
self.proxy.call(request).wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Additional options to configure how the client connects and operates.
|
|
||||||
pub struct Options {
|
|
||||||
/// Max packet size in bytes.
|
|
||||||
max_payload_size: u64,
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
tls_ctx: Option<Context>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Options {
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
fn default() -> Self {
|
|
||||||
Options {
|
|
||||||
max_payload_size: 2_000_000,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
fn default() -> Self {
|
|
||||||
Options {
|
|
||||||
max_payload_size: 2_000_000,
|
|
||||||
tls_ctx: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Options {
|
|
||||||
/// Set the max payload size in bytes. The default is 2,000,000 (2 MB).
|
|
||||||
pub fn max_payload_size(mut self, bytes: u64) -> Self {
|
|
||||||
self.max_payload_size = bytes;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Connect using the given `Context`
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
pub fn tls(mut self, ctx: Context) -> Self {
|
|
||||||
self.tls_ctx = Some(ctx);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Options {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
const SOME: &'static &'static str = &"Some(_)";
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
const NONE: &'static &'static str = &"None";
|
|
||||||
let mut f = f.debug_struct("Options");
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
f.field("tls_ctx", if self.tls_ctx.is_some() { SOME } else { NONE });
|
|
||||||
f.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Into<FutureOptions> for (reactor::Handle, Options) {
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
fn into(self) -> FutureOptions {
|
|
||||||
let (handle, options) = self;
|
|
||||||
let mut opts = FutureOptions::default().handle(handle);
|
|
||||||
if let Some(tls_ctx) = options.tls_ctx {
|
|
||||||
opts = opts.tls(tls_ctx);
|
|
||||||
}
|
|
||||||
opts
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(feature = "tls"))]
|
|
||||||
fn into(self) -> FutureOptions {
|
|
||||||
let (handle, _) = self;
|
|
||||||
FutureOptions::default().handle(handle)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Extension methods for Clients.
|
|
||||||
pub trait ClientExt: Sized {
|
|
||||||
/// Connects to a server located at the given address.
|
|
||||||
fn connect<A>(addr: A, options: Options) -> io::Result<Self> where A: ToSocketAddrs;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> ClientExt for Client<Req, Resp, E>
|
|
||||||
where Req: Serialize + Sync + Send + 'static,
|
|
||||||
Resp: Deserialize + Sync + Send + 'static,
|
|
||||||
E: Deserialize + Sync + Send + 'static
|
|
||||||
{
|
|
||||||
fn connect<A>(addr: A, options: Options) -> io::Result<Self>
|
|
||||||
where A: ToSocketAddrs
|
|
||||||
{
|
|
||||||
let addr = addr.try_first_socket_addr()?;
|
|
||||||
let (connect_tx, connect_rx) = mpsc::channel();
|
|
||||||
thread::spawn(move || {
|
|
||||||
match RequestHandler::connect(addr, options) {
|
|
||||||
Ok((proxy, mut handler)) => {
|
|
||||||
connect_tx.send(Ok(proxy)).unwrap();
|
|
||||||
handler.handle_requests();
|
|
||||||
}
|
|
||||||
Err(e) => connect_tx.send(Err(e)).unwrap(),
|
|
||||||
}
|
|
||||||
});
|
|
||||||
Ok(connect_rx.recv().unwrap()?)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Forwards incoming requests of type `Req`
|
|
||||||
/// with expected response `Result<Resp, ::Error<E>>`
|
|
||||||
/// to service `S`.
|
|
||||||
struct RequestHandler<Req, Resp, E, S> {
|
|
||||||
reactor: reactor::Core,
|
|
||||||
client: S,
|
|
||||||
requests: Receiver<Req, Resp, ::Error<E>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E> RequestHandler<Req, Resp, E, FutureClient<Req, Resp, E>>
|
|
||||||
where Req: Serialize + Sync + Send + 'static,
|
|
||||||
Resp: Deserialize + Sync + Send + 'static,
|
|
||||||
E: Deserialize + Sync + Send + 'static
|
|
||||||
{
|
|
||||||
/// Creates a new `RequestHandler` by connecting a `FutureClient` to the given address
|
|
||||||
/// using the given options.
|
|
||||||
fn connect(addr: SocketAddr, options: Options)
|
|
||||||
-> io::Result<(Client<Req, Resp, E>, Self)>
|
|
||||||
{
|
|
||||||
let mut reactor = reactor::Core::new()?;
|
|
||||||
let options = (reactor.handle(), options).into();
|
|
||||||
let client = reactor.run(FutureClient::connect(addr, options))?;
|
|
||||||
let (proxy, requests) = pair();
|
|
||||||
Ok((Client { proxy }, RequestHandler { reactor, client, requests }))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Req, Resp, E, S> RequestHandler<Req, Resp, E, S>
|
|
||||||
where Req: Serialize + 'static,
|
|
||||||
Resp: Deserialize + 'static,
|
|
||||||
E: Deserialize + 'static,
|
|
||||||
S: Service<Request = Req, Response = Resp, Error = ::Error<E>>,
|
|
||||||
S::Future: 'static,
|
|
||||||
{
|
|
||||||
fn handle_requests(&mut self) {
|
|
||||||
let RequestHandler { ref mut reactor, ref mut requests, ref mut client } = *self;
|
|
||||||
let handle = reactor.handle();
|
|
||||||
let requests = requests
|
|
||||||
.map(|result| {
|
|
||||||
match result {
|
|
||||||
Ok(req) => req,
|
|
||||||
// The ClientProxy never sends Err currently
|
|
||||||
Err(e) => panic!("Unimplemented error handling in RequestHandler: {}", e),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.for_each(|(request, response_tx)| {
|
|
||||||
let request = client.call(request)
|
|
||||||
.then(move |response| {
|
|
||||||
// Safe to unwrap because clients always block on the response future.
|
|
||||||
response_tx.send(response)
|
|
||||||
.map_err(|_| ())
|
|
||||||
.expect("Client should block on response");
|
|
||||||
Ok(())
|
|
||||||
});
|
|
||||||
handle.spawn(request);
|
|
||||||
Ok(())
|
|
||||||
});
|
|
||||||
reactor.run(requests).unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn handle_requests() {
|
|
||||||
use futures::future;
|
|
||||||
|
|
||||||
struct Client;
|
|
||||||
impl Service for Client {
|
|
||||||
type Request = i32;
|
|
||||||
type Response = i32;
|
|
||||||
type Error = ::Error<()>;
|
|
||||||
type Future = future::FutureResult<i32, ::Error<()>>;
|
|
||||||
|
|
||||||
fn call(&self, req: i32) -> Self::Future {
|
|
||||||
future::ok(req)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let (request, requests) = ::futures::sync::mpsc::unbounded();
|
|
||||||
let reactor = reactor::Core::new().unwrap();
|
|
||||||
let client = Client;
|
|
||||||
let mut request_handler = RequestHandler { reactor, client, requests };
|
|
||||||
// Test that `handle_requests` returns when all request senders are dropped.
|
|
||||||
drop(request);
|
|
||||||
request_handler.handle_requests();
|
|
||||||
}
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
/// Provides the base client stubs used by the service macro.
|
|
||||||
pub mod client;
|
|
||||||
/// Provides the base server boilerplate used by service implementations.
|
|
||||||
pub mod server;
|
|
||||||
@@ -1,225 +0,0 @@
|
|||||||
use {bincode, future, num_cpus};
|
|
||||||
use future::server::{Response, Shutdown};
|
|
||||||
use futures::{Future, future as futures};
|
|
||||||
use futures::sync::oneshot;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
use std::io;
|
|
||||||
use std::fmt;
|
|
||||||
use std::net::SocketAddr;
|
|
||||||
use std::time::Duration;
|
|
||||||
use std::usize;
|
|
||||||
use thread_pool::{self, Sender, Task, ThreadPool};
|
|
||||||
use tokio_core::reactor;
|
|
||||||
use tokio_service::{NewService, Service};
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
use native_tls_inner::TlsAcceptor;
|
|
||||||
|
|
||||||
/// Additional options to configure how the server operates.
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct Options {
|
|
||||||
thread_pool: thread_pool::Builder,
|
|
||||||
opts: future::server::Options,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Options {
|
|
||||||
fn default() -> Self {
|
|
||||||
let num_cpus = num_cpus::get();
|
|
||||||
Options {
|
|
||||||
thread_pool: thread_pool::Builder::new()
|
|
||||||
.keep_alive(Duration::from_secs(60))
|
|
||||||
.max_pool_size(num_cpus * 100)
|
|
||||||
.core_pool_size(num_cpus)
|
|
||||||
.work_queue_capacity(usize::MAX)
|
|
||||||
.name_prefix("request-thread-"),
|
|
||||||
opts: future::server::Options::default(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Options {
|
|
||||||
/// Set the max payload size in bytes. The default is 2,000,000 (2 MB).
|
|
||||||
pub fn max_payload_size(mut self, bytes: u64) -> Self {
|
|
||||||
self.opts = self.opts.max_payload_size(bytes);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Sets the thread pool builder to use when creating the server's thread pool.
|
|
||||||
pub fn thread_pool(mut self, builder: thread_pool::Builder) -> Self {
|
|
||||||
self.thread_pool = builder;
|
|
||||||
self
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set the `TlsAcceptor`
|
|
||||||
#[cfg(feature = "tls")]
|
|
||||||
pub fn tls(mut self, tls_acceptor: TlsAcceptor) -> Self {
|
|
||||||
self.opts = self.opts.tls(tls_acceptor);
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A handle to a bound server. Must be run to start serving requests.
|
|
||||||
#[must_use = "A server does nothing until `run` is called."]
|
|
||||||
pub struct Handle {
|
|
||||||
reactor: reactor::Core,
|
|
||||||
handle: future::server::Handle,
|
|
||||||
server: Box<Future<Item = (), Error = ()>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Handle {
|
|
||||||
/// Runs the server on the current thread, blocking indefinitely.
|
|
||||||
pub fn run(mut self) {
|
|
||||||
trace!("Running...");
|
|
||||||
match self.reactor.run(self.server) {
|
|
||||||
Ok(()) => debug!("Server successfully shutdown."),
|
|
||||||
Err(()) => debug!("Server shutdown due to error."),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a hook for shutting down the server.
|
|
||||||
pub fn shutdown(&self) -> Shutdown {
|
|
||||||
self.handle.shutdown().clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The socket address the server is bound to.
|
|
||||||
pub fn addr(&self) -> SocketAddr {
|
|
||||||
self.handle.addr()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Handle {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
const CORE: &'static &'static str = &"Core { .. }";
|
|
||||||
const SERVER: &'static &'static str = &"Box<Future<Item = (), Error = ()>>";
|
|
||||||
|
|
||||||
f.debug_struct("Handle").field("reactor", CORE)
|
|
||||||
.field("handle", &self.handle)
|
|
||||||
.field("server", SERVER)
|
|
||||||
.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[doc(hidden)]
|
|
||||||
pub fn listen<S, Req, Resp, E>(new_service: S, addr: SocketAddr, options: Options)
|
|
||||||
-> io::Result<Handle>
|
|
||||||
where S: NewService<Request = Result<Req, bincode::Error>,
|
|
||||||
Response = Response<Resp, E>,
|
|
||||||
Error = io::Error> + 'static,
|
|
||||||
<S::Instance as Service>::Future: Send + 'static,
|
|
||||||
S::Response: Send,
|
|
||||||
S::Error: Send,
|
|
||||||
Req: Deserialize + 'static,
|
|
||||||
Resp: Serialize + 'static,
|
|
||||||
E: Serialize + 'static
|
|
||||||
{
|
|
||||||
let new_service = NewThreadService::new(new_service, options.thread_pool);
|
|
||||||
let reactor = reactor::Core::new()?;
|
|
||||||
let (handle, server) =
|
|
||||||
future::server::listen(new_service, addr, &reactor.handle(), options.opts)?;
|
|
||||||
let server = Box::new(server);
|
|
||||||
Ok(Handle {
|
|
||||||
reactor: reactor,
|
|
||||||
handle: handle,
|
|
||||||
server: server,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A service that uses a thread pool.
|
|
||||||
struct NewThreadService<S> where S: NewService {
|
|
||||||
new_service: S,
|
|
||||||
sender: Sender<ServiceTask<<S::Instance as Service>::Future>>,
|
|
||||||
_pool: ThreadPool<ServiceTask<<S::Instance as Service>::Future>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A service that runs by executing request handlers in a thread pool.
|
|
||||||
struct ThreadService<S> where S: Service {
|
|
||||||
service: S,
|
|
||||||
sender: Sender<ServiceTask<S::Future>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A task that handles a single request.
|
|
||||||
struct ServiceTask<F> where F: Future {
|
|
||||||
future: F,
|
|
||||||
tx: oneshot::Sender<Result<F::Item, F::Error>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S> NewThreadService<S>
|
|
||||||
where S: NewService,
|
|
||||||
<S::Instance as Service>::Future: Send + 'static,
|
|
||||||
S::Response: Send,
|
|
||||||
S::Error: Send,
|
|
||||||
{
|
|
||||||
/// Create a NewThreadService by wrapping another service.
|
|
||||||
fn new(new_service: S, pool: thread_pool::Builder) -> Self {
|
|
||||||
let (sender, _pool) = pool.build();
|
|
||||||
NewThreadService { new_service, sender, _pool }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S> NewService for NewThreadService<S>
|
|
||||||
where S: NewService,
|
|
||||||
<S::Instance as Service>::Future: Send + 'static,
|
|
||||||
S::Response: Send,
|
|
||||||
S::Error: Send,
|
|
||||||
{
|
|
||||||
type Request = S::Request;
|
|
||||||
type Response = S::Response;
|
|
||||||
type Error = S::Error;
|
|
||||||
type Instance = ThreadService<S::Instance>;
|
|
||||||
|
|
||||||
fn new_service(&self) -> io::Result<Self::Instance> {
|
|
||||||
Ok(ThreadService {
|
|
||||||
service: self.new_service.new_service()?,
|
|
||||||
sender: self.sender.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<F> Task for ServiceTask<F>
|
|
||||||
where F: Future + Send + 'static,
|
|
||||||
F::Item: Send,
|
|
||||||
F::Error: Send,
|
|
||||||
{
|
|
||||||
fn run(self) {
|
|
||||||
// Don't care if sending fails. It just means the request is no longer
|
|
||||||
// being handled (I think).
|
|
||||||
let _ = self.tx.send(self.future.wait());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S> Service for ThreadService<S>
|
|
||||||
where S: Service,
|
|
||||||
S::Future: Send + 'static,
|
|
||||||
S::Response: Send,
|
|
||||||
S::Error: Send,
|
|
||||||
{
|
|
||||||
type Request = S::Request;
|
|
||||||
type Response = S::Response;
|
|
||||||
type Error = S::Error;
|
|
||||||
type Future =
|
|
||||||
futures::AndThen<
|
|
||||||
futures::MapErr<
|
|
||||||
oneshot::Receiver<Result<Self::Response, Self::Error>>,
|
|
||||||
fn(oneshot::Canceled) -> Self::Error>,
|
|
||||||
Result<Self::Response, Self::Error>,
|
|
||||||
fn(Result<Self::Response, Self::Error>) -> Result<Self::Response, Self::Error>>;
|
|
||||||
|
|
||||||
fn call(&self, request: Self::Request) -> Self::Future {
|
|
||||||
let (tx, rx) = oneshot::channel();
|
|
||||||
self.sender.send(ServiceTask {
|
|
||||||
future: self.service.call(request),
|
|
||||||
tx: tx,
|
|
||||||
}).unwrap();
|
|
||||||
rx.map_err(unreachable as _).and_then(ident)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn unreachable<T, U>(t: T) -> U
|
|
||||||
where T: fmt::Display
|
|
||||||
{
|
|
||||||
unreachable!(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn ident<T>(t: T) -> T {
|
|
||||||
t
|
|
||||||
}
|
|
||||||
|
|
||||||
51
src/tls.rs
51
src/tls.rs
@@ -1,51 +0,0 @@
|
|||||||
/// TLS-specific functionality for clients.
|
|
||||||
pub mod client {
|
|
||||||
use native_tls::{Error, TlsConnector};
|
|
||||||
use std::fmt;
|
|
||||||
|
|
||||||
/// TLS context for client
|
|
||||||
pub struct Context {
|
|
||||||
/// Domain to connect to
|
|
||||||
pub domain: String,
|
|
||||||
/// TLS connector
|
|
||||||
pub tls_connector: TlsConnector,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Context {
|
|
||||||
/// Try to construct a new `Context`.
|
|
||||||
///
|
|
||||||
/// The provided domain will be used for both
|
|
||||||
/// [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) and certificate hostname
|
|
||||||
/// validation.
|
|
||||||
pub fn new<S: Into<String>>(domain: S) -> Result<Self, Error> {
|
|
||||||
Ok(Context {
|
|
||||||
domain: domain.into(),
|
|
||||||
tls_connector: TlsConnector::builder()?.build()?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Construct a new `Context` using the provided domain and `TlsConnector`
|
|
||||||
///
|
|
||||||
/// The domain will be used for both
|
|
||||||
/// [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication) and certificate hostname
|
|
||||||
/// validation.
|
|
||||||
pub fn from_connector<S: Into<String>>(domain: S, tls_connector: TlsConnector) -> Self {
|
|
||||||
Context {
|
|
||||||
domain: domain.into(),
|
|
||||||
tls_connector: tls_connector,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Context {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
|
||||||
const TLS_CONNECTOR: &'static &'static str = &"TlsConnector { .. }";
|
|
||||||
f.debug_struct("Context")
|
|
||||||
.field("domain", &self.domain)
|
|
||||||
.field("tls_connector", TLS_CONNECTOR)
|
|
||||||
.finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
178
src/util.rs
178
src/util.rs
@@ -1,178 +0,0 @@
|
|||||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the MIT License, <LICENSE or http://opensource.org/licenses/MIT>.
|
|
||||||
// This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
use futures::{Future, IntoFuture, Poll};
|
|
||||||
use futures::stream::Stream;
|
|
||||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
|
||||||
use std::{fmt, io, mem};
|
|
||||||
use std::error::Error;
|
|
||||||
use std::net::{SocketAddr, ToSocketAddrs};
|
|
||||||
|
|
||||||
/// A bottom type that impls `Error`, `Serialize`, and `Deserialize`. It is impossible to
|
|
||||||
/// instantiate this type.
|
|
||||||
#[allow(unreachable_code)]
|
|
||||||
pub struct Never(!);
|
|
||||||
|
|
||||||
impl fmt::Debug for Never {
|
|
||||||
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Error for Never {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for Never {
|
|
||||||
fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Future for Never {
|
|
||||||
type Item = Never;
|
|
||||||
type Error = Never;
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Stream for Never {
|
|
||||||
type Item = Never;
|
|
||||||
type Error = Never;
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Serialize for Never {
|
|
||||||
fn serialize<S>(&self, _: S) -> Result<S::Ok, S::Error>
|
|
||||||
where S: Serializer
|
|
||||||
{
|
|
||||||
self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Please don't try to deserialize this. :(
|
|
||||||
impl Deserialize for Never {
|
|
||||||
fn deserialize<D>(_: D) -> Result<Self, D::Error>
|
|
||||||
where D: Deserializer
|
|
||||||
{
|
|
||||||
panic!("Never cannot be instantiated!");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A `String` that impls `std::error::Error`. Useful for quick-and-dirty error propagation.
|
|
||||||
#[derive(Debug, Serialize, Deserialize)]
|
|
||||||
pub struct Message(pub String);
|
|
||||||
|
|
||||||
impl Error for Message {
|
|
||||||
fn description(&self) -> &str {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for Message {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
fmt::Display::fmt(&self.0, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<S: Into<String>> From<S> for Message {
|
|
||||||
fn from(s: S) -> Self {
|
|
||||||
Message(s.into())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/// Provides a utility method for more ergonomically parsing a `SocketAddr` when only one is
|
|
||||||
/// needed.
|
|
||||||
pub trait FirstSocketAddr: ToSocketAddrs {
|
|
||||||
/// Returns the first resolved `SocketAddr`, if one exists.
|
|
||||||
fn try_first_socket_addr(&self) -> io::Result<SocketAddr> {
|
|
||||||
if let Some(a) = self.to_socket_addrs()?.next() {
|
|
||||||
Ok(a)
|
|
||||||
} else {
|
|
||||||
Err(io::Error::new(io::ErrorKind::AddrNotAvailable,
|
|
||||||
"`ToSocketAddrs::to_socket_addrs` returned an empty iterator."))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the first resolved `SocketAddr` or panics otherwise.
|
|
||||||
fn first_socket_addr(&self) -> SocketAddr {
|
|
||||||
self.try_first_socket_addr().unwrap()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<A: ToSocketAddrs> FirstSocketAddr for A {}
|
|
||||||
|
|
||||||
/// Creates a new future which will eventually be the same as the one created
|
|
||||||
/// by calling the closure provided with the arguments provided.
|
|
||||||
///
|
|
||||||
/// The provided closure is only run once the future has a callback scheduled
|
|
||||||
/// on it, otherwise the callback never runs. Once run, however, this future is
|
|
||||||
/// the same as the one the closure creates.
|
|
||||||
pub fn lazy<F, A, R>(f: F, args: A) -> Lazy<F, A, R>
|
|
||||||
where F: FnOnce(A) -> R,
|
|
||||||
R: IntoFuture
|
|
||||||
{
|
|
||||||
Lazy {
|
|
||||||
inner: _Lazy::First(f, args),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A future which defers creation of the actual future until a callback is
|
|
||||||
/// scheduled.
|
|
||||||
///
|
|
||||||
/// This is created by the `lazy` function.
|
|
||||||
#[derive(Debug)]
|
|
||||||
#[must_use = "futures do nothing unless polled"]
|
|
||||||
pub struct Lazy<F, A, R: IntoFuture> {
|
|
||||||
inner: _Lazy<F, A, R::Future>,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug)]
|
|
||||||
enum _Lazy<F, A, R> {
|
|
||||||
First(F, A),
|
|
||||||
Second(R),
|
|
||||||
Moved,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<F, A, R> Lazy<F, A, R>
|
|
||||||
where F: FnOnce(A) -> R,
|
|
||||||
R: IntoFuture,
|
|
||||||
{
|
|
||||||
fn get(&mut self) -> &mut R::Future {
|
|
||||||
match self.inner {
|
|
||||||
_Lazy::First(..) => {}
|
|
||||||
_Lazy::Second(ref mut f) => return f,
|
|
||||||
_Lazy::Moved => panic!(), // can only happen if `f()` panics
|
|
||||||
}
|
|
||||||
match mem::replace(&mut self.inner, _Lazy::Moved) {
|
|
||||||
_Lazy::First(f, args) => self.inner = _Lazy::Second(f(args).into_future()),
|
|
||||||
_ => panic!(), // we already found First
|
|
||||||
}
|
|
||||||
match self.inner {
|
|
||||||
_Lazy::Second(ref mut f) => f,
|
|
||||||
_ => panic!(), // we just stored Second
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<F, A, R> Future for Lazy<F, A, R>
|
|
||||||
where F: FnOnce(A) -> R,
|
|
||||||
R: IntoFuture,
|
|
||||||
{
|
|
||||||
type Item = R::Item;
|
|
||||||
type Error = R::Error;
|
|
||||||
|
|
||||||
fn poll(&mut self) -> Poll<R::Item, R::Error> {
|
|
||||||
self.get().poll()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
119
tarpc/Cargo.toml
Normal file
119
tarpc/Cargo.toml
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
[package]
|
||||||
|
name = "tarpc"
|
||||||
|
version = "0.34.0"
|
||||||
|
rust-version = "1.58.0"
|
||||||
|
authors = [
|
||||||
|
"Adam Wright <adam.austin.wright@gmail.com>",
|
||||||
|
"Tim Kuehn <timothy.j.kuehn@gmail.com>",
|
||||||
|
]
|
||||||
|
edition = "2021"
|
||||||
|
license = "MIT"
|
||||||
|
documentation = "https://docs.rs/tarpc"
|
||||||
|
homepage = "https://github.com/google/tarpc"
|
||||||
|
repository = "https://github.com/google/tarpc"
|
||||||
|
keywords = ["rpc", "network", "server", "api", "microservices"]
|
||||||
|
categories = ["asynchronous", "network-programming"]
|
||||||
|
readme = "README.md"
|
||||||
|
description = "An RPC framework for Rust with a focus on ease of use."
|
||||||
|
|
||||||
|
[features]
|
||||||
|
default = []
|
||||||
|
|
||||||
|
serde1 = ["tarpc-plugins/serde1", "serde", "serde/derive", "serde/rc"]
|
||||||
|
tokio1 = ["tokio/rt"]
|
||||||
|
serde-transport = ["serde1", "tokio1", "tokio-serde", "tokio-util/codec"]
|
||||||
|
serde-transport-json = ["tokio-serde/json"]
|
||||||
|
serde-transport-bincode = ["tokio-serde/bincode"]
|
||||||
|
tcp = ["tokio/net"]
|
||||||
|
unix = ["tokio/net"]
|
||||||
|
|
||||||
|
full = [
|
||||||
|
"serde1",
|
||||||
|
"tokio1",
|
||||||
|
"serde-transport",
|
||||||
|
"serde-transport-json",
|
||||||
|
"serde-transport-bincode",
|
||||||
|
"tcp",
|
||||||
|
"unix",
|
||||||
|
]
|
||||||
|
|
||||||
|
[badges]
|
||||||
|
travis-ci = { repository = "google/tarpc" }
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
anyhow = "1.0"
|
||||||
|
fnv = "1.0"
|
||||||
|
futures = "0.3"
|
||||||
|
humantime = "2.0"
|
||||||
|
pin-project = "1.0"
|
||||||
|
rand = "0.8"
|
||||||
|
serde = { optional = true, version = "1.0", features = ["derive"] }
|
||||||
|
static_assertions = "1.1.0"
|
||||||
|
tarpc-plugins = { path = "../plugins", version = "0.13" }
|
||||||
|
thiserror = "1.0"
|
||||||
|
tokio = { version = "1", features = ["time"] }
|
||||||
|
tokio-util = { version = "0.7.3", features = ["time"] }
|
||||||
|
tokio-serde = { optional = true, version = "0.8" }
|
||||||
|
tracing = { version = "0.1", default-features = false, features = [
|
||||||
|
"attributes",
|
||||||
|
"log",
|
||||||
|
] }
|
||||||
|
tracing-opentelemetry = { version = "0.18.0", default-features = false }
|
||||||
|
opentelemetry = { version = "0.18.0", default-features = false }
|
||||||
|
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
assert_matches = "1.4"
|
||||||
|
bincode = "1.3"
|
||||||
|
bytes = { version = "1", features = ["serde"] }
|
||||||
|
flate2 = "1.0"
|
||||||
|
futures-test = "0.3"
|
||||||
|
opentelemetry = { version = "0.18.0", default-features = false, features = [
|
||||||
|
"rt-tokio",
|
||||||
|
] }
|
||||||
|
opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
|
||||||
|
pin-utils = "0.1.0-alpha"
|
||||||
|
serde_bytes = "0.11"
|
||||||
|
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||||
|
tokio = { version = "1", features = ["full", "test-util", "tracing"] }
|
||||||
|
console-subscriber = "0.1"
|
||||||
|
tokio-serde = { version = "0.8", features = ["json", "bincode"] }
|
||||||
|
trybuild = "1.0"
|
||||||
|
tokio-rustls = "0.23"
|
||||||
|
rustls-pemfile = "1.0"
|
||||||
|
|
||||||
|
[package.metadata.docs.rs]
|
||||||
|
all-features = true
|
||||||
|
rustdoc-args = ["--cfg", "docsrs"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "compression"
|
||||||
|
required-features = ["serde-transport", "tcp"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "tracing"
|
||||||
|
required-features = ["full"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "readme"
|
||||||
|
required-features = ["full"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "pubsub"
|
||||||
|
required-features = ["full"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "custom_transport"
|
||||||
|
required-features = ["serde1", "tokio1", "serde-transport"]
|
||||||
|
|
||||||
|
[[example]]
|
||||||
|
name = "tls_over_tcp"
|
||||||
|
required-features = ["full"]
|
||||||
|
|
||||||
|
[[test]]
|
||||||
|
name = "service_functional"
|
||||||
|
required-features = ["serde-transport"]
|
||||||
|
|
||||||
|
[[test]]
|
||||||
|
name = "dataservice"
|
||||||
|
required-features = ["serde-transport", "tcp"]
|
||||||
9
tarpc/LICENSE
Normal file
9
tarpc/LICENSE
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright 2016 Google Inc. All Rights Reserved.
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
1
tarpc/README.md
Symbolic link
1
tarpc/README.md
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../README.md
|
||||||
11
tarpc/examples/certs/eddsa/client.cert
Normal file
11
tarpc/examples/certs/eddsa/client.cert
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBlDCCAUagAwIBAgICAxUwBQYDK2VwMC4xLDAqBgNVBAMMI3Bvbnl0b3duIEVk
|
||||||
|
RFNBIGxldmVsIDIgaW50ZXJtZWRpYXRlMB4XDTIzMDMxNzEwMTEwNFoXDTI4MDkw
|
||||||
|
NjEwMTEwNFowGjEYMBYGA1UEAwwPcG9ueXRvd24gY2xpZW50MCowBQYDK2VwAyEA
|
||||||
|
NTKuLume19IhJfEFd/5OZUuYDKZH6xvy4AGver17OoejgZswgZgwDAYDVR0TAQH/
|
||||||
|
BAIwADALBgNVHQ8EBAMCBsAwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwIwHQYDVR0O
|
||||||
|
BBYEFDjdrlMu4tyw5MHtbg7WnzSGRBpFMEQGA1UdIwQ9MDuAFHIl7fHKWP6/l8FE
|
||||||
|
fI2YEIM3oHxKoSCkHjAcMRowGAYDVQQDDBFwb255dG93biBFZERTQSBDQYIBezAF
|
||||||
|
BgMrZXADQQCaahfj/QLxoCOpvl6y0ZQ9CpojPqBnxV3460j5nUOp040Va2MpF137
|
||||||
|
izCBY7LwgUE/YG6E+kH30G4jMEnqVEYK
|
||||||
|
-----END CERTIFICATE-----
|
||||||
19
tarpc/examples/certs/eddsa/client.chain
Normal file
19
tarpc/examples/certs/eddsa/client.chain
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBeDCCASqgAwIBAgIBezAFBgMrZXAwHDEaMBgGA1UEAwwRcG9ueXRvd24gRWRE
|
||||||
|
U0EgQ0EwHhcNMjMwMzE3MTAxMTA0WhcNMzMwMzE0MTAxMTA0WjAuMSwwKgYDVQQD
|
||||||
|
DCNwb255dG93biBFZERTQSBsZXZlbCAyIGludGVybWVkaWF0ZTAqMAUGAytlcAMh
|
||||||
|
AEFsAexz4x2R4k4+PnTbvRVn0r3F/qw/zVnNBxfGcoEpo38wfTAdBgNVHQ4EFgQU
|
||||||
|
ciXt8cpY/r+XwUR8jZgQgzegfEowIAYDVR0lAQH/BBYwFAYIKwYBBQUHAwEGCCsG
|
||||||
|
AQUFBwMCMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgH+MB8GA1UdIwQYMBaAFKYU
|
||||||
|
oLdKeY7mp7QgMZKrkVtSWYBKMAUGAytlcANBAHVpNpCV8nu4fkH3Smikx5A9qtHc
|
||||||
|
zgLIyp+wrF1a4YSa6sfTvuQmJd5aF23OXgq5grCOPXtdpHO50Mx5Qy74zQg=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBTDCB/6ADAgECAhRZLuF0TWjDs/31OO8VeKHkNIJQaDAFBgMrZXAwHDEaMBgG
|
||||||
|
A1UEAwwRcG9ueXRvd24gRWREU0EgQ0EwHhcNMjMwMzE3MTAxMTA0WhcNMzMwMzE0
|
||||||
|
MTAxMTA0WjAcMRowGAYDVQQDDBFwb255dG93biBFZERTQSBDQTAqMAUGAytlcAMh
|
||||||
|
ABRPZ4TiuBE8CqAFByZvqpMo/unjnnryfG2AkkWGXpa3o1MwUTAdBgNVHQ4EFgQU
|
||||||
|
phSgt0p5juantCAxkquRW1JZgEowHwYDVR0jBBgwFoAUphSgt0p5juantCAxkquR
|
||||||
|
W1JZgEowDwYDVR0TAQH/BAUwAwEB/zAFBgMrZXADQQB29o8erJA0/a8/xOHilOCC
|
||||||
|
t/s5wPHHnS5NSKx/m2N2nRn3zPxEnETlrAmGulJoeKOx8OblwmPi9rBT2K+QY2UB
|
||||||
|
-----END CERTIFICATE-----
|
||||||
3
tarpc/examples/certs/eddsa/client.key
Normal file
3
tarpc/examples/certs/eddsa/client.key
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MC4CAQAwBQYDK2VwBCIEIIJX9ThTHpVS1SNZb6HP4myg4fRInIVGunTRdgnc+weH
|
||||||
|
-----END PRIVATE KEY-----
|
||||||
12
tarpc/examples/certs/eddsa/end.cert
Normal file
12
tarpc/examples/certs/eddsa/end.cert
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBuDCCAWqgAwIBAgICAcgwBQYDK2VwMC4xLDAqBgNVBAMMI3Bvbnl0b3duIEVk
|
||||||
|
RFNBIGxldmVsIDIgaW50ZXJtZWRpYXRlMB4XDTIzMDMxNzEwMTEwNFoXDTI4MDkw
|
||||||
|
NjEwMTEwNFowGTEXMBUGA1UEAwwOdGVzdHNlcnZlci5jb20wKjAFBgMrZXADIQDc
|
||||||
|
RLl3/N2tPoWnzBV3noVn/oheEl8IUtiY11Vg/QXTUKOBwDCBvTAMBgNVHRMBAf8E
|
||||||
|
AjAAMAsGA1UdDwQEAwIGwDAdBgNVHQ4EFgQUk7U2mnxedNWBAH84BsNy5si3ZQow
|
||||||
|
RAYDVR0jBD0wO4AUciXt8cpY/r+XwUR8jZgQgzegfEqhIKQeMBwxGjAYBgNVBAMM
|
||||||
|
EXBvbnl0b3duIEVkRFNBIENBggF7MDsGA1UdEQQ0MDKCDnRlc3RzZXJ2ZXIuY29t
|
||||||
|
ghVzZWNvbmQudGVzdHNlcnZlci5jb22CCWxvY2FsaG9zdDAFBgMrZXADQQCFWIcF
|
||||||
|
9FiztCuUNzgXDNu5kshuflt0RjkjWpGlWzQjGoYM2IvYhNVPeqnCiY92gqwDSBtq
|
||||||
|
amD2TBup4eNUCsQB
|
||||||
|
-----END CERTIFICATE-----
|
||||||
19
tarpc/examples/certs/eddsa/end.chain
Normal file
19
tarpc/examples/certs/eddsa/end.chain
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBeDCCASqgAwIBAgIBezAFBgMrZXAwHDEaMBgGA1UEAwwRcG9ueXRvd24gRWRE
|
||||||
|
U0EgQ0EwHhcNMjMwMzE3MTAxMTA0WhcNMzMwMzE0MTAxMTA0WjAuMSwwKgYDVQQD
|
||||||
|
DCNwb255dG93biBFZERTQSBsZXZlbCAyIGludGVybWVkaWF0ZTAqMAUGAytlcAMh
|
||||||
|
AEFsAexz4x2R4k4+PnTbvRVn0r3F/qw/zVnNBxfGcoEpo38wfTAdBgNVHQ4EFgQU
|
||||||
|
ciXt8cpY/r+XwUR8jZgQgzegfEowIAYDVR0lAQH/BBYwFAYIKwYBBQUHAwEGCCsG
|
||||||
|
AQUFBwMCMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgH+MB8GA1UdIwQYMBaAFKYU
|
||||||
|
oLdKeY7mp7QgMZKrkVtSWYBKMAUGAytlcANBAHVpNpCV8nu4fkH3Smikx5A9qtHc
|
||||||
|
zgLIyp+wrF1a4YSa6sfTvuQmJd5aF23OXgq5grCOPXtdpHO50Mx5Qy74zQg=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIBTDCB/6ADAgECAhRZLuF0TWjDs/31OO8VeKHkNIJQaDAFBgMrZXAwHDEaMBgG
|
||||||
|
A1UEAwwRcG9ueXRvd24gRWREU0EgQ0EwHhcNMjMwMzE3MTAxMTA0WhcNMzMwMzE0
|
||||||
|
MTAxMTA0WjAcMRowGAYDVQQDDBFwb255dG93biBFZERTQSBDQTAqMAUGAytlcAMh
|
||||||
|
ABRPZ4TiuBE8CqAFByZvqpMo/unjnnryfG2AkkWGXpa3o1MwUTAdBgNVHQ4EFgQU
|
||||||
|
phSgt0p5juantCAxkquRW1JZgEowHwYDVR0jBBgwFoAUphSgt0p5juantCAxkquR
|
||||||
|
W1JZgEowDwYDVR0TAQH/BAUwAwEB/zAFBgMrZXADQQB29o8erJA0/a8/xOHilOCC
|
||||||
|
t/s5wPHHnS5NSKx/m2N2nRn3zPxEnETlrAmGulJoeKOx8OblwmPi9rBT2K+QY2UB
|
||||||
|
-----END CERTIFICATE-----
|
||||||
3
tarpc/examples/certs/eddsa/end.key
Normal file
3
tarpc/examples/certs/eddsa/end.key
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MC4CAQAwBQYDK2VwBCIEIMU6xGVe8JTpZ3bN/wajHfw6pEHt0Rd7wPBxds9eEFy2
|
||||||
|
-----END PRIVATE KEY-----
|
||||||
138
tarpc/examples/compression.rs
Normal file
138
tarpc/examples/compression.rs
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use flate2::{read::DeflateDecoder, write::DeflateEncoder, Compression};
|
||||||
|
use futures::{prelude::*, Sink, SinkExt, Stream, StreamExt, TryStreamExt};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_bytes::ByteBuf;
|
||||||
|
use std::{io, io::Read, io::Write};
|
||||||
|
use tarpc::{
|
||||||
|
client, context,
|
||||||
|
serde_transport::tcp,
|
||||||
|
server::{BaseChannel, Channel},
|
||||||
|
tokio_serde::formats::Bincode,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Type of compression that should be enabled on the request. The transport is free to ignore this.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone, Copy, Deserialize, Serialize)]
|
||||||
|
pub enum CompressionAlgorithm {
|
||||||
|
Deflate,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Deserialize, Serialize)]
|
||||||
|
pub enum CompressedMessage<T> {
|
||||||
|
Uncompressed(T),
|
||||||
|
Compressed {
|
||||||
|
algorithm: CompressionAlgorithm,
|
||||||
|
payload: ByteBuf,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize, Serialize)]
|
||||||
|
enum CompressionType {
|
||||||
|
Uncompressed,
|
||||||
|
Compressed,
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn compress<T>(message: T) -> io::Result<CompressedMessage<T>>
|
||||||
|
where
|
||||||
|
T: Serialize,
|
||||||
|
{
|
||||||
|
let message = serialize(message)?;
|
||||||
|
let mut encoder = DeflateEncoder::new(Vec::new(), Compression::default());
|
||||||
|
encoder.write_all(&message).unwrap();
|
||||||
|
let compressed = encoder.finish()?;
|
||||||
|
Ok(CompressedMessage::Compressed {
|
||||||
|
algorithm: CompressionAlgorithm::Deflate,
|
||||||
|
payload: ByteBuf::from(compressed),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn decompress<T>(message: CompressedMessage<T>) -> io::Result<T>
|
||||||
|
where
|
||||||
|
for<'a> T: Deserialize<'a>,
|
||||||
|
{
|
||||||
|
match message {
|
||||||
|
CompressedMessage::Compressed { algorithm, payload } => {
|
||||||
|
if algorithm != CompressionAlgorithm::Deflate {
|
||||||
|
return Err(io::Error::new(
|
||||||
|
io::ErrorKind::InvalidData,
|
||||||
|
format!("Compression algorithm {algorithm:?} not supported"),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
let mut deflater = DeflateDecoder::new(payload.as_slice());
|
||||||
|
let mut payload = ByteBuf::new();
|
||||||
|
deflater.read_to_end(&mut payload)?;
|
||||||
|
let message = deserialize(payload)?;
|
||||||
|
Ok(message)
|
||||||
|
}
|
||||||
|
CompressedMessage::Uncompressed(message) => Ok(message),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn serialize<T: Serialize>(t: T) -> io::Result<ByteBuf> {
|
||||||
|
bincode::serialize(&t)
|
||||||
|
.map(ByteBuf::from)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn deserialize<D>(message: ByteBuf) -> io::Result<D>
|
||||||
|
where
|
||||||
|
for<'a> D: Deserialize<'a>,
|
||||||
|
{
|
||||||
|
bincode::deserialize(message.as_ref()).map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_compression<In, Out>(
|
||||||
|
transport: impl Stream<Item = io::Result<CompressedMessage<In>>>
|
||||||
|
+ Sink<CompressedMessage<Out>, Error = io::Error>,
|
||||||
|
) -> impl Stream<Item = io::Result<In>> + Sink<Out, Error = io::Error>
|
||||||
|
where
|
||||||
|
Out: Serialize,
|
||||||
|
for<'a> In: Deserialize<'a>,
|
||||||
|
{
|
||||||
|
transport.with(compress).and_then(decompress)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait World {
|
||||||
|
async fn hello(name: String) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
struct HelloServer;
|
||||||
|
|
||||||
|
impl World for HelloServer {
|
||||||
|
async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
format!("Hey, {name}!")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn spawn(fut: impl Future<Output = ()> + Send + 'static) {
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let mut incoming = tcp::listen("localhost:0", Bincode::default).await?;
|
||||||
|
let addr = incoming.local_addr();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
let transport = incoming.next().await.unwrap().unwrap();
|
||||||
|
BaseChannel::with_defaults(add_compression(transport))
|
||||||
|
.execute(HelloServer.serve())
|
||||||
|
.for_each(spawn)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
|
||||||
|
let transport = tcp::connect(addr, Bincode::default).await?;
|
||||||
|
let client = WorldClient::new(client::Config::default(), add_compression(transport)).spawn();
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"{}",
|
||||||
|
client.hello(context::current(), "friend".into()).await?
|
||||||
|
);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
59
tarpc/examples/custom_transport.rs
Normal file
59
tarpc/examples/custom_transport.rs
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use futures::prelude::*;
|
||||||
|
use tarpc::context::Context;
|
||||||
|
use tarpc::serde_transport as transport;
|
||||||
|
use tarpc::server::{BaseChannel, Channel};
|
||||||
|
use tarpc::tokio_serde::formats::Bincode;
|
||||||
|
use tarpc::tokio_util::codec::length_delimited::LengthDelimitedCodec;
|
||||||
|
use tokio::net::{UnixListener, UnixStream};
|
||||||
|
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait PingService {
|
||||||
|
async fn ping();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct Service;
|
||||||
|
|
||||||
|
impl PingService for Service {
|
||||||
|
async fn ping(self, _: Context) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let bind_addr = "/tmp/tarpc_on_unix_example.sock";
|
||||||
|
|
||||||
|
let _ = std::fs::remove_file(bind_addr);
|
||||||
|
|
||||||
|
let listener = UnixListener::bind(bind_addr).unwrap();
|
||||||
|
let codec_builder = LengthDelimitedCodec::builder();
|
||||||
|
async fn spawn(fut: impl Future<Output = ()> + Send + 'static) {
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
let (conn, _addr) = listener.accept().await.unwrap();
|
||||||
|
let framed = codec_builder.new_framed(conn);
|
||||||
|
let transport = transport::new(framed, Bincode::default());
|
||||||
|
|
||||||
|
let fut = BaseChannel::with_defaults(transport)
|
||||||
|
.execute(Service.serve())
|
||||||
|
.for_each(spawn);
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let conn = UnixStream::connect(bind_addr).await?;
|
||||||
|
let transport = transport::new(codec_builder.new_framed(conn), Bincode::default());
|
||||||
|
PingServiceClient::new(Default::default(), transport)
|
||||||
|
.spawn()
|
||||||
|
.ping(tarpc::context::current())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
362
tarpc/examples/pubsub.rs
Normal file
362
tarpc/examples/pubsub.rs
Normal file
@@ -0,0 +1,362 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
/// - The PubSub server sets up TCP listeners on 2 ports, the "subscriber" port and the "publisher"
|
||||||
|
/// port. Because both publishers and subscribers initiate their connections to the PubSub
|
||||||
|
/// server, the server requires no prior knowledge of either publishers or subscribers.
|
||||||
|
///
|
||||||
|
/// - Subscribers connect to the server on the server's "subscriber" port. Once a connection is
|
||||||
|
/// established, the server acts as the client of the Subscriber service, initially requesting
|
||||||
|
/// the topics the subscriber is interested in, and subsequently sending topical messages to the
|
||||||
|
/// subscriber.
|
||||||
|
///
|
||||||
|
/// - Publishers connect to the server on the "publisher" port and, once connected, they send
|
||||||
|
/// topical messages via Publisher service to the server. The server then broadcasts each
|
||||||
|
/// messages to all clients subscribed to the topic of that message.
|
||||||
|
///
|
||||||
|
/// Subscriber Publisher PubSub Server
|
||||||
|
/// T1 | | |
|
||||||
|
/// T2 |-----Connect------------------------------------------------------>|
|
||||||
|
/// T3 | | |
|
||||||
|
/// T2 |<-------------------------------------------------------Topics-----|
|
||||||
|
/// T2 |-----(OK) Topics-------------------------------------------------->|
|
||||||
|
/// T3 | | |
|
||||||
|
/// T4 | |-----Connect-------------------->|
|
||||||
|
/// T5 | | |
|
||||||
|
/// T6 | |-----Publish-------------------->|
|
||||||
|
/// T7 | | |
|
||||||
|
/// T8 |<------------------------------------------------------Receive-----|
|
||||||
|
/// T9 |-----(OK) Receive------------------------------------------------->|
|
||||||
|
/// T10 | | |
|
||||||
|
/// T11 | |<--------------(OK) Publish------|
|
||||||
|
use anyhow::anyhow;
|
||||||
|
use futures::{
|
||||||
|
channel::oneshot,
|
||||||
|
future::{self, AbortHandle},
|
||||||
|
prelude::*,
|
||||||
|
};
|
||||||
|
use publisher::Publisher as _;
|
||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
env,
|
||||||
|
error::Error,
|
||||||
|
io,
|
||||||
|
net::SocketAddr,
|
||||||
|
sync::{Arc, Mutex, RwLock},
|
||||||
|
};
|
||||||
|
use subscriber::Subscriber as _;
|
||||||
|
use tarpc::{
|
||||||
|
client, context,
|
||||||
|
serde_transport::tcp,
|
||||||
|
server::{self, Channel},
|
||||||
|
tokio_serde::formats::Json,
|
||||||
|
};
|
||||||
|
use tokio::net::ToSocketAddrs;
|
||||||
|
use tracing::info;
|
||||||
|
use tracing_subscriber::prelude::*;
|
||||||
|
|
||||||
|
pub mod subscriber {
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait Subscriber {
|
||||||
|
async fn topics() -> Vec<String>;
|
||||||
|
async fn receive(topic: String, message: String);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub mod publisher {
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait Publisher {
|
||||||
|
async fn publish(topic: String, message: String);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
struct Subscriber {
|
||||||
|
local_addr: SocketAddr,
|
||||||
|
topics: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl subscriber::Subscriber for Subscriber {
|
||||||
|
async fn topics(self, _: context::Context) -> Vec<String> {
|
||||||
|
self.topics.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn receive(self, _: context::Context, topic: String, message: String) {
|
||||||
|
info!(local_addr = %self.local_addr, %topic, %message, "ReceivedMessage")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct SubscriberHandle(AbortHandle);
|
||||||
|
|
||||||
|
impl Drop for SubscriberHandle {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.0.abort();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Subscriber {
|
||||||
|
async fn connect(
|
||||||
|
publisher_addr: impl ToSocketAddrs,
|
||||||
|
topics: Vec<String>,
|
||||||
|
) -> anyhow::Result<SubscriberHandle> {
|
||||||
|
let publisher = tcp::connect(publisher_addr, Json::default).await?;
|
||||||
|
let local_addr = publisher.local_addr()?;
|
||||||
|
let mut handler = server::BaseChannel::with_defaults(publisher).requests();
|
||||||
|
let subscriber = Subscriber { local_addr, topics };
|
||||||
|
// The first request is for the topics being subscribed to.
|
||||||
|
match handler.next().await {
|
||||||
|
Some(init_topics) => init_topics?.execute(subscriber.clone().serve()).await,
|
||||||
|
None => {
|
||||||
|
return Err(anyhow!(
|
||||||
|
"[{}] Server never initialized the subscriber.",
|
||||||
|
local_addr
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let (handler, abort_handle) =
|
||||||
|
future::abortable(handler.execute(subscriber.serve()).for_each(spawn));
|
||||||
|
tokio::spawn(async move {
|
||||||
|
match handler.await {
|
||||||
|
Ok(()) | Err(future::Aborted) => info!(?local_addr, "subscriber shutdown."),
|
||||||
|
}
|
||||||
|
});
|
||||||
|
Ok(SubscriberHandle(abort_handle))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct Subscription {
|
||||||
|
topics: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
struct Publisher {
|
||||||
|
clients: Arc<Mutex<HashMap<SocketAddr, Subscription>>>,
|
||||||
|
subscriptions: Arc<RwLock<HashMap<String, HashMap<SocketAddr, subscriber::SubscriberClient>>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PublisherAddrs {
|
||||||
|
publisher: SocketAddr,
|
||||||
|
subscriptions: SocketAddr,
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn spawn(fut: impl Future<Output = ()> + Send + 'static) {
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Publisher {
|
||||||
|
async fn start(self) -> io::Result<PublisherAddrs> {
|
||||||
|
let mut connecting_publishers = tcp::listen("localhost:0", Json::default).await?;
|
||||||
|
|
||||||
|
let publisher_addrs = PublisherAddrs {
|
||||||
|
publisher: connecting_publishers.local_addr(),
|
||||||
|
subscriptions: self.clone().start_subscription_manager().await?,
|
||||||
|
};
|
||||||
|
|
||||||
|
info!(publisher_addr = %publisher_addrs.publisher, "listening for publishers.",);
|
||||||
|
tokio::spawn(async move {
|
||||||
|
// Because this is just an example, we know there will only be one publisher. In more
|
||||||
|
// realistic code, this would be a loop to continually accept new publisher
|
||||||
|
// connections.
|
||||||
|
let publisher = connecting_publishers.next().await.unwrap().unwrap();
|
||||||
|
info!(publisher.peer_addr = ?publisher.peer_addr(), "publisher connected.");
|
||||||
|
|
||||||
|
server::BaseChannel::with_defaults(publisher)
|
||||||
|
.execute(self.serve())
|
||||||
|
.for_each(spawn)
|
||||||
|
.await
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(publisher_addrs)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn start_subscription_manager(mut self) -> io::Result<SocketAddr> {
|
||||||
|
let mut connecting_subscribers = tcp::listen("localhost:0", Json::default)
|
||||||
|
.await?
|
||||||
|
.filter_map(|r| future::ready(r.ok()));
|
||||||
|
let new_subscriber_addr = connecting_subscribers.get_ref().local_addr();
|
||||||
|
info!(?new_subscriber_addr, "listening for subscribers.");
|
||||||
|
|
||||||
|
tokio::spawn(async move {
|
||||||
|
while let Some(conn) = connecting_subscribers.next().await {
|
||||||
|
let subscriber_addr = conn.peer_addr().unwrap();
|
||||||
|
|
||||||
|
let tarpc::client::NewClient {
|
||||||
|
client: subscriber,
|
||||||
|
dispatch,
|
||||||
|
} = subscriber::SubscriberClient::new(client::Config::default(), conn);
|
||||||
|
let (ready_tx, ready) = oneshot::channel();
|
||||||
|
self.clone()
|
||||||
|
.start_subscriber_gc(subscriber_addr, dispatch, ready);
|
||||||
|
|
||||||
|
// Populate the topics
|
||||||
|
self.initialize_subscription(subscriber_addr, subscriber)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// Signal that initialization is done.
|
||||||
|
ready_tx.send(()).unwrap();
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(new_subscriber_addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn initialize_subscription(
|
||||||
|
&mut self,
|
||||||
|
subscriber_addr: SocketAddr,
|
||||||
|
subscriber: subscriber::SubscriberClient,
|
||||||
|
) {
|
||||||
|
// Populate the topics
|
||||||
|
if let Ok(topics) = subscriber.topics(context::current()).await {
|
||||||
|
self.clients.lock().unwrap().insert(
|
||||||
|
subscriber_addr,
|
||||||
|
Subscription {
|
||||||
|
topics: topics.clone(),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
info!(%subscriber_addr, ?topics, "subscribed to new topics");
|
||||||
|
let mut subscriptions = self.subscriptions.write().unwrap();
|
||||||
|
for topic in topics {
|
||||||
|
subscriptions
|
||||||
|
.entry(topic)
|
||||||
|
.or_insert_with(HashMap::new)
|
||||||
|
.insert(subscriber_addr, subscriber.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_subscriber_gc<E: Error>(
|
||||||
|
self,
|
||||||
|
subscriber_addr: SocketAddr,
|
||||||
|
client_dispatch: impl Future<Output = Result<(), E>> + Send + 'static,
|
||||||
|
subscriber_ready: oneshot::Receiver<()>,
|
||||||
|
) {
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if let Err(e) = client_dispatch.await {
|
||||||
|
info!(
|
||||||
|
%subscriber_addr,
|
||||||
|
error = %e,
|
||||||
|
"subscriber connection broken");
|
||||||
|
}
|
||||||
|
// Don't clean up the subscriber until initialization is done.
|
||||||
|
let _ = subscriber_ready.await;
|
||||||
|
if let Some(subscription) = self.clients.lock().unwrap().remove(&subscriber_addr) {
|
||||||
|
info!(
|
||||||
|
"[{} unsubscribing from topics: {:?}",
|
||||||
|
subscriber_addr, subscription.topics
|
||||||
|
);
|
||||||
|
let mut subscriptions = self.subscriptions.write().unwrap();
|
||||||
|
for topic in subscription.topics {
|
||||||
|
let subscribers = subscriptions.get_mut(&topic).unwrap();
|
||||||
|
subscribers.remove(&subscriber_addr);
|
||||||
|
if subscribers.is_empty() {
|
||||||
|
subscriptions.remove(&topic);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl publisher::Publisher for Publisher {
|
||||||
|
async fn publish(self, _: context::Context, topic: String, message: String) {
|
||||||
|
info!("received message to publish.");
|
||||||
|
let mut subscribers = match self.subscriptions.read().unwrap().get(&topic) {
|
||||||
|
None => return,
|
||||||
|
Some(subscriptions) => subscriptions.clone(),
|
||||||
|
};
|
||||||
|
let mut publications = Vec::new();
|
||||||
|
for client in subscribers.values_mut() {
|
||||||
|
publications.push(client.receive(context::current(), topic.clone(), message.clone()));
|
||||||
|
}
|
||||||
|
// Ignore failing subscribers. In a real pubsub, you'd want to continually retry until
|
||||||
|
// subscribers ack. Of course, a lot would be different in a real pubsub :)
|
||||||
|
for response in future::join_all(publications).await {
|
||||||
|
if let Err(e) = response {
|
||||||
|
info!("failed to broadcast to subscriber: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initializes an OpenTelemetry tracing subscriber with a Jaeger backend.
|
||||||
|
fn init_tracing(service_name: &str) -> anyhow::Result<()> {
|
||||||
|
env::set_var("OTEL_BSP_MAX_EXPORT_BATCH_SIZE", "12");
|
||||||
|
let tracer = opentelemetry_jaeger::new_agent_pipeline()
|
||||||
|
.with_service_name(service_name)
|
||||||
|
.with_max_packet_size(2usize.pow(13))
|
||||||
|
.install_batch(opentelemetry::runtime::Tokio)?;
|
||||||
|
|
||||||
|
tracing_subscriber::registry()
|
||||||
|
.with(tracing_subscriber::filter::EnvFilter::from_default_env())
|
||||||
|
.with(tracing_subscriber::fmt::layer())
|
||||||
|
.with(tracing_opentelemetry::layer().with_tracer(tracer))
|
||||||
|
.try_init()?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
init_tracing("Pub/Sub")?;
|
||||||
|
|
||||||
|
let addrs = Publisher {
|
||||||
|
clients: Arc::new(Mutex::new(HashMap::new())),
|
||||||
|
subscriptions: Arc::new(RwLock::new(HashMap::new())),
|
||||||
|
}
|
||||||
|
.start()
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let _subscriber0 = Subscriber::connect(
|
||||||
|
addrs.subscriptions,
|
||||||
|
vec!["calculus".into(), "cool shorts".into()],
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let _subscriber1 = Subscriber::connect(
|
||||||
|
addrs.subscriptions,
|
||||||
|
vec!["cool shorts".into(), "history".into()],
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let publisher = publisher::PublisherClient::new(
|
||||||
|
client::Config::default(),
|
||||||
|
tcp::connect(addrs.publisher, Json::default).await?,
|
||||||
|
)
|
||||||
|
.spawn();
|
||||||
|
|
||||||
|
publisher
|
||||||
|
.publish(context::current(), "calculus".into(), "sqrt(2)".into())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
publisher
|
||||||
|
.publish(
|
||||||
|
context::current(),
|
||||||
|
"cool shorts".into(),
|
||||||
|
"hello to all".into(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
publisher
|
||||||
|
.publish(context::current(), "history".into(), "napoleon".to_string())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
drop(_subscriber0);
|
||||||
|
|
||||||
|
publisher
|
||||||
|
.publish(
|
||||||
|
context::current(),
|
||||||
|
"cool shorts".into(),
|
||||||
|
"hello to who?".into(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
opentelemetry::global::shutdown_tracer_provider();
|
||||||
|
info!("done.");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
54
tarpc/examples/readme.rs
Normal file
54
tarpc/examples/readme.rs
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use futures::prelude::*;
|
||||||
|
use tarpc::{
|
||||||
|
client, context,
|
||||||
|
server::{self, Channel},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// This is the service definition. It looks a lot like a trait definition.
|
||||||
|
/// It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait World {
|
||||||
|
async fn hello(name: String) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// This is the type that implements the generated World trait. It is the business logic
|
||||||
|
/// and is used to start the server.
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct HelloServer;
|
||||||
|
|
||||||
|
impl World for HelloServer {
|
||||||
|
async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
format!("Hello, {name}!")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn spawn(fut: impl Future<Output = ()> + Send + 'static) {
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
let (client_transport, server_transport) = tarpc::transport::channel::unbounded();
|
||||||
|
|
||||||
|
let server = server::BaseChannel::with_defaults(server_transport);
|
||||||
|
tokio::spawn(server.execute(HelloServer.serve()).for_each(spawn));
|
||||||
|
|
||||||
|
// WorldClient is generated by the #[tarpc::service] attribute. It has a constructor `new`
|
||||||
|
// that takes a config and any Transport as input.
|
||||||
|
let client = WorldClient::new(client::Config::default(), client_transport).spawn();
|
||||||
|
|
||||||
|
// The client has an RPC method for each RPC defined in the annotated trait. It takes the same
|
||||||
|
// args as defined, with the addition of a Context, which is always the first arg. The Context
|
||||||
|
// specifies a deadline and trace information which can be helpful in debugging requests.
|
||||||
|
let hello = client.hello(context::current(), "Stim".to_string()).await?;
|
||||||
|
|
||||||
|
println!("{hello}");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
150
tarpc/examples/tls_over_tcp.rs
Normal file
150
tarpc/examples/tls_over_tcp.rs
Normal file
@@ -0,0 +1,150 @@
|
|||||||
|
// Copyright 2023 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use futures::prelude::*;
|
||||||
|
use rustls_pemfile::certs;
|
||||||
|
use std::io::{BufReader, Cursor};
|
||||||
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
|
use tokio_rustls::rustls::server::AllowAnyAuthenticatedClient;
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::net::TcpStream;
|
||||||
|
use tokio_rustls::rustls::{self, RootCertStore};
|
||||||
|
use tokio_rustls::{TlsAcceptor, TlsConnector};
|
||||||
|
|
||||||
|
use tarpc::context::Context;
|
||||||
|
use tarpc::serde_transport as transport;
|
||||||
|
use tarpc::server::{BaseChannel, Channel};
|
||||||
|
use tarpc::tokio_serde::formats::Bincode;
|
||||||
|
use tarpc::tokio_util::codec::length_delimited::LengthDelimitedCodec;
|
||||||
|
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait PingService {
|
||||||
|
async fn ping() -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct Service;
|
||||||
|
|
||||||
|
impl PingService for Service {
|
||||||
|
async fn ping(self, _: Context) -> String {
|
||||||
|
"🔒".to_owned()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// certs were generated with openssl 3 https://github.com/rustls/rustls/tree/main/test-ca
|
||||||
|
// used on client-side for server tls
|
||||||
|
const END_CHAIN: &str = include_str!("certs/eddsa/end.chain");
|
||||||
|
// used on client-side for client-auth
|
||||||
|
const CLIENT_PRIVATEKEY_CLIENT_AUTH: &str = include_str!("certs/eddsa/client.key");
|
||||||
|
const CLIENT_CERT_CLIENT_AUTH: &str = include_str!("certs/eddsa/client.cert");
|
||||||
|
|
||||||
|
// used on server-side for server tls
|
||||||
|
const END_CERT: &str = include_str!("certs/eddsa/end.cert");
|
||||||
|
const END_PRIVATEKEY: &str = include_str!("certs/eddsa/end.key");
|
||||||
|
// used on server-side for client-auth
|
||||||
|
const CLIENT_CHAIN_CLIENT_AUTH: &str = include_str!("certs/eddsa/client.chain");
|
||||||
|
|
||||||
|
pub fn load_certs(data: &str) -> Vec<rustls::Certificate> {
|
||||||
|
certs(&mut BufReader::new(Cursor::new(data)))
|
||||||
|
.unwrap()
|
||||||
|
.into_iter()
|
||||||
|
.map(rustls::Certificate)
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn load_private_key(key: &str) -> rustls::PrivateKey {
|
||||||
|
let mut reader = BufReader::new(Cursor::new(key));
|
||||||
|
loop {
|
||||||
|
match rustls_pemfile::read_one(&mut reader).expect("cannot parse private key .pem file") {
|
||||||
|
Some(rustls_pemfile::Item::RSAKey(key)) => return rustls::PrivateKey(key),
|
||||||
|
Some(rustls_pemfile::Item::PKCS8Key(key)) => return rustls::PrivateKey(key),
|
||||||
|
Some(rustls_pemfile::Item::ECKey(key)) => return rustls::PrivateKey(key),
|
||||||
|
None => break,
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic!("no keys found in {:?} (encrypted keys not supported)", key);
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn spawn(fut: impl Future<Output = ()> + Send + 'static) {
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
// -------------------- start here to setup tls tcp tokio stream --------------------------
|
||||||
|
// ref certs and loading from: https://github.com/tokio-rs/tls/blob/master/tokio-rustls/tests/test.rs
|
||||||
|
// ref basic tls server setup from: https://github.com/tokio-rs/tls/blob/master/tokio-rustls/examples/server/src/main.rs
|
||||||
|
let cert = load_certs(END_CERT);
|
||||||
|
let key = load_private_key(END_PRIVATEKEY);
|
||||||
|
let server_addr = (IpAddr::V4(Ipv4Addr::LOCALHOST), 5000);
|
||||||
|
|
||||||
|
// ------------- server side client_auth cert loading start
|
||||||
|
let mut client_auth_roots = RootCertStore::empty();
|
||||||
|
for root in load_certs(CLIENT_CHAIN_CLIENT_AUTH) {
|
||||||
|
client_auth_roots.add(&root).unwrap();
|
||||||
|
}
|
||||||
|
let client_auth = AllowAnyAuthenticatedClient::new(client_auth_roots);
|
||||||
|
// ------------- server side client_auth cert loading end
|
||||||
|
|
||||||
|
let config = rustls::ServerConfig::builder()
|
||||||
|
.with_safe_defaults()
|
||||||
|
.with_client_cert_verifier(client_auth) // use .with_no_client_auth() instead if you don't want client-auth
|
||||||
|
.with_single_cert(cert, key)
|
||||||
|
.unwrap();
|
||||||
|
let acceptor = TlsAcceptor::from(Arc::new(config));
|
||||||
|
let listener = TcpListener::bind(&server_addr).await.unwrap();
|
||||||
|
let codec_builder = LengthDelimitedCodec::builder();
|
||||||
|
|
||||||
|
// ref ./custom_transport.rs server side
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
let (stream, _peer_addr) = listener.accept().await.unwrap();
|
||||||
|
let tls_stream = acceptor.accept(stream).await.unwrap();
|
||||||
|
let framed = codec_builder.new_framed(tls_stream);
|
||||||
|
|
||||||
|
let transport = transport::new(framed, Bincode::default());
|
||||||
|
|
||||||
|
let fut = BaseChannel::with_defaults(transport)
|
||||||
|
.execute(Service.serve())
|
||||||
|
.for_each(spawn);
|
||||||
|
tokio::spawn(fut);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
// ---------------------- client connection ---------------------
|
||||||
|
// tls client connection from https://github.com/tokio-rs/tls/blob/master/tokio-rustls/examples/client/src/main.rs
|
||||||
|
let mut root_store = rustls::RootCertStore::empty();
|
||||||
|
for root in load_certs(END_CHAIN) {
|
||||||
|
root_store.add(&root).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let client_auth_private_key = load_private_key(CLIENT_PRIVATEKEY_CLIENT_AUTH);
|
||||||
|
let client_auth_certs = load_certs(CLIENT_CERT_CLIENT_AUTH);
|
||||||
|
|
||||||
|
let config = rustls::ClientConfig::builder()
|
||||||
|
.with_safe_defaults()
|
||||||
|
.with_root_certificates(root_store)
|
||||||
|
.with_single_cert(client_auth_certs, client_auth_private_key)?; // use .with_no_client_auth() instead if you don't want client-auth
|
||||||
|
|
||||||
|
let domain = rustls::ServerName::try_from("localhost")?;
|
||||||
|
let connector = TlsConnector::from(Arc::new(config));
|
||||||
|
|
||||||
|
let stream = TcpStream::connect(server_addr).await?;
|
||||||
|
let stream = connector.connect(domain, stream).await?;
|
||||||
|
|
||||||
|
let transport = transport::new(codec_builder.new_framed(stream), Bincode::default());
|
||||||
|
let answer = PingServiceClient::new(Default::default(), transport)
|
||||||
|
.spawn()
|
||||||
|
.ping(tarpc::context::current())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
println!("ping answer: {answer}");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
190
tarpc/examples/tracing.rs
Normal file
190
tarpc/examples/tracing.rs
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
add::{Add as AddService, AddStub},
|
||||||
|
double::Double as DoubleService,
|
||||||
|
};
|
||||||
|
use futures::{future, prelude::*};
|
||||||
|
use std::{
|
||||||
|
io,
|
||||||
|
sync::{
|
||||||
|
atomic::{AtomicBool, Ordering},
|
||||||
|
Arc,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use tarpc::{
|
||||||
|
client::{
|
||||||
|
self,
|
||||||
|
stub::{load_balance, retry},
|
||||||
|
RpcError,
|
||||||
|
},
|
||||||
|
context, serde_transport,
|
||||||
|
server::{
|
||||||
|
incoming::{spawn_incoming, Incoming},
|
||||||
|
request_hook::{self, BeforeRequestList},
|
||||||
|
BaseChannel,
|
||||||
|
},
|
||||||
|
tokio_serde::formats::Json,
|
||||||
|
ClientMessage, Response, ServerError, Transport,
|
||||||
|
};
|
||||||
|
use tokio::net::TcpStream;
|
||||||
|
use tracing_subscriber::prelude::*;
|
||||||
|
|
||||||
|
pub mod add {
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait Add {
|
||||||
|
/// Add two ints together.
|
||||||
|
async fn add(x: i32, y: i32) -> i32;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub mod double {
|
||||||
|
#[tarpc::service]
|
||||||
|
pub trait Double {
|
||||||
|
/// 2 * x
|
||||||
|
async fn double(x: i32) -> Result<i32, String>;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct AddServer;
|
||||||
|
|
||||||
|
impl AddService for AddServer {
|
||||||
|
async fn add(self, _: context::Context, x: i32, y: i32) -> i32 {
|
||||||
|
x + y
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
struct DoubleServer<Stub> {
|
||||||
|
add_client: add::AddClient<Stub>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Stub> DoubleService for DoubleServer<Stub>
|
||||||
|
where
|
||||||
|
Stub: AddStub + Clone + Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
async fn double(self, _: context::Context, x: i32) -> Result<i32, String> {
|
||||||
|
self.add_client
|
||||||
|
.add(context::current(), x, x)
|
||||||
|
.await
|
||||||
|
.map_err(|e| e.to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn init_tracing(service_name: &str) -> anyhow::Result<()> {
|
||||||
|
let tracer = opentelemetry_jaeger::new_agent_pipeline()
|
||||||
|
.with_service_name(service_name)
|
||||||
|
.with_auto_split_batch(true)
|
||||||
|
.with_max_packet_size(2usize.pow(13))
|
||||||
|
.install_batch(opentelemetry::runtime::Tokio)?;
|
||||||
|
|
||||||
|
tracing_subscriber::registry()
|
||||||
|
.with(tracing_subscriber::EnvFilter::from_default_env())
|
||||||
|
.with(tracing_subscriber::fmt::layer())
|
||||||
|
.with(tracing_opentelemetry::layer().with_tracer(tracer))
|
||||||
|
.try_init()?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn listen_on_random_port<Item, SinkItem>() -> anyhow::Result<(
|
||||||
|
impl Stream<Item = serde_transport::Transport<TcpStream, Item, SinkItem, Json<Item, SinkItem>>>,
|
||||||
|
std::net::SocketAddr,
|
||||||
|
)>
|
||||||
|
where
|
||||||
|
Item: for<'de> serde::Deserialize<'de>,
|
||||||
|
SinkItem: serde::Serialize,
|
||||||
|
{
|
||||||
|
let listener = tarpc::serde_transport::tcp::listen("localhost:0", Json::default)
|
||||||
|
.await?
|
||||||
|
.filter_map(|r| future::ready(r.ok()))
|
||||||
|
.take(1);
|
||||||
|
let addr = listener.get_ref().get_ref().local_addr();
|
||||||
|
Ok((listener, addr))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_stub<Req, Resp, const N: usize>(
|
||||||
|
backends: [impl Transport<ClientMessage<Arc<Req>>, Response<Resp>> + Send + Sync + 'static; N],
|
||||||
|
) -> retry::Retry<
|
||||||
|
impl Fn(&Result<Resp, RpcError>, u32) -> bool + Clone,
|
||||||
|
load_balance::RoundRobin<client::Channel<Arc<Req>, Resp>>,
|
||||||
|
>
|
||||||
|
where
|
||||||
|
Req: Send + Sync + 'static,
|
||||||
|
Resp: Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
let stub = load_balance::RoundRobin::new(
|
||||||
|
backends
|
||||||
|
.into_iter()
|
||||||
|
.map(|transport| tarpc::client::new(client::Config::default(), transport).spawn())
|
||||||
|
.collect(),
|
||||||
|
);
|
||||||
|
let stub = retry::Retry::new(stub, |resp, attempts| {
|
||||||
|
if let Err(e) = resp {
|
||||||
|
tracing::warn!("Got an error: {e:?}");
|
||||||
|
attempts < 3
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
});
|
||||||
|
stub
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> anyhow::Result<()> {
|
||||||
|
init_tracing("tarpc_tracing_example")?;
|
||||||
|
|
||||||
|
let (add_listener1, addr1) = listen_on_random_port().await?;
|
||||||
|
let (add_listener2, addr2) = listen_on_random_port().await?;
|
||||||
|
let something_bad_happened = Arc::new(AtomicBool::new(false));
|
||||||
|
let server = request_hook::before()
|
||||||
|
.then_fn(move |_: &mut _, _: &_| {
|
||||||
|
let something_bad_happened = something_bad_happened.clone();
|
||||||
|
async move {
|
||||||
|
if something_bad_happened.fetch_xor(true, Ordering::Relaxed) {
|
||||||
|
Err(ServerError::new(
|
||||||
|
io::ErrorKind::NotFound,
|
||||||
|
"Gamma Ray!".into(),
|
||||||
|
))
|
||||||
|
} else {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.serving(AddServer.serve());
|
||||||
|
let add_server = add_listener1
|
||||||
|
.chain(add_listener2)
|
||||||
|
.map(BaseChannel::with_defaults);
|
||||||
|
tokio::spawn(spawn_incoming(add_server.execute(server)));
|
||||||
|
|
||||||
|
let add_client = add::AddClient::from(make_stub([
|
||||||
|
tarpc::serde_transport::tcp::connect(addr1, Json::default).await?,
|
||||||
|
tarpc::serde_transport::tcp::connect(addr2, Json::default).await?,
|
||||||
|
]));
|
||||||
|
|
||||||
|
let double_listener = tarpc::serde_transport::tcp::listen("localhost:0", Json::default)
|
||||||
|
.await?
|
||||||
|
.filter_map(|r| future::ready(r.ok()));
|
||||||
|
let addr = double_listener.get_ref().local_addr();
|
||||||
|
let double_server = double_listener.map(BaseChannel::with_defaults).take(1);
|
||||||
|
let server = DoubleServer { add_client }.serve();
|
||||||
|
tokio::spawn(spawn_incoming(double_server.execute(server)));
|
||||||
|
|
||||||
|
let to_double_server = tarpc::serde_transport::tcp::connect(addr, Json::default).await?;
|
||||||
|
let double_client =
|
||||||
|
double::DoubleClient::new(client::Config::default(), to_double_server).spawn();
|
||||||
|
|
||||||
|
let ctx = context::current();
|
||||||
|
for _ in 1..=5 {
|
||||||
|
tracing::info!("{:?}", double_client.double(ctx, 1).await?);
|
||||||
|
}
|
||||||
|
|
||||||
|
opentelemetry::global::shutdown_tracer_provider();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
1
tarpc/rustfmt.toml
Normal file
1
tarpc/rustfmt.toml
Normal file
@@ -0,0 +1 @@
|
|||||||
|
edition = "2018"
|
||||||
49
tarpc/src/cancellations.rs
Normal file
49
tarpc/src/cancellations.rs
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
use futures::{prelude::*, task::*};
|
||||||
|
use std::pin::Pin;
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
/// Sends request cancellation signals.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct RequestCancellation(mpsc::UnboundedSender<u64>);
|
||||||
|
|
||||||
|
/// A stream of IDs of requests that have been canceled.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct CanceledRequests(mpsc::UnboundedReceiver<u64>);
|
||||||
|
|
||||||
|
/// Returns a channel to send request cancellation messages.
|
||||||
|
pub fn cancellations() -> (RequestCancellation, CanceledRequests) {
|
||||||
|
// Unbounded because messages are sent in the drop fn. This is fine, because it's still
|
||||||
|
// bounded by the number of in-flight requests.
|
||||||
|
let (tx, rx) = mpsc::unbounded_channel();
|
||||||
|
(RequestCancellation(tx), CanceledRequests(rx))
|
||||||
|
}
|
||||||
|
|
||||||
|
impl RequestCancellation {
|
||||||
|
/// Cancels the request with ID `request_id`.
|
||||||
|
///
|
||||||
|
/// No validation is done of `request_id`. There is no way to know if the request id provided
|
||||||
|
/// corresponds to a request actually tracked by the backing channel. `RequestCancellation` is
|
||||||
|
/// a one-way communication channel.
|
||||||
|
///
|
||||||
|
/// Once request data is cleaned up, a response will never be received by the client. This is
|
||||||
|
/// useful primarily when request processing ends prematurely for requests with long deadlines
|
||||||
|
/// which would otherwise continue to be tracked by the backing channel—a kind of leak.
|
||||||
|
pub fn cancel(&self, request_id: u64) {
|
||||||
|
let _ = self.0.send(request_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CanceledRequests {
|
||||||
|
/// Polls for a cancelled request.
|
||||||
|
pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<u64>> {
|
||||||
|
self.0.poll_recv(cx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Stream for CanceledRequests {
|
||||||
|
type Item = u64;
|
||||||
|
|
||||||
|
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<u64>> {
|
||||||
|
self.poll_recv(cx)
|
||||||
|
}
|
||||||
|
}
|
||||||
1065
tarpc/src/client.rs
Normal file
1065
tarpc/src/client.rs
Normal file
File diff suppressed because it is too large
Load Diff
137
tarpc/src/client/in_flight_requests.rs
Normal file
137
tarpc/src/client/in_flight_requests.rs
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
use crate::{
|
||||||
|
context,
|
||||||
|
util::{Compact, TimeUntil},
|
||||||
|
};
|
||||||
|
use fnv::FnvHashMap;
|
||||||
|
use std::{
|
||||||
|
collections::hash_map,
|
||||||
|
task::{Context, Poll},
|
||||||
|
};
|
||||||
|
use tokio::sync::oneshot;
|
||||||
|
use tokio_util::time::delay_queue::{self, DelayQueue};
|
||||||
|
use tracing::Span;
|
||||||
|
|
||||||
|
/// Requests already written to the wire that haven't yet received responses.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct InFlightRequests<Resp> {
|
||||||
|
request_data: FnvHashMap<u64, RequestData<Resp>>,
|
||||||
|
deadlines: DelayQueue<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Resp> Default for InFlightRequests<Resp> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
request_data: Default::default(),
|
||||||
|
deadlines: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct RequestData<Res> {
|
||||||
|
ctx: context::Context,
|
||||||
|
span: Span,
|
||||||
|
response_completion: oneshot::Sender<Res>,
|
||||||
|
/// The key to remove the timer for the request's deadline.
|
||||||
|
deadline_key: delay_queue::Key,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An error returned when an attempt is made to insert a request with an ID that is already in
|
||||||
|
/// use.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct AlreadyExistsError;
|
||||||
|
|
||||||
|
impl<Res> InFlightRequests<Res> {
|
||||||
|
/// Returns the number of in-flight requests.
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.request_data.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true iff there are no requests in flight.
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.request_data.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Starts a request, unless a request with the same ID is already in flight.
|
||||||
|
pub fn insert_request(
|
||||||
|
&mut self,
|
||||||
|
request_id: u64,
|
||||||
|
ctx: context::Context,
|
||||||
|
span: Span,
|
||||||
|
response_completion: oneshot::Sender<Res>,
|
||||||
|
) -> Result<(), AlreadyExistsError> {
|
||||||
|
match self.request_data.entry(request_id) {
|
||||||
|
hash_map::Entry::Vacant(vacant) => {
|
||||||
|
let timeout = ctx.deadline.time_until();
|
||||||
|
let deadline_key = self.deadlines.insert(request_id, timeout);
|
||||||
|
vacant.insert(RequestData {
|
||||||
|
ctx,
|
||||||
|
span,
|
||||||
|
response_completion,
|
||||||
|
deadline_key,
|
||||||
|
});
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
hash_map::Entry::Occupied(_) => Err(AlreadyExistsError),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes a request without aborting. Returns true iff the request was found.
|
||||||
|
pub fn complete_request(&mut self, request_id: u64, result: Res) -> Option<Span> {
|
||||||
|
if let Some(request_data) = self.request_data.remove(&request_id) {
|
||||||
|
self.request_data.compact(0.1);
|
||||||
|
self.deadlines.remove(&request_data.deadline_key);
|
||||||
|
let _ = request_data.response_completion.send(result);
|
||||||
|
return Some(request_data.span);
|
||||||
|
}
|
||||||
|
|
||||||
|
tracing::debug!("No in-flight request found for request_id = {request_id}.");
|
||||||
|
|
||||||
|
// If the response completion was absent, then the request was already canceled.
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Completes all requests using the provided function.
|
||||||
|
/// Returns Spans for all completes requests.
|
||||||
|
pub fn complete_all_requests<'a>(
|
||||||
|
&'a mut self,
|
||||||
|
mut result: impl FnMut() -> Res + 'a,
|
||||||
|
) -> impl Iterator<Item = Span> + 'a {
|
||||||
|
self.deadlines.clear();
|
||||||
|
self.request_data.drain().map(move |(_, request_data)| {
|
||||||
|
let _ = request_data.response_completion.send(result());
|
||||||
|
request_data.span
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Cancels a request without completing (typically used when a request handle was dropped
|
||||||
|
/// before the request completed).
|
||||||
|
pub fn cancel_request(&mut self, request_id: u64) -> Option<(context::Context, Span)> {
|
||||||
|
if let Some(request_data) = self.request_data.remove(&request_id) {
|
||||||
|
self.request_data.compact(0.1);
|
||||||
|
self.deadlines.remove(&request_data.deadline_key);
|
||||||
|
Some((request_data.ctx, request_data.span))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Yields a request that has expired, completing it with a TimedOut error.
|
||||||
|
/// The caller should send cancellation messages for any yielded request ID.
|
||||||
|
pub fn poll_expired(
|
||||||
|
&mut self,
|
||||||
|
cx: &mut Context,
|
||||||
|
expired_error: impl Fn() -> Res,
|
||||||
|
) -> Poll<Option<u64>> {
|
||||||
|
self.deadlines.poll_expired(cx).map(|expired| {
|
||||||
|
let request_id = expired?.into_inner();
|
||||||
|
if let Some(request_data) = self.request_data.remove(&request_id) {
|
||||||
|
let _entered = request_data.span.enter();
|
||||||
|
tracing::error!("DeadlineExceeded");
|
||||||
|
self.request_data.compact(0.1);
|
||||||
|
let _ = request_data.response_completion.send(expired_error());
|
||||||
|
}
|
||||||
|
Some(request_id)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
45
tarpc/src/client/stub.rs
Normal file
45
tarpc/src/client/stub.rs
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
//! Provides a Stub trait, implemented by types that can call remote services.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
client::{Channel, RpcError},
|
||||||
|
context,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub mod load_balance;
|
||||||
|
pub mod retry;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod mock;
|
||||||
|
|
||||||
|
/// A connection to a remote service.
|
||||||
|
/// Calls the service with requests of type `Req` and receives responses of type `Resp`.
|
||||||
|
#[allow(async_fn_in_trait)]
|
||||||
|
pub trait Stub {
|
||||||
|
/// The service request type.
|
||||||
|
type Req;
|
||||||
|
|
||||||
|
/// The service response type.
|
||||||
|
type Resp;
|
||||||
|
|
||||||
|
/// Calls a remote service.
|
||||||
|
async fn call(
|
||||||
|
&self,
|
||||||
|
ctx: context::Context,
|
||||||
|
request_name: &'static str,
|
||||||
|
request: Self::Req,
|
||||||
|
) -> Result<Self::Resp, RpcError>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Req, Resp> Stub for Channel<Req, Resp> {
|
||||||
|
type Req = Req;
|
||||||
|
type Resp = Resp;
|
||||||
|
|
||||||
|
async fn call(
|
||||||
|
&self,
|
||||||
|
ctx: context::Context,
|
||||||
|
request_name: &'static str,
|
||||||
|
request: Req,
|
||||||
|
) -> Result<Self::Resp, RpcError> {
|
||||||
|
Self::call(self, ctx, request_name, request).await
|
||||||
|
}
|
||||||
|
}
|
||||||
279
tarpc/src/client/stub/load_balance.rs
Normal file
279
tarpc/src/client/stub/load_balance.rs
Normal file
@@ -0,0 +1,279 @@
|
|||||||
|
//! Provides load-balancing [Stubs](crate::client::stub::Stub).
|
||||||
|
|
||||||
|
pub use consistent_hash::ConsistentHash;
|
||||||
|
pub use round_robin::RoundRobin;
|
||||||
|
|
||||||
|
/// Provides a stub that load-balances with a simple round-robin strategy.
|
||||||
|
mod round_robin {
|
||||||
|
use crate::{
|
||||||
|
client::{stub, RpcError},
|
||||||
|
context,
|
||||||
|
};
|
||||||
|
use cycle::AtomicCycle;
|
||||||
|
|
||||||
|
impl<Stub> stub::Stub for RoundRobin<Stub>
|
||||||
|
where
|
||||||
|
Stub: stub::Stub,
|
||||||
|
{
|
||||||
|
type Req = Stub::Req;
|
||||||
|
type Resp = Stub::Resp;
|
||||||
|
|
||||||
|
async fn call(
|
||||||
|
&self,
|
||||||
|
ctx: context::Context,
|
||||||
|
request_name: &'static str,
|
||||||
|
request: Self::Req,
|
||||||
|
) -> Result<Stub::Resp, RpcError> {
|
||||||
|
let next = self.stubs.next();
|
||||||
|
next.call(ctx, request_name, request).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A Stub that load-balances across backing stubs by round robin.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct RoundRobin<Stub> {
|
||||||
|
stubs: AtomicCycle<Stub>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Stub> RoundRobin<Stub>
|
||||||
|
where
|
||||||
|
Stub: stub::Stub,
|
||||||
|
{
|
||||||
|
/// Returns a new RoundRobin stub.
|
||||||
|
pub fn new(stubs: Vec<Stub>) -> Self {
|
||||||
|
Self {
|
||||||
|
stubs: AtomicCycle::new(stubs),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mod cycle {
|
||||||
|
use std::sync::{
|
||||||
|
atomic::{AtomicUsize, Ordering},
|
||||||
|
Arc,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Cycles endlessly and atomically over a collection of elements of type T.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
pub struct AtomicCycle<T>(Arc<State<T>>);
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct State<T> {
|
||||||
|
elements: Vec<T>,
|
||||||
|
next: AtomicUsize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> AtomicCycle<T> {
|
||||||
|
pub fn new(elements: Vec<T>) -> Self {
|
||||||
|
Self(Arc::new(State {
|
||||||
|
elements,
|
||||||
|
next: Default::default(),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn next(&self) -> &T {
|
||||||
|
self.0.next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> State<T> {
|
||||||
|
pub fn next(&self) -> &T {
|
||||||
|
let next = self.next.fetch_add(1, Ordering::Relaxed);
|
||||||
|
&self.elements[next % self.elements.len()]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_cycle() {
|
||||||
|
let cycle = AtomicCycle::new(vec![1, 2, 3]);
|
||||||
|
assert_eq!(cycle.next(), &1);
|
||||||
|
assert_eq!(cycle.next(), &2);
|
||||||
|
assert_eq!(cycle.next(), &3);
|
||||||
|
assert_eq!(cycle.next(), &1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Provides a stub that load-balances with a consistent hashing strategy.
|
||||||
|
///
|
||||||
|
/// Each request is hashed, then mapped to a stub based on the hash. Equivalent requests will use
|
||||||
|
/// the same stub.
|
||||||
|
mod consistent_hash {
|
||||||
|
use crate::{
|
||||||
|
client::{stub, RpcError},
|
||||||
|
context,
|
||||||
|
};
|
||||||
|
use std::{
|
||||||
|
collections::hash_map::RandomState,
|
||||||
|
hash::{BuildHasher, Hash, Hasher},
|
||||||
|
num::TryFromIntError,
|
||||||
|
};
|
||||||
|
|
||||||
|
impl<Stub, S> stub::Stub for ConsistentHash<Stub, S>
|
||||||
|
where
|
||||||
|
Stub: stub::Stub,
|
||||||
|
Stub::Req: Hash,
|
||||||
|
S: BuildHasher,
|
||||||
|
{
|
||||||
|
type Req = Stub::Req;
|
||||||
|
type Resp = Stub::Resp;
|
||||||
|
|
||||||
|
async fn call(
|
||||||
|
&self,
|
||||||
|
ctx: context::Context,
|
||||||
|
request_name: &'static str,
|
||||||
|
request: Self::Req,
|
||||||
|
) -> Result<Stub::Resp, RpcError> {
|
||||||
|
let index = usize::try_from(self.hash_request(&request) % self.stubs_len).expect(
|
||||||
|
"invariant broken: stubs_len is not larger than a usize, \
|
||||||
|
so the hash modulo stubs_len should always fit in a usize",
|
||||||
|
);
|
||||||
|
let next = &self.stubs[index];
|
||||||
|
next.call(ctx, request_name, request).await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A Stub that load-balances across backing stubs with a consistent hashing
/// strategy: each request is hashed, and equal requests map to the same stub.
#[derive(Clone, Debug)]
pub struct ConsistentHash<Stub, S = RandomState> {
    /// The backing stubs that calls are distributed across.
    stubs: Vec<Stub>,
    /// Cached `stubs.len()` as a u64 so request hashes can be reduced modulo
    /// the stub count without a per-call conversion.
    stubs_len: u64,
    /// Builds the hashers used to hash each request.
    hasher: S,
}
|
||||||
|
|
||||||
|
impl<Stub> ConsistentHash<Stub, RandomState>
where
    Stub: stub::Stub,
    Stub::Req: Hash,
{
    /// Returns a new ConsistentHash stub using a randomly seeded default hasher.
    /// Returns an err if the length of `stubs` overflows a u64.
    pub fn new(stubs: Vec<Stub>) -> Result<Self, TryFromIntError> {
        Ok(Self {
            stubs_len: stubs.len().try_into()?,
            stubs,
            hasher: RandomState::new(),
        })
    }
}
|
||||||
|
|
||||||
|
impl<Stub, S> ConsistentHash<Stub, S>
where
    Stub: stub::Stub,
    Stub::Req: Hash,
    S: BuildHasher,
{
    /// Returns a new ConsistentHash stub that hashes requests with `hasher`.
    /// Returns an err if the length of `stubs` overflows a u64.
    pub fn with_hasher(stubs: Vec<Stub>, hasher: S) -> Result<Self, TryFromIntError> {
        Ok(Self {
            stubs_len: stubs.len().try_into()?,
            stubs,
            hasher,
        })
    }

    /// Hashes `req` with a fresh hasher built from `self.hasher`. Stable for
    /// equal requests as long as the same builder is used.
    fn hash_request(&self, req: &Stub::Req) -> u64 {
        let mut hasher = self.hasher.build_hasher();
        req.hash(&mut hasher);
        hasher.finish()
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::ConsistentHash;
    use crate::{
        client::stub::{mock::Mock, Stub},
        context,
    };
    use std::{
        collections::HashMap,
        hash::{BuildHasher, Hash, Hasher},
        rc::Rc,
    };

    #[tokio::test]
    async fn test() -> anyhow::Result<()> {
        let stub = ConsistentHash::<_, FakeHasherBuilder>::with_hasher(
            vec![
                // For easier reading of the assertions made in this test, each Mock's response
                // value is equal to a hash value that should map to its index: 3 % 3 = 0, 1 %
                // 3 = 1, etc.
                Mock::new([('a', 3), ('b', 3), ('c', 3)]),
                Mock::new([('a', 1), ('b', 1), ('c', 1)]),
                Mock::new([('a', 2), ('b', 2), ('c', 2)]),
            ],
            FakeHasherBuilder::new([('a', 1), ('b', 2), ('c', 3)]),
        )?;

        // Two passes verify the request-to-stub mapping is stable across calls.
        for _ in 0..2 {
            let resp = stub.call(context::current(), "", 'a').await?;
            assert_eq!(resp, 1);

            let resp = stub.call(context::current(), "", 'b').await?;
            assert_eq!(resp, 2);

            let resp = stub.call(context::current(), "", 'c').await?;
            assert_eq!(resp, 3);
        }

        Ok(())
    }

    /// A Hasher that records the bytes written to it instead of hashing them.
    /// Used to learn which byte sequence a value's `Hash` impl produces.
    struct HashRecorder(Vec<u8>);
    impl Hasher for HashRecorder {
        fn write(&mut self, bytes: &[u8]) {
            self.0 = Vec::from(bytes);
        }
        fn finish(&self) -> u64 {
            0
        }
    }

    /// Builds `FakeHasher`s that map recorded byte sequences to predetermined
    /// hash values.
    struct FakeHasherBuilder {
        // Maps a value's recorded `Hash` bytes -> the fake hash to return.
        recorded_hashes: Rc<HashMap<Vec<u8>, u64>>,
    }

    /// A Hasher that returns a predetermined value for recognized inputs and
    /// 0 for everything else.
    struct FakeHasher {
        recorded_hashes: Rc<HashMap<Vec<u8>, u64>>,
        output: u64,
    }

    impl BuildHasher for FakeHasherBuilder {
        type Hasher = FakeHasher;

        fn build_hasher(&self) -> Self::Hasher {
            FakeHasher {
                recorded_hashes: self.recorded_hashes.clone(),
                output: 0,
            }
        }
    }

    impl FakeHasherBuilder {
        /// Creates a builder whose hashers return `fake_hash` for each
        /// `to_hash` value, identified by the bytes its `Hash` impl writes.
        fn new<T: Hash, const N: usize>(fake_hashes: [(T, u64); N]) -> Self {
            let mut recorded_hashes = HashMap::new();
            for (to_hash, fake_hash) in fake_hashes {
                let mut recorder = HashRecorder(vec![]);
                to_hash.hash(&mut recorder);
                recorded_hashes.insert(recorder.0, fake_hash);
            }
            Self {
                recorded_hashes: Rc::new(recorded_hashes),
            }
        }
    }

    impl Hasher for FakeHasher {
        fn write(&mut self, bytes: &[u8]) {
            if let Some(hash) = self.recorded_hashes.get(bytes) {
                self.output = *hash;
            }
        }
        fn finish(&self) -> u64 {
            self.output
        }
    }
}
|
||||||
|
}
|
||||||
49
tarpc/src/client/stub/mock.rs
Normal file
49
tarpc/src/client/stub/mock.rs
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
use crate::{
|
||||||
|
client::{stub::Stub, RpcError},
|
||||||
|
context, ServerError,
|
||||||
|
};
|
||||||
|
use std::{collections::HashMap, hash::Hash, io};
|
||||||
|
|
||||||
|
/// A mock stub that returns user-specified responses.
///
/// Requests with no mocked response resolve to a `NotFound` server error.
pub struct Mock<Req, Resp> {
    /// Maps each mocked request to the response `call` returns for it.
    responses: HashMap<Req, Resp>,
}
|
||||||
|
|
||||||
|
impl<Req, Resp> Mock<Req, Resp>
where
    Req: Eq + Hash,
{
    /// Returns a new mock, mocking the specified (request, response) pairs.
    ///
    /// If a request appears more than once, the later pair replaces the
    /// earlier one (standard `HashMap::from` semantics).
    pub fn new<const N: usize>(responses: [(Req, Resp); N]) -> Self {
        Self {
            responses: HashMap::from(responses),
        }
    }
}
|
||||||
|
|
||||||
|
impl<Req, Resp> Stub for Mock<Req, Resp>
|
||||||
|
where
|
||||||
|
Req: Eq + Hash,
|
||||||
|
Resp: Clone,
|
||||||
|
{
|
||||||
|
type Req = Req;
|
||||||
|
type Resp = Resp;
|
||||||
|
|
||||||
|
async fn call(
|
||||||
|
&self,
|
||||||
|
_: context::Context,
|
||||||
|
_: &'static str,
|
||||||
|
request: Self::Req,
|
||||||
|
) -> Result<Resp, RpcError> {
|
||||||
|
self.responses
|
||||||
|
.get(&request)
|
||||||
|
.cloned()
|
||||||
|
.map(Ok)
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
Err(RpcError::Server(ServerError {
|
||||||
|
kind: io::ErrorKind::NotFound,
|
||||||
|
detail: "mock (request, response) entry not found".into(),
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
56
tarpc/src/client/stub/retry.rs
Normal file
56
tarpc/src/client/stub/retry.rs
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
//! Provides a stub that retries requests based on response contents.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
client::{stub, RpcError},
|
||||||
|
context,
|
||||||
|
};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
impl<Stub, Req, F> stub::Stub for Retry<F, Stub>
where
    Stub: stub::Stub<Req = Arc<Req>>,
    F: Fn(&Result<Stub::Resp, RpcError>, u32) -> bool,
{
    type Req = Req;
    type Resp = Stub::Resp;

    /// Calls the underlying stub, re-issuing the request as long as
    /// `should_retry` returns true for the latest result.
    ///
    /// The request is wrapped in an `Arc` so each attempt can reuse it
    /// without cloning the request body.
    async fn call(
        &self,
        ctx: context::Context,
        request_name: &'static str,
        request: Self::Req,
    ) -> Result<Stub::Resp, RpcError> {
        let request = Arc::new(request);
        // `i` is the 1-based attempt number handed to `should_retry`, which
        // lets the policy cap the number of attempts.
        for i in 1.. {
            let result = self
                .stub
                .call(ctx, request_name, Arc::clone(&request))
                .await;
            if (self.should_retry)(&result, i) {
                tracing::trace!("Retrying on attempt {i}");
                continue;
            }
            return result;
        }
        // Only reachable if the loop above exhausts the entire u32 range.
        unreachable!("Wow, that was a lot of attempts!");
    }
}
|
||||||
|
|
||||||
|
/// A Stub that retries requests based on response contents.
/// Note: to use this stub with Serde serialization, the "rc" feature of Serde needs to be enabled.
#[derive(Clone, Debug)]
pub struct Retry<F, Stub> {
    /// Retry policy: given the latest result and the 1-based attempt number,
    /// returns true if the request should be re-issued.
    should_retry: F,
    /// The underlying stub that actually performs each call.
    stub: Stub,
}
|
||||||
|
|
||||||
|
impl<Stub, Req, F> Retry<F, Stub>
where
    Stub: stub::Stub<Req = Arc<Req>>,
    F: Fn(&Result<Stub::Resp, RpcError>, u32) -> bool,
{
    /// Creates a new Retry stub that delegates calls to the underlying `stub`.
    ///
    /// `should_retry` is invoked with each attempt's result and the 1-based
    /// attempt number; returning true triggers another attempt.
    pub fn new(stub: Stub, should_retry: F) -> Self {
        Self { stub, should_retry }
    }
}
|
||||||
152
tarpc/src/context.rs
Normal file
152
tarpc/src/context.rs
Normal file
@@ -0,0 +1,152 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Provides a request context that carries a deadline and trace context. This context is sent from
|
||||||
|
//! client to server and is used by the server to enforce response deadlines.
|
||||||
|
|
||||||
|
use crate::trace::{self, TraceId};
|
||||||
|
use opentelemetry::trace::TraceContextExt;
|
||||||
|
use static_assertions::assert_impl_all;
|
||||||
|
use std::{
|
||||||
|
convert::TryFrom,
|
||||||
|
time::{Duration, SystemTime},
|
||||||
|
};
|
||||||
|
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||||
|
|
||||||
|
/// A request context that carries request-scoped information like deadlines and trace information.
/// It is sent from client to server and is used by the server to enforce response deadlines.
///
/// The context should not be stored directly in a server implementation, because the context will
/// be different for each request in scope.
#[derive(Clone, Copy, Debug)]
#[non_exhaustive]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Context {
    /// When the client expects the request to be complete by. The server should cancel the request
    /// if it is not complete by this time.
    ///
    /// Defaults to ten seconds from deserialization time when the wire message omits it.
    #[cfg_attr(feature = "serde1", serde(default = "ten_seconds_from_now"))]
    // Serialized as a Duration to prevent clock skew issues.
    #[cfg_attr(feature = "serde1", serde(with = "absolute_to_relative_time"))]
    pub deadline: SystemTime,
    /// Uniquely identifies requests originating from the same source.
    /// When a service handles a request by making requests itself, those requests should
    /// include the same `trace_id` as that included on the original request. This way,
    /// users can trace related actions across a distributed system.
    pub trace_context: trace::Context,
}
|
||||||
|
|
||||||
|
#[cfg(feature = "serde1")]
mod absolute_to_relative_time {
    //! Serde adapter that writes a `SystemTime` deadline as the `Duration`
    //! remaining until it, so client and server clocks need not agree.
    pub use serde::{Deserialize, Deserializer, Serialize, Serializer};
    pub use std::time::{Duration, SystemTime};

    /// Serializes `deadline` as the time remaining until it, clamped to zero
    /// if the deadline has already passed.
    pub fn serialize<S>(deadline: &SystemTime, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let deadline = deadline
            .duration_since(SystemTime::now())
            .unwrap_or(Duration::ZERO);
        deadline.serialize(serializer)
    }

    /// Deserializes a `Duration` and re-anchors it to the local clock,
    /// yielding an absolute deadline.
    pub fn deserialize<'de, D>(deserializer: D) -> Result<SystemTime, D::Error>
    where
        D: Deserializer<'de>,
    {
        let deadline = Duration::deserialize(deserializer)?;
        Ok(SystemTime::now() + deadline)
    }

    // Wrapper type that routes (de)serialization through this module, for the
    // round-trip tests below.
    #[cfg(test)]
    #[derive(serde::Serialize, serde::Deserialize)]
    struct AbsoluteToRelative(#[serde(with = "self")] SystemTime);

    #[test]
    fn test_serialize() {
        let now = SystemTime::now();
        let deadline = now + Duration::from_secs(10);
        let serialized_deadline = bincode::serialize(&AbsoluteToRelative(deadline)).unwrap();
        let deserialized_deadline: Duration = bincode::deserialize(&serialized_deadline).unwrap();
        // TODO: how to avoid flakiness?
        assert!(deserialized_deadline > Duration::from_secs(9));
    }

    #[test]
    fn test_deserialize() {
        let deadline = Duration::from_secs(10);
        let serialized_deadline = bincode::serialize(&deadline).unwrap();
        let AbsoluteToRelative(deserialized_deadline) =
            bincode::deserialize(&serialized_deadline).unwrap();
        // TODO: how to avoid flakiness?
        assert!(deserialized_deadline > SystemTime::now() + Duration::from_secs(9));
    }
}
|
||||||
|
|
||||||
|
assert_impl_all!(Context: Send, Sync);

/// Returns a `SystemTime` ten seconds in the future: the default request
/// deadline, and the serde fallback when a wire message omits its deadline.
fn ten_seconds_from_now() -> SystemTime {
    SystemTime::now() + Duration::from_secs(10)
}
|
||||||
|
|
||||||
|
/// Returns the context for the current request, or a default Context if no request is active.
///
/// Convenience free-function form of [`Context::current`].
pub fn current() -> Context {
    Context::current()
}
|
||||||
|
|
||||||
|
/// Newtype stored in the tracing span's extensions so the request deadline
/// can be recovered later by `Context::current`.
#[derive(Clone)]
struct Deadline(SystemTime);

impl Default for Deadline {
    /// Defaults to the standard ten-second deadline, used when no request is
    /// active.
    fn default() -> Self {
        Self(ten_seconds_from_now())
    }
}
|
||||||
|
|
||||||
|
impl Context {
    /// Returns the context for the current request, or a default Context if no request is active.
    ///
    /// Both fields are recovered from the current tracing span: the trace
    /// context from the span itself, and the deadline from the `Deadline`
    /// value stashed in the span's context by `SpanExt::set_context`.
    pub fn current() -> Self {
        let span = tracing::Span::current();
        Self {
            // Falls back to a default trace context if none can be derived
            // from the span.
            trace_context: trace::Context::try_from(&span)
                .unwrap_or_else(|_| trace::Context::default()),
            // Falls back to the default ten-second deadline.
            deadline: span
                .context()
                .get::<Deadline>()
                .cloned()
                .unwrap_or_default()
                .0,
        }
    }

    /// Returns the ID of the request-scoped trace.
    pub fn trace_id(&self) -> &TraceId {
        &self.trace_context.trace_id
    }
}
|
||||||
|
|
||||||
|
/// An extension trait for [`tracing::Span`] for propagating tarpc Contexts.
pub(crate) trait SpanExt {
    /// Sets the given context on this span. Newly-created spans will be children of the given
    /// context's trace context. The context's deadline is also stored so that
    /// `Context::current` can recover it.
    fn set_context(&self, context: &Context);
}
|
||||||
|
|
||||||
|
impl SpanExt for tracing::Span {
    fn set_context(&self, context: &Context) {
        // Builds an OpenTelemetry context whose remote span context mirrors
        // the tarpc trace context, and stashes the deadline alongside it so
        // `Context::current` can recover it later via `get::<Deadline>()`.
        self.set_parent(
            opentelemetry::Context::new()
                .with_remote_span_context(opentelemetry::trace::SpanContext::new(
                    opentelemetry::trace::TraceId::from(context.trace_context.trace_id),
                    opentelemetry::trace::SpanId::from(context.trace_context.span_id),
                    opentelemetry::trace::TraceFlags::from(context.trace_context.sampling_decision),
                    // `true`: this span context originated remotely (on the
                    // other end of the RPC).
                    true,
                    opentelemetry::trace::TraceState::default(),
                ))
                .with_value(Deadline(context.deadline)),
        );
    }
}
|
||||||
397
tarpc/src/lib.rs
Normal file
397
tarpc/src/lib.rs
Normal file
@@ -0,0 +1,397 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
//! *Disclaimer*: This is not an official Google product.
|
||||||
|
//!
|
||||||
|
//! tarpc is an RPC framework for rust with a focus on ease of use. Defining a
|
||||||
|
//! service can be done in just a few lines of code, and most of the boilerplate of
|
||||||
|
//! writing a server is taken care of for you.
|
||||||
|
//!
|
||||||
|
//! [Documentation](https://docs.rs/crate/tarpc/)
|
||||||
|
//!
|
||||||
|
//! ## What is an RPC framework?
|
||||||
|
//! "RPC" stands for "Remote Procedure Call," a function call where the work of
|
||||||
|
//! producing the return value is being done somewhere else. When an rpc function is
|
||||||
|
//! invoked, behind the scenes the function contacts some other process somewhere
|
||||||
|
//! and asks them to evaluate the function instead. The original function then
|
||||||
|
//! returns the value produced by the other process.
|
||||||
|
//!
|
||||||
|
//! RPC frameworks are a fundamental building block of most microservices-oriented
|
||||||
|
//! architectures. Two well-known ones are [gRPC](http://www.grpc.io) and
|
||||||
|
//! [Cap'n Proto](https://capnproto.org/).
|
||||||
|
//!
|
||||||
|
//! tarpc differentiates itself from other RPC frameworks by defining the schema in code,
|
||||||
|
//! rather than in a separate language such as .proto. This means there's no separate compilation
|
||||||
|
//! process, and no context switching between different languages.
|
||||||
|
//!
|
||||||
|
//! Some other features of tarpc:
|
||||||
|
//! - Pluggable transport: any type implementing `Stream<Item = Request> + Sink<Response>` can be
|
||||||
|
//! used as a transport to connect the client and server.
|
||||||
|
//! - `Send + 'static` optional: if the transport doesn't require it, neither does tarpc!
|
||||||
|
//! - Cascading cancellation: dropping a request will send a cancellation message to the server.
|
||||||
|
//! The server will cease any unfinished work on the request, subsequently cancelling any of its
|
||||||
|
//! own requests, repeating for the entire chain of transitive dependencies.
|
||||||
|
//! - Configurable deadlines and deadline propagation: request deadlines default to 10s if
|
||||||
|
//! unspecified. The server will automatically cease work when the deadline has passed. Any
|
||||||
|
//! requests sent by the server that use the request context will propagate the request deadline.
|
||||||
|
//! For example, if a server is handling a request with a 10s deadline, does 2s of work, then
|
||||||
|
//! sends a request to another server, that server will see an 8s deadline.
|
||||||
|
//! - Distributed tracing: tarpc is instrumented with
|
||||||
|
//! [tracing](https://github.com/tokio-rs/tracing) primitives extended with
|
||||||
|
//! [OpenTelemetry](https://opentelemetry.io/) traces. Using a compatible tracing subscriber like
|
||||||
|
//! [Jaeger](https://github.com/open-telemetry/opentelemetry-rust/tree/main/opentelemetry-jaeger),
|
||||||
|
//! each RPC can be traced through the client, server, and other dependencies downstream of the
|
||||||
|
//! server. Even for applications not connected to a distributed tracing collector, the
|
||||||
|
//! instrumentation can also be ingested by regular loggers like
|
||||||
|
//! [env_logger](https://github.com/env-logger-rs/env_logger/).
|
||||||
|
//! - Serde serialization: enabling the `serde1` Cargo feature will make service requests and
|
||||||
|
//! responses `Serialize + Deserialize`. It's entirely optional, though: in-memory transports can
|
||||||
|
//! be used, as well, so the price of serialization doesn't have to be paid when it's not needed.
|
||||||
|
//!
|
||||||
|
//! ## Usage
|
||||||
|
//! Add to your `Cargo.toml` dependencies:
|
||||||
|
//!
|
||||||
|
//! ```toml
|
||||||
|
//! tarpc = "0.29"
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! The `tarpc::service` attribute expands to a collection of items that form an rpc service.
|
||||||
|
//! These generated types make it easy and ergonomic to write servers with less boilerplate.
|
||||||
|
//! Simply implement the generated service trait, and you're off to the races!
|
||||||
|
//!
|
||||||
|
//! ## Example
|
||||||
|
//!
|
||||||
|
//! This example uses [tokio](https://tokio.rs), so add the following dependencies to
|
||||||
|
//! your `Cargo.toml`:
|
||||||
|
//!
|
||||||
|
//! ```toml
|
||||||
|
//! anyhow = "1.0"
|
||||||
|
//! futures = "0.3"
|
||||||
|
//! tarpc = { version = "0.29", features = ["tokio1"] }
|
||||||
|
//! tokio = { version = "1.0", features = ["macros"] }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! In the following example, we use an in-process channel for communication between
|
||||||
|
//! client and server. In real code, you will likely communicate over the network.
|
||||||
|
//! For a more real-world example, see [example-service](example-service).
|
||||||
|
//!
|
||||||
|
//! First, let's set up the dependencies and service definition.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! # extern crate futures;
|
||||||
|
//!
|
||||||
|
//! use futures::{
|
||||||
|
//! future::{self, Ready},
|
||||||
|
//! prelude::*,
|
||||||
|
//! };
|
||||||
|
//! use tarpc::{
|
||||||
|
//! client, context,
|
||||||
|
//! server::{self, incoming::Incoming, Channel},
|
||||||
|
//! };
|
||||||
|
//!
|
||||||
|
//! // This is the service definition. It looks a lot like a trait definition.
|
||||||
|
//! // It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
//! #[tarpc::service]
|
||||||
|
//! trait World {
|
||||||
|
//! /// Returns a greeting for name.
|
||||||
|
//! async fn hello(name: String) -> String;
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! This service definition generates a trait called `World`. Next we need to
|
||||||
|
//! implement it for our Server struct.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! # extern crate futures;
|
||||||
|
//! # use futures::{
|
||||||
|
//! # future::{self, Ready},
|
||||||
|
//! # prelude::*,
|
||||||
|
//! # };
|
||||||
|
//! # use tarpc::{
|
||||||
|
//! # client, context,
|
||||||
|
//! # server::{self, incoming::Incoming},
|
||||||
|
//! # };
|
||||||
|
//! # // This is the service definition. It looks a lot like a trait definition.
|
||||||
|
//! # // It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
//! # #[tarpc::service]
|
||||||
|
//! # trait World {
|
||||||
|
//! # /// Returns a greeting for name.
|
||||||
|
//! # async fn hello(name: String) -> String;
|
||||||
|
//! # }
|
||||||
|
//! // This is the type that implements the generated World trait. It is the business logic
|
||||||
|
//! // and is used to start the server.
|
||||||
|
//! #[derive(Clone)]
|
||||||
|
//! struct HelloServer;
|
||||||
|
//!
|
||||||
|
//! impl World for HelloServer {
|
||||||
|
//! // Each defined rpc generates an async fn that serves the RPC
|
||||||
|
//! async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
//! format!("Hello, {name}!")
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! Lastly let's write our `main` that will start the server. While this example uses an
|
||||||
|
//! [in-process channel](transport::channel), tarpc also ships a generic [`serde_transport`]
|
||||||
|
//! behind the `serde-transport` feature, with additional [TCP](serde_transport::tcp) functionality
|
||||||
|
//! available behind the `tcp` feature.
|
||||||
|
//!
|
||||||
|
//! ```rust
|
||||||
|
//! # extern crate futures;
|
||||||
|
//! # use futures::{
|
||||||
|
//! # future::{self, Ready},
|
||||||
|
//! # prelude::*,
|
||||||
|
//! # };
|
||||||
|
//! # use tarpc::{
|
||||||
|
//! # client, context,
|
||||||
|
//! # server::{self, Channel},
|
||||||
|
//! # };
|
||||||
|
//! # // This is the service definition. It looks a lot like a trait definition.
|
||||||
|
//! # // It defines one RPC, hello, which takes one arg, name, and returns a String.
|
||||||
|
//! # #[tarpc::service]
|
||||||
|
//! # trait World {
|
||||||
|
//! # /// Returns a greeting for name.
|
||||||
|
//! # async fn hello(name: String) -> String;
|
||||||
|
//! # }
|
||||||
|
//! # // This is the type that implements the generated World trait. It is the business logic
|
||||||
|
//! # // and is used to start the server.
|
||||||
|
//! # #[derive(Clone)]
|
||||||
|
//! # struct HelloServer;
|
||||||
|
//! # impl World for HelloServer {
|
||||||
|
//! // Each defined rpc generates an async fn that serves the RPC
|
||||||
|
//! # async fn hello(self, _: context::Context, name: String) -> String {
|
||||||
|
//! # format!("Hello, {name}!")
|
||||||
|
//! # }
|
||||||
|
//! # }
|
||||||
|
//! # #[cfg(not(feature = "tokio1"))]
|
||||||
|
//! # fn main() {}
|
||||||
|
//! # #[cfg(feature = "tokio1")]
|
||||||
|
//! #[tokio::main]
|
||||||
|
//! async fn main() -> anyhow::Result<()> {
|
||||||
|
//! let (client_transport, server_transport) = tarpc::transport::channel::unbounded();
|
||||||
|
//!
|
||||||
|
//! let server = server::BaseChannel::with_defaults(server_transport);
|
||||||
|
//! tokio::spawn(
|
||||||
|
//! server.execute(HelloServer.serve())
|
||||||
|
//! // Handle all requests concurrently.
|
||||||
|
//! .for_each(|response| async move {
|
||||||
|
//! tokio::spawn(response);
|
||||||
|
//! }));
|
||||||
|
//!
|
||||||
|
//! // WorldClient is generated by the #[tarpc::service] attribute. It has a constructor `new`
|
||||||
|
//! // that takes a config and any Transport as input.
|
||||||
|
//! let mut client = WorldClient::new(client::Config::default(), client_transport).spawn();
|
||||||
|
//!
|
||||||
|
//! // The client has an RPC method for each RPC defined in the annotated trait. It takes the same
|
||||||
|
//! // args as defined, with the addition of a Context, which is always the first arg. The Context
|
||||||
|
//! // specifies a deadline and trace information which can be helpful in debugging requests.
|
||||||
|
//! let hello = client.hello(context::current(), "Stim".to_string()).await?;
|
||||||
|
//!
|
||||||
|
//! println!("{hello}");
|
||||||
|
//!
|
||||||
|
//! Ok(())
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! ## Service Documentation
|
||||||
|
//!
|
||||||
|
//! Use `cargo doc` as you normally would to see the documentation created for all
|
||||||
|
//! items expanded by a `service!` invocation.
|
||||||
|
|
||||||
|
#![deny(missing_docs)]
|
||||||
|
#![allow(clippy::type_complexity)]
|
||||||
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
|
|
||||||
|
#[cfg(feature = "serde1")]
|
||||||
|
#[doc(hidden)]
|
||||||
|
pub use serde;
|
||||||
|
|
||||||
|
#[cfg(feature = "serde-transport")]
|
||||||
|
pub use {tokio_serde, tokio_util};
|
||||||
|
|
||||||
|
#[cfg(feature = "serde-transport")]
|
||||||
|
#[cfg_attr(docsrs, doc(cfg(feature = "serde-transport")))]
|
||||||
|
pub mod serde_transport;
|
||||||
|
|
||||||
|
pub mod trace;
|
||||||
|
|
||||||
|
#[cfg(feature = "serde1")]
|
||||||
|
pub use tarpc_plugins::derive_serde;
|
||||||
|
|
||||||
|
/// The main macro that creates RPC services.
|
||||||
|
///
|
||||||
|
/// Rpc methods are specified, mirroring trait syntax:
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// #[tarpc::service]
|
||||||
|
/// trait Service {
|
||||||
|
/// /// Say hello
|
||||||
|
/// async fn hello(name: String) -> String;
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Attributes can be attached to each rpc. These attributes
|
||||||
|
/// will then be attached to the generated service traits'
|
||||||
|
/// corresponding `fn`s, as well as to the client stubs' RPCs.
|
||||||
|
///
|
||||||
|
/// The following items are expanded in the enclosing module:
|
||||||
|
///
|
||||||
|
/// * `trait Service` -- defines the RPC service.
|
||||||
|
/// * `fn serve` -- turns a service impl into a request handler.
|
||||||
|
/// * `Client` -- a client stub with a fn for each RPC.
|
||||||
|
/// * `fn new_stub` -- creates a new Client stub.
|
||||||
|
pub use tarpc_plugins::service;
|
||||||
|
|
||||||
|
pub(crate) mod cancellations;
|
||||||
|
pub mod client;
|
||||||
|
pub mod context;
|
||||||
|
pub mod server;
|
||||||
|
pub mod transport;
|
||||||
|
pub(crate) mod util;
|
||||||
|
|
||||||
|
pub use crate::transport::sealed::Transport;
|
||||||
|
|
||||||
|
use anyhow::Context as _;
|
||||||
|
use futures::task::*;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::{error::Error, fmt::Display, io, time::SystemTime};
|
||||||
|
|
||||||
|
/// A message from a client to a server.
#[derive(Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum ClientMessage<T> {
    /// A request initiated by a user. The server responds to a request by invoking a
    /// service-provided request handler. The handler completes with a [`response`](Response), which
    /// the server sends back to the client.
    Request(Request<T>),
    /// A command to cancel an in-flight request, automatically sent by the client when a response
    /// future is dropped.
    ///
    /// When received, the server will immediately cancel the main task (top-level future) of the
    /// request handler for the associated request. Any tasks spawned by the request handler will
    /// not be canceled, because the framework layer does not
    /// know about them.
    Cancel {
        /// The trace context associates the message with a specific chain of causally-related actions,
        /// possibly orchestrated across many distributed systems.
        // Defaults when deserializing older messages that lack the field.
        #[cfg_attr(feature = "serde1", serde(default))]
        trace_context: trace::Context,
        /// The ID of the request to cancel.
        request_id: u64,
    },
}
|
||||||
|
|
||||||
|
/// A request from a client to a server.
#[derive(Clone, Copy, Debug)]
#[non_exhaustive]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Request<T> {
    /// Trace context, deadline, and other cross-cutting concerns.
    pub context: context::Context,
    /// Uniquely identifies the request across all requests sent over a single channel.
    pub id: u64,
    /// The request body.
    pub message: T,
}
|
||||||
|
|
||||||
|
/// A response from a server to a client.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[non_exhaustive]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Response<T> {
    /// The ID of the request being responded to. Matches [`Request::id`].
    pub request_id: u64,
    /// The response body, or an error if the request failed.
    pub message: Result<T, ServerError>,
}
|
||||||
|
|
||||||
|
/// An error indicating the server aborted the request early, e.g., due to request throttling.
#[derive(thiserror::Error, Clone, Debug, PartialEq, Eq, Hash)]
#[error("{kind:?}: {detail}")]
#[non_exhaustive]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct ServerError {
    // `io::ErrorKind` has no serde impls, so it crosses the wire as a u32 via
    // the helpers in `util::serde`.
    #[cfg_attr(
        feature = "serde1",
        serde(serialize_with = "util::serde::serialize_io_error_kind_as_u32")
    )]
    #[cfg_attr(
        feature = "serde1",
        serde(deserialize_with = "util::serde::deserialize_io_error_kind_from_u32")
    )]
    /// The type of error that occurred to fail the request.
    pub kind: io::ErrorKind,
    /// A message describing more detail about the error that occurred.
    pub detail: String,
}
|
||||||
|
|
||||||
|
/// Critical errors that result in a Channel disconnecting.
#[derive(thiserror::Error, Debug, PartialEq, Eq)]
pub enum ChannelError<E>
where
    E: Error + Send + Sync + 'static,
{
    /// Could not read from the transport.
    // NOTE(review): the Arc suggests a read failure is shared with multiple
    // consumers, unlike the owned write-side errors — confirm against the
    // channel internals.
    #[error("could not read from the transport")]
    Read(#[source] Arc<E>),
    /// Could not ready the transport for writes.
    #[error("could not ready the transport for writes")]
    Ready(#[source] E),
    /// Could not write to the transport.
    #[error("could not write to the transport")]
    Write(#[source] E),
    /// Could not flush the transport.
    #[error("could not flush the transport")]
    Flush(#[source] E),
    /// Could not close the write end of the transport.
    #[error("could not close the write end of the transport")]
    Close(#[source] E),
}
|
||||||
|
|
||||||
|
impl ServerError {
|
||||||
|
/// Returns a new server error with `kind` and `detail`.
|
||||||
|
pub fn new(kind: io::ErrorKind, detail: String) -> ServerError {
|
||||||
|
Self { kind, detail }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Request<T> {
    /// Returns the deadline for this request.
    ///
    /// The deadline is read out of the request's context (`self.context` is the
    /// tarpc request `Context`, declared elsewhere in this file). Once the
    /// deadline has passed, the server is free to abort processing.
    pub fn deadline(&self) -> &SystemTime {
        &self.context.deadline
    }
}
|
||||||
|
|
||||||
|
/// Mirror of `anyhow::Context` for `Poll<Option<Result<..>>>` values, so that
/// error context can be attached without unwrapping the `Poll`/`Option` layers.
pub(crate) trait PollContext<T> {
    /// Attaches `context` to any error in the innermost `Result`.
    /// `context` is evaluated eagerly; prefer [`Self::with_context`] when
    /// constructing it is expensive.
    fn context<C>(self, context: C) -> Poll<Option<anyhow::Result<T>>>
    where
        C: Display + Send + Sync + 'static;

    /// Like [`Self::context`], but the context value is computed lazily, only
    /// when an error is actually present.
    fn with_context<C, F>(self, f: F) -> Poll<Option<anyhow::Result<T>>>
    where
        C: Display + Send + Sync + 'static,
        F: FnOnce() -> C;
}
|
||||||
|
|
||||||
|
impl<T, E> PollContext<T> for Poll<Option<Result<T, E>>>
|
||||||
|
where
|
||||||
|
E: Error + Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
fn context<C>(self, context: C) -> Poll<Option<anyhow::Result<T>>>
|
||||||
|
where
|
||||||
|
C: Display + Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
self.map(|o| o.map(|r| r.context(context)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn with_context<C, F>(self, f: F) -> Poll<Option<anyhow::Result<T>>>
|
||||||
|
where
|
||||||
|
C: Display + Send + Sync + 'static,
|
||||||
|
F: FnOnce() -> C,
|
||||||
|
{
|
||||||
|
self.map(|o| o.map(|r| r.with_context(f)))
|
||||||
|
}
|
||||||
|
}
|
||||||
672
tarpc/src/serde_transport.rs
Normal file
672
tarpc/src/serde_transport.rs
Normal file
@@ -0,0 +1,672 @@
|
|||||||
|
// Copyright 2019 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! A generic Serde-based `Transport` that can serialize anything supported by `tokio-serde` via any medium that implements `AsyncRead` and `AsyncWrite`.
|
||||||
|
|
||||||
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
|
use futures::{prelude::*, task::*};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::{error::Error, io, pin::Pin};
|
||||||
|
use tokio::io::{AsyncRead, AsyncWrite};
|
||||||
|
use tokio_serde::{Framed as SerdeFramed, *};
|
||||||
|
use tokio_util::codec::{length_delimited::LengthDelimitedCodec, Framed};
|
||||||
|
|
||||||
|
/// A transport that serializes to, and deserializes from, a byte stream.
///
/// Layers a `tokio-serde` codec (`Codec`) over a length-delimited frame codec
/// over the raw byte stream `S`. `Item` is the deserialized inbound message
/// type; `SinkItem` is the serialized outbound message type.
#[pin_project]
pub struct Transport<S, Item, SinkItem, Codec> {
    #[pin]
    inner: SerdeFramed<Framed<S, LengthDelimitedCodec>, Item, SinkItem, Codec>,
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec> Transport<S, Item, SinkItem, Codec> {
|
||||||
|
/// Returns the inner transport over which messages are sent and received.
|
||||||
|
pub fn get_ref(&self) -> &S {
|
||||||
|
self.inner.get_ref().get_ref()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec, CodecError> Stream for Transport<S, Item, SinkItem, Codec>
|
||||||
|
where
|
||||||
|
S: AsyncWrite + AsyncRead,
|
||||||
|
Item: for<'a> Deserialize<'a>,
|
||||||
|
Codec: Deserializer<Item>,
|
||||||
|
CodecError: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||||
|
SerdeFramed<Framed<S, LengthDelimitedCodec>, Item, SinkItem, Codec>:
|
||||||
|
Stream<Item = Result<Item, CodecError>>,
|
||||||
|
{
|
||||||
|
type Item = io::Result<Item>;
|
||||||
|
|
||||||
|
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<io::Result<Item>>> {
|
||||||
|
self.project()
|
||||||
|
.inner
|
||||||
|
.poll_next(cx)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec, CodecError> Sink<SinkItem> for Transport<S, Item, SinkItem, Codec>
|
||||||
|
where
|
||||||
|
S: AsyncWrite,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem>,
|
||||||
|
CodecError: Into<Box<dyn Error + Send + Sync>>,
|
||||||
|
SerdeFramed<Framed<S, LengthDelimitedCodec>, Item, SinkItem, Codec>:
|
||||||
|
Sink<SinkItem, Error = CodecError>,
|
||||||
|
{
|
||||||
|
type Error = io::Error;
|
||||||
|
|
||||||
|
fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
self.project()
|
||||||
|
.inner
|
||||||
|
.poll_ready(cx)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn start_send(self: Pin<&mut Self>, item: SinkItem) -> io::Result<()> {
|
||||||
|
self.project()
|
||||||
|
.inner
|
||||||
|
.start_send(item)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
self.project()
|
||||||
|
.inner
|
||||||
|
.poll_flush(cx)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
|
||||||
|
self.project()
|
||||||
|
.inner
|
||||||
|
.poll_close(cx)
|
||||||
|
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Constructs a new transport from a framed transport and a serialization codec.
///
/// `framed_io` supplies length-delimited frames over the byte stream `S`;
/// `codec` (de)serializes each frame to/from `Item`/`SinkItem`.
pub fn new<S, Item, SinkItem, Codec>(
    framed_io: Framed<S, LengthDelimitedCodec>,
    codec: Codec,
) -> Transport<S, Item, SinkItem, Codec>
where
    S: AsyncWrite + AsyncRead,
    Item: for<'de> Deserialize<'de>,
    SinkItem: Serialize,
    Codec: Serializer<SinkItem> + Deserializer<Item>,
{
    Transport {
        inner: SerdeFramed::new(framed_io, codec),
    }
}
|
||||||
|
|
||||||
|
impl<S, Item, SinkItem, Codec> From<(S, Codec)> for Transport<S, Item, SinkItem, Codec>
|
||||||
|
where
|
||||||
|
S: AsyncWrite + AsyncRead,
|
||||||
|
Item: for<'de> Deserialize<'de>,
|
||||||
|
SinkItem: Serialize,
|
||||||
|
Codec: Serializer<SinkItem> + Deserializer<Item>,
|
||||||
|
{
|
||||||
|
fn from((io, codec): (S, Codec)) -> Self {
|
||||||
|
new(Framed::new(io, LengthDelimitedCodec::new()), codec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "tcp")]
#[cfg_attr(docsrs, doc(cfg(feature = "tcp")))]
/// TCP support for generic transport using Tokio.
pub mod tcp {
    use {
        super::*,
        futures::ready,
        std::{marker::PhantomData, net::SocketAddr},
        tokio::net::{TcpListener, TcpStream, ToSocketAddrs},
        tokio_util::codec::length_delimited,
    };

    impl<Item, SinkItem, Codec> Transport<TcpStream, Item, SinkItem, Codec> {
        /// Returns the peer address of the underlying TcpStream.
        pub fn peer_addr(&self) -> io::Result<SocketAddr> {
            self.inner.get_ref().get_ref().peer_addr()
        }
        /// Returns the local address of the underlying TcpStream.
        pub fn local_addr(&self) -> io::Result<SocketAddr> {
            self.inner.get_ref().get_ref().local_addr()
        }
    }

    /// A connection Future that also exposes the length-delimited framing config.
    #[must_use]
    #[pin_project]
    pub struct Connect<T, Item, SinkItem, CodecFn> {
        /// The pending `TcpStream::connect` future.
        #[pin]
        inner: T,
        /// Builds a fresh codec for the established connection.
        codec_fn: CodecFn,
        /// Framing configuration applied when the connection completes.
        config: length_delimited::Builder,
        // fn-pointer PhantomData records that Item is produced and SinkItem is
        // consumed without claiming ownership of either type.
        ghost: PhantomData<(fn(SinkItem), fn() -> Item)>,
    }

    impl<T, Item, SinkItem, Codec, CodecFn> Future for Connect<T, Item, SinkItem, CodecFn>
    where
        T: Future<Output = io::Result<TcpStream>>,
        Item: for<'de> Deserialize<'de>,
        SinkItem: Serialize,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        type Output = io::Result<Transport<TcpStream, Item, SinkItem, Codec>>;

        /// Drives the connect future; on success, wraps the stream using the
        /// configured framing and a freshly built codec.
        fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
            let io = ready!(self.as_mut().project().inner.poll(cx))?;
            Poll::Ready(Ok(new(self.config.new_framed(io), (self.codec_fn)())))
        }
    }

    impl<T, Item, SinkItem, CodecFn> Connect<T, Item, SinkItem, CodecFn> {
        /// Returns an immutable reference to the length-delimited codec's config.
        pub fn config(&self) -> &length_delimited::Builder {
            &self.config
        }

        /// Returns a mutable reference to the length-delimited codec's config.
        pub fn config_mut(&mut self) -> &mut length_delimited::Builder {
            &mut self.config
        }
    }

    /// Connects to `addr`, wrapping the connection in a TCP transport.
    pub fn connect<A, Item, SinkItem, Codec, CodecFn>(
        addr: A,
        codec_fn: CodecFn,
    ) -> Connect<impl Future<Output = io::Result<TcpStream>>, Item, SinkItem, CodecFn>
    where
        A: ToSocketAddrs,
        Item: for<'de> Deserialize<'de>,
        SinkItem: Serialize,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        Connect {
            inner: TcpStream::connect(addr),
            codec_fn,
            config: LengthDelimitedCodec::builder(),
            ghost: PhantomData,
        }
    }

    /// Listens on `addr`, wrapping accepted connections in TCP transports.
    pub async fn listen<A, Item, SinkItem, Codec, CodecFn>(
        addr: A,
        codec_fn: CodecFn,
    ) -> io::Result<Incoming<Item, SinkItem, Codec, CodecFn>>
    where
        A: ToSocketAddrs,
        Item: for<'de> Deserialize<'de>,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        let listener = TcpListener::bind(addr).await?;
        // Cache the bound address so it stays available even after a bind to
        // port 0 resolves to an ephemeral port.
        let local_addr = listener.local_addr()?;
        Ok(Incoming {
            listener,
            codec_fn,
            local_addr,
            config: LengthDelimitedCodec::builder(),
            ghost: PhantomData,
        })
    }

    /// A [`TcpListener`] that wraps connections in [transports](Transport).
    #[pin_project]
    #[derive(Debug)]
    pub struct Incoming<Item, SinkItem, Codec, CodecFn> {
        listener: TcpListener,
        local_addr: SocketAddr,
        codec_fn: CodecFn,
        config: length_delimited::Builder,
        ghost: PhantomData<(fn() -> Item, fn(SinkItem), Codec)>,
    }

    impl<Item, SinkItem, Codec, CodecFn> Incoming<Item, SinkItem, Codec, CodecFn> {
        /// Returns the address being listened on.
        pub fn local_addr(&self) -> SocketAddr {
            self.local_addr
        }

        /// Returns an immutable reference to the length-delimited codec's config.
        pub fn config(&self) -> &length_delimited::Builder {
            &self.config
        }

        /// Returns a mutable reference to the length-delimited codec's config.
        pub fn config_mut(&mut self) -> &mut length_delimited::Builder {
            &mut self.config
        }
    }

    impl<Item, SinkItem, Codec, CodecFn> Stream for Incoming<Item, SinkItem, Codec, CodecFn>
    where
        Item: for<'de> Deserialize<'de>,
        SinkItem: Serialize,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        type Item = io::Result<Transport<TcpStream, Item, SinkItem, Codec>>;

        /// Accepts the next inbound connection and wraps it in a [`Transport`]
        /// using the configured framing and a freshly built codec.
        fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
            let conn: TcpStream =
                ready!(Pin::new(&mut self.as_mut().project().listener).poll_accept(cx)?).0;
            Poll::Ready(Some(Ok(new(
                self.config.new_framed(conn),
                (self.codec_fn)(),
            ))))
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(all(unix, feature = "unix"))]
#[cfg_attr(docsrs, doc(cfg(all(unix, feature = "unix"))))]
/// Unix Domain Socket support for generic transport using Tokio.
pub mod unix {
    use {
        super::*,
        futures::ready,
        std::{marker::PhantomData, path::Path},
        tokio::net::{unix::SocketAddr, UnixListener, UnixStream},
        tokio_util::codec::length_delimited,
    };

    impl<Item, SinkItem, Codec> Transport<UnixStream, Item, SinkItem, Codec> {
        /// Returns the socket address of the remote half of the underlying [`UnixStream`].
        pub fn peer_addr(&self) -> io::Result<SocketAddr> {
            self.inner.get_ref().get_ref().peer_addr()
        }
        /// Returns the socket address of the local half of the underlying [`UnixStream`].
        pub fn local_addr(&self) -> io::Result<SocketAddr> {
            self.inner.get_ref().get_ref().local_addr()
        }
    }

    /// A connection Future that also exposes the length-delimited framing config.
    #[must_use]
    #[pin_project]
    pub struct Connect<T, Item, SinkItem, CodecFn> {
        /// The pending `UnixStream::connect` future.
        #[pin]
        inner: T,
        /// Builds a fresh codec for the established connection.
        codec_fn: CodecFn,
        /// Framing configuration applied when the connection completes.
        config: length_delimited::Builder,
        // fn-pointer PhantomData: Item is produced, SinkItem is consumed.
        ghost: PhantomData<(fn(SinkItem), fn() -> Item)>,
    }

    impl<T, Item, SinkItem, Codec, CodecFn> Future for Connect<T, Item, SinkItem, CodecFn>
    where
        T: Future<Output = io::Result<UnixStream>>,
        Item: for<'de> Deserialize<'de>,
        SinkItem: Serialize,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        type Output = io::Result<Transport<UnixStream, Item, SinkItem, Codec>>;

        /// Drives the connect future; on success, wraps the stream using the
        /// configured framing and a freshly built codec.
        fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
            let io = ready!(self.as_mut().project().inner.poll(cx))?;
            Poll::Ready(Ok(new(self.config.new_framed(io), (self.codec_fn)())))
        }
    }

    impl<T, Item, SinkItem, CodecFn> Connect<T, Item, SinkItem, CodecFn> {
        /// Returns an immutable reference to the length-delimited codec's config.
        pub fn config(&self) -> &length_delimited::Builder {
            &self.config
        }

        /// Returns a mutable reference to the length-delimited codec's config.
        pub fn config_mut(&mut self) -> &mut length_delimited::Builder {
            &mut self.config
        }
    }

    /// Connects to socket named by `path`, wrapping the connection in a Unix Domain Socket
    /// transport.
    pub fn connect<P, Item, SinkItem, Codec, CodecFn>(
        path: P,
        codec_fn: CodecFn,
    ) -> Connect<impl Future<Output = io::Result<UnixStream>>, Item, SinkItem, CodecFn>
    where
        P: AsRef<Path>,
        Item: for<'de> Deserialize<'de>,
        SinkItem: Serialize,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        Connect {
            inner: UnixStream::connect(path),
            codec_fn,
            config: LengthDelimitedCodec::builder(),
            ghost: PhantomData,
        }
    }

    /// Listens on the socket named by `path`, wrapping accepted connections in Unix Domain Socket
    /// transports.
    pub async fn listen<P, Item, SinkItem, Codec, CodecFn>(
        path: P,
        codec_fn: CodecFn,
    ) -> io::Result<Incoming<Item, SinkItem, Codec, CodecFn>>
    where
        P: AsRef<Path>,
        Item: for<'de> Deserialize<'de>,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        let listener = UnixListener::bind(path)?;
        let local_addr = listener.local_addr()?;
        Ok(Incoming {
            listener,
            codec_fn,
            local_addr,
            config: LengthDelimitedCodec::builder(),
            ghost: PhantomData,
        })
    }

    /// A [`UnixListener`] that wraps connections in [transports](Transport).
    #[pin_project]
    #[derive(Debug)]
    pub struct Incoming<Item, SinkItem, Codec, CodecFn> {
        listener: UnixListener,
        local_addr: SocketAddr,
        codec_fn: CodecFn,
        config: length_delimited::Builder,
        ghost: PhantomData<(fn() -> Item, fn(SinkItem), Codec)>,
    }

    impl<Item, SinkItem, Codec, CodecFn> Incoming<Item, SinkItem, Codec, CodecFn> {
        /// Returns the socket address being listened on.
        pub fn local_addr(&self) -> &SocketAddr {
            &self.local_addr
        }

        /// Returns an immutable reference to the length-delimited codec's config.
        pub fn config(&self) -> &length_delimited::Builder {
            &self.config
        }

        /// Returns a mutable reference to the length-delimited codec's config.
        pub fn config_mut(&mut self) -> &mut length_delimited::Builder {
            &mut self.config
        }
    }

    impl<Item, SinkItem, Codec, CodecFn> Stream for Incoming<Item, SinkItem, Codec, CodecFn>
    where
        Item: for<'de> Deserialize<'de>,
        SinkItem: Serialize,
        Codec: Serializer<SinkItem> + Deserializer<Item>,
        CodecFn: Fn() -> Codec,
    {
        type Item = io::Result<Transport<UnixStream, Item, SinkItem, Codec>>;

        /// Accepts the next inbound connection and wraps it in a [`Transport`].
        fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
            let conn: UnixStream = ready!(self.as_mut().project().listener.poll_accept(cx)?).0;
            Poll::Ready(Some(Ok(new(
                self.config.new_framed(conn),
                (self.codec_fn)(),
            ))))
        }
    }

    /// A temporary `PathBuf` that lives in `std::env::temp_dir` and is removed on drop.
    pub struct TempPathBuf(std::path::PathBuf);

    impl TempPathBuf {
        /// A named socket that results in `<tempdir>/<name>`
        pub fn new<S: AsRef<str>>(name: S) -> Self {
            let mut sock = std::env::temp_dir();
            sock.push(name.as_ref());
            Self(sock)
        }

        /// Appends a random hex string to the socket name resulting in
        /// `<tempdir>/<name>_<xxxxx>`
        pub fn with_random<S: AsRef<str>>(name: S) -> Self {
            Self::new(format!("{}_{:016x}", name.as_ref(), rand::random::<u64>()))
        }
    }

    impl AsRef<std::path::Path> for TempPathBuf {
        fn as_ref(&self) -> &std::path::Path {
            self.0.as_path()
        }
    }

    impl Drop for TempPathBuf {
        fn drop(&mut self) {
            // This will remove the file pointed to by this PathBuf if it exists, however Err's can
            // be returned such as attempting to remove a non-existing file, or one which we don't
            // have permission to remove. In these cases the Err is swallowed
            let _ = std::fs::remove_file(&self.0);
        }
    }

    #[cfg(test)]
    mod tests {
        use super::*;
        use tokio_serde::formats::SymmetricalJson;

        #[test]
        fn temp_path_buf_non_random() {
            let sock = TempPathBuf::new("test");
            let mut good = std::env::temp_dir();
            good.push("test");
            assert_eq!(sock.as_ref(), good);
            assert_eq!(sock.as_ref().file_name().unwrap(), "test");
        }

        #[test]
        fn temp_path_buf_random() {
            let sock = TempPathBuf::with_random("test");
            let good = std::env::temp_dir();
            assert!(sock.as_ref().starts_with(good));
            // Since there are 16 random characters we just assert the file_name has the right name
            // and starts with the correct string 'test_'
            // file name: test_xxxxxxxxxxxxxxxx
            // test  = 4
            // _     = 1
            // <hex> = 16
            // total = 21
            let fname = sock.as_ref().file_name().unwrap().to_string_lossy();
            assert!(fname.starts_with("test_"));
            assert_eq!(fname.len(), 21);
        }

        #[test]
        fn temp_path_buf_non_existing() {
            let sock = TempPathBuf::with_random("test");
            let sock_path = std::path::PathBuf::from(sock.as_ref());

            // No actual file has been created yet
            assert!(!sock_path.exists());
            // Should not panic
            std::mem::drop(sock);
            assert!(!sock_path.exists());
        }

        #[test]
        fn temp_path_buf_existing_file() {
            let sock = TempPathBuf::with_random("test");
            let sock_path = std::path::PathBuf::from(sock.as_ref());
            let _file = std::fs::File::create(&sock).unwrap();
            assert!(sock_path.exists());
            std::mem::drop(sock);
            assert!(!sock_path.exists());
        }

        #[test]
        fn temp_path_buf_preexisting_file() {
            let mut pre_existing = std::env::temp_dir();
            pre_existing.push("test");
            let _file = std::fs::File::create(&pre_existing).unwrap();
            let sock = TempPathBuf::new("test");
            let sock_path = std::path::PathBuf::from(sock.as_ref());
            assert!(sock_path.exists());
            std::mem::drop(sock);
            assert!(!sock_path.exists());
        }

        #[tokio::test]
        async fn temp_path_buf_for_socket() {
            let sock = TempPathBuf::with_random("test");
            // Save path for testing after drop
            let sock_path = std::path::PathBuf::from(sock.as_ref());
            // create the actual socket
            let _ = listen(&sock, SymmetricalJson::<String>::default).await;
            assert!(sock_path.exists());
            std::mem::drop(sock);
            assert!(!sock_path.exists());
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::Transport;
    use assert_matches::assert_matches;
    use futures::{task::*, Sink, Stream};
    use pin_utils::pin_mut;
    use std::{
        io::{self, Cursor},
        pin::Pin,
    };
    use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
    use tokio_serde::formats::SymmetricalJson;

    /// A no-op waker context for driving polls synchronously in tests.
    fn ctx() -> Context<'static> {
        Context::from_waker(noop_waker_ref())
    }

    /// In-memory duplex stand-in for a socket: reads and writes go through a
    /// `Cursor<Vec<u8>>`.
    struct TestIo(Cursor<Vec<u8>>);

    impl AsyncRead for TestIo {
        fn poll_read(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            AsyncRead::poll_read(Pin::new(&mut self.0), cx, buf)
        }
    }

    impl AsyncWrite for TestIo {
        fn poll_write(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            AsyncWrite::poll_write(Pin::new(&mut self.0), cx, buf)
        }

        fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            AsyncWrite::poll_flush(Pin::new(&mut self.0), cx)
        }

        fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            AsyncWrite::poll_shutdown(Pin::new(&mut self.0), cx)
        }
    }

    #[test]
    fn close() {
        let (tx, _rx) = crate::transport::channel::bounded::<(), ()>(0);
        pin_mut!(tx);
        assert_matches!(tx.as_mut().poll_close(&mut ctx()), Poll::Ready(Ok(())));
        // Sending after close must fail.
        assert_matches!(tx.as_mut().start_send(()), Err(_));
    }

    #[test]
    fn test_stream() {
        // A single length-delimited frame: 4-byte big-endian length (0x18 = 24)
        // followed by 24 bytes of JSON payload.
        let data: &[u8] = b"\x00\x00\x00\x18\"Test one, check check.\"";
        let transport = Transport::from((
            TestIo(Cursor::new(Vec::from(data))),
            SymmetricalJson::<String>::default(),
        ));
        pin_mut!(transport);

        assert_matches!(
            transport.as_mut().poll_next(&mut ctx()),
            Poll::Ready(Some(Ok(ref s))) if s == "Test one, check check.");
        assert_matches!(transport.as_mut().poll_next(&mut ctx()), Poll::Ready(None));
    }

    #[test]
    fn test_sink() {
        let writer = Cursor::new(vec![]);
        let mut transport = Box::pin(Transport::from((
            TestIo(writer),
            SymmetricalJson::<String>::default(),
        )));

        assert_matches!(
            transport.as_mut().poll_ready(&mut ctx()),
            Poll::Ready(Ok(()))
        );
        assert_matches!(
            transport
                .as_mut()
                .start_send("Test one, check check.".into()),
            Ok(())
        );
        assert_matches!(
            transport.as_mut().poll_flush(&mut ctx()),
            Poll::Ready(Ok(()))
        );
        // Expect one length-delimited frame containing the JSON-encoded string.
        assert_eq!(
            transport.get_ref().0.get_ref(),
            b"\x00\x00\x00\x18\"Test one, check check.\""
        );
    }

    // BUG FIX: this was `#[cfg(tcp)]`, a cfg predicate that is never set, so
    // the test silently never compiled or ran. The rest of the file gates TCP
    // support on the "tcp" cargo feature, so the test must do the same.
    #[cfg(feature = "tcp")]
    #[tokio::test]
    async fn tcp() -> io::Result<()> {
        use super::tcp;

        let mut listener = tcp::listen("0.0.0.0:0", SymmetricalJson::<String>::default).await?;
        let addr = listener.local_addr();
        tokio::spawn(async move {
            let mut transport = listener.next().await.unwrap().unwrap();
            let message = transport.next().await.unwrap().unwrap();
            // Echo the first message back to the client.
            transport.send(message).await.unwrap();
        });
        let mut transport = tcp::connect(addr, SymmetricalJson::<String>::default).await?;
        transport.send(String::from("test")).await?;
        assert_matches!(transport.next().await, Some(Ok(s)) if s == "test");
        assert_matches!(transport.next().await, None);
        Ok(())
    }

    #[cfg(all(unix, feature = "unix"))]
    #[tokio::test]
    async fn uds() -> io::Result<()> {
        use super::unix;
        use super::*;

        let sock = unix::TempPathBuf::with_random("uds");
        let mut listener = unix::listen(&sock, SymmetricalJson::<String>::default).await?;
        tokio::spawn(async move {
            let mut transport = listener.next().await.unwrap().unwrap();
            let message = transport.next().await.unwrap().unwrap();
            // Echo the first message back to the client.
            transport.send(message).await.unwrap();
        });
        let mut transport = unix::connect(&sock, SymmetricalJson::<String>::default).await?;
        transport.send(String::from("test")).await?;
        assert_matches!(transport.next().await, Some(Ok(s)) if s == "test");
        assert_matches!(transport.next().await, None);
        Ok(())
    }
}
|
||||||
1592
tarpc/src/server.rs
Normal file
1592
tarpc/src/server.rs
Normal file
File diff suppressed because it is too large
Load Diff
221
tarpc/src/server/in_flight_requests.rs
Normal file
221
tarpc/src/server/in_flight_requests.rs
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
use crate::util::{Compact, TimeUntil};
|
||||||
|
use fnv::FnvHashMap;
|
||||||
|
use futures::future::{AbortHandle, AbortRegistration};
|
||||||
|
use std::{
|
||||||
|
collections::hash_map,
|
||||||
|
task::{Context, Poll},
|
||||||
|
time::SystemTime,
|
||||||
|
};
|
||||||
|
use tokio_util::time::delay_queue::{self, DelayQueue};
|
||||||
|
use tracing::Span;
|
||||||
|
|
||||||
|
/// A data structure that tracks in-flight requests. It aborts requests,
/// either on demand or when a request deadline expires.
#[derive(Debug, Default)]
pub struct InFlightRequests {
    /// Per-request cleanup data, keyed by request ID. Kept in sync with
    /// `deadlines`: every entry here owns exactly one timer key there.
    request_data: FnvHashMap<u64, RequestData>,
    /// Timer wheel that fires a request's ID when its deadline elapses.
    deadlines: DelayQueue<u64>,
}
|
||||||
|
|
||||||
|
/// Data needed to clean up a single in-flight request.
#[derive(Debug)]
struct RequestData {
    /// Aborts the response handler for the associated request.
    abort_handle: AbortHandle,
    /// The key to remove the timer for the request's deadline.
    deadline_key: delay_queue::Key,
    /// The client span, entered when logging events for this request.
    span: Span,
}
|
||||||
|
|
||||||
|
/// An error returned when a request attempted to start with the same ID as a request already
/// in flight.
#[derive(Debug)]
pub struct AlreadyExistsError;
|
||||||
|
|
||||||
|
impl InFlightRequests {
    /// Returns the number of in-flight requests.
    pub fn len(&self) -> usize {
        self.request_data.len()
    }

    /// Starts a request, unless a request with the same ID is already in flight.
    ///
    /// On success, registers a deadline timer and returns the
    /// [`AbortRegistration`] the caller should wrap around the request's
    /// response future so it can be aborted on cancellation or expiry.
    pub fn start_request(
        &mut self,
        request_id: u64,
        deadline: SystemTime,
        span: Span,
    ) -> Result<AbortRegistration, AlreadyExistsError> {
        match self.request_data.entry(request_id) {
            hash_map::Entry::Vacant(vacant) => {
                // time_until converts the absolute deadline into a Duration
                // from now (project util extension — TODO confirm it clamps
                // past deadlines to zero).
                let timeout = deadline.time_until();
                let (abort_handle, abort_registration) = AbortHandle::new_pair();
                let deadline_key = self.deadlines.insert(request_id, timeout);
                vacant.insert(RequestData {
                    abort_handle,
                    deadline_key,
                    span,
                });
                Ok(abort_registration)
            }
            hash_map::Entry::Occupied(_) => Err(AlreadyExistsError),
        }
    }

    /// Cancels an in-flight request. Returns true iff the request was found.
    ///
    /// Aborts the request's handler and removes its deadline timer.
    pub fn cancel_request(&mut self, request_id: u64) -> bool {
        if let Some(RequestData {
            span,
            abort_handle,
            deadline_key,
        }) = self.request_data.remove(&request_id)
        {
            // Log within the request's client span.
            let _entered = span.enter();
            // compact reclaims map capacity once occupancy drops below the
            // given ratio (project Compact extension — TODO confirm).
            self.request_data.compact(0.1);
            abort_handle.abort();
            self.deadlines.remove(&deadline_key);
            tracing::info!("ReceiveCancel");
            true
        } else {
            false
        }
    }

    /// Removes a request without aborting. Returns true iff the request was found.
    /// This method should be used when a response is being sent.
    ///
    /// Returns the request's span so the caller can log the response within it.
    pub fn remove_request(&mut self, request_id: u64) -> Option<Span> {
        if let Some(request_data) = self.request_data.remove(&request_id) {
            self.request_data.compact(0.1);
            self.deadlines.remove(&request_data.deadline_key);
            Some(request_data.span)
        } else {
            None
        }
    }

    /// Yields a request that has expired, aborting any ongoing processing of that request.
    ///
    /// Returns `Ready(None)` when no deadlines are being tracked.
    pub fn poll_expired(&mut self, cx: &mut Context) -> Poll<Option<u64>> {
        if self.deadlines.is_empty() {
            // TODO(https://github.com/tokio-rs/tokio/issues/4161)
            // This is a workaround for DelayQueue not always treating this case correctly.
            return Poll::Ready(None);
        }
        self.deadlines.poll_expired(cx).map(|expired| {
            let expired = expired?;
            // The entry may already be gone if the request completed or was
            // cancelled between timer expiry and this poll.
            if let Some(RequestData {
                abort_handle, span, ..
            }) = self.request_data.remove(expired.get_ref())
            {
                let _entered = span.enter();
                self.request_data.compact(0.1);
                abort_handle.abort();
                tracing::error!("DeadlineExceeded");
            }
            Some(expired.into_inner())
        })
    }
}
|
||||||
|
|
||||||
|
/// When InFlightRequests is dropped, any outstanding requests are aborted.
|
||||||
|
impl Drop for InFlightRequests {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.request_data
|
||||||
|
.values()
|
||||||
|
.for_each(|request_data| request_data.abort_handle.abort())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    use assert_matches::assert_matches;
    use futures::{
        future::{pending, Abortable},
        FutureExt,
    };
    use futures_test::task::noop_context;

    // Starting a request registers it, increasing len by one.
    #[tokio::test]
    async fn start_request_increases_len() {
        let mut in_flight_requests = InFlightRequests::default();
        assert_eq!(in_flight_requests.len(), 0);
        in_flight_requests
            .start_request(0, SystemTime::now(), Span::current())
            .unwrap();
        assert_eq!(in_flight_requests.len(), 1);
    }

    // Advancing the clock past the deadline expires the request and aborts its future.
    #[tokio::test]
    async fn polling_expired_aborts() {
        let mut in_flight_requests = InFlightRequests::default();
        let abort_registration = in_flight_requests
            .start_request(0, SystemTime::now(), Span::current())
            .unwrap();
        let mut abortable_future = Box::new(Abortable::new(pending::<()>(), abort_registration));

        // Use tokio's paused clock so the deadline elapses without real waiting.
        tokio::time::pause();
        tokio::time::advance(std::time::Duration::from_secs(1000)).await;

        assert_matches!(
            in_flight_requests.poll_expired(&mut noop_context()),
            Poll::Ready(Some(_))
        );
        assert_matches!(
            abortable_future.poll_unpin(&mut noop_context()),
            Poll::Ready(Err(_))
        );
        assert_eq!(in_flight_requests.len(), 0);
    }

    // Cancellation aborts the request's future and removes the bookkeeping.
    #[tokio::test]
    async fn cancel_request_aborts() {
        let mut in_flight_requests = InFlightRequests::default();
        let abort_registration = in_flight_requests
            .start_request(0, SystemTime::now(), Span::current())
            .unwrap();
        let mut abortable_future = Box::new(Abortable::new(pending::<()>(), abort_registration));

        assert!(in_flight_requests.cancel_request(0));
        assert_matches!(
            abortable_future.poll_unpin(&mut noop_context()),
            Poll::Ready(Err(_))
        );
        assert_eq!(in_flight_requests.len(), 0);
    }

    // Removing (as when a response is sent) must NOT abort the in-flight future,
    // and must also clear the pending deadline.
    #[tokio::test]
    async fn remove_request_doesnt_abort() {
        let mut in_flight_requests = InFlightRequests::default();
        assert!(in_flight_requests.deadlines.is_empty());

        let abort_registration = in_flight_requests
            .start_request(
                0,
                SystemTime::now() + std::time::Duration::from_secs(10),
                Span::current(),
            )
            .unwrap();
        let mut abortable_future = Box::new(Abortable::new(pending::<()>(), abort_registration));

        // Precondition: Pending expiration
        assert_matches!(
            in_flight_requests.poll_expired(&mut noop_context()),
            Poll::Pending
        );
        assert!(!in_flight_requests.deadlines.is_empty());

        assert_matches!(in_flight_requests.remove_request(0), Some(_));
        // Postcondition: No pending expirations
        assert!(in_flight_requests.deadlines.is_empty());
        assert_matches!(
            in_flight_requests.poll_expired(&mut noop_context()),
            Poll::Ready(None)
        );
        assert_matches!(
            abortable_future.poll_unpin(&mut noop_context()),
            Poll::Pending
        );
        assert_eq!(in_flight_requests.len(), 0);
    }
}
|
||||||
92
tarpc/src/server/incoming.rs
Normal file
92
tarpc/src/server/incoming.rs
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
use super::{
|
||||||
|
limits::{channels_per_key::MaxChannelsPerKey, requests_per_channel::MaxRequestsPerChannel},
|
||||||
|
Channel, Serve,
|
||||||
|
};
|
||||||
|
use futures::prelude::*;
|
||||||
|
use std::{fmt, hash::Hash};
|
||||||
|
|
||||||
|
/// An extension trait for [streams](futures::prelude::Stream) of [`Channels`](Channel).
pub trait Incoming<C>
where
    Self: Sized + Stream<Item = C>,
    C: Channel,
{
    /// Enforces channel per-key limits.
    fn max_channels_per_key<K, KF>(self, n: u32, keymaker: KF) -> MaxChannelsPerKey<Self, K, KF>
    where
        K: fmt::Display + Eq + Hash + Clone + Unpin,
        KF: Fn(&C) -> K,
    {
        MaxChannelsPerKey::new(self, n, keymaker)
    }

    /// Caps the number of concurrent requests per channel.
    fn max_concurrent_requests_per_channel(self, n: usize) -> MaxRequestsPerChannel<Self> {
        MaxRequestsPerChannel::new(self, n)
    }

    /// Returns a stream of channels in execution. Each channel in execution is a stream of
    /// futures, where each future is an in-flight request being responded to.
    fn execute<S>(
        self,
        serve: S,
    ) -> impl Stream<Item = impl Stream<Item = impl Future<Output = ()>>>
    where
        S: Serve<Req = C::Req, Resp = C::Resp> + Clone,
    {
        // Each channel gets its own clone of the serve fn so channels are independent.
        self.map(move |channel| channel.execute(serve.clone()))
    }
}
|
||||||
|
|
||||||
|
#[cfg(feature = "tokio1")]
/// Spawns all channels-in-execution, delegating to the tokio runtime to manage their completion.
/// Each channel is spawned, and each request from each channel is spawned.
/// Note that this function is generic over any stream-of-streams-of-futures, but it is intended
/// for spawning streams of channels.
///
/// # Example
/// ```rust
/// use tarpc::{
///     context,
///     client::{self, NewClient},
///     server::{self, BaseChannel, Channel, incoming::{Incoming, spawn_incoming}, serve},
///     transport,
/// };
/// use futures::prelude::*;
///
/// #[tokio::main]
/// async fn main() {
///     let (tx, rx) = transport::channel::unbounded();
///     let NewClient { client, dispatch } = client::new(client::Config::default(), tx);
///     tokio::spawn(dispatch);
///
///     let incoming = stream::once(async move {
///         BaseChannel::new(server::Config::default(), rx)
///     }).execute(serve(|_, i| async move { Ok(i + 1) }));
///     tokio::spawn(spawn_incoming(incoming));
///     assert_eq!(client.call(context::current(), "AddOne", 1).await.unwrap(), 2);
/// }
/// ```
pub async fn spawn_incoming(
    incoming: impl Stream<
        Item = impl Stream<Item = impl Future<Output = ()> + Send + 'static> + Send + 'static,
    >,
) {
    use futures::pin_mut;
    pin_mut!(incoming);
    // One task per channel; within each channel task, one task per request,
    // so slow requests don't block other requests or other channels.
    while let Some(channel) = incoming.next().await {
        tokio::spawn(async move {
            pin_mut!(channel);
            while let Some(request) = channel.next().await {
                tokio::spawn(request);
            }
        });
    }
}
|
||||||
|
|
||||||
|
// Blanket impl: every sized stream of channels gets the Incoming combinators for free.
impl<S, C> Incoming<C> for S
where
    S: Sized + Stream<Item = C>,
    C: Channel,
{
}
|
||||||
5
tarpc/src/server/limits.rs
Normal file
5
tarpc/src/server/limits.rs
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
/// Provides functionality to limit the number of active channels.
|
||||||
|
pub mod channels_per_key;
|
||||||
|
|
||||||
|
/// Provides a [channel](crate::server::Channel) that limits the number of in-flight requests.
|
||||||
|
pub mod requests_per_channel;
|
||||||
480
tarpc/src/server/limits/channels_per_key.rs
Normal file
480
tarpc/src/server/limits/channels_per_key.rs
Normal file
@@ -0,0 +1,480 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
server::{self, Channel},
|
||||||
|
util::Compact,
|
||||||
|
};
|
||||||
|
use fnv::FnvHashMap;
|
||||||
|
use futures::{prelude::*, ready, stream::Fuse, task::*};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::sync::{Arc, Weak};
|
||||||
|
use std::{
|
||||||
|
collections::hash_map::Entry, convert::TryFrom, fmt, hash::Hash, marker::Unpin, pin::Pin,
|
||||||
|
};
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
use tracing::{debug, info, trace};
|
||||||
|
|
||||||
|
/// An [`Incoming`](crate::server::incoming::Incoming) stream that drops new channels based on
/// per-key limits.
///
/// The decision to drop a Channel is made once at the time the Channel materializes. Once a
/// Channel is yielded, it will not be prematurely dropped.
#[pin_project]
#[derive(Debug)]
pub struct MaxChannelsPerKey<S, K, F>
where
    K: Eq + Hash,
{
    /// The underlying stream of incoming channels, fused so polling after exhaustion is safe.
    #[pin]
    listener: Fuse<S>,
    /// Maximum number of concurrently open channels allowed per key.
    channels_per_key: u32,
    /// Receives the key of each channel group whose last tracker was dropped.
    dropped_keys: mpsc::UnboundedReceiver<K>,
    /// Cloned into each Tracker so it can report its key when dropped.
    dropped_keys_tx: mpsc::UnboundedSender<K>,
    /// Per-key trackers; the Weak's strong count is the number of open channels for that key.
    key_counts: FnvHashMap<K, Weak<Tracker<K>>>,
    /// Derives the limiting key from an incoming channel.
    keymaker: F,
}

/// A channel that is tracked by [`MaxChannelsPerKey`].
#[pin_project]
#[derive(Debug)]
pub struct TrackedChannel<C, K> {
    /// The wrapped channel; all Stream/Sink operations delegate to it.
    #[pin]
    inner: C,
    /// Keeps this channel counted against its key for as long as the channel lives.
    tracker: Arc<Tracker<K>>,
}

/// Shared bookkeeping for all channels with the same key; when the last Arc clone is
/// dropped, the key is sent back to the filter so its map entry can be cleaned up.
#[derive(Debug)]
struct Tracker<K> {
    /// The tracked key; Option so Drop can move it out. Always Some until Drop runs.
    key: Option<K>,
    /// Channel on which the key is reported back to the filter at drop time.
    dropped_keys: mpsc::UnboundedSender<K>,
}
|
||||||
|
|
||||||
|
impl<K> Drop for Tracker<K> {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
// Don't care if the listener is dropped.
|
||||||
|
let _ = self.dropped_keys.send(self.key.take().unwrap());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrackedChannel is a transparent wrapper: all Stream/Sink/Channel operations
// delegate to the inner channel; the tracker field only affects Drop accounting.
impl<C, K> Stream for TrackedChannel<C, K>
where
    C: Stream,
{
    type Item = <C as Stream>::Item;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        self.inner_pin_mut().poll_next(cx)
    }
}

impl<C, I, K> Sink<I> for TrackedChannel<C, K>
where
    C: Sink<I>,
{
    type Error = C::Error;

    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.inner_pin_mut().poll_ready(cx)
    }

    fn start_send(mut self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
        self.inner_pin_mut().start_send(item)
    }

    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.inner_pin_mut().poll_flush(cx)
    }

    fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.inner_pin_mut().poll_close(cx)
    }
}

impl<C, K> AsRef<C> for TrackedChannel<C, K> {
    fn as_ref(&self) -> &C {
        &self.inner
    }
}

impl<C, K> Channel for TrackedChannel<C, K>
where
    C: Channel,
{
    type Req = C::Req;
    type Resp = C::Resp;
    type Transport = C::Transport;

    fn config(&self) -> &server::Config {
        self.inner.config()
    }

    fn in_flight_requests(&self) -> usize {
        self.inner.in_flight_requests()
    }

    fn transport(&self) -> &Self::Transport {
        self.inner.transport()
    }
}

impl<C, K> TrackedChannel<C, K> {
    /// Returns the inner channel.
    pub fn get_ref(&self) -> &C {
        &self.inner
    }

    /// Returns the pinned inner channel.
    fn inner_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut C> {
        self.as_mut().project().inner
    }
}
|
||||||
|
|
||||||
|
impl<S, K, F> MaxChannelsPerKey<S, K, F>
where
    K: Eq + Hash,
    S: Stream,
    F: Fn(&S::Item) -> K,
{
    /// Sheds new channels to stay under configured limits.
    pub(crate) fn new(listener: S, channels_per_key: u32, keymaker: F) -> Self {
        // Trackers send their key on this channel when dropped so the filter can
        // clean up its per-key entry.
        let (dropped_keys_tx, dropped_keys) = mpsc::unbounded_channel();
        MaxChannelsPerKey {
            listener: listener.fuse(),
            channels_per_key,
            dropped_keys,
            dropped_keys_tx,
            key_counts: FnvHashMap::default(),
            keymaker,
        }
    }
}
|
||||||
|
|
||||||
|
impl<S, K, F> MaxChannelsPerKey<S, K, F>
where
    S: Stream,
    K: fmt::Display + Eq + Hash + Clone + Unpin,
    F: Fn(&S::Item) -> K,
{
    /// Projects a pinned reference to the (fused) listener stream.
    fn listener_pin_mut<'a>(self: &'a mut Pin<&mut Self>) -> Pin<&'a mut Fuse<S>> {
        self.as_mut().project().listener
    }

    /// Wraps a newly-arrived channel in a [`TrackedChannel`], or returns the channel's key
    /// in `Err` if that key is already at its open-channel limit (dropping the channel).
    fn handle_new_channel(
        mut self: Pin<&mut Self>,
        stream: S::Item,
    ) -> Result<TrackedChannel<S::Item, K>, K> {
        let key = (self.as_mut().keymaker)(&stream);
        let tracker = self.as_mut().increment_channels_for_key(key.clone())?;

        trace!(
            channel_filter_key = %key,
            open_channels = Arc::strong_count(&tracker),
            max_open_channels = self.channels_per_key,
            "Opening channel");

        Ok(TrackedChannel {
            tracker,
            inner: stream,
        })
    }

    /// Accounts for one more channel for `key`, failing with the key if the per-key limit
    /// would be exceeded. The Arc strong count of the tracker is the open-channel count.
    fn increment_channels_for_key(self: Pin<&mut Self>, key: K) -> Result<Arc<Tracker<K>>, K> {
        let self_ = self.project();
        let dropped_keys = self_.dropped_keys_tx;
        match self_.key_counts.entry(key.clone()) {
            Entry::Vacant(vacant) => {
                // First channel for this key: create a tracker and remember it weakly,
                // so the map entry alone doesn't keep the count alive.
                let tracker = Arc::new(Tracker {
                    key: Some(key),
                    dropped_keys: dropped_keys.clone(),
                });

                vacant.insert(Arc::downgrade(&tracker));
                Ok(tracker)
            }
            Entry::Occupied(mut o) => {
                let count = o.get().strong_count();
                if count >= TryFrom::try_from(*self_.channels_per_key).unwrap() {
                    info!(
                        channel_filter_key = %key,
                        open_channels = count,
                        max_open_channels = *self_.channels_per_key,
                        "At open channel limit");
                    Err(key)
                } else {
                    // The Weak may be dangling if all channels for this key were dropped
                    // but the dropped-key notification hasn't been processed yet; in that
                    // case, start fresh with a new tracker.
                    Ok(o.get().upgrade().unwrap_or_else(|| {
                        let tracker = Arc::new(Tracker {
                            key: Some(key),
                            dropped_keys: dropped_keys.clone(),
                        });

                        *o.get_mut() = Arc::downgrade(&tracker);
                        tracker
                    }))
                }
            }
        }
    }

    /// Polls the listener for a new channel, applying the per-key limit to each arrival.
    fn poll_listener(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<TrackedChannel<S::Item, K>, K>>> {
        match ready!(self.listener_pin_mut().poll_next_unpin(cx)) {
            Some(codec) => Poll::Ready(Some(self.handle_new_channel(codec))),
            None => Poll::Ready(None),
        }
    }

    /// Processes one dropped-key notification, removing that key's entry from the map.
    fn poll_closed_channels(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let self_ = self.project();
        match ready!(self_.dropped_keys.poll_recv(cx)) {
            Some(key) => {
                debug!(
                    channel_filter_key = %key,
                    "All channels dropped");
                self_.key_counts.remove(&key);
                // Shrink the map occasionally so it doesn't retain peak capacity.
                self_.key_counts.compact(0.1);
                Poll::Ready(())
            }
            // The filter holds its own sender clone (dropped_keys_tx), so recv can
            // never observe a closed channel.
            None => unreachable!("Holding a copy of closed_channels and didn't close it."),
        }
    }
}
|
||||||
|
|
||||||
|
impl<S, K, F> Stream for MaxChannelsPerKey<S, K, F>
where
    S: Stream,
    K: fmt::Display + Eq + Hash + Clone + Unpin,
    F: Fn(&S::Item) -> K,
{
    type Item = TrackedChannel<S::Item, K>;

    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<TrackedChannel<S::Item, K>>> {
        loop {
            // Drive both the listener and the dropped-key notifications each pass;
            // keep looping while either side makes progress.
            match (
                self.as_mut().poll_listener(cx),
                self.as_mut().poll_closed_channels(cx),
            ) {
                (Poll::Ready(Some(Ok(channel))), _) => {
                    return Poll::Ready(Some(channel));
                }
                (Poll::Ready(Some(Err(_))), _) => {
                    // Channel was shed for exceeding its key's limit; try the next one.
                    continue;
                }
                (_, Poll::Ready(())) => continue,
                (Poll::Pending, Poll::Pending) => return Poll::Pending,
                (Poll::Ready(None), Poll::Pending) => {
                    trace!("Shutting down listener.");
                    return Poll::Ready(None);
                }
            }
        }
    }
}
|
||||||
|
// Returns a no-op Context for driving polls in tests.
#[cfg(test)]
fn ctx() -> Context<'static> {
    use futures::task::*;

    Context::from_waker(noop_waker_ref())
}

// Dropping a Tracker must report its key on the dropped-keys channel.
#[test]
fn tracker_drop() {
    use assert_matches::assert_matches;

    let (tx, mut rx) = mpsc::unbounded_channel();
    Tracker {
        key: Some(1),
        dropped_keys: tx,
    };
    assert_matches!(rx.poll_recv(&mut ctx()), Poll::Ready(Some(1)));
}

// TrackedChannel's Stream impl passes items through from the inner channel.
#[test]
fn tracked_channel_stream() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    let (chan_tx, chan) = futures::channel::mpsc::unbounded();
    let (dropped_keys, _) = mpsc::unbounded_channel();
    let channel = TrackedChannel {
        inner: chan,
        tracker: Arc::new(Tracker {
            key: Some(1),
            dropped_keys,
        }),
    };

    chan_tx.unbounded_send("test").unwrap();
    pin_mut!(channel);
    assert_matches!(channel.poll_next(&mut ctx()), Poll::Ready(Some("test")));
}

// TrackedChannel's Sink impl passes items through to the inner channel.
#[test]
fn tracked_channel_sink() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    let (chan, mut chan_rx) = futures::channel::mpsc::unbounded();
    let (dropped_keys, _) = mpsc::unbounded_channel();
    let channel = TrackedChannel {
        inner: chan,
        tracker: Arc::new(Tracker {
            key: Some(1),
            dropped_keys,
        }),
    };

    pin_mut!(channel);
    assert_matches!(channel.as_mut().poll_ready(&mut ctx()), Poll::Ready(Ok(())));
    assert_matches!(channel.as_mut().start_send("test"), Ok(()));
    assert_matches!(channel.as_mut().poll_flush(&mut ctx()), Poll::Ready(Ok(())));
    assert_matches!(chan_rx.try_next(), Ok(Some("test")));
}

// Counting is via the tracker's Arc strong count; exceeding the limit errors with the key.
#[test]
fn channel_filter_increment_channels_for_key() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    struct TestChannel {
        key: &'static str,
    }
    let (_, listener) = futures::channel::mpsc::unbounded();
    let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);
    let tracker1 = filter.as_mut().increment_channels_for_key("key").unwrap();
    assert_eq!(Arc::strong_count(&tracker1), 1);
    let tracker2 = filter.as_mut().increment_channels_for_key("key").unwrap();
    assert_eq!(Arc::strong_count(&tracker1), 2);
    assert_matches!(filter.increment_channels_for_key("key"), Err("key"));
    drop(tracker2);
    assert_eq!(Arc::strong_count(&tracker1), 1);
}

// handle_new_channel wraps channels until the limit, then rejects with the key.
#[test]
fn channel_filter_handle_new_channel() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (_, listener) = futures::channel::mpsc::unbounded();
    let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);
    let channel1 = filter
        .as_mut()
        .handle_new_channel(TestChannel { key: "key" })
        .unwrap();
    assert_eq!(Arc::strong_count(&channel1.tracker), 1);

    let channel2 = filter
        .as_mut()
        .handle_new_channel(TestChannel { key: "key" })
        .unwrap();
    assert_eq!(Arc::strong_count(&channel1.tracker), 2);

    assert_matches!(
        filter.handle_new_channel(TestChannel { key: "key" }),
        Err("key")
    );
    drop(channel2);
    assert_eq!(Arc::strong_count(&channel1.tracker), 1);
}

// poll_listener applies the limit to channels pulled from the listener stream.
#[test]
fn channel_filter_poll_listener() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (new_channels, listener) = futures::channel::mpsc::unbounded();
    let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let channel1 =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c);
    assert_eq!(Arc::strong_count(&channel1.tracker), 1);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let _channel2 =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c);
    assert_eq!(Arc::strong_count(&channel1.tracker), 2);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let key =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Err(k))) => k);
    assert_eq!(key, "key");
    assert_eq!(Arc::strong_count(&channel1.tracker), 2);
}

// Dropping the last channel for a key removes the key's entry from the map.
#[test]
fn channel_filter_poll_closed_channels() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (new_channels, listener) = futures::channel::mpsc::unbounded();
    let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let channel =
        assert_matches!(filter.as_mut().poll_listener(&mut ctx()), Poll::Ready(Some(Ok(c))) => c);
    assert_eq!(filter.key_counts.len(), 1);

    drop(channel);
    assert_matches!(
        filter.as_mut().poll_closed_channels(&mut ctx()),
        Poll::Ready(())
    );
    assert!(filter.key_counts.is_empty());
}

// End-to-end through the Stream impl: yield a channel, then clean up after its drop.
#[test]
fn channel_filter_stream() {
    use assert_matches::assert_matches;
    use pin_utils::pin_mut;

    #[derive(Debug)]
    struct TestChannel {
        key: &'static str,
    }
    let (new_channels, listener) = futures::channel::mpsc::unbounded();
    let filter = MaxChannelsPerKey::new(listener, 2, |chan: &TestChannel| chan.key);
    pin_mut!(filter);

    new_channels
        .unbounded_send(TestChannel { key: "key" })
        .unwrap();
    let channel = assert_matches!(filter.as_mut().poll_next(&mut ctx()), Poll::Ready(Some(c)) => c);
    assert_eq!(filter.key_counts.len(), 1);

    drop(channel);
    assert_matches!(filter.as_mut().poll_next(&mut ctx()), Poll::Pending);
    assert!(filter.key_counts.is_empty());
}
|
||||||
349
tarpc/src/server/limits/requests_per_channel.rs
Normal file
349
tarpc/src/server/limits/requests_per_channel.rs
Normal file
@@ -0,0 +1,349 @@
|
|||||||
|
// Copyright 2020 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
server::{Channel, Config},
|
||||||
|
Response, ServerError,
|
||||||
|
};
|
||||||
|
use futures::{prelude::*, ready, task::*};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::{io, pin::Pin};
|
||||||
|
|
||||||
|
/// A [`Channel`] that limits the number of concurrent requests by throttling.
///
/// Note that this is a very basic throttling heuristic. It is easy to set a number that is too low
/// for the resources available to the server. For production use cases, a more advanced throttler
/// is likely needed.
#[pin_project]
#[derive(Debug)]
pub struct MaxRequests<C> {
    /// Requests arriving while this many are already in flight are rejected.
    max_in_flight_requests: usize,
    /// The wrapped channel; all Stream/Sink operations delegate to it.
    #[pin]
    inner: C,
}
|
||||||
|
|
||||||
|
impl<C> MaxRequests<C> {
    /// Returns the inner channel.
    pub fn get_ref(&self) -> &C {
        &self.inner
    }
}

impl<C> MaxRequests<C>
where
    C: Channel,
{
    /// Returns a new `MaxRequests` that wraps the given channel and limits concurrent requests to
    /// `max_in_flight_requests`.
    pub fn new(inner: C, max_in_flight_requests: usize) -> Self {
        MaxRequests {
            max_in_flight_requests,
            inner,
        }
    }
}
|
||||||
|
|
||||||
|
impl<C> Stream for MaxRequests<C>
where
    C: Channel,
{
    type Item = <C as Stream>::Item;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        // While at or over the limit, drain incoming requests and immediately reject
        // each one instead of yielding it for processing.
        while self.as_mut().in_flight_requests() >= *self.as_mut().project().max_in_flight_requests
        {
            // Ensure the sink can accept a rejection response before pulling a request
            // that we would have to reject.
            ready!(self.as_mut().project().inner.poll_ready(cx)?);

            match ready!(self.as_mut().project().inner.poll_next(cx)?) {
                Some(r) => {
                    let _entered = r.span.enter();
                    tracing::info!(
                        in_flight_requests = self.as_mut().in_flight_requests(),
                        "ThrottleRequest",
                    );

                    // Respond with WouldBlock so the client knows this was throttling,
                    // not a failure of the request itself.
                    self.as_mut().start_send(Response {
                        request_id: r.request.id,
                        message: Err(ServerError {
                            kind: io::ErrorKind::WouldBlock,
                            detail: "server throttled the request.".into(),
                        }),
                    })?;
                }
                None => return Poll::Ready(None),
            }
        }
        // Under the limit: yield requests straight through.
        self.project().inner.poll_next(cx)
    }
}
|
||||||
|
|
||||||
|
// The Sink, AsRef, and Channel impls are pure delegation to the wrapped channel;
// only poll_next (above) adds throttling behavior.
impl<C> Sink<Response<<C as Channel>::Resp>> for MaxRequests<C>
where
    C: Channel,
{
    type Error = C::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_ready(cx)
    }

    fn start_send(
        self: Pin<&mut Self>,
        item: Response<<C as Channel>::Resp>,
    ) -> Result<(), Self::Error> {
        self.project().inner.start_send(item)
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_flush(cx)
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().inner.poll_close(cx)
    }
}

impl<C> AsRef<C> for MaxRequests<C> {
    fn as_ref(&self) -> &C {
        &self.inner
    }
}

impl<C> Channel for MaxRequests<C>
where
    C: Channel,
{
    type Req = <C as Channel>::Req;
    type Resp = <C as Channel>::Resp;
    type Transport = <C as Channel>::Transport;

    fn in_flight_requests(&self) -> usize {
        self.inner.in_flight_requests()
    }

    fn config(&self) -> &Config {
        self.inner.config()
    }

    fn transport(&self) -> &Self::Transport {
        self.inner.transport()
    }
}
|
||||||
|
|
||||||
|
/// An [`Incoming`](crate::server::incoming::Incoming) stream of channels that enforce limits on
/// the number of in-flight requests.
#[pin_project]
#[derive(Debug)]
pub struct MaxRequestsPerChannel<S> {
    /// The underlying stream of channels.
    #[pin]
    inner: S,
    /// Limit applied to each yielded channel.
    max_in_flight_requests: usize,
}

impl<S> MaxRequestsPerChannel<S>
where
    S: Stream,
    <S as Stream>::Item: Channel,
{
    pub(crate) fn new(inner: S, max_in_flight_requests: usize) -> Self {
        Self {
            inner,
            max_in_flight_requests,
        }
    }
}

impl<S> Stream for MaxRequestsPerChannel<S>
where
    S: Stream,
    <S as Stream>::Item: Channel,
{
    type Item = MaxRequests<<S as Stream>::Item>;

    // Wraps each channel yielded by the inner stream in a MaxRequests limiter.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        match ready!(self.as_mut().project().inner.poll_next(cx)) {
            Some(channel) => Poll::Ready(Some(MaxRequests::new(
                channel,
                *self.project().max_in_flight_requests,
            ))),
            None => Poll::Ready(None),
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    use crate::server::{
        testing::{self, FakeChannel, PollExt},
        TrackedRequest,
    };
    use pin_utils::pin_mut;
    use std::{
        marker::PhantomData,
        time::{Duration, SystemTime},
    };
    use tracing::Span;

    // `in_flight_requests` reports the count tracked by the inner channel.
    #[tokio::test]
    async fn throttler_in_flight_requests() {
        let throttler = MaxRequests {
            max_in_flight_requests: 0,
            inner: FakeChannel::default::<isize, isize>(),
        };

        pin_mut!(throttler);
        // Register five fake requests directly with the inner channel.
        for i in 0..5 {
            throttler
                .inner
                .in_flight_requests
                .start_request(
                    i,
                    SystemTime::now() + Duration::from_secs(1),
                    Span::current(),
                )
                .unwrap();
        }
        assert_eq!(throttler.as_mut().in_flight_requests(), 5);
    }

    // An exhausted inner channel means the throttler's stream is done.
    #[test]
    fn throttler_poll_next_done() {
        let throttler = MaxRequests {
            max_in_flight_requests: 0,
            inner: FakeChannel::default::<isize, isize>(),
        };

        pin_mut!(throttler);
        assert!(throttler.as_mut().poll_next(&mut testing::cx()).is_done());
    }

    // Below the limit, a request passes through untouched.
    #[test]
    fn throttler_poll_next_some() -> io::Result<()> {
        let throttler = MaxRequests {
            max_in_flight_requests: 1,
            inner: FakeChannel::default::<isize, isize>(),
        };

        pin_mut!(throttler);
        throttler.inner.push_req(0, 1);
        assert!(throttler.as_mut().poll_ready(&mut testing::cx()).is_ready());
        assert_eq!(
            throttler
                .as_mut()
                .poll_next(&mut testing::cx())?
                .map(|r| r.map(|r| (r.request.id, r.request.message))),
            Poll::Ready(Some((0, 1)))
        );
        Ok(())
    }

    // At the limit, the request is rejected: an error response is written to
    // the sink instead of the request being yielded.
    #[test]
    fn throttler_poll_next_throttled() {
        let throttler = MaxRequests {
            max_in_flight_requests: 0,
            inner: FakeChannel::default::<isize, isize>(),
        };

        pin_mut!(throttler);
        throttler.inner.push_req(1, 1);
        assert!(throttler.as_mut().poll_next(&mut testing::cx()).is_done());
        assert_eq!(throttler.inner.sink.len(), 1);
        let resp = throttler.inner.sink.get(0).unwrap();
        assert_eq!(resp.request_id, 1);
        assert!(resp.message.is_err());
    }

    // If the sink can't accept the throttling response, polling stays pending
    // rather than dropping the rejection on the floor.
    #[test]
    fn throttler_poll_next_throttled_sink_not_ready() {
        let throttler = MaxRequests {
            max_in_flight_requests: 0,
            inner: PendingSink::default::<isize, isize>(),
        };
        pin_mut!(throttler);
        assert!(throttler.poll_next(&mut testing::cx()).is_pending());

        // A test channel whose sink never becomes ready.
        struct PendingSink<In, Out> {
            ghost: PhantomData<fn(Out) -> In>,
        }
        impl PendingSink<(), ()> {
            pub fn default<Req, Resp>(
            ) -> PendingSink<io::Result<TrackedRequest<Req>>, Response<Resp>> {
                PendingSink { ghost: PhantomData }
            }
        }
        impl<In, Out> Stream for PendingSink<In, Out> {
            type Item = In;
            fn poll_next(self: Pin<&mut Self>, _: &mut Context) -> Poll<Option<Self::Item>> {
                // The test never reads from this channel.
                unimplemented!()
            }
        }
        impl<In, Out> Sink<Out> for PendingSink<In, Out> {
            type Error = io::Error;
            fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll<Result<(), Self::Error>> {
                Poll::Pending
            }
            fn start_send(self: Pin<&mut Self>, _: Out) -> Result<(), Self::Error> {
                Err(io::Error::from(io::ErrorKind::WouldBlock))
            }
            fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll<Result<(), Self::Error>> {
                Poll::Pending
            }
            fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll<Result<(), Self::Error>> {
                Poll::Pending
            }
        }
        impl<Req, Resp> Channel for PendingSink<io::Result<TrackedRequest<Req>>, Response<Resp>> {
            type Req = Req;
            type Resp = Resp;
            type Transport = ();
            fn config(&self) -> &Config {
                unimplemented!()
            }
            fn in_flight_requests(&self) -> usize {
                0
            }
            fn transport(&self) -> &() {
                &()
            }
        }
    }

    // Sending a response through the throttler completes the tracked request
    // and forwards the response to the inner sink.
    #[tokio::test]
    async fn throttler_start_send() {
        let throttler = MaxRequests {
            max_in_flight_requests: 0,
            inner: FakeChannel::default::<isize, isize>(),
        };

        pin_mut!(throttler);
        throttler
            .inner
            .in_flight_requests
            .start_request(
                0,
                SystemTime::now() + Duration::from_secs(1),
                Span::current(),
            )
            .unwrap();
        throttler
            .as_mut()
            .start_send(Response {
                request_id: 0,
                message: Ok(1),
            })
            .unwrap();
        // Completing the response removes it from the in-flight table...
        assert_eq!(throttler.inner.in_flight_requests.len(), 0);
        // ...and the response itself lands in the sink.
        assert_eq!(
            throttler.inner.sink.get(0),
            Some(&Response {
                request_id: 0,
                message: Ok(1),
            })
        );
    }
}
|
||||||
25
tarpc/src/server/request_hook.rs
Normal file
25
tarpc/src/server/request_hook.rs
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Hooks for horizontal functionality that can run either before or after a request is executed.
|
||||||
|
|
||||||
|
/// A request hook that runs before a request is executed.
|
||||||
|
mod before;
|
||||||
|
|
||||||
|
/// A request hook that runs after a request is completed.
|
||||||
|
mod after;
|
||||||
|
|
||||||
|
/// A request hook that runs both before a request is executed and after it is completed.
|
||||||
|
mod before_and_after;
|
||||||
|
|
||||||
|
pub use {
|
||||||
|
after::{AfterRequest, ServeThenHook},
|
||||||
|
before::{
|
||||||
|
before, BeforeRequest, BeforeRequestCons, BeforeRequestList, BeforeRequestNil,
|
||||||
|
HookThenServe,
|
||||||
|
},
|
||||||
|
before_and_after::HookThenServeThenHook,
|
||||||
|
};
|
||||||
72
tarpc/src/server/request_hook/after.rs
Normal file
72
tarpc/src/server/request_hook/after.rs
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Provides a hook that runs after request execution.
|
||||||
|
|
||||||
|
use crate::{context, server::Serve, ServerError};
|
||||||
|
use futures::prelude::*;
|
||||||
|
|
||||||
|
/// A hook that runs after request execution.
#[allow(async_fn_in_trait)]
pub trait AfterRequest<Resp> {
    /// The function that is called after request execution.
    ///
    /// The hook can modify the request context and the response.
    async fn after(&mut self, ctx: &mut context::Context, resp: &mut Result<Resp, ServerError>);
}
|
||||||
|
|
||||||
|
/// Any async closure/fn with the matching signature is an `AfterRequest` hook,
/// so plain closures can be used without a newtype wrapper.
impl<F, Fut, Resp> AfterRequest<Resp> for F
where
    F: FnMut(&mut context::Context, &mut Result<Resp, ServerError>) -> Fut,
    Fut: Future<Output = ()>,
{
    async fn after(&mut self, ctx: &mut context::Context, resp: &mut Result<Resp, ServerError>) {
        self(ctx, resp).await
    }
}
|
||||||
|
|
||||||
|
/// A Service function that runs a hook after request execution.
pub struct ServeThenHook<Serv, Hook> {
    // The wrapped serve fn that handles the request.
    serve: Serv,
    // Runs after `serve` completes, with mutable access to the response.
    hook: Hook,
}
|
||||||
|
|
||||||
|
impl<Serv, Hook> ServeThenHook<Serv, Hook> {
    // Pairs a serve fn with a post-execution hook.
    pub(crate) fn new(serve: Serv, hook: Hook) -> Self {
        Self { serve, hook }
    }
}
|
||||||
|
|
||||||
|
impl<Serv: Clone, Hook: Clone> Clone for ServeThenHook<Serv, Hook> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
serve: self.serve.clone(),
|
||||||
|
hook: self.hook.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serving first delegates to the inner serve fn, then gives the hook a chance
/// to observe and mutate the context and response.
impl<Serv, Hook> Serve for ServeThenHook<Serv, Hook>
where
    Serv: Serve,
    Hook: AfterRequest<Serv::Resp>,
{
    type Req = Serv::Req;
    type Resp = Serv::Resp;

    async fn serve(
        self,
        mut ctx: context::Context,
        req: Serv::Req,
    ) -> Result<Serv::Resp, ServerError> {
        let ServeThenHook {
            serve, mut hook, ..
        } = self;
        let mut resp = serve.serve(ctx, req).await;
        // The hook runs on both success and error responses.
        hook.after(&mut ctx, &mut resp).await;
        resp
    }
}
|
||||||
210
tarpc/src/server/request_hook/before.rs
Normal file
210
tarpc/src/server/request_hook/before.rs
Normal file
@@ -0,0 +1,210 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Provides a hook that runs before request execution.
|
||||||
|
|
||||||
|
use crate::{context, server::Serve, ServerError};
|
||||||
|
use futures::prelude::*;
|
||||||
|
|
||||||
|
/// A hook that runs before request execution.
#[allow(async_fn_in_trait)]
pub trait BeforeRequest<Req> {
    /// The function that is called before request execution.
    ///
    /// If this function returns an error, the request will not be executed and the error will be
    /// returned instead.
    ///
    /// This function can also modify the request context. This could be used, for example, to
    /// enforce a maximum deadline on all requests.
    async fn before(&mut self, ctx: &mut context::Context, req: &Req) -> Result<(), ServerError>;
}
|
||||||
|
|
||||||
|
/// A list of hooks that run in order before request execution.
pub trait BeforeRequestList<Req>: BeforeRequest<Req> {
    /// The hook returned by `BeforeRequestList::then`.
    type Then<Next>: BeforeRequest<Req>
    where
        Next: BeforeRequest<Req>;

    /// Returns a hook that, when run, runs two hooks, first `self` and then `next`.
    fn then<Next: BeforeRequest<Req>>(self, next: Next) -> Self::Then<Next>;

    /// Same as `then`, but helps the compiler with type inference when `Next` is a closure.
    fn then_fn<
        Next: FnMut(&mut context::Context, &Req) -> Fut,
        Fut: Future<Output = Result<(), ServerError>>,
    >(
        self,
        next: Next,
    ) -> Self::Then<Next>
    where
        Self: Sized,
    {
        self.then(next)
    }

    /// The service fn returned by `BeforeRequestList::serving`.
    type Serve<S: Serve<Req = Req>>: Serve<Req = Req>;

    /// Runs the list of request hooks before execution of the given serve fn.
    /// This is equivalent to `serve.before(before_request_chain)` but may be syntactically nicer.
    fn serving<S: Serve<Req = Req>>(self, serve: S) -> Self::Serve<S>;
}
|
||||||
|
|
||||||
|
/// Any async closure/fn with the matching signature is a `BeforeRequest` hook,
/// so plain closures can be used without a newtype wrapper.
impl<F, Fut, Req> BeforeRequest<Req> for F
where
    F: FnMut(&mut context::Context, &Req) -> Fut,
    Fut: Future<Output = Result<(), ServerError>>,
{
    async fn before(&mut self, ctx: &mut context::Context, req: &Req) -> Result<(), ServerError> {
        self(ctx, req).await
    }
}
|
||||||
|
|
||||||
|
/// A Service function that runs a hook before request execution.
#[derive(Clone)]
pub struct HookThenServe<Serv, Hook> {
    // The wrapped serve fn that handles the request.
    serve: Serv,
    // Runs before `serve`; may veto the request by returning an error.
    hook: Hook,
}
|
||||||
|
|
||||||
|
impl<Serv, Hook> HookThenServe<Serv, Hook> {
    // Pairs a serve fn with a pre-execution hook.
    pub(crate) fn new(serve: Serv, hook: Hook) -> Self {
        Self { serve, hook }
    }
}
|
||||||
|
|
||||||
|
/// Serving first runs the hook; only if it succeeds is the request executed.
impl<Serv, Hook> Serve for HookThenServe<Serv, Hook>
where
    Serv: Serve,
    Hook: BeforeRequest<Serv::Req>,
{
    type Req = Serv::Req;
    type Resp = Serv::Resp;

    async fn serve(
        self,
        mut ctx: context::Context,
        req: Self::Req,
    ) -> Result<Serv::Resp, ServerError> {
        let HookThenServe {
            serve, mut hook, ..
        } = self;
        // A hook error short-circuits: the request is never executed.
        hook.before(&mut ctx, &req).await?;
        serve.serve(ctx, req).await
    }
}
|
||||||
|
|
||||||
|
/// Returns a request hook builder that runs a series of hooks before request execution.
///
/// Example
///
/// ```rust
/// use futures::{executor::block_on, future};
/// use tarpc::{context, ServerError, server::{Serve, serve, request_hook::{self,
///     BeforeRequest, BeforeRequestList}}};
/// use std::{cell::Cell, io};
///
/// let i = Cell::new(0);
/// let serve = request_hook::before()
///     .then_fn(|_, _| async {
///         assert!(i.get() == 0);
///         i.set(1);
///         Ok(())
///     })
///     .then_fn(|_, _| async {
///         assert!(i.get() == 1);
///         i.set(2);
///         Ok(())
///     })
///     .serving(serve(|_ctx, i| async move { Ok(i + 1) }));
/// let response = serve.clone().serve(context::current(), 1);
/// assert!(block_on(response).is_ok());
/// assert!(i.get() == 2);
/// ```
pub fn before() -> BeforeRequestNil {
    // The empty list; hooks are appended with `then`/`then_fn`.
    BeforeRequestNil
}
|
||||||
|
|
||||||
|
// The hook list is a heterogeneous cons-list built at compile time, so
// each hook keeps its concrete type (no boxing, no dynamic dispatch).

/// A list of hooks that run in order before a request is executed.
#[derive(Clone, Copy)]
pub struct BeforeRequestCons<First, Rest>(First, Rest);

/// A noop hook that runs before a request is executed.
#[derive(Clone, Copy)]
pub struct BeforeRequestNil;
|
||||||
|
|
||||||
|
/// Runs the head hook, then the rest of the list; the first error aborts.
impl<Req, First: BeforeRequest<Req>, Rest: BeforeRequest<Req>> BeforeRequest<Req>
    for BeforeRequestCons<First, Rest>
{
    async fn before(&mut self, ctx: &mut context::Context, req: &Req) -> Result<(), ServerError> {
        let BeforeRequestCons(first, rest) = self;
        first.before(ctx, req).await?;
        rest.before(ctx, req).await?;
        Ok(())
    }
}
|
||||||
|
|
||||||
|
/// The empty list trivially succeeds without touching the request.
impl<Req> BeforeRequest<Req> for BeforeRequestNil {
    async fn before(&mut self, _: &mut context::Context, _: &Req) -> Result<(), ServerError> {
        Ok(())
    }
}
|
||||||
|
|
||||||
|
/// Appending to a non-empty list recurses into the tail, preserving order:
/// `first` stays at the front and `next` lands at the end.
impl<Req, First: BeforeRequest<Req>, Rest: BeforeRequestList<Req>> BeforeRequestList<Req>
    for BeforeRequestCons<First, Rest>
{
    type Then<Next> = BeforeRequestCons<First, Rest::Then<Next>> where Next: BeforeRequest<Req>;

    fn then<Next: BeforeRequest<Req>>(self, next: Next) -> Self::Then<Next> {
        let BeforeRequestCons(first, rest) = self;
        BeforeRequestCons(first, rest.then(next))
    }

    type Serve<S: Serve<Req = Req>> = HookThenServe<S, Self>;

    fn serving<S: Serve<Req = Req>>(self, serve: S) -> Self::Serve<S> {
        HookThenServe::new(serve, self)
    }
}
|
||||||
|
|
||||||
|
/// Appending to the empty list yields a one-element list; serving with the
/// empty list is the serve fn unchanged (the noop hook adds nothing).
impl<Req> BeforeRequestList<Req> for BeforeRequestNil {
    type Then<Next> = BeforeRequestCons<Next, BeforeRequestNil> where Next: BeforeRequest<Req>;

    fn then<Next: BeforeRequest<Req>>(self, next: Next) -> Self::Then<Next> {
        BeforeRequestCons(next, BeforeRequestNil)
    }

    type Serve<S: Serve<Req = Req>> = S;

    fn serving<S: Serve<Req = Req>>(self, serve: S) -> S {
        serve
    }
}
|
||||||
|
|
||||||
|
/// Verifies that chained hooks run in insertion order before the serve fn.
#[test]
fn before_request_list() {
    use crate::server::serve;
    use futures::executor::block_on;
    use std::cell::Cell;

    // Each hook bumps the step counter and asserts it ran in its slot.
    let step = Cell::new(0);
    let service = before()
        .then_fn(|_, _| async {
            assert!(step.get() == 0);
            step.set(1);
            Ok(())
        })
        .then_fn(|_, _| async {
            assert!(step.get() == 1);
            step.set(2);
            Ok(())
        })
        .serving(serve(|_ctx, i| async move { Ok(i + 1) }));

    let response = service.clone().serve(context::current(), 1);
    assert!(block_on(response).is_ok());
    // Both hooks ran, in order.
    assert!(step.get() == 2);
}
|
||||||
57
tarpc/src/server/request_hook/before_and_after.rs
Normal file
57
tarpc/src/server/request_hook/before_and_after.rs
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
// Copyright 2022 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Provides a hook that runs both before and after request execution.
|
||||||
|
|
||||||
|
use super::{after::AfterRequest, before::BeforeRequest};
|
||||||
|
use crate::{context, server::Serve, ServerError};
|
||||||
|
use std::marker::PhantomData;
|
||||||
|
|
||||||
|
/// A Service function that runs a hook both before and after request execution.
pub struct HookThenServeThenHook<Req, Resp, Serv, Hook> {
    // The wrapped serve fn that handles the request.
    serve: Serv,
    // A single hook value implementing both BeforeRequest and AfterRequest.
    hook: Hook,
    // Pins down Req/Resp without owning values of those types; fn pointers
    // keep the struct Send/Sync regardless of Req/Resp.
    fns: PhantomData<(fn(Req), fn(Resp))>,
}
|
||||||
|
|
||||||
|
impl<Req, Resp, Serv, Hook> HookThenServeThenHook<Req, Resp, Serv, Hook> {
    // Pairs a serve fn with a hook that wraps it on both sides.
    pub(crate) fn new(serve: Serv, hook: Hook) -> Self {
        Self {
            serve,
            hook,
            fns: PhantomData,
        }
    }
}
|
||||||
|
|
||||||
|
impl<Req, Resp, Serv: Clone, Hook: Clone> Clone for HookThenServeThenHook<Req, Resp, Serv, Hook> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
serve: self.serve.clone(),
|
||||||
|
hook: self.hook.clone(),
|
||||||
|
fns: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Serving runs `before` (which may veto the request), then the serve fn,
/// then `after` (which may mutate the response).
impl<Req, Resp, Serv, Hook> Serve for HookThenServeThenHook<Req, Resp, Serv, Hook>
where
    Serv: Serve<Req = Req, Resp = Resp>,
    Hook: BeforeRequest<Req> + AfterRequest<Resp>,
{
    type Req = Req;
    type Resp = Resp;

    async fn serve(self, mut ctx: context::Context, req: Req) -> Result<Serv::Resp, ServerError> {
        let HookThenServeThenHook {
            serve, mut hook, ..
        } = self;
        hook.before(&mut ctx, &req).await?;
        let mut resp = serve.serve(ctx, req).await;
        // The after-hook runs on both success and error responses.
        hook.after(&mut ctx, &mut resp).await;
        resp
    }
}
|
||||||
139
tarpc/src/server/testing.rs
Normal file
139
tarpc/src/server/testing.rs
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
// Copyright 2020 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
cancellations::{cancellations, CanceledRequests, RequestCancellation},
|
||||||
|
context,
|
||||||
|
server::{Channel, Config, ResponseGuard, TrackedRequest},
|
||||||
|
Request, Response,
|
||||||
|
};
|
||||||
|
use futures::{task::*, Sink, Stream};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::{collections::VecDeque, io, pin::Pin, time::SystemTime};
|
||||||
|
use tracing::Span;
|
||||||
|
|
||||||
|
// An in-memory channel for unit tests: requests are popped from `stream`,
// responses are collected in `sink`, so tests can inspect both ends directly.
#[pin_project]
pub(crate) struct FakeChannel<In, Out> {
    // Queue of incoming items; `poll_next` pops from the front.
    #[pin]
    pub stream: VecDeque<In>,
    // Collects everything sent through the Sink impl.
    #[pin]
    pub sink: VecDeque<Out>,
    pub config: Config,
    // Real in-flight tracking so tests exercise request lifecycle logic.
    pub in_flight_requests: super::in_flight_requests::InFlightRequests,
    pub request_cancellation: RequestCancellation,
    pub canceled_requests: CanceledRequests,
}
|
||||||
|
|
||||||
|
impl<In, Out> Stream for FakeChannel<In, Out>
where
    In: Unpin,
{
    type Item = In;

    // Never pending: yields queued items until empty, then ends the stream.
    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
        Poll::Ready(self.project().stream.pop_front())
    }
}
|
||||||
|
|
||||||
|
impl<In, Resp> Sink<Response<Resp>> for FakeChannel<In, Response<Resp>> {
    type Error = io::Error;

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        // VecDeque's Sink error type is uninhabited, so `match e {}` converts
        // the impossible error into io::Error for the trait signature.
        self.project().sink.poll_ready(cx).map_err(|e| match e {})
    }

    fn start_send(mut self: Pin<&mut Self>, response: Response<Resp>) -> Result<(), Self::Error> {
        // Sending a response completes the corresponding in-flight request,
        // mirroring what a real channel does.
        self.as_mut()
            .project()
            .in_flight_requests
            .remove_request(response.request_id);
        self.project()
            .sink
            .start_send(response)
            .map_err(|e| match e {})
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().sink.poll_flush(cx).map_err(|e| match e {})
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
        self.project().sink.poll_close(cx).map_err(|e| match e {})
    }
}
|
||||||
|
|
||||||
|
impl<Req, Resp> Channel for FakeChannel<io::Result<TrackedRequest<Req>>, Response<Resp>>
where
    Req: Unpin,
{
    type Req = Req;
    type Resp = Resp;
    // No real transport backs the fake channel.
    type Transport = ();

    fn config(&self) -> &Config {
        &self.config
    }

    fn in_flight_requests(&self) -> usize {
        self.in_flight_requests.len()
    }

    fn transport(&self) -> &() {
        &()
    }
}
|
||||||
|
|
||||||
|
impl<Req, Resp> FakeChannel<io::Result<TrackedRequest<Req>>, Response<Resp>> {
    // Enqueues a request with the given id and payload, wrapped in the
    // bookkeeping (abort registration, span, response guard) a tracked
    // request carries in production.
    pub fn push_req(&mut self, id: u64, message: Req) {
        // Fresh, throwaway cancellation plumbing per request; the paired
        // handles are dropped because tests don't exercise cancellation here.
        let (_, abort_registration) = futures::future::AbortHandle::new_pair();
        let (request_cancellation, _) = cancellations();
        self.stream.push_back(Ok(TrackedRequest {
            request: Request {
                context: context::Context {
                    // Epoch deadline: tests that care set their own.
                    deadline: SystemTime::UNIX_EPOCH,
                    trace_context: Default::default(),
                },
                id,
                message,
            },
            abort_registration,
            span: Span::none(),
            response_guard: ResponseGuard {
                request_cancellation,
                request_id: id,
                cancel: false,
            },
        }));
    }
}
|
||||||
|
|
||||||
|
impl FakeChannel<(), ()> {
    // Associated constructor (not the Default trait): builds an empty fake
    // channel with the request/response types chosen by the caller.
    pub fn default<Req, Resp>() -> FakeChannel<io::Result<TrackedRequest<Req>>, Response<Resp>> {
        let (request_cancellation, canceled_requests) = cancellations();
        FakeChannel {
            stream: Default::default(),
            sink: Default::default(),
            config: Default::default(),
            in_flight_requests: Default::default(),
            request_cancellation,
            canceled_requests,
        }
    }
}
|
||||||
|
|
||||||
|
// Test helper: distinguishes "stream finished" from pending/ready-with-item.
pub trait PollExt {
    fn is_done(&self) -> bool;
}

impl<T> PollExt for Poll<Option<T>> {
    fn is_done(&self) -> bool {
        matches!(self, Poll::Ready(None))
    }
}

// A no-op task context for driving poll fns in tests without an executor.
pub fn cx() -> Context<'static> {
    Context::from_waker(noop_waker_ref())
}
|
||||||
261
tarpc/src/trace.rs
Normal file
261
tarpc/src/trace.rs
Normal file
@@ -0,0 +1,261 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
#![deny(missing_docs, missing_debug_implementations)]
|
||||||
|
|
||||||
|
//! Provides building blocks for tracing distributed programs.
|
||||||
|
//!
|
||||||
|
//! A trace is logically a tree of causally-related events called spans. Traces are tracked via a
|
||||||
|
//! [context](Context) that identifies the current trace, span, and parent of the current span. In
|
||||||
|
//! distributed systems, a context can be sent from client to server to connect events occurring on
|
||||||
|
//! either side.
|
||||||
|
//!
|
||||||
|
//! This crate's design is based on [opencensus
|
||||||
|
//! tracing](https://opencensus.io/core-concepts/tracing/).
|
||||||
|
|
||||||
|
use opentelemetry::trace::TraceContextExt;
|
||||||
|
use rand::Rng;
|
||||||
|
use std::{
|
||||||
|
convert::TryFrom,
|
||||||
|
fmt::{self, Formatter},
|
||||||
|
num::{NonZeroU128, NonZeroU64},
|
||||||
|
};
|
||||||
|
use tracing_opentelemetry::OpenTelemetrySpanExt;
|
||||||
|
|
||||||
|
/// A context for tracing the execution of processes, distributed or otherwise.
///
/// Consists of a span identifying an event, an optional parent span identifying a causal event
/// that triggered the current span, and a trace with which all related spans are associated.
#[derive(Debug, Default, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Context {
    /// An identifier of the trace associated with the current context. A trace ID is typically
    /// created at a root span and passed along through all causal events.
    pub trace_id: TraceId,
    /// An identifier of the current span. In typical RPC usage, a span is created by a client
    /// before making an RPC, and the span ID is sent to the server. The server is free to create
    /// its own spans, for which it sets the client's span as the parent span.
    pub span_id: SpanId,
    /// Indicates whether a sampler has already decided whether or not to sample the trace
    /// associated with the Context. If `sampling_decision` is None, then a decision has not yet
    /// been made. Downstream samplers do not need to abide by "no sample" decisions--for example,
    /// an upstream client may choose to never sample, which may not make sense for the client's
    /// dependencies. On the other hand, if an upstream process has chosen to sample this trace,
    /// then the downstream samplers are expected to respect that decision and also sample the
    /// trace. Otherwise, the full trace would not be able to be reconstructed.
    pub sampling_decision: SamplingDecision,
}
|
||||||
|
|
||||||
|
/// A 128-bit UUID identifying a trace. All spans caused by the same originating span share the
/// same trace ID.
// Serialized via `u128_serde` because not all serde formats support u128 natively.
#[derive(Default, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct TraceId(#[cfg_attr(feature = "serde1", serde(with = "u128_serde"))] u128);

/// A 64-bit identifier of a span within a trace. The identifier is unique within the span's trace.
#[derive(Default, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct SpanId(u64);

/// Indicates whether a sampler has decided whether or not to sample the trace associated with the
/// Context. Downstream samplers do not need to abide by "no sample" decisions--for example, an
/// upstream client may choose to never sample, which may not make sense for the client's
/// dependencies. On the other hand, if an upstream process has chosen to sample this trace, then
/// the downstream samplers are expected to respect that decision and also sample the trace.
/// Otherwise, the full trace would not be able to be reconstructed reliably.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
#[repr(u8)]
pub enum SamplingDecision {
    /// The associated span was sampled by its creating process. Child spans must also be sampled.
    Sampled,
    /// The associated span was not sampled by its creating process.
    Unsampled,
}
|
||||||
|
|
||||||
|
impl Context {
    /// Constructs a new context with the trace ID and sampling decision inherited from the parent.
    // Only the span ID is fresh; the child stays in the parent's trace.
    pub(crate) fn new_child(&self) -> Self {
        Self {
            trace_id: self.trace_id,
            span_id: SpanId::random(&mut rand::thread_rng()),
            sampling_decision: self.sampling_decision,
        }
    }
}
|
||||||
|
|
||||||
|
impl TraceId {
    /// Returns a random trace ID that can be assumed to be globally unique if `rng` generates
    /// actually-random numbers.
    // NonZeroU128 guarantees the result never collides with the `is_none` sentinel (0).
    pub fn random<R: Rng>(rng: &mut R) -> Self {
        TraceId(rng.gen::<NonZeroU128>().get())
    }

    /// Returns true iff the trace ID is 0.
    pub fn is_none(&self) -> bool {
        self.0 == 0
    }
}
|
||||||
|
|
||||||
|
impl SpanId {
    /// Returns a random span ID that can be assumed to be unique within a single trace.
    // NonZeroU64 guarantees the result never collides with the `is_none` sentinel (0).
    pub fn random<R: Rng>(rng: &mut R) -> Self {
        SpanId(rng.gen::<NonZeroU64>().get())
    }

    /// Returns true iff the span ID is 0.
    pub fn is_none(&self) -> bool {
        self.0 == 0
    }
}
|
||||||
|
|
||||||
|
// Transparent conversions between the newtype IDs and their raw integers.

impl From<TraceId> for u128 {
    fn from(trace_id: TraceId) -> Self {
        trace_id.0
    }
}

impl From<u128> for TraceId {
    fn from(trace_id: u128) -> Self {
        Self(trace_id)
    }
}

impl From<SpanId> for u64 {
    fn from(span_id: SpanId) -> Self {
        span_id.0
    }
}

impl From<u64> for SpanId {
    fn from(span_id: u64) -> Self {
        Self(span_id)
    }
}
|
||||||
|
|
||||||
|
// Conversions to/from OpenTelemetry's ID types, round-tripping through
// big-endian bytes so the numeric value matches otel's byte representation.

impl From<opentelemetry::trace::TraceId> for TraceId {
    fn from(trace_id: opentelemetry::trace::TraceId) -> Self {
        Self::from(u128::from_be_bytes(trace_id.to_bytes()))
    }
}

impl From<TraceId> for opentelemetry::trace::TraceId {
    fn from(trace_id: TraceId) -> Self {
        Self::from_bytes(u128::from(trace_id).to_be_bytes())
    }
}

impl From<opentelemetry::trace::SpanId> for SpanId {
    fn from(span_id: opentelemetry::trace::SpanId) -> Self {
        Self::from(u64::from_be_bytes(span_id.to_bytes()))
    }
}

impl From<SpanId> for opentelemetry::trace::SpanId {
    fn from(span_id: SpanId) -> Self {
        Self::from_bytes(u64::from(span_id).to_be_bytes())
    }
}
|
||||||
|
|
||||||
|
impl TryFrom<&tracing::Span> for Context {
|
||||||
|
type Error = NoActiveSpan;
|
||||||
|
|
||||||
|
fn try_from(span: &tracing::Span) -> Result<Self, NoActiveSpan> {
|
||||||
|
let context = span.context();
|
||||||
|
if context.has_active_span() {
|
||||||
|
Ok(Self::from(context.span()))
|
||||||
|
} else {
|
||||||
|
Err(NoActiveSpan)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<opentelemetry::trace::SpanRef<'_>> for Context {
|
||||||
|
fn from(span: opentelemetry::trace::SpanRef<'_>) -> Self {
|
||||||
|
let otel_ctx = span.span_context();
|
||||||
|
Self {
|
||||||
|
trace_id: TraceId::from(otel_ctx.trace_id()),
|
||||||
|
span_id: SpanId::from(otel_ctx.span_id()),
|
||||||
|
sampling_decision: SamplingDecision::from(otel_ctx),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<SamplingDecision> for opentelemetry::trace::TraceFlags {
|
||||||
|
fn from(decision: SamplingDecision) -> Self {
|
||||||
|
match decision {
|
||||||
|
SamplingDecision::Sampled => opentelemetry::trace::TraceFlags::SAMPLED,
|
||||||
|
SamplingDecision::Unsampled => opentelemetry::trace::TraceFlags::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<&opentelemetry::trace::SpanContext> for SamplingDecision {
    /// Derives the sampling decision from the span context's sampled flag.
    fn from(context: &opentelemetry::trace::SpanContext) -> Self {
        match context.is_sampled() {
            true => Self::Sampled,
            false => Self::Unsampled,
        }
    }
}
|
||||||
|
|
||||||
|
impl Default for SamplingDecision {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::Unsampled
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returned when a [`Context`] cannot be constructed from a [`Span`](tracing::Span),
/// i.e. when the span has no active OpenTelemetry span context attached.
#[derive(Debug)]
pub struct NoActiveSpan;
|
||||||
|
|
||||||
|
impl fmt::Display for TraceId {
|
||||||
|
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
|
||||||
|
write!(f, "{:02x}", self.0)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for TraceId {
|
||||||
|
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
|
||||||
|
write!(f, "{:02x}", self.0)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for SpanId {
|
||||||
|
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
|
||||||
|
write!(f, "{:02x}", self.0)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for SpanId {
|
||||||
|
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
|
||||||
|
write!(f, "{:02x}", self.0)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "serde1")]
mod u128_serde {
    /// Serializes a `u128` as its 16 little-endian bytes, since not all serde
    /// formats support 128-bit integers natively.
    pub fn serialize<S>(u: &u128, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serde::Serialize::serialize(&u.to_le_bytes(), serializer)
    }

    /// Deserializes a `u128` from the 16 little-endian bytes written by
    /// [`serialize`].
    pub fn deserialize<'de, D>(deserializer: D) -> Result<u128, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bytes: [u8; 16] = serde::Deserialize::deserialize(deserializer)?;
        Ok(u128::from_le_bytes(bytes))
    }
}
|
||||||
40
tarpc/src/transport.rs
Normal file
40
tarpc/src/transport.rs
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Provides a [`Transport`](sealed::Transport) trait as well as implementations.
|
||||||
|
//!
|
||||||
|
//! The rpc crate is transport- and protocol-agnostic. Any transport that impls [`Transport`](sealed::Transport)
|
||||||
|
//! can be plugged in, using whatever protocol it wants.
|
||||||
|
|
||||||
|
pub mod channel;
|
||||||
|
|
||||||
|
// Sealed so that downstream crates can bound by `Transport` but cannot add
// their own impls; the blanket impl below is the only implementation.
pub(crate) mod sealed {
    use futures::prelude::*;
    use std::error::Error;

    /// A bidirectional stream ([`Sink`] + [`Stream`]) of messages.
    pub trait Transport<SinkItem, Item>
    where
        Self: Stream<Item = Result<Item, <Self as Sink<SinkItem>>::Error>>,
        Self: Sink<SinkItem, Error = <Self as Transport<SinkItem, Item>>::TransportError>,
        <Self as Sink<SinkItem>>::Error: Error,
    {
        /// Associated type where clauses are not elaborated; this associated type allows users
        /// bounding types by Transport to avoid having to explicitly add `T::Error: Error` to their
        /// bounds.
        type TransportError: Error + Send + Sync + 'static;
    }

    // Blanket impl: any type that is simultaneously a Stream of Results and a
    // Sink sharing the same error type `E` is automatically a Transport.
    impl<T, SinkItem, Item, E> Transport<SinkItem, Item> for T
    where
        T: ?Sized,
        T: Stream<Item = Result<Item, E>>,
        T: Sink<SinkItem, Error = E>,
        T::Error: Error + Send + Sync + 'static,
    {
        type TransportError = E;
    }
}
|
||||||
219
tarpc/src/transport/channel.rs
Normal file
219
tarpc/src/transport/channel.rs
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
//! Transports backed by in-memory channels.
|
||||||
|
|
||||||
|
use futures::{task::*, Sink, Stream};
|
||||||
|
use pin_project::pin_project;
|
||||||
|
use std::{error::Error, pin::Pin};
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
/// Errors that occur in the sending or receiving of messages over a channel.
|
||||||
|
/// Errors that occur in the sending or receiving of messages over a channel.
// The boxed sources keep the enum small while preserving the underlying
// channel error for callers that want to inspect it.
#[derive(thiserror::Error, Debug)]
pub enum ChannelError {
    /// An error occurred readying to send into the channel.
    #[error("an error occurred readying to send into the channel")]
    Ready(#[source] Box<dyn Error + Send + Sync + 'static>),
    /// An error occurred sending into the channel.
    #[error("an error occurred sending into the channel")]
    Send(#[source] Box<dyn Error + Send + Sync + 'static>),
    /// An error occurred receiving from the channel.
    #[error("an error occurred receiving from the channel")]
    Receive(#[source] Box<dyn Error + Send + Sync + 'static>),
}
|
||||||
|
|
||||||
|
/// Returns two unbounded channel peers. Each [`Stream`] yields items sent through the other's
|
||||||
|
/// [`Sink`].
|
||||||
|
pub fn unbounded<SinkItem, Item>() -> (
|
||||||
|
UnboundedChannel<SinkItem, Item>,
|
||||||
|
UnboundedChannel<Item, SinkItem>,
|
||||||
|
) {
|
||||||
|
let (tx1, rx2) = mpsc::unbounded_channel();
|
||||||
|
let (tx2, rx1) = mpsc::unbounded_channel();
|
||||||
|
(
|
||||||
|
UnboundedChannel { tx: tx1, rx: rx1 },
|
||||||
|
UnboundedChannel { tx: tx2, rx: rx2 },
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A bi-directional channel backed by an [`UnboundedSender`](mpsc::UnboundedSender)
/// and [`UnboundedReceiver`](mpsc::UnboundedReceiver).
#[derive(Debug)]
pub struct UnboundedChannel<Item, SinkItem> {
    // Receives items sent through the peer channel's `tx`.
    rx: mpsc::UnboundedReceiver<Item>,
    // Sends items that the peer channel's `rx` yields.
    tx: mpsc::UnboundedSender<SinkItem>,
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem> Stream for UnboundedChannel<Item, SinkItem> {
|
||||||
|
type Item = Result<Item, ChannelError>;
|
||||||
|
|
||||||
|
fn poll_next(
|
||||||
|
mut self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> Poll<Option<Result<Item, ChannelError>>> {
|
||||||
|
self.rx
|
||||||
|
.poll_recv(cx)
|
||||||
|
.map(|option| option.map(Ok))
|
||||||
|
.map_err(ChannelError::Receive)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The only way an in-memory channel send fails is when the receiving half has
// been dropped; this message describes that single failure mode.
const CLOSED_MESSAGE: &str = "the channel is closed and cannot accept new items for sending";

impl<Item, SinkItem> Sink<SinkItem> for UnboundedChannel<Item, SinkItem> {
    type Error = ChannelError;

    fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // An unbounded sender has no backpressure, so readiness never returns
        // Pending: it is Ok unless the receiver has been dropped.
        Poll::Ready(if self.tx.is_closed() {
            Err(ChannelError::Ready(CLOSED_MESSAGE.into()))
        } else {
            Ok(())
        })
    }

    fn start_send(self: Pin<&mut Self>, item: SinkItem) -> Result<(), Self::Error> {
        // On failure (receiver dropped) the item is discarded; the returned
        // error does not carry it back to the caller.
        self.tx
            .send(item)
            .map_err(|_| ChannelError::Send(CLOSED_MESSAGE.into()))
    }

    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // UnboundedSender requires no flushing.
        Poll::Ready(Ok(()))
    }

    fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // UnboundedSender can't initiate closure.
        Poll::Ready(Ok(()))
    }
}
|
||||||
|
|
||||||
|
/// Returns two channel peers with buffer equal to `capacity`. Each [`Stream`] yields items sent
|
||||||
|
/// through the other's [`Sink`].
|
||||||
|
pub fn bounded<SinkItem, Item>(
|
||||||
|
capacity: usize,
|
||||||
|
) -> (Channel<SinkItem, Item>, Channel<Item, SinkItem>) {
|
||||||
|
let (tx1, rx2) = futures::channel::mpsc::channel(capacity);
|
||||||
|
let (tx2, rx1) = futures::channel::mpsc::channel(capacity);
|
||||||
|
(Channel { tx: tx1, rx: rx1 }, Channel { tx: tx2, rx: rx2 })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A bi-directional channel backed by a [`Sender`](futures::channel::mpsc::Sender)
/// and [`Receiver`](futures::channel::mpsc::Receiver).
#[pin_project]
#[derive(Debug)]
pub struct Channel<Item, SinkItem> {
    // Receives items sent through the peer channel's `tx`.
    #[pin]
    rx: futures::channel::mpsc::Receiver<Item>,
    // Sends items that the peer channel's `rx` yields.
    #[pin]
    tx: futures::channel::mpsc::Sender<SinkItem>,
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem> Stream for Channel<Item, SinkItem> {
|
||||||
|
type Item = Result<Item, ChannelError>;
|
||||||
|
|
||||||
|
fn poll_next(
|
||||||
|
self: Pin<&mut Self>,
|
||||||
|
cx: &mut Context<'_>,
|
||||||
|
) -> Poll<Option<Result<Item, ChannelError>>> {
|
||||||
|
self.project()
|
||||||
|
.rx
|
||||||
|
.poll_next(cx)
|
||||||
|
.map(|option| option.map(Ok))
|
||||||
|
.map_err(ChannelError::Receive)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Item, SinkItem> Sink<SinkItem> for Channel<Item, SinkItem> {
    type Error = ChannelError;

    // Each method pin-projects to the inner bounded sender and boxes its
    // error into the matching ChannelError variant.

    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Bounded channels apply backpressure: this returns Pending while the
        // buffer is full.
        self.project()
            .tx
            .poll_ready(cx)
            .map_err(|e| ChannelError::Ready(Box::new(e)))
    }

    fn start_send(self: Pin<&mut Self>, item: SinkItem) -> Result<(), Self::Error> {
        self.project()
            .tx
            .start_send(item)
            .map_err(|e| ChannelError::Send(Box::new(e)))
    }

    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project()
            .tx
            .poll_flush(cx)
            .map_err(|e| ChannelError::Send(Box::new(e)))
    }

    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.project()
            .tx
            .poll_close(cx)
            .map_err(|e| ChannelError::Send(Box::new(e)))
    }
}
|
||||||
|
|
||||||
|
#[cfg(all(test, feature = "tokio1"))]
mod tests {
    use crate::{
        client::{self, RpcError},
        context,
        server::{incoming::Incoming, serve, BaseChannel},
        transport::{
            self,
            channel::{Channel, UnboundedChannel},
        },
        ServerError,
    };
    use assert_matches::assert_matches;
    use futures::{prelude::*, stream};
    use std::io;
    use tracing::trace;

    #[test]
    fn ensure_is_transport() {
        // Compile-time check: both channel flavors must satisfy the sealed
        // Transport trait (Stream of Results + Sink with the same error).
        fn is_transport<SinkItem, Item, T: crate::Transport<SinkItem, Item>>() {}
        is_transport::<(), (), UnboundedChannel<(), ()>>();
        is_transport::<(), (), Channel<(), ()>>();
    }

    // End-to-end smoke test: a server that parses strings into u64s, wired to
    // a client over an in-memory unbounded channel. Exercises both the success
    // path and a server-side error response.
    #[tokio::test]
    async fn integration() -> anyhow::Result<()> {
        let _ = tracing_subscriber::fmt::try_init();

        let (client_channel, server_channel) = transport::channel::unbounded();
        tokio::spawn(
            stream::once(future::ready(server_channel))
                .map(BaseChannel::with_defaults)
                .execute(serve(|_ctx, request: String| async move {
                    request.parse::<u64>().map_err(|_| {
                        ServerError::new(
                            io::ErrorKind::InvalidInput,
                            format!("{request:?} is not an int"),
                        )
                    })
                }))
                // Drive each connection's response stream to completion on its
                // own task.
                .for_each(|channel| async move {
                    tokio::spawn(channel.for_each(|response| response));
                }),
        );

        let client = client::new(client::Config::default(), client_channel).spawn();

        let response1 = client.call(context::current(), "", "123".into()).await;
        let response2 = client.call(context::current(), "", "abc".into()).await;

        trace!("response1: {:?}, response2: {:?}", response1, response2);

        assert_matches!(response1, Ok(123));
        assert_matches!(response2, Err(RpcError::Server(e)) if e.kind == io::ErrorKind::InvalidInput);

        Ok(())
    }
}
|
||||||
71
tarpc/src/util.rs
Normal file
71
tarpc/src/util.rs
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
hash::{BuildHasher, Hash},
|
||||||
|
time::{Duration, SystemTime},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[cfg(feature = "serde1")]
|
||||||
|
#[cfg_attr(docsrs, doc(cfg(feature = "serde1")))]
|
||||||
|
pub mod serde;
|
||||||
|
|
||||||
|
/// Extension trait for [SystemTimes](SystemTime) in the future, i.e. deadlines.
pub trait TimeUntil {
    /// How much time from now until this time is reached.
    fn time_until(&self) -> Duration;
}

impl TimeUntil for SystemTime {
    fn time_until(&self) -> Duration {
        // A deadline already in the past yields a zero duration rather than
        // propagating the SystemTimeError.
        match self.duration_since(SystemTime::now()) {
            Ok(remaining) => remaining,
            Err(_) => Duration::ZERO,
        }
    }
}
|
||||||
|
|
||||||
|
/// Collection compaction; configurable `shrink_to_fit`.
pub trait Compact {
    /// Compacts space if the ratio of length : capacity is less than `usage_ratio_threshold`.
    fn compact(&mut self, usage_ratio_threshold: f64);
}

impl<K, V, H> Compact for HashMap<K, V, H>
where
    K: Eq + Hash,
    H: BuildHasher,
{
    fn compact(&mut self, usage_ratio_threshold: f64) {
        // Pull nonsensical thresholds (<= 0 or > 1) into (0, 1] so the target
        // capacity below is always finite and at least `len`.
        let threshold = usage_ratio_threshold.clamp(f64::MIN_POSITIVE, 1.);
        // Never target fewer than 1000 entries, to avoid shrink/grow churn on
        // small maps.
        let target = f64::max(1000., self.len() as f64 / threshold);
        self.shrink_to(target as usize);
    }
}
|
||||||
|
|
||||||
|
#[test]
fn test_compact() {
    // NOTE(review): the exact capacities below encode the std HashMap
    // (hashbrown) 7/8 load factor — requesting 2048 rounds up to 4096 buckets,
    // 3584 usable slots. They may break on a toolchain that changes the
    // implementation; confirm before bumping.
    let mut map = HashMap::with_capacity(2048);
    assert_eq!(map.capacity(), 3584);

    // Make usage ratio 25%
    for i in 0..896 {
        map.insert(format!("k{i}"), "v");
    }

    // A negative threshold is clamped to the smallest positive value, making
    // the computed target capacity enormous — effectively a no-op shrink.
    map.compact(-1.0);
    assert_eq!(map.capacity(), 3584);

    // 896 / 0.25 = 3584 — already at target, so no shrink.
    map.compact(0.25);
    assert_eq!(map.capacity(), 3584);

    // 896 / 0.50 = 1792 — halves the capacity.
    map.compact(0.50);
    assert_eq!(map.capacity(), 1792);

    // max(1000, 896) = 1000 rounds up to the same bucket count: no change.
    map.compact(1.0);
    assert_eq!(map.capacity(), 1792);

    // Thresholds above 1 are clamped down to 1: same result as above.
    map.compact(2.0);
    assert_eq!(map.capacity(), 1792);
}
|
||||||
73
tarpc/src/util/serde.rs
Normal file
73
tarpc/src/util/serde.rs
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
// Copyright 2018 Google LLC
|
||||||
|
//
|
||||||
|
// Use of this source code is governed by an MIT-style
|
||||||
|
// license that can be found in the LICENSE file or at
|
||||||
|
// https://opensource.org/licenses/MIT.
|
||||||
|
|
||||||
|
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||||
|
use std::io;
|
||||||
|
|
||||||
|
/// Serializes [`io::ErrorKind`] as a `u32`.
///
/// The numeric mapping is part of the wire format and must stay in sync with
/// [`deserialize_io_error_kind_from_u32`]; kinds added to std after this table
/// was written collapse to 16 (`Other`) so old peers can still decode them.
#[allow(clippy::trivially_copy_pass_by_ref)] // Exact fn signature required by serde derive
pub fn serialize_io_error_kind_as_u32<S>(
    kind: &io::ErrorKind,
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    use std::io::ErrorKind::*;
    match *kind {
        NotFound => 0,
        PermissionDenied => 1,
        ConnectionRefused => 2,
        ConnectionReset => 3,
        ConnectionAborted => 4,
        NotConnected => 5,
        AddrInUse => 6,
        AddrNotAvailable => 7,
        BrokenPipe => 8,
        AlreadyExists => 9,
        WouldBlock => 10,
        InvalidInput => 11,
        InvalidData => 12,
        TimedOut => 13,
        WriteZero => 14,
        Interrupted => 15,
        Other => 16,
        UnexpectedEof => 17,
        // ErrorKind is #[non_exhaustive]; unknown variants map to Other's code.
        _ => 16,
    }
    .serialize(serializer)
}
|
||||||
|
|
||||||
|
/// Deserializes [`io::ErrorKind`] from a `u32`.
///
/// Inverse of [`serialize_io_error_kind_as_u32`]; codes outside the known
/// table decode as `Other` rather than failing, so newer peers remain readable.
pub fn deserialize_io_error_kind_from_u32<'de, D>(
    deserializer: D,
) -> Result<io::ErrorKind, D::Error>
where
    D: Deserializer<'de>,
{
    use std::io::ErrorKind::*;
    Ok(match u32::deserialize(deserializer)? {
        0 => NotFound,
        1 => PermissionDenied,
        2 => ConnectionRefused,
        3 => ConnectionReset,
        4 => ConnectionAborted,
        5 => NotConnected,
        6 => AddrInUse,
        7 => AddrNotAvailable,
        8 => BrokenPipe,
        9 => AlreadyExists,
        10 => WouldBlock,
        11 => InvalidInput,
        12 => InvalidData,
        13 => TimedOut,
        14 => WriteZero,
        15 => Interrupted,
        16 => Other,
        17 => UnexpectedEof,
        // Unknown codes (from newer serializers) fall back to Other.
        _ => Other,
    })
}
|
||||||
7
tarpc/tests/compile_fail.rs
Normal file
7
tarpc/tests/compile_fail.rs
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
#[test]
fn ui() {
    // trybuild compiles each fixture and diffs the rustc diagnostics against
    // the fixture's paired .stderr file.
    let t = trybuild::TestCases::new();
    t.compile_fail("tests/compile_fail/*.rs");
    // The serde_transport fixtures reference TCP-transport APIs, so they only
    // produce the expected errors when those features are enabled.
    #[cfg(all(feature = "serde-transport", feature = "tcp"))]
    t.compile_fail("tests/compile_fail/serde_transport/*.rs");
}
|
||||||
15
tarpc/tests/compile_fail/must_use_request_dispatch.rs
Normal file
15
tarpc/tests/compile_fail/must_use_request_dispatch.rs
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
use tarpc::client;
|
||||||
|
|
||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn hello(name: String) -> String;
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
let (client_transport, _) = tarpc::transport::channel::unbounded();
|
||||||
|
|
||||||
|
#[deny(unused_must_use)]
|
||||||
|
{
|
||||||
|
WorldClient::new(client::Config::default(), client_transport).dispatch;
|
||||||
|
}
|
||||||
|
}
|
||||||
15
tarpc/tests/compile_fail/must_use_request_dispatch.stderr
Normal file
15
tarpc/tests/compile_fail/must_use_request_dispatch.stderr
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
error: unused `RequestDispatch` that must be used
|
||||||
|
--> tests/compile_fail/must_use_request_dispatch.rs:13:9
|
||||||
|
|
|
||||||
|
13 | WorldClient::new(client::Config::default(), client_transport).dispatch;
|
||||||
|
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
|
||||||
|
note: the lint level is defined here
|
||||||
|
--> tests/compile_fail/must_use_request_dispatch.rs:11:12
|
||||||
|
|
|
||||||
|
11 | #[deny(unused_must_use)]
|
||||||
|
| ^^^^^^^^^^^^^^^
|
||||||
|
help: use `let _ = ...` to ignore the resulting value
|
||||||
|
|
|
||||||
|
13 | let _ = WorldClient::new(client::Config::default(), client_transport).dispatch;
|
||||||
|
| +++++++
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
use tarpc::serde_transport;
|
||||||
|
use tokio_serde::formats::Json;
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
#[deny(unused_must_use)]
|
||||||
|
{
|
||||||
|
serde_transport::tcp::connect::<_, (), (), _, _>("0.0.0.0:0", Json::default);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,15 @@
|
|||||||
|
error: unused `tarpc::serde_transport::tcp::Connect` that must be used
|
||||||
|
--> tests/compile_fail/serde_transport/must_use_tcp_connect.rs:7:9
|
||||||
|
|
|
||||||
|
7 | serde_transport::tcp::connect::<_, (), (), _, _>("0.0.0.0:0", Json::default);
|
||||||
|
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||||
|
|
|
||||||
|
note: the lint level is defined here
|
||||||
|
--> tests/compile_fail/serde_transport/must_use_tcp_connect.rs:5:12
|
||||||
|
|
|
||||||
|
5 | #[deny(unused_must_use)]
|
||||||
|
| ^^^^^^^^^^^^^^^
|
||||||
|
help: use `let _ = ...` to ignore the resulting value
|
||||||
|
|
|
||||||
|
7 | let _ = serde_transport::tcp::connect::<_, (), (), _, _>("0.0.0.0:0", Json::default);
|
||||||
|
| +++++++
|
||||||
6
tarpc/tests/compile_fail/tarpc_service_arg_pat.rs
Normal file
6
tarpc/tests/compile_fail/tarpc_service_arg_pat.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn pat((a, b): (u8, u32));
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {}
|
||||||
5
tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr
Normal file
5
tarpc/tests/compile_fail/tarpc_service_arg_pat.stderr
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
error: patterns aren't allowed in RPC args
|
||||||
|
--> $DIR/tarpc_service_arg_pat.rs:3:18
|
||||||
|
|
|
||||||
|
3 | async fn pat((a, b): (u8, u32));
|
||||||
|
| ^^^^^^
|
||||||
6
tarpc/tests/compile_fail/tarpc_service_fn_new.rs
Normal file
6
tarpc/tests/compile_fail/tarpc_service_fn_new.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn new();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {}
|
||||||
5
tarpc/tests/compile_fail/tarpc_service_fn_new.stderr
Normal file
5
tarpc/tests/compile_fail/tarpc_service_fn_new.stderr
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
error: method name conflicts with generated fn `WorldClient::new`
|
||||||
|
--> $DIR/tarpc_service_fn_new.rs:3:14
|
||||||
|
|
|
||||||
|
3 | async fn new();
|
||||||
|
| ^^^
|
||||||
6
tarpc/tests/compile_fail/tarpc_service_fn_serve.rs
Normal file
6
tarpc/tests/compile_fail/tarpc_service_fn_serve.rs
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
#[tarpc::service]
|
||||||
|
trait World {
|
||||||
|
async fn serve();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user