diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 6df6dfee79..a32bce7c38 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -16,7 +16,9 @@ jobs: - style - test - msrv + - miri - features + - deprecated - ffi - ffi-header - doc @@ -61,11 +63,11 @@ jobs: include: - rust: stable - features: "--features full" + features: "--features full,backports,deprecated" - rust: beta - features: "--features full" + features: "--features full,backports" - rust: nightly - features: "--features full,nightly" + features: "--features full,nightly,backports" benches: true runs-on: ${{ matrix.os }} @@ -100,7 +102,7 @@ jobs: strategy: matrix: rust: - - 1.49 # keep in sync with MSRV.md dev doc + - 1.56 # keep in sync with MSRV.md dev doc os: - ubuntu-latest @@ -124,6 +126,27 @@ jobs: command: check args: --features full + miri: + name: Test with Miri + needs: [style] + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v1 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: nightly + components: miri + override: true + + - name: Test + # Can't enable tcp feature since Miri does not support the tokio runtime + run: MIRIFLAGS="-Zmiri-disable-isolation" cargo miri test --features http1,http2,client,server,stream,nightly + features: name: features needs: [style] @@ -145,6 +168,34 @@ jobs: - name: check --feature-powerset run: cargo hack check --feature-powerset --depth 2 --skip ffi -Z avoid-dev-deps + deprecated: + name: Check deprecated on ${{ matrix.rust }} + needs: [style] + strategy: + matrix: + rust: + - stable + - beta + + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v1 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - name: Check + uses: actions-rs/cargo@v1 + with: + command: check + args: --features full,backports,deprecated + ffi: name: Test C API (FFI) needs: [style] diff --git a/CHANGELOG.md b/CHANGELOG.md index bcbd12760b..e05105d334 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,108 @@ +### v0.14.26 (2023-04-13) + + +#### Features + +* **http2:** add `max_pending_accept_reset_streams` configuration option (#3201) ([a6f7571a](https://github.com/hyperium/hyper/commit/a6f7571a5299793aef8f1aa4194574438b9df64c)) + + +### v0.14.25 (2023-03-10) + + +#### Features + +* **client:** + * deprecate `client::conn` types (#3156) ([0ced15d3](https://github.com/hyperium/hyper/commit/0ced15d3cc10ace477ebda13ead8e6857b51867e)) + * add 1.0 compatible client conn API (#3155) ([253cc74d](https://github.com/hyperium/hyper/commit/253cc74d86b082067aa884a0a63a089d7d19401d), closes [#3053](https://github.com/hyperium/hyper/issues/3053)) + * add `client::connect::capture_connection()` (#3144) ([c8493399](https://github.com/hyperium/hyper/commit/c8493399b2929a86f3020ae77304a00e43cfd161)) + * add `poison` to `Connected` (#3145) ([37ed5a2e](https://github.com/hyperium/hyper/commit/37ed5a2e3cab76a11092823a80afd8fe2f2a9693)) +* **server:** + * deprecate server conn structs (#3161) ([02fe20f2](https://github.com/hyperium/hyper/commit/02fe20f232a7c3cf24d505b121ce4d428a93254d)) + * backport the split server conn modules from 1.0 (#3102) ([84881c9e](https://github.com/hyperium/hyper/commit/84881c9e5160167a89d18d30c0ef6856dc859839), closes [#3079](https://github.com/hyperium/hyper/issues/3079)) + * remove some `Unpin` and `'static` constraints (#3119) 
([0368a41a](https://github.com/hyperium/hyper/commit/0368a41a6cc1a5c6f1eada0d88e38b7dce261587)) + + +### v0.14.24 (2023-02-02) + + +#### Bug Fixes + +* **body:** set an internal max to reserve in `to_bytes` ([4d89adce](https://github.com/hyperium/hyper/commit/4d89adce6122af1650165337d9d814314e7ee409)) +* **server:** prevent sending 100-continue if user drops request body (#3138) ([92443d7e](https://github.com/hyperium/hyper/commit/92443d7ef57ed474f0add7dd1f114c81a3faa8fe)) + + +#### Features + +* **http2:** add `http2_max_header_list_size` to `hyper::server::Builder` (#3006) ([031425f0](https://github.com/hyperium/hyper/commit/031425f087219f02a87eea3d01b14e75e35a5209)) + + +### v0.14.23 (2022-11-07) + + +#### Bug Fixes + +* **http2:** Fix race condition in client dispatcher (#3041) ([2f1c0b72](https://github.com/hyperium/hyper/commit/2f1c0b720da4553fff216a38018a78ecafe23d60), closes [#2419](https://github.com/hyperium/hyper/issues/2419)) + + +### v0.14.22 (2022-10-31) + + +#### Bug Fixes + +* **server:** fix compile-time cfgs for TCP keepalive options (#3039) ([e8765e0f](https://github.com/hyperium/hyper/commit/e8765e0febd0267472799dcd1109af75944c2637), closes [#3038](https://github.com/hyperium/hyper/issues/3038)) + + +### v0.14.21 (2022-10-31) + + +#### Bug Fixes + +* **client:** send an error back to client when dispatch misbehaves () ([9fa36382](https://github.com/hyperium/hyper/commit/9fa363829ced232acb18c31ebab8ffb93f691ecc), closes [#2649](https://github.com/hyperium/hyper/issues/2649)) +* **http1:** fix `http1_header_read_timeout` to use same future (#2891) ([c5a14e7c](https://github.com/hyperium/hyper/commit/c5a14e7c087424001223aaeb2dad532ba4ee6063)) + + +#### Features + +* **http1:** allow ignoring invalid header lines in requests ([73dd4746](https://github.com/hyperium/hyper/commit/73dd474652f5e71fe8a87baa6f9b2490ae746eb3)) +* **server:** add `Server::tcp_keepalive_interval` and `Server::tcp_keepalive_retries` (#2991) ([287d7124](https://github.com/hyperium/hyper/commit/287d712483aec6671427438d60ed2a72f856fd9f)) + + +### v0.14.20 (2022-07-07) + + +#### Bug Fixes + +* **http1:** fix `http1_header_read_timeout` to use same future (#2891) ([c5a14e7c](https://github.com/hyperium/hyper/commit/c5a14e7c087424001223aaeb2dad532ba4ee6063)) + + +#### Features + +* **ext:** support non-canonical HTTP/1 reason phrases (#2792) ([b2052a43](https://github.com/hyperium/hyper/commit/b2052a433fd151d7d745ee9c5b27a2031db1dc32)) + + +### v0.14.19 (2022-05-27) + + +#### Bug Fixes + +* **http1:** fix preserving header case without enabling ffi (#2820) ([6a35c175](https://github.com/hyperium/hyper/commit/6a35c175f2b416851518b5831c2c7827d6dbd822)) +* **server:** don't add implicit content-length to HEAD responses (#2836) ([67b73138](https://github.com/hyperium/hyper/commit/67b73138f110979f3c77ef7b56588f018837e592)) + + +#### Features + +* **server:** + * add `Connection::http2_max_header_list_size` option (#2828) ([a32658c1](https://github.com/hyperium/hyper/commit/a32658c1ae7f1261fa234a767df963be4fc63521), closes [#2826](https://github.com/hyperium/hyper/issues/2826)) + * add `AddrStream::local_addr()` (#2816) ([ffbf610b](https://github.com/hyperium/hyper/commit/ffbf610b1631cabfacb20886270e3c137fa93800), closes [#2773](https://github.com/hyperium/hyper/issues/2773)) + + +#### Breaking Changes + +* **ffi (unstable):** + * `hyper_clientconn_options_new` no longer sets the `http1_preserve_header_case` connection option by default. 
+ Users should now call `hyper_clientconn_options_set_preserve_header_case` if they desire that functionality. ([78de8914](https://github.com/hyperium/hyper/commit/78de8914eadeab4b9a2c71a82c77b2ce33fe6c74)) + + ### v0.14.18 (2022-03-22) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5cbe51bd44..56e31dd2bb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,6 +5,7 @@ You want to contribute? You're awesome! Don't know where to start? Check the [li [easy tag]: https://github.com/hyperium/hyper/issues?q=label%3AE-easy+is%3Aopen -## Pull Requests +## [Pull Requests](./docs/PULL_REQUESTS.md) +- [Submitting a Pull Request](./docs/PULL_REQUESTS.md#submitting-a-pull-request) - [Commit Guidelines](./docs/COMMITS.md) diff --git a/Cargo.toml b/Cargo.toml index a52d991002..a8b2550c48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "hyper" -version = "0.14.18" +version = "0.14.26" description = "A fast and correct HTTP library." readme = "README.md" homepage = "https://hyper.rs" @@ -27,8 +27,8 @@ futures-util = { version = "0.3", default-features = false } http = "0.2" http-body = "0.4" httpdate = "1.0" -httparse = "1.6" -h2 = { version = "0.3.9", optional = true } +httparse = "1.8" +h2 = { version = "0.3.17", optional = true } itoa = "1" tracing = { version = "0.1", default-features = false, features = ["std"] } pin-project-lite = "0.2.4" @@ -39,7 +39,7 @@ want = "0.3" # Optional libc = { version = "0.2", optional = true } -socket2 = { version = "0.4", optional = true } +socket2 = { version = "0.4.7", optional = true, features = ["all"] } [dev-dependencies] futures-util = { version = "0.3", default-features = false, features = ["alloc"] } @@ -61,7 +61,7 @@ tokio = { version = "1", features = [ "test-util", ] } tokio-test = "0.4" -tokio-util = { version = "0.6", features = ["codec"] } +tokio-util = { version = "0.7", features = ["codec"] } tower = { version = "0.4", features = ["make", "util"] } url = "2.2" @@ -109,6 +109,12 @@ tcp = [ # C-API support (currently unstable (no semver)) ffi = ["libc"] +# enable 1.0 backports +backports = [] + +# whether or not to display deprecation warnings +deprecated = [] + # internal features used in CI nightly = [] __internal_happy_eyeballs_tests = [] diff --git a/README.md b/README.md index c3c73d7ed7..328f47bd36 100644 --- a/README.md +++ b/README.md @@ -24,7 +24,7 @@ libraries and applications. If you are looking for a convenient HTTP client, then you may wish to consider [reqwest](https://github.com/seanmonstar/reqwest). If you are looking for a -convenient HTTP server, then you may wish to consider [warp](https://github.com/seanmonstar/warp). +convenient HTTP server, then you may wish to consider [Axum](https://github.com/tokio-rs/tokio). Both are built on top of this library. 
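Since the Cargo.toml hunk above only names the new `backports` and `deprecated` features, a brief sketch of what they enable may help: `backports` exposes the 1.0-style per-version connection modules (the client side is shown later in this diff, in `examples/tower_client.rs`), while `deprecated` turns on warnings for the older type-erased `client::conn` / `server::conn` types. The server-side shape below is an assumption based on the changelog entry for #3102 and the hyper 1.0 API it mirrors; the handler, address, and error handling are illustrative only.

```rust
use std::convert::Infallible;

use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

// Illustrative handler; not part of the diff.
async fn hello(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    Ok(Response::new(Body::from("hello")))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let listener = TcpListener::bind("127.0.0.1:8080").await?;
    loop {
        let (stream, _) = listener.accept().await?;
        tokio::spawn(async move {
            // Backported per-version builder behind the `backports` feature
            // (assumed shape; mirrors the hyper 1.0 server::conn::http1 API).
            if let Err(err) = hyper::server::conn::http1::Builder::new()
                .serve_connection(stream, service_fn(hello))
                .await
            {
                eprintln!("connection error: {}", err);
            }
        });
    }
}
```

Opting in is a matter of listing the features on the hyper dependency, the same combination the CI matrix above checks with `--features full,backports,deprecated`.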
## Contributing diff --git a/capi/examples/client.c b/capi/examples/client.c index 3cccdd6ae1..57a3e7b6c7 100644 --- a/capi/examples/client.c +++ b/capi/examples/client.c @@ -24,44 +24,42 @@ static size_t read_cb(void *userdata, hyper_context *ctx, uint8_t *buf, size_t b struct conn_data *conn = (struct conn_data *)userdata; ssize_t ret = read(conn->fd, buf, buf_len); - if (ret < 0) { - int err = errno; - if (err == EAGAIN) { - // would block, register interest - if (conn->read_waker != NULL) { - hyper_waker_free(conn->read_waker); - } - conn->read_waker = hyper_context_waker(ctx); - return HYPER_IO_PENDING; - } else { - // kaboom - return HYPER_IO_ERROR; - } - } else { + if (ret >= 0) { return ret; } + + if (errno != EAGAIN) { + // kaboom + return HYPER_IO_ERROR; + } + + // would block, register interest + if (conn->read_waker != NULL) { + hyper_waker_free(conn->read_waker); + } + conn->read_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; } static size_t write_cb(void *userdata, hyper_context *ctx, const uint8_t *buf, size_t buf_len) { struct conn_data *conn = (struct conn_data *)userdata; ssize_t ret = write(conn->fd, buf, buf_len); - if (ret < 0) { - int err = errno; - if (err == EAGAIN) { - // would block, register interest - if (conn->write_waker != NULL) { - hyper_waker_free(conn->write_waker); - } - conn->write_waker = hyper_context_waker(ctx); - return HYPER_IO_PENDING; - } else { - // kaboom - return HYPER_IO_ERROR; - } - } else { + if (ret >= 0) { return ret; } + + if (errno != EAGAIN) { + // kaboom + return HYPER_IO_ERROR; + } + + // would block, register interest + if (conn->write_waker != NULL) { + hyper_waker_free(conn->write_waker); + } + conn->write_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; } static void free_conn_data(struct conn_data *conn) { @@ -98,9 +96,9 @@ static int connect_to(const char *host, const char *port) { if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) { break; - } else { - close(sfd); } + + close(sfd); } freeaddrinfo(result); @@ -142,17 +140,17 @@ typedef enum { #define STR_ARG(XX) (uint8_t *)XX, strlen(XX) int main(int argc, char *argv[]) { - const char *host = argc > 1 ? argv[1] : "httpbin.org"; - const char *port = argc > 2 ? argv[2] : "80"; - const char *path = argc > 3 ? argv[3] : "/"; - printf("connecting to port %s on %s...\n", port, host); + const char *host = argc > 1 ? argv[1] : "httpbin.org"; + const char *port = argc > 2 ? argv[2] : "80"; + const char *path = argc > 3 ? 
argv[3] : "/"; + printf("connecting to port %s on %s...\n", port, host); - int fd = connect_to(host, port); + int fd = connect_to(host, port); if (fd < 0) { return 1; } - printf("connected to %s, now get %s\n", host, path); + printf("connected to %s, now get %s\n", host, path); if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) { printf("failed to set socket to non-blocking\n"); return 1; @@ -168,7 +166,6 @@ int main(int argc, char *argv[]) { conn->read_waker = NULL; conn->write_waker = NULL; - // Hookup the IO hyper_io *io = hyper_io_new(); hyper_io_set_userdata(io, (void *)conn); @@ -315,15 +312,16 @@ int main(int argc, char *argv[]) { if (sel_ret < 0) { printf("select() error\n"); return 1; - } else { - if (FD_ISSET(conn->fd, &fds_read)) { - hyper_waker_wake(conn->read_waker); - conn->read_waker = NULL; - } - if (FD_ISSET(conn->fd, &fds_write)) { - hyper_waker_wake(conn->write_waker); - conn->write_waker = NULL; - } + } + + if (FD_ISSET(conn->fd, &fds_read)) { + hyper_waker_wake(conn->read_waker); + conn->read_waker = NULL; + } + + if (FD_ISSET(conn->fd, &fds_write)) { + hyper_waker_wake(conn->write_waker); + conn->write_waker = NULL; } } diff --git a/capi/examples/upload.c b/capi/examples/upload.c index 10582c8867..9b791eedf8 100644 --- a/capi/examples/upload.c +++ b/capi/examples/upload.c @@ -24,44 +24,42 @@ static size_t read_cb(void *userdata, hyper_context *ctx, uint8_t *buf, size_t b struct conn_data *conn = (struct conn_data *)userdata; ssize_t ret = read(conn->fd, buf, buf_len); - if (ret < 0) { - int err = errno; - if (err == EAGAIN) { - // would block, register interest - if (conn->read_waker != NULL) { - hyper_waker_free(conn->read_waker); - } - conn->read_waker = hyper_context_waker(ctx); - return HYPER_IO_PENDING; - } else { - // kaboom - return HYPER_IO_ERROR; - } - } else { + if (ret >= 0) { return ret; } + + if (errno != EAGAIN) { + // kaboom + return HYPER_IO_ERROR; + } + + // would block, register interest + if (conn->read_waker != NULL) { + hyper_waker_free(conn->read_waker); + } + conn->read_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; } static size_t write_cb(void *userdata, hyper_context *ctx, const uint8_t *buf, size_t buf_len) { struct conn_data *conn = (struct conn_data *)userdata; ssize_t ret = write(conn->fd, buf, buf_len); - if (ret < 0) { - int err = errno; - if (err == EAGAIN) { - // would block, register interest - if (conn->write_waker != NULL) { - hyper_waker_free(conn->write_waker); - } - conn->write_waker = hyper_context_waker(ctx); - return HYPER_IO_PENDING; - } else { - // kaboom - return HYPER_IO_ERROR; - } - } else { + if (ret >= 0) { return ret; } + + if (errno != EAGAIN) { + // kaboom + return HYPER_IO_ERROR; + } + + // would block, register interest + if (conn->write_waker != NULL) { + hyper_waker_free(conn->write_waker); + } + conn->write_waker = hyper_context_waker(ctx); + return HYPER_IO_PENDING; } static void free_conn_data(struct conn_data *conn) { @@ -98,9 +96,9 @@ static int connect_to(const char *host, const char *port) { if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) { break; - } else { - close(sfd); } + + close(sfd); } freeaddrinfo(result); @@ -126,17 +124,20 @@ static int poll_req_upload(void *userdata, struct upload_body* upload = userdata; ssize_t res = read(upload->fd, upload->buf, upload->len); - if (res < 0) { - printf("error reading upload file: %d", errno); - return HYPER_POLL_ERROR; - } else if (res == 0) { + if (res > 0) { + *chunk = hyper_buf_copy(upload->buf, res); + return HYPER_POLL_READY; + } + + if (res == 
0) { // All done! *chunk = NULL; return HYPER_POLL_READY; - } else { - *chunk = hyper_buf_copy(upload->buf, res); - return HYPER_POLL_READY; } + + // Oh no! + printf("error reading upload file: %d", errno); + return HYPER_POLL_ERROR; } static int print_each_header(void *userdata, @@ -348,20 +349,20 @@ int main(int argc, char *argv[]) { hyper_executor_push(exec, body_data); break; - } else { - assert(task_type == HYPER_TASK_EMPTY); - hyper_task_free(task); - hyper_body_free(resp_body); + } - printf("\n -- Done! -- \n"); + assert(task_type == HYPER_TASK_EMPTY); + hyper_task_free(task); + hyper_body_free(resp_body); - // Cleaning up before exiting - hyper_executor_free(exec); - free_conn_data(conn); - free(upload.buf); + printf("\n -- Done! -- \n"); - return 0; - } + // Cleaning up before exiting + hyper_executor_free(exec); + free_conn_data(conn); + free(upload.buf); + + return 0; case EXAMPLE_NOT_SET: // A background task for hyper completed... hyper_task_free(task); @@ -387,17 +388,17 @@ int main(int argc, char *argv[]) { if (sel_ret < 0) { printf("select() error\n"); return 1; - } else { - if (FD_ISSET(conn->fd, &fds_read)) { - hyper_waker_wake(conn->read_waker); - conn->read_waker = NULL; - } - if (FD_ISSET(conn->fd, &fds_write)) { - hyper_waker_wake(conn->write_waker); - conn->write_waker = NULL; - } } + if (FD_ISSET(conn->fd, &fds_read)) { + hyper_waker_wake(conn->read_waker); + conn->read_waker = NULL; + } + + if (FD_ISSET(conn->fd, &fds_write)) { + hyper_waker_wake(conn->write_waker); + conn->write_waker = NULL; + } } diff --git a/capi/include/hyper.h b/capi/include/hyper.h index efe5f06106..1f938b8714 100644 --- a/capi/include/hyper.h +++ b/capi/include/hyper.h @@ -355,6 +355,22 @@ void hyper_clientconn_free(struct hyper_clientconn *conn); */ struct hyper_clientconn_options *hyper_clientconn_options_new(void); +/* + Set the whether or not header case is preserved. + + Pass `0` to allow lowercase normalization (default), `1` to retain original case. + */ +void hyper_clientconn_options_set_preserve_header_case(struct hyper_clientconn_options *opts, + int enabled); + +/* + Set the whether or not header order is preserved. + + Pass `0` to allow reordering (default), `1` to retain original ordering. + */ +void hyper_clientconn_options_set_preserve_header_order(struct hyper_clientconn_options *opts, + int enabled); + /* Free a `hyper_clientconn_options *`. */ diff --git a/docs/CODE_OF_CONDUCT.md b/docs/CODE_OF_CONDUCT.md index 4888458ce0..32b8d28528 100644 --- a/docs/CODE_OF_CONDUCT.md +++ b/docs/CODE_OF_CONDUCT.md @@ -4,7 +4,7 @@ - Don't be mean. - Insulting anyone is prohibited. -- Harrassment of any kind is prohibited. +- Harassment of any kind is prohibited. - If another person feels uncomfortable with your remarks, stop it. - If a moderator deems your comment or conduct as inappropriate, stop it. - Disagreeing is fine, but keep it to technical arguments. Never attack the person. diff --git a/docs/ISSUES.md b/docs/ISSUES.md index 403143bdd9..31a9027337 100644 --- a/docs/ISSUES.md +++ b/docs/ISSUES.md @@ -2,6 +2,68 @@ The [issue tracker][issues] for hyper is where we track all features, bugs, and discuss proposals. +## Triaging + +Once an issue has been opened, it is normal for there to be discussion +around it. Some contributors may have differing opinions about the issue, +including whether the behavior being seen is a bug or a feature. This +discussion is part of the process and should be kept focused, helpful, and +professional. 
+ +The objective of helping with triaging issues is to help reduce the issue +backlog and keep the issue tracker healthy, while enabling newcomers another +meaningful way to get engaged and contribute. + +### Acknowledge + +Acknowledge the human. This is meant actively, such as giving a welcome, or +thanks for a detailed report, or any other greeting that makes the person feel +that their contribution (issues are contributions!) is valued. It also is meant +to be internalized, and be sure to always [treat the person kindly][COC] +throughout the rest of the steps of triaging. + +### Ask for more info + +Frequently, we need more information than was originally provided to fully +evaluate an issue. + +If it is a bug report, ask follow up questions that help us get a [minimum +reproducible example][MRE]. This may take several round-trip questions. Once +all the details are gathered, it may be helpful to edit the original issue text +to include them all. + +### Categorize + +Once enough information has been gathered, the issue should be categorized +with [labels][#labels]. Ideally, most issues should be labelled with an area, +effort, and severity. An issue _can_ have multiple areas, pick what fits. There +should be only one severity, and the descriptions of each should help to pick +the right one. The hardest label to select is "effort". If after reading the +descriptions of each effort level, you're still unsure, you can ping a +maintainer to pick one. + +### Adjust the title + +An optional step when triaging is to adjust the title once more information is +known. Sometimes an issue starts as a question, and through discussion, it +turns out to be a feature request, or a bug report. In those cases, the title +should be changed from a question, and the title should be a succinct action to +be taken. For example, a question about an non-existent configuration option +may be reworded to "Add option to Client to do Zed". + +### Mentoring + +The last part of triaging is to try to make the issue a learning experience. +After a discussion with the reporter, it would be good to ask if they are now +interested in submitting the change described in the issue. + +Otherwise, it would be best to leave the issue with a series of steps for +anyone else to try to write the change. That could be pointing out that a +design proposal is needed, addressing certain points. Or, if the required +changes are mostly know, a list of links to modules and functions where code +needs to be changed, and to what. That way we mentor newcomers to become +successful contributors of new [pull requests][PRs]. + ## Labels Issues are organized with a set of labels. Most labels follow a system of being prefixed by a "type". @@ -47,3 +109,5 @@ The severity marks how _severe_ the issue is. Note this isn't "importance" or "p - **S-refactor**: improve internal code to help readability and maintenance. [issues]: https://github.com/hyperium/hyper/issues +[COC]: ./CODE_OF_CONDUCT.md +[PRs]: ./PULL_REQUESTS.md diff --git a/docs/MSRV.md b/docs/MSRV.md index ddc8534e2e..65127c99bd 100644 --- a/docs/MSRV.md +++ b/docs/MSRV.md @@ -6,4 +6,4 @@ hyper. It is possible that an older compiler can work, but that is not guaranteed. We try to increase the MSRV responsibly, only when a significant new feature is needed. -The current MSRV is: **1.49**. +The current MSRV is: **1.56**. 
diff --git a/docs/PULL_REQUESTS.md b/docs/PULL_REQUESTS.md new file mode 100644 index 0000000000..0df7d73658 --- /dev/null +++ b/docs/PULL_REQUESTS.md @@ -0,0 +1,49 @@ +# Pull Requests + +Pull requests are the way to submit changes to the hyper repository. + +## Submitting a Pull Request + +In most cases, it a good idea to discuss a potential change in an +[issue](ISSUES.md). This will allow other contributors to provide guidance and +feedback _before_ significant code work is done, and can increase the +likelihood of getting the pull request merged. + +### Tests + +If the change being proposed alters code (as opposed to only documentation for +example), it is either adding new functionality to hyper or it is fixing +existing, broken functionality. In both of these cases, the pull request should +include one or more tests to ensure that hyper does not regress in the future. + +### Commits + +Once code, tests, and documentation have been written, a commit needs to be +made. Following the [commit guidelines](COMMITS.md) will help with the review +process by making your change easier to understand, and makes it easier for +hyper to produce a valuable changelog with each release. + +However, if your message doesn't perfectly match the guidelines, **do not +worry!** The person that eventually merges can easily fixup the message at that +time. + +### Opening the Pull Request + +From within GitHub, open a new pull request from your personal branch. + +Once opened, pull requests are usually reviewed within a few days. + +### Discuss and Update + +You will probably get feedback or requests for changes to your Pull Request. +This is a big part of the submission process so don't be discouraged! Some +contributors may sign off on the Pull Request right away, others may have more +detailed comments or feedback. This is a necessary part of the process in order +to evaluate whether the changes are correct and necessary. + +Any community member can review a PR and you might get conflicting feedback. +Keep an eye out for comments from code owners to provide guidance on +conflicting feedback. + +You don't need to close the PR and create a new one to address feedback. You +may simply push new commits to the existing branch. diff --git a/docs/ROADMAP.md b/docs/ROADMAP.md new file mode 100644 index 0000000000..184cc7adf5 --- /dev/null +++ b/docs/ROADMAP.md @@ -0,0 +1,404 @@ +# hyper 1.0 Roadmap + +## Goal + +Align current hyper to the [hyper VISION][VISION]. + +The VISION outlines a decision-making framework, use-cases, and general shape +of hyper. This roadmap describes the currently known problems with hyper, and +then shows what changes are needed to make hyper 1.0 look more like what is in +the VISION. + +## Known Issues + + +> **Note**: These known issues are as of hyper v0.14.x. After v1.0 is released, +ideally these issues will have been solved. Keeping this history may be helpful +to Future Us, though. + +### Higher-level Client and Server problems + +Both the higher-level `Client` and `Server` types have stability concerns. + +For the `hyper::Server`: + +- The `Accept` trait is complex, and too easy to get wrong. If used with TLS, a slow TLS handshake + can affect all other new connections waiting for it to finish. +- The `MakeService<&IO>` is confusing. The bounds are an assault on the eyes. +- The `MakeService` API doesn't allow to easily annotate the HTTP connection with `tracing`. +- Graceful shutdown doesn't give enough control. 
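For readers who have not used the 0.14 high-level server, a concrete sketch of the `MakeService<&AddrStream>` shape the list above complains about may help. This is a minimal hand-written example against the existing 0.14 API (assuming the `full` feature set and a Tokio runtime; the handler and address are illustrative), not something introduced by this change.

```rust
use std::convert::Infallible;
use std::net::SocketAddr;

use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

#[tokio::main]
async fn main() -> Result<(), hyper::Error> {
    let addr = SocketAddr::from(([127, 0, 0, 1], 8080));

    // One closure per connection (the `MakeService<&AddrStream>`), and another
    // closure per request (the `Service`), each with its own async block and
    // explicit error type.
    let make_svc = make_service_fn(|conn: &AddrStream| {
        let remote = conn.remote_addr();
        async move {
            Ok::<_, Infallible>(service_fn(move |_req: Request<Body>| async move {
                Ok::<_, Infallible>(Response::new(Body::from(format!("hello {}", remote))))
            }))
        }
    });

    Server::bind(&addr).serve(make_svc).await
}
```

The nested closures and their bounds are the "assault on the eyes" referenced above; the conn-level API discussed next skips the `MakeService` layer entirely.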
+ + +It's more common for people to simply use `hyper::server::conn` at this point, +than to bother with the `hyper::Server`. + +While the `hyper::Client` is much easier to use, problems still exist: + +- The whole `Connect` design isn't stable. + - ALPN and proxies can provide surprising extra configuration of connections. + - Some `Connect` implementations may wish to view the path, in addition to the scheme, host, and port. + - Wants `runtime` feature +- The Pool could be made more general or composable. At the same time, more customization is + desired, and it's not clear +how to expose it yet. + + +### Runtime woes + +hyper has been able to support different runtimes, but it has sometimes awkward +default support for Tokio. + +- The `runtime` cargo-feature isn't additive +- Built-in Tokio support can be confusing +- Executors and Timers + - The `runtime` feature currently enables a few options that require a timer, such as timeouts and + keepalive intervals. It implicitly relies on Tokio's timer context. This can be quite confusing. +- IO traits + - Should we publicly depend on Tokio's traits? + - `futures-io`? + - Definitely nope. + - Not stable. (0.3?) + - No uninitialized memory. + - Eventual `std` traits? + - They've been in design for years. + - We cannot base our schedule on them. + - When they are stable, we can: + - Provide a bridge in `hyper-util`. + - Consider a 2.0 of hyper. + - Define our own traits, provide util wrappers? + +### Forwards-compatibility + +There's a concern about forwards-compatibility. We want to be able to add +support for new HTTP features without needing a new major version. While most +of `http` and `hyper` are prepared for that, there's two potential problems. + +- New frames on an HTTP stream (body) + - Receiving a new frame type would require a new trait method + - There's no way to implement a "receive unknown frame" that hyper doesn't know about. + - Sending an unknown frame type would be even harder. + - Besides being able to pass an "unknown" type through the trait, the user would need to be + able to describe how that frame is encoded in HTTP/2/3. +- New HTTP versions + - HTTP/3 will require a new transport abstraction. It's not as simple as just using some + `impl AsyncRead + AsyncWrite`. While HTTP/2 bundled the concept of stream creation internally, + and thus could be managed wholly on top of a read-write transport, HTTP/3 is different. Stream + creation is shifted to the QUIC protocol, and HTTP/3 needs to be able to use that directly. + - This means the existing `Connection` types for both client and server will not be able to + accept a QUIC transport so we can add HTTP/3 support. + +### Errors + +It's not easy to match for specific errors. + +The `Error::source()` can leak an internal dependency. For example, a +`hyper::Error` may wrap an `h2::Error`. Users can downcast the source at +runtime, and hyper internally changing the version of its `h2` dependency can +cause runtime breakage for users. + +Formatting errors is in conflict with the current expected norm. The +`fmt::Display` implementation for `hyper::Error` currently prints its own +message, and then prints the message of any wrapped source error. The Errors +Working Group currently recommends that errors only print their own message +(link?). This conflict means that error "reporters", which crawl a source chain +and print each error, has a lot of duplicated information. 
+ +``` +error fetching website: error trying to connect: tcp connect error: Connection refused (os error 61) +tcp connect error: Connection refused (os error 61) +Connection refused (os error 61) +``` + +While there is a good reason for why hyper's `Error` types do this, at the very +least, it _is_ unfortunate. + +### You call hyper, or hyper calls you? + +> Note: this problem space, of who calls whom, will be explored more deeply in +> a future article. + +At times, it's been wondered whether hyper should call user code, or if user +code should call hyper. For instance, should a `Service` be called with a +request when the connection receives one, or should the user always poll for +the next request. + +There's a similar question around sending a message body. Should hyper ask the +body for more data to write, or should the user call a `write` method directly? + +These both get at a root topic about [write +observability](https://github.com/hyperium/hyper/issues/2181). How do you know +when a response, or when body data, has been written successfully? This is +desirable for metrics, or for triggering other side-effects. + +The `Service` trait also has some other frequently mentioned issues. Does +`poll_ready` pull its complexity weight for servers? What about returning +errors, what does that mean? Ideally users would turn all errors into +appropriate `http::Response`s. But in HTTP/2 and beyond, stream errors are +different from HTTP Server Error responses. Could the `Service::Error` type do +more to encourage best practices? + +## Design + +The goal is to get hyper closer to the [VISION][], using that to determine the +best way to solve the known issues above. The main thrust of the proposed +changes are to make hyper more **Flexible** and stable. + +In order to keep hyper **Understandable**, however, the proposed changes *must* +be accompanied by providing utilities that solve the common usage patterns, +documentation explaining how to use the more flexible pieces, and guides on how +to reach for the `hyper-util`ity belt. + +The majority of the changes are smaller and can be contained to the *Public +API* section, since they usually only apply to a single module or type. But the +biggest changes are explained in detail here. + +### Split per HTTP version + +The existing `Connection` types, both for the client and server, abstract over +HTTP version by requiring a generic `AsyncRead + AsyncWrite` transport type. +But as we figure out HTTP/3, that needs to change. So to prepare now, the +`Connection` types will be split up. + +For example, there will now be `hyper::server::conn::http1::Connection` and +`hyper::server::conn::http2::Connection` types. + +These specific types will still have a very similar looking API that, as the +VISION describes, provides **Correct** connection management as it pertains to +HTTP. + +There will be still be a type to wrap the different versions. It will no longer +be generic over the transport type, to prepare for being able to wrap HTTP/3 +connections. Exactly how it will wrap, either by using internal trait objects, +or an `enum Either` style, or using a `trait Connection` that each type +implements, is something to be determined. It's likely that this "auto" type +will start in `hyper-util`. + +### Focus on the `Connection` level + +As mentioned in the *Known Issues*, the higher-level `Client` and `Server` have +stability and complexity problems. Therefore, for hyper 1.0, the main API will +focus on the "lower-level" connection types. 
The `Client` and `Server` helpers +will be moved to `hyper-util`. + +## Public API + +### body + +The `Body` struct is removed. Its internal "variants" are [separated into +distinct types](https://github.com/hyperium/hyper/issues/2345), and can start +in either `hyper-util` or `http-body-util`. + +The exported trait `HttpBody` is renamed to `Body`. + +A single `Body` implementation in `hyper` is the one provided by receiving +client responses and server requests. It has the name `Streaming`. + +> **Unresolved**: Other names can be considered during implementation. Another +> option is to not publicly name the implementation, but return `Response`s. + +The `Body` trait will be experimented on to see about making it possible to +return more frame types beyonds just data and trailers. + +> **Unresolved**: What exactly this looks like will only be known after +> experimentation. + +### client + +The high-level `hyper::Client` will be removed, along with the +`hyper::client::connect` module. They will be explored more in `hyper-util`. + +As described in *Design*, the `client::conn` module will gain `http1` and +`http2` sub-modules, providing per-version `SendRequest`, `Connection`, and +`Builder` structs. An `auto` version can be explored in `hyper-util`. + +### error + +The `hyper::Error` struct remains in place. + +All errors returned from `Error::source()` are made opaque. They are wrapped an +internal `Opaque` newtype that still allows printing, but prevents downcasting +to the internal dependency. + +A new `hyper::error::Code` struct is defined. It is an opaque struct, with +associated constants defining various code variants. + +> Alternative: define a non-exhaustive enum. It's not clear that this is +> definitely better, though. Keeping it an opaque struct means we can add +> secondary parts to the code in the future, or add bit flags, or similar +> extensions. + +The purpose of `Code` is to provide an abstraction over the kind of error that +is encountered. The `Code` could be some behavior noticed inside hyper, such as +an incomplete HTTP message. Or it can be "translated" from the underlying +protocol, if it defines protocol level errors. For example, an +`h2::Reason::CANCEL`. + +### rt + +The `Executor` trait stays in here. + +Define a new trait `Timer`, which describes a way for users to provide a source +of sleeping/timeout futures. Similar to `Executor`, a new generic is added to +connection builders to provide a `Timer`. + +### server + +The higher-level `hyper::Server` struct, its related `Builder`, and the +`Accept` trait are all removed. + +The `AddrStream` struct will be completely removed, as it provides no value but +causes binary bloat. + +Similar to `client`, and as describe in the *Design*, the `conn` modules will +be expanded to support `http1` and `http2` submodules. An `auto` version can be +explored in `hyper-util`. + +### service + +A vendored and simplified `Service` trait will be explored. + +The error type for `Service`s used for a server will explore having the return +type changed from any error to one that can become a `hyper::error::Code`. + +> **Unresolved**: Both of the above points are not set in stone. We will +> explore and decide if they are the best outcome during development. + +The `MakeService` pieces will be removed. + +### Cargo Features + +Remove the `stream` feature. The `Stream` trait is not stable, and we cannot +depend on an unstable API. + +Remove the `tcp` and `runtime` features. 
The automatic executor and timer parts +are handled by providing implementations of `Executor` and `Timer`. The +`connect` and `Accept` parts are also moving to `hyper-util`. + +### Public Dependencies + +- `http` +- `http-body` +- `bytes` + +Cannot be public while "unstable": + +- `tracing` + +## `hyper-util` + + +### body + +A channel implementation of `Body` that has an API to know when the data has +been successfully written is provided in `hyper_util::body::channel`. + +### client + +A `Pool` struct that implements `Service` is provided. It fills a similar role +as the previous `hyper::Client`. + +> **Note**: The `Pool` might be something that goes into the `tower` crate +> instead. Or it might stay here as a slightly more specialized racing-connect +> pool. We'll find out as we go. + +A `connect` submodule that mostly mirrors the existing `hyper::client::connect` +module is moved here. Connectors can be used as a source to provide `Service`s +used by the `Pool`. + +### rt + +We can provide Tokio-backed implementations of `Executor` and `Timer`. + +### server + +A `GracefulShutdown` helper is provided, to allow for similar style of graceful +shutdown as the previous `hyper::Server` did, but with better control. + +# Appendix + +## Unresolved Questions + +There are some parts of the proposal which are not fully resolved. They are +mentioned in Design and API sections above, but also collected here for easy +finding. While they all have _plans_, they are more exploratory parts of the +API, and thus they have a higher possibility of changing as we implement them. + +The goal is to have these questions resolved and removed from the document by +the time there is a [Release Candidate][timeline]. + +### Should there be `hyper::io` traits? + +Depending on `tokio` just for `AsyncRead` and `AsyncWrite` is convenient, but +can be confusing for users integrating hyper with other runtimes. It also ties +our version directly to Tokio. We can consider having vendored traits, and +providing Tokio wrappers in `hyper-util`. + +### Should returned body types be `impl Body`? + +### How could the `Body` trait prepare for unknown frames? + +We will experiment with this, and keep track of those experiments in a +dedicated issue. It might be possible to use something like this: + +```rust +pub trait Body { + type Data; + fn poll_frame(..) -> Result>>; +} + +pub struct Frame(Kind); + +enum Kind { + Data(T), + Trailers(HeaderMap), + Unknown(Box), +} +``` + +### Should there be a simplified `hyper::Service` trait, or should hyper depend on `tower-service`? + +- There's still a few uncertain decisions around tower, such as if it should be + changed to `async fn call`, and if `poll_ready` is the best way to handle + backpressure. +- It's not clear that the backpressure is something needed at the `Server` + boundary, thus meaning we should remove `poll_ready` from hyper. +- It's not 100% clear if we should keep the service pattern, or use a + pull-based API. This will be explored in a future blog post. + +## FAQ + +### Why did you pick _that_ name? Why not this other better name? + +Naming is hard. We certainly should solve it, but discussion for particular +names for structs and traits should be scoped to the specific issues. This +document is to define the shape of the library API. + +### Should I publicly depend on `hyper-util`? + +The `hyper-util` crate will not reach 1.0 when `hyper` does. Some types and +traits are being moved to `hyper-util`. 
As with any pre-1.0 crate, you _can_ +publicly depend on it, but it is explicitly less stable. + +In most cases, it's recommended to not publicly expose your dependency on +`hyper-util`. If you depend on a trait, such as used by the moved higher-level +`Client` or `Server`, it may be better for your users to define your own +abstraction, and then make an internal adapter. + +### Isn't this making hyper harder? + +We are making hyper more **flexible**. As noted in the [VISION][], most use +cases of hyper require it to be flexible. That _can_ mean that the exposed API +is lower level, and that it feels more complicated. It should still be +**understandable**. + +But the hyper 1.0 effort is more than just the single `hyper` crate. Many +useful helpers will be migrated to a `hyper-util` crate, and likely improved in +the process. The [timeline][] also points out that we will have a significant +documentation push. While the flexible pieces will be in hyper to compose how +they need, we will also write guides for the [hyper.rs][] showing people how to +accomplish the most common tasks. + +[timeline]: https://seanmonstar.com/post/676912131372875776/hyper-10-timeline +[VISION]: https://github.com/hyperium/hyper/pull/2772 +[hyper.rs]: https://hyper.rs diff --git a/docs/TENETS.md b/docs/TENETS.md new file mode 100644 index 0000000000..5517289c67 --- /dev/null +++ b/docs/TENETS.md @@ -0,0 +1,100 @@ +# Charter + +> hyper is a protective and efficient HTTP library for all. + +# Tenets + +Tenets are guiding principles. They guide how decisions are made for the whole +project. Ideally, we do all of them all the time. In some cases, though, we may +be forced to decide between slightly penalizing one goal or another. In that +case, we tend to support those goals that come earlier in the list over those +that come later (but every case is different). + +## 0. Open + +hyper is open source, always. The success of hyper depends on the health of the +community building and using it. All contributions are in the open. We don't +maintain private versions, and don't include features that aren't useful to +others. + +[We prioritize kindness][CONDUCT], compassion and empathy towards all +contributors. Technical skill is not a substitute for human decency. + +[CONDUCT]: https://github.com/hyperium/hyper/blob/master/docs/CODE_OF_CONDUCT.md + +### Examples + +It's not usually hard for an open source library to stay open and also meet its +other priorities. Here's some instances where being **Open** would be more +important than **Correct** or **Fast**: + +- Say an individual were to bring forward a contribution that makes hyper more + correct, or faster, perhaps fixing some serious bug. But in doing so, they + also insulted people, harassed other contributors or users, or shamed + everyone for the previous code. They felt their contribution was "invaluable". + We would not accept such a contribution, instead banning the user and + rewriting the code amongst the kind collaborators of the project. + +- Say someone brings a contribution that adds a new feature useful for + performance or correctness, but their work accomplishes this by integrating + hyper with a proprietary library. We would not accept such a contribution, + because we don't want such a feature limited only to those users willing to + compromise openness, and we don't want to bifurcate the ecosystem between those + who make that compromise and those who don't. + +## 1. 
Correct + +hyper is a memory safe and precise implementation of the HTTP specification. +Memory safety is vital in a core Internet technology. Following the HTTP +specifications correctly protects users. It makes the software durable to the +“real world”. Where feasible, hyper enforces correct usage. + +This is more than just "don't write bugs". hyper actively protects the user. + +### Examples + +- Even though we follow the **HTTP/\*** specs, hyper doesn't blindly implement + everything without considering if it is safe to do so. + +## 2. Fast + +A fast experience delights users. A faster network library means a faster +application, resulting in delighting our users’ users. Whether with one request, +or millions. + +Being _fast_ means we improve throughput, drive down CPU usage, and improve +sustainability. + +Fast _enough_. We don't sacrifice sanity for speed. + +## 3. HTTP/* + +hyper is specifically focused on HTTP. Supporting new HTTP versions is in scope, +but supporting separate protocols is not. + +This also defines what the abstraction layer is: the API is designed around +sending and receiving HTTP messages. + +## 4. Flexible + +hyper enables as many usecases as possible. It has no opinion on application +structure, and makes few assumptions about its environment. This includes being +portable to different operating systems. + +### Examples + +- While we choose safer defaults to be **Correct**, hyper includes options to + _allow_ different behavior, when the user requires them. +- Providing choice usually makes things more complex, so being **Flexible** does + mean it's less _easy_. That can sometimes conflict with simplest way of making + hyper **Understandable**. + +## 5. Understandable + +hyper is [no more complicated than it has to +be](https://en.wikipedia.org/wiki/Occam%27s_razor). HTTP is not simple. It may +not be as "easy" as 1-line to do everything, but it shouldn't be "hard" to find +the answers. + +From logical and misuse-resistant APIs, to stellar documentation, to transparent +metrics. diff --git a/docs/VISION.md b/docs/VISION.md new file mode 100644 index 0000000000..75104c3ebb --- /dev/null +++ b/docs/VISION.md @@ -0,0 +1,230 @@ +# hyper Vision + +## Purpose + +This is an overview of what the shape of hyper looks like, but also somewhat +zoomed out, so that the _vision_ can survive while the exact minute details +might shift and change over time. + +### Charter + +> hyper is a protective and efficient HTTP library for all. + +### Tenets + +Tenets are guiding principles. They guide how decisions are made for the whole +project. Ideally, we do all of them all the time. In some cases, though, we may +be forced to decide between slightly penalizing one goal or another. In that +case, we tend to support those goals that come earlier in the list over those +that come later (but every case is different). + +0. Open +1. Correct +2. Fast +3. HTTP/\* +4. Flexible +5. Understandable + +There's a lot more detail about each in [TENETS](./TENETS.md). + +## Use Cases + +Who are the *users* of hyper? How would they use hyper? + +### Low-Level Client Library (curl, reqwest, aws-sdk) + +These client libraries care that hyper is **Flexible**, since they are +expressing their own opinion on how a more-featured HTTP client should act. +This includes opinions on connection establishment, management, pooling, HTTP +version options, and even runtimes. + +curl's main reason for using hyper is that it is **Safe**. 
+ +### Web Server Frameworks (deno, axum) + +These are using hyper's server feature to expose a different, higher-level API +to users. Besides the obvious requirements, these require that hyper is +**Fast**. Servers are costly, handling more requests faster is important to +them. + +That hyper is **Flexible** is also important, in that it needs to be flexible +enough for them to build a server framework, and allow them to express their +own opinions about API to their users. + +### Services and Proxies (linkerd, cloudflare, fastly) + +These are using hyper directly, likely both the client and server, in order to +build efficient and powerful services, applications, and tools for their end +users. They care greatly that hyper is **Correct**, since web traffic can +stretch the limits of what is valid HTTP, and exercise less-common parts of the +specifications. + +They also require hyper to be **Fast**, for similar reasons that the web server +frameworks do. + +### New Rust Web Developers + +These are developers who are either new to Rust, or new to web servers, and +have reached for hyper to start with. + +It's likely that these users don't have strong opinions about how an HTTP +server or client should work, just that it _should_ handle all the things they +normally assume it would. For these users, it would be best to quickly help +them compare their own expectations with hyper's capabilities, and may +suggest reaching for higher-level, _easier_ libraries instead. + +Those that stick around after that recommendation are users that wish both to +learn at a lower level, and to pick and choose what batteries they plug in to +hyper as they move along. While they do care about the other tenets, that hyper +is **Understandable** is of extra importance to them. + +## The Library + +So with all that context in mind, what does hyper, the library, actually look +like? This doesn't highlight what _is_ and _isn't_ present. What currently +needs to change to reach this vision is left to individual version roadmaps. + +### Layers + +In all cases, a user brings their own runtime and IO to work with hyper. The IO +is provided to hyper, and hyper acts on top of it. hyper returns `Future`s that +the user then decides how to poll, likely involving their runtime options. + +![architecture diagram](./vision-arch.svg) + + +#### Protocol Codecs + +hyper has dedicated codecs for the major HTTP versions. Each is internally +designed to be **Correct** and **Fast** when it comes to encoding and decoding. + +The individual codecs may be implemented as sub-crates, with a less-stable +promise, to support the **Flexible** needs of some users who wish to build +their own connection management, or customize encoding and decoding beyond what +is officially supported. + +#### Connection State Management + +A **Correct** implementation includes more than just enforcing certain +characters when encoding and decoding. Order of frames, and flags in certain +frames can affect the state of the connection. Some examples of things enforced +at this layer: + +- If a message has a `content-length`, enforce only that many bytes are read or + written. +- Reading a `Response` before a `Request` is even written implies a mismatched + reply that should be interpreted as an error. +- The presence of some headers, such as `Connection: close`, or the absence of + others, such as `content-length` and `transfer-encoding`, can mean that the + connection should terminate after the current message. 
+- HTTP/2 and HTTP/3 may send connection-level frames that don't pertain to any + specific transaction, and must be read and handled regardless of if a user is + currently checking for a message. + +#### HTTP Role and Version Abstraction + +This is the public API layer. Methods exposed are around sending and receiving +`http::Request`s and `http::Response`s, not around framing specifics of the +different versions. These are built around a client or server `Connection` +interface. + +By exposing this layer publicly, we take care of the **Correct** tenet, by not +forcing the user to send the specific frames themselves. The API should be +designed in a way that a user cannot easily (if at all) create an _incorrect_ +HTTP connection. + +Motivated by the **Flexible** tenet, there _are_ version-specific options that +can be configured at this level, and version-specific functionality can usually +be handled via `http::Extensions`. + +### Not quite stable, but utile (useful) + +Beyond what is directly in the hyper crate, there are useful (utile) parts that +may not meet hyper's stability promise. Developing, experimenting, and exposing +those parts is the purpose of the `hyper-util` crate. That crate does not have +the same stability level as hyper. However, the goal is that things that other +libraries might want to expose as a public dependency do not live in +`hyper-util` forever, but rather stabilize and get promoted into `hyper`. + +Exactly what gets put into `hyper-util` presently is kept in the roadmap +documents. + +### Stability Promise + +What even is hyper's stability promise? Does it mean we are "done"? No. Will we +ever make breaking changes again? Probably. We'll still follow the [semantic +versioning](https://semver.org). + +Prior to 1.0, hyper has already only done breaking changes once a year. So 1 +year isn't much of a promise. We'll have significant more use and understanding +after a few years, and that could prompt some redesign. + +As of this writing, we'll promise that _major_ versions of hyper are stable for +3 years. New features will come out in _minor_ versions frequently. If it is +determined necessary to make breaking changes to the API, we'll save them for +after the 3 years. + +hyper also establishes a Minimum Supported Rust Version (MSRV). hyper will +support Rust versions at least 6 months old. If a new Rust version is released +with a feature hyper wishes to use, we won't do so until at least 6 months +afterwards. hyper will only ever require a new Rust version as a _minor_ +release (1.x), not as a patch (1.x.y). + +## Security + +The security of hyper is a large part of what makes hyper _protective_. We make +hyper secure via the combined efforts of being **Correct**, focusing on +**HTTP/\***, and making it all **Understandable**. + +### Memory Safety + +Being **Correct** requires that hyper be memory-safe. Using the Rust language +gets us most of the way there. But there is the ability to write `unsafe` +Rust. Does being **Correct** mean that we can _never_ write `unsafe` code +anywhere? Even if it helps make hyper **Fast**? We can, carefully. + +How do we balance the two, so that hyper is secure? + +hyper prefers not to have large modules of intertwined `unsafe` code. hyper +does allow small `unsafe` blocks, no more than a few lines, where it's easier +to verify that the `unsafe` code was written **Correctly**. + +### Meticulous Testing + +hyper's test suite grows and grows. There's a lot that needs to be right. +Parsers, encoders, state machines. 
When easily isolated, those pieces have +internal unit tests. But hyper also keeps a large list of growing integration +tests that make sure all the parts are **Correct**. + +Making writing new tests easy is a high priority. Investing in the testing +infrastructure is a proven way to make sure hyper stays **Correct** and secure. + +### Constant Fuzzing + +One thing is to know specific cases to test for. But we can't know all the +inputs or states that *might* cause a bug. That's why hyper has rounds of +fuzzing built into its CI. It's also why hyper signs up for and uses resources +to provide *constant*, around-the-clock fuzzing, always looking for something +that hyper should be hardened against. + +### Security Process + +hyper has an outlined +[SECURITY](https://github.com/hyperium/hyper/blob/master/SECURITY.md) process, +so we can safely report and fix issues. + +## Non-goals + +After writing this up, it is easier to articulate what sorts of things many +might associate with an HTTP library, but which are explicitly *not* for hyper. +These are all things that definitely **out of scope**. + +- TLS: We learned early that bundling TLS directly in hyper [has + problems](https://github.com/hyperium/hyper/issues/985). People also have + very strong opinions about which TLS implementation to use. The design of + hyper allows users to bring their own TLS. +- Routing +- Cookies +- Not-HTTP: WebSockets, or other protocols that are built next to HTTP. It + should be possible to _use_ hyper to upgrade, but the actual next-protocol + should be handled by a different library. diff --git a/docs/vision-arch.svg b/docs/vision-arch.svg new file mode 100644 index 0000000000..c0a8f9b0e6 --- /dev/null +++ b/docs/vision-arch.svg @@ -0,0 +1,4 @@ + + + +
[vision-arch.svg: layered architecture diagram — client and server boxes sit on top of hyper::proto (http1, h2, h3), which sits on user-provided IO and an executor / runtime]
diff --git a/examples/tower_client.rs b/examples/tower_client.rs index 5a2a6e78df..f733fe0d35 100644 --- a/examples/tower_client.rs +++ b/examples/tower_client.rs @@ -1,20 +1,20 @@ #![deny(warnings)] -use hyper::client::conn::Builder; -use hyper::client::connect::HttpConnector; -use hyper::client::service::Connect; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; + use hyper::service::Service; -use hyper::{Body, Request}; +use hyper::{Body, Request, Response}; +use tokio::net::TcpStream; #[tokio::main] -async fn main() -> Result<(), Box> { +async fn main() -> Result<(), Box> { pretty_env_logger::init(); - let mut mk_svc = Connect::new(HttpConnector::new(), Builder::new()); - let uri = "http://127.0.0.1:8080".parse::()?; - let mut svc = mk_svc.call(uri.clone()).await?; + let mut svc = Connector; let body = Body::empty(); @@ -25,3 +25,35 @@ async fn main() -> Result<(), Box> { Ok(()) } + +struct Connector; + +impl Service> for Connector { + type Response = Response; + type Error = Box; + type Future = Pin>>>; + + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> std::task::Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + Box::pin(async move { + let host = req.uri().host().expect("no host in uri"); + let port = req.uri().port_u16().expect("no port in uri"); + + let stream = TcpStream::connect(format!("{}:{}", host, port)).await?; + + let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?; + + tokio::task::spawn(async move { + if let Err(err) = conn.await { + println!("Connection error: {:?}", err); + } + }); + + let res = sender.send_request(req).await?; + Ok(res) + }) + } +} diff --git a/src/body/body.rs b/src/body/body.rs index 9dc1a034f9..699398b86f 100644 --- a/src/body/body.rs +++ b/src/body/body.rs @@ -30,8 +30,8 @@ type TrailersSender = oneshot::Sender; /// A good default [`HttpBody`](crate::body::HttpBody) to use in many /// applications. /// -/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes) -/// or [`body::aggregate`](crate::body::aggregate). +/// Note: To read the full body, use [`body::to_bytes`](crate::body::to_bytes()) +/// or [`body::aggregate`](crate::body::aggregate()). #[must_use = "streams do nothing unless polled"] pub struct Body { kind: Kind, diff --git a/src/body/to_bytes.rs b/src/body/to_bytes.rs index c3c48d3440..038c6fd0f3 100644 --- a/src/body/to_bytes.rs +++ b/src/body/to_bytes.rs @@ -17,7 +17,7 @@ use super::HttpBody; /// # Example /// /// ``` -/// # #[cfg(all(feature = "client", any(feature = "http1", feature = "http2")))] +/// # #[cfg(all(feature = "client", feature = "tcp", any(feature = "http1", feature = "http2")))] /// # async fn doc() -> hyper::Result<()> { /// use hyper::{body::HttpBody}; /// @@ -63,8 +63,13 @@ where return Ok(first.copy_to_bytes(first.remaining())); }; + // Don't pre-emptively reserve *too* much. + let rest = (body.size_hint().lower() as usize).min(1024 * 16); + let cap = first + .remaining() + .saturating_add(second.remaining()) + .saturating_add(rest); // With more than 1 buf, we gotta flatten into a Vec first. 
- let cap = first.remaining() + second.remaining() + body.size_hint().lower() as usize; let mut vec = Vec::with_capacity(cap); vec.put(first); vec.put(second); diff --git a/src/client/client.rs b/src/client/client.rs index cfdd267a11..9baead7349 100644 --- a/src/client/client.rs +++ b/src/client/client.rs @@ -10,6 +10,14 @@ use http::uri::{Port, Scheme}; use http::{Method, Request, Response, Uri, Version}; use tracing::{debug, trace, warn}; +use crate::body::{Body, HttpBody}; +use crate::client::connect::CaptureConnectionExtension; +use crate::common::{ + exec::BoxSendFuture, lazy as hyper_lazy, sync_wrapper::SyncWrapper, task, Future, Lazy, Pin, + Poll, +}; +use crate::rt::Executor; + use super::conn; use super::connect::{self, sealed::Connect, Alpn, Connected, Connection}; use super::pool::{ @@ -17,9 +25,6 @@ use super::pool::{ }; #[cfg(feature = "tcp")] use super::HttpConnector; -use crate::body::{Body, HttpBody}; -use crate::common::{exec::BoxSendFuture, sync_wrapper::SyncWrapper, lazy as hyper_lazy, task, Future, Lazy, Pin, Poll}; -use crate::rt::Executor; /// A Client to make outgoing HTTP requests. /// @@ -28,6 +33,7 @@ use crate::rt::Executor; #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Client { config: Config, + #[cfg_attr(feature = "deprecated", allow(deprecated))] conn_builder: conn::Builder, connector: C, pool: Pool>, @@ -238,7 +244,9 @@ where }) } }; - + req.extensions_mut() + .get_mut::() + .map(|conn| conn.set(&pooled.conn_info)); if pooled.is_http1() { if req.version() == Version::HTTP_2 { warn!("Connection is HTTP/1, but request requires HTTP/2"); @@ -320,12 +328,14 @@ where drop(delayed_tx); }); + #[cfg_attr(feature = "deprecated", allow(deprecated))] self.conn_builder.exec.execute(on_idle); } else { // There's no body to delay, but the connection isn't // ready yet. Only re-insert when it's ready let on_idle = future::poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); + #[cfg_attr(feature = "deprecated", allow(deprecated))] self.conn_builder.exec.execute(on_idle); } @@ -379,6 +389,7 @@ where }); // An execute error here isn't important, we're just trying // to prevent a waste of a socket... 
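Stepping back to the `src/body/to_bytes.rs` change earlier in this diff: the lower bound of the body's `size_hint()` is no longer trusted blindly. It is capped at 16 KiB and combined with saturating adds, so a hostile or buggy hint cannot force a huge up-front allocation. A minimal sketch of the same arithmetic in isolation (the helper name and test values are illustrative, not hyper API):

```rust
/// How much to pre-allocate when flattening a body into a single buffer.
/// `first_len` and `second_len` are bytes already in hand; `size_hint_lower`
/// is whatever the (untrusted) body claims is still coming.
fn initial_capacity(first_len: usize, second_len: usize, size_hint_lower: u64) -> usize {
    // Cap the speculative part so a bogus hint can't blow up memory.
    let rest = (size_hint_lower as usize).min(1024 * 16);
    // Saturating adds: worst case we under-reserve and the Vec grows later.
    first_len.saturating_add(second_len).saturating_add(rest)
}

fn main() {
    // A body claiming 4 GiB remaining still only reserves 16 KiB extra up front.
    assert_eq!(initial_capacity(8, 8, 4 * 1024 * 1024 * 1024), 16 + 16 * 1024);
}
```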
+ #[cfg_attr(feature = "deprecated", allow(deprecated))] self.conn_builder.exec.execute(bg); } Ok(checked_out) @@ -423,6 +434,7 @@ where &self, pool_key: PoolKey, ) -> impl Lazy>>> + Unpin { + #[cfg_attr(feature = "deprecated", allow(deprecated))] let executor = self.conn_builder.exec.clone(); let pool = self.pool.clone(); #[cfg(not(feature = "http2"))] @@ -622,6 +634,7 @@ struct PoolClient { } enum PoolTx { + #[cfg_attr(feature = "deprecated", allow(deprecated))] Http1(conn::SendRequest), #[cfg(feature = "http2")] Http2(conn::Http2SendRequest), @@ -689,6 +702,10 @@ where B: Send + 'static, { fn is_open(&self) -> bool { + if self.conn_info.poisoned.poisoned() { + trace!("marking {:?} as closed because it was poisoned", self.conn_info); + return false; + } match self.tx { PoolTx::Http1(ref tx) => tx.is_ready(), #[cfg(feature = "http2")] @@ -894,6 +911,7 @@ fn is_schema_secure(uri: &Uri) -> bool { #[derive(Clone)] pub struct Builder { client_config: Config, + #[cfg_attr(feature = "deprecated", allow(deprecated))] conn_builder: conn::Builder, pool_config: pool::Config, } @@ -906,6 +924,7 @@ impl Default for Builder { set_host: true, ver: Ver::Auto, }, + #[cfg_attr(feature = "deprecated", allow(deprecated))] conn_builder: conn::Builder::new(), pool_config: pool::Config { idle_timeout: Some(Duration::from_secs(90)), @@ -1063,6 +1082,39 @@ impl Builder { self } + /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. + /// + /// This mimicks the behaviour of major browsers. You probably don't want this. + /// You should only want this if you are implementing a proxy whose main + /// purpose is to sit in front of browsers whose users access arbitrary content + /// which may be malformed, and they expect everything that works without + /// the proxy to keep working with the proxy. + /// + /// This option will prevent Hyper's client from returning an error encountered + /// when parsing a header, except if the error was caused by the character NUL + /// (ASCII code 0), as Chrome specifically always reject those. + /// + /// The ignorable errors are: + /// * empty header names; + /// * characters that are not allowed in header names, except for `\0` and `\r`; + /// * when `allow_spaces_after_header_name_in_responses` is not enabled, + /// spaces and tabs between the header name and the colon; + /// * missing colon between header name and colon; + /// * characters that are not allowed in header values except for `\0` and `\r`. + /// + /// If an ignorable error is encountered, the parser tries to find the next + /// line in the input to resume parsing the rest of the headers. An error + /// will be emitted nonetheless if it finds `\0` or a lone `\r` while + /// looking for the next line. + pub fn http1_ignore_invalid_headers_in_responses( + &mut self, + val: bool, + ) -> &mut Builder { + self.conn_builder + .http1_ignore_invalid_headers_in_responses(val); + self + } + /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// @@ -1337,6 +1389,7 @@ impl Builder { B: HttpBody + Send, B::Data: Send, { + #[cfg_attr(feature = "deprecated", allow(deprecated))] Client { config: self.client_config, conn_builder: self.conn_builder.clone(), diff --git a/src/client/conn.rs b/src/client/conn.rs index 85bc366be9..88e2c413a7 100644 --- a/src/client/conn.rs +++ b/src/client/conn.rs @@ -54,6 +54,11 @@ //! # } //! 
``` +#[cfg(all(feature = "backports", feature = "http1"))] +pub mod http1; +#[cfg(all(feature = "backports", feature = "http2"))] +pub mod http2; + use std::error::Error as StdError; use std::fmt; #[cfg(not(all(feature = "http1", feature = "http2")))] @@ -118,16 +123,30 @@ pin_project! { /// /// This is a shortcut for `Builder::new().handshake(io)`. /// See [`client::conn`](crate::client::conn) for more. +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This function will be replaced with `client::conn::http1::handshake` and `client::conn::http2::handshake` in 1.0, enable the \"backports\" feature to use them now." + ) +)] +#[cfg_attr(feature = "deprecated", allow(deprecated))] pub async fn handshake( io: T, ) -> crate::Result<(SendRequest, Connection)> where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { + #[allow(deprecated)] Builder::new().handshake(io).await } /// The sender side of an established connection. +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This type will be replaced with `client::conn::http1::SendRequest` and `client::conn::http2::SendRequest` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct SendRequest { dispatch: dispatch::Sender, Response>, } @@ -137,6 +156,12 @@ pub struct SendRequest { /// In most cases, this should just be spawned into an executor, so that it /// can process incoming and outgoing messages, notice hangups, and the like. #[must_use = "futures do nothing unless polled"] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This type will be replaced with `client::conn::http1::Connection` and `client::conn::http2::Connection` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct Connection where T: AsyncRead + AsyncWrite + Send + 'static, @@ -149,6 +174,12 @@ where /// /// After setting options, the builder is used to create a handshake future. #[derive(Clone, Debug)] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This type will be replaced with `client::conn::http1::Builder` and `client::conn::http2::Builder` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct Builder { pub(super) exec: Exec, h09_responses: bool, @@ -156,6 +187,8 @@ pub struct Builder { h1_writev: Option, h1_title_case_headers: bool, h1_preserve_header_case: bool, + #[cfg(feature = "ffi")] + h1_preserve_header_order: bool, h1_read_buf_exact_size: Option, h1_max_buf_size: Option, #[cfg(feature = "ffi")] @@ -219,6 +252,7 @@ pub(super) struct Http2SendRequest { // ===== impl SendRequest +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl SendRequest { /// Polls to determine whether this sender can be used yet for a request. 
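The deprecation notes above all point at the new per-version modules. A minimal migration sketch, assuming hyper 0.14 with the `backports`, `client`, and `http1` features plus a tokio runtime with its `net` feature; the address and path are placeholders:

```rust
use hyper::{Body, Request};
use tokio::net::TcpStream;

async fn fetch() -> Result<(), Box<dyn std::error::Error>> {
    let stream = TcpStream::connect("127.0.0.1:8080").await?;

    // Before: hyper::client::conn::handshake(stream) (now deprecated).
    // After: the 1.0-style module, available behind the `backports` feature.
    let (mut sender, conn) = hyper::client::conn::http1::handshake(stream).await?;

    // The connection future drives all HTTP state; it must be polled to completion.
    tokio::spawn(async move {
        if let Err(err) = conn.await {
            eprintln!("connection error: {}", err);
        }
    });

    // Unlike `Client`, the low-level API will not add a Host header for you.
    let req = Request::builder()
        .uri("/")
        .header("Host", "127.0.0.1:8080")
        .body(Body::empty())?;

    let res = sender.send_request(req).await?;
    println!("status: {}", res.status());
    Ok(())
}
```

This mirrors the rewritten `examples/tower_client.rs` earlier in this diff, minus the `Service` wrapper.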
/// @@ -252,6 +286,7 @@ impl SendRequest { } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl SendRequest where B: HttpBody + 'static, @@ -337,6 +372,7 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Service> for SendRequest where B: HttpBody + 'static, @@ -354,6 +390,7 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl fmt::Debug for SendRequest { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SendRequest").finish() @@ -423,6 +460,7 @@ impl Clone for Http2SendRequest { // ===== impl Connection +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Connection where T: AsyncRead + AsyncWrite + Unpin + Send + 'static, @@ -492,7 +530,7 @@ where /// /// This setting is configured by the server peer by sending the /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value recieved from the + /// This method returns the currently acknowledged value received from the /// remote. /// /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 @@ -506,9 +544,10 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Future for Connection where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + T: AsyncRead + AsyncWrite + Unpin + Send, B: HttpBody + Send + 'static, B::Data: Send, B::Error: Into>, @@ -534,6 +573,7 @@ where } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl fmt::Debug for Connection where T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, @@ -546,6 +586,7 @@ where // ===== impl Builder +#[cfg_attr(feature = "deprecated", allow(deprecated))] impl Builder { /// Creates a new connection builder. #[inline] @@ -558,6 +599,8 @@ impl Builder { h1_parser_config: Default::default(), h1_title_case_headers: false, h1_preserve_header_case: false, + #[cfg(feature = "ffi")] + h1_preserve_header_order: false, h1_max_buf_size: None, #[cfg(feature = "ffi")] h1_headers_raw: false, @@ -658,6 +701,24 @@ impl Builder { self } + /// Set whether HTTP/1 connections will silently ignored malformed header lines. + /// + /// If this is enabled and and a header line does not start with a valid header + /// name, or does not include a colon at all, the line will be silently ignored + /// and no error will be reported. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + pub fn http1_ignore_invalid_headers_in_responses( + &mut self, + enabled: bool, + ) -> &mut Builder { + self.h1_parser_config + .ignore_invalid_headers_in_responses(enabled); + self + } + /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. /// @@ -704,6 +765,21 @@ impl Builder { self } + /// Set whether to support preserving original header order. + /// + /// Currently, this will record the order in which headers are received, and store this + /// ordering in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "ffi")] + pub fn http1_preserve_header_order(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_order = enabled; + self + } + /// Sets the exact size of the read buffer to *always* use. /// /// Note that setting this option unsets the `http1_max_buf_size` option. 
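For the pooled `Client`, the same lenient-parsing switch is forwarded from `Client::builder()` (added in `src/client/client.rs` above). A small sketch, assuming the `client`, `http1`, and `tcp` features; as the documentation stresses, this is really only appropriate for browser-facing proxies:

```rust
use hyper::client::HttpConnector;
use hyper::Client;

fn lenient_client() -> Client<HttpConnector> {
    Client::builder()
        // Skip malformed response header lines instead of failing the request.
        .http1_ignore_invalid_headers_in_responses(true)
        .build_http()
}
```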
@@ -951,6 +1027,10 @@ impl Builder { if opts.h1_preserve_header_case { conn.set_preserve_header_case(); } + #[cfg(feature = "ffi")] + if opts.h1_preserve_header_order { + conn.set_preserve_header_order(); + } if opts.h09_responses { conn.set_h09_responses(); } @@ -1044,9 +1124,11 @@ where trait AssertSend: Send {} trait AssertSendSync: Send + Sync {} +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSendSync for SendRequest {} +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSend for Connection where @@ -1056,6 +1138,7 @@ where { } +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSendSync for Connection where @@ -1065,6 +1148,7 @@ where { } +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[doc(hidden)] impl AssertSendSync for Builder {} diff --git a/src/client/conn/http1.rs b/src/client/conn/http1.rs new file mode 100644 index 0000000000..d8936d8655 --- /dev/null +++ b/src/client/conn/http1.rs @@ -0,0 +1,539 @@ +//! HTTP/1 client connections + +use std::error::Error as StdError; +use std::fmt; + +use bytes::Bytes; +use http::{Request, Response}; +use httparse::ParserConfig; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use super::super::dispatch; +use crate::common::{ + task, Future, Pin, Poll, +}; +use crate::proto; +use crate::upgrade::Upgraded; + +type Dispatcher = + proto::dispatch::Dispatcher, B, T, proto::h1::ClientTransaction>; + +/// The sender side of an established connection. +pub struct SendRequest { + dispatch: dispatch::Sender, Response>, +} + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// For instance, if the `Connection` is used for an HTTP upgrade request, + /// it is possible the server sent back the first bytes of the new protocol + /// along with the response upgrade. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + _inner: (), +} + + +/// A future that processes all HTTP state for the IO object. +/// +/// In most cases, this should just be spawned into an executor, so that it +/// can process incoming and outgoing messages, notice hangups, and the like. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Body + 'static, +{ + inner: Option>, +} + +impl Connection +where + T: AsyncRead + AsyncWrite + Send + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, +{ + /// Return the inner IO object, and additional information. + /// + /// Only works for HTTP/1 connections. HTTP/2 connections will panic. + pub fn into_parts(self) -> Parts { + let (io, read_buf, _) = self.inner.expect("already upgraded").into_inner(); + Parts { + io, + read_buf, + _inner: (), + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actually shutdown the IO object. 
Instead you + /// would take it back using `into_parts`. + /// + /// Use [`poll_fn`](https://docs.rs/futures/0.1.25/futures/future/fn.poll_fn.html) + /// and [`try_ready!`](https://docs.rs/futures/0.1.25/futures/macro.try_ready.html) + /// to work with this function; or use the `without_shutdown` wrapper. + pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { + self.inner.as_mut().expect("algready upgraded").poll_without_shutdown(cx) + } +} + +/// A builder to configure an HTTP connection. +/// +/// After setting options, the builder is used to create a handshake future. +#[derive(Clone, Debug)] +pub struct Builder { + h09_responses: bool, + h1_parser_config: ParserConfig, + h1_writev: Option, + h1_title_case_headers: bool, + h1_preserve_header_case: bool, + #[cfg(feature = "ffi")] + h1_preserve_header_order: bool, + h1_read_buf_exact_size: Option, + h1_max_buf_size: Option, +} + +/// Returns a handshake future over some IO. +/// +/// This is a shortcut for `Builder::new().handshake(io)`. +/// See [`client::conn`](crate::client::conn) for more. +pub async fn handshake( + io: T, +) -> crate::Result<(SendRequest, Connection)> +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, +{ + Builder::new().handshake(io).await +} + +// ===== impl SendRequest + +impl SendRequest { + /// Polls to determine whether this sender can be used yet for a request. + /// + /// If the associated connection is closed, this returns an Error. + pub fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + self.dispatch.poll_ready(cx) + } + + /// Waits until the dispatcher is ready + /// + /// If the associated connection is closed, this returns an Error. + pub async fn ready(&mut self) -> crate::Result<()> { + futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await + } + + /* + pub(super) async fn when_ready(self) -> crate::Result { + let mut me = Some(self); + future::poll_fn(move |cx| { + ready!(me.as_mut().unwrap().poll_ready(cx))?; + Poll::Ready(Ok(me.take().unwrap())) + }) + .await + } + + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } + */ +} + +impl SendRequest +where + B: Body + 'static, +{ + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Note + /// + /// There are some key differences in what automatic things the `Client` + /// does for you that will not be done here: + /// + /// - `Client` requires absolute-form `Uri`s, since the scheme and + /// authority are needed to connect. They aren't required here. + /// - Since the `Client` requires absolute-form `Uri`s, it can add + /// the `Host` header based on it. You must add a `Host` header yourself + /// before calling this method. + /// - Since absolute-form `Uri`s are not required, if received, they will + /// be serialized as-is. + pub fn send_request( + &mut self, + req: Request, + ) -> impl Future>> { + let sent = self.dispatch.send(req); + + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(resp)) => Ok(resp), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! 
+ Err(_canceled) => panic!("dispatch dropped without returning error"), + }, + Err(_req) => { + tracing::debug!("connection was not ready"); + + Err(crate::Error::new_canceled().with("connection was not ready")) + } + } + } + } + + /* + pub(super) fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + Unpin + where + B: Send, + { + match self.dispatch.try_send(req) { + Ok(rx) => { + Either::Left(rx.then(move |res| { + match res { + Ok(Ok(res)) => future::ok(res), + Ok(Err(err)) => future::err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_) => panic!("dispatch dropped without returning error"), + } + })) + } + Err(req) => { + tracing::debug!("connection was not ready"); + let err = crate::Error::new_canceled().with("connection was not ready"); + Either::Right(future::err((err, Some(req)))) + } + } + } + */ +} + +impl fmt::Debug for SendRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SendRequest").finish() + } +} + +// ===== impl Connection + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + B: Body + 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match ready!(Pin::new(self.inner.as_mut().unwrap()).poll(cx))? { + proto::Dispatched::Shutdown => Poll::Ready(Ok(())), + proto::Dispatched::Upgrade(pending) => match self.inner.take() { + Some(h1) => { + let (io, buf, _) = h1.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + Poll::Ready(Ok(())) + } + _ => { + drop(pending); + unreachable!("Upgraded twice"); + } + }, + } + } +} + +// ===== impl Builder + +impl Builder { + /// Creates a new connection builder. + #[inline] + pub fn new() -> Builder { + Builder { + h09_responses: false, + h1_writev: None, + h1_read_buf_exact_size: None, + h1_parser_config: Default::default(), + h1_title_case_headers: false, + h1_preserve_header_case: false, + #[cfg(feature = "ffi")] + h1_preserve_header_order: false, + h1_max_buf_size: None, + } + } + + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder { + self.h09_responses = enabled; + self + } + + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. 
+ /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn allow_spaces_after_header_name_in_responses( + &mut self, + enabled: bool, + ) -> &mut Builder { + self.h1_parser_config + .allow_spaces_after_header_name_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections will accept obsolete line folding for + /// header values. + /// + /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when + /// parsing. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > A server that receives an obs-fold in a request message that is not + /// > within a message/http container MUST either reject the message by + /// > sending a 400 (Bad Request), preferably with a representation + /// > explaining that obsolete line folding is unacceptable, or replace + /// > each received obs-fold with one or more SP octets prior to + /// > interpreting the field value or forwarding the message downstream. + /// + /// > A proxy or gateway that receives an obs-fold in a response message + /// > that is not within a message/http container MUST either discard the + /// > message and replace it with a 502 (Bad Gateway) response, preferably + /// > with a representation explaining that unacceptable line folding was + /// > received, or replace each received obs-fold with one or more SP + /// > octets prior to interpreting the field value or forwarding the + /// > message downstream. + /// + /// > A user agent that receives an obs-fold in a response message that is + /// > not within a message/http container MUST replace each received + /// > obs-fold with one or more SP octets prior to interpreting the field + /// > value. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + pub fn allow_obsolete_multiline_headers_in_responses( + &mut self, + enabled: bool, + ) -> &mut Builder { + self.h1_parser_config + .allow_obsolete_multiline_headers_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections will silently ignored malformed header lines. + /// + /// If this is enabled and and a header line does not start with a valid header + /// name, or does not include a colon at all, the line will be silently ignored + /// and no error will be reported. + /// + /// Default is false. + pub fn ignore_invalid_headers_in_responses( + &mut self, + enabled: bool, + ) -> &mut Builder { + self.h1_parser_config + .ignore_invalid_headers_in_responses(enabled); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn writev(&mut self, enabled: bool) -> &mut Builder { + self.h1_writev = Some(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Default is false. 
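Putting a few of the parser options documented above together on the new 1.0-style builder. A sketch, assuming the `backports`, `client`, and `http1` features; the particular combination shown is illustrative:

```rust
use hyper::client::conn::http1;

fn lenient_h1_builder() -> http1::Builder {
    let mut builder = http1::Builder::new();
    builder
        // Tolerate `Header : value` responses from sloppy servers.
        .allow_spaces_after_header_name_in_responses(true)
        // Accept obsolete line folding, turning the folds into spaces.
        .allow_obsolete_multiline_headers_in_responses(true)
        // Drop malformed header lines instead of erroring out.
        .ignore_invalid_headers_in_responses(true);
    builder
}
```

The configured builder is then handed an IO object via `handshake`, exactly as in the migration sketch earlier.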
+ pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder { + self.h1_title_case_headers = enabled; + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Default is false. + pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_case = enabled; + self + } + + /// Set whether to support preserving original header order. + /// + /// Currently, this will record the order in which headers are received, and store this + /// ordering in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Default is false. + #[cfg(feature = "ffi")] + pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder { + self.h1_preserve_header_order = enabled; + self + } + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `max_buf_size` option. + /// + /// Default is an adaptive read buffer. + pub fn read_buf_exact_size(&mut self, sz: Option) -> &mut Builder { + self.h1_read_buf_exact_size = sz; + self.h1_max_buf_size = None; + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + assert!( + max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, + "the max_buf_size cannot be smaller than the minimum that h1 specifies." + ); + + self.h1_max_buf_size = Some(max); + self.h1_read_buf_exact_size = None; + self + } + + /// Constructs a connection with the configured options and IO. + /// See [`client::conn`](crate::client::conn) for more. + /// + /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will + /// do nothing. 
+ pub fn handshake( + &self, + io: T, + ) -> impl Future, Connection)>> + where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, + { + let opts = self.clone(); + + async move { + tracing::trace!("client handshake HTTP/1"); + + let (tx, rx) = dispatch::channel(); + let mut conn = proto::Conn::new(io); + conn.set_h1_parser_config(opts.h1_parser_config); + if let Some(writev) = opts.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } + } + if opts.h1_title_case_headers { + conn.set_title_case_headers(); + } + if opts.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + #[cfg(feature = "ffi")] + if opts.h1_preserve_header_order { + conn.set_preserve_header_order(); + } + + if opts.h09_responses { + conn.set_h09_responses(); + } + + if let Some(sz) = opts.h1_read_buf_exact_size { + conn.set_read_buf_exact_size(sz); + } + if let Some(max) = opts.h1_max_buf_size { + conn.set_max_buf_size(max); + } + let cd = proto::h1::dispatch::Client::new(rx); + let proto = proto::h1::Dispatcher::new(cd, conn); + + Ok(( + SendRequest { dispatch: tx }, + Connection { inner: Some(proto) }, + )) + } + } +} diff --git a/src/client/conn/http2.rs b/src/client/conn/http2.rs new file mode 100644 index 0000000000..fd0adf897b --- /dev/null +++ b/src/client/conn/http2.rs @@ -0,0 +1,427 @@ +//! HTTP/2 client connections + +use std::error::Error as StdError; +use std::fmt; +use std::marker::PhantomData; +use std::sync::Arc; +use std::time::Duration; + +use http::{Request, Response}; +use tokio::io::{AsyncRead, AsyncWrite}; + +use super::super::dispatch; +use crate::body::{HttpBody as Body, Body as IncomingBody}; +use crate::common::{ + exec::{BoxSendFuture, Exec}, + task, Future, Pin, Poll, +}; +use crate::proto; +use crate::rt::Executor; + +/// The sender side of an established connection. +pub struct SendRequest { + dispatch: dispatch::UnboundedSender, Response>, +} + +impl Clone for SendRequest { + fn clone(&self) -> SendRequest { + SendRequest { dispatch: self.dispatch.clone() } + } +} + +/// A future that processes all HTTP state for the IO object. +/// +/// In most cases, this should just be spawned into an executor, so that it +/// can process incoming and outgoing messages, notice hangups, and the like. +#[must_use = "futures do nothing unless polled"] +pub struct Connection +where + T: AsyncRead + AsyncWrite + Send + 'static, + B: Body + 'static, +{ + inner: (PhantomData, proto::h2::ClientTask), +} + +/// A builder to configure an HTTP connection. +/// +/// After setting options, the builder is used to create a handshake future. +#[derive(Clone, Debug)] +pub struct Builder { + pub(super) exec: Exec, + h2_builder: proto::h2::client::Config, +} + +/// Returns a handshake future over some IO. +/// +/// This is a shortcut for `Builder::new().handshake(io)`. +/// See [`client::conn`](crate::client::conn) for more. +pub async fn handshake( + exec: E, + io: T, +) -> crate::Result<(SendRequest, Connection)> +where + E: Executor + Send + Sync + 'static, + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, +{ + Builder::new(exec).handshake(io).await +} + +// ===== impl SendRequest + +impl SendRequest { + /// Polls to determine whether this sender can be used yet for a request. + /// + /// If the associated connection is closed, this returns an Error. 
+ pub fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { + if self.is_closed() { + Poll::Ready(Err(crate::Error::new_closed())) + } else { + Poll::Ready(Ok(())) + } + } + + /// Waits until the dispatcher is ready + /// + /// If the associated connection is closed, this returns an Error. + pub async fn ready(&mut self) -> crate::Result<()> { + futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await + } + + /* + pub(super) async fn when_ready(self) -> crate::Result { + let mut me = Some(self); + future::poll_fn(move |cx| { + ready!(me.as_mut().unwrap().poll_ready(cx))?; + Poll::Ready(Ok(me.take().unwrap())) + }) + .await + } + + pub(super) fn is_ready(&self) -> bool { + self.dispatch.is_ready() + } + */ + + pub(super) fn is_closed(&self) -> bool { + self.dispatch.is_closed() + } +} + +impl SendRequest +where + B: Body + 'static, +{ + /// Sends a `Request` on the associated connection. + /// + /// Returns a future that if successful, yields the `Response`. + /// + /// # Note + /// + /// There are some key differences in what automatic things the `Client` + /// does for you that will not be done here: + /// + /// - `Client` requires absolute-form `Uri`s, since the scheme and + /// authority are needed to connect. They aren't required here. + /// - Since the `Client` requires absolute-form `Uri`s, it can add + /// the `Host` header based on it. You must add a `Host` header yourself + /// before calling this method. + /// - Since absolute-form `Uri`s are not required, if received, they will + /// be serialized as-is. + pub fn send_request( + &mut self, + req: Request, + ) -> impl Future>> { + let sent = self.dispatch.send(req); + + async move { + match sent { + Ok(rx) => match rx.await { + Ok(Ok(resp)) => Ok(resp), + Ok(Err(err)) => Err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_canceled) => panic!("dispatch dropped without returning error"), + }, + Err(_req) => { + tracing::debug!("connection was not ready"); + + Err(crate::Error::new_canceled().with("connection was not ready")) + } + } + } + } + + /* + pub(super) fn send_request_retryable( + &mut self, + req: Request, + ) -> impl Future, (crate::Error, Option>)>> + Unpin + where + B: Send, + { + match self.dispatch.try_send(req) { + Ok(rx) => { + Either::Left(rx.then(move |res| { + match res { + Ok(Ok(res)) => future::ok(res), + Ok(Err(err)) => future::err(err), + // this is definite bug if it happens, but it shouldn't happen! + Err(_) => panic!("dispatch dropped without returning error"), + } + })) + } + Err(req) => { + tracing::debug!("connection was not ready"); + let err = crate::Error::new_canceled().with("connection was not ready"); + Either::Right(future::err((err, Some(req)))) + } + } + } + */ +} + +impl fmt::Debug for SendRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SendRequest").finish() + } +} + +// ===== impl Connection + +impl Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Unpin + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + /// Returns whether the [extended CONNECT protocol][1] is enabled or not. + /// + /// This setting is configured by the server peer by sending the + /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value received from the + /// remote. 
+ /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 + pub fn is_extended_connect_protocol_enabled(&self) -> bool { + self.inner.1.is_extended_connect_protocol_enabled() + } +} + +impl fmt::Debug for Connection +where + T: AsyncRead + AsyncWrite + fmt::Debug + Send + 'static, + B: Body + 'static, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Future for Connection +where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.inner.1).poll(cx))? { + proto::Dispatched::Shutdown => Poll::Ready(Ok(())), + #[cfg(feature = "http1")] + proto::Dispatched::Upgrade(_pending) => unreachable!("http2 cannot upgrade"), + } + } +} + +// ===== impl Builder + +impl Builder { + /// Creates a new connection builder. + #[inline] + pub fn new(exec: E) -> Builder + where + E: Executor + Send + Sync + 'static, + { + use std::sync::Arc; + Builder { + exec: Exec::Executor(Arc::new(exec)), + h2_builder: Default::default(), + } + } + + /// Provide an executor to execute background HTTP2 tasks. + pub fn executor(&mut self, exec: E) -> &mut Builder + where + E: Executor + Send + Sync + 'static, + { + self.exec = Exec::Executor(Arc::new(exec)); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_stream_window_size = sz; + } + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn initial_connection_window_size( + &mut self, + sz: impl Into>, + ) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_conn_window_size = sz; + } + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `initial_stream_window_size` and + /// `initial_connection_window_size`. + pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { + use proto::h2::SPEC_WINDOW_SIZE; + + self.h2_builder.adaptive_window = enabled; + if enabled { + self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; + self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; + } + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.max_frame_size = sz; + } + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. 
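Unlike the HTTP/1 builder, the HTTP/2 builder needs an executor to run background tasks (flow control, keep-alive pings). A sketch of wiring it to tokio and enabling the keep-alive options documented here; it assumes the `backports`, `client`, `http2`, and `runtime` features, and `TokioExecutor` is a local helper type, not a hyper export:

```rust
use std::future::Future;
use std::time::Duration;

// Anything implementing `hyper::rt::Executor` can drive the background tasks.
#[derive(Clone, Copy, Debug)]
struct TokioExecutor;

impl<F> hyper::rt::Executor<F> for TokioExecutor
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        tokio::spawn(fut);
    }
}

fn h2_builder() -> hyper::client::conn::http2::Builder {
    let mut builder = hyper::client::conn::http2::Builder::new(TokioExecutor);
    builder
        .adaptive_window(true)
        // Ping an idle connection every 30s; give up after 10s without an ACK.
        .keep_alive_interval(Duration::from_secs(30))
        .keep_alive_timeout(Duration::from_secs(10))
        .keep_alive_while_idle(true);
    builder
}
```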
+ #[cfg(feature = "runtime")] + pub fn keep_alive_interval( + &mut self, + interval: impl Into>, + ) -> &mut Self { + self.h2_builder.keep_alive_interval = interval.into(); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + #[cfg(feature = "runtime")] + pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout = timeout; + self + } + + /// Sets whether HTTP2 keep-alive should apply while the connection is idle. + /// + /// If disabled, keep-alive pings are only sent while there are open + /// request/responses streams. If enabled, pings are also sent when no + /// streams are active. Does nothing if `keep_alive_interval` is + /// disabled. + /// + /// Default is `false`. + #[cfg(feature = "runtime")] + pub fn keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { + self.h2_builder.keep_alive_while_idle = enabled; + self + } + + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. + /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + pub fn max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_concurrent_reset_streams = Some(max); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently 1MB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + + /// Constructs a connection with the configured options and IO. + /// See [`client::conn`](crate::client::conn) for more. + /// + /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will + /// do nothing. + pub fn handshake( + &self, + io: T, + ) -> impl Future, Connection)>> + where + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Data: Send, + B::Error: Into>, + { + let opts = self.clone(); + + async move { + tracing::trace!("client handshake HTTP/1"); + + let (tx, rx) = dispatch::channel(); + let h2 = proto::h2::client::handshake(io, rx, &opts.h2_builder, opts.exec) + .await?; + Ok(( + SendRequest { + dispatch: tx.unbound(), + }, + Connection { + inner: (PhantomData, h2), + }, + )) + } + } +} diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs index 862a0e65c1..4815524811 100644 --- a/src/client/connect/mod.rs +++ b/src/client/connect/mod.rs @@ -80,8 +80,13 @@ //! [`AsyncWrite`]: tokio::io::AsyncWrite //! [`Connection`]: Connection use std::fmt; +use std::fmt::{Debug, Formatter}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::ops::Deref; +use std::sync::Arc; use ::http::Extensions; +use tokio::sync::watch; cfg_feature! 
{ #![feature = "tcp"] @@ -113,6 +118,142 @@ pub struct Connected { pub(super) alpn: Alpn, pub(super) is_proxied: bool, pub(super) extra: Option, + pub(super) poisoned: PoisonPill, +} + +#[derive(Clone)] +pub(crate) struct PoisonPill { + poisoned: Arc, +} + +impl Debug for PoisonPill { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + // print the address of the pill—this makes debugging issues much easier + write!(f, "PoisonPill@{:p} {{ poisoned: {} }}", self.poisoned, self.poisoned.load(Ordering::Relaxed)) + } +} + +impl PoisonPill { + pub(crate) fn healthy() -> Self { + Self { + poisoned: Arc::new(AtomicBool::new(false)), + } + } + pub(crate) fn poison(&self) { + self.poisoned.store(true, Ordering::Relaxed) + } + + pub(crate) fn poisoned(&self) -> bool { + self.poisoned.load(Ordering::Relaxed) + } +} + +/// [`CaptureConnection`] allows callers to capture [`Connected`] information +/// +/// To capture a connection for a request, use [`capture_connection`]. +#[derive(Debug, Clone)] +pub struct CaptureConnection { + rx: watch::Receiver>, +} + +/// Capture the connection for a given request +/// +/// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait. +/// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon +/// as the connection is established. +/// +/// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none. +/// +/// # Examples +/// +/// **Synchronous access**: +/// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been +/// established. This is ideal for situations where you are certain the connection has already +/// been established (e.g. after the response future has already completed). +/// ```rust +/// use hyper::client::connect::{capture_connection, CaptureConnection}; +/// let mut request = http::Request::builder() +/// .uri("http://foo.com") +/// .body(()) +/// .unwrap(); +/// +/// let captured_connection = capture_connection(&mut request); +/// // some time later after the request has been sent... +/// let connection_info = captured_connection.connection_metadata(); +/// println!("we are connected! {:?}", connection_info.as_ref()); +/// ``` +/// +/// **Asynchronous access**: +/// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future resolves as soon as the +/// connection is available. +/// +/// ```rust +/// # #[cfg(feature = "runtime")] +/// # async fn example() { +/// use hyper::client::connect::{capture_connection, CaptureConnection}; +/// let mut request = http::Request::builder() +/// .uri("http://foo.com") +/// .body(hyper::Body::empty()) +/// .unwrap(); +/// +/// let mut captured = capture_connection(&mut request); +/// tokio::task::spawn(async move { +/// let connection_info = captured.wait_for_connection_metadata().await; +/// println!("we are connected! 
{:?}", connection_info.as_ref()); +/// }); +/// +/// let client = hyper::Client::new(); +/// client.request(request).await.expect("request failed"); +/// # } +/// ``` +pub fn capture_connection(request: &mut crate::http::Request) -> CaptureConnection { + let (tx, rx) = CaptureConnection::new(); + request.extensions_mut().insert(tx); + rx +} + +/// TxSide for [`CaptureConnection`] +/// +/// This is inserted into `Extensions` to allow Hyper to back channel connection info +#[derive(Clone)] +pub(crate) struct CaptureConnectionExtension { + tx: Arc>>, +} + +impl CaptureConnectionExtension { + pub(crate) fn set(&self, connected: &Connected) { + self.tx.send_replace(Some(connected.clone())); + } +} + +impl CaptureConnection { + /// Internal API to create the tx and rx half of [`CaptureConnection`] + pub(crate) fn new() -> (CaptureConnectionExtension, Self) { + let (tx, rx) = watch::channel(None); + ( + CaptureConnectionExtension { tx: Arc::new(tx) }, + CaptureConnection { rx }, + ) + } + + /// Retrieve the connection metadata, if available + pub fn connection_metadata(&self) -> impl Deref> + '_ { + self.rx.borrow() + } + + /// Wait for the connection to be established + /// + /// If a connection was established, this will always return `Some(...)`. If the request never + /// successfully connected (e.g. DNS resolution failure), this method will never return. + pub async fn wait_for_connection_metadata( + &mut self, + ) -> impl Deref> + '_ { + if self.rx.borrow().is_some() { + return self.rx.borrow(); + } + let _ = self.rx.changed().await; + self.rx.borrow() + } } pub(super) struct Extra(Box); @@ -130,6 +271,7 @@ impl Connected { alpn: Alpn::None, is_proxied: false, extra: None, + poisoned: PoisonPill::healthy(), } } @@ -189,14 +331,24 @@ impl Connected { self.alpn == Alpn::H2 } + /// Poison this connection + /// + /// A poisoned connection will not be reused for subsequent requests by the pool + pub fn poison(&self) { + self.poisoned.poison(); + tracing::debug!( + poison_pill = ?self.poisoned, "connection was poisoned" + ); + } + // Don't public expose that `Connected` is `Clone`, unsure if we want to // keep that contract... 
- #[cfg(feature = "http2")] pub(super) fn clone(&self) -> Connected { Connected { alpn: self.alpn.clone(), is_proxied: self.is_proxied, extra: self.extra.clone(), + poisoned: self.poisoned.clone(), } } } @@ -351,6 +503,7 @@ pub(super) mod sealed { #[cfg(test)] mod tests { use super::Connected; + use crate::client::connect::CaptureConnection; #[derive(Clone, Debug, PartialEq)] struct Ex1(usize); @@ -409,4 +562,72 @@ mod tests { assert_eq!(ex2.get::(), Some(&Ex1(99))); assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); } + + #[test] + fn test_sync_capture_connection() { + let (tx, rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + tx.set(&Connected::new().proxy(true)); + assert_eq!( + rx.connection_metadata() + .as_ref() + .expect("connected should be set") + .is_proxied(), + true + ); + + // ensure it can be called multiple times + assert_eq!( + rx.connection_metadata() + .as_ref() + .expect("connected should be set") + .is_proxied(), + true + ); + } + + #[tokio::test] + async fn async_capture_connection() { + let (tx, mut rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + let test_task = tokio::spawn(async move { + assert_eq!( + rx.wait_for_connection_metadata() + .await + .as_ref() + .expect("connection should be set") + .is_proxied(), + true + ); + // can be awaited multiple times + assert!( + rx.wait_for_connection_metadata().await.is_some(), + "should be awaitable multiple times" + ); + + assert_eq!(rx.connection_metadata().is_some(), true); + }); + // can't be finished, we haven't set the connection yet + assert_eq!(test_task.is_finished(), false); + tx.set(&Connected::new().proxy(true)); + + assert!(test_task.await.is_ok()); + } + + #[tokio::test] + async fn capture_connection_sender_side_dropped() { + let (tx, mut rx) = CaptureConnection::new(); + assert!( + rx.connection_metadata().is_none(), + "connection has not been set" + ); + drop(tx); + assert!(rx.wait_for_connection_metadata().await.is_none()); + } } diff --git a/src/client/dispatch.rs b/src/client/dispatch.rs index 1d2b87eb00..771c40da30 100644 --- a/src/client/dispatch.rs +++ b/src/client/dispatch.rs @@ -86,7 +86,7 @@ impl Sender { } let (tx, rx) = oneshot::channel(); self.inner - .send(Envelope(Some((val, Callback::Retry(tx))))) + .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } @@ -97,7 +97,7 @@ impl Sender { } let (tx, rx) = oneshot::channel(); self.inner - .send(Envelope(Some((val, Callback::NoRetry(tx))))) + .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } @@ -124,7 +124,16 @@ impl UnboundedSender { pub(crate) fn try_send(&mut self, val: T) -> Result, T> { let (tx, rx) = oneshot::channel(); self.inner - .send(Envelope(Some((val, Callback::Retry(tx))))) + .send(Envelope(Some((val, Callback::Retry(Some(tx)))))) + .map(move |_| rx) + .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) + } + + #[cfg(all(feature = "backports", feature = "http2"))] + pub(crate) fn send(&mut self, val: T) -> Result, T> { + let (tx, rx) = oneshot::channel(); + self.inner + .send(Envelope(Some((val, Callback::NoRetry(Some(tx)))))) .map(move |_| rx) .map_err(|mut e| (e.0).0.take().expect("envelope not dropped").0) } @@ -198,33 +207,59 @@ impl Drop for Envelope { } pub(crate) enum Callback { - 
Retry(oneshot::Sender)>>), - NoRetry(oneshot::Sender>), + Retry(Option)>>>), + NoRetry(Option>>), +} + +impl Drop for Callback { + fn drop(&mut self) { + // FIXME(nox): What errors do we want here? + let error = crate::Error::new_user_dispatch_gone().with(if std::thread::panicking() { + "user code panicked" + } else { + "runtime dropped the dispatch task" + }); + + match self { + Callback::Retry(tx) => { + if let Some(tx) = tx.take() { + let _ = tx.send(Err((error, None))); + } + } + Callback::NoRetry(tx) => { + if let Some(tx) = tx.take() { + let _ = tx.send(Err(error)); + } + } + } + } } impl Callback { #[cfg(feature = "http2")] pub(crate) fn is_canceled(&self) -> bool { match *self { - Callback::Retry(ref tx) => tx.is_closed(), - Callback::NoRetry(ref tx) => tx.is_closed(), + Callback::Retry(Some(ref tx)) => tx.is_closed(), + Callback::NoRetry(Some(ref tx)) => tx.is_closed(), + _ => unreachable!(), } } pub(crate) fn poll_canceled(&mut self, cx: &mut task::Context<'_>) -> Poll<()> { match *self { - Callback::Retry(ref mut tx) => tx.poll_closed(cx), - Callback::NoRetry(ref mut tx) => tx.poll_closed(cx), + Callback::Retry(Some(ref mut tx)) => tx.poll_closed(cx), + Callback::NoRetry(Some(ref mut tx)) => tx.poll_closed(cx), + _ => unreachable!(), } } - pub(crate) fn send(self, val: Result)>) { + pub(crate) fn send(mut self, val: Result)>) { match self { - Callback::Retry(tx) => { - let _ = tx.send(val); + Callback::Retry(ref mut tx) => { + let _ = tx.take().unwrap().send(val); } - Callback::NoRetry(tx) => { - let _ = tx.send(val.map_err(|e| e.0)); + Callback::NoRetry(ref mut tx) => { + let _ = tx.take().unwrap().send(val.map_err(|e| e.0)); } } } diff --git a/src/client/service.rs b/src/client/service.rs index 406f61edc9..f3560ea088 100644 --- a/src/client/service.rs +++ b/src/client/service.rs @@ -8,6 +8,7 @@ use std::marker::PhantomData; use tracing::debug; +#[cfg_attr(feature = "deprecated", allow(deprecated))] use super::conn::{Builder, SendRequest}; use crate::{ body::HttpBody, @@ -23,6 +24,7 @@ use crate::{ #[derive(Debug)] pub struct Connect { inner: C, + #[cfg_attr(feature = "deprecated", allow(deprecated))] builder: Builder, _pd: PhantomData, } @@ -30,6 +32,7 @@ pub struct Connect { impl Connect { /// Create a new `Connect` with some inner connector `C` and a connection /// builder. 
+ #[cfg_attr(feature = "deprecated", allow(deprecated))] pub fn new(inner: C, builder: Builder) -> Self { Self { inner, @@ -49,6 +52,7 @@ where B::Data: Send + Unpin, B::Error: Into>, { + #[cfg_attr(feature = "deprecated", allow(deprecated))] type Response = SendRequest; type Error = crate::Error; type Future = @@ -68,6 +72,7 @@ where match io.await { Ok(io) => match builder.handshake(io).await { Ok((sr, conn)) => { + #[cfg_attr(feature = "deprecated", allow(deprecated))] builder.exec.execute(async move { if let Err(e) = conn.await { debug!("connection error: {:?}", e); diff --git a/src/common/exec.rs b/src/common/exec.rs index b6da9a276b..76f616184b 100644 --- a/src/common/exec.rs +++ b/src/common/exec.rs @@ -11,7 +11,7 @@ use crate::body::HttpBody; use crate::proto::h2::server::H2Stream; use crate::rt::Executor; #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] -use crate::server::conn::spawn_all::{NewSvcTask, Watcher}; +use crate::server::server::{new_svc::NewSvcTask, Watcher}; #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))] use crate::service::HttpService; diff --git a/src/common/io/rewind.rs b/src/common/io/rewind.rs index 58f1de6c89..0afef5f7ea 100644 --- a/src/common/io/rewind.rs +++ b/src/common/io/rewind.rs @@ -60,7 +60,7 @@ where // TODO: There should be a way to do following two lines cleaner... buf.put_slice(&prefix[..copy_len]); prefix.advance(copy_len); - // Put back whats left + // Put back what's left if !prefix.is_empty() { self.pre = Some(prefix); } diff --git a/src/error.rs b/src/error.rs index 20acf3a7a5..468f24cb7a 100644 --- a/src/error.rs +++ b/src/error.rs @@ -137,6 +137,10 @@ pub(super) enum User { #[cfg(feature = "server")] WithoutShutdownNonHttp1, + /// The dispatch task is gone. + #[cfg(feature = "client")] + DispatchGone, + /// User aborted in an FFI callback. #[cfg(feature = "ffi")] AbortedByCallback, @@ -387,6 +391,11 @@ impl Error { Error::new_user(User::AbortedByCallback) } + #[cfg(feature = "client")] + pub(super) fn new_user_dispatch_gone() -> Error { + Error::new(Kind::User(User::DispatchGone)) + } + #[cfg(feature = "http2")] pub(super) fn new_h2(cause: ::h2::Error) -> Error { if cause.is_io() { @@ -483,6 +492,8 @@ impl Error { Kind::User(User::WithoutShutdownNonHttp1) => { "without_shutdown() called on a non-HTTP/1 connection" } + #[cfg(feature = "client")] + Kind::User(User::DispatchGone) => "dispatch task is gone", #[cfg(feature = "ffi")] Kind::User(User::AbortedByCallback) => "operation aborted by an application callback", } diff --git a/src/ext.rs b/src/ext.rs index e9d4587784..224206dd66 100644 --- a/src/ext.rs +++ b/src/ext.rs @@ -1,12 +1,21 @@ //! HTTP extensions. use bytes::Bytes; +#[cfg(any(feature = "http1", feature = "ffi"))] +use http::header::HeaderName; #[cfg(feature = "http1")] -use http::header::{HeaderName, IntoHeaderName, ValueIter}; +use http::header::{IntoHeaderName, ValueIter}; use http::HeaderMap; +#[cfg(feature = "ffi")] +use std::collections::HashMap; #[cfg(feature = "http2")] use std::fmt; +#[cfg(any(feature = "http1", feature = "ffi"))] +mod h1_reason_phrase; +#[cfg(any(feature = "http1", feature = "ffi"))] +pub use h1_reason_phrase::ReasonPhrase; + #[cfg(feature = "http2")] /// Represents the `:protocol` pseudo-header used by /// the [Extended CONNECT Protocol]. 
@@ -31,6 +40,7 @@ impl Protocol { self.inner.as_str() } + #[cfg(feature = "server")] pub(crate) fn from_inner(inner: h2::ext::Protocol) -> Self { Self { inner } } @@ -120,3 +130,99 @@ impl HeaderCaseMap { self.0.append(name, orig); } } + +#[cfg(feature = "ffi")] +#[derive(Clone, Debug)] +/// Hashmap +pub(crate) struct OriginalHeaderOrder { + /// Stores how many entries a Headername maps to. This is used + /// for accounting. + num_entries: HashMap, + /// Stores the ordering of the headers. ex: `vec[i] = (headerName, idx)`, + /// The vector is ordered such that the ith element + /// represents the ith header that came in off the line. + /// The `HeaderName` and `idx` are then used elsewhere to index into + /// the multi map that stores the header values. + entry_order: Vec<(HeaderName, usize)>, +} + +#[cfg(all(feature = "http1", feature = "ffi"))] +impl OriginalHeaderOrder { + pub(crate) fn default() -> Self { + OriginalHeaderOrder { + num_entries: HashMap::new(), + entry_order: Vec::new(), + } + } + + pub(crate) fn insert(&mut self, name: HeaderName) { + if !self.num_entries.contains_key(&name) { + let idx = 0; + self.num_entries.insert(name.clone(), 1); + self.entry_order.push((name, idx)); + } + // Replacing an already existing element does not + // change ordering, so we only care if its the first + // header name encountered + } + + pub(crate) fn append(&mut self, name: N) + where + N: IntoHeaderName + Into + Clone, + { + let name: HeaderName = name.into(); + let idx; + if self.num_entries.contains_key(&name) { + idx = self.num_entries[&name]; + *self.num_entries.get_mut(&name).unwrap() += 1; + } else { + idx = 0; + self.num_entries.insert(name.clone(), 1); + } + self.entry_order.push((name, idx)); + } + + // No doc test is run here because `RUSTFLAGS='--cfg hyper_unstable_ffi'` + // is needed to compile. Once ffi is stablized `no_run` should be removed + // here. + /// This returns an iterator that provides header names and indexes + /// in the original order received. + /// + /// # Examples + /// ```no_run + /// use hyper::ext::OriginalHeaderOrder; + /// use hyper::header::{HeaderName, HeaderValue, HeaderMap}; + /// + /// let mut h_order = OriginalHeaderOrder::default(); + /// let mut h_map = Headermap::new(); + /// + /// let name1 = b"Set-CookiE"; + /// let value1 = b"a=b"; + /// h_map.append(name1); + /// h_order.append(name1); + /// + /// let name2 = b"Content-Encoding"; + /// let value2 = b"gzip"; + /// h_map.append(name2, value2); + /// h_order.append(name2); + /// + /// let name3 = b"SET-COOKIE"; + /// let value3 = b"c=d"; + /// h_map.append(name3, value3); + /// h_order.append(name3) + /// + /// let mut iter = h_order.get_in_order() + /// + /// let (name, idx) = iter.next(); + /// assert_eq!(b"a=b", h_map.get_all(name).nth(idx).unwrap()); + /// + /// let (name, idx) = iter.next(); + /// assert_eq!(b"gzip", h_map.get_all(name).nth(idx).unwrap()); + /// + /// let (name, idx) = iter.next(); + /// assert_eq!(b"c=d", h_map.get_all(name).nth(idx).unwrap()); + /// ``` + pub(crate) fn get_in_order(&self) -> impl Iterator { + self.entry_order.iter() + } +} diff --git a/src/ext/h1_reason_phrase.rs b/src/ext/h1_reason_phrase.rs new file mode 100644 index 0000000000..021b632b6d --- /dev/null +++ b/src/ext/h1_reason_phrase.rs @@ -0,0 +1,221 @@ +use std::convert::TryFrom; + +use bytes::Bytes; + +/// A reason phrase in an HTTP/1 response. 
+/// +/// # Clients +/// +/// For clients, a `ReasonPhrase` will be present in the extensions of the `http::Response` returned +/// for a request if the reason phrase is different from the canonical reason phrase for the +/// response's status code. For example, if a server returns `HTTP/1.1 200 Awesome`, the +/// `ReasonPhrase` will be present and contain `Awesome`, but if a server returns `HTTP/1.1 200 OK`, +/// the response will not contain a `ReasonPhrase`. +/// +/// ```no_run +/// # #[cfg(all(feature = "tcp", feature = "client", feature = "http1"))] +/// # async fn fake_fetch() -> hyper::Result<()> { +/// use hyper::{Client, Uri}; +/// use hyper::ext::ReasonPhrase; +/// +/// let res = Client::new().get(Uri::from_static("http://example.com/non_canonical_reason")).await?; +/// +/// // Print out the non-canonical reason phrase, if it has one... +/// if let Some(reason) = res.extensions().get::() { +/// println!("non-canonical reason: {}", std::str::from_utf8(reason.as_bytes()).unwrap()); +/// } +/// # Ok(()) +/// # } +/// ``` +/// +/// # Servers +/// +/// When a `ReasonPhrase` is present in the extensions of the `http::Response` written by a server, +/// its contents will be written in place of the canonical reason phrase when responding via HTTP/1. +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ReasonPhrase(Bytes); + +impl ReasonPhrase { + /// Gets the reason phrase as bytes. + pub fn as_bytes(&self) -> &[u8] { + &self.0 + } + + /// Converts a static byte slice to a reason phrase. + pub fn from_static(reason: &'static [u8]) -> Self { + // TODO: this can be made const once MSRV is >= 1.57.0 + if find_invalid_byte(reason).is_some() { + panic!("invalid byte in static reason phrase"); + } + Self(Bytes::from_static(reason)) + } + + /// Converts a `Bytes` directly into a `ReasonPhrase` without validating. + /// + /// Use with care; invalid bytes in a reason phrase can cause serious security problems if + /// emitted in a response. + pub unsafe fn from_bytes_unchecked(reason: Bytes) -> Self { + Self(reason) + } +} + +impl TryFrom<&[u8]> for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: &[u8]) -> Result { + if let Some(bad_byte) = find_invalid_byte(reason) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(Bytes::copy_from_slice(reason))) + } + } +} + +impl TryFrom> for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: Vec) -> Result { + if let Some(bad_byte) = find_invalid_byte(&reason) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(Bytes::from(reason))) + } + } +} + +impl TryFrom for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: String) -> Result { + if let Some(bad_byte) = find_invalid_byte(reason.as_bytes()) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(Bytes::from(reason))) + } + } +} + +impl TryFrom for ReasonPhrase { + type Error = InvalidReasonPhrase; + + fn try_from(reason: Bytes) -> Result { + if let Some(bad_byte) = find_invalid_byte(&reason) { + Err(InvalidReasonPhrase { bad_byte }) + } else { + Ok(Self(reason)) + } + } +} + +impl Into for ReasonPhrase { + fn into(self) -> Bytes { + self.0 + } +} + +impl AsRef<[u8]> for ReasonPhrase { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +/// Error indicating an invalid byte when constructing a `ReasonPhrase`. +/// +/// See [the spec][spec] for details on allowed bytes. 
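// Usage sketch for the constructors and `TryFrom` conversions defined in this module,
// assuming hyper 0.14 with the `http1` feature enabled (which exports
// `hyper::ext::ReasonPhrase`).
use std::convert::TryFrom;

use hyper::ext::ReasonPhrase;

fn reason_phrase_examples() {
    // Static phrases are validated eagerly and panic on an invalid byte.
    const PHRASE: &[u8] = b"Everything Is Fine";
    assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE);

    // Runtime data goes through the fallible conversions: CR, LF and other control
    // bytes are rejected, while tab, space, VCHAR and obs-text bytes are accepted.
    assert!(ReasonPhrase::try_from(&b"No\r\nNewlines"[..]).is_err());
    assert!(ReasonPhrase::try_from(String::from("Totally Fine")).is_ok());
}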
+/// +/// [spec]: https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7 +#[derive(Debug)] +pub struct InvalidReasonPhrase { + bad_byte: u8, +} + +impl std::fmt::Display for InvalidReasonPhrase { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Invalid byte in reason phrase: {}", self.bad_byte) + } +} + +impl std::error::Error for InvalidReasonPhrase {} + +const fn is_valid_byte(b: u8) -> bool { + // See https://www.rfc-editor.org/rfc/rfc5234.html#appendix-B.1 + const fn is_vchar(b: u8) -> bool { + 0x21 <= b && b <= 0x7E + } + + // See https://httpwg.org/http-core/draft-ietf-httpbis-semantics-latest.html#fields.values + // + // The 0xFF comparison is technically redundant, but it matches the text of the spec more + // clearly and will be optimized away. + #[allow(unused_comparisons)] + const fn is_obs_text(b: u8) -> bool { + 0x80 <= b && b <= 0xFF + } + + // See https://httpwg.org/http-core/draft-ietf-httpbis-messaging-latest.html#rfc.section.4.p.7 + b == b'\t' || b == b' ' || is_vchar(b) || is_obs_text(b) +} + +const fn find_invalid_byte(bytes: &[u8]) -> Option { + let mut i = 0; + while i < bytes.len() { + let b = bytes[i]; + if !is_valid_byte(b) { + return Some(b); + } + i += 1; + } + None +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn basic_valid() { + const PHRASE: &'static [u8] = b"OK"; + assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); + assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); + } + + #[test] + fn empty_valid() { + const PHRASE: &'static [u8] = b""; + assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); + assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); + } + + #[test] + fn obs_text_valid() { + const PHRASE: &'static [u8] = b"hyp\xe9r"; + assert_eq!(ReasonPhrase::from_static(PHRASE).as_bytes(), PHRASE); + assert_eq!(ReasonPhrase::try_from(PHRASE).unwrap().as_bytes(), PHRASE); + } + + const NEWLINE_PHRASE: &'static [u8] = b"hyp\ner"; + + #[test] + #[should_panic] + fn newline_invalid_panic() { + ReasonPhrase::from_static(NEWLINE_PHRASE); + } + + #[test] + fn newline_invalid_err() { + assert!(ReasonPhrase::try_from(NEWLINE_PHRASE).is_err()); + } + + const CR_PHRASE: &'static [u8] = b"hyp\rer"; + + #[test] + #[should_panic] + fn cr_invalid_panic() { + ReasonPhrase::from_static(CR_PHRASE); + } + + #[test] + fn cr_invalid_err() { + assert!(ReasonPhrase::try_from(CR_PHRASE).is_err()); + } +} diff --git a/src/ffi/client.rs b/src/ffi/client.rs index 1e5f29d548..670f77d141 100644 --- a/src/ffi/client.rs +++ b/src/ffi/client.rs @@ -93,8 +93,8 @@ unsafe impl AsTaskType for hyper_clientconn { ffi_fn! { /// Creates a new set of HTTP clientconn options to be used in a handshake. fn hyper_clientconn_options_new() -> *mut hyper_clientconn_options { - let mut builder = conn::Builder::new(); - builder.http1_preserve_header_case(true); + #[allow(deprecated)] + let builder = conn::Builder::new(); Box::into_raw(Box::new(hyper_clientconn_options { builder, @@ -103,6 +103,26 @@ ffi_fn! { } ?= std::ptr::null_mut() } +ffi_fn! { + /// Set the whether or not header case is preserved. + /// + /// Pass `0` to allow lowercase normalization (default), `1` to retain original case. + fn hyper_clientconn_options_set_preserve_header_case(opts: *mut hyper_clientconn_options, enabled: c_int) { + let opts = non_null! { &mut *opts ?= () }; + opts.builder.http1_preserve_header_case(enabled != 0); + } +} + +ffi_fn! 
{ + /// Set the whether or not header order is preserved. + /// + /// Pass `0` to allow reordering (default), `1` to retain original ordering. + fn hyper_clientconn_options_set_preserve_header_order(opts: *mut hyper_clientconn_options, enabled: c_int) { + let opts = non_null! { &mut *opts ?= () }; + opts.builder.http1_preserve_header_order(enabled != 0); + } +} + ffi_fn! { /// Free a `hyper_clientconn_options *`. fn hyper_clientconn_options_free(opts: *mut hyper_clientconn_options) { diff --git a/src/ffi/http_types.rs b/src/ffi/http_types.rs index f6d32947bf..ea10f139cb 100644 --- a/src/ffi/http_types.rs +++ b/src/ffi/http_types.rs @@ -6,7 +6,7 @@ use super::body::{hyper_body, hyper_buf}; use super::error::hyper_code; use super::task::{hyper_task_return_type, AsTaskType}; use super::{UserDataPointer, HYPER_ITER_CONTINUE}; -use crate::ext::HeaderCaseMap; +use crate::ext::{HeaderCaseMap, OriginalHeaderOrder, ReasonPhrase}; use crate::header::{HeaderName, HeaderValue}; use crate::{Body, HeaderMap, Method, Request, Response, Uri}; @@ -22,11 +22,9 @@ pub struct hyper_response(pub(super) Response); pub struct hyper_headers { pub(super) headers: HeaderMap, orig_casing: HeaderCaseMap, + orig_order: OriginalHeaderOrder, } -#[derive(Debug)] -pub(crate) struct ReasonPhrase(pub(crate) Bytes); - pub(crate) struct RawHeaders(pub(crate) hyper_buf); pub(crate) struct OnInformational { @@ -233,6 +231,7 @@ impl hyper_request { if let Some(headers) = self.0.extensions_mut().remove::() { *self.0.headers_mut() = headers.headers; self.0.extensions_mut().insert(headers.orig_casing); + self.0.extensions_mut().insert(headers.orig_order); } } } @@ -348,9 +347,14 @@ impl hyper_response { .extensions_mut() .remove::() .unwrap_or_else(HeaderCaseMap::default); + let orig_order = resp + .extensions_mut() + .remove::() + .unwrap_or_else(OriginalHeaderOrder::default); resp.extensions_mut().insert(hyper_headers { headers, orig_casing, + orig_order, }); hyper_response(resp) @@ -358,7 +362,7 @@ impl hyper_response { fn reason_phrase(&self) -> &[u8] { if let Some(reason) = self.0.extensions().get::() { - return &reason.0; + return reason.as_bytes(); } if let Some(reason) = self.0.status().canonical_reason() { @@ -404,26 +408,54 @@ ffi_fn! { // and for each one, try to pair the originally cased name with the value. // // TODO: consider adding http::HeaderMap::entries() iterator - for name in headers.headers.keys() { - let mut names = headers.orig_casing.get_all(name); - - for value in headers.headers.get_all(name) { - let (name_ptr, name_len) = if let Some(orig_name) = names.next() { + let mut ordered_iter = headers.orig_order.get_in_order().peekable(); + if ordered_iter.peek().is_some() { + for (name, idx) in ordered_iter { + let (name_ptr, name_len) = if let Some(orig_name) = headers.orig_casing.get_all(name).nth(*idx) { (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) } else { ( - name.as_str().as_bytes().as_ptr(), - name.as_str().as_bytes().len(), + name.as_str().as_bytes().as_ptr(), + name.as_str().as_bytes().len(), ) }; - let val_ptr = value.as_bytes().as_ptr(); - let val_len = value.as_bytes().len(); + let val_ptr; + let val_len; + if let Some(value) = headers.headers.get_all(name).iter().nth(*idx) { + val_ptr = value.as_bytes().as_ptr(); + val_len = value.as_bytes().len(); + } else { + // Stop iterating, something has gone wrong. 
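// Rust-side sketch of what the two FFI setters above delegate to on
// `hyper::client::conn::Builder`. `http1_preserve_header_case` is a public builder
// option in hyper 0.14; header order preservation appears to be reachable only through
// the C API here, since the underlying builder method is gated on the `ffi` feature.
// Assumes the `client` and `http1` features plus a Tokio runtime.
use hyper::client::conn;
use hyper::{Body, Request};
use tokio::io::{AsyncRead, AsyncWrite};

async fn request_with_preserved_case<T>(io: T) -> hyper::Result<()>
where
    T: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
    let (mut sender, connection) = conn::Builder::new()
        .http1_preserve_header_case(true) // ask hyper to retain the original header casing
        .handshake::<_, Body>(io)
        .await?;

    // Drive the connection on a background task while a request is sent through it.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {}", e);
        }
    });

    let req = Request::builder()
        .uri("/")
        .header("X-Custom-Header", "hello")
        .body(Body::empty())
        .expect("valid request");
    let _res = sender.send_request(req).await?;
    Ok(())
}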
+ return; + } if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { return; } } + } else { + for name in headers.headers.keys() { + let mut names = headers.orig_casing.get_all(name); + + for value in headers.headers.get_all(name) { + let (name_ptr, name_len) = if let Some(orig_name) = names.next() { + (orig_name.as_ref().as_ptr(), orig_name.as_ref().len()) + } else { + ( + name.as_str().as_bytes().as_ptr(), + name.as_str().as_bytes().len(), + ) + }; + + let val_ptr = value.as_bytes().as_ptr(); + let val_len = value.as_bytes().len(); + + if HYPER_ITER_CONTINUE != func(userdata, name_ptr, name_len, val_ptr, val_len) { + return; + } + } + } } } } @@ -437,7 +469,8 @@ ffi_fn! { match unsafe { raw_name_value(name, name_len, value, value_len) } { Ok((name, value, orig_name)) => { headers.headers.insert(&name, value); - headers.orig_casing.insert(name, orig_name); + headers.orig_casing.insert(name.clone(), orig_name.clone()); + headers.orig_order.insert(name); hyper_code::HYPERE_OK } Err(code) => code, @@ -456,7 +489,8 @@ ffi_fn! { match unsafe { raw_name_value(name, name_len, value, value_len) } { Ok((name, value, orig_name)) => { headers.headers.append(&name, value); - headers.orig_casing.append(name, orig_name); + headers.orig_casing.append(&name, orig_name.clone()); + headers.orig_order.append(name); hyper_code::HYPERE_OK } Err(code) => code, @@ -469,6 +503,7 @@ impl Default for hyper_headers { Self { headers: Default::default(), orig_casing: HeaderCaseMap::default(), + orig_order: OriginalHeaderOrder::default(), } } } @@ -555,4 +590,68 @@ mod tests { HYPER_ITER_CONTINUE } } + + #[cfg(all(feature = "http1", feature = "ffi"))] + #[test] + fn test_headers_foreach_order_preserved() { + let mut headers = hyper_headers::default(); + + let name1 = b"Set-CookiE"; + let value1 = b"a=b"; + hyper_headers_add( + &mut headers, + name1.as_ptr(), + name1.len(), + value1.as_ptr(), + value1.len(), + ); + + let name2 = b"Content-Encoding"; + let value2 = b"gzip"; + hyper_headers_add( + &mut headers, + name2.as_ptr(), + name2.len(), + value2.as_ptr(), + value2.len(), + ); + + let name3 = b"SET-COOKIE"; + let value3 = b"c=d"; + hyper_headers_add( + &mut headers, + name3.as_ptr(), + name3.len(), + value3.as_ptr(), + value3.len(), + ); + + let mut vec = Vec::::new(); + hyper_headers_foreach(&headers, concat, &mut vec as *mut _ as *mut c_void); + + println!("{}", std::str::from_utf8(&vec).unwrap()); + assert_eq!( + vec, + b"Set-CookiE: a=b\r\nContent-Encoding: gzip\r\nSET-COOKIE: c=d\r\n" + ); + + extern "C" fn concat( + vec: *mut c_void, + name: *const u8, + name_len: usize, + value: *const u8, + value_len: usize, + ) -> c_int { + unsafe { + let vec = &mut *(vec as *mut Vec); + let name = std::slice::from_raw_parts(name, name_len); + let value = std::slice::from_raw_parts(value, value_len); + vec.extend(name); + vec.extend(b": "); + vec.extend(value); + vec.extend(b"\r\n"); + } + HYPER_ITER_CONTINUE + } + } } diff --git a/src/ffi/task.rs b/src/ffi/task.rs index e951e0dacc..ef54fe408f 100644 --- a/src/ffi/task.rs +++ b/src/ffi/task.rs @@ -32,7 +32,7 @@ pub struct hyper_executor { /// The executor of all task futures. /// /// There should never be contention on the mutex, as it is only locked - /// to drive the futures. However, we cannot gaurantee proper usage from + /// to drive the futures. However, we cannot guarantee proper usage from /// `hyper_executor_poll()`, which in C could potentially be called inside /// one of the stored futures. 
The mutex isn't re-entrant, so doing so /// would result in a deadlock, but that's better than data corruption. diff --git a/src/lib.rs b/src/lib.rs index f7a93a1959..e5e4cfc56e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,7 @@ #![deny(missing_debug_implementations)] #![cfg_attr(test, deny(rust_2018_idioms))] #![cfg_attr(all(test, feature = "full"), deny(unreachable_pub))] -#![cfg_attr(test, deny(warnings))] +#![cfg_attr(all(test, feature = "full"), deny(warnings))] #![cfg_attr(all(test, feature = "nightly"), feature(test))] #![cfg_attr(docsrs, feature(doc_cfg))] @@ -53,6 +53,8 @@ //! connectors and acceptors for TCP, and a default executor. //! - `tcp`: Enables convenient implementations over TCP (using tokio). //! - `stream`: Provides `futures::Stream` capabilities. +//! - `backports`: 1.0 functionality backported to 0.14. +//! - `deprecated`: opt-in to deprecation warnings to prepare you for 1.0. //! //! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section diff --git a/src/proto/h1/conn.rs b/src/proto/h1/conn.rs index 66b2cdacc3..5ebff2803e 100644 --- a/src/proto/h1/conn.rs +++ b/src/proto/h1/conn.rs @@ -58,6 +58,8 @@ where #[cfg(all(feature = "server", feature = "runtime"))] h1_header_read_timeout_running: false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, title_case_headers: false, h09_responses: false, #[cfg(feature = "ffi")] @@ -111,6 +113,11 @@ where self.state.preserve_header_case = true; } + #[cfg(feature = "ffi")] + pub(crate) fn set_preserve_header_order(&mut self) { + self.state.preserve_header_order = true; + } + #[cfg(feature = "client")] pub(crate) fn set_h09_responses(&mut self) { self.state.h09_responses = true; @@ -148,19 +155,15 @@ where } pub(crate) fn can_read_head(&self) -> bool { - match self.state.reading { - Reading::Init => { - if T::should_read_first() { - true - } else { - match self.state.writing { - Writing::Init => false, - _ => true, - } - } - } - _ => false, + if !matches!(self.state.reading, Reading::Init) { + return false; + } + + if T::should_read_first() { + return true; } + + !matches!(self.state.writing, Writing::Init) } pub(crate) fn can_read_body(&self) -> bool { @@ -200,6 +203,8 @@ where #[cfg(all(feature = "server", feature = "runtime"))] h1_header_read_timeout_running: &mut self.state.h1_header_read_timeout_running, preserve_header_case: self.state.preserve_header_case, + #[cfg(feature = "ffi")] + preserve_header_order: self.state.preserve_header_order, h09_responses: self.state.h09_responses, #[cfg(feature = "ffi")] on_informational: &mut self.state.on_informational, @@ -358,10 +363,10 @@ where } fn is_mid_message(&self) -> bool { - match (&self.state.reading, &self.state.writing) { - (&Reading::Init, &Writing::Init) => false, - _ => true, - } + !matches!( + (&self.state.reading, &self.state.writing), + (&Reading::Init, &Writing::Init) + ) } // This will check to make sure the io object read is empty. 
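// A minimal sketch of the `backports` feature documented in the lib.rs hunk above: the
// 1.0-style `server::conn::http1::Builder` that replaces the deprecated `Http` type.
// Assumes hyper 0.14 built with the `server`, `http1`, and `backports` features, with
// Tokio providing the accepted TCP stream.
use std::convert::Infallible;

use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpStream;

async fn serve_one(stream: TcpStream) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let service = service_fn(|_req: Request<Body>| async {
        Ok::<_, Infallible>(Response::new(Body::from("hello from the backported builder")))
    });

    // One HTTP/1 connection, served without going through the deprecated `server::conn::Http`.
    hyper::server::conn::http1::Builder::new()
        .serve_connection(stream, service)
        .await?;
    Ok(())
}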
@@ -484,11 +489,10 @@ where } pub(crate) fn can_write_head(&self) -> bool { - if !T::should_read_first() { - if let Reading::Closed = self.state.reading { - return false; - } + if !T::should_read_first() && matches!(self.state.reading, Reading::Closed) { + return false; } + match self.state.writing { Writing::Init => self.io.can_headers_buf(), _ => false, @@ -582,7 +586,7 @@ where } } - // Fix keep-alives when Connection: keep-alive header is not present + // Fix keep-alive when Connection: keep-alive header is not present fn fix_keep_alive(&mut self, head: &mut MessageHead) { let outgoing_is_keep_alive = head .headers @@ -632,15 +636,15 @@ where Writing::Body(ref mut encoder) => { self.io.buffer(encoder.encode(chunk)); - if encoder.is_eof() { - if encoder.is_last() { - Writing::Closed - } else { - Writing::KeepAlive - } - } else { + if !encoder.is_eof() { return; } + + if encoder.is_last() { + Writing::Closed + } else { + Writing::KeepAlive + } } _ => unreachable!("write_body invalid state: {:?}", self.state.writing), }; @@ -671,32 +675,31 @@ where pub(crate) fn end_body(&mut self) -> crate::Result<()> { debug_assert!(self.can_write_body()); - let mut res = Ok(()); - let state = match self.state.writing { - Writing::Body(ref mut encoder) => { - // end of stream, that means we should try to eof - match encoder.end() { - Ok(end) => { - if let Some(end) = end { - self.io.buffer(end); - } - if encoder.is_last() || encoder.is_close_delimited() { - Writing::Closed - } else { - Writing::KeepAlive - } - } - Err(not_eof) => { - res = Err(crate::Error::new_body_write_aborted().with(not_eof)); - Writing::Closed - } - } - } + let encoder = match self.state.writing { + Writing::Body(ref mut enc) => enc, _ => return Ok(()), }; - self.state.writing = state; - res + // end of stream, that means we should try to eof + match encoder.end() { + Ok(end) => { + if let Some(end) = end { + self.io.buffer(end); + } + + self.state.writing = if encoder.is_last() || encoder.is_close_delimited() { + Writing::Closed + } else { + Writing::KeepAlive + }; + + Ok(()) + } + Err(not_eof) => { + self.state.writing = Writing::Closed; + Err(crate::Error::new_body_write_aborted().with(not_eof)) + } + } } // When we get a parse error, depending on what side we are, we might be able @@ -745,14 +748,17 @@ where /// If the read side can be cheaply drained, do so. Otherwise, close. pub(super) fn poll_drain_or_close_read(&mut self, cx: &mut task::Context<'_>) { + if let Reading::Continue(ref decoder) = self.state.reading { + // skip sending the 100-continue + // just move forward to a read, in case a tiny body was included + self.state.reading = Reading::Body(decoder.clone()); + } + let _ = self.poll_read_body(cx); // If still in Reading::Body, just give up match self.state.reading { - Reading::Init | Reading::KeepAlive => { - trace!("body drained"); - return; - } + Reading::Init | Reading::KeepAlive => trace!("body drained"), _ => self.close_read(), } } @@ -824,6 +830,8 @@ struct State { #[cfg(all(feature = "server", feature = "runtime"))] h1_header_read_timeout_running: bool, preserve_header_case: bool, + #[cfg(feature = "ffi")] + preserve_header_order: bool, title_case_headers: bool, h09_responses: bool, /// If set, called with each 1xx informational response received for @@ -1001,43 +1009,35 @@ impl State { self.method = None; self.keep_alive.idle(); - if self.is_idle() { - self.reading = Reading::Init; - self.writing = Writing::Init; - - // !T::should_read_first() means Client. 
- // - // If Client connection has just gone idle, the Dispatcher - // should try the poll loop one more time, so as to poll the - // pending requests stream. - if !T::should_read_first() { - self.notify_read = true; - } - } else { + + if !self.is_idle() { self.close(); + return; + } + + self.reading = Reading::Init; + self.writing = Writing::Init; + + // !T::should_read_first() means Client. + // + // If Client connection has just gone idle, the Dispatcher + // should try the poll loop one more time, so as to poll the + // pending requests stream. + if !T::should_read_first() { + self.notify_read = true; } } fn is_idle(&self) -> bool { - if let KA::Idle = self.keep_alive.status() { - true - } else { - false - } + matches!(self.keep_alive.status(), KA::Idle) } fn is_read_closed(&self) -> bool { - match self.reading { - Reading::Closed => true, - _ => false, - } + matches!(self.reading, Reading::Closed) } fn is_write_closed(&self) -> bool { - match self.writing { - Writing::Closed => true, - _ => false, - } + matches!(self.writing, Writing::Closed) } fn prepare_upgrade(&mut self) -> crate::upgrade::OnUpgrade { diff --git a/src/proto/h1/io.rs b/src/proto/h1/io.rs index 08a3993684..1d251e2c84 100644 --- a/src/proto/h1/io.rs +++ b/src/proto/h1/io.rs @@ -1,17 +1,17 @@ use std::cmp; use std::fmt; +#[cfg(all(feature = "server", feature = "runtime"))] +use std::future::Future; use std::io::{self, IoSlice}; use std::marker::Unpin; use std::mem::MaybeUninit; #[cfg(all(feature = "server", feature = "runtime"))] -use std::future::Future; -#[cfg(all(feature = "server", feature = "runtime"))] use std::time::Duration; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Instant; use bytes::{Buf, BufMut, Bytes, BytesMut}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Instant; use tracing::{debug, trace}; use super::{Http1Transaction, ParseContext, ParsedMessage}; @@ -194,6 +194,8 @@ where #[cfg(all(feature = "server", feature = "runtime"))] h1_header_read_timeout_running: parse_ctx.h1_header_read_timeout_running, preserve_header_case: parse_ctx.preserve_header_case, + #[cfg(feature = "ffi")] + preserve_header_order: parse_ctx.preserve_header_order, h09_responses: parse_ctx.h09_responses, #[cfg(feature = "ffi")] on_informational: parse_ctx.on_informational, @@ -208,9 +210,13 @@ where { *parse_ctx.h1_header_read_timeout_running = false; - if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut { + if let Some(h1_header_read_timeout_fut) = + parse_ctx.h1_header_read_timeout_fut + { // Reset the timer in order to avoid woken up when the timeout finishes - h1_header_read_timeout_fut.as_mut().reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60)); + h1_header_read_timeout_fut + .as_mut() + .reset(Instant::now() + Duration::from_secs(30 * 24 * 60 * 60)); } } return Poll::Ready(Ok(msg)); @@ -224,12 +230,14 @@ where #[cfg(all(feature = "server", feature = "runtime"))] if *parse_ctx.h1_header_read_timeout_running { - if let Some(h1_header_read_timeout_fut) = parse_ctx.h1_header_read_timeout_fut { - if Pin::new( h1_header_read_timeout_fut).poll(cx).is_ready() { + if let Some(h1_header_read_timeout_fut) = + parse_ctx.h1_header_read_timeout_fut + { + if Pin::new(h1_header_read_timeout_fut).poll(cx).is_ready() { *parse_ctx.h1_header_read_timeout_running = false; tracing::warn!("read header from client timeout"); - return Poll::Ready(Err(crate::Error::new_header_timeout())) + return 
Poll::Ready(Err(crate::Error::new_header_timeout())); } } } @@ -727,10 +735,15 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -894,9 +907,7 @@ mod tests { async fn write_buf_flatten() { let _ = pretty_env_logger::try_init(); - let mock = Mock::new() - .write(b"hello world, it's hyper!") - .build(); + let mock = Mock::new().write(b"hello world, it's hyper!").build(); let mut buffered = Buffered::<_, Cursor>>::new(mock); buffered.write_buf.set_strategy(WriteStrategy::Flatten); diff --git a/src/proto/h1/mod.rs b/src/proto/h1/mod.rs index 06d03bf5f1..5a2587a843 100644 --- a/src/proto/h1/mod.rs +++ b/src/proto/h1/mod.rs @@ -14,7 +14,7 @@ pub(crate) use self::conn::Conn; pub(crate) use self::decode::Decoder; pub(crate) use self::dispatch::Dispatcher; pub(crate) use self::encode::{EncodedBuf, Encoder}; - //TODO: move out of h1::io +//TODO: move out of h1::io pub(crate) use self::io::MINIMUM_MAX_BUFFER_SIZE; mod conn; @@ -24,7 +24,6 @@ mod encode; mod io; mod role; - cfg_client! { pub(crate) type ClientTransaction = role::Client; } @@ -84,6 +83,8 @@ pub(crate) struct ParseContext<'a> { #[cfg(all(feature = "server", feature = "runtime"))] h1_header_read_timeout_running: &'a mut bool, preserve_header_case: bool, + #[cfg(feature = "ffi")] + preserve_header_order: bool, h09_responses: bool, #[cfg(feature = "ffi")] on_informational: &'a mut Option, diff --git a/src/proto/h1/role.rs b/src/proto/h1/role.rs index 968b63cb8e..6252207baf 100644 --- a/src/proto/h1/role.rs +++ b/src/proto/h1/role.rs @@ -1,15 +1,14 @@ use std::fmt::{self, Write}; use std::mem::MaybeUninit; -#[cfg(all(feature = "server", feature = "runtime"))] -use tokio::time::Instant; -#[cfg(any(test, feature = "server", feature = "ffi"))] use bytes::Bytes; use bytes::BytesMut; #[cfg(feature = "server")] use http::header::ValueIter; use http::header::{self, Entry, HeaderName, HeaderValue}; use http::{HeaderMap, Method, StatusCode, Version}; +#[cfg(all(feature = "server", feature = "runtime"))] +use tokio::time::Instant; use tracing::{debug, error, trace, trace_span, warn}; use crate::body::DecodedLength; @@ -17,6 +16,8 @@ use crate::body::DecodedLength; use crate::common::date; use crate::error::Parse; use crate::ext::HeaderCaseMap; +#[cfg(feature = "ffi")] +use crate::ext::OriginalHeaderOrder; use crate::headers; use crate::proto::h1::{ Encode, Encoder, Http1Transaction, ParseContext, ParseResult, ParsedMessage, @@ -78,7 +79,7 @@ where if !*ctx.h1_header_read_timeout_running { if let Some(h1_header_read_timeout) = ctx.h1_header_read_timeout { let deadline = Instant::now() + h1_header_read_timeout; - + *ctx.h1_header_read_timeout_running = true; match ctx.h1_header_read_timeout_fut { Some(h1_header_read_timeout_fut) => { debug!("resetting h1 header read timeout timer"); @@ -214,6 +215,13 @@ impl Http1Transaction for Server { None }; + #[cfg(feature = "ffi")] + let mut header_order = if ctx.preserve_header_order { + Some(OriginalHeaderOrder::default()) + } else { + None + }; + let mut headers = ctx.cached_headers.take().unwrap_or_else(HeaderMap::new); headers.reserve(headers_len); @@ -290,6 +298,11 @@ impl Http1Transaction for 
Server { header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); } + #[cfg(feature = "ffi")] + if let Some(ref mut header_order) = header_order { + header_order.append(&name); + } + headers.append(name, value); } @@ -304,6 +317,11 @@ impl Http1Transaction for Server { extensions.insert(header_case_map); } + #[cfg(feature = "ffi")] + if let Some(header_order) = header_order { + extensions.insert(header_order); + } + *ctx.req_method = Some(subject.0.clone()); Ok(Some(ParsedMessage { @@ -358,7 +376,13 @@ impl Http1Transaction for Server { let init_cap = 30 + msg.head.headers.len() * AVERAGE_HEADER_SIZE; dst.reserve(init_cap); - if msg.head.version == Version::HTTP_11 && msg.head.subject == StatusCode::OK { + + let custom_reason_phrase = msg.head.extensions.get::(); + + if msg.head.version == Version::HTTP_11 + && msg.head.subject == StatusCode::OK + && custom_reason_phrase.is_none() + { extend(dst, b"HTTP/1.1 200 OK\r\n"); } else { match msg.head.version { @@ -373,15 +397,21 @@ impl Http1Transaction for Server { extend(dst, msg.head.subject.as_str().as_bytes()); extend(dst, b" "); - // a reason MUST be written, as many parsers will expect it. - extend( - dst, - msg.head - .subject - .canonical_reason() - .unwrap_or("") - .as_bytes(), - ); + + if let Some(reason) = custom_reason_phrase { + extend(dst, reason.as_bytes()); + } else { + // a reason MUST be written, as many parsers will expect it. + extend( + dst, + msg.head + .subject + .canonical_reason() + .unwrap_or("") + .as_bytes(), + ); + } + extend(dst, b"\r\n"); } @@ -468,6 +498,10 @@ impl Server { } } + fn can_have_implicit_zero_content_length(method: &Option, status: StatusCode) -> bool { + Server::can_have_content_length(method, status) && method != &Some(Method::HEAD) + } + fn encode_headers_with_lower_case( msg: Encode<'_, StatusCode>, dst: &mut Vec, @@ -820,7 +854,10 @@ impl Server { } } None | Some(BodyLength::Known(0)) => { - if Server::can_have_content_length(msg.req_method, msg.head.subject) { + if Server::can_have_implicit_zero_content_length( + msg.req_method, + msg.head.subject, + ) { header_name_writer.write_full_header_line( dst, "content-length: 0\r\n", @@ -918,12 +955,9 @@ impl Http1Transaction for Client { trace!("Response.parse Complete({})", len); let status = StatusCode::from_u16(res.code.unwrap())?; - #[cfg(not(feature = "ffi"))] - let reason = (); - #[cfg(feature = "ffi")] let reason = { let reason = res.reason.unwrap(); - // Only save the reason phrase if it isnt the canonical reason + // Only save the reason phrase if it isn't the canonical reason if Some(reason) != status.canonical_reason() { Some(Bytes::copy_from_slice(reason.as_bytes())) } else { @@ -944,12 +978,7 @@ impl Http1Transaction for Client { Err(httparse::Error::Version) if ctx.h09_responses => { trace!("Response.parse accepted HTTP/0.9 response"); - #[cfg(not(feature = "ffi"))] - let reason = (); - #[cfg(feature = "ffi")] - let reason = None; - - (0, StatusCode::OK, reason, Version::HTTP_09, 0) + (0, StatusCode::OK, None, Version::HTTP_09, 0) } Err(e) => return Err(e.into()), } @@ -957,7 +986,10 @@ impl Http1Transaction for Client { let mut slice = buf.split_to(len); - if ctx.h1_parser_config.obsolete_multiline_headers_in_responses_are_allowed() { + if ctx + .h1_parser_config + .obsolete_multiline_headers_in_responses_are_allowed() + { for header in &headers_indices[..headers_len] { // SAFETY: array is valid up to `headers_len` let header = unsafe { &*header.as_ptr() }; @@ -981,6 +1013,13 @@ impl Http1Transaction for Client { None 
}; + #[cfg(feature = "ffi")] + let mut header_order = if ctx.preserve_header_order { + Some(OriginalHeaderOrder::default()) + } else { + None + }; + headers.reserve(headers_len); for header in &headers_indices[..headers_len] { // SAFETY: array is valid up to `headers_len` @@ -1003,6 +1042,11 @@ impl Http1Transaction for Client { header_case_map.append(&name, slice.slice(header.name.0..header.name.1)); } + #[cfg(feature = "ffi")] + if let Some(ref mut header_order) = header_order { + header_order.append(&name); + } + headers.append(name, value); } @@ -1013,11 +1057,16 @@ impl Http1Transaction for Client { } #[cfg(feature = "ffi")] + if let Some(header_order) = header_order { + extensions.insert(header_order); + } + if let Some(reason) = reason { - extensions.insert(crate::ffi::ReasonPhrase(reason)); + // Safety: httparse ensures that only valid reason phrase bytes are present in this + // field. + let reason = unsafe { crate::ext::ReasonPhrase::from_bytes_unchecked(reason) }; + extensions.insert(reason); } - #[cfg(not(feature = "ffi"))] - drop(reason); #[cfg(feature = "ffi")] if ctx.raw_headers { @@ -1474,10 +1523,15 @@ mod tests { cached_headers: &mut None, req_method: &mut method, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1504,10 +1558,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1529,10 +1588,15 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1552,10 +1616,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: true, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1577,10 +1646,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, 
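// Sketch of the server-side behavior the encoding change above enables: placing a
// `ReasonPhrase` in the response extensions makes hyper write that phrase on the HTTP/1
// status line instead of the canonical reason. Assumes hyper 0.14 with the `server` and
// `http1` features.
use std::convert::Infallible;

use hyper::ext::ReasonPhrase;
use hyper::{Body, Request, Response};

async fn handle(_req: Request<Body>) -> Result<Response<Body>, Infallible> {
    let mut res = Response::new(Body::from("ok"));
    // The status line becomes `HTTP/1.1 200 Awesome` rather than `HTTP/1.1 200 OK`.
    res.extensions_mut().insert(ReasonPhrase::from_static(b"Awesome"));
    Ok(res)
}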
h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1606,10 +1680,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config, + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1632,10 +1711,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(crate::Method::GET), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1653,10 +1737,15 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: true, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1695,10 +1784,15 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1718,10 +1812,15 @@ mod tests { cached_headers: &mut None, req_method: &mut None, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1950,10 +2049,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1973,10 +2077,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(m), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -1996,10 +2105,15 
@@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -2496,10 +2610,15 @@ mod tests { cached_headers: &mut None, req_method: &mut Some(Method::GET), h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -2583,10 +2702,15 @@ mod tests { cached_headers: &mut headers, req_method: &mut None, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, @@ -2626,10 +2750,15 @@ mod tests { cached_headers: &mut headers, req_method: &mut None, h1_parser_config: Default::default(), + #[cfg(feature = "runtime")] h1_header_read_timeout: None, + #[cfg(feature = "runtime")] h1_header_read_timeout_fut: &mut None, + #[cfg(feature = "runtime")] h1_header_read_timeout_running: &mut false, preserve_header_case: false, + #[cfg(feature = "ffi")] + preserve_header_order: false, h09_responses: false, #[cfg(feature = "ffi")] on_informational: &mut None, diff --git a/src/proto/h2/client.rs b/src/proto/h2/client.rs index 013f6fb5a8..bac8eceb3a 100644 --- a/src/proto/h2/client.rs +++ b/src/proto/h2/client.rs @@ -7,12 +7,14 @@ use futures_channel::{mpsc, oneshot}; use futures_util::future::{self, Either, FutureExt as _, TryFutureExt as _}; use futures_util::stream::StreamExt as _; use h2::client::{Builder, SendRequest}; +use h2::SendStream; use http::{Method, StatusCode}; use tokio::io::{AsyncRead, AsyncWrite}; use tracing::{debug, trace, warn}; use super::{ping, H2Upgraded, PipeToSendStream, SendBuf}; use crate::body::HttpBody; +use crate::client::dispatch::Callback; use crate::common::{exec::Exec, task, Future, Never, Pin, Poll}; use crate::ext::Protocol; use crate::headers; @@ -20,6 +22,7 @@ use crate::proto::h2::UpgradedSendStream; use crate::proto::Dispatched; use crate::upgrade::Upgraded; use crate::{Body, Request, Response}; +use h2::client::ResponseFuture; type ClientRx = crate::client::dispatch::Receiver, Response>; @@ -170,6 +173,7 @@ where executor: exec, h2_tx, req_rx, + fut_ctx: None, }) } @@ -193,6 +197,20 @@ where } } +struct FutCtx +where + B: HttpBody, +{ + is_connect: bool, + eos: bool, + fut: ResponseFuture, + body_tx: SendStream>, + body: B, + cb: Callback, Response>, +} + +impl Unpin for FutCtx {} + pub(crate) struct ClientTask where B: HttpBody, @@ -203,6 +221,7 @@ where executor: Exec, h2_tx: SendRequest>, req_rx: ClientRx, + fut_ctx: Option>, } impl ClientTask @@ -214,6 +233,99 @@ where } } +impl ClientTask +where + B: HttpBody + Send + 'static, + B::Data: Send, + B::Error: Into>, +{ + fn 
poll_pipe(&mut self, f: FutCtx, cx: &mut task::Context<'_>) { + let ping = self.ping.clone(); + let send_stream = if !f.is_connect { + if !f.eos { + let mut pipe = Box::pin(PipeToSendStream::new(f.body, f.body_tx)).map(|res| { + if let Err(e) = res { + debug!("client request body error: {}", e); + } + }); + + // eagerly see if the body pipe is ready and + // can thus skip allocating in the executor + match Pin::new(&mut pipe).poll(cx) { + Poll::Ready(_) => (), + Poll::Pending => { + let conn_drop_ref = self.conn_drop_ref.clone(); + // keep the ping recorder's knowledge of an + // "open stream" alive while this body is + // still sending... + let ping = ping.clone(); + let pipe = pipe.map(move |x| { + drop(conn_drop_ref); + drop(ping); + x + }); + // Clear send task + self.executor.execute(pipe); + } + } + } + + None + } else { + Some(f.body_tx) + }; + + let fut = f.fut.map(move |result| match result { + Ok(res) => { + // record that we got the response headers + ping.record_non_data(); + + let content_length = headers::content_length_parse_all(res.headers()); + if let (Some(mut send_stream), StatusCode::OK) = (send_stream, res.status()) { + if content_length.map_or(false, |len| len != 0) { + warn!("h2 connect response with non-zero body not supported"); + + send_stream.send_reset(h2::Reason::INTERNAL_ERROR); + return Err(( + crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), + None, + )); + } + let (parts, recv_stream) = res.into_parts(); + let mut res = Response::from_parts(parts, Body::empty()); + + let (pending, on_upgrade) = crate::upgrade::pending(); + let io = H2Upgraded { + ping, + send_stream: unsafe { UpgradedSendStream::new(send_stream) }, + recv_stream, + buf: Bytes::new(), + }; + let upgraded = Upgraded::new(io, Bytes::new()); + + pending.fulfill(upgraded); + res.extensions_mut().insert(on_upgrade); + + Ok(res) + } else { + let res = res.map(|stream| { + let ping = ping.for_stream(&stream); + crate::Body::h2(stream, content_length.into(), ping) + }); + Ok(res) + } + } + Err(err) => { + ping.ensure_not_timed_out().map_err(|e| (e, None))?; + + debug!("client response error: {}", err); + Err((crate::Error::new_h2(err), None)) + } + }); + self.executor.execute(f.cb.send_when(fut)); + } +} + impl Future for ClientTask where B: HttpBody + Send + 'static, @@ -237,6 +349,16 @@ where } }; + match self.fut_ctx.take() { + // If we were waiting on pending open + // continue where we left off. + Some(f) => { + self.poll_pipe(f, cx); + continue; + } + None => (), + } + match self.req_rx.poll_recv(cx) { Poll::Ready(Some((req, cb))) => { // check that future hasn't been canceled already @@ -255,7 +377,6 @@ where let is_connect = req.method() == Method::CONNECT; let eos = body.is_end_stream(); - let ping = self.ping.clone(); if is_connect { if headers::content_length_parse_all(req.headers()) @@ -283,90 +404,31 @@ where } }; - let send_stream = if !is_connect { - if !eos { - let mut pipe = - Box::pin(PipeToSendStream::new(body, body_tx)).map(|res| { - if let Err(e) = res { - debug!("client request body error: {}", e); - } - }); - - // eagerly see if the body pipe is ready and - // can thus skip allocating in the executor - match Pin::new(&mut pipe).poll(cx) { - Poll::Ready(_) => (), - Poll::Pending => { - let conn_drop_ref = self.conn_drop_ref.clone(); - // keep the ping recorder's knowledge of an - // "open stream" alive while this body is - // still sending... 
- let ping = ping.clone(); - let pipe = pipe.map(move |x| { - drop(conn_drop_ref); - drop(ping); - x - }); - self.executor.execute(pipe); - } - } - } - - None - } else { - Some(body_tx) + let f = FutCtx { + is_connect, + eos, + fut, + body_tx, + body, + cb, }; - let fut = fut.map(move |result| match result { - Ok(res) => { - // record that we got the response headers - ping.record_non_data(); - - let content_length = headers::content_length_parse_all(res.headers()); - if let (Some(mut send_stream), StatusCode::OK) = - (send_stream, res.status()) - { - if content_length.map_or(false, |len| len != 0) { - warn!("h2 connect response with non-zero body not supported"); - - send_stream.send_reset(h2::Reason::INTERNAL_ERROR); - return Err(( - crate::Error::new_h2(h2::Reason::INTERNAL_ERROR.into()), - None, - )); - } - let (parts, recv_stream) = res.into_parts(); - let mut res = Response::from_parts(parts, Body::empty()); - - let (pending, on_upgrade) = crate::upgrade::pending(); - let io = H2Upgraded { - ping, - send_stream: unsafe { UpgradedSendStream::new(send_stream) }, - recv_stream, - buf: Bytes::new(), - }; - let upgraded = Upgraded::new(io, Bytes::new()); - - pending.fulfill(upgraded); - res.extensions_mut().insert(on_upgrade); - - Ok(res) - } else { - let res = res.map(|stream| { - let ping = ping.for_stream(&stream); - crate::Body::h2(stream, content_length.into(), ping) - }); - Ok(res) - } + // Check poll_ready() again. + // If the call to send_request() resulted in the new stream being pending open + // we have to wait for the open to complete before accepting new requests. + match self.h2_tx.poll_ready(cx) { + Poll::Pending => { + // Save Context + self.fut_ctx = Some(f); + return Poll::Pending; } - Err(err) => { - ping.ensure_not_timed_out().map_err(|e| (e, None))?; - - debug!("client response error: {}", err); - Err((crate::Error::new_h2(err), None)) + Poll::Ready(Ok(())) => (), + Poll::Ready(Err(err)) => { + f.cb.send(Err((crate::Error::new_h2(err), None))); + continue; } - }); - self.executor.execute(cb.send_when(fut)); + } + self.poll_pipe(f, cx); continue; } diff --git a/src/proto/h2/server.rs b/src/proto/h2/server.rs index b9037ee3dd..4127387e71 100644 --- a/src/proto/h2/server.rs +++ b/src/proto/h2/server.rs @@ -35,6 +35,8 @@ const DEFAULT_CONN_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb const DEFAULT_MAX_FRAME_SIZE: u32 = 1024 * 16; // 16kb const DEFAULT_MAX_SEND_BUF_SIZE: usize = 1024 * 400; // 400kb +// 16 MB "sane default" taken from golang http2 +const DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE: u32 = 16 << 20; #[derive(Clone, Debug)] pub(crate) struct Config { @@ -44,11 +46,13 @@ pub(crate) struct Config { pub(crate) max_frame_size: u32, pub(crate) enable_connect_protocol: bool, pub(crate) max_concurrent_streams: Option, + pub(crate) max_pending_accept_reset_streams: Option, #[cfg(feature = "runtime")] pub(crate) keep_alive_interval: Option, #[cfg(feature = "runtime")] pub(crate) keep_alive_timeout: Duration, pub(crate) max_send_buffer_size: usize, + pub(crate) max_header_list_size: u32, } impl Default for Config { @@ -60,11 +64,13 @@ impl Default for Config { max_frame_size: DEFAULT_MAX_FRAME_SIZE, enable_connect_protocol: false, max_concurrent_streams: None, + max_pending_accept_reset_streams: None, #[cfg(feature = "runtime")] keep_alive_interval: None, #[cfg(feature = "runtime")] keep_alive_timeout: Duration::from_secs(20), max_send_buffer_size: DEFAULT_MAX_SEND_BUF_SIZE, + max_header_list_size: 
DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, } } } @@ -116,10 +122,14 @@ where .initial_window_size(config.initial_stream_window_size) .initial_connection_window_size(config.initial_conn_window_size) .max_frame_size(config.max_frame_size) + .max_header_list_size(config.max_header_list_size) .max_send_buffer_size(config.max_send_buffer_size); if let Some(max) = config.max_concurrent_streams { builder.max_concurrent_streams(max); } + if let Some(max) = config.max_pending_accept_reset_streams { + builder.max_pending_accept_reset_streams(max); + } if config.enable_connect_protocol { builder.enable_connect_protocol(); } @@ -138,7 +148,7 @@ where #[cfg(feature = "runtime")] keep_alive_timeout: config.keep_alive_timeout, // If keep-alive is enabled for servers, always enabled while - // idle, so it can more aggresively close dead connections. + // idle, so it can more aggressively close dead connections. #[cfg(feature = "runtime")] keep_alive_while_idle: true, }; @@ -259,7 +269,7 @@ where let reason = err.h2_reason(); if reason == Reason::NO_ERROR { // NO_ERROR is only used for graceful shutdowns... - trace!("interpretting NO_ERROR user error as graceful_shutdown"); + trace!("interpreting NO_ERROR user error as graceful_shutdown"); self.conn.graceful_shutdown(); } else { trace!("abruptly shutting down with {:?}", reason); @@ -498,7 +508,6 @@ where } } - if !body.is_end_stream() { // automatically set Content-Length from body... if let Some(len) = body.size_hint().exact() { diff --git a/src/server/conn.rs b/src/server/conn.rs index de765b3a15..dfe2172457 100644 --- a/src/server/conn.rs +++ b/src/server/conn.rs @@ -48,8 +48,7 @@ not(all(feature = "http1", feature = "http2")) ))] use std::marker::PhantomData; -#[cfg(feature = "tcp")] -use std::net::SocketAddr; +#[cfg(all(any(feature = "http1", feature = "http2"), feature = "runtime"))] use std::time::Duration; #[cfg(feature = "http2")] @@ -59,6 +58,11 @@ use crate::error::{Kind, Parse}; #[cfg(feature = "http1")] use crate::upgrade::Upgraded; +#[cfg(all(feature = "backports", feature = "http1"))] +pub mod http1; +#[cfg(all(feature = "backports", feature = "http2"))] +pub mod http2; + cfg_feature! { #![any(feature = "http1", feature = "http2")] @@ -70,17 +74,15 @@ cfg_feature! { use tokio::io::{AsyncRead, AsyncWrite}; use tracing::trace; - use super::accept::Accept; + pub use super::server::Connecting; use crate::body::{Body, HttpBody}; use crate::common::{task, Future, Pin, Poll, Unpin}; #[cfg(not(all(feature = "http1", feature = "http2")))] use crate::common::Never; - use crate::common::exec::{ConnStreamExec, Exec, NewSvcExec}; + use crate::common::exec::{ConnStreamExec, Exec}; use crate::proto; - use crate::service::{HttpService, MakeServiceRef}; - use self::spawn_all::NewSvcTask; + use crate::service::HttpService; - pub(super) use self::spawn_all::{NoopWatcher, Watcher}; pub(super) use self::upgrades::UpgradeableConnection; } @@ -96,8 +98,14 @@ pub use super::tcp::{AddrIncoming, AddrStream}; #[derive(Clone, Debug)] #[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This struct will be replaced with `server::conn::http1::Builder` and `server::conn::http2::Builder` in 1.0, enable the \"backports\" feature to use them now." 
+ ) +)] pub struct Http { - exec: E, + pub(crate) exec: E, h1_half_close: bool, h1_keep_alive: bool, h1_title_case_headers: bool, @@ -127,51 +135,6 @@ enum ConnectionMode { Fallback, } -#[cfg(any(feature = "http1", feature = "http2"))] -pin_project! { - /// A stream mapping incoming IOs to new services. - /// - /// Yields `Connecting`s that are futures that should be put on a reactor. - #[must_use = "streams do nothing unless polled"] - #[derive(Debug)] - pub(super) struct Serve { - #[pin] - incoming: I, - make_service: S, - protocol: Http, - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -pin_project! { - /// A future building a new `Service` to a `Connection`. - /// - /// Wraps the future returned from `MakeService` into one that returns - /// a `Connection`. - #[must_use = "futures do nothing unless polled"] - #[derive(Debug)] - #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] - pub struct Connecting { - #[pin] - future: F, - io: Option, - protocol: Http, - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -pin_project! { - #[must_use = "futures do nothing unless polled"] - #[derive(Debug)] - pub(super) struct SpawnAll { - // TODO: re-add `pub(super)` once rustdoc can handle this. - // - // See https://github.com/rust-lang/rust/issues/64705 - #[pin] - pub(super) serve: Serve, - } -} - #[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A future binding a connection with a Service. @@ -256,6 +219,12 @@ impl Unpin for Fallback {} #[derive(Debug)] #[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[cfg_attr( + feature = "deprecated", + deprecated( + note = "This struct will be replaced with `server::conn::http1::Parts` in 1.0, enable the \"backports\" feature to use them now." + ) +)] pub struct Parts { /// The original IO object used in the handshake. pub io: T, @@ -275,6 +244,7 @@ pub struct Parts { // ===== impl Http ===== +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Creates a new instance of the HTTP protocol, ready to spawn a server or @@ -298,6 +268,7 @@ impl Http { } } +#[cfg_attr(feature = "deprecated", allow(deprecated))] #[cfg(any(feature = "http1", feature = "http2"))] impl Http { /// Sets whether HTTP1 is required. @@ -375,7 +346,7 @@ impl Http { self } - /// Set a timeout for reading client request headers. If a client does not + /// Set a timeout for reading client request headers. If a client does not /// transmit the entire header within this time, the connection is closed. /// /// Default is None. @@ -423,6 +394,23 @@ impl Http { self } + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.3.17, it is 20. + /// + /// See for more information. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_pending_accept_reset_streams( + &mut self, + max: impl Into>, + ) -> &mut Self { + self.h2_builder.max_pending_accept_reset_streams = max.into(); + + self + } + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// @@ -567,6 +555,16 @@ impl Http { self } + /// Sets the max size of received header frames. + /// + /// Default is currently ~16MB, but may change. 
+ #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self { + self.h2_builder.max_header_list_size = max; + self + } + /// Set the maximum buffer size for the connection. /// /// Default is ~400kb. @@ -719,23 +717,6 @@ impl Http { fallback: PhantomData, } } - - pub(super) fn serve(&self, incoming: I, make_service: S) -> Serve - where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin, - S: MakeServiceRef, - S::Error: Into>, - Bd: HttpBody, - E: ConnStreamExec<>::Future, Bd>, - { - Serve { - incoming, - make_service, - protocol: self.clone(), - } - } } // ===== impl Connection ===== @@ -788,6 +769,7 @@ where /// /// # Panics /// This method will panic if this connection is using an h2 protocol. + #[cfg_attr(feature = "deprecated", allow(deprecated))] pub fn into_parts(self) -> Parts { self.try_into_parts() .unwrap_or_else(|| panic!("h2 cannot into_inner")) @@ -796,6 +778,7 @@ where /// Return the inner IO object, and additional information, if available. /// /// This method will return a `None` if this connection is using an h2 protocol. + #[cfg_attr(feature = "deprecated", allow(deprecated))] pub fn try_into_parts(self) -> Option> { match self.conn.unwrap() { #[cfg(feature = "http1")] @@ -822,12 +805,7 @@ where /// upgrade. Once the upgrade is completed, the connection would be "done", /// but it is not desired to actually shutdown the IO object. Instead you /// would take it back using `into_parts`. - pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> - where - S: Unpin, - S::Future: Unpin, - B: Unpin, - { + pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> { loop { match *self.conn.as_mut().unwrap() { #[cfg(feature = "http1")] @@ -863,16 +841,17 @@ where /// # Error /// /// This errors if the underlying connection protocol is not HTTP/1. - pub fn without_shutdown(self) -> impl Future>> - where - S: Unpin, - S::Future: Unpin, - B: Unpin, - { + #[cfg_attr(feature = "deprecated", allow(deprecated))] + pub fn without_shutdown(self) -> impl Future>> { let mut conn = Some(self); futures_util::future::poll_fn(move |cx| { ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?; - Poll::Ready(conn.take().unwrap().try_into_parts().ok_or_else(crate::Error::new_without_shutdown_not_h1)) + Poll::Ready( + conn.take() + .unwrap() + .try_into_parts() + .ok_or_else(crate::Error::new_without_shutdown_not_h1), + ) }) } @@ -915,7 +894,7 @@ impl Future for Connection where S: HttpService, S::Error: Into>, - I: AsyncRead + AsyncWrite + Unpin + 'static, + I: AsyncRead + AsyncWrite + Unpin, B: HttpBody + 'static, B::Error: Into>, E: ConnStreamExec, @@ -987,141 +966,6 @@ impl Default for ConnectionMode { } } -// ===== impl Serve ===== - -#[cfg(any(feature = "http1", feature = "http2"))] -impl Serve { - /// Get a reference to the incoming stream. - #[inline] - pub(super) fn incoming_ref(&self) -> &I { - &self.incoming - } - - /* - /// Get a mutable reference to the incoming stream. - #[inline] - pub fn incoming_mut(&mut self) -> &mut I { - &mut self.incoming - } - */ - - /// Spawn all incoming connections onto the executor in `Http`. 
- pub(super) fn spawn_all(self) -> SpawnAll { - SpawnAll { serve: self } - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -impl Serve -where - I: Accept, - IO: AsyncRead + AsyncWrite + Unpin, - IE: Into>, - S: MakeServiceRef, - B: HttpBody, - E: ConnStreamExec<>::Future, B>, -{ - fn poll_next_( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll>>> { - let me = self.project(); - match ready!(me.make_service.poll_ready_ref(cx)) { - Ok(()) => (), - Err(e) => { - trace!("make_service closed"); - return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e)))); - } - } - - if let Some(item) = ready!(me.incoming.poll_accept(cx)) { - let io = item.map_err(crate::Error::new_accept)?; - let new_fut = me.make_service.make_service_ref(&io); - Poll::Ready(Some(Ok(Connecting { - future: new_fut, - io: Some(io), - protocol: me.protocol.clone(), - }))) - } else { - Poll::Ready(None) - } - } -} - -// ===== impl Connecting ===== - -#[cfg(any(feature = "http1", feature = "http2"))] -impl Future for Connecting -where - I: AsyncRead + AsyncWrite + Unpin, - F: Future>, - S: HttpService, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, -{ - type Output = Result, FE>; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - let mut me = self.project(); - let service = ready!(me.future.poll(cx))?; - let io = Option::take(&mut me.io).expect("polled after complete"); - Poll::Ready(Ok(me.protocol.serve_connection(io, service))) - } -} - -// ===== impl SpawnAll ===== - -#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] -impl SpawnAll { - pub(super) fn local_addr(&self) -> SocketAddr { - self.serve.incoming.local_addr() - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -impl SpawnAll { - pub(super) fn incoming_ref(&self) -> &I { - self.serve.incoming_ref() - } -} - -#[cfg(any(feature = "http1", feature = "http2"))] -impl SpawnAll -where - I: Accept, - IE: Into>, - IO: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: MakeServiceRef, - B: HttpBody, - E: ConnStreamExec<>::Future, B>, -{ - pub(super) fn poll_watch( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - watcher: &W, - ) -> Poll> - where - E: NewSvcExec, - W: Watcher, - { - let mut me = self.project(); - loop { - if let Some(connecting) = ready!(me.serve.as_mut().poll_next_(cx)?) { - let fut = NewSvcTask::new(connecting, watcher.clone()); - me.serve - .as_mut() - .project() - .protocol - .exec - .execute_new_svc(fut); - } else { - return Poll::Ready(Ok(())); - } - } - } -} - // ===== impl ProtoServer ===== #[cfg(any(feature = "http1", feature = "http2"))] @@ -1151,150 +995,6 @@ where } } -#[cfg(any(feature = "http1", feature = "http2"))] -pub(crate) mod spawn_all { - use std::error::Error as StdError; - use tokio::io::{AsyncRead, AsyncWrite}; - use tracing::debug; - - use super::{Connecting, UpgradeableConnection}; - use crate::body::{Body, HttpBody}; - use crate::common::exec::ConnStreamExec; - use crate::common::{task, Future, Pin, Poll, Unpin}; - use crate::service::HttpService; - use pin_project_lite::pin_project; - - // Used by `SpawnAll` to optionally watch a `Connection` future. - // - // The regular `hyper::Server` just uses a `NoopWatcher`, which does - // not need to watch anything, and so returns the `Connection` untouched. - // - // The `Server::with_graceful_shutdown` needs to keep track of all active - // connections, and signal that they start to shutdown when prompted, so - // it has a `GracefulWatcher` implementation to do that. 
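// Illustrative sketch (editor's example, not part of this patch): the
// user-facing graceful shutdown flow that the Watcher/GracefulWatcher
// machinery described above supports. Assumes hyper 0.14 with the `full`
// feature set and tokio's `signal` feature.
use std::convert::Infallible;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

async fn run() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req: Request<Body>| async {
            Ok::<_, Infallible>(Response::new(Body::from("hello")))
        }))
    });

    Server::bind(&([127, 0, 0, 1], 3000).into())
        .serve(make_svc)
        // Every accepted connection is wrapped by the graceful watcher, so
        // in-flight requests are drained once this signal future resolves.
        .with_graceful_shutdown(async {
            tokio::signal::ctrl_c().await.expect("install CTRL+C handler");
        })
        .await
}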
- pub trait Watcher, E>: Clone { - type Future: Future>; - - fn watch(&self, conn: UpgradeableConnection) -> Self::Future; - } - - #[allow(missing_debug_implementations)] - #[derive(Copy, Clone)] - pub struct NoopWatcher; - - impl Watcher for NoopWatcher - where - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - S: HttpService, - E: ConnStreamExec, - S::ResBody: 'static, - ::Error: Into>, - { - type Future = UpgradeableConnection; - - fn watch(&self, conn: UpgradeableConnection) -> Self::Future { - conn - } - } - - // This is a `Future` spawned to an `Executor` inside - // the `SpawnAll`. By being a nameable type, we can be generic over the - // user's `Service::Future`, and thus an `Executor` can execute it. - // - // Doing this allows for the server to conditionally require `Send` futures, - // depending on the `Executor` configured. - // - // Users cannot import this type, nor the associated `NewSvcExec`. Instead, - // a blanket implementation for `Executor` is sufficient. - - pin_project! { - #[allow(missing_debug_implementations)] - pub struct NewSvcTask, E, W: Watcher> { - #[pin] - state: State, - } - } - - pin_project! { - #[project = StateProj] - pub(super) enum State, E, W: Watcher> { - Connecting { - #[pin] - connecting: Connecting, - watcher: W, - }, - Connected { - #[pin] - future: W::Future, - }, - } - } - - impl, E, W: Watcher> NewSvcTask { - pub(super) fn new(connecting: Connecting, watcher: W) -> Self { - NewSvcTask { - state: State::Connecting { - connecting, - watcher, - }, - } - } - } - - impl Future for NewSvcTask - where - I: AsyncRead + AsyncWrite + Unpin + Send + 'static, - N: Future>, - NE: Into>, - S: HttpService, - B: HttpBody + 'static, - B::Error: Into>, - E: ConnStreamExec, - W: Watcher, - { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - // If it weren't for needing to name this type so the `Send` bounds - // could be projected to the `Serve` executor, this could just be - // an `async fn`, and much safer. Woe is me. - - let mut me = self.project(); - loop { - let next = { - match me.state.as_mut().project() { - StateProj::Connecting { - connecting, - watcher, - } => { - let res = ready!(connecting.poll(cx)); - let conn = match res { - Ok(conn) => conn, - Err(err) => { - let err = crate::Error::new_user_make_service(err); - debug!("connecting error: {}", err); - return Poll::Ready(()); - } - }; - let future = watcher.watch(conn.with_upgrades()); - State::Connected { future } - } - StateProj::Connected { future } => { - return future.poll(cx).map(|res| { - if let Err(err) = res { - debug!("connection error: {}", err); - } - }); - } - } - }; - - me.state.set(next); - } - } - } -} - #[cfg(any(feature = "http1", feature = "http2"))] mod upgrades { use super::*; diff --git a/src/server/conn/http1.rs b/src/server/conn/http1.rs new file mode 100644 index 0000000000..b2e54976e7 --- /dev/null +++ b/src/server/conn/http1.rs @@ -0,0 +1,446 @@ +//! HTTP/1 Server Connections + +use std::error::Error as StdError; +use std::fmt; +use std::time::Duration; + +use bytes::Bytes; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::common::{task, Future, Pin, Poll, Unpin}; +use crate::proto; +use crate::service::HttpService; + +type Http1Dispatcher = proto::h1::Dispatcher< + proto::h1::dispatch::Server, + B, + T, + proto::ServerTransaction, +>; + +pin_project_lite::pin_project! { + /// A future binding an http1 connection with a Service. 
+ /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + conn: Http1Dispatcher, + } +} + +/// A configuration builder for HTTP/1 server connections. +#[derive(Clone, Debug)] +pub struct Builder { + h1_half_close: bool, + h1_keep_alive: bool, + h1_title_case_headers: bool, + h1_preserve_header_case: bool, + h1_header_read_timeout: Option, + h1_writev: Option, + max_buf_size: Option, + pipeline_flush: bool, +} + +/// Deconstructed parts of a `Connection`. +/// +/// This allows taking apart a `Connection` at a later time, in order to +/// reclaim the IO object, and additional related pieces. +#[derive(Debug)] +pub struct Parts { + /// The original IO object used in the handshake. + pub io: T, + /// A buffer of bytes that have been read but not processed as HTTP. + /// + /// If the client sent additional bytes after its last request, and + /// this connection "ended" with an upgrade, the read buffer will contain + /// those bytes. + /// + /// You will want to check for any existing bytes if you plan to continue + /// communicating on the IO object. + pub read_buf: Bytes, + /// The `Service` used to serve this connection. + pub service: S, + _inner: (), +} + +// ===== impl Connection ===== + +impl fmt::Debug for Connection +where + S: HttpService, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still + /// pending. If called after `Connection::poll` has resolved, this does + /// nothing. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + self.conn.disable_keep_alive(); + } + + /// Return the inner IO object, and additional information. + /// + /// If the IO object has been "rewound" the io will not contain those bytes rewound. + /// This should only be called after `poll_without_shutdown` signals + /// that the connection is "done". Otherwise, it may not have finished + /// flushing all necessary HTTP bytes. + /// + /// # Panics + /// This method will panic if this connection is using an h2 protocol. + pub fn into_parts(self) -> Parts { + let (io, read_buf, dispatch) = self.conn.into_inner(); + Parts { + io, + read_buf, + service: dispatch.into_service(), + _inner: (), + } + } + + /// Poll the connection for completion, but without calling `shutdown` + /// on the underlying IO. + /// + /// This is useful to allow running a connection while doing an HTTP + /// upgrade. Once the upgrade is completed, the connection would be "done", + /// but it is not desired to actually shutdown the IO object. Instead you + /// would take it back using `into_parts`. + pub fn poll_without_shutdown(&mut self, cx: &mut task::Context<'_>) -> Poll> + where + S: Unpin, + S::Future: Unpin, + B: Unpin, + { + self.conn.poll_without_shutdown(cx) + } + + /// Prevent shutdown of the underlying IO object at the end of service the request, + /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`. + /// + /// # Error + /// + /// This errors if the underlying connection protocol is not HTTP/1. 
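// Illustrative sketch (editor's example, not part of this patch): serving one
// request with the backported `server::conn::http1::Builder` and then
// reclaiming the socket via `without_shutdown`, e.g. for a CONNECT-style
// tunnel. Assumes the `backports`, `http1` and `server` features plus a tokio
// runtime.
use std::convert::Infallible;
use std::future;

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;

async fn tunnel(stream: TcpStream) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // `future::ready` (rather than an async block) keeps the service future
    // `Unpin`, which `without_shutdown` requires.
    let conn = http1::Builder::new().serve_connection(
        stream,
        service_fn(|_req: Request<Body>| {
            future::ready(Ok::<_, Infallible>(Response::new(Body::empty())))
        }),
    );

    // Drive the exchange without shutting the socket down, then take the IO
    // object (plus any bytes read past the request) back.
    let parts = conn.without_shutdown().await?;
    let _leftover = parts.read_buf;
    let mut io = parts.io;
    io.write_all(b"tunnel established").await?;
    Ok(())
}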
+ pub fn without_shutdown(self) -> impl Future>> + where + S: Unpin, + S::Future: Unpin, + B: Unpin, + { + let mut zelf = Some(self); + futures_util::future::poll_fn(move |cx| { + ready!(zelf.as_mut().unwrap().conn.poll_without_shutdown(cx))?; + Poll::Ready(Ok(zelf.take().unwrap().into_parts())) + }) + } + + /// Enable this connection to support higher-level HTTP upgrades. + /// + /// See [the `upgrade` module](crate::upgrade) for more. + pub fn with_upgrades(self) -> upgrades::UpgradeableConnection + where + I: Send, + { + upgrades::UpgradeableConnection { inner: Some(self) } + } +} + +impl Future for Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.conn).poll(cx)) { + Ok(done) => { + match done { + proto::Dispatched::Shutdown => {} + proto::Dispatched::Upgrade(pending) => { + // With no `Send` bound on `I`, we can't try to do + // upgrades here. In case a user was trying to use + // `Body::on_upgrade` with this API, send a special + // error letting them know about that. + pending.manual(); + } + }; + return Poll::Ready(Ok(())); + } + Err(e) => Poll::Ready(Err(e)), + } + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Create a new connection builder. + pub fn new() -> Self { + Self { + h1_half_close: false, + h1_keep_alive: true, + h1_title_case_headers: false, + h1_preserve_header_case: false, + h1_header_read_timeout: None, + h1_writev: None, + max_buf_size: None, + pipeline_flush: false, + } + } + /// Set whether HTTP/1 connections should support half-closures. + /// + /// Clients can chose to shutdown their write-side while waiting + /// for the server to respond. Setting this to `true` will + /// prevent closing the connection immediately if `read` + /// detects an EOF in the middle of a request. + /// + /// Default is `false`. + pub fn half_close(&mut self, val: bool) -> &mut Self { + self.h1_half_close = val; + self + } + + /// Enables or disables HTTP/1 keep-alive. + /// + /// Default is true. + pub fn keep_alive(&mut self, val: bool) -> &mut Self { + self.h1_keep_alive = val; + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Default is false. + pub fn title_case_headers(&mut self, enabled: bool) -> &mut Self { + self.h1_title_case_headers = enabled; + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Request`. It will also look for and use + /// such an extension in any provided `Response`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Default is false. + pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Self { + self.h1_preserve_header_case = enabled; + self + } + + /// Set a timeout for reading client request headers. If a client does not + /// transmit the entire header within this time, the connection is closed. + /// + /// Default is None. 
+ pub fn header_read_timeout(&mut self, read_timeout: Duration) -> &mut Self { + self.h1_header_read_timeout = Some(read_timeout); + self + } + + /// Set whether HTTP/1 connections should try to use vectored writes, + /// or always flatten into a single buffer. + /// + /// Note that setting this to false may mean more copies of body data, + /// but may also improve performance when an IO transport doesn't + /// support vectored writes well, such as most TLS implementations. + /// + /// Setting this to true will force hyper to use queued strategy + /// which may eliminate unnecessary cloning on some TLS backends + /// + /// Default is `auto`. In this mode hyper will try to guess which + /// mode to use + pub fn writev(&mut self, val: bool) -> &mut Self { + self.h1_writev = Some(val); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. + pub fn max_buf_size(&mut self, max: usize) -> &mut Self { + assert!( + max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE, + "the max_buf_size cannot be smaller than the minimum that h1 specifies." + ); + self.max_buf_size = Some(max); + self + } + + /// Aggregates flushes to better support pipelined responses. + /// + /// Experimental, may have bugs. + /// + /// Default is false. + pub fn pipeline_flush(&mut self, enabled: bool) -> &mut Self { + self.pipeline_flush = enabled; + self + } + + // /// Set the timer used in background tasks. + // pub fn timer(&mut self, timer: M) -> &mut Self + // where + // M: Timer + Send + Sync + 'static, + // { + // self.timer = Time::Timer(Arc::new(timer)); + // self + // } + + /// Bind a connection together with a [`Service`](crate::service::Service). + /// + /// This returns a Future that must be polled in order for HTTP to be + /// driven on the connection. 
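// Illustrative sketch (editor's example, not part of this patch): typical knob
// configuration on the backported `http1::Builder` before binding it to a
// connection. Assumes the `backports` and `http1` features.
use std::time::Duration;

use hyper::server::conn::http1;

fn configured_http1() -> http1::Builder {
    let mut builder = http1::Builder::new();
    builder
        .half_close(true)
        .keep_alive(true)
        // Close connections whose clients stall while sending the header.
        .header_read_timeout(Duration::from_secs(30))
        // Must be at least 8192, otherwise `max_buf_size` panics.
        .max_buf_size(64 * 1024);
    builder
}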
+ /// + /// # Example + /// + /// ``` + /// # use hyper::{Body as Incoming, Request, Response}; + /// # use hyper::service::Service; + /// # use hyper::server::conn::http1::Builder; + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # async fn run(some_io: I, some_service: S) + /// # where + /// # I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + /// # S: Service, Response=hyper::Response> + Send + 'static, + /// # S::Error: Into>, + /// # S::Future: Send, + /// # { + /// let http = Builder::new(); + /// let conn = http.serve_connection(some_io, some_service); + /// + /// if let Err(e) = conn.await { + /// eprintln!("server connection error: {}", e); + /// } + /// # } + /// # fn main() {} + /// ``` + pub fn serve_connection(&self, io: I, service: S) -> Connection + where + S: HttpService, + S::Error: Into>, + S::ResBody: 'static, + ::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + { + let mut conn = proto::Conn::new(io); + if !self.h1_keep_alive { + conn.disable_keep_alive(); + } + if self.h1_half_close { + conn.set_allow_half_close(); + } + if self.h1_title_case_headers { + conn.set_title_case_headers(); + } + if self.h1_preserve_header_case { + conn.set_preserve_header_case(); + } + if let Some(header_read_timeout) = self.h1_header_read_timeout { + conn.set_http1_header_read_timeout(header_read_timeout); + } + if let Some(writev) = self.h1_writev { + if writev { + conn.set_write_strategy_queue(); + } else { + conn.set_write_strategy_flatten(); + } + } + conn.set_flush_pipeline(self.pipeline_flush); + if let Some(max) = self.max_buf_size { + conn.set_max_buf_size(max); + } + let sd = proto::h1::dispatch::Server::new(service); + let proto = proto::h1::Dispatcher::new(sd, conn); + Connection { conn: proto } + } +} + +mod upgrades { + use crate::upgrade::Upgraded; + + use super::*; + + // A future binding a connection with a Service with Upgrade support. + // + // This type is unnameable outside the crate. + #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + pub struct UpgradeableConnection + where + S: HttpService, + { + pub(super) inner: Option>, + } + + impl UpgradeableConnection + where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, + { + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + Pin::new(self.inner.as_mut().unwrap()).graceful_shutdown() + } + } + + impl Future for UpgradeableConnection + where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + B: Body + 'static, + B::Error: Into>, + { + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.inner.as_mut().unwrap().conn).poll(cx)) { + Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())), + Ok(proto::Dispatched::Upgrade(pending)) => { + let (io, buf, _) = self.inner.take().unwrap().conn.into_inner(); + pending.fulfill(Upgraded::new(io, buf)); + Poll::Ready(Ok(())) + } + Err(e) => Poll::Ready(Err(e)), + } + } + } +} diff --git a/src/server/conn/http2.rs b/src/server/conn/http2.rs new file mode 100644 index 0000000000..978c646e10 --- /dev/null +++ b/src/server/conn/http2.rs @@ -0,0 +1,257 @@ +//! 
HTTP/2 Server Connections + +use std::error::Error as StdError; +use std::fmt; +use std::time::Duration; + +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::body::{Body as IncomingBody, HttpBody as Body}; +use crate::common::exec::ConnStreamExec; +use crate::common::{task, Future, Pin, Poll, Unpin}; +use crate::proto; +use crate::service::HttpService; + +pin_project! { + /// A future binding an HTTP/2 connection with a Service. + /// + /// Polling this future will drive HTTP forward. + #[must_use = "futures do nothing unless polled"] + pub struct Connection + where + S: HttpService, + { + conn: proto::h2::Server, + } +} + +/// A configuration builder for HTTP/2 server connections. +#[derive(Clone, Debug)] +pub struct Builder { + exec: E, + h2_builder: proto::h2::server::Config, +} + +// ===== impl Connection ===== + +impl fmt::Debug for Connection +where + S: HttpService, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Connection").finish() + } +} + +impl Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + B: Body + 'static, + B::Error: Into>, + E: ConnStreamExec, +{ + /// Start a graceful shutdown process for this connection. + /// + /// This `Connection` should continue to be polled until shutdown + /// can finish. + /// + /// # Note + /// + /// This should only be called while the `Connection` future is still + /// pending. If called after `Connection::poll` has resolved, this does + /// nothing. + pub fn graceful_shutdown(mut self: Pin<&mut Self>) { + self.conn.graceful_shutdown(); + } +} + +impl Future for Connection +where + S: HttpService, + S::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin + 'static, + B: Body + 'static, + B::Error: Into>, + E: ConnStreamExec, +{ + type Output = crate::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match ready!(Pin::new(&mut self.conn).poll(cx)) { + Ok(_done) => { + //TODO: the proto::h2::Server no longer needs to return + //the Dispatched enum + Poll::Ready(Ok(())) + } + Err(e) => Poll::Ready(Err(e)), + } + } +} + +// ===== impl Builder ===== + +impl Builder { + /// Create a new connection builder. + /// + /// This starts with the default options, and an executor. + pub fn new(exec: E) -> Self { + Self { + exec: exec, + h2_builder: Default::default(), + } + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + pub fn initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_stream_window_size = sz; + } + self + } + + /// Sets the max connection-level flow control for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn initial_connection_window_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.adaptive_window = false; + self.h2_builder.initial_conn_window_size = sz; + } + self + } + + /// Sets whether to use an adaptive flow control. + /// + /// Enabling this will override the limits set in + /// `initial_stream_window_size` and + /// `initial_connection_window_size`. 
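// Illustrative sketch (editor's example, not part of this patch): choosing
// between fixed and adaptive HTTP/2 flow-control windows on the backported
// builder. Assumes the `backports` and `http2` features.
use hyper::server::conn::http2;

fn tune_flow_control<E>(builder: &mut http2::Builder<E>) {
    // Either pin the windows explicitly...
    builder
        .initial_stream_window_size(1024 * 1024)
        .initial_connection_window_size(4 * 1024 * 1024);
    // ...or let hyper size them from observed bandwidth/latency, which
    // overrides the fixed values above.
    builder.adaptive_window(true);
}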
+ pub fn adaptive_window(&mut self, enabled: bool) -> &mut Self { + use proto::h2::SPEC_WINDOW_SIZE; + + self.h2_builder.adaptive_window = enabled; + if enabled { + self.h2_builder.initial_conn_window_size = SPEC_WINDOW_SIZE; + self.h2_builder.initial_stream_window_size = SPEC_WINDOW_SIZE; + } + self + } + + /// Sets the maximum frame size to use for HTTP2. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + pub fn max_frame_size(&mut self, sz: impl Into>) -> &mut Self { + if let Some(sz) = sz.into() { + self.h2_builder.max_frame_size = sz; + } + self + } + + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 + /// connections. + /// + /// Default is no limit (`std::u32::MAX`). Passing `None` will do nothing. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_MAX_CONCURRENT_STREAMS + pub fn max_concurrent_streams(&mut self, max: impl Into>) -> &mut Self { + self.h2_builder.max_concurrent_streams = max.into(); + self + } + + /// Sets an interval for HTTP2 Ping frames should be sent to keep a + /// connection alive. + /// + /// Pass `None` to disable HTTP2 keep-alive. + /// + /// Default is currently disabled. + /// + /// # Cargo Feature + /// + pub fn keep_alive_interval(&mut self, interval: impl Into>) -> &mut Self { + self.h2_builder.keep_alive_interval = interval.into(); + self + } + + /// Sets a timeout for receiving an acknowledgement of the keep-alive ping. + /// + /// If the ping is not acknowledged within the timeout, the connection will + /// be closed. Does nothing if `keep_alive_interval` is disabled. + /// + /// Default is 20 seconds. + /// + /// # Cargo Feature + /// + pub fn keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout = timeout; + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently ~400KB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + pub fn max_send_buf_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.h2_builder.max_send_buffer_size = max; + self + } + + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + pub fn enable_connect_protocol(&mut self) -> &mut Self { + self.h2_builder.enable_connect_protocol = true; + self + } + + /// Sets the max size of received header frames. + /// + /// Default is currently ~16MB, but may change. + pub fn max_header_list_size(&mut self, max: u32) -> &mut Self { + self.h2_builder.max_header_list_size = max; + self + } + + // /// Set the timer used in background tasks. + // pub fn timer(&mut self, timer: M) -> &mut Self + // where + // M: Timer + Send + Sync + 'static, + // { + // self.timer = Time::Timer(Arc::new(timer)); + // self + // } + + /// Bind a connection together with a [`Service`](crate::service::Service). + /// + /// This returns a Future that must be polled in order for HTTP to be + /// driven on the connection. 
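// Illustrative sketch (editor's example, not part of this patch): serving a
// connection with the backported `http2::Builder`, which needs an executor for
// its per-stream tasks. Assumes the `backports` and `http2` features and a
// tokio runtime.
use std::convert::Infallible;
use std::time::Duration;

use hyper::server::conn::http2;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpStream;

#[derive(Clone)]
struct TokioExecutor;

impl<F> hyper::rt::Executor<F> for TokioExecutor
where
    F: std::future::Future + Send + 'static,
    F::Output: Send + 'static,
{
    fn execute(&self, fut: F) {
        tokio::spawn(fut);
    }
}

async fn serve_h2(stream: TcpStream) -> hyper::Result<()> {
    let mut builder = http2::Builder::new(TokioExecutor);
    builder
        .keep_alive_interval(Duration::from_secs(30))
        .keep_alive_timeout(Duration::from_secs(20))
        .max_concurrent_streams(256);

    builder
        .serve_connection(
            stream,
            service_fn(|_req: Request<Body>| async {
                Ok::<_, Infallible>(Response::new(Body::from("ok")))
            }),
        )
        .await
}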
+ pub fn serve_connection(&self, io: I, service: S) -> Connection + where + S: HttpService, + S::Error: Into>, + Bd: Body + 'static, + Bd::Error: Into>, + I: AsyncRead + AsyncWrite + Unpin, + E: ConnStreamExec, + { + let proto = proto::h2::Server::new(io, service, &self.h2_builder, self.exec.clone()); + Connection { conn: proto } + } +} diff --git a/src/server/mod.rs b/src/server/mod.rs index a97944f518..65eb7063e5 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -91,6 +91,7 @@ //! use std::net::SocketAddr; //! use hyper::{Body, Request, Response, Server}; //! use hyper::service::{make_service_fn, service_fn}; +//! # #[cfg(feature = "runtime")] //! use hyper::server::conn::AddrStream; //! //! #[derive(Clone)] @@ -149,7 +150,6 @@ pub mod accept; pub mod conn; -mod server; #[cfg(feature = "tcp")] mod tcp; @@ -158,7 +158,16 @@ pub use self::server::Server; cfg_feature! { #![any(feature = "http1", feature = "http2")] + #[cfg_attr(feature = "deprecated", allow(deprecated))] + pub(crate) mod server; pub use self::server::Builder; mod shutdown; } + +cfg_feature! { + #![not(any(feature = "http1", feature = "http2"))] + + mod server_stub; + use server_stub as server; +} diff --git a/src/server/server.rs b/src/server/server.rs index c48582c7fd..c90eac3e53 100644 --- a/src/server/server.rs +++ b/src/server/server.rs @@ -1,33 +1,31 @@ +use std::error::Error as StdError; use std::fmt; #[cfg(feature = "tcp")] use std::net::{SocketAddr, TcpListener as StdTcpListener}; -#[cfg(any(feature = "tcp", feature = "http1"))] -use std::time::Duration; -#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] -use super::tcp::AddrIncoming; -use crate::common::exec::Exec; +#[cfg(feature = "tcp")] +use std::time::Duration; -cfg_feature! { - #![any(feature = "http1", feature = "http2")] +use pin_project_lite::pin_project; - use std::error::Error as StdError; +use tokio::io::{AsyncRead, AsyncWrite}; +use tracing::trace; - use pin_project_lite::pin_project; - use tokio::io::{AsyncRead, AsyncWrite}; +use super::accept::Accept; +#[cfg(all(feature = "tcp"))] +use super::tcp::AddrIncoming; +use crate::body::{Body, HttpBody}; +use crate::common::exec::Exec; +use crate::common::exec::{ConnStreamExec, NewSvcExec}; +use crate::common::{task, Future, Pin, Poll, Unpin}; +// Renamed `Http` as `Http_` for now so that people upgrading don't see an +// error that `hyper::server::Http` is private... +use super::conn::{Connection, Http as Http_, UpgradeableConnection}; +use super::shutdown::{Graceful, GracefulWatcher}; +use crate::service::{HttpService, MakeServiceRef}; - use super::accept::Accept; - use crate::body::{Body, HttpBody}; - use crate::common::{task, Future, Pin, Poll, Unpin}; - use crate::common::exec::{ConnStreamExec, NewSvcExec}; - // Renamed `Http` as `Http_` for now so that people upgrading don't see an - // error that `hyper::server::Http` is private... - use super::conn::{Http as Http_, NoopWatcher, SpawnAll}; - use super::shutdown::{Graceful, GracefulWatcher}; - use crate::service::{HttpService, MakeServiceRef}; -} +use self::new_svc::NewSvcTask; -#[cfg(any(feature = "http1", feature = "http2"))] pin_project! { /// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. /// @@ -37,21 +35,14 @@ pin_project! { /// `Executor`. pub struct Server { #[pin] - spawn_all: SpawnAll, + incoming: I, + make_service: S, + protocol: Http_, } } -/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. 
-/// -/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful. -#[cfg(not(any(feature = "http1", feature = "http2")))] -pub struct Server { - _marker: std::marker::PhantomData<(I, S, E)>, -} - /// A builder for a [`Server`](Server). #[derive(Debug)] -#[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Builder { incoming: I, @@ -60,7 +51,6 @@ pub struct Builder { // ===== impl Server ===== -#[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Server { /// Starts a [`Builder`](Builder) with the provided incoming stream. @@ -72,47 +62,48 @@ impl Server { } } -cfg_feature! { - #![all(feature = "tcp", any(feature = "http1", feature = "http2"))] - - impl Server { - /// Binds to the provided address, and returns a [`Builder`](Builder). - /// - /// # Panics - /// - /// This method will panic if binding to the address fails. For a method - /// to bind to an address and return a `Result`, see `Server::try_bind`. - pub fn bind(addr: &SocketAddr) -> Builder { - let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { - panic!("error binding to {}: {}", addr, e); - }); - Server::builder(incoming) - } +#[cfg(feature = "tcp")] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))) +)] +impl Server { + /// Binds to the provided address, and returns a [`Builder`](Builder). + /// + /// # Panics + /// + /// This method will panic if binding to the address fails. For a method + /// to bind to an address and return a `Result`, see `Server::try_bind`. + pub fn bind(addr: &SocketAddr) -> Builder { + let incoming = AddrIncoming::new(addr).unwrap_or_else(|e| { + panic!("error binding to {}: {}", addr, e); + }); + Server::builder(incoming) + } - /// Tries to bind to the provided address, and returns a [`Builder`](Builder). - pub fn try_bind(addr: &SocketAddr) -> crate::Result> { - AddrIncoming::new(addr).map(Server::builder) - } + /// Tries to bind to the provided address, and returns a [`Builder`](Builder). + pub fn try_bind(addr: &SocketAddr) -> crate::Result> { + AddrIncoming::new(addr).map(Server::builder) + } - /// Create a new instance from a `std::net::TcpListener` instance. - pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { - AddrIncoming::from_std(listener).map(Server::builder) - } + /// Create a new instance from a `std::net::TcpListener` instance. + pub fn from_tcp(listener: StdTcpListener) -> Result, crate::Error> { + AddrIncoming::from_std(listener).map(Server::builder) } } -cfg_feature! { - #![all(feature = "tcp", any(feature = "http1", feature = "http2"))] - - impl Server { - /// Returns the local address that this server is bound to. - pub fn local_addr(&self) -> SocketAddr { - self.spawn_all.local_addr() - } +#[cfg(feature = "tcp")] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))) +)] +impl Server { + /// Returns the local address that this server is bound to. + pub fn local_addr(&self) -> SocketAddr { + self.incoming.local_addr() } } -#[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Server where @@ -124,7 +115,6 @@ where B: HttpBody + 'static, B::Error: Into>, E: ConnStreamExec<>::Future, B>, - E: NewSvcExec, { /// Prepares a server to handle graceful shutdown when the provided future /// completes. 
@@ -165,12 +155,57 @@ where pub fn with_graceful_shutdown(self, signal: F) -> Graceful where F: Future, + E: NewSvcExec, { - Graceful::new(self.spawn_all, signal) + Graceful::new(self, signal) + } + + fn poll_next_( + self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> Poll>>> { + let me = self.project(); + match ready!(me.make_service.poll_ready_ref(cx)) { + Ok(()) => (), + Err(e) => { + trace!("make_service closed"); + return Poll::Ready(Some(Err(crate::Error::new_user_make_service(e)))); + } + } + + if let Some(item) = ready!(me.incoming.poll_accept(cx)) { + let io = item.map_err(crate::Error::new_accept)?; + let new_fut = me.make_service.make_service_ref(&io); + Poll::Ready(Some(Ok(Connecting { + future: new_fut, + io: Some(io), + protocol: me.protocol.clone(), + }))) + } else { + Poll::Ready(None) + } + } + + pub(super) fn poll_watch( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + watcher: &W, + ) -> Poll> + where + E: NewSvcExec, + W: Watcher, + { + loop { + if let Some(connecting) = ready!(self.as_mut().poll_next_(cx)?) { + let fut = NewSvcTask::new(connecting, watcher.clone()); + self.as_mut().project().protocol.exec.execute_new_svc(fut); + } else { + return Poll::Ready(Ok(())); + } + } } } -#[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Future for Server where @@ -187,22 +222,20 @@ where type Output = crate::Result<()>; fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.project().spawn_all.poll_watch(cx, &NoopWatcher) + self.poll_watch(cx, &NoopWatcher) } } impl fmt::Debug for Server { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut st = f.debug_struct("Server"); - #[cfg(any(feature = "http1", feature = "http2"))] - st.field("listener", &self.spawn_all.incoming_ref()); + st.field("listener", &self.incoming); st.finish() } } // ===== impl Builder ===== -#[cfg(any(feature = "http1", feature = "http2"))] #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] impl Builder { /// Start a new builder, wrapping an incoming stream and low-level options. @@ -309,7 +342,7 @@ impl Builder { self } - /// Set a timeout for reading client request headers. If a client does not + /// Set a timeout for reading client request headers. If a client does not /// transmit the entire header within this time, the connection is closed. /// /// Default is None. @@ -340,6 +373,18 @@ impl Builder { self } + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to whatever the default in h2 is. As of v0.3.17, it is 20. + /// + /// See for more information. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_pending_accept_reset_streams(mut self, max: impl Into>) -> Self { + self.protocol.http2_max_pending_accept_reset_streams(max); + self + } + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// @@ -392,6 +437,16 @@ impl Builder { self } + /// Sets the max size of received header frames. + /// + /// Default is currently ~16MB, but may change. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_header_list_size(mut self, max: u32) -> Self { + self.protocol.http2_max_header_list_size(max); + self + } + /// Sets the [`SETTINGS_MAX_CONCURRENT_STREAMS`][spec] option for HTTP2 /// connections. 
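// Illustrative sketch (editor's example, not part of this patch): the same two
// new HTTP/2 limits exposed on the high-level `Server` builder. Assumes the
// `full` feature set and a tokio runtime.
use std::convert::Infallible;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

async fn run() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req: Request<Body>| async {
            Ok::<_, Infallible>(Response::new(Body::from("ok")))
        }))
    });

    Server::bind(&([127, 0, 0, 1], 3000).into())
        .http2_max_pending_accept_reset_streams(Some(20))
        .http2_max_header_list_size(16 * 1024)
        .serve(make_svc)
        .await
}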
/// @@ -502,7 +557,7 @@ impl Builder { /// } /// # } /// ``` - pub fn serve(self, new_service: S) -> Server + pub fn serve(self, make_service: S) -> Server where I: Accept, I::Error: Into>, @@ -514,24 +569,41 @@ impl Builder { E: NewSvcExec, E: ConnStreamExec<>::Future, B>, { - let serve = self.protocol.serve(self.incoming, new_service); - let spawn_all = serve.spawn_all(); - Server { spawn_all } + Server { + incoming: self.incoming, + make_service, + protocol: self.protocol.clone(), + } } } -#[cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))] +#[cfg(feature = "tcp")] +#[cfg_attr( + docsrs, + doc(cfg(all(feature = "tcp", any(feature = "http1", feature = "http2")))) +)] impl Builder { - /// Set whether TCP keepalive messages are enabled on accepted connections. + /// Set the duration to remain idle before sending TCP keepalive probes. /// - /// If `None` is specified, keepalive is disabled, otherwise the duration - /// specified will be the time to remain idle before sending TCP keepalive - /// probes. + /// If `None` is specified, keepalive is disabled. pub fn tcp_keepalive(mut self, keepalive: Option) -> Self { self.incoming.set_keepalive(keepalive); self } + /// Set the duration between two successive TCP keepalive retransmissions, + /// if acknowledgement to the previous keepalive transmission is not received. + pub fn tcp_keepalive_interval(mut self, interval: Option) -> Self { + self.incoming.set_keepalive_interval(interval); + self + } + + /// Set the number of retransmissions to be carried out before declaring that remote end is not available. + pub fn tcp_keepalive_retries(mut self, retries: Option) -> Self { + self.incoming.set_keepalive_retries(retries); + self + } + /// Set the value of `TCP_NODELAY` option for accepted connections. pub fn tcp_nodelay(mut self, enabled: bool) -> Self { self.incoming.set_nodelay(enabled); @@ -558,3 +630,182 @@ impl Builder { self } } + +// Used by `Server` to optionally watch a `Connection` future. +// +// The regular `hyper::Server` just uses a `NoopWatcher`, which does +// not need to watch anything, and so returns the `Connection` untouched. +// +// The `Server::with_graceful_shutdown` needs to keep track of all active +// connections, and signal that they start to shutdown when prompted, so +// it has a `GracefulWatcher` implementation to do that. +pub trait Watcher, E>: Clone { + type Future: Future>; + + fn watch(&self, conn: UpgradeableConnection) -> Self::Future; +} + +#[allow(missing_debug_implementations)] +#[derive(Copy, Clone)] +pub struct NoopWatcher; + +impl Watcher for NoopWatcher +where + I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + S: HttpService, + E: ConnStreamExec, + S::ResBody: 'static, + ::Error: Into>, +{ + type Future = UpgradeableConnection; + + fn watch(&self, conn: UpgradeableConnection) -> Self::Future { + conn + } +} + +// used by exec.rs +pub(crate) mod new_svc { + use std::error::Error as StdError; + use tokio::io::{AsyncRead, AsyncWrite}; + use tracing::debug; + + use super::{Connecting, Watcher}; + use crate::body::{Body, HttpBody}; + use crate::common::exec::ConnStreamExec; + use crate::common::{task, Future, Pin, Poll, Unpin}; + use crate::service::HttpService; + use pin_project_lite::pin_project; + + // This is a `Future` spawned to an `Executor` inside + // the `Server`. By being a nameable type, we can be generic over the + // user's `Service::Future`, and thus an `Executor` can execute it. 
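// Illustrative sketch (editor's example, not part of this patch): configuring
// the new TCP keepalive probe interval and retry count next to the existing
// idle time. Assumes the `tcp` feature; the interval/retries only take effect
// on platforms covered by the cfg gates in `server::tcp` later in this patch.
use std::convert::Infallible;
use std::time::Duration;

use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

async fn run() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service_fn(|_req: Request<Body>| async {
            Ok::<_, Infallible>(Response::new(Body::empty()))
        }))
    });

    Server::bind(&([0, 0, 0, 0], 8080).into())
        .tcp_keepalive(Some(Duration::from_secs(60)))
        .tcp_keepalive_interval(Some(Duration::from_secs(10)))
        .tcp_keepalive_retries(Some(5))
        .serve(make_svc)
        .await
}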
+ // + // Doing this allows for the server to conditionally require `Send` futures, + // depending on the `Executor` configured. + // + // Users cannot import this type, nor the associated `NewSvcExec`. Instead, + // a blanket implementation for `Executor` is sufficient. + + pin_project! { + #[allow(missing_debug_implementations)] + pub struct NewSvcTask, E, W: Watcher> { + #[pin] + state: State, + } + } + + pin_project! { + #[project = StateProj] + pub(super) enum State, E, W: Watcher> { + Connecting { + #[pin] + connecting: Connecting, + watcher: W, + }, + Connected { + #[pin] + future: W::Future, + }, + } + } + + impl, E, W: Watcher> NewSvcTask { + pub(super) fn new(connecting: Connecting, watcher: W) -> Self { + NewSvcTask { + state: State::Connecting { + connecting, + watcher, + }, + } + } + } + + impl Future for NewSvcTask + where + I: AsyncRead + AsyncWrite + Unpin + Send + 'static, + N: Future>, + NE: Into>, + S: HttpService, + B: HttpBody + 'static, + B::Error: Into>, + E: ConnStreamExec, + W: Watcher, + { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + // If it weren't for needing to name this type so the `Send` bounds + // could be projected to the `Serve` executor, this could just be + // an `async fn`, and much safer. Woe is me. + + let mut me = self.project(); + loop { + let next = { + match me.state.as_mut().project() { + StateProj::Connecting { + connecting, + watcher, + } => { + let res = ready!(connecting.poll(cx)); + let conn = match res { + Ok(conn) => conn, + Err(err) => { + let err = crate::Error::new_user_make_service(err); + debug!("connecting error: {}", err); + return Poll::Ready(()); + } + }; + let future = watcher.watch(conn.with_upgrades()); + State::Connected { future } + } + StateProj::Connected { future } => { + return future.poll(cx).map(|res| { + if let Err(err) = res { + debug!("connection error: {}", err); + } + }); + } + } + }; + + me.state.set(next); + } + } + } +} + +pin_project! { + /// A future building a new `Service` to a `Connection`. + /// + /// Wraps the future returned from `MakeService` into one that returns + /// a `Connection`. + #[must_use = "futures do nothing unless polled"] + #[derive(Debug)] + #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] + pub struct Connecting { + #[pin] + future: F, + io: Option, + protocol: Http_, + } +} + +impl Future for Connecting +where + I: AsyncRead + AsyncWrite + Unpin, + F: Future>, + S: HttpService, + B: HttpBody + 'static, + B::Error: Into>, + E: ConnStreamExec, +{ + type Output = Result, FE>; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + let mut me = self.project(); + let service = ready!(me.future.poll(cx))?; + let io = Option::take(&mut me.io).expect("polled after complete"); + Poll::Ready(Ok(me.protocol.serve_connection(io, service))) + } +} diff --git a/src/server/server_stub.rs b/src/server/server_stub.rs new file mode 100644 index 0000000000..87b1f5131f --- /dev/null +++ b/src/server/server_stub.rs @@ -0,0 +1,16 @@ +use std::fmt; + +use crate::common::exec::Exec; + +/// A listening HTTP server that accepts connections in both HTTP1 and HTTP2 by default. +/// +/// Needs at least one of the `http1` and `http2` features to be activated to actually be useful. 
+pub struct Server { + _marker: std::marker::PhantomData<(I, S, E)>, +} + +impl fmt::Debug for Server { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Server").finish() + } +} diff --git a/src/server/shutdown.rs b/src/server/shutdown.rs index 2277a40964..96937d0827 100644 --- a/src/server/shutdown.rs +++ b/src/server/shutdown.rs @@ -5,7 +5,8 @@ use tokio::io::{AsyncRead, AsyncWrite}; use tracing::debug; use super::accept::Accept; -use super::conn::{SpawnAll, UpgradeableConnection, Watcher}; +use super::conn::UpgradeableConnection; +use super::server::{Server, Watcher}; use crate::body::{Body, HttpBody}; use crate::common::drain::{self, Draining, Signal, Watch, Watching}; use crate::common::exec::{ConnStreamExec, NewSvcExec}; @@ -26,7 +27,7 @@ pin_project! { Running { drain: Option<(Signal, Watch)>, #[pin] - spawn_all: SpawnAll, + server: Server, #[pin] signal: F, }, @@ -35,12 +36,12 @@ pin_project! { } impl Graceful { - pub(super) fn new(spawn_all: SpawnAll, signal: F) -> Self { + pub(super) fn new(server: Server, signal: F) -> Self { let drain = Some(drain::channel()); Graceful { state: State::Running { drain, - spawn_all, + server, signal, }, } @@ -69,7 +70,7 @@ where match me.state.as_mut().project() { StateProj::Running { drain, - spawn_all, + server, signal, } => match signal.poll(cx) { Poll::Ready(()) => { @@ -81,7 +82,7 @@ where } Poll::Pending => { let watch = drain.as_ref().expect("drain channel").1.clone(); - return spawn_all.poll_watch(cx, &GracefulWatcher(watch)); + return server.poll_watch(cx, &GracefulWatcher(watch)); } }, StateProj::Draining { ref mut draining } => { diff --git a/src/server/tcp.rs b/src/server/tcp.rs index 013bdaea1d..3f937154be 100644 --- a/src/server/tcp.rs +++ b/src/server/tcp.rs @@ -2,6 +2,7 @@ use std::fmt; use std::io; use std::net::{SocketAddr, TcpListener as StdTcpListener}; use std::time::Duration; +use socket2::TcpKeepalive; use tokio::net::TcpListener; use tokio::time::Sleep; @@ -13,13 +14,103 @@ use crate::common::{task, Future, Pin, Poll}; pub use self::addr_stream::AddrStream; use super::accept::Accept; +#[derive(Default, Debug, Clone, Copy)] +struct TcpKeepaliveConfig { + time: Option, + interval: Option, + retries: Option, +} + +impl TcpKeepaliveConfig { + /// Converts into a `socket2::TcpKeealive` if there is any keep alive configuration. 
+ fn into_socket2(self) -> Option { + let mut dirty = false; + let mut ka = TcpKeepalive::new(); + if let Some(time) = self.time { + ka = ka.with_time(time); + dirty = true + } + if let Some(interval) = self.interval { + ka = Self::ka_with_interval(ka, interval, &mut dirty) + }; + if let Some(retries) = self.retries { + ka = Self::ka_with_retries(ka, retries, &mut dirty) + }; + if dirty { + Some(ka) + } else { + None + } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + ))] + fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_interval(interval) + } + + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + )))] + fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive { + ka // no-op as keepalive interval is not supported on this platform + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ))] + fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_retries(retries) + } + + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + )))] + fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive { + ka // no-op as keepalive retries is not supported on this platform + } +} + /// A stream of connections from binding to an address. #[must_use = "streams do nothing unless polled"] pub struct AddrIncoming { addr: SocketAddr, listener: TcpListener, sleep_on_errors: bool, - tcp_keepalive_timeout: Option, + tcp_keepalive_config: TcpKeepaliveConfig, tcp_nodelay: bool, timeout: Option>>, } @@ -52,7 +143,7 @@ impl AddrIncoming { listener, addr, sleep_on_errors: true, - tcp_keepalive_timeout: None, + tcp_keepalive_config: TcpKeepaliveConfig::default(), tcp_nodelay: false, timeout: None, }) @@ -63,13 +154,24 @@ impl AddrIncoming { self.addr } - /// Set whether TCP keepalive messages are enabled on accepted connections. + /// Set the duration to remain idle before sending TCP keepalive probes. /// - /// If `None` is specified, keepalive is disabled, otherwise the duration - /// specified will be the time to remain idle before sending TCP keepalive - /// probes. - pub fn set_keepalive(&mut self, keepalive: Option) -> &mut Self { - self.tcp_keepalive_timeout = keepalive; + /// If `None` is specified, keepalive is disabled. + pub fn set_keepalive(&mut self, time: Option) -> &mut Self { + self.tcp_keepalive_config.time = time; + self + } + + /// Set the duration between two successive TCP keepalive retransmissions, + /// if acknowledgement to the previous keepalive transmission is not received. + pub fn set_keepalive_interval(&mut self, interval: Option) -> &mut Self { + self.tcp_keepalive_config.interval = interval; + self + } + + /// Set the number of retransmissions to be carried out before declaring that remote end is not available. 
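// Illustrative sketch (editor's example, not part of this patch): applying the
// same keepalive settings through `AddrIncoming` directly before handing it to
// `Server::builder`. Assumes the `tcp` and `runtime` features.
use std::net::SocketAddr;
use std::time::Duration;

use hyper::server::conn::AddrIncoming;

fn keepalive_incoming(addr: &SocketAddr) -> hyper::Result<AddrIncoming> {
    let mut incoming = AddrIncoming::bind(addr)?;
    incoming.set_keepalive(Some(Duration::from_secs(60)));
    incoming.set_keepalive_interval(Some(Duration::from_secs(10)));
    incoming.set_keepalive_retries(Some(3));
    incoming.set_nodelay(true);
    // `hyper::Server::builder(incoming)` will then accept connections with
    // these socket options applied.
    Ok(incoming)
}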
+ pub fn set_keepalive_retries(&mut self, retries: Option) -> &mut Self { + self.tcp_keepalive_config.retries = retries; self } @@ -107,18 +209,18 @@ impl AddrIncoming { loop { match ready!(self.listener.poll_accept(cx)) { - Ok((socket, addr)) => { - if let Some(dur) = self.tcp_keepalive_timeout { - let socket = socket2::SockRef::from(&socket); - let conf = socket2::TcpKeepalive::new().with_time(dur); - if let Err(e) = socket.set_tcp_keepalive(&conf) { + Ok((socket, remote_addr)) => { + if let Some(tcp_keepalive) = &self.tcp_keepalive_config.into_socket2() { + let sock_ref = socket2::SockRef::from(&socket); + if let Err(e) = sock_ref.set_tcp_keepalive(tcp_keepalive) { trace!("error trying to set TCP keepalive: {}", e); } } if let Err(e) = socket.set_nodelay(self.tcp_nodelay) { trace!("error trying to set TCP nodelay: {}", e); } - return Poll::Ready(Ok(AddrStream::new(socket, addr))); + let local_addr = socket.local_addr()?; + return Poll::Ready(Ok(AddrStream::new(socket, remote_addr, local_addr))); } Err(e) => { // Connection errors can be ignored directly, continue by @@ -174,9 +276,12 @@ impl Accept for AddrIncoming { /// The timeout is useful to handle resource exhaustion errors like ENFILE /// and EMFILE. Otherwise, could enter into tight loop. fn is_connection_error(e: &io::Error) -> bool { - matches!(e.kind(), io::ErrorKind::ConnectionRefused - | io::ErrorKind::ConnectionAborted - | io::ErrorKind::ConnectionReset) + matches!( + e.kind(), + io::ErrorKind::ConnectionRefused + | io::ErrorKind::ConnectionAborted + | io::ErrorKind::ConnectionReset + ) } impl fmt::Debug for AddrIncoming { @@ -184,7 +289,7 @@ impl fmt::Debug for AddrIncoming { f.debug_struct("AddrIncoming") .field("addr", &self.addr) .field("sleep_on_errors", &self.sleep_on_errors) - .field("tcp_keepalive_timeout", &self.tcp_keepalive_timeout) + .field("tcp_keepalive_config", &self.tcp_keepalive_config) .field("tcp_nodelay", &self.tcp_nodelay) .finish() } @@ -207,14 +312,20 @@ mod addr_stream { #[pin] inner: TcpStream, pub(super) remote_addr: SocketAddr, + pub(super) local_addr: SocketAddr } } impl AddrStream { - pub(super) fn new(tcp: TcpStream, addr: SocketAddr) -> AddrStream { + pub(super) fn new( + tcp: TcpStream, + remote_addr: SocketAddr, + local_addr: SocketAddr, + ) -> AddrStream { AddrStream { inner: tcp, - remote_addr: addr, + remote_addr, + local_addr, } } @@ -224,6 +335,12 @@ mod addr_stream { self.remote_addr } + /// Returns the local address of this connection. 
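// Illustrative sketch (editor's example, not part of this patch): reading the
// new `AddrStream::local_addr()` alongside `remote_addr()` from a
// make-service, e.g. to report which listening address accepted a connection.
// Assumes the `tcp` feature and a tokio runtime.
use std::convert::Infallible;

use hyper::server::conn::AddrStream;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

async fn run() -> Result<(), hyper::Error> {
    let make_svc = make_service_fn(|conn: &AddrStream| {
        let local = conn.local_addr();
        let remote = conn.remote_addr();
        async move {
            Ok::<_, Infallible>(service_fn(move |_req: Request<Body>| async move {
                Ok::<_, Infallible>(Response::new(Body::from(format!(
                    "accepted on {} from {}",
                    local, remote
                ))))
            }))
        }
    });

    Server::bind(&([127, 0, 0, 1], 3000).into())
        .serve(make_svc)
        .await
}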
+ #[inline] + pub fn local_addr(&self) -> SocketAddr { + self.local_addr + } + /// Consumes the AddrStream and returns the underlying IO object #[inline] pub fn into_inner(self) -> TcpStream { @@ -300,3 +417,68 @@ mod addr_stream { } } } + +#[cfg(test)] +mod tests { + use std::time::Duration; + use crate::server::tcp::TcpKeepaliveConfig; + + #[test] + fn no_tcp_keepalive_config() { + assert!(TcpKeepaliveConfig::default().into_socket2().is_none()); + } + + #[test] + fn tcp_keepalive_time_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.time = Some(Duration::from_secs(60)); + if let Some(tcp_keepalive) = kac.into_socket2() { + assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)")); + } else { + panic!("test failed"); + } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + windows, + ))] + #[test] + fn tcp_keepalive_interval_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.interval = Some(Duration::from_secs(1)); + if let Some(tcp_keepalive) = kac.into_socket2() { + assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)")); + } else { + panic!("test failed"); + } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "linux", + target_os = "netbsd", + target_vendor = "apple", + ))] + #[test] + fn tcp_keepalive_retries_config() { + let mut kac = TcpKeepaliveConfig::default(); + kac.retries = Some(3); + if let Some(tcp_keepalive) = kac.into_socket2() { + assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)")); + } else { + panic!("test failed"); + } + } +} diff --git a/tests/client.rs b/tests/client.rs index 417e9bf2d9..2953313798 100644 --- a/tests/client.rs +++ b/tests/client.rs @@ -1,5 +1,6 @@ #![deny(warnings)] #![warn(rust_2018_idioms)] +#![cfg_attr(feature = "deprecated", allow(deprecated))] #[macro_use] extern crate matches; @@ -1121,10 +1122,11 @@ mod dispatch_impl { use http::Uri; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tokio::net::TcpStream; + use tokio_test::block_on; use super::support; use hyper::body::HttpBody; - use hyper::client::connect::{Connected, Connection, HttpConnector}; + use hyper::client::connect::{capture_connection, Connected, Connection, HttpConnector}; use hyper::Client; #[test] @@ -1533,6 +1535,37 @@ mod dispatch_impl { assert_eq!(connects.load(Ordering::Relaxed), 0); } + #[test] + fn capture_connection_on_client() { + let _ = pretty_env_logger::try_init(); + + let _rt = support::runtime(); + let connector = DebugConnector::new(); + + let client = Client::builder().build(connector); + + let server = TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = server.local_addr().unwrap(); + thread::spawn(move || { + let mut sock = server.accept().unwrap().0; + //drop(server); + sock.set_read_timeout(Some(Duration::from_secs(5))).unwrap(); + sock.set_write_timeout(Some(Duration::from_secs(5))) + .unwrap(); + let mut buf = [0; 4096]; + sock.read(&mut buf).expect("read 1"); + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .expect("write 1"); + }); + let mut req = Request::builder() + .uri(&*format!("http://{}/a", addr)) + .body(Body::empty()) + .unwrap(); + let captured_conn = capture_connection(&mut req); + block_on(client.request(req)).expect("200 OK"); + 
assert!(captured_conn.connection_metadata().is_some()); + } + #[test] fn client_keep_alive_0() { let _ = pretty_env_logger::try_init(); @@ -2149,6 +2182,7 @@ mod dispatch_impl { } } +#[allow(deprecated)] mod conn { use std::io::{self, Read, Write}; use std::net::{SocketAddr, TcpListener}; @@ -2214,6 +2248,131 @@ mod conn { future::join(server, client).await; } + #[deny(deprecated)] + #[cfg(feature = "backports")] + mod backports { + use super::*; + #[tokio::test] + async fn get() { + let _ = ::pretty_env_logger::try_init(); + let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0))) + .await + .unwrap(); + let addr = listener.local_addr().unwrap(); + + let server = async move { + let mut sock = listener.accept().await.unwrap().0; + let mut buf = [0; 4096]; + let n = sock.read(&mut buf).await.expect("read 1"); + + // Notably: + // - Just a path, since just a path was set + // - No host, since no host was set + let expected = "GET /a HTTP/1.1\r\n\r\n"; + assert_eq!(s(&buf[..n]), expected); + + sock.write_all(b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n") + .await + .unwrap(); + }; + + let client = async move { + let tcp = tcp_connect(&addr).await.expect("connect"); + let (mut client, conn) = conn::http1::handshake(tcp).await.expect("handshake"); + + tokio::task::spawn(async move { + conn.await.expect("http conn"); + }); + + let req: Request = Request::builder() + .uri("/a") + .body(Default::default()) + .unwrap(); + let mut res = client.send_request(req).await.expect("send_request"); + assert_eq!(res.status(), hyper::StatusCode::OK); + assert!(res.body_mut().next().await.is_none()); + }; + + future::join(server, client).await; + } + + #[tokio::test] + async fn http2_detect_conn_eof() { + use futures_util::future; + use hyper::service::{make_service_fn, service_fn}; + use hyper::{Response, Server}; + + let _ = pretty_env_logger::try_init(); + + let server = Server::bind(&([127, 0, 0, 1], 0).into()) + .http2_only(true) + .serve(make_service_fn(|_| async move { + Ok::<_, hyper::Error>(service_fn(|_req| { + future::ok::<_, hyper::Error>(Response::new(Body::empty())) + })) + })); + let addr = server.local_addr(); + let (shdn_tx, shdn_rx) = oneshot::channel(); + tokio::task::spawn(async move { + server + .with_graceful_shutdown(async move { + let _ = shdn_rx.await; + }) + .await + .expect("server") + }); + + struct TokioExec; + impl hyper::rt::Executor for TokioExec + where + F: std::future::Future + Send + 'static, + F::Output: Send + 'static, + { + fn execute(&self, fut: F) { + tokio::spawn(fut); + } + } + + let io = tcp_connect(&addr).await.expect("tcp connect"); + let (mut client, conn) = conn::http2::Builder::new(TokioExec) + .handshake::<_, Body>(io) + .await + .expect("http handshake"); + + tokio::task::spawn(async move { + conn.await.expect("client conn"); + }); + + // Sanity check that client is ready + future::poll_fn(|ctx| client.poll_ready(ctx)) + .await + .expect("client poll ready sanity"); + + let req = Request::builder() + .uri(format!("http://{}/", addr)) + .body(Body::empty()) + .expect("request builder"); + + client.send_request(req).await.expect("req1 send"); + + // Sanity check that client is STILL ready + future::poll_fn(|ctx| client.poll_ready(ctx)) + .await + .expect("client poll ready after"); + + // Trigger the server shutdown... + let _ = shdn_tx.send(()); + + // Allow time for graceful shutdown roundtrips... + tokio::time::sleep(Duration::from_millis(100)).await; + + // After graceful shutdown roundtrips, the client should be closed... 
+            future::poll_fn(|ctx| client.poll_ready(ctx))
+                .await
+                .expect_err("client should be closed");
+        }
+    }
+
     #[tokio::test]
     async fn get_obsolete_line_folding() {
         let _ = ::pretty_env_logger::try_init();
@@ -2271,6 +2430,62 @@ mod conn {
         future::join(server, client).await;
     }
 
+    #[tokio::test]
+    async fn get_custom_reason_phrase() {
+        let _ = ::pretty_env_logger::try_init();
+        let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+            .await
+            .unwrap();
+        let addr = listener.local_addr().unwrap();
+
+        let server = async move {
+            let mut sock = listener.accept().await.unwrap().0;
+            let mut buf = [0; 4096];
+            let n = sock.read(&mut buf).await.expect("read 1");
+
+            // Notably:
+            // - Just a path, since just a path was set
+            // - No host, since no host was set
+            let expected = "GET /a HTTP/1.1\r\n\r\n";
+            assert_eq!(s(&buf[..n]), expected);
+
+            sock.write_all(b"HTTP/1.1 200 Alright\r\nContent-Length: 0\r\n\r\n")
+                .await
+                .unwrap();
+        };
+
+        let client = async move {
+            let tcp = tcp_connect(&addr).await.expect("connect");
+            let (mut client, conn) = conn::handshake(tcp).await.expect("handshake");
+
+            tokio::task::spawn(async move {
+                conn.await.expect("http conn");
+            });
+
+            let req = Request::builder()
+                .uri("/a")
+                .body(Default::default())
+                .unwrap();
+            let mut res = client.send_request(req).await.expect("send_request");
+            assert_eq!(res.status(), hyper::StatusCode::OK);
+            assert_eq!(
+                res.extensions()
+                    .get::<hyper::ext::ReasonPhrase>()
+                    .expect("custom reason phrase is present")
+                    .as_bytes(),
+                &b"Alright"[..]
+            );
+            assert_eq!(res.headers().len(), 1);
+            assert_eq!(
+                res.headers().get(http::header::CONTENT_LENGTH).unwrap(),
+                "0"
+            );
+            assert!(res.body_mut().next().await.is_none());
+        };
+
+        future::join(server, client).await;
+    }
+
     #[test]
     fn incoming_content_length() {
         use hyper::body::HttpBody;
@@ -3058,6 +3273,44 @@ mod conn {
         done_tx.send(()).unwrap();
     }
 
+    #[tokio::test]
+    async fn test_body_panics() {
+        use hyper::body::HttpBody;
+
+        let _ = pretty_env_logger::try_init();
+
+        let listener = TkTcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))
+            .await
+            .unwrap();
+        let addr = listener.local_addr().unwrap();
+
+        // spawn a server that reads but doesn't write
+        tokio::spawn(async move {
+            let sock = listener.accept().await.unwrap().0;
+            drain_til_eof(sock).await.expect("server read");
+        });
+
+        let io = tcp_connect(&addr).await.expect("tcp connect");
+
+        let (mut client, conn) = conn::Builder::new().handshake(io).await.expect("handshake");
+
+        tokio::spawn(async move {
+            conn.await.expect("client conn shouldn't error");
+        });
+
+        let req = Request::post("/a")
+            .body(Body::from("baguette").map_data::<_, &[u8]>(|_| panic!("oopsie")))
+            .unwrap();
+
+        let error = client.send_request(req).await.unwrap_err();
+
+        assert!(error.is_user());
+        assert_eq!(
+            error.to_string(),
+            "dispatch task is gone: user code panicked"
+        );
+    }
+
     async fn drain_til_eof<T: AsyncRead + Unpin>(mut sock: T) -> io::Result<()> {
         let mut buf = [0u8; 1024];
         loop {
diff --git a/tests/server.rs b/tests/server.rs
index 82491ec408..191554da44 100644
--- a/tests/server.rs
+++ b/tests/server.rs
@@ -1,6 +1,8 @@
 #![deny(warnings)]
 #![deny(rust_2018_idioms)]
+#![cfg_attr(feature = "deprecated", allow(deprecated))]
 
+use std::convert::TryInto;
 use std::future::Future;
 use std::io::{self, Read, Write};
 use std::net::TcpListener as StdTcpListener;
@@ -383,6 +385,33 @@ mod response_body_lengths {
     }
 }
 
+#[test]
+fn get_response_custom_reason_phrase() {
+    let _ = pretty_env_logger::try_init();
+    let server = serve();
+    server.reply().reason_phrase("Cool");
+    let mut req = connect(server.addr());
+    req.write_all(
+        b"\
+        GET / HTTP/1.1\r\n\
+        Host: example.domain\r\n\
+        Connection: close\r\n\
+        \r\n\
+    ",
+    )
+    .unwrap();
+
+    let mut response = String::new();
+    req.read_to_string(&mut response).unwrap();
+
+    let mut lines = response.lines();
+    assert_eq!(lines.next(), Some("HTTP/1.1 200 Cool"));
+
+    let mut lines = lines.skip_while(|line| !line.is_empty());
+    assert_eq!(lines.next(), Some(""));
+    assert_eq!(lines.next(), None);
+}
+
 #[test]
 fn get_chunked_response_with_ka() {
     let foo_bar = b"foo bar baz";
@@ -945,9 +974,8 @@ async fn expect_continue_waits_for_body_poll() {
                 service_fn(|req| {
                     assert_eq!(req.headers()["expect"], "100-continue");
                     // But! We're never going to poll the body!
+                    drop(req);
                     tokio::time::sleep(Duration::from_millis(50)).map(move |_| {
-                        // Move and drop the req, so we don't auto-close
-                        drop(req);
                         Response::builder()
                             .status(StatusCode::BAD_REQUEST)
                             .body(hyper::Body::empty())
@@ -2421,6 +2449,26 @@ fn skips_content_length_and_body_for_304_responses() {
     assert_eq!(lines.next(), None);
 }
 
+#[test]
+fn no_implicit_zero_content_length_for_head_responses() {
+    let server = serve();
+    server.reply().status(hyper::StatusCode::OK).body([]);
+    let mut req = connect(server.addr());
+    req.write_all(
+        b"\
+        HEAD / HTTP/1.1\r\n\
+        Host: example.domain\r\n\
+        Connection: close\r\n\
+        \r\n\
+    ",
+    )
+    .unwrap();
+
+    let mut response = String::new();
+    req.read_to_string(&mut response).unwrap();
+    assert!(!response.contains("content-length:"));
+}
+
 #[tokio::test]
 async fn http2_keep_alive_detects_unresponsive_client() {
     let _ = pretty_env_logger::try_init();
@@ -2489,6 +2537,7 @@ async fn http2_keep_alive_with_responsive_client() {
     });
 
     let tcp = connect_async(addr).await;
+    #[allow(deprecated)]
     let (mut client, conn) = hyper::client::conn::Builder::new()
         .http2_only(true)
         .handshake::<_, Body>(tcp)
@@ -2593,6 +2642,146 @@ async fn http2_keep_alive_count_server_pings() {
         .expect("timed out waiting for pings");
 }
 
+// Tests for backported 1.0 APIs
+#[deny(deprecated)]
+#[cfg(feature = "backports")]
+mod backports {
+    use super::*;
+    use hyper::server::conn::{http1, http2};
+
+    #[tokio::test]
+    async fn http_connect() {
+        let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+        let addr = listener.local_addr().unwrap();
+
+        let (tx, rx) = oneshot::channel();
+
+        thread::spawn(move || {
+            let mut tcp = connect(&addr);
+            tcp.write_all(
+                b"\
+                CONNECT localhost:80 HTTP/1.1\r\n\
+                \r\n\
+                eagerly optimistic\
+            ",
+            )
+            .expect("write 1");
+            let mut buf = [0; 256];
+            tcp.read(&mut buf).expect("read 1");
+
+            let expected = "HTTP/1.1 200 OK\r\n";
+            assert_eq!(s(&buf[..expected.len()]), expected);
+            let _ = tx.send(());
+
+            let n = tcp.read(&mut buf).expect("read 2");
+            assert_eq!(s(&buf[..n]), "foo=bar");
+            tcp.write_all(b"bar=foo").expect("write 2");
+        });
+
+        let (socket, _) = listener.accept().await.unwrap();
+        let conn = http1::Builder::new().serve_connection(
+            socket,
+            service_fn(|_| {
+                // In 1.0 we would use `http_body_util::Empty::<Bytes>::new()` to construct
+                // an empty body
+                let res = Response::builder().status(200).body(Body::empty()).unwrap();
+                future::ready(Ok::<_, hyper::Error>(res))
+            }),
+        );
+
+        let parts = conn.without_shutdown().await.unwrap();
+        assert_eq!(parts.read_buf, "eagerly optimistic");
+
+        // wait so that we don't write until other side saw 101 response
+        rx.await.unwrap();
+
+        let mut io = parts.io;
+        io.write_all(b"foo=bar").await.unwrap();
+        let mut vec = vec![];
+        io.read_to_end(&mut vec).await.unwrap();
+        assert_eq!(vec, b"bar=foo");
+    }
+
+    #[tokio::test]
+    async fn h2_connect() {
+        let listener = tcp_bind(&"127.0.0.1:0".parse().unwrap()).unwrap();
+        let addr = listener.local_addr().unwrap();
+
+        let conn = connect_async(addr).await;
+
+        let (h2, connection) = h2::client::handshake(conn).await.unwrap();
+        tokio::spawn(async move {
+            connection.await.unwrap();
+        });
+        let mut h2 = h2.ready().await.unwrap();
+
+        async fn connect_and_recv_bread(
+            h2: &mut SendRequest<Bytes>,
+        ) -> (RecvStream, SendStream<Bytes>) {
+            let request = Request::connect("localhost").body(()).unwrap();
+            let (response, send_stream) = h2.send_request(request, false).unwrap();
+            let response = response.await.unwrap();
+            assert_eq!(response.status(), StatusCode::OK);
+
+            let mut body = response.into_body();
+            let bytes = body.data().await.unwrap().unwrap();
+            assert_eq!(&bytes[..], b"Bread?");
+            let _ = body.flow_control().release_capacity(bytes.len());
+
+            (body, send_stream)
+        }
+
+        tokio::spawn(async move {
+            let (mut recv_stream, mut send_stream) = connect_and_recv_bread(&mut h2).await;
+
+            send_stream.send_data("Baguette!".into(), true).unwrap();
+
+            assert!(recv_stream.data().await.unwrap().unwrap().is_empty());
+        });
+
+        // In 1.0 the `Body` struct is renamed to `IncomingBody`
+        let svc = service_fn(move |req: Request<Body>| {
+            let on_upgrade = hyper::upgrade::on(req);
+
+            tokio::spawn(async move {
+                let mut upgraded = on_upgrade.await.expect("on_upgrade");
+                upgraded.write_all(b"Bread?").await.unwrap();

+                let mut vec = vec![];
+                upgraded.read_to_end(&mut vec).await.unwrap();
+                assert_eq!(s(&vec), "Baguette!");
+
+                upgraded.shutdown().await.unwrap();
+            });
+
+            future::ok::<_, hyper::Error>(
+                // In 1.0 we would use `http_body_util::Empty::<Bytes>::new()` to construct
+                // an empty body
+                Response::builder().status(200).body(Body::empty()).unwrap(),
+            )
+        });
+
+        let (socket, _) = listener.accept().await.unwrap();
+        http2::Builder::new(TokioExecutor)
+            .serve_connection(socket, svc)
+            .await
+            .unwrap();
+    }
+
+    #[derive(Clone)]
+    /// An Executor that uses the tokio runtime.
+    pub struct TokioExecutor;
+
+    impl<F> hyper::rt::Executor<F> for TokioExecutor
+    where
+        F: std::future::Future + Send + 'static,
+        F::Output: Send + 'static,
+    {
+        fn execute(&self, fut: F) {
+            tokio::task::spawn(fut);
+        }
+    }
+}
 // -------------------------------------------------
 // the Server that is used to run all the tests with
 // -------------------------------------------------
@@ -2651,6 +2840,17 @@ impl<'a> ReplyBuilder<'a> {
         self
     }
 
+    fn reason_phrase(self, reason: &str) -> Self {
+        self.tx
+            .lock()
+            .unwrap()
+            .send(Reply::ReasonPhrase(
+                reason.as_bytes().try_into().expect("reason phrase"),
+            ))
+            .unwrap();
+        self
+    }
+
     fn version(self, version: hyper::Version) -> Self {
         self.tx
             .lock()
@@ -2724,6 +2924,7 @@ struct TestService {
 #[derive(Debug)]
 enum Reply {
     Status(hyper::StatusCode),
+    ReasonPhrase(hyper::ext::ReasonPhrase),
     Version(hyper::Version),
     Header(HeaderName, HeaderValue),
     Body(hyper::Body),
@@ -2779,6 +2980,9 @@ impl TestService {
             Reply::Status(s) => {
                 *res.status_mut() = s;
             }
+            Reply::ReasonPhrase(reason) => {
+                res.extensions_mut().insert(reason);
+            }
             Reply::Version(v) => {
                 *res.version_mut() = v;
             }