diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4adb2d78..b42d5e73 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -110,10 +110,10 @@ jobs: os: [macos-latest, windows-latest] steps: # if your project needs OpenSSL, uncomment this to fix Windows builds. - - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append - if: runner.os == 'Windows' - - run: vcpkg install openssl:x64-windows-static-md - if: runner.os == 'Windows' + # - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append + # if: runner.os == 'Windows' + # - run: vcpkg install openssl:x64-windows-static-md + # if: runner.os == 'Windows' - uses: actions/checkout@v4 with: submodules: true diff --git a/.github/workflows/tls.yml b/.github/workflows/tls.yml index 058bf965..9c86816b 100644 --- a/.github/workflows/tls.yml +++ b/.github/workflows/tls.yml @@ -29,4 +29,4 @@ jobs: - name: Run tests env: FAKTORY_URL_SECURE: tcp://localhost:17419 - run: cargo test --locked --features tls --test tls + run: cargo test --locked --features native_tls,rustls --test tls diff --git a/.gitignore b/.gitignore index e8da9172..0f735146 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ /target/ **/*.rs.bk perf.* +.vscode diff --git a/Cargo.lock b/Cargo.lock index 07d857dc..8505c38b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,21 @@ # It is not intended for manual editing. version = 3 +[[package]] +name = "addr2line" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6aa100a6f6f525226719f8de3f70076be4f4191801ebd92621450d1c51e9053d" + [[package]] name = "android-tzdata" version = "0.1.1" @@ -19,9 +34,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.12" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" +checksum = "bff2cf94a3dbe2d57cbd56485e1bd7436455058034d6c2d47be51d4e5e4bc6ab" dependencies = [ "anstyle", "anstyle-parse", @@ -33,85 +48,174 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ "windows-sys", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" 
+checksum = "0238ca56c96dfa37bdf7c373c8886dd591322500aceeeccdb2216fe06dc2f796" dependencies = [ "anstyle", "windows-sys", ] +[[package]] +name = "asn1-rs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e577111f9ca51289da894bcb4b17047737218c2e4477ea2fc36cd3922172062f" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits 0.2.14", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b9002415e8baa0177a3ae0946fb62ca6e9e470755717409134e44d8e0ae2cad" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.91", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.91", +] + +[[package]] +name = "async-stream" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dad5c83079eae9969be7fadefe640a1c566901f05ff91ab221de4b6f68d9507e" +dependencies = [ + "async-stream-impl", + "futures-core", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10f203db73a71dfa2fb6dd22763990fa26f3d2625a6da2da900d23b87d26be27" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.91", +] + +[[package]] +name = "async-trait" +version = "0.1.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.46", +] + [[package]] name = "autocfg" -version = "1.1.0" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "backtrace" +version = "0.3.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88fb5a785d6b44fd9d6700935608639af1b8356de1e55d5f7c2740f4faa15d82" +dependencies = [ + "addr2line", + "cc", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] [[package]] name = "bitflags" -version = "1.3.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +checksum = "f5cde24d1b2e2216a726368b2363a273739c91f4e3eb4e0dd12d672d396ad989" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813" [[package]] name = "block-buffer" -version = "0.10.4" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +checksum = "03588e54c62ae6d763e2a80090d50353b785795361b4ff5b3bf0a5097fc31c0b" dependencies = [ "generic-array", ] [[package]] -name = "bufstream" -version = "0.1.4" +name = "bumpalo" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" +checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" [[package]] -name = "bumpalo" -version = "3.15.0" +name = "bytes" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32a994c2b3ca201d9b263612a374263f05e7adde37c4707f693dcd375076d1f" +checksum = "ad1f8e949d755f9d79112b5bb46938e0ef9d3804a0b16dfab13aafcaa5f0fa72" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] +checksum = "e70cc2f62c6ce1868963827bd677764c62d07c3d9a3e1fb1177ee1a9ab199eb2" + +[[package]] +name = "cfg-if" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4c819a1287eb618df47cc647173c5c4c66ba19d888a6e50d605672aed3140de" [[package]] name = "cfg-if" @@ -121,43 +225,53 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" dependencies = [ "android-tzdata", "iana-time-zone", - "num-traits", + "num-traits 0.2.14", "serde", - "windows-targets", + "windows-targets 0.52.0", ] [[package]] name = "clap" -version = "4.5.1" +version = "4.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" +checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.1" +version = "4.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" +checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.0", + "strsim", ] [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" + +[[package]] +name = "codespan-reporting" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "c6ce42b8998a383572e0a802d859b1f00c79b7b7474e62fff88ee5c2845d9c13" +dependencies = [ + "termcolor", + "unicode-width", +] [[package]] name = "colorchoice" @@ -167,44 +281,93 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "core-foundation" -version = "0.9.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", ] [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" + +[[package]] 
+name = "core-foundation-sys" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "a4600d695eb3f6ce1cd44e6e291adceb2cc3ab12f20a33777ecd0bf6eba34e06" dependencies = [ "generic-array", - "typenum", +] + +[[package]] +name = "cxx" +version = "1.0.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c53d75fe543215ca091d792e13351dcb940842dd2829b2a2dd43ab4bd1a015" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbb7ed9eb5b6ed9942747aea961a4303b7a56c54f582ea8304cdae391d29d274" +dependencies = [ + "cc", + "codespan-reporting", + "lazy_static", + "proc-macro2", + "quote", + "scratch", + "syn 1.0.91", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca21461be76a23df4f63a2107a0bb406ef41548e635ff7edcbd1ab5a6bb997e2" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee8da0a2c0697647b5824844a5d2dedcd97a2d7b75e6e4d0b8dd183e4081e1cf" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.91", ] [[package]] name = "darling" -version = "0.20.6" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c376d08ea6aa96aafe61237c7200d1241cb177b7d3a542d791f2d118e9cbb955" +checksum = "f1a5d2e8b5a94b2261efb20e99a01255b9c5293797d69bbf04600567b2f9b8d7" dependencies = [ "darling_core", "darling_macro", @@ -212,109 +375,137 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.6" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33043dcd19068b8192064c704b3f83eb464f91f1ff527b44a4e2b08d9cdb8855" +checksum = "8f1c7d56716be82d9c6adb967cfe700955179ea88806e898483dad6987330a54" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", - "strsim 0.10.0", - "syn", + "strsim", + "syn 1.0.91", ] [[package]] name = "darling_macro" -version = "0.20.6" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5a91391accf613803c2a9bf9abccdbaa07c54b4244a5b64883f9c3c137c86be" +checksum = "64dd7e5a75a00cb6799ae9fbbfc3bba0134def6579a9e27564e72c839c837bed" dependencies = [ "darling_core", "quote", - "syn", + "syn 1.0.91", +] + +[[package]] +name = "data-encoding" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" + +[[package]] +name = "der-parser" +version = "8.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"42d4bc9b0db0a0df9ae64634ac5bdefb7afcb534e182275ca0beadbe486701c1" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits 0.2.14", + "rusticata-macros", ] [[package]] name = "derive_builder" -version = "0.20.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0350b5cb0331628a5916d6c5c0b72e97393b8b6b03b47a9284f4e7f5a405ffd7" +checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" -version = "0.20.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d48cda787f839151732d396ac69e3473923d54312c070ee21e9effcaa8ca0b1d" +checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" dependencies = [ "darling", "proc-macro2", "quote", - "syn", + "syn 1.0.91", ] [[package]] name = "derive_builder_macro" -version = "0.20.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206868b8242f27cecce124c19fd88157fbd0dd334df2587f36417bafbc85097b" +checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" dependencies = [ "derive_builder_core", - "syn", + "syn 1.0.91", ] [[package]] name = "digest" -version = "0.10.7" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +checksum = "8cb780dce4f9a8f5c087362b3a4595936b2019e7c8b30f2c3e9a7e94e6ae9837" dependencies = [ "block-buffer", "crypto-common", ] [[package]] -name = "errno" -version = "0.3.8" +name = "displaydoc" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "278ef1934318d524612205f69df005eea30ec10edf7913e500b5a527fce55bc0" dependencies = [ - "libc", - "windows-sys", + "proc-macro2", + "quote", + "syn 1.0.91", ] +[[package]] +name = "dtoa" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f" + [[package]] name = "faktory" version = "0.12.5" dependencies = [ - "bufstream", + "async-trait", "chrono", "clap", "derive_builder", "fnv", "hostname", - "libc", - "mockstream", "native-tls", + "num-bigint", + "oid-registry", "openssl", - "rand", + "pin-project", + "rand 0.8.0", + "rustls-pki-types", "serde", "serde_derive", "serde_json", "sha2", "thiserror", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tokio-test", "url", + "x509-parser", ] -[[package]] -name = "fastrand" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" - [[package]] name = "fnv" version = "1.0.7" @@ -323,33 +514,46 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foreign-types" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +checksum = "a21b40436003b2a1e22483c5ed6c3d25e755b6b3120f601cc22aa57e25dc9065" dependencies = [ "foreign-types-shared", ] [[package]] name = "foreign-types-shared" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" +checksum = "baa1839fc3c5487b5e129ea4f774e3fd84e6c4607127315521bc014a722ebc9e" [[package]] -name = "form_urlencoded" -version = "1.2.1" +name = "fuchsia-zircon" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "3b5365afd01fdf916e775a224e844f80b3b9710d0f4f00903e219e859474d7ae" dependencies = [ - "percent-encoding", + "bitflags 1.0.0", + "fuchsia-zircon-sys", ] +[[package]] +name = "fuchsia-zircon-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "069def9a0e5feb7e9120635f6ebad24d853a6affbb077fec84d0888316cf9ae6" + +[[package]] +name = "futures-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30f0ab78f035d7ed5d52689f4b05a56c15ad80097f1d860e644bdc9dba3831f2" + [[package]] name = "generic-array" -version = "0.14.7" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", "version_check", @@ -357,20 +561,26 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "wasi", ] +[[package]] +name = "gimli" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" + [[package]] name = "hostname" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +checksum = "01b1af8d6d068ba9de1c39c6ff0d879aed20f74873d4d3929a4535000bb07886" dependencies = [ "libc", "match_cfg", @@ -379,25 +589,26 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "0c17cc76786e99f8d2f055c11159e7f0091c42474dcc3189fbab96072e873e6d" dependencies = [ "android_system_properties", - "core-foundation-sys", + "core-foundation-sys 0.8.3", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows", ] [[package]] name = "iana-time-zone-haiku" -version = "0.1.2" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" dependencies = [ - "cc", + "cxx", + "cxx-build", ] [[package]] @@ -408,25 +619,32 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.5.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" dependencies = [ + "matches", "unicode-bidi", 
"unicode-normalization", ] [[package]] name = "itoa" -version = "1.0.10" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "91fd9dc2c587067de817fec4ad355e3818c3d893a78cab32a0a474c7a15bb8d5" + +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] @@ -439,21 +657,27 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" [[package]] -name = "linux-raw-sys" -version = "0.4.13" +name = "link-cplusplus" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "4dfb9f65d9966f6ca6522043978030b564f3291af987fbf1dd55b6a064ba1b36" +dependencies = [ + "cc", +] [[package]] name = "log" -version = "0.4.20" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f" +dependencies = [ + "cfg-if 0.1.2", +] [[package]] name = "match_cfg" @@ -462,16 +686,51 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] -name = "mockstream" -version = "0.0.3" +name = "matches" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e" + +[[package]] +name = "memchr" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bbe0c0c9d254b463b13734bc361d1423289547e052b1e77e5a77292496ba2e" +checksum = "e01e64d9017d18e7fc09d8e4fe0e28ff6931019e979fb8019319db7ca827f8a6" +dependencies = [ + "libc", +] + +[[package]] +name = "minimal-lexical" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6595bb28ed34f43c3fe088e48f6cfb2e033cab45f25a5384d5fdf564fbc8c4b2" + +[[package]] +name = "miniz_oxide" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +dependencies = [ + "libc", + "wasi", + "windows-sys", +] [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = 
"2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" dependencies = [ "lazy_static", "libc", @@ -485,29 +744,100 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nom" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffd9d26838a953b4af82cbeb9f1592c6798916983959be223a7124e992742c1" +dependencies = [ + "memchr", + "minimal-lexical", + "version_check", +] + +[[package]] +name = "num-bigint" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74e768dff5fb39a41b3bcd30bb25cf989706c90d028d1ad71971987aa309d535" +dependencies = [ + "autocfg", + "num-integer", + "num-traits 0.2.14", +] + +[[package]] +name = "num-integer" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +dependencies = [ + "autocfg", + "num-traits 0.2.14", +] + +[[package]] +name = "num-traits" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51eab148f171aefad295f8cece636fc488b9b392ef544da31ea4b8ef6b9e9c39" + [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] +[[package]] +name = "num_cpus" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30" +dependencies = [ + "libc", +] + +[[package]] +name = "num_threads" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71a1eb3a36534514077c1e079ada2fb170ef30c47d203aa6916138cf882ecd52" +dependencies = [ + "libc", +] + +[[package]] +name = "object" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" + +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" -version = "1.19.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" [[package]] name = "openssl" -version = "0.10.63" +version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15c9d69dd87a29568d4d017cfe8ec518706046a05184e5aea92d0af890b803c8" +checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" dependencies = [ - "bitflags 2.4.2", - "cfg-if", + "bitflags 2.2.1", + "cfg-if 1.0.0", "foreign-types", "libc", "once_cell", @@ -517,26 +847,26 @@ dependencies = [ [[package]] name = "openssl-macros" -version = "0.1.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.91", ] [[package]] name = "openssl-probe" -version = "0.1.5" 
+version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "756d49c8424483a3df3b5d735112b4da22109ced9a8294f1f5cdf80fb3810919" [[package]] name = "openssl-sys" -version = "0.9.99" +version = "0.9.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1bf214306098e4832460f797824c05d25aacdf896f64a985fb0fd992454ae" +checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" dependencies = [ "cc", "libc", @@ -546,27 +876,53 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba4f28a6faf4ffea762ba8f4baef48c61a6db348647c73095034041fc79dd954" + +[[package]] +name = "pin-project" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.46", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "2c516611246607d0c04186886dbb3a754368ef82c79e9827a802c6d836dd111c" [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "2de98502f212cfcea8d0bb305bd0f49d7ebdd75b64ba0a68f937d888f4e0d6db" dependencies = [ "unicode-ident", ] @@ -582,20 +938,31 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5f78082e6a6d042862611e9640cf20776185fee506cf6cf67e93c6225cee31" +dependencies = [ + "fuchsia-zircon", + "libc", +] + +[[package]] +name = "rand" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "a76330fb486679b4ace3670f117bbc9e16204005c4bde9c4bd372f45bed34f12" dependencies = [ "libc", "rand_chacha", "rand_core", + "rand_hc", ] [[package]] name = "rand_chacha" -version = "0.3.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ "ppv-lite86", "rand_core", @@ -603,106 +970,194 
@@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.4" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ "getrandom", ] [[package]] -name = "rustix" -version = "0.38.31" +name = "rand_hc" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" +dependencies = [ + "rand_core", +] + +[[package]] +name = "redox_syscall" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35a48131ab10dbeb17202bd1dcb9c9798963a58a50c9ec31640f237358832094" + +[[package]] +name = "remove_dir_all" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc5b3ce5d5ea144bb04ebd093a9e14e9765bcfec866aecda9b6dec43b3d1e24" +dependencies = [ + "winapi", +] + +[[package]] +name = "ring" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "fb9d44f9bf6b635117787f72416783eb7e4227aaf255e5ce739563d817176a7e" dependencies = [ - "bitflags 2.4.2", - "errno", + "cc", + "getrandom", "libc", - "linux-raw-sys", + "spin", + "untrusted", "windows-sys", ] [[package]] -name = "ryu" -version = "1.0.16" +name = "rustc-demangle" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "3058a43ada2c2d0b92b3ae38007a2d0fa5e9db971be260e0171408a4ff471c95" + +[[package]] +name = "rusticata-macros" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65c52377bb2288aa522a0c8208947fada1e0c76397f108cc08f57efe6077b50d" +dependencies = [ + "nom", +] + +[[package]] +name = "rustls" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bc238b76c51bbc449c55ffbc39d03772a057cc8cf783c49d4af4c2537b74a8b" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7673e0aa20ee4937c6aacfc12bb8341cfbf054cdd21df6bec5fd0629fe9339b" + +[[package]] +name = "rustls-webpki" +version = "0.102.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de2635c8bc2b88d367767c5de8ea1d8db9af3f6219eba28442242d9ab81d1b89" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "87f550b06b6cba9c8b8be3ee73f391990116bf527450d2556e9b9ce263b9a021" dependencies = [ - "windows-sys", + "lazy_static", + "winapi", ] +[[package]] +name = "scratch" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e114536316b51a5aa7a0e59fc49661fd263c5507dd08bd28de052e57626ce69" + [[package]] name = "security-framework" -version = "2.9.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "97bbedbe81904398b6ebb054b3e912f99d55807125790f3198ac990d98def5b0" dependencies = [ - "bitflags 1.3.2", + "bitflags 1.0.0", "core-foundation", - "core-foundation-sys", - "libc", + "core-foundation-sys 0.7.0", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "06fd2f23e31ef68dd2328cc383bd493142e46107a3a0e24f7d734e3f3b80fe4c" dependencies = [ - "core-foundation-sys", + "core-foundation-sys 0.7.0", "libc", ] [[package]] name = "serde" -version = "1.0.196" +version = "1.0.186" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "9f5db24220c009de9bd45e69fb2938f4b6d2df856aa9304ce377b3180f83b7c1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.186" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "5ad697f7e0b65af4983a4ce8f56ed5b357e8d3c36651bf6a7e13639c17b8e670" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.46", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "e9b1ec939469a124b27e208106550c38358ed4334d2b1b5b3825bc1ee37d946a" dependencies = [ - "itoa", - "ryu", + "dtoa", + "itoa 0.3.0", + "num-traits 0.1.32", "serde", ] [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "99c3bd8169c58782adad9290a9af5939994036b76187f7b4f0e6de91dbbfc0ec" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest", ] +[[package]] +name = "socket2" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "spin" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" + [[package]] name = "strsim" version = "0.10.0" @@ -710,104 +1165,237 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] -name = "strsim" -version = "0.11.0" +name = "subtle" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" + +[[package]] +name = "syn" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid 0.2.0", +] [[package]] name = "syn" -version = "2.0.49" +version = "2.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"915aea9e586f80826ee59f8453c1101f9d1c4b3964cd2460185ee8e299ada496" +checksum = "89456b690ff72fddcecf231caedbe615c59480c93358a93dfae7fc29e3ebbf0e" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "synstructure" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "affc27d5f1764f7487bafeb41e380664790716e38ba45d8487bddcc53e79f0f6" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.91", + "unicode-xid 0.1.0", +] + [[package]] name = "tempfile" -version = "3.10.0" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "47776f63b85777d984a50ce49d6b9e58826b6a3766a449fc95bc66cd5663c15b" dependencies = [ - "cfg-if", - "fastrand", - "rustix", - "windows-sys", + "libc", + "rand 0.4.1", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a52c023823933499250b43960b272e25336c6e2ab8684672edc34489f049ccdd" +dependencies = [ + "wincolor", ] [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.91", ] [[package]] -name = "tinyvec" -version = "1.6.0" +name = "time" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "004cbc98f30fa233c61a38bc77e96a9106e65c88f2d3bef182ae952027e5753d" +dependencies = [ + "itoa 1.0.1", + "libc", + "num_threads", + "time-macros", +] + +[[package]] +name = "time-macros" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25eb0ca3468fc0acc11828786797f6ef9aa1555e4a211a60d64cc8e4d1be47d6" + +[[package]] +name = "tokio" +version = "1.35.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "pin-project-lite", + "socket2", + "tokio-macros", + "windows-sys", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.46", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" dependencies = [ - 
"tinyvec_macros", + "rustls", + "rustls-pki-types", + "tokio", ] [[package]] -name = "tinyvec_macros" +name = "tokio-stream" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" +checksum = "e4cdeb73537e63f98adcd73138af75e3f368ccaecffaa29d7eb61b9f5a440457" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-test" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", +] [[package]] name = "typenum" -version = "1.17.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "2560b941fdb9ea38301b9b708504d612fcdf9c91a8c31d82219bd74cb07d304d" +dependencies = [ + "matches", +] [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" -dependencies = [ - "tinyvec", -] +checksum = "51ccda9ef9efa3f7ef5d91e8f9b83bbe6955f9bf86aec89d5cce2c874625920f" + +[[package]] +name = "unicode-width" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc85732b6d55a0d520aaf765536a188d9d993770c28633422f85bb646da61335" + +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "77ddaf52e65c6b81c56b7e957c0b1970f7937f21c5c6774c4e56fcb4e20b48c6" dependencies = [ - "form_urlencoded", "idna", + "matches", "percent-encoding", ] @@ -819,15 +1407,15 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "vcpkg" -version = "0.2.15" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +checksum = "3fc439f2794e98976c88a2a2dafce96b930fe8010b0a256b3c2199a773933168" 
[[package]] name = "version_check" -version = "0.9.4" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "45d3d553fd9413fffe7147a20171d640eda0ad4c070acd7d0c885a21bcd2e8b7" [[package]] name = "wasi" @@ -837,34 +1425,34 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", + "lazy_static", "log", - "once_cell", "proc-macro2", "quote", - "syn", + "syn 1.0.91", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -872,28 +1460,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.91", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "winapi" -version = "0.3.9" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +checksum = "b3ad91d846a4a5342c1fb7008d26124ee6cf94a3953751618577295373b32117" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", @@ -901,32 +1489,71 @@ dependencies = [ [[package]] name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +checksum = "a16a8e2ebfc883e2b1771c6482b1fb3c6831eab289ba391619a2d93a7356220f" [[package]] name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +checksum = "8ca29cb03c8ceaf20f8224a18a530938305e9872b1478ea24ff44b4f503a1d1d" [[package]] -name = "windows-core" -version = "0.52.0" +name = "wincolor" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "b9dc3aa9dcda98b5a16150c54619c1ead22e3d3a5d458778ae914be760aa981a" dependencies = [ - "windows-targets", + "winapi", +] + +[[package]] +name = "windows" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdacb41e6a96a052c6cb63a144f24900236121c6f63f4f8219fef5977ecb0c25" +dependencies = [ + "windows-targets 0.42.2", ] [[package]] name = "windows-sys" -version = "0.52.0" +version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.0", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +dependencies = [ + "windows_aarch64_gnullvm 0.48.0", + "windows_aarch64_msvc 0.48.0", + "windows_i686_gnu 0.48.0", + "windows_i686_msvc 0.48.0", + "windows_x86_64_gnu 0.48.0", + "windows_x86_64_gnullvm 0.48.0", + "windows_x86_64_msvc 0.48.0", ] [[package]] @@ -935,53 +1562,160 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.52.0", + "windows_aarch64_msvc 0.52.0", + "windows_i686_gnu 0.52.0", + "windows_i686_msvc 0.52.0", + "windows_x86_64_gnu 0.52.0", + "windows_x86_64_gnullvm 0.52.0", + "windows_x86_64_msvc 0.52.0", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" + [[package]] name = "windows_aarch64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" + [[package]] name = "windows_i686_gnu" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" + [[package]] name = "windows_i686_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" + [[package]] name = "windows_x86_64_gnu" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" + [[package]] name = "windows_x86_64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" + +[[package]] +name = "x509-parser" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] 
+name = "zeroize" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9" diff --git a/Cargo.toml b/Cargo.toml index 6e534db0..5e5a257e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,34 +15,61 @@ exclude = [".github", "docker", ".gitignore", "Makefile"] [features] default = [] -tls = ["native-tls"] -binaries = ["clap"] +native_tls = ["dep:pin-project", "dep:tokio-native-tls"] +rustls = ["dep:pin-project", "dep:tokio-rustls"] +binaries = ["dep:clap", "tokio/macros"] ent = [] [dependencies] -bufstream = "0.1" -chrono = { version = "0.4", features = ["serde", "clock"], default-features = false } +async-trait = "0.1.77" clap = { version = "4.4.10", optional = true } -derive_builder = "0.20.0" +chrono = { version = "0.4", features = [ + "serde", + "clock", +], default-features = false } +derive_builder = "0.12.0" fnv = "1.0.5" hostname = "0.3" -libc = "0.2" -native-tls = { version = "0.2", optional = true } +pin-project = { version = "1.1.4", optional = true } rand = "0.8" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" sha2 = "0.10.0" thiserror = "1.0.30" +tokio = { version = "1.35.1", features = [ + "io-util", + "net", + "rt", + "rt-multi-thread", + "time", +] } +tokio-native-tls = { version = "0.3.1", optional = true } +tokio-rustls = { version = "0.25.0", optional = true } url = "2" [dev-dependencies] -mockstream = "0.0.3" +rustls-pki-types = "1.0.1" +tokio = { version = "1.35.1", features = ["rt", "macros"] } +tokio-test = "0.4.3" +x509-parser = "0.15.1" # to make -Zminimal-versions work [target.'cfg(any())'.dependencies] +native-tls = { version = "0.2.4", optional = true } +num-bigint = "0.4.2" +oid-registry = "0.6.1" openssl = { version = "0.10.60", optional = true } +# TryFrom for ServerName<'static> has been implemented: +# https://github.com/rustls/pki-types/compare/rustls:3793627...rustls:1303efa# +rustls-pki-types = { version = "1.0.1", optional = true } + +# Lockstep between `serde` and `serde_derive` was introduced with the "pinned" release: +# https://github.com/serde-rs/serde/compare/v1.0.185...v1.0.186#diff-2843fc1320fa24a059f5ca967ee45d116110116263a8ba311a3aca3793c562f0R34-R41 +# Without this pin our `#[serde(transparent)]` and `#[derive(Serialize, Deserialize)] do not play well together. +serde = "1.0.186" + [[bin]] name = "loadtest" path = "src/bin/loadtest.rs" diff --git a/Makefile b/Makefile index 6e23d9c0..16a2ff95 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,4 @@ +FAKTORY_IP=127.0.0.1 FAKTORY_HOST=localhost FAKTORY_PORT=7419 FAKTORY_PORT_SECURE=17419 @@ -6,7 +7,7 @@ FAKTORY_PORT_UI=7420 .PHONY: check check: cargo fmt --check - cargo clippy + cargo clippy --all-features cargo d --no-deps --all-features .PHONY: doc @@ -17,8 +18,8 @@ doc: faktory: docker run --rm -d \ -v faktory-data:/var/lib/faktory \ - -p ${FAKTORY_HOST}:${FAKTORY_PORT}:7419 \ - -p ${FAKTORY_HOST}:${FAKTORY_PORT_UI}:7420 \ + -p ${FAKTORY_IP}:${FAKTORY_PORT}:7419 \ + -p ${FAKTORY_IP}:${FAKTORY_PORT_UI}:7420 \ --name faktory \ contribsys/faktory:latest \ /faktory -b :7419 -w :7420 @@ -50,7 +51,7 @@ test/e2e: .PHONY: test/e2e/tls test/e2e/tls: FAKTORY_URL_SECURE=tcp://${FAKTORY_HOST}:${FAKTORY_PORT_SECURE} \ - cargo test --locked --features tls --test tls + cargo test --locked --features native_tls,rustls --test tls -- --nocapture .PHONY: test/load test/load: diff --git a/README.md b/README.md index bd55d9e4..c5eae769 100644 --- a/README.md +++ b/README.md @@ -19,60 +19,64 @@ jobs. 
A client enqueues a job, Faktory sends the job to an available worker (and they're all busy), the worker executes the job, and eventually reports back to Faktory that the job has completed. -Jobs are self-contained, and consist of a job *type* (a string), arguments for the job, and +Jobs are self-contained, and consist of a job _type_ (a string), arguments for the job, and bits and pieces of metadata. When a job is scheduled for execution, the worker is given this information, and uses the job type to figure out how to execute the job. You can think of job execution as a remote function call (or RPC) where the job type is the name of the function, and the job arguments are, perhaps unsuprisingly, the arguments to the function. -In this crate, you will find bindings both for submitting jobs (clients that *produce* jobs) -and for executing jobs (workers that *consume* jobs). The former can be done by making a -`Producer`, whereas the latter is done with a `Consumer`. See the documentation for each for +In this crate, you will find bindings both for submitting jobs (clients that _produce_ jobs) +and for executing jobs (workers that _consume_ jobs). The former can be done by making a +`Client`, whereas the latter is done with a `Worker`. See the documentation for each for more details on how to use them. ## Encrypted connections (TLS) To connect to a Faktory server hosted over TLS, add the `tls` feature, and see the -documentation for `TlsStream`, which can be supplied to `Producer::connect_with` and -`Consumer::connect_with`. +documentation for `TlsStream`, which can be supplied to `Client::connect_with` and +`WorkerBuilder::connect_with`. ## Examples -If you want to **submit** jobs to Faktory, use `Producer`. +If you want to **submit** jobs to Faktory, use `Client`. ```rust -use faktory::{Producer, Job}; -let mut p = Producer::connect(None).unwrap(); -p.enqueue(Job::new("foobar", vec!["z"])).unwrap(); +use faktory::{Client, Job}; +let mut c = Client::connect(None).await.unwrap(); +c.enqueue(Job::new("foobar", vec!["z"])).await.unwrap(); ``` -If you want to **accept** jobs from Faktory, use `Consumer`. +If you want to **accept** jobs from Faktory, use `Worker`. ```rust -use faktory::ConsumerBuilder; +use faktory::WorkerBuilder; use std::io; -let mut c = ConsumerBuilder::default(); -c.register("foobar", |job| -> io::Result<()> { - println!("{:?}", job); - Ok(()) +let mut w = WorkerBuilder::default(); +w.register("foobar", |job| async move { + println!("{:?}", job); + Ok::<(), io::Error>(()) }); -let mut c = c.connect(None).unwrap(); -if let Err(e) = c.run(&["default"]) { +let mut w = w.connect(None).await.unwrap(); +if let Err(e) = w.run(&["default"]).await { println!("worker failed: {}", e); } ``` ## Run test suite locally -First ensure the "Factory" service is running and accepting connections on your machine. +First ensure the "Factory" service is running and accepting connections on your machine. To launch it a [Factory](https://hub.docker.com/r/contribsys/faktory/) container with [docker](https://docs.docker.com/engine/install/), run: + ```bash docker run --rm -it -v faktory-data:/var/lib/faktory -p 127.0.0.1:7419:7419 -p 127.0.0.1:7420:7420 contribsys/faktory:latest /faktory -b :7419 -w :7420 ``` + After that run the tests: + ```bash FAKTORY_URL=tcp://127.0.0.1:7419 cargo test --all-features --locked --all-targets ``` + Please note that setting "FAKTORY_URL" environment variable is required for e2e tests to not be skipped. 
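The skip behaviour mentioned in the note above is usually enforced at the top of each end-to-end test. The following is a minimal sketch of that pattern, not the crate's actual test helper; the test name and job kind are made up, and only APIs visible elsewhere in this diff (`Client::connect`, `enqueue`, `Job::new`) plus the standard library are used:

```rust
use faktory::{Client, Job};

// Hedged sketch: an e2e test that skips itself unless FAKTORY_URL is set,
// mirroring the note above about tests being skipped without it.
#[tokio::test]
async fn enqueue_roundtrip() {
    let Some(url) = std::env::var("FAKTORY_URL").ok() else {
        eprintln!("skipping: FAKTORY_URL is not set");
        return;
    };
    let mut client = Client::connect(Some(url.as_str())).await.unwrap();
    client
        .enqueue(Job::new("example_job", vec!["arg"]))
        .await
        .unwrap();
}
```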
Provided you have [make](https://www.gnu.org/software/make/#download) installed and `docker` daemon running, diff --git a/src/bin/loadtest.rs b/src/bin/loadtest.rs index e5a24e41..60d17d67 100644 --- a/src/bin/loadtest.rs +++ b/src/bin/loadtest.rs @@ -5,15 +5,16 @@ use rand::prelude::*; use std::io; use std::process; use std::sync::{self, atomic}; -use std::thread; use std::time; +use tokio::task; const QUEUES: &[&str] = &["queue0", "queue1", "queue2", "queue3", "queue4"]; -fn main() { +#[tokio::main] +async fn main() { let matches = Command::new("My Super Program") .version("0.1") - .about("Benchmark the performance of Rust Faktory consumers and producers") + .about("Benchmark the performance of Rust Faktory async workers and client") .arg( Arg::new("jobs") .help("Number of jobs to run") @@ -23,7 +24,7 @@ fn main() { ) .arg( Arg::new("threads") - .help("Number of consumers/producers to run") + .help("Number of workers/clients to run") .value_parser(value_parser!(usize)) .index(2) .default_value("10"), @@ -37,8 +38,10 @@ fn main() { jobs, threads ); - // ensure that we can actually connect to the server - if let Err(e) = Producer::connect(None) { + // ensure that we can actually connect to the server; + // will create a client, run a handshake with Faktory, + // and drop the cliet immediately afterwards; + if let Err(e) = Client::connect(None).await { println!("{}", e); process::exit(1); } @@ -47,55 +50,63 @@ fn main() { let popped = sync::Arc::new(atomic::AtomicUsize::new(0)); let start = time::Instant::now(); - let threads: Vec>> = (0..threads) - .map(|_| { - let pushed = sync::Arc::clone(&pushed); - let popped = sync::Arc::clone(&popped); - thread::spawn(move || { - // make producer and consumer - let mut p = Producer::connect(None).unwrap(); - let mut c = ConsumerBuilder::default(); - c.register("SomeJob", |_| { - let mut rng = rand::thread_rng(); - if rng.gen_bool(0.01) { - Err(io::Error::new(io::ErrorKind::Other, "worker closed")) - } else { - Ok(()) - } - }); - let mut c = c.connect(None).unwrap(); - let mut rng = rand::thread_rng(); - let mut random_queues = Vec::from(QUEUES); - random_queues.shuffle(&mut rng); - for idx in 0..jobs { - if idx % 2 == 0 { - // push - let mut job = Job::new( - "SomeJob", - vec![serde_json::Value::from(1), "string".into(), 3.into()], - ); - job.priority = Some(rng.gen_range(1..10)); - job.queue = QUEUES.choose(&mut rng).unwrap().to_string(); - p.enqueue(job)?; - if pushed.fetch_add(1, atomic::Ordering::SeqCst) >= jobs { - return Ok(idx); - } - } else { - // pop - c.run_one(0, &random_queues[..])?; - if popped.fetch_add(1, atomic::Ordering::SeqCst) >= jobs { - return Ok(idx); + let mut set = task::JoinSet::new(); + for _ in 0..threads { + let pushed = sync::Arc::clone(&pushed); + let popped = sync::Arc::clone(&popped); + set.spawn(async move { + // make producer and consumer + let mut p = Client::connect(None).await.unwrap(); + let mut worker = WorkerBuilder::default() + .register_fn("SomeJob", |_| { + Box::pin(async move { + let mut rng = rand::thread_rng(); + if rng.gen_bool(0.01) { + Err(io::Error::new(io::ErrorKind::Other, "worker closed")) + } else { + Ok(()) } + }) + }) + .connect(None) + .await + .unwrap(); + let mut rng = rand::rngs::OsRng; + let mut random_queues = Vec::from(QUEUES); + random_queues.shuffle(&mut rng); + for idx in 0..jobs { + if idx % 2 == 0 { + // push + let mut job = Job::new( + "SomeJob", + vec![serde_json::Value::from(1), "string".into(), 3.into()], + ); + job.priority = Some(rng.gen_range(1..10)); + job.queue = 
QUEUES.choose(&mut rng).unwrap().to_string(); + p.enqueue(job).await?; + if pushed.fetch_add(1, atomic::Ordering::SeqCst) >= jobs { + return Ok::(idx); + } + } else { + // pop + worker.run_one(0, &random_queues[..]).await?; + if popped.fetch_add(1, atomic::Ordering::SeqCst) >= jobs { + return Ok(idx); } } - Ok(jobs) - }) - }) - .collect(); + } + Ok(jobs) + }); + } + + let mut ops_count = Vec::with_capacity(threads); + while let Some(res) = set.join_next().await { + ops_count.push(res.unwrap()) + } - let _ops_count: Result, _> = threads.into_iter().map(|jt| jt.join().unwrap()).collect(); let stop = start.elapsed(); + let stop_secs = stop.as_secs() * 1_000_000_000 + u64::from(stop.subsec_nanos()); let stop_secs = stop_secs as f64 / 1_000_000_000.0; println!( @@ -105,5 +116,8 @@ fn main() { stop_secs, jobs as f64 / stop_secs, ); - // println!("{:?}", _ops_count); + println!( + "Number of operations (pushes and pops) per thread: {:?}", + ops_count + ); } diff --git a/src/consumer/mod.rs b/src/consumer/mod.rs deleted file mode 100644 index 119b64f3..00000000 --- a/src/consumer/mod.rs +++ /dev/null @@ -1,686 +0,0 @@ -use crate::error::Error; -use crate::proto::{ - self, parse_provided_or_from_env, Ack, Client, ClientOptions, Fail, HeartbeatStatus, Job, - Reconnect, -}; -use fnv::FnvHashMap; -use std::error::Error as StdError; -use std::io::prelude::*; -use std::net::TcpStream; -use std::sync::{atomic, Arc, Mutex}; - -const STATUS_RUNNING: usize = 0; -const STATUS_QUIET: usize = 1; -const STATUS_TERMINATING: usize = 2; - -/// Implementations of this trait can be registered to run jobs in a `Consumer`. -/// -/// # Example -/// -/// Create a worker with all default options, register a single handler (for the `foo` job -/// type), connect to the Faktory server, and start accepting jobs. -/// The handler is a struct that implements `JobRunner`. -/// -/// ```no_run -/// use faktory::{ConsumerBuilder, JobRunner, Job}; -/// use std::io; -/// -/// struct MyHandler { -/// config: String, -/// } -/// impl JobRunner for MyHandler { -/// type Error = io::Error; -/// fn run(&self, job: Job) -> Result<(), Self::Error> { -/// println!("config: {}", self.config); -/// println!("job: {:?}", job); -/// Ok(()) -/// } -/// } -/// -/// let mut c = ConsumerBuilder::default(); -/// let handler = MyHandler { -/// config: "bar".to_string(), -/// }; -/// c.register_runner("foo", handler); -/// let mut c = c.connect(None).unwrap(); -/// if let Err(e) = c.run(&["default"]) { -/// println!("worker failed: {}", e); -/// } -/// ``` -pub trait JobRunner: Send + Sync { - /// The error type that the handler may return. - type Error; - /// A handler function that runs a job. 
- fn run(&self, job: Job) -> Result<(), Self::Error>; -} -type BoxedJobRunner = Box>; -// Implements JobRunner for a closure that takes a Job and returns a Result<(), E> -impl JobRunner for Box -where - F: Fn(Job) -> Result<(), E> + Send + Sync, -{ - type Error = E; - fn run(&self, job: Job) -> Result<(), E> { - self(job) - } -} - -// Additional Blanket Implementations -impl<'a, E, F> JobRunner for &'a F -where - F: Fn(Job) -> Result<(), E> + Send + Sync, -{ - type Error = E; - fn run(&self, job: Job) -> Result<(), E> { - self(job) - } -} -impl<'a, E, F> JobRunner for &'a mut F -where - F: Fn(Job) -> Result<(), E> + Send + Sync, -{ - type Error = E; - fn run(&self, job: Job) -> Result<(), E> { - (self as &F)(job) - } -} -#[repr(transparent)] -struct Closure(F); -impl JobRunner for Closure -where - F: Fn(Job) -> Result<(), E> + Send + Sync, -{ - type Error = E; - fn run(&self, job: Job) -> Result<(), E> { - (self.0)(job) - } -} - -/// `Consumer` is used to run a worker that processes jobs provided by Faktory. -/// -/// # Building the worker -/// -/// Faktory needs a decent amount of information from its workers, such as a unique worker ID, a -/// hostname for the worker, its process ID, and a set of labels used to identify the worker. In -/// order to enable setting all these, constructing a worker is a two-step process. You first use a -/// [`ConsumerBuilder`](struct.ConsumerBuilder.html) (which conveniently implements a sensible -/// `Default`) to set the worker metadata, as well as to register any job handlers. You then use -/// one of the `connect_*` methods to finalize the worker and connect to the Faktory server. -/// -/// In most cases, `ConsumerBuilder::default()` will do what you want. You only need to augment it -/// with calls to [`register`](struct.ConsumerBuilder.html#method.register) to register handlers -/// for each of your job types, and then you can connect. If you have different *types* of workers, -/// you may also want to use [`labels`](struct.ConsumerBuilder.html#method.labels) to distinguish -/// them in the Faktory Web UI. To specify that some jobs should only go to some workers, use -/// different queues. -/// -/// ## Handlers -/// -/// For each [`Job`](struct.Job.html) that the worker receives, the handler that is registered for -/// that job's type will be called. If a job is received with a type for which no handler exists, -/// the job will be failed and returned to the Faktory server. Similarly, if a handler returns an -/// error response, the job will be failed, and the error reported back to the Faktory server. -/// -/// If you are new to Rust, getting the handler types to work out can be a little tricky. If you -/// want to understand why, I highly recommend that you have a look at the chapter on [closures and -/// generic -/// parameters](https://doc.rust-lang.org/book/second-edition/ch13-01-closures.html#using-closures-with-generic-parameters-and-the-fn-traits) -/// in the Rust Book. If you just want it to work, my recommendation is to either use regular -/// functions instead of closures, and giving `&func_name` as the handler, **or** wrapping all your -/// closures in `Box::new()`. -/// -/// ## Concurrency -/// -/// By default, only a single thread is spun up to process the jobs given to this worker. If you -/// want to dedicate more resources to processing jobs, you have a number of options listed below. -/// As you go down the list below, efficiency increases, but fault isolation decreases. 
I will not -/// give further detail here, but rather recommend that if these don't mean much to you, you should -/// use the last approach and let the library handle the concurrency for you. -/// -/// - You can spin up more worker processes by launching your worker program more than once. -/// - You can create more than one `Consumer`. -/// - You can call [`ConsumerBuilder::workers`](struct.ConsumerBuilder.html#method.workers) to set -/// the number of worker threads you'd like the `Consumer` to use internally. -/// -/// # Connecting to Faktory -/// -/// To fetch jobs, the `Consumer` must first be connected to the Faktory server. Exactly how you do -/// that depends on your setup. In most cases, you'll want to use `Consumer::connect`, and provide -/// a connection URL. If you supply a URL, it must be of the form: -/// -/// ```text -/// protocol://[:password@]hostname[:port] -/// ``` -/// -/// Faktory suggests using the `FAKTORY_PROVIDER` and `FAKTORY_URL` environment variables (see -/// their docs for more information) with `localhost:7419` as the fallback default. If you want -/// this behavior, pass `None` as the URL. -/// -/// See the [`Producer` examples](struct.Producer.html#examples) for examples of how to connect to -/// different Factory setups. -/// -/// # Worker lifecycle -/// -/// Okay, so you've built your worker and connected to the Faktory server. Now what? -/// -/// If all this process is doing is handling jobs, reconnecting on failure, and exiting when told -/// to by the Faktory server, you should use -/// [`run_to_completion`](struct.Consumer.html#method.run_to_completion). If you want more -/// fine-grained control over the lifetime of your process, you should use -/// [`Consumer::run`](struct.Consumer.html#method.run). See the documentation for each of these -/// methods for details. -/// -/// # Examples -/// -/// Create a worker with all default options, register a single handler (for the `foobar` job -/// type), connect to the Faktory server, and start accepting jobs. -/// -/// ```no_run -/// use faktory::ConsumerBuilder; -/// use std::io; -/// let mut c = ConsumerBuilder::default(); -/// c.register("foobar", |job| -> io::Result<()> { -/// println!("{:?}", job); -/// Ok(()) -/// }); -/// let mut c = c.connect(None).unwrap(); -/// if let Err(e) = c.run(&["default"]) { -/// println!("worker failed: {}", e); -/// } -/// ``` -pub struct Consumer -where - S: Read + Write, -{ - c: Client, - worker_states: Arc>>, - callbacks: Arc>>, - terminated: bool, -} - -#[derive(Default)] -struct WorkerState { - last_job_result: Option>, - running_job: Option, -} - -/// Convenience wrapper for building a Faktory worker. -/// -/// See the [`Consumer`](struct.Consumer.html) documentation for details. -pub struct ConsumerBuilder { - opts: ClientOptions, - workers: usize, - callbacks: FnvHashMap>, -} - -impl Default for ConsumerBuilder { - /// Construct a new worker with default worker options and the url fetched from environment - /// variables. - /// - /// This will construct a worker where: - /// - /// - `hostname` is this machine's hostname. - /// - `wid` is a randomly generated string. - /// - `pid` is the OS PID of this process. - /// - `labels` is `["rust"]`. - /// - fn default() -> Self { - ConsumerBuilder { - opts: ClientOptions::default(), - workers: 1, - callbacks: Default::default(), - } - } -} - -impl ConsumerBuilder { - /// Set the hostname to use for this worker. - /// - /// Defaults to the machine's hostname as reported by the operating system. 
- pub fn hostname(&mut self, hn: String) -> &mut Self { - self.opts.hostname = Some(hn); - self - } - - /// Set a unique identifier for this worker. - /// - /// Defaults to a randomly generated ASCII string. - pub fn wid(&mut self, wid: String) -> &mut Self { - self.opts.wid = Some(wid); - self - } - - /// Set the labels to use for this worker. - /// - /// Defaults to `["rust"]`. - pub fn labels(&mut self, labels: Vec) -> &mut Self { - self.opts.labels = labels; - self - } - - /// Set the number of workers to use for `run` and `run_to_completion_*`. - /// - /// Defaults to 1. - pub fn workers(&mut self, w: usize) -> &mut Self { - self.workers = w; - self - } - - /// Register a handler function for the given job type (`kind`). - /// - /// Whenever a job whose type matches `kind` is fetched from the Faktory, the given handler - /// function is called with that job as its argument. - pub fn register(&mut self, kind: K, handler: H) -> &mut Self - where - K: Into, - H: Fn(Job) -> Result<(), E> + Send + Sync + 'static, - { - self.register_runner(kind, Closure(handler)) - } - - /// Register a handler for the given job type (`kind`). - /// - /// Whenever a job whose type matches `kind` is fetched from the Faktory, the given handler - /// object is called with that job as its argument. - pub fn register_runner(&mut self, kind: K, runner: H) -> &mut Self - where - K: Into, - H: JobRunner + 'static, - { - self.callbacks.insert(kind.into(), Box::new(runner)); - self - } - - /// Connect to a Faktory server. - /// - /// If `url` is not given, will use the standard Faktory environment variables. Specifically, - /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address - /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the - /// server address. If the latter environment variable is not defined, the connection will be - /// made to - /// - /// ```text - /// tcp://localhost:7419 - /// ``` - /// - /// If `url` is given, but does not specify a port, it defaults to 7419. - pub fn connect(self, url: Option<&str>) -> Result, Error> { - let url = parse_provided_or_from_env(url)?; - let stream = TcpStream::connect(proto::host_from_url(&url))?; - Self::connect_with(self, stream, url.password().map(|p| p.to_string())) - } - - /// Connect to a Faktory server with a non-standard stream. - pub fn connect_with( - mut self, - stream: S, - pwd: Option, - ) -> Result, Error> { - self.opts.password = pwd; - self.opts.is_worker = true; - Ok(Consumer::new( - Client::new(stream, self.opts)?, - self.workers, - self.callbacks, - )) - } -} - -enum Failed { - Application(E), - BadJobType(String), -} - -impl Consumer { - fn new(c: Client, workers: usize, callbacks: FnvHashMap>) -> Self { - Consumer { - c, - callbacks: Arc::new(callbacks), - worker_states: Arc::new((0..workers).map(|_| Default::default()).collect()), - terminated: false, - } - } -} - -impl Consumer { - fn reconnect(&mut self) -> Result<(), Error> { - self.c.reconnect() - } -} - -impl Consumer -where - S: Read + Write, - E: StdError, -{ - fn run_job(&mut self, job: Job) -> Result<(), Failed> { - match self.callbacks.get(&job.kind) { - Some(callback) => callback.run(job).map_err(Failed::Application), - None => { - // cannot execute job, since no handler exists - Err(Failed::BadJobType(job.kind)) - } - } - } - - /// Fetch and run a single job on the current thread, and then return. 
- pub fn run_one(&mut self, worker: usize, queues: &[Q]) -> Result - where - Q: AsRef, - { - // get a job - let job = match self.c.fetch(queues)? { - Some(job) => job, - None => return Ok(false), - }; - - // remember the job id - let jid = job.jid.clone(); - - // keep track of running job in case we're terminated during it - self.worker_states[worker].lock().unwrap().running_job = Some(jid.clone()); - - // process the job - let r = self.run_job(job); - - // report back - match r { - Ok(_) => { - // job done -- acknowledge - // remember it in case we fail to notify the server (e.g., broken connection) - self.worker_states[worker].lock().unwrap().last_job_result = Some(Ok(jid.clone())); - self.c.issue(&Ack::new(jid))?.await_ok()?; - } - Err(e) => { - // job failed -- let server know - // "unknown" is the errtype used by the go library too - let fail = match e { - Failed::BadJobType(jt) => { - Fail::new(jid, "unknown", format!("No handler for {}", jt)) - } - Failed::Application(e) => { - let mut f = Fail::new(jid, "unknown", format!("{}", e)); - let mut root = e.source(); - let mut backtrace = Vec::new(); - while let Some(r) = root.take() { - backtrace.push(format!("{}", r)); - root = r.source(); - } - f.set_backtrace(backtrace); - f - } - }; - - let fail2 = fail.clone(); - self.worker_states[worker].lock().unwrap().last_job_result = Some(Err(fail)); - self.c.issue(&fail2)?.await_ok()?; - } - } - - // we won't have to tell the server again - { - let mut state = self.worker_states[worker].lock().unwrap(); - state.last_job_result = None; - state.running_job = None; - } - Ok(true) - } - - #[cfg(test)] - pub(crate) fn run_n(&mut self, n: usize, queues: &[Q]) -> Result<(), Error> - where - Q: AsRef, - { - for _ in 0..n { - self.run_one(0, queues)?; - } - Ok(()) - } -} - -impl Consumer -where - S: Read + Write + Reconnect + Send + 'static, - E: StdError + 'static, -{ - fn for_worker(&mut self) -> Result { - Ok(Consumer { - c: self.c.connect_again()?, - callbacks: Arc::clone(&self.callbacks), - worker_states: Arc::clone(&self.worker_states), - terminated: self.terminated, - }) - } - - /// Run this worker on the given `queues` until an I/O error occurs (`Err` is returned), or - /// until the server tells the worker to disengage (`Ok` is returned). - /// - /// The value in an `Ok` indicates the number of workers that may still be processing jobs. - /// - /// Note that if the worker fails, [`reconnect()`](struct.Consumer.html#method.reconnect) - /// should likely be called before calling `run()` again. If an error occurred while reporting - /// a job success or failure, the result will be re-reported to the server without re-executing - /// the job. If the worker was terminated (i.e., `run` returns with an `Ok` response), the - /// worker should **not** try to resume by calling `run` again. This will cause a panic. - pub fn run(&mut self, queues: &[Q]) -> Result - where - Q: AsRef, - { - assert!(!self.terminated, "do not re-run a terminated worker"); - let worker_states = Arc::get_mut(&mut self.worker_states) - .expect("all workers are scoped to &mut of the user-code-visible Consumer"); - - // retry delivering notification about our last job result. - // we know there's no leftover thread at this point, so there's no race on the option. 
- for wstate in worker_states.iter_mut() { - let wstate = wstate.get_mut().unwrap(); - if let Some(res) = wstate.last_job_result.take() { - let r = match res { - Ok(ref jid) => self.c.issue(&Ack::new(&**jid)), - Err(ref fail) => self.c.issue(fail), - }; - - let r = match r { - Ok(r) => r, - Err(e) => { - wstate.last_job_result = Some(res); - return Err(e); - } - }; - - if let Err(e) = r.await_ok() { - // it could be that the server did previously get our ACK/FAIL, and that it was - // the resulting OK that failed. in that case, we would get an error response - // when re-sending the job response. this should not count as critical. other - // errors, however, should! - if let Error::IO(_) = e { - wstate.last_job_result = Some(res); - return Err(e); - } - } - } - } - - // keep track of the current status of each worker - let status: Vec<_> = (0..self.worker_states.len()) - .map(|_| Arc::new(atomic::AtomicUsize::new(STATUS_RUNNING))) - .collect(); - - // start worker threads - use std::thread; - let workers = status - .iter() - .enumerate() - .map(|(worker, status)| { - let mut w = self.for_worker()?; - let status = Arc::clone(status); - let queues: Vec<_> = queues.iter().map(|s| s.as_ref().to_string()).collect(); - Ok(thread::spawn(move || { - while status.load(atomic::Ordering::SeqCst) == STATUS_RUNNING { - if let Err(e) = w.run_one(worker, &queues[..]) { - status.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); - return Err(e); - } - } - status.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); - Ok(()) - })) - }) - .collect::, Error>>()?; - - // listen for heartbeats - let mut target = STATUS_RUNNING; - let exit = { - use std::time; - let mut last = time::Instant::now(); - - loop { - thread::sleep(time::Duration::from_millis(100)); - - // has a worker failed? - if target == STATUS_RUNNING - && status - .iter() - .any(|s| s.load(atomic::Ordering::SeqCst) == STATUS_TERMINATING) - { - // tell all workers to exit - // (though chances are they've all failed already) - for s in &status { - s.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); - } - break Ok(false); - } - - if last.elapsed().as_secs() < 5 { - // don't sent a heartbeat yet - continue; - } - - match self.c.heartbeat() { - Ok(hb) => { - match hb { - HeartbeatStatus::Ok => {} - HeartbeatStatus::Quiet => { - // tell the workers to eventually terminate - for s in &status { - s.store(STATUS_QUIET, atomic::Ordering::SeqCst); - } - target = STATUS_QUIET; - } - HeartbeatStatus::Terminate => { - // tell the workers to terminate - // *and* fail the current job and immediately return - for s in &status { - s.store(STATUS_QUIET, atomic::Ordering::SeqCst); - } - break Ok(true); - } - } - } - Err(e) => { - // for this to fail, the workers have probably also failed - for s in &status { - s.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); - } - break Err(e); - } - } - last = time::Instant::now(); - } - }; - - // there are a couple of cases here: - // - // - we got TERMINATE, so we should just return, even if a worker is still running - // - we got TERMINATE and all workers has exited - // - we got an error from heartbeat() - // - self.terminated = exit.is_ok(); - if let Ok(true) = exit { - // FAIL currently running jobs even though they're still running - let mut running = 0; - for wstate in self.worker_states.iter() { - if let Some(jid) = wstate.lock().unwrap().running_job.take() { - let f = Fail::new(&*jid, "unknown", "terminated"); - - // if this fails, we don't want to exit with Err(), - // because we *were* still terminated! 
- let _ = self.c.issue(&f).and_then(|r| r.await_ok()).is_ok(); - - running += 1; - } - } - - if running != 0 { - return Ok(running); - } - } - - match exit { - Ok(_) => { - // we want to expose any worker errors - workers - .into_iter() - .map(|w| w.join().unwrap()) - .collect::, _>>() - .map(|_| 0) - } - Err(e) => { - // we want to expose worker errors, or otherwise the heartbeat error - workers - .into_iter() - .map(|w| w.join().unwrap()) - .collect::, _>>() - .and(Err(e)) - } - } - } - - /// Run this worker until the server tells us to exit or a connection cannot be re-established. - /// - /// This function never returns. When the worker decides to exit, the process is terminated. - pub fn run_to_completion(mut self, queues: &[Q]) -> ! - where - Q: AsRef, - { - use std::process; - while self.run(queues).is_err() { - if self.reconnect().is_err() { - break; - } - } - - process::exit(0); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - // https://github.com/rust-lang/rust/pull/42219 - //#[allow_fail] - #[ignore] - fn it_works() { - use crate::producer::Producer; - use std::io; - - let mut p = Producer::connect(None).unwrap(); - let mut j = Job::new("foobar", vec!["z"]); - j.queue = "worker_test_1".to_string(); - p.enqueue(j).unwrap(); - - let mut c = ConsumerBuilder::default(); - c.register("foobar", |job: Job| -> Result<(), io::Error> { - assert_eq!(job.args, vec!["z"]); - Ok(()) - }); - let mut c = c.connect(None).unwrap(); - let e = c.run_n(1, &["worker_test_1"]); - if e.is_err() { - println!("{:?}", e); - } - assert!(e.is_ok()); - } -} diff --git a/src/error.rs b/src/error.rs index 89de50ff..9296a807 100644 --- a/src/error.rs +++ b/src/error.rs @@ -42,11 +42,11 @@ pub enum Error { #[error("serialization")] Serialization(#[source] serde_json::Error), - /// Indicates an error in the underlying TLS stream. - #[cfg(feature = "tls")] - #[cfg_attr(docsrs, doc(cfg(feature = "tls")))] - #[error("underlying tls stream")] - TlsStream(#[source] native_tls::Error), + /// Indicates an error in the underlying non-standard stream, e.g. TLS stream. + #[cfg(any(feature = "native_tls", feature = "rustls"))] + #[cfg_attr(docsrs, doc(cfg(any(feature = "native_tls", feature = "rustls"))))] + #[error("tls stream")] + Stream(#[from] Stream), } /// Errors specific to connection logic. @@ -160,3 +160,20 @@ impl Protocol { } } } + +/// Implementation specific errors in the underlying non-standard stream, e.g. TLS stream. +#[derive(Debug, Error)] +#[non_exhaustive] +pub enum Stream { + /// Error in the underlying native tls powered stream. + #[cfg(feature = "native_tls")] + #[cfg_attr(docsrs, doc(cfg(feature = "native_tls")))] + #[error("underlying tls stream")] + NativeTls(#[source] tokio_native_tls::native_tls::Error), + + /// Error in the underlying rustls powered stream. + #[cfg(feature = "rustls")] + #[cfg_attr(docsrs, doc(cfg(feature = "rustls")))] + #[error("underlying tls stream")] + RustTls(#[source] tokio_rustls::rustls::Error), +} diff --git a/src/lib.rs b/src/lib.rs index a2ffaa31..6f702741 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,43 +20,47 @@ //! //! In this crate, you will find bindings both for submitting jobs (clients that *produce* jobs) //! and for executing jobs (workers that *consume* jobs). The former can be done by making a -//! `Producer`, whereas the latter is done with a `Consumer`. See the documentation for each for +//! `Client`, whereas the latter is done with a `Worker`. See the documentation for each for //! more details on how to use them. //! //! 
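Tying back to the `src/error.rs` hunk above: the old `tls`-gated `TlsStream` variant becomes a backend-agnostic `Stream` variant, so callers that previously matched on TLS failures need a small adjustment. A hedged sketch of what that might look like (this is illustrative, not taken from the crate; it only compiles with `native_tls` or `rustls` enabled, and the job kind is arbitrary):

```rust
// Hedged sketch: distinguish the new TLS-level error variant from other
// failures when enqueueing. The `Stream` variant only exists when a TLS
// feature (`native_tls` or `rustls`) is enabled.
#[tokio::main]
async fn main() -> Result<(), faktory::Error> {
    let mut client = faktory::Client::connect(None).await?;
    match client.enqueue(faktory::Job::new("foobar", vec!["z"])).await {
        Ok(()) => {}
        Err(faktory::Error::Stream(e)) => eprintln!("TLS stream failure: {e}"),
        Err(e) => eprintln!("enqueue failed: {e}"),
    }
    Ok(())
}
```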
# Encrypted connections (TLS) //! //! To connect to a Faktory server hosted over TLS, add the `tls` feature, and see the -//! documentation for `TlsStream`, which can be supplied to `Producer::connect_with` and -//! `Consumer::connect_with`. +//! documentation for `TlsStream`, which can be supplied to [`Client::connect_with`] and +//! [`WorkerBuilder::connect_with`]. //! //! # Examples //! -//! If you want to **submit** jobs to Faktory, use `Producer`. +//! If you want to **submit** jobs to Faktory, use `Client`. //! //! ```no_run -//! use faktory::{Producer, Job}; -//! let mut p = Producer::connect(None).unwrap(); -//! p.enqueue(Job::new("foobar", vec!["z"])).unwrap(); +//! # tokio_test::block_on(async { +//! use faktory::{Client, Job}; +//! let mut client = Client::connect(None).await.unwrap(); +//! client.enqueue(Job::new("foobar", vec!["z"])).await.unwrap(); //! -//! let (enqueued_count, errors) = p.enqueue_many(vec![Job::new("foobar", vec!["z"]), Job::new("foobar", vec!["z"])]).unwrap(); +//! let (enqueued_count, errors) = client.enqueue_many([Job::new("foobar", vec!["z"]), Job::new("foobar", vec!["z"])]).await.unwrap(); //! assert_eq!(enqueued_count, 2); //! assert_eq!(errors, None); +//! }); //! ``` -//! -//! If you want to **accept** jobs from Faktory, use `Consumer`. +//! If you want to **accept** jobs from Faktory, use `Worker`. //! //! ```no_run -//! use faktory::ConsumerBuilder; +//! # tokio_test::block_on(async { +//! use faktory::WorkerBuilder; //! use std::io; -//! let mut c = ConsumerBuilder::default(); -//! c.register("foobar", |job| -> io::Result<()> { -//! println!("{:?}", job); -//! Ok(()) -//! }); -//! let mut c = c.connect(None).unwrap(); -//! if let Err(e) = c.run(&["default"]) { +//! let mut w = WorkerBuilder::default() +//! .register_fn("foobar", |job| async move { +//! println!("{:?}", job); +//! Ok::<(), io::Error>(()) +//! }) +//! .connect(None).await.unwrap(); +//! +//! if let Err(e) = w.run(&["default"]).await { //! println!("worker failed: {}", e); //! } +//! # }); //! ``` #![deny(missing_docs)] #![cfg_attr(docsrs, feature(doc_cfg))] @@ -67,29 +71,25 @@ extern crate serde_derive; pub mod error; -mod consumer; -mod producer; mod proto; +mod worker; -pub use crate::consumer::{Consumer, ConsumerBuilder, JobRunner}; pub use crate::error::Error; -pub use crate::producer::Producer; -pub use crate::proto::{Client, Job, JobBuilder, Reconnect}; +pub use crate::proto::{Client, Job, JobBuilder, JobId, Reconnect, WorkerId}; +pub use crate::worker::{JobRunner, Worker, WorkerBuilder}; #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] /// Constructs only available with the enterprise version of Faktory. 
pub mod ent { pub use crate::proto::{ - Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, JobState, Progress, + Batch, BatchBuilder, BatchHandle, BatchId, BatchStatus, CallbackState, JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, }; } -#[cfg(feature = "tls")] -#[cfg_attr(docsrs, doc(cfg(feature = "tls")))] +#[cfg(any(feature = "native_tls", feature = "rustls"))] mod tls; -#[cfg(feature = "tls")] -#[cfg_attr(docsrs, doc(cfg(feature = "tls")))] -pub use tls::TlsStream; +#[cfg(any(feature = "native_tls", feature = "rustls"))] +pub use tls::*; diff --git a/src/producer/mod.rs b/src/producer/mod.rs deleted file mode 100644 index c92b111c..00000000 --- a/src/producer/mod.rs +++ /dev/null @@ -1,201 +0,0 @@ -use crate::error::Error; -use crate::proto::{Client, Info, Job, Push, PushBulk, QueueAction, QueueControl}; -use std::collections::HashMap; -use std::io::prelude::*; -use std::net::TcpStream; - -#[cfg(feature = "ent")] -use crate::proto::{Batch, BatchHandle, CommitBatch, OpenBatch}; - -/// `Producer` is used to enqueue new jobs that will in turn be processed by Faktory workers. -/// -/// # Connecting to Faktory -/// -/// To issue jobs, the `Producer` must first be connected to the Faktory server. Exactly how you do -/// that depends on your setup. Faktory suggests using the `FAKTORY_PROVIDER` and `FAKTORY_URL` -/// environment variables (see their docs for more information) with `localhost:7419` as the -/// fallback default. If you want this behavior, pass `None` to -/// [`Producer::connect`](struct.Producer.html#method.connect). If not, you can supply the URL -/// directly to [`Producer::connect`](struct.Producer.html#method.connect) in the form: -/// -/// ```text -/// protocol://[:password@]hostname[:port] -/// ``` -/// -/// -/// # Issuing jobs -/// -/// Most of the lifetime of a `Producer` will be spent creating and enqueueing jobs for Faktory -/// workers. This is done by passing a [`Job`](struct.Job.html) to -/// [`Producer::enqueue`](struct.Producer.html#method.enqueue). The most important part of a `Job` -/// is its `kind`; this field dictates how workers will execute the job when they receive it. The -/// string provided here must match a handler registered on the worker using -/// [`ConsumerBuilder::register`](struct.ConsumerBuilder.html#method.register) (or the equivalent -/// handler registration method in workers written in other languages). -/// -/// Since Faktory workers do not all need to be the same (you could have some written in Rust for -/// performance-critical tasks, some in Ruby for more webby tasks, etc.), it may be the case that a -/// given job can only be executed by some workers (e.g., if they job type is not registered at -/// others). To allow for this, Faktory includes a `labels` field with each job. Jobs will only be -/// sent to workers whose labels (see -/// [`ConsumerBuilder::labels`](struct.ConsumerBuilder.html#method.labels)) match those set in -/// `Job::labels`. 
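Looking back at the widened `ent` re-export in the `src/lib.rs` hunk above (which now also exposes `BatchId`), here is a hedged sketch of reaching those types from downstream code. It assumes the `ent` cargo feature is enabled; the description and job kind are illustrative, and only builder calls that appear elsewhere in this diff are used:

```rust
use faktory::ent::{Batch, CallbackState};
use faktory::Job;

fn main() {
    // Hedged sketch (requires the `ent` feature): batch and callback-state
    // types are reached through the re-exported `ent` module.
    let on_complete = Job::new("batch_done", vec!["ok"]);
    let _batch = Batch::builder()
        .description("nightly import")
        .with_complete_callback(on_complete);
    // `CallbackState` implements Display, as shown in the `status.rs`
    // portion of this diff.
    assert_eq!(CallbackState::Pending.to_string(), "Pending");
}
```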
-/// -/// # Examples -/// -/// Connecting to an unsecured Faktory server using environment variables -/// -/// ```no_run -/// use faktory::Producer; -/// let p = Producer::connect(None).unwrap(); -/// ``` -/// -/// Connecting to a secured Faktory server using an explicit URL -/// -/// ```no_run -/// use faktory::Producer; -/// let p = Producer::connect(Some("tcp://:hunter2@localhost:7439")).unwrap(); -/// ``` -/// -/// Issuing a job using a `Producer` -/// -/// ```no_run -/// # use faktory::Producer; -/// # let mut p = Producer::connect(None).unwrap(); -/// use faktory::Job; -/// p.enqueue(Job::new("foobar", vec!["z"])).unwrap(); -/// ``` -/// -// TODO: provide way of inspecting status of job. -pub struct Producer { - c: Client, -} - -impl Producer { - /// Connect to a Faktory server. - /// - /// If `url` is not given, will use the standard Faktory environment variables. Specifically, - /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address - /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the - /// server address. If the latter environment variable is not defined, the connection will be - /// made to - /// - /// ```text - /// tcp://localhost:7419 - /// ``` - /// - /// If `url` is given, but does not specify a port, it defaults to 7419. - pub fn connect(url: Option<&str>) -> Result { - let c = Client::connect(url)?; - Ok(Producer { c }) - } -} - -impl Producer { - /// Connect to a Faktory server with a non-standard stream. - pub fn connect_with(stream: S, pwd: Option) -> Result, Error> { - let c = Client::connect_with(stream, pwd)?; - Ok(Producer { c }) - } - - /// Enqueue the given job on the Faktory server. - /// - /// Returns `Ok` if the job was successfully queued by the Faktory server. - pub fn enqueue(&mut self, job: Job) -> Result<(), Error> { - self.c.issue(&Push::from(job))?.await_ok() - } - - /// Enqueue numerous jobs on the Faktory server. - /// - /// Provided you have numerous jobs to submit, using this method will be more efficient as compared - /// to calling [`enqueue`](Producer::enqueue) multiple times. - /// - /// The returned `Ok` result will contain a tuple of enqueued jobs count and an option of a hash map - /// with job ids mapped onto error messages. Therefore `Ok(n, None)` will indicate that all n jobs - /// have been enqueued without errors. - /// - /// Note that this is not an all-or-nothing operation: jobs that contain errors will not be enqueued, - /// while those that are error-free _will_ be enqueued by the Faktory server. - pub fn enqueue_many( - &mut self, - jobs: J, - ) -> Result<(usize, Option>), Error> - where - J: IntoIterator, - J::IntoIter: ExactSizeIterator, - { - let jobs = jobs.into_iter(); - let jobs_count = jobs.len(); - let errors: HashMap = self - .c - .issue(&PushBulk::from(jobs.collect::>()))? - .read_json()? - .expect("Faktory server sends {} literal when there are no errors"); - if errors.is_empty() { - return Ok((jobs_count, None)); - } - Ok((jobs_count - errors.len(), Some(errors))) - } - - /// Retrieve information about the running server. - /// - /// The returned value is the result of running the `INFO` command on the server. - pub fn info(&mut self) -> Result { - self.c - .issue(&Info)? - .read_json() - .map(|v| v.expect("info command cannot give empty response")) - } - - /// Pause the given queues. - pub fn queue_pause>(&mut self, queues: &[T]) -> Result<(), Error> { - self.c - .issue(&QueueControl::new(QueueAction::Pause, queues))? 
- .await_ok() - } - - /// Resume the given queues. - pub fn queue_resume>(&mut self, queues: &[T]) -> Result<(), Error> { - self.c - .issue(&QueueControl::new(QueueAction::Resume, queues))? - .await_ok() - } - - /// Initiate a new batch of jobs. - #[cfg(feature = "ent")] - #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] - pub fn start_batch(&mut self, batch: Batch) -> Result, Error> { - let bid = self.c.issue(&batch)?.read_bid()?; - Ok(BatchHandle::new(bid, self)) - } - - /// Open an already existing batch of jobs. - /// - /// This will not error if a batch with the provided `bid` does not exist, - /// rather `Ok(None)` will be returned. - #[cfg(feature = "ent")] - #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] - pub fn open_batch(&mut self, bid: String) -> Result>, Error> { - let bid = self.c.issue(&OpenBatch::from(bid))?.maybe_bid()?; - Ok(bid.map(|bid| BatchHandle::new(bid, self))) - } - - #[cfg(feature = "ent")] - pub(crate) fn commit_batch(&mut self, bid: String) -> Result<(), Error> { - self.c.issue(&CommitBatch::from(bid))?.await_ok() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - // https://github.com/rust-lang/rust/pull/42219 - //#[allow_fail] - #[ignore] - fn it_works() { - let mut p = Producer::connect(None).unwrap(); - p.enqueue(Job::new("foobar", vec!["z"])).unwrap(); - } -} diff --git a/src/proto/batch/cmd.rs b/src/proto/batch/cmd.rs index a6c57367..1c1df52f 100644 --- a/src/proto/batch/cmd.rs +++ b/src/proto/batch/cmd.rs @@ -1,66 +1,52 @@ -use crate::ent::Batch; -use crate::proto::single::FaktoryCommand; -use crate::Error; -use std::io::Write; +use crate::error::Error; +use crate::proto::{single::FaktoryCommand, Batch, BatchId}; +use tokio::io::{AsyncWrite, AsyncWriteExt}; +#[async_trait::async_trait] impl FaktoryCommand for Batch { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"BATCH NEW ")?; - serde_json::to_writer(&mut *w, self).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) + async fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"BATCH NEW ").await?; + let r = serde_json::to_vec(self).map_err(Error::Serialization)?; + w.write_all(&r).await?; + Ok(w.write_all(b"\r\n").await?) } } -// ---------------------------------------------- - -pub struct CommitBatch(String); - -impl From for CommitBatch { - fn from(value: String) -> Self { - CommitBatch(value) - } -} - -impl FaktoryCommand for CommitBatch { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"BATCH COMMIT ")?; - w.write_all(self.0.as_bytes())?; - Ok(w.write_all(b"\r\n")?) - } +macro_rules! batch_cmd { + ($structure:ident, $cmd:expr) => { + impl> From for $structure { + fn from(value: B) -> Self { + $structure(value) + } + } + + #[async_trait::async_trait] + impl FaktoryCommand for $structure + where + B: AsRef + Sync, + { + async fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"BATCH ").await?; + w.write_all($cmd.as_bytes()).await?; + w.write_all(b" ").await?; + w.write_all(self.0.as_ref().as_bytes()).await?; + Ok(w.write_all(b"\r\n").await?) 
+ } + } + }; } -// ---------------------------------------------- +pub(crate) struct CommitBatch(B) +where + B: AsRef; +batch_cmd!(CommitBatch, "COMMIT"); -pub struct GetBatchStatus(String); +pub(crate) struct GetBatchStatus(B) +where + B: AsRef; +batch_cmd!(GetBatchStatus, "STATUS"); -impl From for GetBatchStatus { - fn from(value: String) -> Self { - GetBatchStatus(value) - } -} - -impl FaktoryCommand for GetBatchStatus { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"BATCH STATUS ")?; - w.write_all(self.0.as_bytes())?; - Ok(w.write_all(b"\r\n")?) - } -} - -// ---------------------------------------------- - -pub struct OpenBatch(String); - -impl From for OpenBatch { - fn from(value: String) -> Self { - OpenBatch(value) - } -} - -impl FaktoryCommand for OpenBatch { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"BATCH OPEN ")?; - w.write_all(self.0.as_bytes())?; - Ok(w.write_all(b"\r\n")?) - } -} +pub(crate) struct OpenBatch(B) +where + B: AsRef; +batch_cmd!(OpenBatch, "OPEN"); diff --git a/src/proto/batch/handle.rs b/src/proto/batch/handle.rs new file mode 100644 index 00000000..5015510d --- /dev/null +++ b/src/proto/batch/handle.rs @@ -0,0 +1,50 @@ +use crate::error::Error; +use crate::proto::{Batch, BatchId, Client, Job}; +use tokio::io::{AsyncBufRead, AsyncWrite}; + +/// Represents a newly started or re-opened batch of jobs. +pub struct BatchHandle<'a, S: AsyncWrite + Unpin + Send> { + bid: BatchId, + c: &'a mut Client, +} + +impl<'a, S: AsyncWrite + Unpin + Send> BatchHandle<'a, S> { + pub(crate) fn new(bid: BatchId, c: &mut Client) -> BatchHandle<'_, S> { + BatchHandle { bid, c } + } +} + +impl<'a, S: AsyncWrite + Unpin + Send> BatchHandle<'a, S> { + /// ID issued by the Faktory server to this batch. + pub fn id(&self) -> &BatchId { + &self.bid + } +} + +impl<'a, S: AsyncBufRead + AsyncWrite + Unpin + Send> BatchHandle<'a, S> { + /// Add the given job to the batch. + /// + /// Should the submitted job - for whatever reason - already have a `bid` key present in its custom hash, + /// this value will be overwritten by the ID of the batch this job is being added to with the old value + /// returned as `Some()`. + pub async fn add(&mut self, mut job: Job) -> Result, Error> { + let bid = job.custom.insert("bid".into(), self.bid.clone().into()); + self.c.enqueue(job).await.map(|_| bid) + } + + /// Initiate a child batch of jobs. + pub async fn start_batch(&mut self, mut batch: Batch) -> Result, Error> { + batch.parent_bid = Some(self.bid.clone()); + self.c.start_batch(batch).await + } + + /// Commit this batch. + /// + /// The Faktory server will not queue any callbacks, unless the batch is committed. + /// Committing an empty batch will make the server queue the callback(s) right away. + /// Once committed, the batch can still be re-opened with [open_batch](Client::open_batch), + /// and extra jobs can be added to it. 
+ pub async fn commit(self) -> Result<(), Error> { + self.c.commit_batch(&self.bid).await + } +} diff --git a/src/proto/batch/mod.rs b/src/proto/batch/mod.rs index cd1134e1..a375d18c 100644 --- a/src/proto/batch/mod.rs +++ b/src/proto/batch/mod.rs @@ -1,14 +1,17 @@ #[cfg(doc)] use crate::Client; -use crate::{Error, Job, Producer}; -use chrono::{DateTime, Utc}; +use crate::proto::{BatchId, Job}; use derive_builder::Builder; -use std::io::{Read, Write}; mod cmd; +mod handle; +mod status; -pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; +pub use handle::BatchHandle; +pub use status::{BatchStatus, CallbackState}; + +pub(crate) use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// Batch of jobs. /// @@ -27,10 +30,11 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// /// Here is how you can create a simple batch: /// ```no_run +/// # tokio_test::block_on(async { /// # use faktory::Error; -/// use faktory::{Producer, Job, ent::Batch}; +/// use faktory::{Client, Job, ent::Batch}; /// -/// let mut prod = Producer::connect(None)?; +/// let mut cl = Client::connect(None).await?; /// let job1 = Job::builder("job_type").build(); /// let job2 = Job::builder("job_type").build(); /// let job_cb = Job::builder("callback_job_type").build(); @@ -39,19 +43,21 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// .description("Batch description") /// .with_complete_callback(job_cb); /// -/// let mut batch = prod.start_batch(batch)?; -/// batch.add(job1)?; -/// batch.add(job2)?; -/// batch.commit()?; +/// let mut batch = cl.start_batch(batch).await?; +/// batch.add(job1).await?; +/// batch.add(job2).await?; +/// batch.commit().await?; /// /// # Ok::<(), Error>(()) +/// # }); /// ``` /// /// Nested batches are also supported: /// ```no_run -/// # use faktory::{Producer, Job, Error}; +/// # tokio_test::block_on(async { +/// # use faktory::{Client, Job, Error}; /// # use faktory::ent::Batch; -/// # let mut prod = Producer::connect(None)?; +/// # let mut cl = Client::connect(None).await?; /// let parent_job1 = Job::builder("job_type").build(); /// let parent_job2 = Job::builder("another_job_type").build(); /// let parent_cb = Job::builder("callback_job_type").build(); @@ -67,17 +73,18 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// .description("Child batch description") /// .with_success_callback(child_cb); /// -/// let mut parent = prod.start_batch(parent_batch)?; -/// parent.add(parent_job1)?; -/// parent.add(parent_job2)?; -/// let mut child = parent.start_batch(child_batch)?; -/// child.add(child_job1)?; -/// child.add(child_job2)?; +/// let mut parent = cl.start_batch(parent_batch).await?; +/// parent.add(parent_job1).await?; +/// parent.add(parent_job2).await?; +/// let mut child = parent.start_batch(child_batch).await?; +/// child.add(child_job1).await?; +/// child.add(child_job2).await?; /// -/// child.commit()?; -/// parent.commit()?; +/// child.commit().await?; +/// parent.commit().await?; /// /// # Ok::<(), Error>(()) +/// }); /// ``` /// /// In the example above, there is a single level nesting, but you can nest those batches as deep as you wish, @@ -87,22 +94,23 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// You can retieve the batch status using a [`Client`]: /// ```no_run /// # use faktory::Error; -/// # use faktory::{Producer, Job, Client}; +/// # use faktory::{Job, Client}; /// # use faktory::ent::{Batch, CallbackState}; -/// let mut prod = Producer::connect(None)?; +/// # tokio_test::block_on(async { +/// let mut cl = 
Client::connect(None).await?; /// let job = Job::builder("job_type").build(); /// let cb_job = Job::builder("callback_job_type").build(); /// let b = Batch::builder() /// .description("Batch description") /// .with_complete_callback(cb_job); /// -/// let mut b = prod.start_batch(b)?; -/// let bid = b.id().to_string(); -/// b.add(job)?; -/// b.commit()?; +/// let mut b = cl.start_batch(b).await?; +/// let bid = b.id().to_owned(); +/// b.add(job).await?; +/// b.commit().await?; /// -/// let mut t = Client::connect(None)?; -/// let s = t.get_batch_status(bid)?.unwrap(); +/// let mut t = Client::connect(None).await?; +/// let s = t.get_batch_status(bid).await?.unwrap(); /// assert_eq!(s.total, 1); /// assert_eq!(s.pending, 1); /// assert_eq!(s.description, Some("Batch description".into())); @@ -112,8 +120,9 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; /// _ => panic!("The jobs of this batch have not executed, so the callback job is expected to _not_ have fired"), /// } /// # Ok::<(), Error>(()) +/// }); /// ``` -#[derive(Builder, Debug, Serialize)] +#[derive(Builder, Default, Debug, Serialize)] #[builder( custom_constructor, pattern = "owned", @@ -123,7 +132,7 @@ pub use cmd::{CommitBatch, GetBatchStatus, OpenBatch}; pub struct Batch { #[serde(skip_serializing_if = "Option::is_none")] #[builder(setter(skip))] - parent_bid: Option, + parent_bid: Option, /// Batch description for Faktory WEB UI. #[serde(skip_serializing_if = "Option::is_none")] @@ -148,6 +157,12 @@ pub struct Batch { pub(crate) complete: Option, } +impl Default for BatchBuilder { + fn default() -> Self { + Self::new() + } +} + impl Batch { /// Create a new `BatchBuilder`. pub fn builder() -> BatchBuilder { @@ -197,149 +212,22 @@ impl BatchBuilder { impl Clone for BatchBuilder { fn clone(&self) -> Self { BatchBuilder { - parent_bid: self.parent_bid.clone(), + parent_bid: self.parent_bid, description: self.description.clone(), - success: self.success.clone(), - complete: self.complete.clone(), + success: self.success, + complete: self.complete, } } } -/// Represents a newly started or re-opened batch of jobs. -pub struct BatchHandle<'a, S: Read + Write> { - bid: String, - prod: &'a mut Producer, -} - -impl<'a, S: Read + Write> BatchHandle<'a, S> { - /// ID issued by the Faktory server to this batch. - pub fn id(&self) -> &str { - self.bid.as_ref() - } - - pub(crate) fn new(bid: String, prod: &mut Producer) -> BatchHandle<'_, S> { - BatchHandle { bid, prod } - } - - /// Add the given job to the batch. - /// - /// Should the submitted job - for whatever reason - already have a `bid` key present in its custom hash, - /// this value will be overwritten by the ID of the batch this job is being added to with the old value - /// returned as `Some()`. - pub fn add(&mut self, mut job: Job) -> Result, Error> { - let bid = job.custom.insert("bid".into(), self.bid.clone().into()); - self.prod.enqueue(job).map(|_| bid) - } - - /// Initiate a child batch of jobs. - pub fn start_batch(&mut self, mut batch: Batch) -> Result, Error> { - batch.parent_bid = Some(self.bid.clone()); - self.prod.start_batch(batch) - } - - /// Commit this batch. - /// - /// The Faktory server will not queue any callbacks, unless the batch is committed. - /// Committing an empty batch will make the server queue the callback(s) right away. - /// Once committed, the batch can still be re-opened with [open_batch](Producer::open_batch), - /// and extra jobs can be added to it. 
- pub fn commit(self) -> Result<(), Error> { - self.prod.commit_batch(self.bid) - } -} - -// Not documented, but existing de fakto and also mentioned in the official client -// https://github.com/contribsys/faktory/blob/main/client/batch.go#L17-L19 -/// State of a `callback` job of a [`Batch`]. -#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq)] -#[non_exhaustive] -pub enum CallbackState { - /// Not enqueued yet. - #[serde(rename = "")] - Pending, - /// Enqueued by the server, because the jobs belonging to this batch have finished executing. - /// If a callback has been consumed, it's status is still `Enqueued`. - /// If a callback has finished with failure, it's status remains `Enqueued`. - #[serde(rename = "1")] - Enqueued, - /// The enqueued callback job has been consumed and successfully executed. - #[serde(rename = "2")] - FinishedOk, -} - -impl std::fmt::Display for CallbackState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - use CallbackState::*; - let s = match self { - Pending => "Pending", - Enqueued => "Enqueued", - FinishedOk => "FinishedOk", - }; - write!(f, "{}", s) - } -} - -/// Batch status retrieved from Faktory server. -#[derive(Deserialize, Debug)] -pub struct BatchStatus { - // Fields "bid", "created_at", "description", "total", "pending", and "failed" - // are described in the docs: https://github.com/contribsys/faktory/wiki/Ent-Batches#status - /// Id of this batch. - pub bid: String, - - /// Batch creation date and time. - pub created_at: DateTime, - - /// Batch description, if any. - pub description: Option, - - /// Number of jobs in this batch. - pub total: usize, - - /// Number of pending jobs. - pub pending: usize, - - /// Number of failed jobs. - pub failed: usize, - - // The official golang client also mentions "parent_bid', "complete_st", and "success_st": - // https://github.com/contribsys/faktory/blob/main/client/batch.go#L8-L22 - /// Id of the parent batch, provided this batch is a child ("nested") batch. - pub parent_bid: Option, - - /// State of the `complete` callback. - /// - /// See [with_complete_callback](struct.BatchBuilder.html#method.with_complete_callback). - #[serde(rename = "complete_st")] - pub complete_callback_state: CallbackState, - - /// State of the `success` callback. - /// - /// See [with_success_callback](struct.BatchBuilder.html#method.with_success_callback). - #[serde(rename = "success_st")] - pub success_callback_state: CallbackState, -} - -#[cfg(feature = "ent")] -#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] -impl<'a> BatchStatus { - /// Open the batch for which this `BatchStatus` has been retrieved. - /// - /// See [`open_batch`](Producer::open_batch). 
- pub fn open( - &self, - prod: &'a mut Producer, - ) -> Result>, Error> { - prod.open_batch(self.bid.clone()) - } -} - #[cfg(test)] mod test { use std::str::FromStr; use chrono::{DateTime, Utc}; + use crate::JobId; + use super::*; #[test] @@ -375,7 +263,7 @@ mod test { #[test] fn test_batch_serialized_correctly() { let prepare_test_job = |jobtype: String| { - let jid = "LFluKy1Baak83p54"; + let jid = JobId::new("LFluKy1Baak83p54"); let dt = "2023-12-22T07:00:52.546258624Z"; let created_at = DateTime::::from_str(dt).unwrap(); Job::builder(jobtype) diff --git a/src/proto/batch/status.rs b/src/proto/batch/status.rs new file mode 100644 index 00000000..b1c66332 --- /dev/null +++ b/src/proto/batch/status.rs @@ -0,0 +1,92 @@ +#[cfg(doc)] +use super::Batch; + +use super::BatchHandle; +use crate::error::Error; +use crate::proto::{BatchId, Client}; +use chrono::{DateTime, Utc}; +use tokio::io::{AsyncBufRead, AsyncWrite}; + +// Not documented, but existing de fakto and also mentioned in the official client +// https://github.com/contribsys/faktory/blob/main/client/batch.go#L17-L19 +/// State of a `callback` job of a [`Batch`]. +#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq)] +#[non_exhaustive] +pub enum CallbackState { + /// Not enqueued yet. + #[serde(rename = "")] + Pending, + /// Enqueued by the server, because the jobs belonging to this batch have finished executing. + /// If a callback has been consumed, it's status is still `Enqueued`. + /// If a callback has finished with failure, it's status remains `Enqueued`. + #[serde(rename = "1")] + Enqueued, + /// The enqueued callback job has been consumed and successfully executed. + #[serde(rename = "2")] + FinishedOk, +} + +impl std::fmt::Display for CallbackState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + use CallbackState::*; + let s = match self { + Pending => "Pending", + Enqueued => "Enqueued", + FinishedOk => "FinishedOk", + }; + write!(f, "{}", s) + } +} + +/// Batch status retrieved from Faktory server. +#[derive(Deserialize, Debug)] +pub struct BatchStatus { + // Fields "bid", "created_at", "description", "total", "pending", and "failed" + // are described in the docs: https://github.com/contribsys/faktory/wiki/Ent-Batches#status + /// Id of this batch. + pub bid: BatchId, + + /// Batch creation date and time. + pub created_at: DateTime, + + /// Batch description, if any. + pub description: Option, + + /// Number of jobs in this batch. + pub total: usize, + + /// Number of pending jobs. + pub pending: usize, + + /// Number of failed jobs. + pub failed: usize, + + // The official golang client also mentions "parent_bid', "complete_st", and "success_st": + // https://github.com/contribsys/faktory/blob/main/client/batch.go#L8-L22 + /// Id of the parent batch, provided this batch is a child ("nested") batch. + pub parent_bid: Option, + + /// State of the `complete` callback. + /// + /// See [with_complete_callback](struct.BatchBuilder.html#method.with_complete_callback). + #[serde(rename = "complete_st")] + pub complete_callback_state: CallbackState, + + /// State of the `success` callback. + /// + /// See [with_success_callback](struct.BatchBuilder.html#method.with_success_callback). + #[serde(rename = "success_st")] + pub success_callback_state: CallbackState, +} + +impl<'a> BatchStatus { + /// Open the batch for which this `BatchStatus` has been retrieved. + /// + /// See [`open_batch`](Client::open_batch). 
+ pub async fn open( + &self, + prod: &'a mut Client, + ) -> Result>, Error> { + prod.open_batch(&self.bid).await + } +} diff --git a/src/proto/client/ent.rs b/src/proto/client/ent.rs new file mode 100644 index 00000000..2b54269a --- /dev/null +++ b/src/proto/client/ent.rs @@ -0,0 +1,75 @@ +use super::super::batch::{CommitBatch, GetBatchStatus, OpenBatch}; +use super::super::{single, BatchStatus, JobId, Progress, ProgressUpdate, Track}; +use super::{Client, ReadToken}; +use crate::ent::{Batch, BatchHandle, BatchId}; +use crate::error::{self, Error}; +use tokio::io::{AsyncBufRead, AsyncWrite}; + +impl Client { + /// Send information on a job's execution progress to Faktory. + pub async fn set_progress(&mut self, upd: ProgressUpdate) -> Result<(), Error> { + let cmd = Track::Set(upd); + self.issue(&cmd).await?.read_ok().await + } + + /// Fetch information on a job's execution progress from Faktory. + pub async fn get_progress(&mut self, jid: JobId) -> Result, Error> { + let cmd = Track::Get(jid); + self.issue(&cmd).await?.read_json().await + } + + /// Fetch information on a batch of jobs execution progress. + pub async fn get_batch_status(&mut self, bid: B) -> Result, Error> + where + B: AsRef + Sync, + { + let cmd = GetBatchStatus::from(&bid); + self.issue(&cmd).await?.read_json().await + } + + /// Initiate a new batch of jobs. + pub async fn start_batch(&mut self, batch: Batch) -> Result, Error> { + let bid = self.issue(&batch).await?.read_bid().await?; + Ok(BatchHandle::new(bid, self)) + } + + /// Open an already existing batch of jobs. + /// + /// This will not error if a batch with the provided `bid` does not exist, + /// rather `Ok(None)` will be returned. + pub async fn open_batch(&mut self, bid: B) -> Result>, Error> + where + B: AsRef + Sync, + { + let bid = self.issue(&OpenBatch::from(bid)).await?.maybe_bid().await?; + Ok(bid.map(|bid| BatchHandle::new(bid, self))) + } + + pub(crate) async fn commit_batch(&mut self, bid: B) -> Result<(), Error> + where + B: AsRef + Sync, + { + self.issue(&CommitBatch::from(bid)).await?.read_ok().await + } +} + +impl<'a, S: AsyncBufRead + AsyncWrite + Unpin + Send> ReadToken<'a, S> { + pub(crate) async fn read_bid(self) -> Result { + single::read_bid(&mut self.0.stream).await + } + + pub(crate) async fn maybe_bid(self) -> Result, Error> { + match single::read_bid(&mut self.0.stream).await { + Ok(bid) => Ok(Some(bid)), + Err(err) => match err { + Error::Protocol(error::Protocol::Internal { msg }) => { + if msg.starts_with("No such batch") { + return Ok(None); + } + Err(error::Protocol::Internal { msg }.into()) + } + another => Err(another), + }, + } + } +} diff --git a/src/proto/client/mod.rs b/src/proto/client/mod.rs new file mode 100644 index 00000000..7d256571 --- /dev/null +++ b/src/proto/client/mod.rs @@ -0,0 +1,410 @@ +#[cfg(feature = "ent")] +#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] +mod ent; + +#[cfg(doc)] +use crate::proto::{BatchStatus, Progress, ProgressUpdate}; + +use super::{single, Info, Push, QueueAction, QueueControl, Reconnect}; +use super::{utils, PushBulk}; +use crate::error::{self, Error}; +use crate::{Job, WorkerId}; +use std::collections::HashMap; +use tokio::io::{AsyncBufRead, AsyncRead, AsyncWrite, BufStream}; +use tokio::net::TcpStream as TokioStream; + +mod options; +pub(crate) use options::ClientOptions; + +pub(crate) const EXPECTED_PROTOCOL_VERSION: usize = 2; + +fn check_protocols_match(ver: usize) -> Result<(), Error> { + if ver != EXPECTED_PROTOCOL_VERSION { + return Err(error::Connect::VersionMismatch { + ours: 
EXPECTED_PROTOCOL_VERSION, + theirs: ver, + } + .into()); + } + Ok(()) +} + +/// `Client` is used to enqueue new jobs that will in turn be processed by Faktory workers. +/// +/// # Connecting to Faktory +/// +/// To issue jobs, the `Client` must first be connected to the Faktory server. Exactly how you do +/// that depends on your setup. Faktory suggests using the `FAKTORY_PROVIDER` and `FAKTORY_URL` +/// environment variables (see their docs for more information) with `localhost:7419` as the +/// fallback default. If you want this behavior, pass `None` to [`Client::connect`](Client::connect). +/// If not, you can supply the URL directly in the form: +/// +/// ```text +/// protocol://[:password@]hostname[:port] +/// ``` +/// +/// +/// # Issuing jobs +/// +/// Most of the lifetime of a `Client` will be spent creating and enqueueing jobs for Faktory +/// workers. This is done by passing a [`Job`](struct.Job.html) to +/// [`Client::enqueue`](Client::enqueue). The most important part of a `Job` +/// is its `kind`; this field dictates how workers will execute the job when they receive it. The +/// string provided here must match a handler registered on the worker using +/// [`WorkerBuilder::register`](struct.WorkerBuilder.html#method.register) (or the equivalent +/// handler registration method in workers written in other languages). +/// +/// Since Faktory workers do not all need to be the same (you could have some written in Rust for +/// performance-critical tasks, some in Ruby for more webby tasks, etc.), it may be the case that a +/// given job can only be executed by some workers (e.g., if they job type is not registered at +/// others). To allow for this, Faktory includes a `labels` field with each job. Jobs will only be +/// sent to workers whose labels (see +/// [`WorkerBuilder::labels`](struct.WorkerBuilder.html#method.labels)) match those set in +/// `Job::labels`. +/// +/// # Examples +/// +/// Connecting to an unsecured Faktory server using environment variables: +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::Client; +/// let p = Client::connect(None).await.unwrap(); +/// # }); +/// ``` +/// +/// Connecting to a secured Faktory server using an explicit URL: +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::Client; +/// let p = Client::connect(Some("tcp://:hunter2@localhost:7439")).await.unwrap(); +/// # }) +/// ``` +/// +/// Issuing a job using a `Client`: +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// # use faktory::Client; +/// # let mut client = Client::connect(None).await.unwrap(); +/// use faktory::Job; +/// client.enqueue(Job::new("foobar", vec!["z"])).await.unwrap(); +/// # }); +/// ``` +/// +/// `Client` is also useful for retrieving and updating information on a job's execution progress +/// (see [`Progress`] and [`ProgressUpdate`]), as well for retrieving a batch's status +/// from the Faktory server (see [`BatchStatus`]). But these constructs are only available under `ent` feature +/// and are only supported by Enterprise Faktory. +/// +/// Fetching a job's execution progress: +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::{Client, JobId, ent::JobState}; +/// let job_id = JobId::new("W8qyVle9vXzUWQOf"); +/// let mut cl = Client::connect(None).await?; +/// if let Some(progress) = cl.get_progress(job_id).await? { +/// if let JobState::Success = progress.state { +/// # /* +/// ... 
+/// # */ +/// } +/// } +/// # Ok::<(), faktory::Error>(()) +/// }); +/// ``` +/// +/// Sending an update on a job's execution progress: +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::{Client, JobId, ent::ProgressUpdateBuilder}; +/// let jid = JobId::new("W8qyVle9vXzUWQOf"); +/// let mut cl = Client::connect(None).await?; +/// let progress = ProgressUpdateBuilder::new(jid) +/// .desc("Almost done...".to_owned()) +/// .percent(99) +/// .build(); +/// cl.set_progress(progress).await?; +/// # Ok::<(), faktory::Error>(()) +/// }); +///```` +/// +/// Fetching a batch's status: +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::{Client, ent::BatchId}; +/// let bid = BatchId::new("W8qyVle9vXzUWQOg"); +/// let mut cl = Client::connect(None).await?; +/// if let Some(status) = cl.get_batch_status(bid).await? { +/// println!("This batch created at {}", status.created_at); +/// } +/// # Ok::<(), faktory::Error>(()) +/// }); +/// ``` +pub struct Client { + stream: S, + opts: ClientOptions, +} + +impl Client +where + S: AsyncBufRead + AsyncWrite + Unpin + Send + Reconnect, +{ + pub(crate) async fn connect_again(&mut self) -> Result { + let s = self.stream.reconnect().await?; + Client::new(s, self.opts.clone()).await + } + + pub(crate) async fn reconnect(&mut self) -> Result<(), Error> { + self.stream = self.stream.reconnect().await?; + self.init().await + } +} + +impl Drop for Client +where + S: AsyncWrite + Unpin + Send, +{ + fn drop(&mut self) { + tokio::task::block_in_place(|| { + tokio::runtime::Handle::current().block_on(async { + single::write_command(&mut self.stream, &single::End) + .await + .unwrap(); + }) + }); + } +} + +pub(crate) enum HeartbeatStatus { + Ok, + Terminate, + Quiet, +} + +impl Client> { + /// Create new [`Client`] and connect to a Faktory server with a non-standard stream. + pub async fn connect_with( + stream: S, + pwd: Option, + ) -> Result>, Error> { + let buffered = BufStream::new(stream); + let opts = ClientOptions { + password: pwd, + ..Default::default() + }; + Client::new(buffered, opts).await + } +} + +impl Client> { + /// Create new [`Client`] and connect to a Faktory server. + /// + /// If `url` is not given, will use the standard Faktory environment variables. Specifically, + /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address + /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the + /// server address. 
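When the application establishes the TCP connection itself (for instance to control timeouts or socket options), the stream can be handed to `connect_with` directly. A minimal sketch, assuming a Tokio runtime; the address, job type, and arguments are placeholders:

```rust
use faktory::{Client, Job};
use tokio::net::TcpStream;

#[tokio::main]
async fn main() -> Result<(), faktory::Error> {
    // Open the connection ourselves ...
    let stream = TcpStream::connect("localhost:7419").await?;
    // ... and pass it on, optionally together with the server password.
    let mut client = Client::connect_with(stream, None).await?;
    client.enqueue(Job::new("example.ping", vec!["hello"])).await?;
    Ok(())
}
```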
If the latter environment variable is not defined, the connection will be + /// made to + /// + /// ```text + /// tcp://localhost:7419 + /// ``` + pub async fn connect(url: Option<&str>) -> Result>, Error> { + let url = utils::parse_provided_or_from_env(url)?; + let stream = TokioStream::connect(utils::host_from_url(&url)).await?; + Self::connect_with(stream, url.password().map(|p| p.to_string())).await + } +} + +impl Client +where + S: AsyncBufRead + AsyncWrite + Unpin + Send, +{ + async fn init(&mut self) -> Result<(), Error> { + let hi = single::read_hi(&mut self.stream).await?; + check_protocols_match(hi.version)?; + + // fill in any missing options, and remember them for re-connect + let mut hello = single::Hello::default(); + + // prepare password hash, if one expected by 'Faktory' + if hi.salt.is_some() { + if let Some(ref pwd) = self.opts.password { + hello.set_password(&hi, pwd); + } else { + return Err(error::Connect::AuthenticationNeeded.into()); + } + } + + if self.opts.is_worker { + // fill in any missing options, and remember them for re-connect + let hostname = self + .opts + .hostname + .clone() + .or_else(|| hostname::get().ok()?.into_string().ok()) + .unwrap_or_else(|| "local".to_string()); + self.opts.hostname = Some(hostname); + let pid = self.opts.pid.unwrap_or_else(|| std::process::id() as usize); + self.opts.pid = Some(pid); + let wid = self.opts.wid.clone().unwrap_or_else(WorkerId::random); + self.opts.wid = Some(wid); + + hello.hostname = Some(self.opts.hostname.clone().unwrap()); + hello.wid = Some(self.opts.wid.clone().unwrap()); + hello.pid = Some(self.opts.pid.unwrap()); + hello.labels.clone_from(&self.opts.labels); + } + + single::write_command_and_await_ok(&mut self.stream, &hello).await?; + Ok(()) + } + + pub(crate) async fn new(stream: S, opts: ClientOptions) -> Result, Error> { + let mut c = Client { stream, opts }; + c.init().await?; + Ok(c) + } + + pub(crate) async fn issue( + &mut self, + c: &FC, + ) -> Result, Error> { + single::write_command(&mut self.stream, c).await?; + Ok(ReadToken(self)) + } + + pub(crate) async fn fetch(&mut self, queues: &[Q]) -> Result, Error> + where + Q: AsRef + Sync, + { + self.issue(&single::Fetch::from(queues)) + .await? + .read_json() + .await + } + + pub(crate) async fn heartbeat(&mut self) -> Result { + single::write_command( + &mut self.stream, + &single::Heartbeat::new(self.opts.wid.as_ref().unwrap().clone()), + ) + .await?; + + match single::read_json::<_, serde_json::Value>(&mut self.stream).await? { + None => Ok(HeartbeatStatus::Ok), + Some(s) => match s + .as_object() + .and_then(|m| m.get("state")) + .and_then(|s| s.as_str()) + { + Some("terminate") => Ok(HeartbeatStatus::Terminate), + Some("quiet") => Ok(HeartbeatStatus::Quiet), + _ => Err(error::Protocol::BadType { + expected: "heartbeat response", + received: format!("{}", s), + } + .into()), + }, + } + } +} + +impl Client +where + S: AsyncBufRead + AsyncWrite + Unpin + Send, +{ + /// Enqueue the given job on the Faktory server. + /// + /// Returns `Ok` if the job was successfully queued by the Faktory server. + pub async fn enqueue(&mut self, job: Job) -> Result<(), Error> { + self.issue(&Push::from(job)).await?.read_ok().await + } + + /// Enqueue numerous jobs on the Faktory server. + /// + /// Provided you have numerous jobs to submit, using this method will be more efficient as compared + /// to calling [`enqueue`](Client::enqueue) multiple times. 
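A rough sketch of pushing several jobs in one round trip; the job type and arguments are made up, a Tokio runtime is assumed, and the returned tuple is interpreted as described next:

```rust
use faktory::{Client, Job};

#[tokio::main]
async fn main() -> Result<(), faktory::Error> {
    let mut client = Client::connect(None).await?;

    // Two jobs of a made-up kind with made-up arguments.
    let jobs = vec![
        Job::builder("image.resize").args(vec!["/tmp/a.png"]).build(),
        Job::builder("image.resize").args(vec!["/tmp/b.png"]).build(),
    ];

    // One PUSHB round trip instead of one PUSH per job.
    let (enqueued, errors) = client.enqueue_many(jobs).await?;
    println!("enqueued {} job(s)", enqueued);
    if let Some(errors) = errors {
        eprintln!("{} job(s) were rejected: {:?}", errors.len(), errors);
    }
    Ok(())
}
```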
+ /// + /// The returned `Ok` result will contain a tuple of enqueued jobs count and an option of a hash map + /// with job ids mapped onto error messages. Therefore `Ok(n, None)` will indicate that all n jobs + /// have been enqueued without errors. + /// + /// Note that this is not an all-or-nothing operation: jobs that contain errors will not be enqueued, + /// while those that are error-free _will_ be enqueued by the Faktory server. + pub async fn enqueue_many( + &mut self, + jobs: J, + ) -> Result<(usize, Option>), Error> + where + J: IntoIterator, + J::IntoIter: ExactSizeIterator, + { + let jobs = jobs.into_iter(); + let jobs_count = jobs.len(); + let errors: HashMap = self + .issue(&PushBulk::from(jobs.collect::>())) + .await? + .read_json() + .await? + .expect("Faktory server sends {} literal when there are no errors"); + if errors.is_empty() { + return Ok((jobs_count, None)); + } + Ok((jobs_count - errors.len(), Some(errors))) + } + + /// Retrieve information about the running server. + /// + /// The returned value is the result of running the `INFO` command on the server. + pub async fn info(&mut self) -> Result { + self.issue(&Info) + .await? + .read_json() + .await + .map(|v| v.expect("info command cannot give empty response")) + } + + /// Pause the given queues. + pub async fn queue_pause(&mut self, queues: &[Q]) -> Result<(), Error> + where + Q: AsRef + Sync, + { + self.issue(&QueueControl::new(QueueAction::Pause, queues)) + .await? + .read_ok() + .await + } + + /// Resume the given queues. + pub async fn queue_resume(&mut self, queues: &[Q]) -> Result<(), Error> + where + Q: AsRef + Sync, + { + self.issue(&QueueControl::new(QueueAction::Resume, queues)) + .await? + .read_ok() + .await + } +} + +pub struct ReadToken<'a, S>(pub(crate) &'a mut Client) +where + S: AsyncWrite + Unpin + Send; + +impl<'a, S: AsyncBufRead + AsyncWrite + Unpin + Send> ReadToken<'a, S> { + pub(crate) async fn read_ok(self) -> Result<(), Error> { + single::read_ok(&mut self.0.stream).await + } + + pub(crate) async fn read_json(self) -> Result, Error> + where + T: serde::de::DeserializeOwned, + { + single::read_json(&mut self.0.stream).await + } +} diff --git a/src/proto/client/options.rs b/src/proto/client/options.rs new file mode 100644 index 00000000..aaa5e269 --- /dev/null +++ b/src/proto/client/options.rs @@ -0,0 +1,45 @@ +use crate::proto::WorkerId; + +#[derive(Clone, Debug)] +pub(crate) struct ClientOptions { + /// Hostname to advertise to server. + /// + /// Defaults to machine hostname. + pub(crate) hostname: Option, + + /// PID to advertise to server. + /// + /// Defaults to process ID. + pub(crate) pid: Option, + + /// Worker ID to advertise to server. + /// + /// Defaults to a GUID. + pub(crate) wid: Option, + + /// Labels to advertise to server. + /// + /// Defaults to ["rust"]. + pub(crate) labels: Vec, + + /// Password to authenticate with. + /// + /// Defaults to None. + pub(crate) password: Option, + + /// Whether this client is instatianted for a worker (i.e. to consume jobs). 
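The queue-control helpers above translate to `QUEUE PAUSE` and `QUEUE RESUME` commands. A minimal sketch, assuming a Tokio runtime; the queue names are made up:

```rust
use faktory::Client;

#[tokio::main]
async fn main() -> Result<(), faktory::Error> {
    let mut client = Client::connect(None).await?;

    // Pausing stops jobs from being handed out to workers; it does not delete them.
    client.queue_pause(&["emails", "reports"]).await?;
    // ... perform maintenance ...
    client.queue_resume(&["emails", "reports"]).await?;
    Ok(())
}
```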
+ pub(crate) is_worker: bool, +} + +impl Default for ClientOptions { + fn default() -> Self { + ClientOptions { + hostname: None, + pid: None, + wid: None, + labels: vec!["rust".to_string()], + password: None, + is_worker: false, + } + } +} diff --git a/src/proto/mod.rs b/src/proto/mod.rs index f7b6f3ec..5768b085 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -1,448 +1,53 @@ -#[cfg(doc)] -use crate::{Consumer, Producer}; - -use crate::error::{self, Error}; -use bufstream::BufStream; -use libc::getpid; use std::io; -use std::io::prelude::*; -use std::net::TcpStream; -use url::Url; +use tokio::io::BufStream; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::TcpStream as TokioStream; -pub(crate) const EXPECTED_PROTOCOL_VERSION: usize = 2; +mod client; +pub use client::Client; +pub(crate) use client::{ClientOptions, HeartbeatStatus, EXPECTED_PROTOCOL_VERSION}; mod single; -// commands that users can issue -pub use self::single::{ - Ack, Fail, Heartbeat, Info, Job, JobBuilder, Push, PushBulk, QueueAction, QueueControl, -}; +pub use single::{Job, JobBuilder, JobId, WorkerId}; + +pub(crate) use single::{Ack, Fail, Info, Push, PushBulk, QueueAction, QueueControl}; + +pub(crate) mod utils; #[cfg(feature = "ent")] pub use self::single::ent::{JobState, Progress, ProgressUpdate, ProgressUpdateBuilder, Track}; +#[cfg(feature = "ent")] +pub use self::single::BatchId; + #[cfg(feature = "ent")] mod batch; #[cfg(feature = "ent")] -pub use batch::{ - Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState, CommitBatch, GetBatchStatus, - OpenBatch, -}; - -pub(crate) fn get_env_url() -> String { - use std::env; - let var = env::var("FAKTORY_PROVIDER").unwrap_or_else(|_| "FAKTORY_URL".to_string()); - env::var(var).unwrap_or_else(|_| "tcp://localhost:7419".to_string()) -} - -pub(crate) fn host_from_url(url: &Url) -> String { - format!("{}:{}", url.host_str().unwrap(), url.port().unwrap_or(7419)) -} - -pub(crate) fn url_parse(url: &str) -> Result { - let url = Url::parse(url).map_err(error::Connect::ParseUrl)?; - if url.scheme() != "tcp" { - return Err(error::Connect::BadScheme { - scheme: url.scheme().to_string(), - } - .into()); - } - - if url.host_str().is_none() || url.host_str().unwrap().is_empty() { - return Err(error::Connect::MissingHostname.into()); - } - - Ok(url) -} - -pub(crate) fn parse_provided_or_from_env(url: Option<&str>) -> Result { - url_parse(url.unwrap_or(&get_env_url())) -} - -fn check_protocols_match(ver: usize) -> Result<(), Error> { - if ver != EXPECTED_PROTOCOL_VERSION { - return Err(error::Connect::VersionMismatch { - ours: EXPECTED_PROTOCOL_VERSION, - theirs: ver, - } - .into()); - } - Ok(()) -} +pub use batch::{Batch, BatchBuilder, BatchHandle, BatchStatus, CallbackState}; /// A stream that can be re-established after failing. +#[async_trait::async_trait] pub trait Reconnect: Sized { /// Re-establish the stream. - fn reconnect(&self) -> io::Result; + async fn reconnect(&mut self) -> io::Result; } -impl Reconnect for TcpStream { - fn reconnect(&self) -> io::Result { - TcpStream::connect(self.peer_addr().unwrap()) +#[async_trait::async_trait] +impl Reconnect for TokioStream { + async fn reconnect(&mut self) -> io::Result { + let addr = &self.peer_addr().expect("socket address"); + TokioStream::connect(addr).await } } -#[derive(Clone)] -pub(crate) struct ClientOptions { - /// Hostname to advertise to server. - /// Defaults to machine hostname. - pub(crate) hostname: Option, - - /// PID to advertise to server. - /// Defaults to process ID. 
- pub(crate) pid: Option, - - /// Worker ID to advertise to server. - /// Defaults to a GUID. - pub(crate) wid: Option, - - /// Labels to advertise to server. - /// Defaults to ["rust"]. - pub(crate) labels: Vec, - - /// Password to authenticate with - /// Defaults to None. - pub(crate) password: Option, - - /// Whether this client is instatianted for - /// a consumer ("worker" in Faktory terms). - pub(crate) is_worker: bool, -} - -impl Default for ClientOptions { - fn default() -> Self { - ClientOptions { - hostname: None, - pid: None, - wid: None, - labels: vec!["rust".to_string()], - password: None, - is_worker: false, - } - } -} - -/// A Faktory connection that represents neither a [`Producer`] nor a [`Consumer`]. -/// -/// Useful for retrieving and updating information on a job's execution progress -/// (see [`Progress`] and [`ProgressUpdate`]), as well for retrieving a batch's status -/// from the Faktory server (see [`BatchStatus`]). -/// -/// Fetching a job's execution progress: -/// ```no_run -/// use faktory::{Client, ent::JobState}; -/// let job_id = String::from("W8qyVle9vXzUWQOf"); -/// let mut cl = Client::connect(None)?; -/// if let Some(progress) = cl.get_progress(job_id)? { -/// if let JobState::Success = progress.state { -/// # /* -/// ... -/// # */ -/// } -/// } -/// # Ok::<(), faktory::Error>(()) -/// ``` -/// -/// Sending an update on a job's execution progress: -/// -/// ```no_run -/// use faktory::{Client, ent::ProgressUpdateBuilder}; -/// let jid = String::from("W8qyVle9vXzUWQOf"); -/// let mut cl = Client::connect(None)?; -/// let progress = ProgressUpdateBuilder::new(&jid) -/// .desc("Almost done...".to_owned()) -/// .percent(99) -/// .build(); -/// cl.set_progress(progress)?; -/// # Ok::<(), faktory::Error>(()) -///```` -/// -/// Fetching a batch's status: -/// -/// ```no_run -/// use faktory::Client; -/// let bid = String::from("W8qyVle9vXzUWQOg"); -/// let mut cl = Client::connect(None)?; -/// if let Some(status) = cl.get_batch_status(bid)? { -/// println!("This batch created at {}", status.created_at); -/// } -/// # Ok::<(), faktory::Error>(()) -/// ``` -pub struct Client { - stream: BufStream, - opts: ClientOptions, -} - -impl Client { - /// Create new [`Client`] and connect to a Faktory server. - /// - /// If `url` is not given, will use the standard Faktory environment variables. Specifically, - /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address - /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the - /// server address. 
If the latter environment variable is not defined, the connection will be - /// made to - /// - /// ```text - /// tcp://localhost:7419 - /// ``` - pub fn connect(url: Option<&str>) -> Result, Error> { - let url = parse_provided_or_from_env(url)?; - let stream = TcpStream::connect(host_from_url(&url))?; - Self::connect_with(stream, url.password().map(|p| p.to_string())) - } -} - -impl Client +#[async_trait::async_trait] +impl Reconnect for BufStream where - S: Read + Write + Reconnect, + S: AsyncRead + AsyncWrite + Reconnect + Send + Sync, { - pub(crate) fn connect_again(&self) -> Result { - let s = self.stream.get_ref().reconnect()?; - Client::new(s, self.opts.clone()) - } - - pub(crate) fn reconnect(&mut self) -> Result<(), Error> { - let s = self.stream.get_ref().reconnect()?; - self.stream = BufStream::new(s); - self.init() - } -} - -impl Client { - pub(crate) fn new(stream: S, opts: ClientOptions) -> Result, Error> { - let mut c = Client { - stream: BufStream::new(stream), - opts, - }; - c.init()?; - Ok(c) - } - - /// Create new [`Client`] and connect to a Faktory server with a non-standard stream. - pub fn connect_with(stream: S, pwd: Option) -> Result, Error> { - let opts = ClientOptions { - password: pwd, - ..Default::default() - }; - Client::new(stream, opts) - } -} - -impl Client { - fn init(&mut self) -> Result<(), Error> { - let hi = single::read_hi(&mut self.stream)?; - - check_protocols_match(hi.version)?; - - let mut hello = single::Hello::default(); - - // prepare password hash, if one expected by 'Faktory' - if hi.salt.is_some() { - if let Some(ref pwd) = self.opts.password { - hello.set_password(&hi, pwd); - } else { - return Err(error::Connect::AuthenticationNeeded.into()); - } - } - - if self.opts.is_worker { - // fill in any missing options, and remember them for re-connect - let hostname = self - .opts - .hostname - .clone() - .or_else(|| hostname::get().ok()?.into_string().ok()) - .unwrap_or_else(|| "local".to_string()); - self.opts.hostname = Some(hostname); - let pid = self - .opts - .pid - .unwrap_or_else(|| unsafe { getpid() } as usize); - self.opts.pid = Some(pid); - let wid = self.opts.wid.clone().unwrap_or_else(single::gen_random_wid); - self.opts.wid = Some(wid); - - hello.hostname = Some(self.opts.hostname.clone().unwrap()); - hello.wid = Some(self.opts.wid.clone().unwrap()); - hello.pid = Some(self.opts.pid.unwrap()); - hello.labels = self.opts.labels.clone(); - } - - single::write_command_and_await_ok(&mut self.stream, &hello) - } -} - -impl Drop for Client { - fn drop(&mut self) { - single::write_command(&mut self.stream, &single::End).unwrap(); - } -} - -#[cfg(feature = "ent")] -#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] -impl Client { - /// Send information on a job's execution progress to Faktory. - pub fn set_progress(&mut self, upd: ProgressUpdate) -> Result<(), Error> { - let cmd = Track::Set(upd); - self.issue(&cmd)?.await_ok() - } - - /// Fetch information on a job's execution progress from Faktory. - pub fn get_progress(&mut self, jid: String) -> Result, Error> { - let cmd = Track::Get(jid); - self.issue(&cmd)?.read_json() - } - - /// Fetch information on a batch of jobs execution progress. 
- pub fn get_batch_status(&mut self, bid: String) -> Result, Error> { - let cmd = GetBatchStatus::from(bid); - self.issue(&cmd)?.read_json() - } -} - -pub struct ReadToken<'a, S: Read + Write>(&'a mut Client); - -pub(crate) enum HeartbeatStatus { - Ok, - Terminate, - Quiet, -} - -impl Client { - pub(crate) fn issue( - &mut self, - c: &FC, - ) -> Result, Error> { - single::write_command(&mut self.stream, c)?; - Ok(ReadToken(self)) - } - - pub(crate) fn heartbeat(&mut self) -> Result { - single::write_command( - &mut self.stream, - &Heartbeat::new(&**self.opts.wid.as_ref().unwrap()), - )?; - - match single::read_json::<_, serde_json::Value>(&mut self.stream)? { - None => Ok(HeartbeatStatus::Ok), - Some(s) => match s - .as_object() - .and_then(|m| m.get("state")) - .and_then(|s| s.as_str()) - { - Some("terminate") => Ok(HeartbeatStatus::Terminate), - Some("quiet") => Ok(HeartbeatStatus::Quiet), - _ => Err(error::Protocol::BadType { - expected: "heartbeat response", - received: format!("{}", s), - } - .into()), - }, - } - } - - pub(crate) fn fetch(&mut self, queues: &[Q]) -> Result, Error> - where - Q: AsRef, - { - self.issue(&single::Fetch::from(queues))?.read_json() - } -} - -impl<'a, S: Read + Write> ReadToken<'a, S> { - pub(crate) fn await_ok(self) -> Result<(), Error> { - single::read_ok(&mut self.0.stream) - } - - pub(crate) fn read_json(self) -> Result, Error> - where - T: serde::de::DeserializeOwned, - { - single::read_json(&mut self.0.stream) - } - - #[cfg(feature = "ent")] - pub(crate) fn read_bid(self) -> Result { - single::read_bid(&mut self.0.stream) - } - - #[cfg(feature = "ent")] - pub(crate) fn maybe_bid(self) -> Result, Error> { - let bid_read_res = single::read_bid(&mut self.0.stream); - if bid_read_res.is_ok() { - return Ok(Some(bid_read_res.unwrap())); - } - match bid_read_res.unwrap_err() { - Error::Protocol(error::Protocol::Internal { msg }) => { - if msg.starts_with("No such batch") { - return Ok(None); - } - return Err(error::Protocol::Internal { msg }.into()); - } - another => Err(another), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - // https://github.com/rust-lang/rust/pull/42219 - //#[allow_fail] - #[ignore] - fn it_works() { - Client::new( - TcpStream::connect("localhost:7419").unwrap(), - ClientOptions::default(), - ) - .unwrap(); - } - - #[test] - fn correct_env_parsing() { - use std::env; - - if env::var_os("FAKTORY_URL").is_some() { - eprintln!("skipping test to avoid messing with user-set FAKTORY_URL"); - return; - } - - assert_eq!(get_env_url(), "tcp://localhost:7419"); - - env::set_var("FAKTORY_URL", "tcp://example.com:7500"); - assert_eq!(get_env_url(), "tcp://example.com:7500"); - - env::set_var("FAKTORY_PROVIDER", "URL"); - env::set_var("URL", "tcp://example.com:7501"); - assert_eq!(get_env_url(), "tcp://example.com:7501"); - } - - #[test] - fn url_port_default() { - use url::Url; - let url = Url::parse("tcp://example.com").unwrap(); - assert_eq!(host_from_url(&url), "example.com:7419"); - } - - #[test] - fn url_requires_tcp() { - url_parse("foobar").unwrap_err(); - } - - #[test] - fn url_requires_host() { - url_parse("tcp://:7419").unwrap_err(); - } - - #[test] - fn url_doesnt_require_port() { - url_parse("tcp://example.com").unwrap(); - } - - #[test] - fn url_can_take_password_and_port() { - url_parse("tcp://:foobar@example.com:7419").unwrap(); + async fn reconnect(&mut self) -> io::Result { + let stream = self.get_mut().reconnect().await?; + Ok(Self::new(stream)) } } diff --git a/src/proto/single/cmd.rs 
b/src/proto/single/cmd.rs index 800d3dd2..54fb7115 100644 --- a/src/proto/single/cmd.rs +++ b/src/proto/single/cmd.rs @@ -1,86 +1,90 @@ -use crate::{error::Error, Job}; -use std::io::prelude::*; +use crate::error::Error; +use crate::proto::{Job, JobId, WorkerId}; +use std::error::Error as StdError; +use tokio::io::{AsyncWrite, AsyncWriteExt}; +#[async_trait::async_trait] pub trait FaktoryCommand { - fn issue(&self, w: &mut W) -> Result<(), Error>; + async fn issue(&self, w: &mut W) -> Result<(), Error>; +} + +macro_rules! self_to_cmd { + ($struct:ident, $cmd:expr) => { + #[async_trait::async_trait] + impl FaktoryCommand for $struct { + async fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all($cmd.as_bytes()).await?; + w.write_all(b" ").await?; + let r = serde_json::to_vec(self).map_err(Error::Serialization)?; + w.write_all(&r).await?; + Ok(w.write_all(b"\r\n").await?) + } + } + }; } /// Write queues as part of a command. They are written with a leading space /// followed by space separated queue names. -fn write_queues(w: &mut W, queues: &[S]) -> Result<(), Error> +async fn write_queues(w: &mut W, queues: &[S]) -> Result<(), Error> where - W: Write, + W: AsyncWrite + Unpin + Send, S: AsRef, { for q in queues { - w.write_all(b" ")?; - w.write_all(q.as_ref().as_bytes())?; + w.write_all(b" ").await?; + w.write_all(q.as_ref().as_bytes()).await?; } Ok(()) } -// ---------------------------------------------- +// -------------------- INFO ---------------------- -pub struct Info; +pub(crate) struct Info; +#[async_trait::async_trait] impl FaktoryCommand for Info { - fn issue(&self, w: &mut W) -> Result<(), Error> { - Ok(w.write_all(b"INFO\r\n")?) + async fn issue(&self, w: &mut W) -> Result<(), Error> { + Ok(w.write_all(b"INFO\r\n").await?) } } -// ---------------------------------------------- +// -------------------- ACK ---------------------- #[derive(Serialize)] -pub struct Ack { - #[serde(rename = "jid")] - job_id: String, -} - -impl FaktoryCommand for Ack { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"ACK ")?; - serde_json::to_writer(&mut *w, self).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) - } +pub(crate) struct Ack { + jid: JobId, } impl Ack { - pub fn new>(job_id: S) -> Ack { - Ack { - job_id: job_id.into(), - } + pub fn new>(jid: J) -> Ack { + Ack { jid: jid.into() } } } -// ---------------------------------------------- +self_to_cmd!(Ack, "ACK"); -#[derive(Serialize)] -pub struct Heartbeat { - wid: String, -} +// -------------------- BEAT ------------------ -impl FaktoryCommand for Heartbeat { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"BEAT ")?; - serde_json::to_writer(&mut *w, self).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) 
- } +#[derive(Serialize)] +pub(crate) struct Heartbeat { + wid: WorkerId, } impl Heartbeat { - pub fn new>(wid: S) -> Heartbeat { + pub fn new>(wid: S) -> Heartbeat { Heartbeat { wid: wid.into() } } } -// ---------------------------------------------- +self_to_cmd!(Heartbeat, "BEAT"); + +// -------------------- FAIL --------------------- #[derive(Serialize, Clone)] -pub struct Fail { +pub(crate) struct Fail { #[serde(rename = "jid")] - job_id: String, + job_id: JobId, #[serde(rename = "errtype")] kind: String, message: String, @@ -88,65 +92,77 @@ pub struct Fail { backtrace: Vec, } -impl FaktoryCommand for Fail { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"FAIL ")?; - serde_json::to_writer(&mut *w, self).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) - } -} - impl Fail { - pub fn new, S2: Into, S3: Into>( - job_id: S1, - kind: S2, - message: S3, - ) -> Self { + pub(crate) fn new(job_id: JobId, kind: impl Into, message: impl Into) -> Self { Fail { - job_id: job_id.into(), + job_id, kind: kind.into(), message: message.into(), backtrace: Vec::new(), } } - pub fn set_backtrace(&mut self, lines: Vec) { + // Used for all kind of errors not related to domain logic, e.g. missing handler for this type of job (i.e. + // the worker consumed a job from a specified queue, but has no tool to process it). + // Note that "unknown" is the error type used by the Go library in such cases too. + pub(crate) fn generic>(job_id: JobId, message: S) -> Self { + Fail::new(job_id, "unknown", message) + } + + pub(crate) fn set_backtrace(&mut self, lines: Vec) { self.backtrace = lines; } + + // For any application errors (all kind of errors that could happen in userland when handling the job) + // we want to send backtrace (split into lines) to the Faktory server so that whoever is interested in + // the job result could follow the trace (for debugging essentially). + pub(crate) fn generic_with_backtrace(jid: JobId, e: E) -> Self + where + E: StdError, + { + let mut f = Fail::generic(jid, format!("{}", e)); + let mut root = e.source(); + let mut lines = Vec::new(); + while let Some(r) = root.take() { + lines.push(format!("{}", r)); + root = r.source(); + } + f.set_backtrace(lines); + f + } } -// ---------------------------------------------- +self_to_cmd!(Fail, "FAIL"); -pub struct End; +// ---------------------- END -------------------- +pub(crate) struct End; + +#[async_trait::async_trait] impl FaktoryCommand for End { - fn issue(&self, w: &mut W) -> Result<(), Error> { - Ok(w.write_all(b"END\r\n")?) + async fn issue(&self, w: &mut W) -> Result<(), Error> { + Ok(w.write_all(b"END\r\n").await?) } } -// ---------------------------------------------- +// --------------------- FETCH -------------------- -pub struct Fetch<'a, S> +pub(crate) struct Fetch<'a, S> where S: AsRef, { - queues: &'a [S], + pub(crate) queues: &'a [S], } -impl<'a, S> FaktoryCommand for Fetch<'a, S> +#[async_trait::async_trait] +impl<'a, Q> FaktoryCommand for Fetch<'a, Q> where - S: AsRef, + Q: AsRef + Sync, { - fn issue(&self, w: &mut W) -> Result<(), Error> { - if self.queues.is_empty() { - w.write_all(b"FETCH\r\n")?; - } else { - w.write_all(b"FETCH")?; - write_queues::(w, self.queues)?; - w.write_all(b"\r\n")?; - } - Ok(()) + async fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"FETCH").await?; + write_queues(w, self.queues).await?; + Ok(w.write_all(b"\r\n").await?) 
} } @@ -159,14 +175,14 @@ where } } -// ---------------------------------------------- +// --------------------- HELLO -------------------- #[derive(Serialize)] -pub struct Hello { +pub(crate) struct Hello { #[serde(skip_serializing_if = "Option::is_none")] pub hostname: Option, #[serde(skip_serializing_if = "Option::is_none")] - pub wid: Option, + pub wid: Option, #[serde(skip_serializing_if = "Option::is_none")] pub pid: Option, #[serde(skip_serializing_if = "Vec::is_empty")] @@ -208,17 +224,11 @@ impl Hello { } } -impl FaktoryCommand for Hello { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"HELLO ")?; - serde_json::to_writer(&mut *w, self).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) - } -} +self_to_cmd!(Hello, "HELLO"); -// ---------------------------------------------- +// --------------------- PUSH -------------------- -pub struct Push(Job); +pub(crate) struct Push(Job); use std::ops::Deref; impl Deref for Push { @@ -234,17 +244,19 @@ impl From for Push { } } +#[async_trait::async_trait] impl FaktoryCommand for Push { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"PUSH ")?; - serde_json::to_writer(&mut *w, &**self).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) + async fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"PUSH ").await?; + let r = serde_json::to_vec(&**self).map_err(Error::Serialization)?; + w.write_all(&r).await?; + Ok(w.write_all(b"\r\n").await?) } } -// ---------------------------------------------- +// ---------------------- PUSHB ------------------- -pub struct PushBulk(Vec); +pub(crate) struct PushBulk(Vec); impl From> for PushBulk { fn from(jobs: Vec) -> Self { @@ -252,22 +264,24 @@ impl From> for PushBulk { } } +#[async_trait::async_trait] impl FaktoryCommand for PushBulk { - fn issue(&self, w: &mut W) -> Result<(), Error> { - w.write_all(b"PUSHB ")?; - serde_json::to_writer(&mut *w, &self.0).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) + async fn issue(&self, w: &mut W) -> Result<(), Error> { + w.write_all(b"PUSHB ").await?; + let r = serde_json::to_vec(&self.0).map_err(Error::Serialization)?; + w.write_all(&r).await?; + Ok(w.write_all(b"\r\n").await?) } } -// ---------------------------------------------- +// ---------------------- QUEUE ------------------- -pub enum QueueAction { +pub(crate) enum QueueAction { Pause, Resume, } -pub struct QueueControl<'a, S> +pub(crate) struct QueueControl<'a, S> where S: AsRef, { @@ -275,19 +289,21 @@ where pub queues: &'a [S], } -impl> FaktoryCommand for QueueControl<'_, S> { - fn issue(&self, w: &mut W) -> Result<(), Error> { +#[async_trait::async_trait] +impl FaktoryCommand for QueueControl<'_, Q> +where + Q: AsRef + Sync, +{ + async fn issue(&self, w: &mut W) -> Result<(), Error> { let command = match self.action { QueueAction::Pause => b"QUEUE PAUSE".as_ref(), QueueAction::Resume => b"QUEUE RESUME".as_ref(), }; - - w.write_all(command)?; - write_queues::(w, self.queues)?; - Ok(w.write_all(b"\r\n")?) + w.write_all(command).await?; + write_queues(w, self.queues).await?; + Ok(w.write_all(b"\r\n").await?) 
} } - impl<'a, S: AsRef> QueueControl<'a, S> { pub fn new(action: QueueAction, queues: &'a [S]) -> Self { Self { action, queues } diff --git a/src/proto/single/ent/cmd.rs b/src/proto/single/ent/cmd.rs index 50d06b81..43881e44 100644 --- a/src/proto/single/ent/cmd.rs +++ b/src/proto/single/ent/cmd.rs @@ -1,26 +1,28 @@ use super::ProgressUpdate; use crate::error::Error; -use crate::proto::single::FaktoryCommand; -use std::{fmt::Debug, io::Write}; +use crate::proto::{single::FaktoryCommand, JobId}; +use tokio::io::{AsyncWrite, AsyncWriteExt}; #[derive(Debug, Clone)] pub enum Track { Set(ProgressUpdate), - Get(String), + Get(JobId), } +#[async_trait::async_trait] impl FaktoryCommand for Track { - fn issue(&self, w: &mut W) -> Result<(), Error> { + async fn issue(&self, w: &mut W) -> Result<(), Error> { match self { Self::Set(upd) => { - w.write_all(b"TRACK SET ")?; - serde_json::to_writer(&mut *w, upd).map_err(Error::Serialization)?; - Ok(w.write_all(b"\r\n")?) + w.write_all(b"TRACK SET ").await?; + let r = serde_json::to_vec(upd).map_err(Error::Serialization)?; + w.write_all(&r).await?; + Ok(w.write_all(b"\r\n").await?) } Self::Get(jid) => { - w.write_all(b"TRACK GET ")?; - w.write_all(jid.as_bytes())?; - Ok(w.write_all(b"\r\n")?) + w.write_all(b"TRACK GET ").await?; + w.write_all(jid.as_bytes()).await?; + Ok(w.write_all(b"\r\n").await?) } } } diff --git a/src/proto/single/ent/progress.rs b/src/proto/single/ent/progress.rs index ede5d838..49304a5a 100644 --- a/src/proto/single/ent/progress.rs +++ b/src/proto/single/ent/progress.rs @@ -1,3 +1,5 @@ +use crate::proto::single::JobId; + use super::utils; use chrono::{DateTime, Utc}; use derive_builder::Builder; @@ -15,7 +17,7 @@ use derive_builder::Builder; pub struct ProgressUpdate { /// Id of the tracked job. #[builder(setter(custom))] - pub jid: String, + pub jid: JobId, /// Percentage of the job's completion. #[serde(skip_serializing_if = "Option::is_none")] @@ -38,7 +40,7 @@ pub struct ProgressUpdate { impl ProgressUpdate { /// Create an instance of `ProgressUpdate` for the job with this ID specifying its completion percentage. - pub fn set(jid: impl Into, percent: u8) -> ProgressUpdate { + pub fn set(jid: JobId, percent: u8) -> ProgressUpdate { ProgressUpdate::builder(jid).percent(percent).build() } @@ -46,7 +48,7 @@ impl ProgressUpdate { /// /// Equivalent to creating a [new](struct.ProgressUpdateBuilder.html#method.new) /// `ProgressUpdateBuilder`. - pub fn builder(jid: impl Into) -> ProgressUpdateBuilder { + pub fn builder(jid: JobId) -> ProgressUpdateBuilder { ProgressUpdateBuilder::new(jid) } } @@ -59,9 +61,9 @@ impl ProgressUpdateBuilder { } /// Create a new instance of `JobBuilder` - pub fn new(jid: impl Into) -> ProgressUpdateBuilder { + pub fn new(jid: JobId) -> ProgressUpdateBuilder { ProgressUpdateBuilder { - jid: Some(jid.into()), + jid: Some(jid), ..ProgressUpdateBuilder::create_empty() } } @@ -120,7 +122,7 @@ impl std::fmt::Display for JobState { #[derive(Debug, Clone, Deserialize)] pub struct Progress { /// Id of the tracked job. - pub jid: String, + pub jid: JobId, /// Job's state. pub state: JobState, @@ -141,7 +143,7 @@ impl Progress { /// /// This will copy the [`desc`](Progress::desc) from the `Progress` (retrieved) over to `ProgressUpdate` (to be sent). 
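For illustration, a rough end-to-end sketch of the tracking API built from the pieces above; it assumes the `ent` feature, a Tokio runtime, and a made-up job id:

```rust
use faktory::{ent::ProgressUpdateBuilder, Client, JobId};

#[tokio::main]
async fn main() -> Result<(), faktory::Error> {
    let mut client = Client::connect(None).await?;

    // A made-up id of a job whose progress the server is tracking.
    let jid = JobId::new("W8qyVle9vXzUWQOf");

    // Report an intermediate state ...
    let update = ProgressUpdateBuilder::new(jid.clone())
        .desc("Resizing images...".to_owned())
        .percent(42)
        .build();
    client.set_progress(update).await?;

    // ... then read it back and bump the percentage, keeping the stored description.
    if let Some(progress) = client.get_progress(jid).await? {
        client.set_progress(progress.update_percent(99)).await?;
    }
    Ok(())
}
```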
pub fn update_percent(&self, percent: u8) -> ProgressUpdate { - ProgressUpdate::builder(&self.jid) + ProgressUpdate::builder(self.jid.clone()) .desc(self.desc.clone()) .percent(percent) .build() @@ -149,6 +151,6 @@ impl Progress { /// Create an instance of `ProgressUpdateBuilder` for the job. pub fn update_builder(&self) -> ProgressUpdateBuilder { - ProgressUpdateBuilder::new(&self.jid) + ProgressUpdateBuilder::new(self.jid.clone()) } } diff --git a/src/proto/single/id.rs b/src/proto/single/id.rs new file mode 100644 index 00000000..f1b6a844 --- /dev/null +++ b/src/proto/single/id.rs @@ -0,0 +1,102 @@ +use super::utils; +use std::ops::Deref; + +macro_rules! string_wrapper_impls { + ($new_type:ident) => { + impl $new_type { + /// Create a new entity identifier. + pub fn new(inner: S) -> Self + where + S: Into, + { + Self(inner.into()) + } + } + + impl Deref for $new_type { + type Target = String; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + impl AsRef for $new_type { + fn as_ref(&self) -> &str { + self.deref().as_ref() + } + } + + impl AsRef<$new_type> for $new_type { + fn as_ref(&self) -> &$new_type { + &self + } + } + + impl PartialEq for $new_type { + fn eq(&self, other: &str) -> bool { + self.deref().eq(other) + } + } + }; +} + +/// Job identifier. +/// +/// The Faktory server expects a [`jid`](struct.Job.html#structfield.jid) of a reasonable length +/// (at least 8 chars), which you should take into account when creating a new instance of `JobId`. +/// +/// If you do not have any domain, product or organisation specific requirements, you may prefer +/// to have a random job identifier generated for you with [`random`](JobId::random). +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct JobId(String); + +impl JobId { + /// Internally, generates a 16-char long random ASCII string. + pub fn random() -> Self { + Self(utils::gen_random_jid()) + } +} + +string_wrapper_impls!(JobId); + +// ----------------------------------------------------- + +/// Worker identifier. +/// +/// The Faktory server expects a non-empty string as a worker identifier, +/// see [`wid`](struct.WorkerBuilder.html#method.wid). +/// +/// If you do not have any domain, product or organisation specific requirements, you may prefer +/// to have a random job identifier generated for you with [`random`](WorkerId::random). +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct WorkerId(String); + +impl WorkerId { + /// Internally, generates a 32-char long random ASCII string. + pub fn random() -> Self { + Self(utils::gen_random_wid()) + } +} + +string_wrapper_impls!(WorkerId); + +// ----------------------------------------------------- + +/// Batch identifier. +/// +/// This is a wrapper over the string identifier issued by the Faktory server. +/// Only used for operations with [`Batch`](struct.Batch.html) in Enterprise Faktory. 
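A small sketch of how these wrappers behave in practice; the order id is made up:

```rust
use faktory::{JobId, WorkerId};

fn main() {
    // Bring your own identifier (job ids should be at least 8 chars long) ...
    let jid = JobId::new("order-2024-000017");
    // ... or let the library generate a random one.
    let wid = WorkerId::random();

    // The wrappers deref to `String`, so string-based APIs keep working.
    assert_eq!(jid.as_str(), "order-2024-000017");
    assert!(!wid.is_empty());
}
```

Because the ids are plain newtypes over `String` marked `#[serde(transparent)]`, they serialize exactly like the strings they wrap, so the wire format is unchanged.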
+#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(transparent)] +pub struct BatchId(String); + +string_wrapper_impls!(BatchId); + +use serde_json::Value; +impl From for Value { + fn from(value: BatchId) -> Self { + value.0.into() + } +} diff --git a/src/proto/single/mod.rs b/src/proto/single/mod.rs index b2607347..737b1c2b 100644 --- a/src/proto/single/mod.rs +++ b/src/proto/single/mod.rs @@ -1,21 +1,25 @@ +use crate::Error; use chrono::{DateTime, Utc}; use derive_builder::Builder; use std::collections::HashMap; -use std::io::prelude::*; +use tokio::io::{AsyncBufRead, AsyncWrite, AsyncWriteExt}; mod cmd; +mod id; mod resp; mod utils; +pub use cmd::*; +pub use id::{JobId, WorkerId}; +pub use resp::*; + #[cfg(feature = "ent")] #[cfg_attr(docsrs, doc(cfg(feature = "ent")))] pub mod ent; -pub use self::cmd::*; -pub use self::resp::*; -use crate::error::Error; - -pub(crate) use self::utils::gen_random_wid; +#[cfg(feature = "ent")] +#[cfg_attr(docsrs, doc(cfg(feature = "ent")))] +pub use id::BatchId; const JOB_DEFAULT_QUEUE: &str = "default"; const JOB_DEFAULT_RESERVED_FOR_SECS: usize = 600; @@ -66,8 +70,8 @@ const JOB_DEFAULT_BACKTRACE: usize = 0; )] pub struct Job { /// The job's unique identifier. - #[builder(default = "utils::gen_random_jid()")] - pub(crate) jid: String, + #[builder(default = "JobId::random()")] + pub(crate) jid: JobId, /// The queue this job belongs to. Usually `default`. #[builder(default = "JOB_DEFAULT_QUEUE.into()")] @@ -76,7 +80,7 @@ pub struct Job { /// The job's type. Called `kind` because `type` is reserved. #[serde(rename = "jobtype")] #[builder(setter(custom))] - pub(crate) kind: String, + pub kind: String, /// The arguments provided for this job. #[builder(setter(custom), default = "Vec::new()")] @@ -159,9 +163,10 @@ impl JobBuilder { } /// Setter for the arguments provided for this job. - pub fn args(&mut self, args: Vec) -> &mut Self + pub fn args(&mut self, args: I) -> &mut Self where - A: Into, + I: IntoIterator, + V: Into, { self.args = Some(args.into_iter().map(|s| s.into()).collect()); self @@ -236,7 +241,7 @@ impl Job { } /// This job's id. - pub fn id(&self) -> &str { + pub fn id(&self) -> &JobId { &self.jid } @@ -256,17 +261,23 @@ impl Job { } } -pub fn write_command(w: &mut W, command: &C) -> Result<(), Error> { - command.issue::(w)?; - Ok(w.flush()?) +pub async fn write_command( + w: &mut W, + command: &C, +) -> Result<(), Error> { + command.issue::(w).await?; + Ok(w.flush().await?) 
} -pub fn write_command_and_await_ok( - x: &mut X, +pub async fn write_command_and_await_ok< + S: AsyncBufRead + AsyncWrite + Unpin + Send, + C: FaktoryCommand, +>( + stream: &mut S, command: &C, ) -> Result<(), Error> { - write_command(x, command)?; - read_ok(x) + write_command(stream, command).await?; + read_ok(stream).await } #[cfg(test)] @@ -279,7 +290,7 @@ mod test { let job_args = vec!["ISBN-13:9781718501850"]; let job = JobBuilder::new(job_kind).args(job_args.clone()).build(); - assert!(job.jid != "".to_owned()); + assert_ne!(&job.jid, ""); assert!(job.queue == JOB_DEFAULT_QUEUE.to_string()); assert_eq!(job.kind, job_kind); assert_eq!(job.args, job_args); diff --git a/src/proto/single/resp.rs b/src/proto/single/resp.rs index 9a1d3e19..5d4ede59 100644 --- a/src/proto/single/resp.rs +++ b/src/proto/single/resp.rs @@ -1,7 +1,10 @@ +#[cfg(feature = "ent")] +use crate::ent::BatchId; + use crate::error::{self, Error}; -use std::io::prelude::*; +use tokio::io::AsyncBufRead; -fn bad(expected: &'static str, got: &RawResponse) -> error::Protocol { +pub fn bad(expected: &'static str, got: &RawResponse) -> error::Protocol { let stringy = match *got { RawResponse::String(ref s) => Some(&**s), RawResponse::Blob(ref b) => { @@ -28,8 +31,10 @@ fn bad(expected: &'static str, got: &RawResponse) -> error::Protocol { // ---------------------------------------------- -pub fn read_json(r: R) -> Result, Error> { - let rr = read(r)?; +pub async fn read_json( + r: R, +) -> Result, Error> { + let rr = read(r).await?; match rr { RawResponse::String(ref s) if s == "OK" => { return Ok(None); @@ -60,19 +65,20 @@ pub fn read_json(r: R) -> Result(r: R) -> Result { - match read(r)? { +pub async fn read_bid(r: R) -> Result { + match read(r).await? { RawResponse::Blob(ref b) if b.is_empty() => Err(error::Protocol::BadType { expected: "non-empty blob representation of batch id", received: "empty blob".into(), } .into()), - RawResponse::Blob(ref b) => Ok(std::str::from_utf8(b) - .map_err(|_| error::Protocol::BadType { + RawResponse::Blob(ref b) => { + let raw = std::str::from_utf8(b).map_err(|_| error::Protocol::BadType { expected: "valid blob representation of batch id", received: "unprocessable blob".into(), - })? 
- .into()), + })?; + Ok(BatchId::new(raw)) + } something_else => Err(bad("id", &something_else).into()), } } @@ -89,21 +95,20 @@ pub struct Hi { pub salt: Option, } -pub fn read_hi(r: R) -> Result { - let rr = read(r)?; +pub async fn read_hi(r: R) -> Result { + let rr = read(r).await?; if let RawResponse::String(ref s) = rr { if let Some(s) = s.strip_prefix("HI ") { return serde_json::from_str(s).map_err(Error::Serialization); } } - Err(bad("server hi", &rr).into()) } // ---------------------------------------------- -pub fn read_ok(r: R) -> Result<(), Error> { - let rr = read(r)?; +pub async fn read_ok(r: R) -> Result<(), Error> { + let rr = read(r).await?; if let RawResponse::String(ref s) = rr { if s == "OK" { return Ok(()); @@ -120,22 +125,26 @@ pub fn read_ok(r: R) -> Result<(), Error> { // ---------------------------------------------- #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] -enum RawResponse { +pub enum RawResponse { String(String), Blob(Vec), Number(isize), Null, } -fn read(mut r: R) -> Result { +use tokio::io::{AsyncBufReadExt, AsyncReadExt}; +async fn read(mut r: R) -> Result +where + R: AsyncBufRead + Unpin, +{ let mut cmdbuf = [0u8; 1]; - r.read_exact(&mut cmdbuf)?; + r.read_exact(&mut cmdbuf).await?; match cmdbuf[0] { b'+' => { // Simple String // https://redis.io/topics/protocol#resp-simple-strings let mut s = String::new(); - r.read_line(&mut s)?; + r.read_line(&mut s).await?; // remove newlines let l = s.len() - 2; @@ -147,7 +156,7 @@ fn read(mut r: R) -> Result { // Error // https://redis.io/topics/protocol#resp-errors let mut s = String::new(); - r.read_line(&mut s)?; + r.read_line(&mut s).await?; // remove newlines let l = s.len() - 2; @@ -159,7 +168,7 @@ fn read(mut r: R) -> Result { // Integer // https://redis.io/topics/protocol#resp-integers let mut s = String::with_capacity(32); - r.read_line(&mut s)?; + r.read_line(&mut s).await?; // remove newlines let l = s.len() - 2; @@ -179,7 +188,7 @@ fn read(mut r: R) -> Result { // Bulk String // https://redis.io/topics/protocol#resp-bulk-strings let mut bytes = Vec::with_capacity(32); - r.read_until(b'\n', &mut bytes)?; + r.read_until(b'\n', &mut bytes).await?; let s = std::str::from_utf8(&bytes[0..bytes.len() - 2]).map_err(|_| { error::Protocol::BadResponse { typed_as: "bulk string", @@ -201,8 +210,8 @@ fn read(mut r: R) -> Result { } else { let size = size as usize; let mut bytes = vec![0; size]; - r.read_exact(&mut bytes[..])?; - r.read_exact(&mut [0u8; 2])?; + r.read_exact(&mut bytes[..]).await?; + r.read_exact(&mut [0u8; 2]).await?; Ok(RawResponse::Blob(bytes)) } } @@ -248,32 +257,34 @@ impl From> for RawResponse { #[cfg(test)] mod test { use super::{read, RawResponse}; + use crate::error::{self, Error}; - use serde_json::{self, Map, Value}; - use std::io::{self, Cursor}; + use serde_json::{Map, Value}; + use std::io::Cursor; + use tokio::io::AsyncBufRead; - fn read_json(c: C) -> Result, Error> { - super::read_json(c) + async fn read_json(c: C) -> Result, Error> { + super::read_json(c).await } - #[test] - fn it_parses_simple_strings() { + #[tokio::test] + async fn it_parses_simple_strings() { let c = Cursor::new(b"+OK\r\n"); - assert_eq!(read(c).unwrap(), RawResponse::from("OK")); + assert_eq!(read(c).await.unwrap(), RawResponse::from("OK")); } - #[test] - fn it_parses_numbers() { + #[tokio::test] + async fn it_parses_numbers() { let c = Cursor::new(b":1024\r\n"); - assert_eq!(read(c).unwrap(), RawResponse::from(1024)); + assert_eq!(read(c).await.unwrap(), RawResponse::from(1024)); } - #[test] - fn 
it_errors_on_bad_numbers() { + #[tokio::test] + async fn it_errors_on_bad_numbers() { let c = Cursor::new(b":x\r\n"); if let Error::Protocol(error::Protocol::BadResponse { typed_as, error, .. - }) = read(c).unwrap_err() + }) = read(c).await.unwrap_err() { assert_eq!(typed_as, "integer"); assert_eq!(error, "invalid integer value"); @@ -282,35 +293,35 @@ mod test { } } - #[test] - fn it_parses_errors() { + #[tokio::test] + async fn it_parses_errors() { let c = Cursor::new(b"-ERR foo\r\n"); - if let Error::Protocol(error::Protocol::Internal { ref msg }) = read(c).unwrap_err() { + if let Error::Protocol(error::Protocol::Internal { ref msg }) = read(c).await.unwrap_err() { assert_eq!(msg, "foo"); } else { unreachable!(); } } - #[test] + #[tokio::test] #[should_panic] - fn it_cant_do_arrays() { + async fn it_cant_do_arrays() { let c = Cursor::new(b"*\r\n"); - read(c).unwrap_err(); + read(c).await.unwrap_err(); } - #[test] - fn it_parses_nills() { + #[tokio::test] + async fn it_parses_nills() { let c = Cursor::new(b"$-1\r\n"); - assert_eq!(read(c).unwrap(), RawResponse::Null); + assert_eq!(read(c).await.unwrap(), RawResponse::Null); } - #[test] - fn it_errors_on_bad_sizes() { + #[tokio::test] + async fn it_errors_on_bad_sizes() { let c = Cursor::new(b"$x\r\n\r\n"); if let Error::Protocol(error::Protocol::BadResponse { typed_as, error, .. - }) = read(c).unwrap_err() + }) = read(c).await.unwrap_err() { assert_eq!(typed_as, "bulk string"); assert_eq!(error, "server bulk response size prefix is not an integer"); @@ -319,88 +330,88 @@ mod test { } } - #[test] - fn it_parses_empty_bulk() { + #[tokio::test] + async fn it_parses_empty_bulk() { let c = Cursor::new(b"$0\r\n\r\n"); - assert_eq!(read(c).unwrap(), RawResponse::from(vec![])); + assert_eq!(read(c).await.unwrap(), RawResponse::from(vec![])); } - #[test] - fn it_parses_non_empty_bulk() { + #[tokio::test] + async fn it_parses_non_empty_bulk() { let c = Cursor::new(b"$11\r\nHELLO WORLD\r\n"); assert_eq!( - read(c).unwrap(), + read(c).await.unwrap(), RawResponse::from(Vec::from(&b"HELLO WORLD"[..])) ); } - #[test] - fn it_decodes_json_ok_string() { + #[tokio::test] + async fn it_decodes_json_ok_string() { let c = Cursor::new(b"+OK\r\n"); - assert_eq!(read_json(c).unwrap(), None); + assert_eq!(read_json(c).await.unwrap(), None); } - #[test] - fn it_decodes_json_ok_blob() { + #[tokio::test] + async fn it_decodes_json_ok_blob() { let c = Cursor::new(b"$2\r\nOK\r\n"); - assert_eq!(read_json(c).unwrap(), None); + assert_eq!(read_json(c).await.unwrap(), None); } - #[test] - fn it_decodes_json_nill() { + #[tokio::test] + async fn it_decodes_json_nill() { let c = Cursor::new(b"$-1\r\n"); - assert_eq!(read_json(c).unwrap(), None); + assert_eq!(read_json(c).await.unwrap(), None); } - #[test] - fn it_decodes_json_empty() { + #[tokio::test] + async fn it_decodes_json_empty() { let c = Cursor::new(b"$0\r\n\r\n"); - assert_eq!(read_json(c).unwrap(), None); + assert_eq!(read_json(c).await.unwrap(), None); } - #[test] - fn it_decodes_string_json() { + #[tokio::test] + async fn it_decodes_string_json() { let c = Cursor::new(b"+{\"hello\":1}\r\n"); let mut m = Map::new(); m.insert("hello".to_string(), Value::from(1)); - assert_eq!(read_json(c).unwrap(), Some(Value::Object(m))); + assert_eq!(read_json(c).await.unwrap(), Some(Value::Object(m))); } - #[test] - fn it_decodes_blob_json() { + #[tokio::test] + async fn it_decodes_blob_json() { let c = Cursor::new(b"$11\r\n{\"hello\":1}\r\n"); let mut m = Map::new(); m.insert("hello".to_string(), Value::from(1)); - 
assert_eq!(read_json(c).unwrap(), Some(Value::Object(m))); + assert_eq!(read_json(c).await.unwrap(), Some(Value::Object(m))); } - #[test] - fn it_errors_on_bad_json_blob() { + #[tokio::test] + async fn it_errors_on_bad_json_blob() { let c = Cursor::new(b"$9\r\n{\"hello\"}\r\n"); - if let Error::Serialization(err) = read_json(c).unwrap_err() { + if let Error::Serialization(err) = read_json(c).await.unwrap_err() { let _: serde_json::Error = err; } else { unreachable!(); } } - #[test] - fn it_errors_on_bad_json_string() { + #[tokio::test] + async fn it_errors_on_bad_json_string() { let c = Cursor::new(b"+{\"hello\"}\r\n"); - if let Error::Serialization(err) = read_json(c).unwrap_err() { + if let Error::Serialization(err) = read_json(c).await.unwrap_err() { let _: serde_json::Error = err; } else { unreachable!(); } } - #[test] - fn json_error_on_number() { + #[tokio::test] + async fn json_error_on_number() { let c = Cursor::new(b":9\r\n"); if let Error::Protocol(error::Protocol::BadType { expected, ref received, - }) = read_json(c).unwrap_err() + }) = read_json(c).await.unwrap_err() { assert_eq!(expected, "json"); assert_eq!(received, "Number(9)"); @@ -409,12 +420,12 @@ mod test { } } - #[test] - fn it_errors_on_unknown_resp_type() { + #[tokio::test] + async fn it_errors_on_unknown_resp_type() { let c = Cursor::new(b"^\r\n"); if let Error::Protocol(error::Protocol::BadResponse { typed_as, error, .. - }) = read_json(c).unwrap_err() + }) = read_json(c).await.unwrap_err() { assert_eq!(typed_as, "unknown"); assert_eq!(error, "invalid response type prefix"); diff --git a/src/proto/utils.rs b/src/proto/utils.rs new file mode 100644 index 00000000..f2ac84db --- /dev/null +++ b/src/proto/utils.rs @@ -0,0 +1,83 @@ +use crate::error::{self, Error}; +use url::Url; + +pub(crate) fn get_env_url() -> String { + use std::env; + let var = env::var("FAKTORY_PROVIDER").unwrap_or_else(|_| "FAKTORY_URL".to_string()); + env::var(var).unwrap_or_else(|_| "tcp://localhost:7419".to_string()) +} + +pub(crate) fn host_from_url(url: &Url) -> String { + format!("{}:{}", url.host_str().unwrap(), url.port().unwrap_or(7419)) +} + +pub(crate) fn url_parse(url: &str) -> Result { + let url = Url::parse(url).map_err(error::Connect::ParseUrl)?; + if url.scheme() != "tcp" { + return Err(error::Connect::BadScheme { + scheme: url.scheme().to_string(), + } + .into()); + } + + if url.host_str().is_none() || url.host_str().unwrap().is_empty() { + return Err(error::Connect::MissingHostname.into()); + } + + Ok(url) +} + +pub(crate) fn parse_provided_or_from_env(url: Option<&str>) -> Result { + url_parse(url.unwrap_or(&get_env_url())) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn correct_env_parsing() { + use std::env; + + if env::var_os("FAKTORY_URL").is_some() { + eprintln!("skipping test to avoid messing with user-set FAKTORY_URL"); + return; + } + + assert_eq!(get_env_url(), "tcp://localhost:7419"); + + env::set_var("FAKTORY_URL", "tcp://example.com:7500"); + assert_eq!(get_env_url(), "tcp://example.com:7500"); + + env::set_var("FAKTORY_PROVIDER", "URL"); + env::set_var("URL", "tcp://example.com:7501"); + assert_eq!(get_env_url(), "tcp://example.com:7501"); + } + + #[test] + fn url_port_default() { + use url::Url; + let url = Url::parse("tcp://example.com").unwrap(); + assert_eq!(host_from_url(&url), "example.com:7419"); + } + + #[test] + fn url_requires_tcp() { + url_parse("foobar").unwrap_err(); + } + + #[test] + fn url_requires_host() { + url_parse("tcp://:7419").unwrap_err(); + } + + #[test] + fn 
url_doesnt_require_port() { + url_parse("tcp://example.com").unwrap(); + } + + #[test] + fn url_can_take_password_and_port() { + url_parse("tcp://:foobar@example.com:7419").unwrap(); + } +} diff --git a/src/tls.rs b/src/tls.rs deleted file mode 100644 index f9db6ca4..00000000 --- a/src/tls.rs +++ /dev/null @@ -1,127 +0,0 @@ -use crate::proto::{self, Reconnect}; -use crate::Error; -use native_tls::TlsConnector; -use native_tls::TlsStream as NativeTlsStream; -use std::io; -use std::io::prelude::*; -use std::net::TcpStream; - -/// A reconnectable stream encrypted with TLS. -/// -/// This can be used as an argument to `Consumer::connect_with` and `Producer::connect_with` to -/// connect to a TLS-secured Faktory server. -/// -/// # Examples -/// -/// ```no_run -/// use faktory::{Producer, TlsStream}; -/// let tls = TlsStream::connect(None).unwrap(); -/// let p = Producer::connect_with(tls, None).unwrap(); -/// # drop(p); -/// ``` -/// -pub struct TlsStream { - connector: TlsConnector, - hostname: String, - stream: NativeTlsStream, -} - -impl TlsStream { - /// Create a new TLS connection over TCP. - /// - /// If `url` is not given, will use the standard Faktory environment variables. Specifically, - /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address - /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the - /// server address. If the latter environment variable is not defined, the connection will be - /// made to - /// - /// ```text - /// tcp://localhost:7419 - /// ``` - /// - /// If `url` is given, but does not specify a port, it defaults to 7419. - pub fn connect(url: Option<&str>) -> Result { - TlsStream::with_connector( - TlsConnector::builder().build().map_err(Error::TlsStream)?, - url, - ) - } - - /// Create a new TLS connection over TCP using a non-default TLS configuration. - /// - /// See `connect` for details about the `url` parameter. - pub fn with_connector(tls: TlsConnector, url: Option<&str>) -> Result { - let url = match url { - Some(url) => proto::url_parse(url), - None => proto::url_parse(&proto::get_env_url()), - }?; - let stream = TcpStream::connect(proto::host_from_url(&url))?; - Ok(TlsStream::new(stream, tls, url.host_str().unwrap())?) - } -} - -use std::fmt::Debug; -impl TlsStream -where - S: Read + Write + Reconnect + Send + Sync + Debug + 'static, -{ - /// Create a new TLS connection on an existing stream. - pub fn default(stream: S, hostname: &str) -> io::Result { - Self::new(stream, TlsConnector::builder().build().unwrap(), hostname) - } - - /// Create a new TLS connection on an existing stream with a non-default TLS configuration. 
- pub fn new(stream: S, tls: TlsConnector, hostname: &str) -> io::Result { - let stream = tls - .connect(hostname, stream) - .map_err(|e| io::Error::new(io::ErrorKind::ConnectionAborted, e))?; - - Ok(TlsStream { - connector: tls, - hostname: hostname.to_string(), - stream, - }) - } -} - -impl Reconnect for TlsStream -where - S: Read + Write + Reconnect + Send + Sync + Debug + 'static, -{ - fn reconnect(&self) -> io::Result { - Self::new( - self.stream.get_ref().reconnect()?, - self.connector.clone(), - &self.hostname, - ) - } -} - -use std::ops::{Deref, DerefMut}; -impl Deref for TlsStream { - type Target = NativeTlsStream; - fn deref(&self) -> &Self::Target { - &self.stream - } -} -impl DerefMut for TlsStream { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.stream - } -} - -impl Read for TlsStream { - fn read(&mut self, buf: &mut [u8]) -> io::Result { - self.stream.read(buf) - } -} - -impl Write for TlsStream { - fn write(&mut self, buf: &[u8]) -> io::Result { - self.stream.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.stream.flush() - } -} diff --git a/src/tls/mod.rs b/src/tls/mod.rs new file mode 100644 index 00000000..95cf94c7 --- /dev/null +++ b/src/tls/mod.rs @@ -0,0 +1,12 @@ +#[cfg(feature = "native_tls")] +#[cfg_attr(docsrs, doc(cfg(feature = "native_tls")))] +/// Namespace for native TLS powered [`TlsStream`](crate::native_tls::TlsStream). +/// +/// The underlying crate (`native-tls`) will use _SChannel_ on Windows, +/// _SecureTransport_ on OSX, and _OpenSSL_ on other platforms. +pub mod native_tls; + +#[cfg(feature = "rustls")] +#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))] +/// Namespace for Rustls-powered [`TlsStream`](crate::rustls::TlsStream). +pub mod rustls; diff --git a/src/tls/native_tls.rs b/src/tls/native_tls.rs new file mode 100644 index 00000000..ae5c903b --- /dev/null +++ b/src/tls/native_tls.rs @@ -0,0 +1,175 @@ +#[cfg(doc)] +use crate::{Client, WorkerBuilder}; + +use crate::error::{self, Error}; +use crate::proto::utils; +use crate::Reconnect; +use std::io; +use std::ops::{Deref, DerefMut}; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::TcpStream as TokioTcpStream; +use tokio_native_tls::TlsStream as NativeTlsStream; +use tokio_native_tls::{native_tls::TlsConnector, TlsConnector as AsyncTlsConnector}; + +/// A reconnectable stream encrypted with TLS. +/// +/// This can be used as an argument to [`WorkerBuilder::connect_with`] and [`Client::connect_with`] to +/// connect to a TLS-secured Faktory server. +/// +/// # Examples +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::Client; +/// use faktory::native_tls::TlsStream; +/// let tls = TlsStream::connect(None).await.unwrap(); +/// let cl = Client::connect_with(tls, None).await.unwrap(); +/// # drop(cl); +/// # }); +/// ``` +/// +#[pin_project::pin_project] +pub struct TlsStream { + connector: AsyncTlsConnector, + hostname: String, + #[pin] + stream: NativeTlsStream, +} + +impl TlsStream { + /// Create a new TLS connection over TCP. + /// + /// If `url` is not given, will use the standard Faktory environment variables. Specifically, + /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address + /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the + /// server address. 
If the latter environment variable is not defined, the connection will be + /// made to + /// + /// ```text + /// tcp://localhost:7419 + /// ``` + /// + /// If `url` is given, but does not specify a port, it defaults to 7419. + pub async fn connect(url: Option<&str>) -> Result { + TlsStream::with_connector( + TlsConnector::builder() + .build() + .map_err(error::Stream::NativeTls)?, + url, + ) + .await + } + + /// Create a new TLS connection over TCP using a non-default TLS configuration. + /// + /// See `connect` for details about the `url` parameter. + pub async fn with_connector(connector: TlsConnector, url: Option<&str>) -> Result { + let url = match url { + Some(url) => utils::url_parse(url), + None => utils::url_parse(&utils::get_env_url()), + }?; + let hostname = utils::host_from_url(&url); + let tcp_stream = TokioTcpStream::connect(&hostname).await?; + Ok(TlsStream::new(tcp_stream, connector, hostname).await?) + } +} + +impl TlsStream +where + S: AsyncRead + AsyncWrite + Send + Unpin, +{ + /// Create a new TLS connection on an existing stream. + /// + /// Internally creates a `ClientConfig` with an empty root certificates store and no client + /// authentication. Use [`new`](TlsStream::new) for a customized `TlsConnector`. + /// Create a new TLS connection on an existing stream. + pub async fn default(stream: S, hostname: String) -> io::Result { + let connector = TlsConnector::builder() + .build() + .map_err(error::Stream::NativeTls) + .unwrap(); + Self::new(stream, connector, hostname).await + } + + /// Create a new TLS connection on an existing stream with a non-default TLS configuration. + pub async fn new( + stream: S, + connector: impl Into, + hostname: String, + ) -> io::Result { + let connector: AsyncTlsConnector = connector.into(); + let tls_stream = connector + .connect(&hostname, stream) + .await + .map_err(|e| io::Error::new(io::ErrorKind::ConnectionAborted, e))?; + Ok(TlsStream { + connector, + hostname, + stream: tls_stream, + }) + } +} + +#[async_trait::async_trait] +impl Reconnect for TlsStream +where + S: AsyncRead + AsyncWrite + Send + Unpin + Reconnect, +{ + async fn reconnect(&mut self) -> io::Result { + let stream = self + .stream + .get_mut() + .get_mut() + .get_mut() + .reconnect() + .await?; + Self::new(stream, self.connector.clone(), self.hostname.clone()).await + } +} + +impl Deref for TlsStream { + type Target = NativeTlsStream; + fn deref(&self) -> &Self::Target { + &self.stream + } +} + +impl DerefMut for TlsStream { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.stream + } +} + +impl AsyncRead for TlsStream { + fn poll_read( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + self.project().stream.poll_read(cx, buf) + } +} + +impl AsyncWrite for TlsStream { + fn poll_write( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> std::task::Poll> { + self.project().stream.poll_write(cx, buf) + } + + fn poll_flush( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.project().stream.poll_flush(cx) + } + + fn poll_shutdown( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.project().stream.poll_shutdown(cx) + } +} diff --git a/src/tls/rustls.rs b/src/tls/rustls.rs new file mode 100644 index 00000000..a8a59cb8 --- /dev/null +++ b/src/tls/rustls.rs @@ -0,0 +1,179 @@ +#[cfg(doc)] +use crate::{Client, WorkerBuilder}; + +use 
crate::{proto::utils, Error, Reconnect}; +use std::io; +use std::ops::{Deref, DerefMut}; +use std::sync::Arc; +use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::net::TcpStream as TokioTcpStream; +use tokio_rustls::client::TlsStream as RustlsStream; +use tokio_rustls::rustls::{ClientConfig, RootCertStore}; +use tokio_rustls::TlsConnector; + +/// A reconnectable stream encrypted with TLS. +/// +/// This can be used as an argument to [`WorkerBuilder::connect_with`] and [`Client::connect_with`] to +/// connect to a TLS-secured Faktory server. +/// +/// # Examples +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::Client; +/// use faktory::rustls::TlsStream; +/// let tls = TlsStream::connect(None).await.unwrap(); +/// let cl = Client::connect_with(tls, None).await.unwrap(); +/// # drop(cl); +/// # }); +/// ``` +/// +#[pin_project::pin_project] +pub struct TlsStream { + connector: TlsConnector, + hostname: String, + #[pin] + stream: RustlsStream, +} + +impl TlsStream { + /// Create a new TLS connection over TCP. + /// + /// If `url` is not given, will use the standard Faktory environment variables. Specifically, + /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address + /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the + /// server address. If the latter environment variable is not defined, the connection will be + /// made to + /// + /// ```text + /// tcp://localhost:7419 + /// ``` + /// + /// If `url` is given, but does not specify a port, it defaults to 7419. + /// + /// Internally creates a `ClientConfig` with an empty root certificates store and no client + /// authentication. Use [`with_client_config`](TlsStream::with_client_config) + /// or [`with_connector`](TlsStream::with_connector) for customized + /// `ClientConfig` and `TlsConnector` accordingly. + pub async fn connect(url: Option<&str>) -> Result { + let conf = ClientConfig::builder() + .with_root_certificates(RootCertStore::empty()) + .with_no_client_auth(); + let con = TlsConnector::from(Arc::new(conf)); + TlsStream::with_connector(con, url).await + } + + /// Create a new TLS connection over TCP using a non-default TLS configuration. + /// + /// See `connect` for details about the `url` parameter. + pub async fn with_client_config(conf: ClientConfig, url: Option<&str>) -> Result { + let con = TlsConnector::from(Arc::new(conf)); + TlsStream::with_connector(con, url).await + } + + /// Create a new TLS connection over TCP using a connector with a non-default TLS configuration. + /// + /// See `connect` for details about the `url` parameter. + pub async fn with_connector(connector: TlsConnector, url: Option<&str>) -> Result { + let url = match url { + Some(url) => utils::url_parse(url), + None => utils::url_parse(&utils::get_env_url()), + }?; + let host_and_port = utils::host_from_url(&url); + let tcp_stream = TokioTcpStream::connect(&host_and_port).await?; + let host = url.host_str().unwrap().to_string(); + Ok(TlsStream::new(tcp_stream, connector, host).await?) + } +} + +impl TlsStream +where + S: AsyncRead + AsyncWrite + Send + Unpin, +{ + /// Create a new TLS connection on an existing stream. + /// + /// Internally creates a `ClientConfig` with an empty root certificates store and no client + /// authentication. Use [`new`](TlsStream::new) for a customized `TlsConnector`. 
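The paragraph above points at `new` for the case where the caller wants a customized `TlsConnector`, but there is no example of that path. The following is a minimal sketch of it, not part of the original docs: the server address, the `"localhost"` hostname, and the (omitted) population of the root certificate store are placeholders, and it assumes the calling crate has `tokio-rustls` (where these types come from) available as a dependency.

```rust
use std::sync::Arc;

use faktory::rustls::TlsStream;
use tokio::net::TcpStream;
use tokio_rustls::rustls::{ClientConfig, RootCertStore};
use tokio_rustls::TlsConnector;

#[tokio::main]
async fn main() {
    // Add the CA certificate(s) your Faktory server presents (omitted here);
    // an empty store, as used by the built-in defaults, will not verify real certificates.
    let roots = RootCertStore::empty();
    let conf = ClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    let connector = TlsConnector::from(Arc::new(conf));

    // Placeholder address of a TLS-enabled Faktory server.
    let tcp = TcpStream::connect("localhost:17419").await.unwrap();
    let tls = TlsStream::new(tcp, connector, "localhost".to_string())
        .await
        .unwrap();

    // `tls` can now be handed to `Client::connect_with` or `WorkerBuilder::connect_with`.
    drop(tls);
}
```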
+ pub async fn default(stream: S, hostname: String) -> io::Result { + let conf = ClientConfig::builder() + .with_root_certificates(RootCertStore::empty()) + .with_no_client_auth(); + + Self::new(stream, TlsConnector::from(Arc::new(conf)), hostname).await + } + + /// Create a new TLS connection on an existing stream with a non-default TLS configuration. + pub async fn new(stream: S, connector: TlsConnector, hostname: String) -> io::Result { + let server_name = hostname + .clone() + .try_into() + .expect("a valid DNS name or IP address"); + let tls_stream = connector + .connect(server_name, stream) + .await + .map_err(|e| io::Error::new(io::ErrorKind::ConnectionAborted, e))?; + Ok(TlsStream { + connector, + hostname, + stream: tls_stream, + }) + } +} + +#[async_trait::async_trait] +impl Reconnect for TlsStream +where + S: AsyncRead + AsyncWrite + Send + Unpin + Reconnect, +{ + async fn reconnect(&mut self) -> io::Result { + let stream = self.stream.get_mut().0.reconnect().await?; + TlsStream::new(stream, self.connector.clone(), self.hostname.clone()).await + } +} + +impl Deref for TlsStream { + type Target = RustlsStream; + fn deref(&self) -> &Self::Target { + &self.stream + } +} + +impl DerefMut for TlsStream { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.stream + } +} + +impl AsyncRead for TlsStream { + fn poll_read( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + self.project().stream.poll_read(cx, buf) + } +} + +impl AsyncWrite for TlsStream { + fn poll_write( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> std::task::Poll> { + self.project().stream.poll_write(cx, buf) + } + + fn poll_flush( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.project().stream.poll_flush(cx) + } + + fn poll_shutdown( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.project().stream.poll_shutdown(cx) + } +} diff --git a/src/worker/builder.rs b/src/worker/builder.rs new file mode 100644 index 00000000..f5768216 --- /dev/null +++ b/src/worker/builder.rs @@ -0,0 +1,159 @@ +use super::{runner::Closure, CallbacksRegistry, Client, Worker}; +use crate::{ + proto::{utils, ClientOptions}, + Error, Job, JobRunner, WorkerId, +}; +use std::future::Future; +use tokio::io::{AsyncRead, AsyncWrite, BufStream}; +use tokio::net::TcpStream as TokioStream; + +/// Convenience wrapper for building a Faktory worker. +/// +/// See the [`Worker`] documentation for details. +pub struct WorkerBuilder { + opts: ClientOptions, + workers_count: usize, + callbacks: CallbacksRegistry, +} + +impl Default for WorkerBuilder { + /// Construct a new [`WorkerBuilder`](struct.WorkerBuilder.html) with default worker options and the url fetched from environment + /// variables. + /// + /// This will construct a worker where: + /// + /// - `hostname` is this machine's hostname. + /// - `wid` is a randomly generated string. + /// - `pid` is the OS PID of this process. + /// - `labels` is `["rust"]`. + /// + fn default() -> Self { + WorkerBuilder { + opts: ClientOptions::default(), + workers_count: 1, + callbacks: CallbacksRegistry::default(), + } + } +} + +impl WorkerBuilder { + /// Set the hostname to use for this worker. + /// + /// Defaults to the machine's hostname as reported by the operating system. 
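Each of the defaults listed above can be overridden on the builder before connecting. A minimal sketch of that, under the assumption that a Faktory server is reachable via the standard environment variables; the hostname, worker id, labels, and queue name below are placeholders, not values from this crate:

```rust
use faktory::{Job, WorkerBuilder, WorkerId};
use std::io;

#[tokio::main]
async fn main() {
    let mut w = WorkerBuilder::default()
        .hostname("worker-host-1".to_string()) // instead of the OS hostname
        .wid(WorkerId::new("worker-1")) // instead of a randomly generated id
        .labels(["rust".to_string(), "email".to_string()]) // instead of ["rust"]
        .workers(4) // instead of a single processing task
        .register_fn("example", |job: Job| async move {
            println!("{:?}", job);
            Ok::<(), io::Error>(())
        })
        .connect(None)
        .await
        .unwrap();

    if let Err(e) = w.run(&["default"]).await {
        println!("worker failed: {}", e);
    }
}
```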
+ pub fn hostname(mut self, hn: String) -> Self { + self.opts.hostname = Some(hn); + self + } + + /// Set a unique identifier for this worker. + /// + /// Defaults to a randomly generated 32-char ASCII string. + pub fn wid(mut self, wid: WorkerId) -> Self { + self.opts.wid = Some(wid); + self + } + + /// Set the labels to use for this worker. + /// + /// Defaults to `["rust"]`. + /// + /// Note that calling this overrides the labels set previously. + /// + /// If you need to extend the labels already set, use [`WorkerBuilder::add_to_labels`] instead. + pub fn labels(mut self, labels: I) -> Self + where + I: IntoIterator, + { + self.opts.labels = labels.into_iter().collect(); + self + } + + /// Extend the worker's labels. + /// + /// Note that calling this will add the provided labels to those that are already there or - + /// if no labels have been explicitly set before - to the default `"rust"` label. + /// + /// If you need to override the labels set previously, use [`WorkerBuilder::labels`] instead. + pub fn add_to_labels(mut self, labels: I) -> Self + where + I: IntoIterator, + { + self.opts.labels.extend(labels); + self + } + + /// Set the number of workers to use `run` and `run_to_completion`. + /// + /// Defaults to 1. + pub fn workers(mut self, w: usize) -> Self { + self.workers_count = w; + self + } + + /// Register a handler function for the given job type (`kind`). + /// + /// Whenever a job whose type matches `kind` is fetched from the Faktory, the given handler + /// function is called with that job as its argument. + /// + /// Note that only one single handler per job kind is supported. Registering another handler + /// for the same job kind will silently override the handler registered previously. + pub fn register_fn(self, kind: K, handler: H) -> Self + where + K: Into, + H: Fn(Job) -> Fut + Send + Sync + 'static, + Fut: Future> + Send, + { + self.register(kind, Closure(handler)) + } + + /// Register a handler for the given job type (`kind`). + /// + /// Whenever a job whose type matches `kind` is fetched from the Faktory, the given handler + /// object is called with that job as its argument. + /// + /// Note that only one single handler per job kind is supported. Registering another handler + /// for the same job kind will silently override the handler registered previously. + pub fn register(mut self, kind: K, runner: H) -> Self + where + K: Into, + H: JobRunner + 'static, + { + self.callbacks.insert(kind.into(), Box::new(runner)); + self + } + + /// Connect to a Faktory server with a non-standard stream. + pub async fn connect_with( + mut self, + stream: S, + pwd: Option, + ) -> Result, E>, Error> { + self.opts.password = pwd; + self.opts.is_worker = true; + let buffered = BufStream::new(stream); + let client = Client::new(buffered, self.opts).await?; + Ok(Worker::new(client, self.workers_count, self.callbacks).await) + } + + /// Connect to a Faktory server. + /// + /// If `url` is not given, will use the standard Faktory environment variables. Specifically, + /// `FAKTORY_PROVIDER` is read to get the name of the environment variable to get the address + /// from (defaults to `FAKTORY_URL`), and then that environment variable is read to get the + /// server address. If the latter environment variable is not defined, the connection will be + /// made to + /// + /// ```text + /// tcp://localhost:7419 + /// ``` + /// + /// If `url` is given, but does not specify a port, it defaults to 7419. 
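The `connect_with` path from earlier in this impl block (a non-standard stream plus an explicit password) has no example of its own, so here is a minimal sketch of it. It assumes a password-protected Faktory server; the address and password are placeholders:

```rust
use faktory::{Job, WorkerBuilder};
use std::io;
use tokio::net::TcpStream;

#[tokio::main]
async fn main() {
    // Placeholders: point this at your password-protected Faktory server.
    let stream = TcpStream::connect("localhost:7419").await.unwrap();

    let mut w = WorkerBuilder::default()
        .register_fn("example", |job: Job| async move {
            println!("{:?}", job);
            Ok::<(), io::Error>(())
        })
        .connect_with(stream, Some("my-secret".to_string()))
        .await
        .unwrap();

    // Fetch and run a single job from the "default" queue.
    let _did_work = w.run_one(0, &["default"]).await.unwrap();
}
```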
+ pub async fn connect( + self, + url: Option<&str>, + ) -> Result, E>, Error> { + let url = utils::parse_provided_or_from_env(url)?; + let stream = TokioStream::connect(utils::host_from_url(&url)).await?; + self.connect_with(stream, None).await + } +} diff --git a/src/worker/health.rs b/src/worker/health.rs new file mode 100644 index 00000000..47749123 --- /dev/null +++ b/src/worker/health.rs @@ -0,0 +1,87 @@ +use super::{Worker, STATUS_QUIET, STATUS_RUNNING, STATUS_TERMINATING}; +use crate::{proto::HeartbeatStatus, Error}; +use std::{ + error::Error as StdError, + sync::{atomic, Arc}, + time, +}; +use tokio::io::{AsyncBufRead, AsyncWrite}; +use tokio::time::sleep as tokio_sleep; + +impl Worker +where + S: AsyncBufRead + AsyncWrite + Send + Unpin, + E: StdError, +{ + /// Send beats to Fakotry and quiet/terminate workers if signalled so. + /// + /// Some core details: + /// - beats should be sent to Faktory at least every 15 seconds; + /// - a worker's lifecycle is "running -> quiet -> terminate"; + /// - STATUS_QUIET means the worker should not consume any new jobs, + /// but should _continue_ processing its current job (if any); + /// + /// See more details [here](https://github.com/contribsys/faktory/blob/b4a93227a3323ab4b1365b0c37c2fac4f9588cc8/server/workers.go#L13-L49). + pub(crate) async fn listen_for_heartbeats( + &mut self, + statuses: &[Arc], + ) -> Result { + let mut target = STATUS_RUNNING; + + let mut last = time::Instant::now(); + + loop { + tokio_sleep(time::Duration::from_millis(100)).await; + + // has a worker failed? + if target == STATUS_RUNNING + && statuses + .iter() + .any(|s| s.load(atomic::Ordering::SeqCst) == STATUS_TERMINATING) + { + // tell all workers to exit + // (though chances are they've all failed already) + for s in statuses { + s.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); + } + break Ok(false); + } + + if last.elapsed().as_secs() < 5 { + // don't sent a heartbeat yet + continue; + } + + match self.c.heartbeat().await { + Ok(hb) => { + match hb { + HeartbeatStatus::Ok => {} + HeartbeatStatus::Quiet => { + // tell the workers to eventually terminate + for s in statuses { + s.store(STATUS_QUIET, atomic::Ordering::SeqCst); + } + target = STATUS_QUIET; + } + HeartbeatStatus::Terminate => { + // tell the workers to terminate + // *and* fail the current job and immediately return + for s in statuses { + s.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); + } + break Ok(true); + } + } + } + Err(e) => { + // for this to fail, the workers have probably also failed + for s in statuses { + s.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); + } + break Err(e); + } + } + last = time::Instant::now(); + } + } +} diff --git a/src/worker/mod.rs b/src/worker/mod.rs new file mode 100644 index 00000000..cc106e30 --- /dev/null +++ b/src/worker/mod.rs @@ -0,0 +1,419 @@ +use super::proto::{Client, Reconnect}; +use crate::error::Error; +use crate::proto::{Ack, Fail, Job}; +use fnv::FnvHashMap; +use std::sync::{atomic, Arc}; +use std::{error::Error as StdError, sync::atomic::AtomicUsize}; +use tokio::io::{AsyncBufRead, AsyncWrite}; +use tokio::net::TcpStream; +use tokio::task::{AbortHandle, JoinSet}; + +mod builder; +mod health; +mod runner; +mod state; + +pub use builder::WorkerBuilder; +pub use runner::JobRunner; + +pub(crate) const STATUS_RUNNING: usize = 0; +pub(crate) const STATUS_QUIET: usize = 1; +pub(crate) const STATUS_TERMINATING: usize = 2; + +type CallbacksRegistry = FnvHashMap>; + +/// `Worker` is used to run a worker that processes jobs provided by 
Faktory. +/// +/// # Building the worker +/// +/// Faktory needs a decent amount of information from its workers, such as a unique worker ID, a +/// hostname for the worker, its process ID, and a set of labels used to identify the worker. In +/// order to enable setting all these, constructing a worker is a two-step process. You first use a +/// [`WorkerBuilder`] (which conveniently implements a sensible +/// `Default`) to set the worker metadata, as well as to register any job handlers. You then use +/// one of the `connect_*` methods to finalize the worker and connect to the Faktory server. +/// +/// In most cases, [`WorkerBuilder::default()`] will do what you want. You only need to augment it +/// with calls to [`register`](WorkerBuilder::register) to register handlers +/// for each of your job types, and then you can connect. If you have different *types* of workers, +/// you may also want to use [`labels`](WorkerBuilder::labels) to distinguish +/// them in the Faktory Web UI. To specify that some jobs should only go to some workers, use +/// different queues. +/// +/// ## Handlers +/// +/// For each [`Job`](struct.Job.html) that the worker receives, the handler that is registered for +/// that job's type will be called. If a job is received with a type for which no handler exists, +/// the job will be failed and returned to the Faktory server. Similarly, if a handler returns an +/// error response, the job will be failed, and the error reported back to the Faktory server. +/// +/// If you are new to Rust, getting the handler types to work out can be a little tricky. If you +/// want to understand why, I highly recommend that you have a look at the chapter on [closures and +/// generic +/// parameters](https://doc.rust-lang.org/book/second-edition/ch13-01-closures.html#using-closures-with-generic-parameters-and-the-fn-traits) +/// in the Rust Book. If you just want it to work, my recommendation is to either use regular +/// functions instead of closures, and giving `&func_name` as the handler, **or** wrapping all your +/// closures in `Box::new()`. +/// +/// ## Concurrency +/// +/// By default, only a single thread is spun up to process the jobs given to this worker. If you +/// want to dedicate more resources to processing jobs, you have a number of options listed below. +/// As you go down the list below, efficiency increases, but fault isolation decreases. I will not +/// give further detail here, but rather recommend that if these don't mean much to you, you should +/// use the last approach and let the library handle the concurrency for you. +/// +/// - You can spin up more worker processes by launching your worker program more than once. +/// - You can create more than one `Worker`. +/// - You can call [`WorkerBuilder::workers`] to set +/// the number of worker threads you'd like the `Worker` to use internally. +/// +/// # Connecting to Faktory +/// +/// To fetch jobs, the `Worker` must first be connected to the Faktory server. Exactly how you do +/// that depends on your setup. In most cases, you'll want to use [`WorkerBuilder::connect`], and provide +/// a connection URL. If you supply a URL, it must be of the form: +/// +/// ```text +/// protocol://[:password@]hostname[:port] +/// ``` +/// +/// Faktory suggests using the `FAKTORY_PROVIDER` and `FAKTORY_URL` environment variables (see +/// their docs for more information) with `localhost:7419` as the fallback default. If you want +/// this behavior, pass `None` as the URL. 
+/// +/// See the [`Client` examples](struct.Client.html#examples) for examples of how to connect to +/// different Factory setups. +/// +/// # Worker lifecycle +/// +/// Okay, so you've built your worker and connected to the Faktory server. Now what? +/// +/// If all this process is doing is handling jobs, reconnecting on failure, and exiting when told +/// to by the Faktory server, you should use +/// [`run_to_completion`](Worker::run_to_completion). If you want more +/// fine-grained control over the lifetime of your process, you should use +/// [`run`](Worker::run). See the documentation for each of these +/// methods for details. +/// +/// # Examples +/// +/// Create a worker with all default options, register a single handler (for the `foo` job +/// type), connect to the Faktory server, and start accepting jobs. +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use faktory::{Worker, Job}; +/// use std::io; +/// +/// async fn process_job(job: Job) -> io::Result<()> { +/// println!("{:?}", job); +/// Ok(()) +/// } +/// +/// let mut w = Worker::builder() +/// .register_fn("foo", process_job) +/// .connect(None) +/// .await +/// .unwrap(); +/// +/// if let Err(e) = w.run(&["default"]).await { +/// println!("worker failed: {}", e); +/// } +/// # }); +/// ``` +/// +/// Handler can be inlined. +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// # use faktory::Worker; +/// # use std::io; +/// let _w = Worker::builder() +/// .register_fn("bar", |job| async move { +/// println!("{:?}", job); +/// Ok::<(), io::Error>(()) +/// }) +/// .connect(None) +/// .await +/// .unwrap(); +/// }); +/// ``` +/// +/// You can also register anything that implements [`JobRunner`] to handle jobs +/// with [`register`](WorkerBuilder::register). +/// +pub struct Worker { + c: Client, + worker_states: Arc, + callbacks: Arc>, + terminated: bool, +} + +impl Worker { + /// Creates an ergonomic constructor for a new [`Worker`]. + /// + /// Also equivalent to [`WorkerBuilder::default`]. + pub fn builder() -> WorkerBuilder { + WorkerBuilder::default() + } +} + +impl Worker { + async fn reconnect(&mut self) -> Result<(), Error> { + self.c.reconnect().await + } +} + +impl Worker { + async fn new(c: Client, workers_count: usize, callbacks: CallbacksRegistry) -> Self { + Worker { + c, + callbacks: Arc::new(callbacks), + worker_states: Arc::new(state::WorkerStatesRegistry::new(workers_count)), + terminated: false, + } + } +} + +enum Failed { + Application(E), + BadJobType(String), +} + +impl Worker { + async fn run_job(&mut self, job: Job) -> Result<(), Failed> { + let handler = self + .callbacks + .get(job.kind()) + .ok_or(Failed::BadJobType(job.kind().to_string()))?; + handler.run(job).await.map_err(Failed::Application) + } + + async fn report_on_all_workers(&mut self) -> Result<(), Error> { + let worker_states = Arc::get_mut(&mut self.worker_states) + .expect("all workers are scoped to &mut of the user-code-visible Worker"); + + // retry delivering notification about our last job result. + // we know there's no leftover thread at this point, so there's no race on the option. 
+ for wstate in worker_states { + let wstate = wstate.get_mut().unwrap(); + if let Some(res) = wstate.take_last_result() { + let r = match res { + Ok(ref jid) => self.c.issue(&Ack::new(jid.clone())).await, + Err(ref fail) => self.c.issue(fail).await, + }; + + let r = match r { + Ok(r) => r, + Err(e) => { + wstate.save_last_result(res); + return Err(e); + } + }; + + if let Err(e) = r.read_ok().await { + // it could be that the server did previously get our ACK/FAIL, and that it was + // the resulting OK that failed. in that case, we would get an error response + // when re-sending the job response. this should not count as critical. other + // errors, however, should! + if let Error::IO(_) = e { + wstate.save_last_result(res); + return Err(e); + } + } + } + } + + Ok(()) + } + + /// Fail currently running jobs. + /// + /// This will FAIL _all_ the jobs even though they're still running. + /// Returns the number of workers that may still be processing jobs. + async fn force_fail_all_workers(&mut self) -> usize { + let mut running = 0; + for wstate in &*self.worker_states { + let may_be_jid = wstate.lock().unwrap().take_currently_running(); + if let Some(jid) = may_be_jid { + running += 1; + let f = Fail::generic(jid, "terminated"); + let _ = match self.c.issue(&f).await { + Ok(r) => r.read_ok().await, + // We are ignoring any FAIL command issue errors, since this is already + // an "emergency" case. + Err(_) => continue, + } + .is_ok(); + } + } + running + } + + /// Fetch and run a single job, and then return. + pub async fn run_one(&mut self, worker: usize, queues: &[Q]) -> Result + where + Q: AsRef + Sync, + { + let job = match self.c.fetch(queues).await? { + None => return Ok(false), + Some(j) => j, + }; + + let jid = job.jid.clone(); + + self.worker_states.register_running(worker, jid.clone()); + + match self.run_job(job).await { + Ok(_) => { + self.worker_states.register_success(worker, jid.clone()); + self.c.issue(&Ack::new(jid)).await?.read_ok().await?; + } + Err(e) => { + let fail = match e { + Failed::BadJobType(jt) => Fail::generic(jid, format!("No handler for {}", jt)), + Failed::Application(e) => Fail::generic_with_backtrace(jid, e), + }; + self.worker_states.register_failure(worker, fail.clone()); + self.c.issue(&fail).await?.read_ok().await?; + } + } + + self.worker_states.reset(worker); + + Ok(true) + } +} + +impl< + S: AsyncBufRead + AsyncWrite + Reconnect + Send + Unpin + 'static, + E: StdError + 'static + Send, + > Worker +{ + async fn for_worker(&mut self) -> Result { + Ok(Worker { + // We actually only need: + // + // 1) a connected client; + // 2) access to callback registry; + // 3) access to this worker's state (not all of them) + // + // For simplicity, we are currently creating a processing worker as a full replica + // of the coordinating worker. + // + // In the future though this can be updated to strip off `terminated` from + // the processing worker (as unused) and disallow access to other processing workers' + // states from inside this processing worker (as privilege not needed). 
+ // + c: self.c.connect_again().await?, + callbacks: Arc::clone(&self.callbacks), + worker_states: Arc::clone(&self.worker_states), + terminated: self.terminated, + }) + } + + async fn spawn_worker_into( + &mut self, + set: &mut JoinSet>, + status: Arc, + worker: usize, + queues: &[Q], + ) -> Result + where + Q: AsRef, + { + let mut w = self.for_worker().await?; + let queues: Vec<_> = queues.iter().map(|s| s.as_ref().to_string()).collect(); + Ok(set.spawn(async move { + while status.load(atomic::Ordering::SeqCst) == STATUS_RUNNING { + if let Err(e) = w.run_one(worker, &queues[..]).await { + status.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); + return Err(e); + } + } + status.store(STATUS_TERMINATING, atomic::Ordering::SeqCst); + Ok(()) + })) + } + + /// Run this worker on the given `queues` until an I/O error occurs (`Err` is returned), or + /// until the server tells the worker to disengage (`Ok` is returned). + /// + /// The value in an `Ok` indicates the number of workers that may still be processing jobs. + /// + /// If an error occurred while reporting a job success or failure, the result will be re-reported to the server + /// without re-executing the job. If the worker was terminated (i.e., `run` returns with an `Ok` response), + /// the worker should **not** try to resume by calling `run` again. This will cause a panic. + pub async fn run(&mut self, queues: &[Q]) -> Result + where + Q: AsRef, + { + assert!( + !self.terminated, + "do not re-run a terminated worker (coordinator)" + ); + self.report_on_all_workers().await?; + + let workers_count = self.worker_states.len(); + + // keep track of the current status of each worker + let statuses: Vec<_> = (0..workers_count) + .map(|_| Arc::new(atomic::AtomicUsize::new(STATUS_RUNNING))) + .collect(); + + let mut join_set = JoinSet::new(); + for (worker, status) in statuses.iter().enumerate() { + let _abort_handle = self + .spawn_worker_into(&mut join_set, Arc::clone(status), worker, queues) + .await?; + } + + let exit = self.listen_for_heartbeats(&statuses).await; + + // there are a couple of cases here: + // + // - we got TERMINATE, so we should just return, even if a worker is still running + // - we got TERMINATE and all workers have exited + // - we got an error from heartbeat() + // + self.terminated = exit.is_ok(); + + if let Ok(true) = exit { + let running = self.force_fail_all_workers().await; + if running != 0 { + return Ok(running); + } + } + + // we want to expose worker errors, or otherwise the heartbeat error + let mut results = Vec::with_capacity(workers_count); + while let Some(res) = join_set.join_next().await { + results.push(res.expect("joined ok")); + } + + let result = results.into_iter().collect::, _>>(); + + match exit { + Ok(_) => result.map(|_| 0), + Err(e) => result.and(Err(e)), + } + } + + /// Run this worker until the server tells us to exit or a connection cannot be re-established. + /// + /// This function never returns. When the worker decides to exit, the process is terminated. + pub async fn run_to_completion(mut self, queues: &[Q]) -> ! 
+ where + Q: AsRef, + { + use std::process; + while self.run(queues).await.is_err() { + if self.reconnect().await.is_err() { + break; + } + } + + process::exit(0); + } +} diff --git a/src/worker/runner.rs b/src/worker/runner.rs new file mode 100644 index 00000000..caf7030a --- /dev/null +++ b/src/worker/runner.rs @@ -0,0 +1,123 @@ +#[cfg(doc)] +use super::Worker; + +use crate::Job; +use std::future::Future; + +/// Implementations of this trait can be registered to run jobs in a [`Worker`](Worker). +/// +/// # Example +/// +/// Create a worker with all default options, register a single handler (for the `foo` job +/// type), connect to the Faktory server, and start accepting jobs. +/// The handler is a struct that implements [`JobRunner`]. +/// +/// ```no_run +/// # tokio_test::block_on(async { +/// use async_trait::async_trait; +/// use faktory::{Job, JobRunner, WorkerBuilder}; +/// use std::io; +/// +/// struct MyHandler { +/// config: String, +/// } +/// +/// #[async_trait] +/// impl JobRunner for MyHandler { +/// type Error = io::Error; +/// async fn run(&self, job: Job) -> Result<(), Self::Error> { +/// println!("config: {}", self.config); +/// println!("job: {:?}", job); +/// Ok(()) +/// } +/// } +/// +/// let handler = MyHandler { +/// config: "bar".to_string(), +/// }; +/// +/// let mut w = WorkerBuilder::default() +/// .register("foo", handler) +/// .connect(None) +/// .await +/// .unwrap(); +/// +/// if let Err(e) = w.run(&["default"]).await { +/// println!("worker failed: {}", e); +/// } +/// }); +/// ``` +#[async_trait::async_trait] +pub trait JobRunner: Send + Sync { + /// The error type that the handler may return. + type Error; + /// A handler function that runs a job. + async fn run(&self, job: Job) -> Result<(), Self::Error>; +} + +// Implements JobRunner for a closure that takes a Job and returns a Result<(), E> +#[async_trait::async_trait] +impl JobRunner for Box +where + F: Send + Sync + Fn(Job) -> Fut, + Fut: Future> + Send, +{ + type Error = E; + async fn run(&self, job: Job) -> Result<(), E> { + self(job).await + } +} + +// Additional Blanket Implementations +#[async_trait::async_trait] +impl<'a, E, F, Fut> JobRunner for &'a F +where + F: Send + Sync + Fn(Job) -> Fut, + Fut: Future> + Send, +{ + type Error = E; + async fn run(&self, job: Job) -> Result<(), E> { + self(job).await + } +} + +#[async_trait::async_trait] +impl<'a, E, F, Fut> JobRunner for &'a mut F +where + F: Send + Sync + Fn(Job) -> Fut, + Fut: Future> + Send, +{ + type Error = E; + async fn run(&self, job: Job) -> Result<(), E> { + (self as &F)(job).await + } +} + +/// A closure that implements [`JobRunner`]. +/// +/// The `Closure` newtype is introduced to avoid having to box a job handler: +/// we can now use `Closure(handler)` instead of `Box::new(handler)` and make +/// the compiler happy. +/// +/// The `repr(transparent)` macro is to guarantee that this single-field struct +/// and the wrapped handler have the same layout and so it is safe to operate on +/// the in-memory representations of _the_ handler (submitted to us +/// from the user code) and its enclosed (by us) self. 
+/// +/// Ref: https://github.com/jonhoo/faktory-rs/pull/51 +#[repr(transparent)] +pub(crate) struct Closure(pub F); + +#[async_trait::async_trait] +impl JobRunner for Closure +where + F: Send + Sync + Fn(Job) -> Fut, + Fut: Future> + Send, +{ + type Error = E; + async fn run(&self, job: Job) -> Result<(), E> { + (self.0)(job).await + } +} + +pub(crate) type BoxedJobRunner = Box>; diff --git a/src/worker/state.rs b/src/worker/state.rs new file mode 100644 index 00000000..f8f8ea5d --- /dev/null +++ b/src/worker/state.rs @@ -0,0 +1,88 @@ +use crate::proto::{Fail, JobId}; +use std::{ + ops::{Deref, DerefMut}, + sync::Mutex, +}; + +#[derive(Default)] +pub(crate) struct WorkerState { + last_job_result: Option>, + running_job: Option, +} + +impl WorkerState { + pub(crate) fn take_last_result(&mut self) -> Option> { + self.last_job_result.take() + } + + pub(crate) fn take_currently_running(&mut self) -> Option { + self.running_job.take() + } + + pub(crate) fn save_last_result(&mut self, res: Result) { + self.last_job_result = Some(res) + } +} + +pub(crate) struct WorkerStatesRegistry(Vec>); + +impl Deref for WorkerStatesRegistry { + type Target = Vec>; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for WorkerStatesRegistry { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<'a> IntoIterator for &'a WorkerStatesRegistry { + type Item = &'a Mutex; + type IntoIter = <&'a Vec> as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl<'a> IntoIterator for &'a mut WorkerStatesRegistry { + type Item = &'a mut Mutex; + type IntoIter = <&'a mut Vec> as IntoIterator>::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl WorkerStatesRegistry { + pub(crate) fn new(workers_count: usize) -> Self { + Self((0..workers_count).map(|_| Default::default()).collect()) + } + + pub(crate) fn register_running(&self, worker: usize, jid: JobId) { + self[worker].lock().expect("lock acquired").running_job = Some(jid); + } + + pub(crate) fn register_success(&self, worker: usize, jid: JobId) { + self[worker] + .lock() + .expect("lock acquired") + .save_last_result(Ok(jid)); + } + + pub(crate) fn register_failure(&self, worker: usize, f: Fail) { + self[worker] + .lock() + .expect("lock acquired") + .save_last_result(Err(f)); + } + + pub(crate) fn reset(&self, worker: usize) { + let mut state = self[worker].lock().expect("lock acquired"); + state.last_job_result = None; + state.running_job = None; + } +} diff --git a/tests/consumer.rs b/tests/consumer.rs index ee7c4aea..5fed5a54 100644 --- a/tests/consumer.rs +++ b/tests/consumer.rs @@ -1,25 +1,83 @@ -extern crate faktory; -extern crate mockstream; -extern crate serde_json; -extern crate url; - +/// This sketch should help appreciate how mock streams are being distributed across workers. +/// +/// Side-note. Note how `CLIENT` (another node), `WEB UI` (browser), `FAKTORY SERVER` (contribsys Faktory binary), +/// and `FAKTORY WORKER (COORDINATOR)` are all separate processes (but `client` and `worker` _can_ belong +/// to the same process), while processing workers are threads (tokio tasks) in the `FAKTORY WORKER` process. 
+/// +/// __________________________ +/// | | __________ +/// | CLIENT | | | +/// | (PRODUCING AND TRACKING) | | WEB UI | +/// |__________________________| |__________| +/// | | +/// |:7419 _________________ | +/// | | | |:7420 +/// |---------------> | FAKTORY SERVER | <-----| +/// | localhost:7419 | +/// | localhost:7420 | +/// |---------------> |_________________| +/// |:7419 +/// | +/// | ___________________________________________________________________________________________________ +/// | | | +/// |_____________ | FAKTORY WORKER (COORDINATOR) | +/// | | with at least N + 2 threads: main thread, heartbeat thread, and N processing worker threads - | +/// | | tokio tasks - the actual workers; the desired count is specified via WorkerBuilder::workers(N) | +/// | | | +/// | |--> HEARTBEAT | +/// | | - send b"BEAT {\"wid\":\"wid\"}" to Faktory every 5 seconds; | +/// | | - set workers quiet if Faktory asked so; | +/// | | - terminate workers if: | +/// | | - corresponding signal received from Faktory (returning Ok(true)) | +/// | | - one of the workers failed (returning Ok(false)) | +/// | | - critical error in HEARTBEART thread occurs (returning Err(e) | +/// | | | +/// | |--> WORKER (index 0) with the following life-cycle: | +/// | - get owned stream by reconnecting coordinator client via `self.stream.reconnect().await` | +/// | (which for TcpStream will lead to establishing a new TCP connection to localhost:7419); | | +/// | - init a `Client` (say HELLO to HI); | +/// | - loop { self.run_one().await } until critical error or signal from coordinator; | +/// | |--> ... | +/// | |--> WORKER (index N) | +/// |___________________________________________________________________________________________________| +/// +/// Note how each processing worker is getting its owned stream and how we can control which stream is return +/// by means of implementing `Reconnect` for the stream we are supplying to the `Worker` initially. +/// +/// So, what we are doing for testing purposes is: +/// 1) provide a [`Stream`] to [`connect_with`](`WorkerBuilder::connect_with`) that will be holding inside a vector of mock streams +/// and with a reference to the stream of current interest (see `mine` field on [`Stream`]) and a "pointer" (see `take_next` field +/// on the private `mock::inner::Innner`) to the stream that will be given away on next call of `reconnect`; +/// 2) implement [`AsyncRead`] and [`AsyncWrite`] for the [`Stream`] so that internally we are polling read and write against the stream +/// referenced by [`mine`](Stream::mine). 
+/// 3) implement [`faktory::Reconnect`] for the [`Stream`] in a way that each time they call the `reconnect` method of the stream +/// we set `mine` to reference the stream that the "pointer" is currently pointing to and increment the "pointer" by 1; +/// 4) implement `Drop` for `mock::Stream` in a way that if the value of the "pointer" is not equal the length of the internal +/// vector of streams, we panic to indicate that we mis-planned things when setting up the test; mod mock; use faktory::*; -use std::io; -use std::thread; -use std::time::Duration; +use std::{io, time::Duration}; +use tokio::{spawn, time::sleep}; -#[test] -fn hello() { +#[tokio::test(flavor = "multi_thread")] +async fn hello() { let mut s = mock::Stream::default(); - - let mut c = ConsumerBuilder::default(); - c.hostname("host".to_string()) - .wid("wid".to_string()) - .labels(vec!["foo".to_string(), "bar".to_string()]); - c.register("never_called", |_| -> io::Result<()> { unreachable!() }); - let c = c.connect_with(s.clone(), None).unwrap(); + let w: Worker<_, io::Error> = WorkerBuilder::default() + .hostname("host".to_string()) + .wid(WorkerId::new("wid")) + .labels([ + "will".to_string(), + "be!".to_string(), + "overwritten".to_string(), + ]) + .labels(["foo".to_string(), "bar".to_string()]) + .add_to_labels(["will".to_string()]) + .add_to_labels(["be".to_string(), "added".to_string()]) + .register_fn("never_called", |_j: Job| async move { unreachable!() }) + .connect_with(s.clone(), None) + .await + .unwrap(); let written = s.pop_bytes_written(0); assert!(written.starts_with(b"HELLO {")); let written: serde_json::Value = serde_json::from_slice(&written[b"HELLO ".len()..]).unwrap(); @@ -32,21 +90,20 @@ fn hello() { assert_eq!(written.get("pid").map(|h| h.is_number()), Some(true)); assert_eq!(written.get("v").and_then(|h| h.as_i64()), Some(2)); let labels = written["labels"].as_array().unwrap(); - assert_eq!(labels, &["foo", "bar"]); + assert_eq!(labels, &["foo", "bar", "will", "be", "added"]); - drop(c); + drop(w); let written = s.pop_bytes_written(0); assert_eq!(written, b"END\r\n"); } -#[test] -fn hello_pwd() { +#[tokio::test(flavor = "multi_thread")] +async fn hello_pwd() { let mut s = mock::Stream::with_salt(1545, "55104dc76695721d"); - - let mut c = ConsumerBuilder::default(); - c.register("never_called", |_| -> io::Result<()> { unreachable!() }); - let c = c + let w: Worker<_, io::Error> = WorkerBuilder::default() + .register_fn("never_called", |_j: Job| async move { unreachable!() }) .connect_with(s.clone(), Some("foobar".to_string())) + .await .unwrap(); let written = s.pop_bytes_written(0); assert!(written.starts_with(b"HELLO {")); @@ -56,19 +113,20 @@ fn hello_pwd() { written.get("pwdhash").and_then(|h| h.as_str()), Some("6d877f8e5544b1f2598768f817413ab8a357afffa924dedae99eb91472d4ec30") ); - - drop(c); + drop(w); } -#[test] -fn dequeue() { +#[tokio::test(flavor = "multi_thread")] +async fn dequeue() { let mut s = mock::Stream::default(); - let mut c = ConsumerBuilder::default(); - c.register("foobar", |job: Job| -> io::Result<()> { - assert_eq!(job.args(), &["z"]); - Ok(()) - }); - let mut c = c.connect_with(s.clone(), None).unwrap(); + let mut w = WorkerBuilder::default() + .register_fn("foobar", |job: Job| async move { + assert_eq!(job.args(), &["z"]); + Ok::<(), io::Error>(()) + }) + .connect_with(s.clone(), None) + .await + .unwrap(); s.ignore(0); s.push_bytes_to_read( @@ -86,7 +144,7 @@ fn dequeue() { }\r\n", ); s.ok(0); // for the ACK - if let Err(e) = c.run_one(0, &["default"]) { + if let Err(e) 
= w.run_one(0, &["default"]).await { println!("{:?}", e); unreachable!(); } @@ -99,15 +157,17 @@ fn dequeue() { ); } -#[test] -fn dequeue_first_empty() { +#[tokio::test(flavor = "multi_thread")] +async fn dequeue_first_empty() { let mut s = mock::Stream::default(); - let mut c = ConsumerBuilder::default(); - c.register("foobar", |job: Job| -> io::Result<()> { - assert_eq!(job.args(), &["z"]); - Ok(()) - }); - let mut c = c.connect_with(s.clone(), None).unwrap(); + let mut w = WorkerBuilder::default() + .register_fn("foobar", |job: Job| async move { + assert_eq!(job.args(), &["z"]); + Ok::<(), io::Error>(()) + }) + .connect_with(s.clone(), None) + .await + .unwrap(); s.ignore(0); s.push_bytes_to_read( @@ -127,7 +187,7 @@ fn dequeue_first_empty() { s.ok(0); // for the ACK // run once, shouldn't do anything - match c.run_one(0, &["default"]) { + match w.run_one(0, &["default"]).await { Ok(did_work) => assert!(!did_work), Err(e) => { println!("{:?}", e); @@ -135,7 +195,7 @@ fn dequeue_first_empty() { } } // run again, this time doing the job - match c.run_one(0, &["default"]) { + match w.run_one(0, &["default"]).await { Ok(did_work) => assert!(did_work), Err(e) => { println!("{:?}", e); @@ -154,17 +214,19 @@ fn dequeue_first_empty() { ); } -#[test] -fn well_behaved() { +#[tokio::test(flavor = "multi_thread")] +async fn well_behaved() { let mut s = mock::Stream::new(2); // main plus worker - let mut c = ConsumerBuilder::default(); - c.wid("wid".to_string()); - c.register("foobar", |_| -> io::Result<()> { - // NOTE: this time needs to be so that it lands between the first heartbeat and the second - thread::sleep(Duration::from_secs(7)); - Ok(()) - }); - let mut c = c.connect_with(s.clone(), None).unwrap(); + let mut w = WorkerBuilder::default() + .wid(WorkerId::new("wid")) + .register_fn("foobar", |_| async move { + // NOTE: this time needs to be so that it lands between the first heartbeat and the second + sleep(Duration::from_secs(7)).await; + Ok::<(), io::Error>(()) + }) + .connect_with(s.clone(), None) + .await + .unwrap(); s.ignore(0); // push a job that'll take a while to run @@ -183,7 +245,7 @@ fn well_behaved() { }\r\n", ); - let jh = thread::spawn(move || c.run(&["default"])); + let jh = spawn(async move { w.run(&["default"]).await }); // the running thread won't return for a while. the heartbeat thingy is going to eventually // send a heartbeat, and we want to respond to that with a "quiet" to make it not accept any @@ -197,7 +259,7 @@ fn well_behaved() { s.push_bytes_to_read(0, b"+{\"state\":\"terminate\"}\r\n"); // at this point, c.run() should eventually return with Ok(0) indicating that it finished. 
- assert_eq!(jh.join().unwrap().unwrap(), 0); + assert_eq!(jh.await.unwrap().unwrap(), 0); // heartbeat should have seen two beats (quiet + terminate) let written = s.pop_bytes_written(0); @@ -219,17 +281,19 @@ fn well_behaved() { ); } -#[test] -fn no_first_job() { - let mut s = mock::Stream::new(2); - let mut c = ConsumerBuilder::default(); - c.wid("wid".to_string()); - c.register("foobar", |_| -> io::Result<()> { - // NOTE: this time needs to be so that it lands between the first heartbeat and the second - thread::sleep(Duration::from_secs(7)); - Ok(()) - }); - let mut c = c.connect_with(s.clone(), None).unwrap(); +#[tokio::test(flavor = "multi_thread")] +async fn no_first_job() { + let mut s = mock::Stream::new(2); // main plus worker + let mut w = WorkerBuilder::default() + .wid(WorkerId::new("wid")) + .register_fn("foobar", |_| async move { + // NOTE: this time needs to be so that it lands between the first heartbeat and the second + sleep(Duration::from_secs(7)).await; + Ok::<(), io::Error>(()) + }) + .connect_with(s.clone(), None) + .await + .unwrap(); s.ignore(0); // push a job that'll take a while to run @@ -248,7 +312,7 @@ fn no_first_job() { }\r\n", ); - let jh = thread::spawn(move || c.run(&["default"])); + let jh = spawn(async move { w.run(&["default"]).await }); // the running thread won't return for a while. the heartbeat thingy is going to eventually // send a heartbeat, and we want to respond to that with a "quiet" to make it not accept any @@ -262,7 +326,7 @@ fn no_first_job() { s.push_bytes_to_read(0, b"+{\"state\":\"terminate\"}\r\n"); // at this point, c.run() should eventually return with Ok(0) indicating that it finished. - assert_eq!(jh.join().unwrap().unwrap(), 0); + assert_eq!(jh.await.unwrap().unwrap(), 0); // heartbeat should have seen two beats (quiet + terminate) let written = s.pop_bytes_written(0); @@ -285,18 +349,20 @@ fn no_first_job() { ); } -#[test] -fn well_behaved_many() { - let mut s = mock::Stream::new(3); - let mut c = ConsumerBuilder::default(); - c.workers(2); - c.wid("wid".to_string()); - c.register("foobar", |_| -> io::Result<()> { - // NOTE: this time needs to be so that it lands between the first heartbeat and the second - thread::sleep(Duration::from_secs(7)); - Ok(()) - }); - let mut c = c.connect_with(s.clone(), None).unwrap(); +#[tokio::test(flavor = "multi_thread")] +async fn well_behaved_many() { + let mut s = mock::Stream::new(3); // main plus 2 workers + let mut w = WorkerBuilder::default() + .workers(2) + .wid(WorkerId::new("wid")) + .register_fn("foobar", |_| async move { + // NOTE: this time needs to be so that it lands between the first heartbeat and the second + sleep(Duration::from_secs(7)).await; + Ok::<(), io::Error>(()) + }) + .connect_with(s.clone(), None) + .await + .unwrap(); s.ignore(0); // push two jobs that'll take a while to run @@ -322,7 +388,7 @@ fn well_behaved_many() { ); } - let jh = thread::spawn(move || c.run(&["default"])); + let jh = spawn(async move { w.run(&["default"]).await }); // the running thread won't return for a while. the heartbeat thingy is going to eventually // send a heartbeat, and we want to respond to that with a "quiet" to make it not accept any @@ -337,7 +403,7 @@ fn well_behaved_many() { s.push_bytes_to_read(0, b"+{\"state\":\"terminate\"}\r\n"); // at this point, c.run() should eventually return with Ok(0) indicating that it finished. 
- assert_eq!(jh.join().unwrap().unwrap(), 0); + assert_eq!(jh.await.unwrap().unwrap(), 0); // heartbeat should have seen two beats (quiet + terminate) let written = s.pop_bytes_written(0); @@ -361,19 +427,34 @@ fn well_behaved_many() { } } -#[test] -fn terminate() { - let mut s = mock::Stream::new(2); - let mut c = ConsumerBuilder::default(); - c.wid("wid".to_string()); - c.register("foobar", |_| -> io::Result<()> { - loop { - thread::sleep(Duration::from_secs(5)); - } - }); - let mut c = c.connect_with(s.clone(), None).unwrap(); +#[tokio::test(flavor = "multi_thread")] +async fn terminate() { + // Internally, the `take_next` member on the `mock::Inner` struct will be incremented from `0` to `1`, + // while the `Stream::mine` wil be pointing to stream with index 0. See how we are later on ignoring bytes + // written to this stream by means of `s.ignore(0)`. + let mut s = mock::Stream::new(2); // main plus worker + + // prepare a worker with only never (!) returning handler + let mut w: Worker<_, io::Error> = WorkerBuilder::default() + .hostname("machine".into()) + .wid(WorkerId::new("wid")) + .register_fn("foobar", |_| async move { + loop { + sleep(Duration::from_secs(5)).await; + } + }) + .connect_with(s.clone(), None) + .await + .unwrap(); + + // what now is being ignored on `mine` channel are these written bytes (pid will vary): + // b"HELLO {\"hostname\":\"machine\",\"wid\":\"wid\",\"pid\":7332,\"labels\":[\"rust\"],\"v\":2}\r\n" + // this was the HELLO from main (coordinating) worker s.ignore(0); + // as if a producing client had sent this job to Faktory and Faktory, in its turn, + // had sent it to the processing (NB) worker, rather than coordinating one (note how we + // are passing `1` as first arg to `s.push_bytes_to_read`) s.push_bytes_to_read( 1, b"$186\r\n\ @@ -389,25 +470,34 @@ fn terminate() { }\r\n", ); - let jh = thread::spawn(move || c.run(&["default"])); + let jh = spawn(async move { + // Note how running a coordinating leads to mock::Stream::reconnect: + // `Worker::run` -> `Worker::spawn_worker_into` -> `Worker::for_worker` -> `Client::connect_again` -> `Stream::reconnect` + // + // So when the `w.run` is triggered, `Stream::reconnect` will fire and the `take_next` member on the `mock::Inner` struct + // will be incremented from `1` to `2`. But, most importently, `mine` will now be pointing to the second + // stream (stream with index 1) from this test, and the _actual_ worker (not the master worker (coordinator)) will + // be talking via this stream. + w.run(&["default"]).await + }); // the running thread won't ever return, because the job never exits. the heartbeat thingy is - // going to eventually send a heartbeat, and we want to respond to that with a "terminate" + // going to eventually (in ~5 seconds) send a heartbeat, and we want to respond to that with a "terminate" s.push_bytes_to_read(0, b"+{\"state\":\"terminate\"}\r\n"); // at this point, c.run() should immediately return with Ok(1) indicating that one job is still // running. - assert_eq!(jh.join().unwrap().unwrap(), 1); + assert_eq!(jh.await.unwrap().unwrap(), 1); - // heartbeat should have seen one beat (terminate) and then send FAIL + // Heartbeat Thread (stream with index 0). 
+ // + // The heartbeat thread should have sent one BEAT command, then an immediate FAIL, and a final END: + // <---------- BEAT ---------><---------------------------- FAIL JOB -----------------------------------------><-END-> + // "BEAT {\"wid\":\"wid\"}\r\nFAIL {\"jid\":\"forever\",\"errtype\":\"unknown\",\"message\":\"terminated\"}\r\nEND\r\n" let written = s.pop_bytes_written(0); let beat = b"BEAT {\"wid\":\"wid\"}\r\nFAIL "; assert_eq!(&written[0..beat.len()], &beat[..]); assert!(written.ends_with(b"\r\nEND\r\n")); - println!( - "{}", - std::str::from_utf8(&written[beat.len()..(written.len() - b"\r\nEND\r\n".len())]).unwrap() - ); let written: serde_json::Value = serde_json::from_slice(&written[beat.len()..(written.len() - b"\r\nEND\r\n".len())]) .unwrap(); @@ -418,13 +508,28 @@ fn terminate() { .and_then(|v| v.as_str()), Some("forever") ); - - // worker should have just fetched once + assert_eq!(written.get("errtype").unwrap().as_str(), Some("unknown")); + assert_eq!(written.get("message").unwrap().as_str(), Some("terminated")); + + // Let's give the worker's client a chance to complete its clean-up on Client's drop (effectively send `END\r\n`), + // and only after that pop the bytes written into its stream. If we do not do this, we will end up with a flaky + // test, where `END\r\n` will sometimes make it to the writer and sometimes not. The `500` ms value is empirical. + sleep(Duration::from_millis(500)).await; + + // Worker Thread (stream with index 1). + // + // The worker thread should have sent HELLO (which in the coordinator's case we threw away with `s.ignore(0)`), FETCH (to + // consume one job from the "default" queue), and END (which is sent as part of the Client's clean-up). + // <------------------------------------ HELLO (PASSWORDLESS) -------------------------------------><--- FETCH -----><-END-> + // "HELLO {\"hostname\":\"machine\",\"wid\":\"wid\",\"pid\":12628,\"labels\":[\"rust\"],\"v\":2}\r\nFETCH default\r\nEND\r\n" let written = s.pop_bytes_written(1); - let msgs = "\r\n\ - FETCH default\r\n"; - assert_eq!( - std::str::from_utf8(&written[(written.len() - msgs.len())..]).unwrap(), - msgs - ); + assert!(written.starts_with(b"HELLO {\"hostname\":\"machine\",\"wid\":\"wid\"")); + assert!(written.ends_with(b"\r\nFETCH default\r\nEND\r\n")); + + // P.S. Interestingly, before we switched to `JoinSet` in the `Worker::run` internals, this last `END\r\n` + // of the processing worker never actually made it to the bytes written, no matter how long you sleep + // before popping those bytes from the mock stream. + // + // But generally speaking, the graceful situation is when the number of `HI`s and the number of `END`s are + // equal. Why did they opt for `END` instead of `BYE` in Faktory?
:smile: } diff --git a/tests/mock/inner.rs b/tests/mock/inner.rs new file mode 100644 index 00000000..85edc337 --- /dev/null +++ b/tests/mock/inner.rs @@ -0,0 +1,38 @@ +use std::sync::{Arc, Mutex}; +use std::{io, mem}; + +#[derive(Debug, Clone, Default)] +pub(crate) struct Duplex { + pub reader: io::Cursor<Vec<u8>>, + pub writer: io::Cursor<Vec<u8>>, +} + +#[derive(Debug, Clone, Default)] +pub(crate) struct MockStream { + pub du: Arc<Mutex<Duplex>>, +} + +impl MockStream { + pub fn push_bytes_to_read(&mut self, bytes: &[u8]) { + self.du.lock().unwrap().reader.get_mut().extend(bytes); + } + + pub fn pop_bytes_written(&mut self) -> Vec<u8> { + let mut du = self.du.lock().unwrap(); + let wr = mem::take(du.writer.get_mut()); + du.writer.set_position(0); + wr + } +} + +pub(crate) struct Inner { + pub take_next: usize, + pub streams: Vec<MockStream>, +} + +impl Inner { + pub fn take_stream(&mut self) -> Option<MockStream> { + self.take_next += 1; + self.streams.get(self.take_next - 1).cloned() + } +} diff --git a/tests/mock/mod.rs b/tests/mock/mod.rs index 86835b21..f3858585 100644 --- a/tests/mock/mod.rs +++ b/tests/mock/mod.rs @@ -1,25 +1,17 @@ use faktory::Reconnect; -use mockstream::SyncMockStream; -use std::io; -use std::sync::{Arc, Mutex}; +use std::{ + io, + pin::Pin, + sync::{Arc, Mutex}, +}; +use tokio::io::{AsyncRead, AsyncWrite}; -struct Inner { - take_next: usize, - streams: Vec<SyncMockStream>, -} - -impl Inner { - fn take_stream(&mut self) -> Option<SyncMockStream> { - self.take_next += 1; - - self.streams.get(self.take_next - 1).cloned() - } -} +mod inner; #[derive(Clone)] pub struct Stream { - mine: Option<SyncMockStream>, - all: Arc<Mutex<Inner>>, + mine: inner::MockStream, + all: Arc<Mutex<inner::Inner>>, } impl Default for Stream { @@ -28,8 +20,9 @@ impl Default for Stream { } } +#[async_trait::async_trait] impl Reconnect for Stream { - fn reconnect(&self) -> io::Result<Self> { + async fn reconnect(&mut self) -> Result<Self, io::Error> { let mine = self .all .lock() @@ -37,25 +30,47 @@ impl Reconnect for Stream { .take_stream() .expect("tried to make a new stream, but no more connections expected"); Ok(Stream { - mine: Some(mine), + mine, all: Arc::clone(&self.all), }) } } -impl io::Read for Stream { - fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { - self.mine.as_mut().unwrap().read(buf) +impl AsyncRead for Stream { + fn poll_read( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll<io::Result<()>> { + let mut duplex = self.mine.du.lock().unwrap(); + Pin::new(&mut duplex.reader).poll_read(cx, buf) } } -impl io::Write for Stream { - fn write(&mut self, buf: &[u8]) -> io::Result<usize> { - self.mine.as_mut().unwrap().write(buf) +impl AsyncWrite for Stream { + fn poll_write( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &[u8], + ) -> std::task::Poll<Result<usize, io::Error>> { + let mut duplex = self.mine.du.lock().unwrap(); + Pin::new(&mut duplex.writer).poll_write(cx, buf) + } + + fn poll_flush( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Result<(), io::Error>> { + let mut duplex = self.mine.du.lock().unwrap(); + Pin::new(&mut duplex.writer).poll_flush(cx) } - fn flush(&mut self) -> io::Result<()> { - self.mine.as_mut().unwrap().flush() + fn poll_shutdown( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Result<(), io::Error>> { + let mut duplex = self.mine.du.lock().unwrap(); + Pin::new(&mut duplex.writer).poll_shutdown(cx) } } @@ -63,7 +78,7 @@ impl Stream { fn make(salt: Option<(usize, &str)>, streams: usize) -> Self { let streams = (0..streams) .map(|_| { - let mut s = SyncMockStream::new(); + let mut s = 
inner::MockStream::default(); // need to say HELLO if let Some((iters, salt)) = salt { // include salt for pwdhash @@ -79,12 +94,16 @@ impl Stream { }) .collect(); - let mut inner = Inner { + let mut inner = inner::Inner { take_next: 0, streams, }; - let mine = inner.take_stream(); + let mine = inner.take_stream().unwrap(); + // So if they asked for two stream (see `consumer::terminate` test), + // the first one will be `mine` while they both will be accessible + // internally via `all` (since `Inner::take_stream` is not actually + // taking, it is rather _cloning_). Stream { mine, all: Arc::new(Mutex::new(inner)), diff --git a/tests/producer.rs b/tests/producer.rs index 1db4c82d..ee50d815 100644 --- a/tests/producer.rs +++ b/tests/producer.rs @@ -1,17 +1,12 @@ -extern crate faktory; -extern crate mockstream; -extern crate serde_json; -extern crate url; - mod mock; use faktory::*; -#[test] -fn hello() { +#[tokio::test(flavor = "multi_thread")] +async fn hello() { let mut s = mock::Stream::default(); - let p = Producer::connect_with(s.clone(), None).unwrap(); + let p = Client::connect_with(s.clone(), None).await.unwrap(); let written = s.pop_bytes_written(0); assert!(written.starts_with(b"HELLO {")); let written: serde_json::Value = serde_json::from_slice(&written[b"HELLO ".len()..]).unwrap(); @@ -27,11 +22,13 @@ fn hello() { assert_eq!(written, b"END\r\n"); } -#[test] -fn hello_pwd() { +#[tokio::test(flavor = "multi_thread")] +async fn hello_pwd() { let mut s = mock::Stream::with_salt(1545, "55104dc76695721d"); - let c = Producer::connect_with(s.clone(), Some("foobar".to_string())).unwrap(); + let c = Client::connect_with(s.clone(), Some("foobar".to_string())) + .await + .unwrap(); let written = s.pop_bytes_written(0); assert!(written.starts_with(b"HELLO {")); let written: serde_json::Value = serde_json::from_slice(&written[b"HELLO ".len()..]).unwrap(); @@ -44,14 +41,14 @@ fn hello_pwd() { drop(c); } -#[test] -fn enqueue() { +#[tokio::test(flavor = "multi_thread")] +async fn enqueue() { let mut s = mock::Stream::default(); - let mut p = Producer::connect_with(s.clone(), None).unwrap(); + let mut p = Client::connect_with(s.clone(), None).await.unwrap(); s.ignore(0); s.ok(0); - p.enqueue(Job::new("foobar", vec!["z"])).unwrap(); + p.enqueue(Job::new("foobar", vec!["z"])).await.unwrap(); let written = s.pop_bytes_written(0); assert!(written.starts_with(b"PUSH {")); @@ -85,17 +82,18 @@ fn enqueue() { assert_eq!(written.get("backtrace").and_then(|h| h.as_u64()), Some(0)); } -#[test] -fn queue_control() { +#[tokio::test(flavor = "multi_thread")] +async fn queue_control() { let mut s = mock::Stream::default(); - let mut p = Producer::connect_with(s.clone(), None).unwrap(); + let mut p = Client::connect_with(s.clone(), None).await.unwrap(); s.ignore(0); s.ok(0); - p.queue_pause(&["test", "test2"]).unwrap(); + p.queue_pause(&["test", "test2"]).await.unwrap(); s.ok(0); p.queue_resume(&["test3".to_string(), "test4".to_string()]) + .await .unwrap(); let written = s.pop_bytes_written(0); diff --git a/tests/real/community.rs b/tests/real/community.rs index 8b45840a..6ed32d9f 100644 --- a/tests/real/community.rs +++ b/tests/real/community.rs @@ -1,171 +1,196 @@ -extern crate faktory; -extern crate serde_json; -extern crate url; - -use faktory::*; +use crate::skip_check; +use faktory::{Client, Job, JobBuilder, JobId, Worker, WorkerBuilder, WorkerId}; use serde_json::Value; -use std::io; -use std::sync; - -macro_rules! 
skip_check { - () => { - if std::env::var_os("FAKTORY_URL").is_none() { - return; - } - }; -} +use std::{io, sync}; -#[test] -fn hello_p() { +#[tokio::test(flavor = "multi_thread")] +async fn hello_client() { skip_check!(); - let p = Producer::connect(None).unwrap(); + let p = Client::connect(None).await.unwrap(); drop(p); } -#[test] -fn hello_c() { +#[tokio::test(flavor = "multi_thread")] +async fn hello_worker() { + skip_check!(); + let w = Worker::builder::() + .hostname("tester".to_string()) + .labels(vec!["foo".to_string(), "bar".to_string()]) + .register_fn("never_called", |_| async move { unreachable!() }) + .connect(None) + .await + .unwrap(); + drop(w); +} + +#[tokio::test(flavor = "multi_thread")] +async fn enqueue_job() { skip_check!(); - let mut c = ConsumerBuilder::default(); - c.hostname("tester".to_string()) - .wid("hello".to_string()) - .labels(vec!["foo".to_string(), "bar".to_string()]); - c.register("never_called", |_| -> io::Result<()> { unreachable!() }); - let c = c.connect(None).unwrap(); - drop(c); + let mut p = Client::connect(None).await.unwrap(); + p.enqueue(JobBuilder::new("order").build()).await.unwrap(); } -#[test] -fn roundtrip() { +#[tokio::test(flavor = "multi_thread")] +async fn roundtrip() { skip_check!(); + let local = "roundtrip"; + let jid = JobId::new("x-job-id-0123456782"); + + let mut worker = Worker::builder() + .labels(vec!["rust".into(), local.into()]) + .workers(1) + .wid(WorkerId::random()) + .register_fn("order", move |job| async move { + assert_eq!(job.kind(), "order"); + assert_eq!(job.queue, local); + assert_eq!(job.args(), &[Value::from("ISBN-13:9781718501850")]); + Ok::<(), io::Error>(()) + }) + .register_fn("image", |_| async move { unreachable!() }) + .connect(None) + .await + .unwrap(); - let (tx, rx) = sync::mpsc::channel(); - let tx = sync::Arc::new(sync::Mutex::new(tx)); - let mut c = ConsumerBuilder::default(); - c.hostname("tester".to_string()).wid(local.to_string()); - { - let tx = sync::Arc::clone(&tx); - c.register(local, move |j| -> io::Result<()> { - tx.lock().unwrap().send(j).unwrap(); - Ok(()) - }); - } - let mut c = c.connect(None).unwrap(); - - let mut p = Producer::connect(None).unwrap(); - p.enqueue(Job::new(local, vec!["z"]).on_queue(local)) + let mut client = Client::connect(None).await.unwrap(); + client + .enqueue( + JobBuilder::new("order") + .jid(jid) + .args(vec!["ISBN-13:9781718501850"]) + .queue(local) + .build(), + ) + .await .unwrap(); - c.run_one(0, &[local]).unwrap(); - let job = rx.recv().unwrap(); - assert_eq!(job.queue, local); - assert_eq!(job.kind(), local); - assert_eq!(job.args(), &[Value::from("z")]); + let had_one = worker.run_one(0, &[local]).await.unwrap(); + assert!(had_one); + + let drained = !worker.run_one(0, &[local]).await.unwrap(); + assert!(drained); } -#[test] -fn multi() { +#[tokio::test(flavor = "multi_thread")] +async fn multi() { skip_check!(); - let local = "multi"; + let local = "multi_async"; let (tx, rx) = sync::mpsc::channel(); let tx = sync::Arc::new(sync::Mutex::new(tx)); - let mut c = ConsumerBuilder::default(); - c.hostname("tester".to_string()).wid(local.to_string()); - { - let tx = sync::Arc::clone(&tx); - c.register(local, move |j| -> io::Result<()> { - tx.lock().unwrap().send(j).unwrap(); - Ok(()) - }); - } - let mut c = c.connect(None).unwrap(); - - let mut p = Producer::connect(None).unwrap(); + + let mut w = WorkerBuilder::default() + .hostname("tester".to_string()) + .wid(WorkerId::new(local)) + .register_fn(local, move |j| { + let tx = sync::Arc::clone(&tx); + 
Box::pin(async move { + tx.lock().unwrap().send(j).unwrap(); + Ok::<(), io::Error>(()) + }) + }) + .connect(None) + .await + .unwrap(); + + let mut p = Client::connect(None).await.unwrap(); p.enqueue(Job::new(local, vec![Value::from(1), Value::from("foo")]).on_queue(local)) + .await .unwrap(); p.enqueue(Job::new(local, vec![Value::from(2), Value::from("bar")]).on_queue(local)) + .await .unwrap(); - c.run_one(0, &[local]).unwrap(); + w.run_one(0, &[local]).await.unwrap(); let job = rx.recv().unwrap(); assert_eq!(job.queue, local); assert_eq!(job.kind(), local); assert_eq!(job.args(), &[Value::from(1), Value::from("foo")]); - c.run_one(0, &[local]).unwrap(); + w.run_one(0, &[local]).await.unwrap(); let job = rx.recv().unwrap(); assert_eq!(job.queue, local); assert_eq!(job.kind(), local); assert_eq!(job.args(), &[Value::from(2), Value::from("bar")]); } -#[test] -fn fail() { +#[tokio::test(flavor = "multi_thread")] +async fn fail() { skip_check!(); let local = "fail"; let (tx, rx) = sync::mpsc::channel(); let tx = sync::Arc::new(sync::Mutex::new(tx)); - let mut c = ConsumerBuilder::default(); - c.hostname("tester".to_string()).wid(local.to_string()); - { - let tx = sync::Arc::clone(&tx); - c.register(local, move |j| -> io::Result<()> { - tx.lock().unwrap().send(j).unwrap(); - Err(io::Error::new(io::ErrorKind::Other, "nope")) - }); - } - let mut c = c.connect(None).unwrap(); - - let mut p = Producer::connect(None).unwrap(); + + let mut w = WorkerBuilder::default() + .hostname("tester".to_string()) + .wid(WorkerId::new(local)) + .register_fn(local, move |j| { + let tx = sync::Arc::clone(&tx); + Box::pin(async move { + tx.lock().unwrap().send(j).unwrap(); + Err(io::Error::new(io::ErrorKind::Other, "nope")) + }) + }) + .connect(None) + .await + .unwrap(); + + let mut p = Client::connect(None).await.unwrap(); // note that *enqueueing* the jobs didn't fail! p.enqueue(Job::new(local, vec![Value::from(1), Value::from("foo")]).on_queue(local)) + .await .unwrap(); p.enqueue(Job::new(local, vec![Value::from(2), Value::from("bar")]).on_queue(local)) + .await .unwrap(); - c.run_one(0, &[local]).unwrap(); - c.run_one(0, &[local]).unwrap(); - drop(c); + w.run_one(0, &[local]).await.unwrap(); + w.run_one(0, &[local]).await.unwrap(); + drop(w); assert_eq!(rx.into_iter().take(2).count(), 2); - - // TODO: check that jobs *actually* failed! 
} -#[test] -fn queue() { +#[tokio::test(flavor = "multi_thread")] +async fn queue() { skip_check!(); let local = "pause"; let (tx, rx) = sync::mpsc::channel(); let tx = sync::Arc::new(sync::Mutex::new(tx)); - let mut c = ConsumerBuilder::default(); - c.hostname("tester".to_string()).wid(local.to_string()); - c.register(local, move |_job| tx.lock().unwrap().send(true)); - let mut c = c.connect(None).unwrap(); + let mut w = WorkerBuilder::default() + .hostname("tester".to_string()) + .wid(WorkerId::new(local)) + .register_fn(local, move |_job| { + let tx = sync::Arc::clone(&tx); + Box::pin(async move { tx.lock().unwrap().send(true) }) + }) + .connect(None) + .await + .unwrap(); - let mut p = Producer::connect(None).unwrap(); + let mut p = Client::connect(None).await.unwrap(); p.enqueue(Job::new(local, vec![Value::from(1)]).on_queue(local)) + .await .unwrap(); - p.queue_pause(&[local]).unwrap(); + p.queue_pause(&[local]).await.unwrap(); - let had_job = c.run_one(0, &[local]).unwrap(); + let had_job = w.run_one(0, &[local]).await.unwrap(); assert!(!had_job); let worker_executed = rx.try_recv().is_ok(); assert!(!worker_executed); - p.queue_resume(&[local]).unwrap(); + p.queue_resume(&[local]).await.unwrap(); - let had_job = c.run_one(0, &[local]).unwrap(); + let had_job = w.run_one(0, &[local]).await.unwrap(); assert!(had_job); let worker_executed = rx.try_recv().is_ok(); assert!(worker_executed); } -#[test] -fn test_jobs_pushed_in_bulk() { +#[tokio::test(flavor = "multi_thread")] +async fn test_jobs_pushed_in_bulk() { skip_check!(); let local_1 = "test_jobs_pushed_in_bulk_1"; @@ -173,13 +198,14 @@ fn test_jobs_pushed_in_bulk() { let local_3 = "test_jobs_pushed_in_bulk_3"; let local_4 = "test_jobs_pushed_in_bulk_4"; - let mut p = Producer::connect(None).unwrap(); + let mut p = Client::connect(None).await.unwrap(); let (enqueued_count, errors) = p .enqueue_many(vec![ Job::builder("common").queue(local_1).build(), Job::builder("common").queue(local_2).build(), Job::builder("special").queue(local_2).build(), ]) + .await .unwrap(); assert_eq!(enqueued_count, 3); assert!(errors.is_none()); // error-free @@ -193,13 +219,16 @@ fn test_jobs_pushed_in_bulk() { let (enqueued_count, errors) = p .enqueue_many([ - Job::builder("broken").jid("short").queue(local_3).build(), // jid.len() < 8 + Job::builder("broken") + .jid(JobId::new("short")) + .queue(local_3) + .build(), // jid.len() < 8 Job::builder("") // empty string jobtype - .jid("3sZCbdp8e9WX__0") + .jid(JobId::new("3sZCbdp8e9WX__0")) .queue(local_3) .build(), Job::builder("broken") - .jid("3sZCbdp8e9WX__1") + .jid(JobId::new("3sZCbdp8e9WX__1")) .queue(local_3) .reserve_for(864001) // reserve_for exceeded .build(), @@ -207,6 +236,7 @@ fn test_jobs_pushed_in_bulk() { Job::builder("very_special").queue(local_4).build(), Job::builder("very_special").queue(local_4).build(), ]) + .await .unwrap(); // 3 out of 5 not enqueued; @@ -229,39 +259,49 @@ fn test_jobs_pushed_in_bulk() { // Let's check that the two well-formatted jobs // have _really_ been enqueued, i.e. 
that `enqueue_many` // is not an all-or-nothing operation: - let mut c = ConsumerBuilder::default(); - c.hostname("tester".to_string()).wid(local_3.to_string()); - c.register("very_special", move |_job| -> io::Result<()> { Ok(()) }); - c.register("broken", move |_job| -> io::Result<()> { Ok(()) }); - let mut c = c.connect(None).unwrap(); + let mut c = WorkerBuilder::default() + .hostname("tester".to_string()) + .wid(WorkerId::new(local_3)) + .register_fn("very_special", move |_job| async { + Ok::<(), io::Error>(()) + }) + .register_fn("broken", move |_job| async { Ok::<(), io::Error>(()) }) + .connect(None) + .await + .unwrap(); // we targeted "very_special" jobs to "local_4" queue - assert!(c.run_one(0, &[local_4]).unwrap()); - assert!(c.run_one(0, &[local_4]).unwrap()); - assert!(!c.run_one(0, &[local_4]).unwrap()); // drained + assert!(c.run_one(0, &[local_4]).await.unwrap()); + assert!(c.run_one(0, &[local_4]).await.unwrap()); + assert!(!c.run_one(0, &[local_4]).await.unwrap()); // drained // also let's check that the 'broken' jobs have NOT been enqueued, // reminder: we target the broken jobs to "local_3" queue - assert!(!c.run_one(0, &[local_3]).unwrap()); // empty + assert!(!c.run_one(0, &[local_3]).await.unwrap()); // empty } -#[test] -fn test_jobs_created_with_builder() { - skip_check!(); +async fn assert_args_empty(j: Job) -> io::Result<()> { + assert!(j.args().is_empty()); + Ok(eprintln!("{:?}", j)) +} - // prepare a producer ("client" in Faktory terms) and consumer ("worker"): - let mut producer = Producer::connect(None).unwrap(); - let mut consumer = ConsumerBuilder::default(); - consumer.register("rebuild_index", move |job| -> io::Result<_> { - assert!(job.args().is_empty()); - Ok(eprintln!("{:?}", job)) - }); - consumer.register("register_order", move |job| -> io::Result<_> { - assert!(job.args().len() != 0); - Ok(eprintln!("{:?}", job)) - }); +async fn assert_args_not_empty(j: Job) -> io::Result<()> { + assert!(j.args().len() != 0); + Ok(eprintln!("{:?}", j)) +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_jobs_created_with_builder() { + skip_check!(); - let mut consumer = consumer.connect(None).unwrap(); + // prepare a client and a worker: + let mut cl = Client::connect(None).await.unwrap(); + let mut w = Worker::builder() + .register_fn("rebuild_index", assert_args_empty) + .register_fn("register_order", assert_args_not_empty) + .connect(None) + .await + .unwrap(); // prepare some jobs with JobBuilder: let job1 = JobBuilder::new("rebuild_index") @@ -277,23 +317,26 @@ fn test_jobs_created_with_builder() { job3.queue = "test_jobs_created_with_builder_1".to_string(); // enqueue ... - producer.enqueue(job1).unwrap(); - producer.enqueue(job2).unwrap(); - producer.enqueue(job3).unwrap(); + cl.enqueue(job1).await.unwrap(); + cl.enqueue(job2).await.unwrap(); + cl.enqueue(job3).await.unwrap(); // ... 
and execute: - let had_job = consumer + let had_job = w .run_one(0, &["test_jobs_created_with_builder_0"]) + .await .unwrap(); assert!(had_job); - let had_job = consumer + let had_job = w .run_one(0, &["test_jobs_created_with_builder_1"]) + .await .unwrap(); assert!(had_job); - let had_job = consumer + let had_job = w .run_one(0, &["test_jobs_created_with_builder_1"]) + .await .unwrap(); assert!(had_job); } diff --git a/tests/real/enterprise.rs b/tests/real/enterprise.rs index e2fd807b..94fe13b8 100644 --- a/tests/real/enterprise.rs +++ b/tests/real/enterprise.rs @@ -1,42 +1,29 @@ -extern crate faktory; -extern crate serde_json; -extern crate url; - +use crate::skip_if_not_enterprise; +use crate::utils::learn_faktory_url; use chrono::Utc; use faktory::ent::*; use faktory::*; use serde_json::Value; use std::io; +use tokio::time; -macro_rules! skip_if_not_enterprise { - () => { - if std::env::var_os("FAKTORY_ENT").is_none() { - return; - } - }; +async fn print_job(j: Job) -> io::Result<()> { + Ok(eprintln!("{:?}", j)) } - macro_rules! assert_had_one { ($c:expr, $q:expr) => { - let had_one_job = $c.run_one(0, &[$q]).unwrap(); + let had_one_job = $c.run_one(0, &[$q]).await.unwrap(); assert!(had_one_job); }; } macro_rules! assert_is_empty { ($c:expr, $q:expr) => { - let had_one_job = $c.run_one(0, &[$q]).unwrap(); + let had_one_job = $c.run_one(0, &[$q]).await.unwrap(); assert!(!had_one_job); }; } -fn learn_faktory_url() -> String { - let url = std::env::var_os("FAKTORY_URL").expect( - "Enterprise Faktory should be running for this test, and 'FAKTORY_URL' environment variable should be provided", - ); - url.to_str().expect("Is a utf-8 string").to_owned() -} - fn some_jobs(kind: S, q: S, count: usize) -> impl Iterator where S: Into + Clone + 'static, @@ -46,21 +33,20 @@ where .map(move |_| Job::builder(kind.clone()).queue(q.clone()).build()) } -#[test] -fn ent_expiring_job() { - use std::{thread, time}; - +#[tokio::test(flavor = "multi_thread")] +async fn ent_expiring_job() { skip_if_not_enterprise!(); let url = learn_faktory_url(); + let local = "ent_expiring_job"; - // prepare a producer ("client" in Faktory terms) and consumer ("worker"): - let mut p = Producer::connect(Some(&url)).unwrap(); - let mut c = ConsumerBuilder::default(); - c.register("AnExpiringJob", move |job| -> io::Result<_> { - Ok(eprintln!("{:?}", job)) - }); - let mut c = c.connect(Some(&url)).unwrap(); + // prepare a client and a worker: + let mut p = Client::connect(Some(&url)).await.unwrap(); + let mut w = WorkerBuilder::default() + .register_fn("AnExpiringJob", print_job) + .connect(Some(&url)) + .await + .unwrap(); // prepare an expiring job: let job_ttl_secs: u64 = 3; @@ -68,35 +54,34 @@ fn ent_expiring_job() { let ttl = chrono::Duration::seconds(job_ttl_secs as i64); let job1 = JobBuilder::new("AnExpiringJob") .args(vec!["ISBN-13:9781718501850"]) - .queue("ent_expiring_job") + .queue(local) .expires_at(chrono::Utc::now() + ttl) .build(); // enqueue and fetch immediately job1: - p.enqueue(job1).unwrap(); - assert_had_one!(&mut c, "ent_expiring_job"); + p.enqueue(job1).await.unwrap(); + assert_had_one!(&mut w, "ent_expiring_job"); // check that the queue is drained: - assert_is_empty!(&mut c, "ent_expiring_job"); + assert_is_empty!(&mut w, "ent_expiring_job"); // prepare another one: let job2 = JobBuilder::new("AnExpiringJob") .args(vec!["ISBN-13:9781718501850"]) - .queue("ent_expiring_job") + .queue(local) .expires_at(chrono::Utc::now() + ttl) .build(); // enqueue and then fetch job2, but after ttl: - 
p.enqueue(job2).unwrap(); - thread::sleep(time::Duration::from_secs(job_ttl_secs * 2)); - + p.enqueue(job2).await.unwrap(); + tokio::time::sleep(time::Duration::from_secs(job_ttl_secs * 2)).await; // For the non-enterprise edition of Faktory, this assertion will // fail, which should be taken into account when running the test suite on CI. - assert_is_empty!(&mut c, "ent_expiring_job"); + assert_is_empty!(&mut w, local); } -#[test] -fn ent_unique_job() { +#[tokio::test(flavor = "multi_thread")] +async fn ent_unique_job() { use faktory::error; use serde_json::Value; @@ -106,13 +91,13 @@ fn ent_unique_job() { let job_type = "order"; - // prepare producer and consumer: - let mut p = Producer::connect(Some(&url)).unwrap(); - let mut c = ConsumerBuilder::default(); - c.register(job_type, |job| -> io::Result<_> { - Ok(eprintln!("{:?}", job)) - }); - let mut c = c.connect(Some(&url)).unwrap(); + // prepare client and worker: + let mut p = Client::connect(Some(&url)).await.unwrap(); + let mut w = WorkerBuilder::default() + .register_fn(job_type, print_job) + .connect(Some(&url)) + .await + .unwrap(); // Reminder. Jobs are considered unique for kind + args + queue. // So the following two jobs, will be accepted by Faktory, since we @@ -123,18 +108,20 @@ fn ent_unique_job() { .args(args.clone()) .queue(queue_name) .build(); - p.enqueue(job1).unwrap(); + + p.enqueue(job1).await.unwrap(); let job2 = JobBuilder::new(job_type) .args(args.clone()) .queue(queue_name) .build(); - p.enqueue(job2).unwrap(); - let had_job = c.run_one(0, &[queue_name]).unwrap(); + p.enqueue(job2).await.unwrap(); + + let had_job = w.run_one(0, &[queue_name]).await.unwrap(); assert!(had_job); - let had_another_one = c.run_one(0, &[queue_name]).unwrap(); + let had_another_one = w.run_one(0, &[queue_name]).await.unwrap(); assert!(had_another_one); - let and_that_is_it_for_now = !c.run_one(0, &[queue_name]).unwrap(); + let and_that_is_it_for_now = !w.run_one(0, &[queue_name]).await.unwrap(); assert!(and_that_is_it_for_now); // let's now create a unique job and followed by a job with @@ -146,7 +133,9 @@ fn ent_unique_job() { .queue(queue_name) .unique_for(unique_for_secs) .build(); - p.enqueue(job1).unwrap(); + + p.enqueue(job1).await.unwrap(); + // this one is a 'duplicate' ... let job2 = Job::builder(job_type) .args(args.clone()) @@ -154,7 +143,8 @@ fn ent_unique_job() { .unique_for(unique_for_secs) .build(); // ... so the server will respond accordingly: - let res = p.enqueue(job2).unwrap_err(); + let res = p.enqueue(job2).await.unwrap_err(); + if let error::Error::Protocol(error::Protocol::UniqueConstraintViolation { msg }) = res { assert_eq!(msg, "Job not unique"); } else { @@ -162,12 +152,14 @@ fn ent_unique_job() { } // Let's now consume the job which is 'holding' a unique lock: - let had_job = c.run_one(0, &[queue_name]).unwrap(); + let had_job = w.run_one(0, &[queue_name]).await.unwrap(); + assert!(had_job); // And check that the queue is really empty (`job2` from above // has not been queued indeed): - let queue_is_empty = !c.run_one(0, &[queue_name]).unwrap(); + let queue_is_empty = !w.run_one(0, &[queue_name]).await.unwrap(); + assert!(queue_is_empty); // Now let's repeat the latter case, but providing different args to job2: @@ -176,7 +168,9 @@ fn ent_unique_job() { .queue(queue_name) .unique_for(unique_for_secs) .build(); - p.enqueue(job1).unwrap(); + + p.enqueue(job1).await.unwrap(); + // this one is *NOT* a 'duplicate' ... 
let job2 = JobBuilder::new(job_type) .args(vec![Value::from("ISBN-13:9781718501850"), Value::from(101)]) @@ -184,19 +178,19 @@ fn ent_unique_job() { .unique_for(unique_for_secs) .build(); // ... so the server will accept it: - p.enqueue(job2).unwrap(); + p.enqueue(job2).await.unwrap(); - assert_had_one!(&mut c, queue_name); - assert_had_one!(&mut c, queue_name); + assert_had_one!(&mut w, queue_name); + assert_had_one!(&mut w, queue_name); // and the queue is empty again: - assert_is_empty!(&mut c, queue_name); + assert_is_empty!(&mut w, queue_name); } -#[test] -fn ent_unique_job_until_success() { +#[tokio::test(flavor = "multi_thread")] +async fn ent_unique_job_until_success() { use faktory::error; - use std::thread; - use std::time; + use std::io; + use tokio::time; skip_if_not_enterprise!(); @@ -211,42 +205,45 @@ fn ent_unique_job_until_success() { let unique_for = 4; let url1 = url.clone(); - let handle = thread::spawn(move || { - // prepare producer and consumer, where the former can + let handle = tokio::spawn(async move { + // prepare client and worker, where the former can // send a job difficulty level as a job's args and the lattter // will sleep for a corresponding period of time, pretending // to work hard: - let mut producer_a = Producer::connect(Some(&url1)).unwrap(); - let mut consumer_a = ConsumerBuilder::default(); - consumer_a.register(job_type, |job| -> io::Result<_> { - let args = job.args().to_owned(); - let mut args = args.iter(); - let diffuculty_level = args - .next() - .expect("job difficulty level is there") - .to_owned(); - let sleep_secs = - serde_json::from_value::(diffuculty_level).expect("a valid number"); - thread::sleep(time::Duration::from_secs(sleep_secs as u64)); - Ok(eprintln!("{:?}", job)) - }); - let mut consumer_a = consumer_a.connect(Some(&url1)).unwrap(); + let mut client_a = Client::connect(Some(&url1)).await.unwrap(); + let mut worker_a = WorkerBuilder::default() + .register_fn(job_type, |job| async move { + let args = job.args().to_owned(); + let mut args = args.iter(); + let diffuculty_level = args + .next() + .expect("job difficulty level is there") + .to_owned(); + let sleep_secs = + serde_json::from_value::(diffuculty_level).expect("a valid number"); + time::sleep(time::Duration::from_secs(sleep_secs as u64)).await; + eprintln!("{:?}", job); + Ok::<(), io::Error>(()) + }) + .connect(Some(&url1)) + .await + .unwrap(); let job = JobBuilder::new(job_type) .args(vec![difficulty_level]) .queue(queue_name) .unique_for(unique_for) .unique_until_success() // Faktory's default .build(); - producer_a.enqueue(job).unwrap(); - let had_job = consumer_a.run_one(0, &[queue_name]).unwrap(); + client_a.enqueue(job).await.unwrap(); + let had_job = worker_a.run_one(0, &[queue_name]).await.unwrap(); assert!(had_job); }); // let spawned thread gain momentum: - thread::sleep(time::Duration::from_secs(1)); + time::sleep(time::Duration::from_secs(1)).await; // continue - let mut producer_b = Producer::connect(Some(&url)).unwrap(); + let mut client_b = Client::connect(Some(&url)).await.unwrap(); // this one is a 'duplicate' because the job is still // being executed in the spawned thread: @@ -257,18 +254,18 @@ fn ent_unique_job_until_success() { .build(); // as a result: - let res = producer_b.enqueue(job).unwrap_err(); + let res = client_b.enqueue(job).await.unwrap_err(); if let error::Error::Protocol(error::Protocol::UniqueConstraintViolation { msg }) = res { assert_eq!(msg, "Job not unique"); } else { panic!("Expected protocol error.") } - 
handle.join().expect("should join successfully"); + handle.await.expect("should join successfully"); // Now that the job submitted in a spawned thread has been successfully executed // (with ACK sent to server), the producer 'B' can push another one: - producer_b + client_b .enqueue( JobBuilder::new(job_type) .args(vec![difficulty_level]) @@ -276,13 +273,13 @@ fn ent_unique_job_until_success() { .unique_for(unique_for) .build(), ) + .await .unwrap(); } -#[test] -fn ent_unique_job_until_start() { - use std::thread; - use std::time; +#[tokio::test(flavor = "multi_thread")] +async fn ent_unique_job_until_start() { + use tokio::time; skip_if_not_enterprise!(); @@ -294,23 +291,26 @@ fn ent_unique_job_until_start() { let unique_for = 4; let url1 = url.clone(); - let handle = thread::spawn(move || { - let mut producer_a = Producer::connect(Some(&url1)).unwrap(); - let mut consumer_a = ConsumerBuilder::default(); - consumer_a.register(job_type, |job| -> io::Result<_> { - let args = job.args().to_owned(); - let mut args = args.iter(); - let diffuculty_level = args - .next() - .expect("job difficulty level is there") - .to_owned(); - let sleep_secs = - serde_json::from_value::(diffuculty_level).expect("a valid number"); - thread::sleep(time::Duration::from_secs(sleep_secs as u64)); - Ok(eprintln!("{:?}", job)) - }); - let mut consumer_a = consumer_a.connect(Some(&url1)).unwrap(); - producer_a + let handle = tokio::spawn(async move { + let mut client_a = Client::connect(Some(&url1)).await.unwrap(); + let mut worker_a = WorkerBuilder::default() + .register_fn(job_type, |job| async move { + let args = job.args().to_owned(); + let mut args = args.iter(); + let diffuculty_level = args + .next() + .expect("job difficulty level is there") + .to_owned(); + let sleep_secs = + serde_json::from_value::(diffuculty_level).expect("a valid number"); + time::sleep(time::Duration::from_secs(sleep_secs as u64)).await; + eprintln!("{:?}", job); + Ok::<(), io::Error>(()) + }) + .connect(Some(&url1)) + .await + .unwrap(); + client_a .enqueue( JobBuilder::new(job_type) .args(vec![difficulty_level]) @@ -319,18 +319,19 @@ fn ent_unique_job_until_start() { .unique_until_start() // NB! 
.build(), ) + .await .unwrap(); // as soon as the job is fetched, the unique lock gets released - let had_job = consumer_a.run_one(0, &[queue_name]).unwrap(); + let had_job = worker_a.run_one(0, &[queue_name]).await.unwrap(); assert!(had_job); }); // let spawned thread gain momentum: - thread::sleep(time::Duration::from_secs(1)); + time::sleep(time::Duration::from_secs(1)).await; // the unique lock has been released by this time, so the job is enqueued successfully: - let mut producer_b = Producer::connect(Some(&url)).unwrap(); - producer_b + let mut client_b = Client::connect(Some(&url)).await.unwrap(); + client_b .enqueue( JobBuilder::new(job_type) .args(vec![difficulty_level]) @@ -338,20 +339,20 @@ fn ent_unique_job_until_start() { .unique_for(unique_for) .build(), ) + .await .unwrap(); - handle.join().expect("should join successfully"); + handle.await.expect("should join successfully"); } -#[test] -fn ent_unique_job_bypass_unique_lock() { +#[tokio::test(flavor = "multi_thread")] +async fn ent_unique_job_bypass_unique_lock() { use faktory::error; skip_if_not_enterprise!(); let url = learn_faktory_url(); - - let mut producer = Producer::connect(Some(&url)).unwrap(); + let mut producer = Client::connect(Some(&url)).await.unwrap(); let queue_name = "ent_unique_job_bypass_unique_lock"; let job1 = Job::builder("order") .queue(queue_name) @@ -365,8 +366,8 @@ fn ent_unique_job_bypass_unique_lock() { .queue(queue_name) // same queue .build(); // NB: `unique_for` not set - producer.enqueue(job1).unwrap(); - producer.enqueue(job2).unwrap(); // bypassing the lock! + producer.enqueue(job1).await.unwrap(); + producer.enqueue(job2).await.unwrap(); // bypassing the lock! // This _is_ a 'duplicate'. let job3 = Job::builder("order") @@ -374,7 +375,7 @@ fn ent_unique_job_bypass_unique_lock() { .unique_for(60) // NB .build(); - let res = producer.enqueue(job3).unwrap_err(); // NOT bypassing the lock! + let res = producer.enqueue(job3).await.unwrap_err(); // NOT bypassing the lock! if let error::Error::Protocol(error::Protocol::UniqueConstraintViolation { msg }) = res { assert_eq!(msg, "Job not unique"); @@ -384,19 +385,20 @@ fn ent_unique_job_bypass_unique_lock() { // let's consume three times from the queue to verify that the first two jobs // have been enqueued for real, while the last one has not. 
- let mut c = ConsumerBuilder::default(); - c.register("order", |j| -> io::Result<_> { Ok(eprintln!("{:?}", j)) }); - let mut c = c.connect(Some(&url)).unwrap(); + let mut c = WorkerBuilder::default() + .register_fn("order", print_job) + .connect(Some(&url)) + .await + .unwrap(); - assert!(c.run_one(0, &[queue_name]).unwrap()); - assert!(c.run_one(0, &[queue_name]).unwrap()); - assert!(!c.run_one(0, &[queue_name]).unwrap()); // empty; + assert!(c.run_one(0, &[queue_name]).await.unwrap()); + assert!(c.run_one(0, &[queue_name]).await.unwrap()); + assert!(!c.run_one(0, &[queue_name]).await.unwrap()); // empty; } -#[test] -fn test_tracker_can_send_and_retrieve_job_execution_progress() { +#[tokio::test(flavor = "multi_thread")] +async fn test_tracker_can_send_and_retrieve_job_execution_progress() { use std::{ - io, sync::{Arc, Mutex}, thread, time, }; @@ -406,12 +408,12 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { let url = learn_faktory_url(); let t = Arc::new(Mutex::new( - Client::connect(Some(&url)).expect("job progress tracker created successfully"), + Client::connect(Some(&url)) + .await + .expect("job progress tracker created successfully"), )); - let t_captured = Arc::clone(&t); - - let mut p = Producer::connect(Some(&url)).unwrap(); + let mut p = Client::connect(Some(&url)).await.unwrap(); let job_tackable = JobBuilder::new("order") .args(vec![Value::from("ISBN-13:9781718501850")]) @@ -427,56 +429,61 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { // let's remember this job's id: let job_id = job_tackable.id().to_owned(); - let job_id_captured = job_id.clone(); - - p.enqueue(job_tackable).expect("enqueued"); - - let mut c = ConsumerBuilder::default(); - c.register("order", move |job| -> io::Result<_> { - // trying to set progress on a community edition of Faktory will give: - // 'an internal server error occurred: tracking subsystem is only available in Faktory Enterprise' - assert!(t_captured - .lock() - .expect("lock acquired") - .set_progress( - ProgressUpdate::builder(&job_id_captured) - .desc("Still processing...".to_owned()) - .percent(32) - .build(), - ) - .is_ok()); - // Let's update the progress once again, to check the 'set_progress' shortcut: - assert!(t_captured - .lock() - .unwrap() - .set_progress(ProgressUpdate::set(&job_id_captured, 33)) - .is_ok()); - - // let's sleep for a while ... - thread::sleep(time::Duration::from_secs(2)); - - // ... 
and read the progress info - let result = t_captured - .lock() - .expect("lock acquired") - .get_progress(job_id_captured.clone()) - .expect("Retrieved progress update over the wire"); - - assert!(result.is_some()); - let result = result.unwrap(); - assert_eq!(result.jid, job_id_captured.clone()); - match result.state { - JobState::Working => {} - _ => panic!("expected job's state to be 'working'"), - } - assert!(result.updated_at.is_some()); - assert_eq!(result.percent, Some(33)); - // considering the job done - Ok(eprintln!("{:?}", job)) - }); - let mut c = c + p.enqueue(job_tackable).await.expect("enqueued"); + + let url_copy = url.clone(); + let job_id_copy = job_id.clone(); + let mut c = WorkerBuilder::default() + .register_fn("order", move |job| { + let job_id = job_id_copy.clone(); + let url = url_copy.clone(); + Box::pin(async move { + let mut t = Client::connect(Some(&url)) + .await + .expect("job progress tracker created successfully"); + + // trying to set progress on a community edition of Faktory will give: + // 'an internal server error occurred: tracking subsystem is only available in Faktory Enterprise' + assert!(t + .set_progress( + ProgressUpdate::builder(job_id.clone()) + .desc("Still processing...".to_owned()) + .percent(32) + .build(), + ) + .await + .is_ok()); + // Let's update the progress once again, to check the 'set_progress' shortcut: + assert!(t + .set_progress(ProgressUpdate::set(job_id.clone(), 33)) + .await + .is_ok()); + + // let's sleep for a while ... + thread::sleep(time::Duration::from_secs(2)); + + // ... and read the progress info + let result = t + .get_progress(job_id.clone()) + .await + .expect("Retrieved progress update over the wire"); + + assert!(result.is_some()); + let result = result.unwrap(); + assert_eq!(result.jid, job_id.clone()); + match result.state { + JobState::Working => {} + _ => panic!("expected job's state to be 'working'"), + } + assert!(result.updated_at.is_some()); + assert_eq!(result.percent, Some(33)); + // considering the job done + Ok::<(), io::Error>(eprintln!("{:?}", job)) + }) + }) .connect(Some(&url)) + .await .expect("Successfully ran a handshake with 'Faktory'"); assert_had_one!(&mut c, "test_tracker_can_send_progress_update"); @@ -484,6 +491,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { .lock() .expect("lock acquired successfully") .get_progress(job_id.clone()) + .await .expect("Retrieved progress update over the wire once again") .expect("Some progress"); @@ -491,7 +499,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { // 'Faktory' will be keeping last known update for at least 30 minutes: assert_eq!(progress.percent, Some(33)); - // But it actually knows the job's real status, since the consumer (worker) + // But it actually knows the job's real status, since the worker // informed it immediately after finishing with the job: assert_eq!(progress.state, JobState::Success); @@ -502,19 +510,20 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { .desc("Final stage.".to_string()) .percent(99) .build(); - assert!(t.lock().unwrap().set_progress(upd).is_ok()); + assert!(t.lock().unwrap().set_progress(upd).await.is_ok()); let progress = t .lock() .unwrap() .get_progress(job_id) + .await .expect("Retrieved progress update over the wire once again") .expect("Some progress"); if progress.percent != Some(100) { let upd = progress.update_percent(100); assert_eq!(upd.desc, progress.desc); - assert!(t.lock().unwrap().set_progress(upd).is_ok()) + 
assert!(t.lock().unwrap().set_progress(upd).await.is_ok()) } // What about 'ordinary' job ? @@ -522,6 +531,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { // Sending it ... p.enqueue(job_ordinary) + .await .expect("Successfuly send to Faktory"); // ... and asking for its progress @@ -529,6 +539,7 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { .lock() .expect("lock acquired") .get_progress(job_id.clone()) + .await .expect("Retrieved progress update over the wire once again") .expect("Some progress"); @@ -546,17 +557,21 @@ fn test_tracker_can_send_and_retrieve_job_execution_progress() { assert!(progress.desc.is_none()); } -#[test] -fn test_batch_of_jobs_can_be_initiated() { +#[tokio::test(flavor = "multi_thread")] +async fn test_batch_of_jobs_can_be_initiated() { skip_if_not_enterprise!(); let url = learn_faktory_url(); - let mut p = Producer::connect(Some(&url)).unwrap(); - let mut c = ConsumerBuilder::default(); - c.register("thumbnail", move |_job| -> io::Result<_> { Ok(()) }); - c.register("clean_up", move |_job| -> io::Result<_> { Ok(()) }); - let mut c = c.connect(Some(&url)).unwrap(); - let mut t = Client::connect(Some(&url)).expect("job progress tracker created successfully"); + let mut p = Client::connect(Some(&url)).await.unwrap(); + let mut w = WorkerBuilder::default() + .register_fn("thumbnail", |_job| async { Ok::<(), io::Error>(()) }) + .register_fn("clean_up", |_job| async { Ok(()) }) + .connect(Some(&url)) + .await + .unwrap(); + let mut t = Client::connect(Some(&url)) + .await + .expect("job progress tracker created successfully"); let job_1 = Job::builder("thumbnail") .args(vec!["path/to/original/image1"]) @@ -582,21 +597,22 @@ fn test_batch_of_jobs_can_be_initiated() { let time_just_before_batch_init = Utc::now(); - let mut b = p.start_batch(batch).unwrap(); + let mut b = p.start_batch(batch).await.unwrap(); // let's remember batch id: - let bid = b.id().to_string(); + let bid = b.id().to_owned(); - assert!(b.add(job_1).unwrap().is_none()); - assert!(b.add(job_2).unwrap().is_none()); - assert_eq!(b.add(job_3).unwrap().unwrap(), "check-check"); - b.commit().unwrap(); + assert!(b.add(job_1).await.unwrap().is_none()); + assert!(b.add(job_2).await.unwrap().is_none()); + assert_eq!(b.add(job_3).await.unwrap().unwrap(), "check-check"); + b.commit().await.unwrap(); // The batch has been committed, let's see its status: let time_just_before_getting_status = Utc::now(); let s = t .get_batch_status(bid.clone()) + .await .expect("successfully fetched batch status from server...") .expect("...and it's not none"); @@ -614,14 +630,15 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.complete_callback_state, CallbackState::Pending); // consume and execute job 1 ... - assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); + assert_had_one!(&mut w, "test_batch_of_jobs_can_be_initiated"); // ... 
and try consuming from the "callback" queue: - assert_is_empty!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); + assert_is_empty!(&mut w, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); // let's ask the Faktory server about the batch status after // we have consumed one job from this batch: let s = t .get_batch_status(bid.clone()) + .await .expect("successfully fetched batch status from server...") .expect("...and it's not none"); @@ -631,13 +648,14 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.failed, 0); // now, consume and execute job 2 - assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); + assert_had_one!(&mut w, "test_batch_of_jobs_can_be_initiated"); // ... and check the callback queue again: - assert_is_empty!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); // not just yet ... + assert_is_empty!(&mut w, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); // not just yet ... // let's check batch status once again: let s = t .get_batch_status(bid.clone()) + .await .expect("successfully fetched batch status from server...") .expect("...and it's not none"); @@ -647,12 +665,13 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.failed, 0); // finally, consume and execute job 3 - the last one from the batch - assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated"); + assert_had_one!(&mut w, "test_batch_of_jobs_can_be_initiated"); // let's check batch status to see what happens after // all the jobs from the batch have been executed: let s = t .get_batch_status(bid.clone()) + .await .expect("successfully fetched batch status from server...") .expect("...and it's not none"); @@ -663,11 +682,12 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.complete_callback_state, CallbackState::Enqueued); // let's now successfully consume from the "callback" queue: - assert_had_one!(&mut c, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); + assert_had_one!(&mut w, "test_batch_of_jobs_can_be_initiated__CALLBACKs"); // let's check batch status one last time: let s = t .get_batch_status(bid.clone()) + .await .expect("successfully fetched batch status from server...") .expect("...and it's not none"); @@ -675,17 +695,21 @@ fn test_batch_of_jobs_can_be_initiated() { assert_eq!(s.complete_callback_state, CallbackState::FinishedOk); } -#[test] -fn test_batches_can_be_nested() { +#[tokio::test(flavor = "multi_thread")] +async fn test_batches_can_be_nested() { skip_if_not_enterprise!(); let url = learn_faktory_url(); - // Set up 'producer', 'consumer', and 'tracker': - let mut p = Producer::connect(Some(&url)).unwrap(); - let mut c = ConsumerBuilder::default(); - c.register("jobtype", move |_job| -> io::Result<_> { Ok(()) }); - let mut _c = c.connect(Some(&url)).unwrap(); - let mut t = Client::connect(Some(&url)).expect("job progress tracker created successfully"); + // Set up 'client', 'worker', and 'tracker': + let mut p = Client::connect(Some(&url)).await.unwrap(); + let _w = WorkerBuilder::default() + .register_fn("jobtype", |_job| async { Ok::<(), io::Error>(()) }) + .connect(Some(&url)) + .await + .unwrap(); + let mut t = Client::connect(Some(&url)) + .await + .expect("job progress tracker created successfully"); // Prepare some jobs: let parent_job1 = Job::builder("jobtype") @@ -701,7 +725,7 @@ fn test_batches_can_be_nested() { .queue("test_batches_can_be_nested") .build(); - // Sccording to Faktory docs: + // According to Faktory docs: // "The callback for a parent batch will not enqueue until the callback for the child batch has 
finished." // See: https://github.com/contribsys/faktory/wiki/Ent-Batches#guarantees let parent_cb_job = Job::builder("clean_up") @@ -718,44 +742,53 @@ fn test_batches_can_be_nested() { let parent_batch = Batch::builder() .description("Parent batch") .with_success_callback(parent_cb_job); - let mut parent_batch = p.start_batch(parent_batch).unwrap(); + let mut parent_batch = p.start_batch(parent_batch).await.unwrap(); let parent_batch_id = parent_batch.id().to_owned(); - parent_batch.add(parent_job1).unwrap(); + parent_batch.add(parent_job1).await.unwrap(); let child_batch = Batch::builder() .description("Child batch") .with_success_callback(child_cb_job); - let mut child_batch = parent_batch.start_batch(child_batch).unwrap(); + let mut child_batch = parent_batch.start_batch(child_batch).await.unwrap(); let child_batch_id = child_batch.id().to_owned(); - child_batch.add(child_job_1).unwrap(); - child_batch.add(child_job_2).unwrap(); + child_batch.add(child_job_1).await.unwrap(); + child_batch.add(child_job_2).await.unwrap(); let grandchild_batch = Batch::builder() .description("Grandchild batch") .with_success_callback(grandchild_cb_job); - let mut grandchild_batch = child_batch.start_batch(grandchild_batch).unwrap(); + let mut grandchild_batch = child_batch.start_batch(grandchild_batch).await.unwrap(); let grandchild_batch_id = grandchild_batch.id().to_owned(); - grandchild_batch.add(grand_child_job_1).unwrap(); + grandchild_batch.add(grand_child_job_1).await.unwrap(); - grandchild_batch.commit().unwrap(); - child_batch.commit().unwrap(); - parent_batch.commit().unwrap(); + grandchild_batch.commit().await.unwrap(); + child_batch.commit().await.unwrap(); + parent_batch.commit().await.unwrap(); // batches finish let parent_status = t .get_batch_status(parent_batch_id.clone()) + .await .unwrap() .unwrap(); assert_eq!(parent_status.description, Some("Parent batch".to_string())); assert_eq!(parent_status.total, 1); assert_eq!(parent_status.parent_bid, None); - let child_status = t.get_batch_status(child_batch_id.clone()).unwrap().unwrap(); + let child_status = t + .get_batch_status(child_batch_id.clone()) + .await + .unwrap() + .unwrap(); assert_eq!(child_status.description, Some("Child batch".to_string())); assert_eq!(child_status.total, 2); assert_eq!(child_status.parent_bid, Some(parent_batch_id)); - let grandchild_status = t.get_batch_status(grandchild_batch_id).unwrap().unwrap(); + let grandchild_status = t + .get_batch_status(grandchild_batch_id) + .await + .unwrap() + .unwrap(); assert_eq!( grandchild_status.description, Some("Grandchild batch".to_string()) @@ -764,18 +797,20 @@ fn test_batches_can_be_nested() { assert_eq!(grandchild_status.parent_bid, Some(child_batch_id)); } -#[test] -fn test_callback_will_not_be_queued_unless_batch_gets_committed() { +#[tokio::test(flavor = "multi_thread")] +async fn test_callback_will_not_be_queued_unless_batch_gets_committed() { skip_if_not_enterprise!(); let url = learn_faktory_url(); - // prepare a producer, a consumer of 'order' jobs, and a tracker: - let mut p = Producer::connect(Some(&url)).unwrap(); - let mut c = ConsumerBuilder::default(); - c.register("order", move |_job| -> io::Result<_> { Ok(()) }); - c.register("order_clean_up", move |_job| -> io::Result<_> { Ok(()) }); - let mut c = c.connect(Some(&url)).unwrap(); - let mut t = Client::connect(Some(&url)).unwrap(); + // prepare a client, a worker of 'order' jobs, and a tracker: + let mut cl = Client::connect(Some(&url)).await.unwrap(); + let mut tr = 
Client::connect(Some(&url)).await.unwrap(); + let mut w = WorkerBuilder::default() + .register_fn("order", |_job| async { Ok(()) }) + .register_fn("order_clean_up", |_job| async { Ok::<(), io::Error>(()) }) + .connect(Some(&url)) + .await + .unwrap(); let mut jobs = some_jobs( "order", @@ -789,22 +824,23 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { ); // start a 'batch': - let mut b = p + let mut b = cl .start_batch( Batch::builder() .description("Orders processing workload") .with_success_callback(callbacks.next().unwrap()), ) + .await .unwrap(); - let bid = b.id().to_string(); + let bid = b.id().to_owned(); // push 3 jobs onto this batch, but DO NOT commit the batch: for _ in 0..3 { - b.add(jobs.next().unwrap()).unwrap(); + b.add(jobs.next().unwrap()).await.unwrap(); } // check this batch's status: - let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + let s = tr.get_batch_status(bid.clone()).await.unwrap().unwrap(); assert_eq!(s.total, 3); assert_eq!(s.pending, 3); assert_eq!(s.success_callback_state, CallbackState::Pending); @@ -812,19 +848,19 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { // consume those 3 jobs successfully; for _ in 0..3 { assert_had_one!( - &mut c, + &mut w, "test_callback_will_not_be_queued_unless_batch_gets_committed" ); } // verify the queue is drained: assert_is_empty!( - &mut c, + &mut w, "test_callback_will_not_be_queued_unless_batch_gets_committed" ); // check this batch's status again: - let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + let s = tr.get_batch_status(bid.clone()).await.unwrap().unwrap(); assert_eq!(s.total, 3); assert_eq!(s.pending, 0); assert_eq!(s.failed, 0); @@ -832,61 +868,70 @@ fn test_callback_will_not_be_queued_unless_batch_gets_committed() { // to double-check, let's assert the success callbacks queue is empty: assert_is_empty!( - &mut c, + &mut w, "test_callback_will_not_be_queued_unless_batch_gets_committed__CALLBACKs" ); // now let's COMMIT the batch ... - b.commit().unwrap(); + b.commit().await.unwrap(); // ... and check batch status: - let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + let s = cl.get_batch_status(bid.clone()).await.unwrap().unwrap(); assert_eq!(s.success_callback_state, CallbackState::Enqueued); // finally, let's consume from the success callbacks queue ... assert_had_one!( - &mut c, + &mut w, "test_callback_will_not_be_queued_unless_batch_gets_committed__CALLBACKs" ); // ... 
and see the final status: - let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + let s = cl.get_batch_status(bid.clone()).await.unwrap().unwrap(); assert_eq!(s.success_callback_state, CallbackState::FinishedOk); } -#[test] -fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { +#[tokio::test(flavor = "multi_thread")] +async fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() { use std::{thread, time}; skip_if_not_enterprise!(); let url = learn_faktory_url(); - let mut p = Producer::connect(Some(&url)).unwrap(); - let mut t = Client::connect(Some(&url)).unwrap(); + let mut cl = Client::connect(Some(&url)).await.unwrap(); + let mut tracker = Client::connect(Some(&url)).await.unwrap(); let q_name = "test_callback_will_be_queued_upon_commit_even_if_batch_is_empty"; let complete_cb_jobtype = "complete_callback_jobtype"; let success_cb_jobtype = "success_cb_jobtype"; let complete_cb = some_jobs(complete_cb_jobtype, q_name, 1).next().unwrap(); let success_cb = some_jobs(success_cb_jobtype, q_name, 1).next().unwrap(); - let b = p + let b = cl .start_batch( Batch::builder() .description("Orders processing workload") .with_callbacks(success_cb, complete_cb), ) + .await .unwrap(); let bid = b.id().to_owned(); - let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + let s = tracker + .get_batch_status(bid.clone()) + .await + .unwrap() + .unwrap(); assert_eq!(s.total, 0); // no jobs in the batch; assert_eq!(s.success_callback_state, CallbackState::Pending); assert_eq!(s.complete_callback_state, CallbackState::Pending); - b.commit().unwrap(); + b.commit().await.unwrap(); // let's give the Faktory server some time: thread::sleep(time::Duration::from_secs(2)); - let s = t.get_batch_status(bid.clone()).unwrap().unwrap(); + let s = tracker + .get_batch_status(bid.clone()) + .await + .unwrap() + .unwrap(); assert_eq!(s.total, 0); // again, there are no jobs in the batch ... // The docs say "If you don't push any jobs into the batch, any callbacks will fire immediately upon BATCH COMMIT." 
@@ -894,20 +939,25 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() {
     assert_eq!(s.complete_callback_state, CallbackState::Enqueued);
     assert_eq!(s.success_callback_state, CallbackState::Pending);
-    let mut c = ConsumerBuilder::default();
-    c.register(complete_cb_jobtype, move |_job| -> io::Result<_> { Ok(()) });
-    c.register(success_cb_jobtype, move |_job| -> io::Result<_> {
-        Err(io::Error::new(
-            io::ErrorKind::Other,
-            "we want this one to fail to test the 'CallbackState' behavior",
-        ))
-    });
-
-    let mut c = c.connect(Some(&url)).unwrap();
+    let mut w = WorkerBuilder::default()
+        .register_fn(complete_cb_jobtype, |_job| async { Ok(()) })
+        .register_fn(success_cb_jobtype, |_job| async {
+            Err(io::Error::new(
+                io::ErrorKind::Other,
+                "we want this one to fail to test the 'CallbackState' behavior",
+            ))
+        })
+        .connect(Some(&url))
+        .await
+        .unwrap();
-    assert_had_one!(&mut c, q_name); // complete callback consumed
+    assert_had_one!(&mut w, q_name); // complete callback consumed
-    let s = t.get_batch_status(bid.clone()).unwrap().unwrap();
+    let s = tracker
+        .get_batch_status(bid.clone())
+        .await
+        .unwrap()
+        .unwrap();
     assert_eq!(s.total, 0);
     match s.complete_callback_state {
         CallbackState::FinishedOk => {}
@@ -917,9 +967,13 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() {
         CallbackState::Enqueued => {}
         _ => panic!("Expected the callback to have been enqueued, since the `complete` callback has already executed"),
     }
-    assert_had_one!(&mut c, q_name); // success callback consumed
+    assert_had_one!(&mut w, q_name); // success callback consumed
-    let s = t.get_batch_status(bid.clone()).unwrap().unwrap();
+    let s = tracker
+        .get_batch_status(bid.clone())
+        .await
+        .unwrap()
+        .unwrap();
     assert_eq!(s.total, 0);
     assert_eq!(s.complete_callback_state, CallbackState::FinishedOk);
     // Still `Enqueued` due to the fact that it was not finished with success.
@@ -928,12 +982,12 @@ fn test_callback_will_be_queued_upon_commit_even_if_batch_is_empty() {
     assert_eq!(s.success_callback_state, CallbackState::Enqueued);
 }
-#[test]
-fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
+#[tokio::test(flavor = "multi_thread")]
+async fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
     skip_if_not_enterprise!();
     let url = learn_faktory_url();
-    let mut p = Producer::connect(Some(&url)).unwrap();
-    let mut t = Client::connect(Some(&url)).unwrap();
+    let mut p = Client::connect(Some(&url)).await.unwrap();
+    let mut t = Client::connect(Some(&url)).await.unwrap();
     let mut jobs = some_jobs("order", "test_batch_can_be_reopned_add_extra_jobs_added", 4);
     let mut callbacks = some_jobs(
         "order_clean_up",
@@ -945,18 +999,21 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
         .description("Orders processing workload")
         .with_success_callback(callbacks.next().unwrap());
-    let mut b = p.start_batch(b).unwrap();
-    let bid = b.id().to_string();
-    b.add(jobs.next().unwrap()).unwrap(); // 1 job
-    b.add(jobs.next().unwrap()).unwrap(); // 2 jobs
+    let mut b = p.start_batch(b).await.unwrap();
+    let bid = b.id().to_owned();
+    b.add(jobs.next().unwrap()).await.unwrap(); // 1 job
+    b.add(jobs.next().unwrap()).await.unwrap(); // 2 jobs
-    let status = t.get_batch_status(bid.clone()).unwrap().unwrap();
+    let status = t.get_batch_status(bid.clone()).await.unwrap().unwrap();
     assert_eq!(status.total, 2);
     assert_eq!(status.pending, 2);
     // ############################## SUBTEST 0 ##########################################
     // Let's try to open/reopen a batch we have never declared:
-    let b = p.open_batch(String::from("non-existent-batch-id")).unwrap();
+    let b = p
+        .open_batch(BatchId::new("non-existent-batch-id"))
+        .await
+        .unwrap();
     // The server will error back on this, with "No such batch ", but
     // we are handling this case for the end-user and returning `Ok(None)` instead, indicating
     // this way that there is not such batch.
@@ -967,12 +1024,12 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
     // Let's fist of all try to open the batch we have not committed yet:
     // [We can use `producer::open_batch` specifying a bid OR - even we previously retrived
     // a status for this batch, we can go with `status::open` providing an exclusive ref to producer]
-    let mut b = status.open(&mut p).unwrap().unwrap();
-    b.add(jobs.next().unwrap()).unwrap(); // 3 jobs
+    let mut b = status.open(&mut p).await.unwrap().unwrap();
+    b.add(jobs.next().unwrap()).await.unwrap(); // 3 jobs
-    b.commit().unwrap(); // committig the batch
+    b.commit().await.unwrap(); // committig the batch
-    let status = t.get_batch_status(bid.clone()).unwrap().unwrap();
+    let status = t.get_batch_status(bid.clone()).await.unwrap().unwrap();
     assert_eq!(status.total, 3);
     assert_eq!(status.pending, 3);
@@ -991,12 +1048,13 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
     // Let's try to open an already committed batch:
     let mut b = p
         .open_batch(bid.clone())
+        .await
         .expect("no error")
         .expect("is some");
-    b.add(jobs.next().unwrap()).unwrap(); // 4 jobs
-    b.commit().unwrap(); // committing the batch again!
+    b.add(jobs.next().unwrap()).await.unwrap(); // 4 jobs
+    b.commit().await.unwrap(); // committing the batch again!
-    let s = t.get_batch_status(bid.clone()).unwrap().unwrap();
+    let s = t.get_batch_status(bid.clone()).await.unwrap().unwrap();
     assert_eq!(s.total, 4);
     assert_eq!(s.pending, 4);
@@ -1007,7 +1065,7 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
     // ############################## SUBTEST 3 ############################################
     // Let's see if we will be able to - again - open the committed batch "from outside" and
     // add a nested batch to it.
-    let mut b = p.open_batch(bid.clone()).unwrap().expect("is some");
+    let mut b = p.open_batch(bid.clone()).await.unwrap().expect("is some");
     let mut nested_callbacks = some_jobs(
         "order_clean_up__NESTED",
         "test_batch_can_be_reopned_add_extra_jobs_added__CALLBACKs__NESTED",
@@ -1019,13 +1077,17 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
         nested_callbacks.next().unwrap(),
         nested_callbacks.next().unwrap(),
     );
-    let nested_batch = b.start_batch(nested_batch_declaration).unwrap();
-    let nested_bid = nested_batch.id().to_string();
+    let nested_batch = b.start_batch(nested_batch_declaration).await.unwrap();
+    let nested_bid = nested_batch.id().to_owned();
     // committing the nested batch without any jobs
     // since those are just not relevant for this test:
-    nested_batch.commit().unwrap();
+    nested_batch.commit().await.unwrap();
-    let s = t.get_batch_status(nested_bid.clone()).unwrap().unwrap();
+    let s = t
+        .get_batch_status(nested_bid.clone())
+        .await
+        .unwrap()
+        .unwrap();
     assert_eq!(s.total, 0);
     assert_eq!(s.parent_bid, Some(bid)); // this is really our child batch
     assert_eq!(s.complete_callback_state, CallbackState::Enqueued);
@@ -1048,6 +1110,7 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
     // Let's try to re-open the nested batch that we have already committed and add some jobs to it.
     let mut b = p
         .open_batch(nested_bid.clone())
+        .await
         .expect("no error")
         .expect("is some");
     let mut more_jobs = some_jobs(
@@ -1055,11 +1118,15 @@ fn test_batch_can_be_reopened_add_extra_jobs_and_batches_added() {
         "test_batch_can_be_reopned_add_extra_jobs_added__NESTED",
         2,
     );
-    b.add(more_jobs.next().unwrap()).unwrap();
-    b.add(more_jobs.next().unwrap()).unwrap();
-    b.commit().unwrap();
+    b.add(more_jobs.next().unwrap()).await.unwrap();
+    b.add(more_jobs.next().unwrap()).await.unwrap();
+    b.commit().await.unwrap();
-    let s = t.get_batch_status(nested_bid.clone()).unwrap().unwrap();
+    let s = t
+        .get_batch_status(nested_bid.clone())
+        .await
+        .unwrap()
+        .unwrap();
     match s.complete_callback_state {
         CallbackState::Enqueued => {}
         _ => panic!("Expected the callback to have been enqueued"),
diff --git a/tests/real/main.rs b/tests/real/main.rs
index b8b8f3dd..7fbfb954 100644
--- a/tests/real/main.rs
+++ b/tests/real/main.rs
@@ -1,4 +1,5 @@
 mod community;
+mod utils;
 
 #[cfg(feature = "ent")]
 mod enterprise;
diff --git a/tests/real/utils.rs b/tests/real/utils.rs
new file mode 100644
index 00000000..9fefb92f
--- /dev/null
+++ b/tests/real/utils.rs
@@ -0,0 +1,25 @@
+#[macro_export]
+macro_rules! skip_check {
+    () => {
+        if std::env::var_os("FAKTORY_URL").is_none() {
+            return;
+        }
+    };
+}
+
+#[macro_export]
+macro_rules! skip_if_not_enterprise {
+    () => {
+        if std::env::var_os("FAKTORY_ENT").is_none() {
+            return;
+        }
+    };
+}
+
+#[cfg(feature = "ent")]
+pub fn learn_faktory_url() -> String {
+    let url = std::env::var_os("FAKTORY_URL").expect(
+        "Enterprise Faktory should be running for this test, and 'FAKTORY_URL' environment variable should be provided",
+    );
+    url.to_str().expect("Is a utf-8 string").to_owned()
+}
diff --git a/tests/tls.rs b/tests/tls.rs
deleted file mode 100644
index 3260ea7c..00000000
--- a/tests/tls.rs
+++ /dev/null
@@ -1,74 +0,0 @@
-#![cfg(feature = "tls")]
-
-use faktory::*;
-use serde_json::Value;
-use std::{env, fs, io, sync};
-
-#[test]
-fn roundtrip_tls() {
-    use native_tls::{Certificate, TlsConnector};
-
-    // We are utilizing the fact that the "FAKTORY_URL_SECURE" environment variable is set
-    // as an indicator that the integration test can and should be performed.
-    //
-    // In case the variable is not set we are returning early. This will show `test ... ok`
-    // in the test run output, which is admittedly confusing. Ideally, we would like to be able to decorate
-    // a test with a macro and to see something like `test ... skipped due to `, in case
-    // the test has been skipped, but it is currently not "natively" supported.
-    //
-    // See: https://github.com/rust-lang/rust/issues/68007
-    if env::var_os("FAKTORY_URL_SECURE").is_none() {
-        return;
-    }
-
-    let local = "roundtrip_tls";
-
-    let (tx, rx) = sync::mpsc::channel();
-    let tx = sync::Arc::new(sync::Mutex::new(tx));
-    let mut c = ConsumerBuilder::default();
-    c.hostname("tester".to_string()).wid(local.to_string());
-    {
-        let tx = sync::Arc::clone(&tx);
-        c.register(local, move |j| -> io::Result<()> {
-            tx.lock().unwrap().send(j).unwrap();
-            Ok(())
-        });
-    }
-
-    let cert_path = env::current_dir()
-        .unwrap()
-        .join("docker")
-        .join("certs")
-        .join("faktory.local.crt");
-    let cert = fs::read_to_string(cert_path).unwrap();
-
-    let tls = || {
-        let connector = if cfg!(target_os = "macos") {
-            TlsConnector::builder()
-                // Danger! Only for testing!
-                // On the macos CI runner, the certs are not trusted:
-                // { code: -67843, message: "The certificate was not trusted." }
-                .danger_accept_invalid_certs(true)
-                .build()
-                .unwrap()
-        } else {
-            let cert = Certificate::from_pem(cert.as_bytes()).unwrap();
-            TlsConnector::builder()
-                .add_root_certificate(cert)
-                .build()
-                .unwrap()
-        };
-        TlsStream::with_connector(connector, Some(&env::var("FAKTORY_URL_SECURE").unwrap()))
-            .unwrap()
-    };
-    let mut c = c.connect_with(tls(), None).unwrap();
-    let mut p = Producer::connect_with(tls(), None).unwrap();
-    p.enqueue(Job::new(local, vec!["z"]).on_queue(local))
-        .unwrap();
-    c.run_one(0, &[local]).unwrap();
-
-    let job = rx.recv().unwrap();
-    assert_eq!(job.queue, local);
-    assert_eq!(job.kind(), local);
-    assert_eq!(job.args(), &[Value::from("z")]);
-}
diff --git a/tests/tls/main.rs b/tests/tls/main.rs
new file mode 100644
index 00000000..5369ef6a
--- /dev/null
+++ b/tests/tls/main.rs
@@ -0,0 +1,5 @@
+#[cfg(feature = "native_tls")]
+mod native_tls;
+
+#[cfg(feature = "rustls")]
+mod rustls;
diff --git a/tests/tls/native_tls.rs b/tests/tls/native_tls.rs
new file mode 100644
index 00000000..582a6871
--- /dev/null
+++ b/tests/tls/native_tls.rs
@@ -0,0 +1,100 @@
+use faktory::native_tls::TlsStream;
+use faktory::{Client, Job, WorkerBuilder, WorkerId};
+use serde_json::Value;
+use std::{env, sync};
+
+#[tokio::test(flavor = "multi_thread")]
+async fn roundtrip_tls() {
+    use tokio_native_tls::native_tls::TlsConnector;
+
+    // We are utilizing the fact that the "FAKTORY_URL_SECURE" environment variable is set
+    // as an indicator that the integration test can and should be performed.
+    //
+    // In case the variable is not set we are returning early. This will show `test ... ok`
+    // in the test run output, which is admittedly confusing. Ideally, we would like to be able to decorate
+    // a test with a macro and to see something like `test ... skipped due to `, in case
+    // the test has been skipped, but it is currently not "natively" supported.
+    //
+    // See: https://github.com/rust-lang/rust/issues/68007
+    if env::var_os("FAKTORY_URL_SECURE").is_none() {
+        return;
+    }
+    let local = "roundtrip_tls";
+    let (tx, rx) = sync::mpsc::channel();
+    let tls = || async {
+        let connector = TlsConnector::builder()
+            .danger_accept_invalid_certs(true)
+            .build()
+            .unwrap();
+        TlsStream::with_connector(connector, Some(&env::var("FAKTORY_URL_SECURE").unwrap()))
+            .await
+            .unwrap()
+    };
+
+    let mut worker = WorkerBuilder::default()
+        .hostname("tester".to_string())
+        .wid(WorkerId::new(local))
+        .register(local, fixtures::JobHandler::new(tx))
+        .connect_with(tls().await, None)
+        .await
+        .unwrap();
+
+    // "one-shot" client
+    Client::connect_with(tls().await, None)
+        .await
+        .unwrap()
+        .enqueue(Job::new(local, vec!["z"]).on_queue(local))
+        .await
+        .unwrap();
+
+    worker.run_one(0, &[local]).await.unwrap();
+
+    let job = rx.recv().unwrap();
+    assert_eq!(job.queue, local);
+    assert_eq!(job.kind(), local);
+    assert_eq!(job.args(), &[Value::from("z")]);
+}
+
+mod fixtures {
+    pub use handler::JobHandler;
+
+    mod handler {
+        use async_trait::async_trait;
+        use faktory::{Job, JobRunner};
+        use std::{
+            io,
+            sync::{mpsc::Sender, Arc, Mutex},
+            time::Duration,
+        };
+        use tokio::time;
+
+        pub struct JobHandler {
+            chan: Arc<Mutex<Sender<Job>>>,
+        }
+
+        impl JobHandler {
+            pub fn new(chan: Sender<Job>) -> Self {
+                Self {
+                    chan: Arc::new(Mutex::new(chan)),
+                }
+            }
+
+            async fn process_one(&self, job: Job) -> io::Result<()> {
+                time::sleep(Duration::from_millis(100)).await;
+                eprintln!("{:?}", job);
+                self.chan.lock().unwrap().send(job).unwrap();
+                Ok(())
+            }
+        }
+
+        #[async_trait]
+        impl JobRunner for JobHandler {
+            type Error = io::Error;
+
+            async fn run(&self, job: Job) -> Result<(), Self::Error> {
+                self.process_one(job).await.unwrap();
+                Ok(())
+            }
+        }
+    }
+}
diff --git a/tests/tls/rustls.rs b/tests/tls/rustls.rs
new file mode 100644
index 00000000..9bdc4301
--- /dev/null
+++ b/tests/tls/rustls.rs
@@ -0,0 +1,182 @@
+use faktory::rustls::TlsStream;
+use faktory::{Client, Job, WorkerBuilder, WorkerId};
+use serde_json::Value;
+use std::{
+    env,
+    sync::{self, Arc},
+};
+use tokio_rustls::rustls::{ClientConfig, SignatureScheme};
+
+#[tokio::test(flavor = "multi_thread")]
+async fn roundtrip_tls() {
+    // We are utilizing the fact that the "FAKTORY_URL_SECURE" environment variable is set
+    // as an indicator that the integration test can and should be performed.
+    //
+    // In case the variable is not set we are returning early. This will show `test ... ok`
+    // in the test run output, which is admittedly confusing. Ideally, we would like to be able to decorate
+    // a test with a macro and to see something like `test ... skipped due to `, in case
+    // the test has been skipped, but it is currently not "natively" supported.
+    //
+    // See: https://github.com/rust-lang/rust/issues/68007
+    if env::var_os("FAKTORY_URL_SECURE").is_none() {
+        return;
+    }
+    let local = "roundtrip_tls";
+    let (tx, rx) = sync::mpsc::channel();
+    let tls = || async {
+        let verifier = fixtures::TestServerCertVerifier::new(
+            SignatureScheme::RSA_PSS_SHA512,
+            env::current_dir()
+                .unwrap()
+                .join("docker")
+                .join("certs")
+                .join("faktory.local.crt"),
+        );
+        let client_config = ClientConfig::builder()
+            .dangerous()
+            .with_custom_certificate_verifier(Arc::new(verifier))
+            .with_no_client_auth();
+
+        TlsStream::with_client_config(
+            client_config,
+            Some(&env::var("FAKTORY_URL_SECURE").unwrap()),
+        )
+        .await
+        .unwrap()
+    };
+
+    let mut worker = WorkerBuilder::default()
+        .hostname("tester".to_string())
+        .wid(WorkerId::new(local))
+        .register(local, fixtures::JobHandler::new(tx))
+        .connect_with(tls().await, None)
+        .await
+        .unwrap();
+
+    // "one-shot" client
+    Client::connect_with(tls().await, None)
+        .await
+        .unwrap()
+        .enqueue(Job::new(local, vec!["z"]).on_queue(local))
+        .await
+        .unwrap();
+
+    worker.run_one(0, &[local]).await.unwrap();
+
+    let job = rx.recv().unwrap();
+    assert_eq!(job.queue, local);
+    assert_eq!(job.kind(), local);
+    assert_eq!(job.args(), &[Value::from("z")]);
+}
+
+mod fixtures {
+    pub use handler::JobHandler;
+    pub use tls::TestServerCertVerifier;
+
+    mod handler {
+        use async_trait::async_trait;
+        use faktory::{Job, JobRunner};
+        use std::{
+            io,
+            sync::{mpsc::Sender, Arc, Mutex},
+            time::Duration,
+        };
+        use tokio::time;
+
+        pub struct JobHandler {
+            chan: Arc<Mutex<Sender<Job>>>,
+        }
+
+        impl JobHandler {
+            pub fn new(chan: Sender<Job>) -> Self {
+                Self {
+                    chan: Arc::new(Mutex::new(chan)),
+                }
+            }
+
+            async fn process_one(&self, job: Job) -> io::Result<()> {
+                time::sleep(Duration::from_millis(100)).await;
+                eprintln!("{:?}", job);
+                self.chan.lock().unwrap().send(job).unwrap();
+                Ok(())
+            }
+        }
+
+        #[async_trait]
+        impl JobRunner for JobHandler {
+            type Error = io::Error;
+
+            async fn run(&self, job: Job) -> Result<(), Self::Error> {
+                self.process_one(job).await.unwrap();
+                Ok(())
+            }
+        }
+    }
+
+    mod tls {
+        #![allow(unused_variables)]
+
+        use std::fs;
+        use std::path::PathBuf;
+
+        use rustls_pki_types::{CertificateDer, ServerName, UnixTime};
+        use tokio_rustls::rustls::client::danger::{
+            HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier,
+        };
+        use tokio_rustls::rustls::DigitallySignedStruct;
+        use tokio_rustls::rustls::Error as RustlsError;
+        use tokio_rustls::rustls::SignatureScheme;
+        use x509_parser::pem::parse_x509_pem;
+
+        #[derive(Debug)]
+        pub struct TestServerCertVerifier<'a> {
+            scheme: SignatureScheme,
+            cert_der: CertificateDer<'a>,
+        }
+
+        impl TestServerCertVerifier<'_> {
+            pub fn new(scheme: SignatureScheme, cert_path: PathBuf) -> Self {
+                let cert = fs::read(&cert_path).unwrap();
+                let (_, pem) = parse_x509_pem(&cert).unwrap();
+                let cert_der = CertificateDer::try_from(pem.contents).unwrap();
+                Self { scheme, cert_der }
+            }
+        }
+
+        impl ServerCertVerifier for TestServerCertVerifier<'_> {
+            fn verify_server_cert(
+                &self,
+                end_entity: &CertificateDer<'_>,
+                intermediates: &[CertificateDer<'_>],
+                server_name: &ServerName<'_>,
+                ocsp_response: &[u8],
+                now: UnixTime,
+            ) -> Result<ServerCertVerified, RustlsError> {
+                assert_eq!(&self.cert_der, end_entity);
+                Ok(ServerCertVerified::assertion())
+            }
+
+            fn verify_tls12_signature(
+                &self,
+                message: &[u8],
+                cert: &CertificateDer<'_>,
+                dss: &DigitallySignedStruct,
+            ) -> Result<HandshakeSignatureValid, RustlsError> {
+                Ok(HandshakeSignatureValid::assertion())
+            }
+
+            fn verify_tls13_signature(
+                &self,
+                message: &[u8],
+                cert: &CertificateDer<'_>,
+                dss: &DigitallySignedStruct,
+            ) -> Result<HandshakeSignatureValid, RustlsError> {
+                Ok(HandshakeSignatureValid::assertion())
+            }
+
+            fn supported_verify_schemes(&self) -> Vec<SignatureScheme> {
+                vec![self.scheme]
+            }
+        }
+    }
+}