From 4c1c4dea7b95ce7df842992e1e0cd8e32a4d944f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 6 Nov 2024 15:59:44 +0000 Subject: [PATCH 01/59] chore: update dependencies (#2216) idna 1.0 crate added a lot of dependencies --- e2e_cleanup/go.mod | 31 +- e2e_cleanup/go.sum | 82 ++--- flow/go.mod | 80 +++-- flow/go.sum | 178 ++++++----- nexus/Cargo.lock | 484 ++++++++++++++++++++++++------ nexus/Cargo.toml | 2 +- nexus/peer-bigquery/Cargo.toml | 2 +- nexus/peer-bigquery/src/lib.rs | 12 +- nexus/peer-bigquery/src/stream.rs | 22 +- ui/package-lock.json | 329 ++++++++++---------- ui/package.json | 4 +- 11 files changed, 764 insertions(+), 462 deletions(-) diff --git a/e2e_cleanup/go.mod b/e2e_cleanup/go.mod index 4779d8c877..64dbd19f36 100644 --- a/e2e_cleanup/go.mod +++ b/e2e_cleanup/go.mod @@ -3,17 +3,17 @@ module github.com/PeerDB-io/peer-flow-cleanup go 1.23.0 require ( - cloud.google.com/go/bigquery v1.63.1 + cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/pubsub v1.45.1 - github.com/snowflakedb/gosnowflake v1.11.2 + github.com/snowflakedb/gosnowflake v1.12.0 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 - google.golang.org/api v0.203.0 + google.golang.org/api v0.204.0 ) require ( cloud.google.com/go v0.116.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.2 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect @@ -21,20 +21,21 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.2 // indirect - github.com/aws/aws-sdk-go-v2 v1.32.2 // indirect + github.com/aws/aws-sdk-go-v2 v1.32.3 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.41 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.42 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared 
v1.18.3 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 // indirect github.com/aws/smithy-go v1.22.0 // indirect github.com/danieljoos/wincred v1.2.2 // indirect github.com/dvsekhvalnov/jose2go v1.7.0 // indirect diff --git a/e2e_cleanup/go.sum b/e2e_cleanup/go.sum index 42b74df808..198f6b4ec0 100644 --- a/e2e_cleanup/go.sum +++ b/e2e_cleanup/go.sum @@ -1,12 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= -cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/bigquery v1.64.0 h1:vSSZisNyhr2ioJE1OuYBQrnrpB7pIhRQm4jfjc7E/js= +cloud.google.com/go/bigquery v1.64.0/go.mod h1:gy8Ooz6HF7QmA+TRtX8tZmXBKH5mCFBwUApGAb3zI7Y= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datacatalog v1.22.1 h1:i0DyKb/o7j+0vgaFtimcRFjYsD6wFw1jpnODYUyiYRs= @@ -38,46 +38,48 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQK github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE= github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= -github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= -github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk= +github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod 
h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= -github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= -github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 h1:os83HS/WfOwi1LsZWLCSHTyj+whvPGaxUsq/D1Ol2Q0= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34/go.mod h1:tG0BaDCAweumHRsOHm72tuPgAfRLASQThgthWYeTyV8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/aws-sdk-go-v2/config v1.28.1 h1:oxIvOUXy8x0U3fR//0eq+RdCKimWI900+SV+10xsCBw= +github.com/aws/aws-sdk-go-v2/config v1.28.1/go.mod h1:bRQcttQJiARbd5JZxw6wG0yIK3eLeSCPdg6uqmmlIiI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42 h1:sBP0RPjBU4neGpIYyx8mkU2QqLPl5u9cmdTWVzIpHkM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42/go.mod h1:FwZBfU530dJ26rv9saAbxa9Ej3eF/AK0OAY86k13n4M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 h1:68jFVtt3NulEzojFesM/WVarlFpCaXLKaBxDpzkQ9OQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18/go.mod h1:Fjnn5jQVIo6VyedMc0/EhPpfNlPl7dHV916O6B+49aE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 h1:ihPPdcCVSN0IvBByXwqVp28/l4VosBZ6sDulcvU2J7w= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35/go.mod h1:JkgEhs3SVF51Dj3m1Bj+yL8IznpxzkwlA3jLg3x7Kls= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 
h1:4FMHqLfk0efmTqhXVRL5xYRqlEBNBiRI7N6w4jsEdd4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2/go.mod h1:LWoqeWlK9OZeJxsROW2RqrSPvQHKTpp69r/iDjwsSaw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 h1:t7iUP9+4wdc5lt3E41huP+GvQZJD38WLsgVp4iOtAjg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2/go.mod h1:/niFCtmuQNxqx9v8WAPq5qh7EH25U4BF6tjoyq9bObM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 h1:MkQ4unegQEStiQYmfFj+Aq5uTp265ncSmm0XTQwDwi0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3/go.mod h1:FZ9j3PFHHAR+w0BSEjK955w5YD2UwB/l/H0yAK3MJvI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 h1:2YCmIXv3tmiItw0LlYf6v7gEHebLY45kBEnPezbUKyU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3/go.mod h1:u19stRyNPxGhj6dRm+Cdgu6N75qnbW7+QN0q0dsAk58= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyFUsSnbaUWChuSGzs= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -174,8 +176,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= -github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= +github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= +github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -266,8 +268,8 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= -google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= diff --git a/flow/go.mod b/flow/go.mod index e24ffa9fb0..b7eb9d1d65 100644 --- a/flow/go.mod +++ b/flow/go.mod @@ -4,9 +4,9 @@ go 1.23.1 require ( cloud.google.com/go v0.116.0 - cloud.google.com/go/bigquery v1.63.1 + cloud.google.com/go/bigquery v1.64.0 cloud.google.com/go/pubsub v1.45.1 - cloud.google.com/go/storage v1.45.0 + cloud.google.com/go/storage v1.46.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/messaging/azeventhubs v1.2.3 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/eventhub/armeventhub v1.3.0 @@ -17,20 +17,20 @@ require ( github.com/PeerDB-io/gluajson v1.0.2 github.com/PeerDB-io/gluamsgpack v1.0.4 github.com/PeerDB-io/gluautf8 v1.0.0 - github.com/aws/aws-sdk-go-v2 v1.32.2 - github.com/aws/aws-sdk-go-v2/config v1.28.0 - github.com/aws/aws-sdk-go-v2/credentials v1.17.41 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 - github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 - github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 - github.com/aws/aws-sdk-go-v2/service/ses v1.28.2 - github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 + github.com/aws/aws-sdk-go-v2 v1.32.3 + github.com/aws/aws-sdk-go-v2/config v1.28.1 + github.com/aws/aws-sdk-go-v2/credentials v1.17.42 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 + github.com/aws/aws-sdk-go-v2/service/kms v1.37.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 + github.com/aws/aws-sdk-go-v2/service/ses v1.28.3 + github.com/aws/aws-sdk-go-v2/service/sns v1.33.3 github.com/aws/smithy-go v1.22.0 github.com/cockroachdb/pebble v1.1.2 github.com/elastic/go-elasticsearch/v8 v8.15.0 github.com/google/uuid v1.6.0 github.com/grafana/pyroscope-go v1.2.0 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 
github.com/jackc/pgerrcode v0.0.0-20240316143900-6e2875d9b438 github.com/jackc/pglogrepl v0.0.0-20240307033717-828fbfe908e9 github.com/jackc/pgx/v5 v5.7.1 @@ -44,56 +44,53 @@ require ( github.com/orcaman/concurrent-map/v2 v2.0.1 github.com/shopspring/decimal v1.4.0 github.com/slack-go/slack v0.15.0 - github.com/snowflakedb/gosnowflake v1.11.2 + github.com/snowflakedb/gosnowflake v1.12.0 github.com/stretchr/testify v1.9.0 github.com/twmb/franz-go v1.18.0 github.com/twmb/franz-go/pkg/kadm v1.14.0 github.com/twmb/franz-go/plugin/kslog v1.0.0 github.com/twpayne/go-geos v0.19.0 - github.com/urfave/cli/v3 v3.0.0-alpha9.1 + github.com/urfave/cli/v3 v3.0.0-alpha9.2 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 github.com/yuin/gopher-lua v1.1.1 go.opentelemetry.io/otel v1.31.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 go.opentelemetry.io/otel/metric v1.31.0 go.opentelemetry.io/otel/sdk v1.31.0 go.opentelemetry.io/otel/sdk/metric v1.31.0 - go.opentelemetry.io/otel/trace v1.31.0 - go.temporal.io/api v1.40.0 - go.temporal.io/sdk v1.29.1 + go.temporal.io/api v1.41.0 + go.temporal.io/sdk v1.30.0 go.temporal.io/sdk/contrib/opentelemetry v0.6.0 go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.28.0 golang.org/x/sync v0.8.0 - google.golang.org/api v0.203.0 - google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 + google.golang.org/api v0.204.0 + google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.1 ) require ( cel.dev/expr v0.18.0 // indirect - cloud.google.com/go/auth v0.9.9 // indirect - cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/auth v0.10.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect cloud.google.com/go/monitoring v1.21.2 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/ClickHouse/ch-go v0.63.1 // indirect github.com/DataDog/zstd v1.5.6 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3 // indirect - github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 // indirect github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect github.com/apache/arrow/go/v15 v15.0.2 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 // indirect + 
github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect @@ -129,7 +126,7 @@ require ( github.com/lestrrat-go/option v1.0.1 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nexus-rpc/sdk-go v0.0.10 // indirect + github.com/nexus-rpc/sdk-go v0.0.11 // indirect github.com/paulmach/orb v0.11.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect @@ -144,10 +141,11 @@ require ( go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/term v0.25.0 // indirect - google.golang.org/grpc/stats/opentelemetry v0.0.0-20241025232817-cb329375b14e // indirect + google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 // indirect ) require ( @@ -157,16 +155,16 @@ require ( github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 // indirect github.com/Azure/go-amqp v1.2.0 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.3.0 // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/djherbis/buffer v1.2.0 github.com/djherbis/nio/v3 v3.0.1 @@ -208,7 +206,7 @@ require ( golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect - google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/flow/go.sum b/flow/go.sum index 8f783af565..71299452d2 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -3,32 +3,32 @@ cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE= cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U= -cloud.google.com/go/auth v0.9.9 h1:BmtbpNQozo8ZwW2t7QJjnrQtdganSdmqeIBxHxNkEZQ= -cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= -cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/bigquery v1.63.1 h1:/6syiWrSpardKNxdvldS5CUTRJX1iIkSPXCjLjiGL+g= -cloud.google.com/go/bigquery v1.63.1/go.mod h1:ufaITfroCk17WTqBhMpi8CRjsfHjMX07pDrQaRKKX2o= +cloud.google.com/go/auth v0.10.0 h1:tWlkvFAh+wwTOzXIjrwM64karR1iTBZ/GRr0S/DULYo= +cloud.google.com/go/auth v0.10.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/bigquery v1.64.0 h1:vSSZisNyhr2ioJE1OuYBQrnrpB7pIhRQm4jfjc7E/js= +cloud.google.com/go/bigquery v1.64.0/go.mod h1:gy8Ooz6HF7QmA+TRtX8tZmXBKH5mCFBwUApGAb3zI7Y= cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= -cloud.google.com/go/datacatalog v1.22.1 h1:i0DyKb/o7j+0vgaFtimcRFjYsD6wFw1jpnODYUyiYRs= -cloud.google.com/go/datacatalog v1.22.1/go.mod h1:MscnJl9B2lpYlFoxRjicw19kFTwEke8ReKL5Y/6TWg8= +cloud.google.com/go/datacatalog v1.22.2 h1:9Bi8YO+WBE0YSSQL1tX62Gy/KcdNGLufyVlEJ0eYMrc= +cloud.google.com/go/datacatalog v1.22.2/go.mod h1:9Wamq8TDfL2680Sav7q3zEhBJSPBrDxJU8WtPJ25dBM= cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA= cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= -cloud.google.com/go/kms v1.20.0 h1:uKUvjGqbBlI96xGE669hcVnEMw1Px/Mvfa62dhM5UrY= -cloud.google.com/go/kms v1.20.0/go.mod h1:/dMbFF1tLLFnQV44AoI2GlotbjowyUfgVwezxW291fM= +cloud.google.com/go/kms v1.20.1 h1:og29Wv59uf2FVaZlesaiDAqHFzHaoUyHI3HYp9VUHVg= +cloud.google.com/go/kms v1.20.1/go.mod h1:LywpNiVCvzYNJWS9JUcGJSVTNSwPwi0vBAotzDqn2nc= cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= -cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= -cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/longrunning v0.6.2 h1:xjDfh1pQcWPEvnfjZmwjKQEcHnpz6lHjfy7Fo0MK+hc= +cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU= cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= cloud.google.com/go/pubsub v1.45.1 h1:ZC/UzYcrmK12THWn1P72z+Pnp2vu/zCZRXyhAfP1hJY= cloud.google.com/go/pubsub v1.45.1/go.mod h1:3bn7fTmzZFwaUjllitv1WlsNMkqBgGUb3UdMhI54eCc= 
-cloud.google.com/go/storage v1.45.0 h1:5av0QcIVj77t+44mV4gffFC/LscFRUhto6UBMB5SimM= -cloud.google.com/go/storage v1.45.0/go.mod h1:wpPblkIuMP5jCB/E48Pz9zIo2S/zD8g+ITmxKkPCITE= -cloud.google.com/go/trace v1.11.1 h1:UNqdP+HYYtnm6lb91aNA5JQ0X14GnxkABGlfz2PzPew= -cloud.google.com/go/trace v1.11.1/go.mod h1:IQKNQuBzH72EGaXEodKlNJrWykGZxet2zgjtS60OtjA= +cloud.google.com/go/storage v1.46.0 h1:OTXISBpFd8KaA2ClT3K3oRk8UGOcTHtrZ1bW88xKiic= +cloud.google.com/go/storage v1.46.0/go.mod h1:lM+gMAW91EfXIeMTBmixRsKL/XCxysytoAgduVikjMk= +cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= +cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= @@ -63,23 +63,25 @@ github.com/Azure/go-amqp v1.2.0 h1:NNyfN3/cRszWzMvjmm64yaPZDHX/2DJkowv8Ub9y01I= github.com/Azure/go-amqp v1.2.0/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.0 h1:YjxrAyf/5z9yK0ecQsKjgSdaC4FjXUbwlgxLz05E3YY= +github.com/AzureAD/microsoft-authentication-library-for-go v1.3.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM= github.com/ClickHouse/ch-go v0.63.1/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0= github.com/ClickHouse/clickhouse-go/v2 v2.30.0 h1:AG4D/hW39qa58+JHQIFOSnxyL46H6h2lrmGGk17dhFo= github.com/ClickHouse/clickhouse-go/v2 v2.30.0/go.mod h1:i9ZQAojcayW3RsdCb3YR+n+wC2h65eJsZCscZ1Z1wyo= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3 h1:cb3br57K508pQEFgBxn9GDhPS9HefpyMPK1RzmtMNzk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.3/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3 h1:xir5X8TS8UBVPWg2jHL+cSTf0jZgqYQSA54TscSt1/0= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.3/go.mod h1:SsdWig2J5PMnfMvfJuEb1uZa8Y+kvNyvrULFo69gTFk= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.3 h1:Nl7phYyHjnqofWDpD+6FYdiwtNIxebn0AHLry7Sxb0M= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.3/go.mod h1:pNP/L2wDlaQnQlFvkDKGSruDoYRpmAxB6drgsskfYwg= 
-github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3 h1:2vcVkrNdSMJpoOVAWi9ApsQR5iqNeFGt5Qx8Xlt3IoI= -github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.3/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0 h1:o90wcURuxekmXrtxmYWTyNla0+ZEHhud6DI1ZTxd1vI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.49.0/go.mod h1:6fTWu4m3jocfUZLYF5KsZC1TUfRvEjs7lM4crme/irw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0 h1:jJKWl98inONJAr/IZrdFQUWcwUO95DLY1XMD1ZIut+g= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.49.0/go.mod h1:l2fIqmwB+FKSfvn3bAD/0i+AXAxhIZjTK2svT/mgUXs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0 h1:GYUJLfvd++4DMuMhCFLgLXvFwofIxh/qOwoGuS/LTew= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.49.0/go.mod h1:wRbFgBQUVm1YXrvWKofAEmq9HNJTDphbAaJSSX01KUI= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/PeerDB-io/glua64 v1.0.1 h1:biXLlFF/L5pnJCwDon7hkWkuQPozC8NjKS3J7Wzi69I= @@ -102,48 +104,48 @@ github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7X github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE= github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA= -github.com/aws/aws-sdk-go-v2 v1.32.2 h1:AkNLZEyYMLnx/Q/mSKkcMqwNFXMAvFto9bNsHqcTduI= -github.com/aws/aws-sdk-go-v2 v1.32.2/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= +github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk= +github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6 h1:pT3hpW0cOHRJx8Y0DfJUEQuqPild8jRGmSFmBgvydr0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.6/go.mod h1:j/I2++U0xX+cr44QjHay4Cvxj6FUbnxrgmqN3H1jTZA= -github.com/aws/aws-sdk-go-v2/config v1.28.0 h1:FosVYWcqEtWNxHn8gB/Vs6jOlNwSoyOCA/g/sxyySOQ= -github.com/aws/aws-sdk-go-v2/config v1.28.0/go.mod h1:pYhbtvg1siOOg8h5an77rXle9tVG8T+BWLWAo7cOukc= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41 h1:7gXo+Axmp+R4Z+AK8YFQO0ZV3L0gizGINCOWxSLY9W8= -github.com/aws/aws-sdk-go-v2/credentials v1.17.41/go.mod h1:u4Eb8d3394YLubphT4jLEwN1rLNq2wFOlT6OuxFwPzU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17 h1:TMH3f/SCAWdNtXXVPPu5D6wrr4G5hI1rAxbcocKfC7Q= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.17/go.mod h1:1ZRXLdTpzdJb9fwTMXiLipENRxkGMTn1sfKexGllQCw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34 h1:os83HS/WfOwi1LsZWLCSHTyj+whvPGaxUsq/D1Ol2Q0= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.34/go.mod h1:tG0BaDCAweumHRsOHm72tuPgAfRLASQThgthWYeTyV8= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21 h1:UAsR3xA31QGf79WzpG/ixT9FZvQlh5HY1NRqSHBNOCk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.21/go.mod h1:JNr43NFf5L9YaG3eKTm7HQzls9J+A9YYcGI5Quh1r2Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21 h1:6jZVETqmYCadGFvrYEQfC5fAQmlo80CeL5psbno6r0s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.21/go.mod h1:1SR0GbLlnN3QUmYaflZNiH1ql+1qrSiB2vwcJ+4UM60= +github.com/aws/aws-sdk-go-v2/config v1.28.1 h1:oxIvOUXy8x0U3fR//0eq+RdCKimWI900+SV+10xsCBw= +github.com/aws/aws-sdk-go-v2/config v1.28.1/go.mod h1:bRQcttQJiARbd5JZxw6wG0yIK3eLeSCPdg6uqmmlIiI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42 h1:sBP0RPjBU4neGpIYyx8mkU2QqLPl5u9cmdTWVzIpHkM= +github.com/aws/aws-sdk-go-v2/credentials v1.17.42/go.mod h1:FwZBfU530dJ26rv9saAbxa9Ej3eF/AK0OAY86k13n4M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18 h1:68jFVtt3NulEzojFesM/WVarlFpCaXLKaBxDpzkQ9OQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.18/go.mod h1:Fjnn5jQVIo6VyedMc0/EhPpfNlPl7dHV916O6B+49aE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35 h1:ihPPdcCVSN0IvBByXwqVp28/l4VosBZ6sDulcvU2J7w= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.35/go.mod h1:JkgEhs3SVF51Dj3m1Bj+yL8IznpxzkwlA3jLg3x7Kls= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22 h1:Jw50LwEkVjuVzE1NzkhNKkBf9cRN7MtE1F/b2cOKTUM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.22/go.mod h1:Y/SmAyPcOTmpeVaWSzSKiILfXTVJwrGmYZhcRbhWuEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22 h1:981MHwBaRZM7+9QSR6XamDzF/o7ouUGxFzr+nVSIhrs= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.22/go.mod h1:1RA1+aBEfn+CAB/Mh0MB6LsdCYCnjZm7tKXtnk499ZQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21 h1:7edmS3VOBDhK00b/MwGtGglCm7hhwNYnjJs/PgFdMQE= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.21/go.mod h1:Q9o5h4HoIWG8XfzxqiuK/CGUbepCJ8uTlaE3bAbxytQ= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22 h1:yV+hCAHZZYJQcwAaszoBNwLbPItHvApxT0kVIw6jRgs= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.22/go.mod h1:kbR1TL8llqB1eGnVbybcA4/wgScxdylOdyAd51yxPdw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0 h1:TToQNkvGguu209puTojY/ozlqy2d/SFNcoLIqTFi42g= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.0/go.mod h1:0jp+ltwkf+SwG2fm/PKo8t4y8pJSgOCO4D8Lz3k0aHQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2 h1:4FMHqLfk0efmTqhXVRL5xYRqlEBNBiRI7N6w4jsEdd4= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.2/go.mod h1:LWoqeWlK9OZeJxsROW2RqrSPvQHKTpp69r/iDjwsSaw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2 h1:s7NA1SOw8q/5c0wr8477yOPp0z+uBaXBnLE0XYb0POA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.2/go.mod h1:fnjjWyAW/Pj5HYOxl9LJqWtEwS7W2qgcRLWP+uWbss0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2 h1:t7iUP9+4wdc5lt3E41huP+GvQZJD38WLsgVp4iOtAjg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.2/go.mod h1:/niFCtmuQNxqx9v8WAPq5qh7EH25U4BF6tjoyq9bObM= -github.com/aws/aws-sdk-go-v2/service/kms v1.37.2 h1:tfBABi5R6aSZlhgTWHxL+opYUDOnIGoNcJLwVYv0jLM= -github.com/aws/aws-sdk-go-v2/service/kms v1.37.2/go.mod h1:dZYFcQwuoh+cLOlFnZItijZptmyDhRIkOKWFO1CfzV8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1 
h1:MkQ4unegQEStiQYmfFj+Aq5uTp265ncSmm0XTQwDwi0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.66.1/go.mod h1:cB6oAuus7YXRZhWCc1wIwPywwZ1XwweNp2TVAEGYeB8= -github.com/aws/aws-sdk-go-v2/service/ses v1.28.2 h1:FtmzF/j5v++pa0tuuE0wwvWckHzad+vl/Dy5as0Ateo= -github.com/aws/aws-sdk-go-v2/service/ses v1.28.2/go.mod h1:bSPQlnLDUiQy7XxmKqTBsCVkYrLfnYJbEyAmm/gWcaI= -github.com/aws/aws-sdk-go-v2/service/sns v1.33.2 h1:GeVRrB1aJsGdXxdPY6VOv0SWs+pfdeDlKgiBxi0+V6I= -github.com/aws/aws-sdk-go-v2/service/sns v1.33.2/go.mod h1:c6Sj8zleZXYs4nyU3gpDKTzPWu7+t30YUXoLYRpbUvU= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2 h1:bSYXVyUzoTHoKalBmwaZxs97HU9DWWI3ehHSAMa7xOk= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.2/go.mod h1:skMqY7JElusiOUjMJMOv1jJsP7YUg7DrhgqZZWuzu1U= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2 h1:AhmO1fHINP9vFYUE0LHzCWg/LfUWUF+zFPEcY9QXb7o= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.2/go.mod h1:o8aQygT2+MVP0NaV6kbdE1YnnIM8RRVQzoeUH45GOdI= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2 h1:CiS7i0+FUe+/YY1GvIBLLrR/XNGZ4CtM1Ll0XavNuVo= -github.com/aws/aws-sdk-go-v2/service/sts v1.32.2/go.mod h1:HtaiBI8CjYoNVde8arShXb94UbQQi9L4EMr6D+xGBwo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 h1:kT6BcZsmMtNkP/iYMcRG+mIEA/IbeiUimXtGmqF39y0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3/go.mod h1:Z8uGua2k4PPaGOYn66pK02rhMrot3Xk3tpBuUFPomZU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 h1:qcxX0JYlgWH3hpPUnd6U0ikcl6LLA9sLkXE2w1fpMvY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3/go.mod h1:cLSNEmI45soc+Ef8K/L+8sEA3A3pYFEYf5B5UI+6bH4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 h1:ZC7Y/XgKUxwqcdhO5LE8P6oGP1eh6xlQReWNKfhvJno= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3/go.mod h1:WqfO7M9l9yUAw0HcHaikwRd/H6gzYdz7vjejCA5e2oY= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.3 h1:VpyBA6KP6JgzwokQps8ArQPGy9rFej8adwuuQGcduH8= +github.com/aws/aws-sdk-go-v2/service/kms v1.37.3/go.mod h1:TT/9V4PcmSPpd8LPUNJ8hBHJmpqcfhx6MrbWTkvyR+4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2 h1:p9TNFL8bFUMd+38YIpTAXpoxyz0MxC7FlbFEH4P4E1U= +github.com/aws/aws-sdk-go-v2/service/s3 v1.66.2/go.mod h1:fNjyo0Coen9QTwQLWeV6WO2Nytwiu+cCcWaTdKCAqqE= +github.com/aws/aws-sdk-go-v2/service/ses v1.28.3 h1:/ioiCUft5rJL1ufmmVq3Qzg198wgzj/NRm6eHFthEAc= +github.com/aws/aws-sdk-go-v2/service/ses v1.28.3/go.mod h1:IbgnKNoFmzljAdSbce2P4cNT6pLmrdFI+q02Q2PCM7o= +github.com/aws/aws-sdk-go-v2/service/sns v1.33.3 h1:coZW/SqpINT0VWG8vRWWY9TWUof8TDdxublw2Xur0Zc= +github.com/aws/aws-sdk-go-v2/service/sns v1.33.3/go.mod h1:J/G2xuhwNBlDvEi0WR/bnBbac4KSgpkERna/IXEF52w= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3 h1:UTpsIf0loCIWEbrqdLb+0RxnTXfWh2vhw4nQmFi4nPc= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.3/go.mod h1:FZ9j3PFHHAR+w0BSEjK955w5YD2UwB/l/H0yAK3MJvI= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3 h1:2YCmIXv3tmiItw0LlYf6v7gEHebLY45kBEnPezbUKyU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.3/go.mod h1:u19stRyNPxGhj6dRm+Cdgu6N75qnbW7+QN0q0dsAk58= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3 h1:wVnQ6tigGsRqSWDEEyH6lSAJ9OyFUsSnbaUWChuSGzs= +github.com/aws/aws-sdk-go-v2/service/sts v1.32.3/go.mod h1:VZa9yTFyj4o10YGsmDO4gbQJUvvhY72fhumT8W4LqsE= github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM= github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -300,8 +302,8 @@ github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKt github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -372,8 +374,8 @@ github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nexus-rpc/sdk-go v0.0.10 h1:7jEPUlsghxoD4OJ2H8YbFJ1t4wbxsUef7yZgBfyY3uA= -github.com/nexus-rpc/sdk-go v0.0.10/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ= +github.com/nexus-rpc/sdk-go v0.0.11 h1:qH3Us3spfp50t5ca775V1va2eE6z1zMQDZY4mvbw0CI= +github.com/nexus-rpc/sdk-go v0.0.11/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= @@ -424,8 +426,8 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0= github.com/slack-go/slack v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= -github.com/snowflakedb/gosnowflake v1.11.2 h1:eAMsxrCiC6ij5wX3dHx1TQCBOdDmCK062Ir8rndUkRg= -github.com/snowflakedb/gosnowflake v1.11.2/go.mod h1:WFe+8mpsapDaQjHX6BqJBKtfQCGlGD3lHKeDsKfpx2A= +github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= +github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -454,8 +456,8 @@ github.com/twmb/franz-go/plugin/kslog v1.0.0 h1:I64oEmF+0PDvmyLgwrlOtg4mfpSE9Gwl github.com/twmb/franz-go/plugin/kslog v1.0.0/go.mod h1:8pMjK3OJJJNNYddBSbnXZkIK5dCKFIk9GcVVCDgvnQc= github.com/twpayne/go-geos v0.19.0 
h1:V7vnLe7gY7JOHLTg8+2oykZOw6wpBLHVNlcnzS2FlG0= github.com/twpayne/go-geos v0.19.0/go.mod h1:XGpUjCtZf4Ul6BMii6KA4EmJ9JCNhVP1mohdoReopZ8= -github.com/urfave/cli/v3 v3.0.0-alpha9.1 h1:1fJU+bltkwN8lF4Sni/X0i1d8XwPIrS82ivZ8qsp/q4= -github.com/urfave/cli/v3 v3.0.0-alpha9.1/go.mod h1:FnIeEMYu+ko8zP1F9Ypr3xkZMIDqW3DR92yUtY39q1Y= +github.com/urfave/cli/v3 v3.0.0-alpha9.2 h1:CL8llQj3dGRLVQQzHxS+ZYRLanOuhyK1fXgLKD+qV+Y= +github.com/urfave/cli/v3 v3.0.0-alpha9.2/go.mod h1:FnIeEMYu+ko8zP1F9Ypr3xkZMIDqW3DR92yUtY39q1Y= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= @@ -490,12 +492,6 @@ go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= @@ -506,18 +502,16 @@ go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HY go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.temporal.io/api v1.40.0 h1:rH3HvUUCFr0oecQTBW5tI6DdDQsX2Xb6OFVgt/bvLto= -go.temporal.io/api v1.40.0/go.mod h1:1WwYUMo6lao8yl0371xWUm13paHExN5ATYT/B7QtFis= -go.temporal.io/sdk v1.29.1 h1:y+sUMbUhTU9rj50mwIZAPmcXCtgUdOWS9xHDYRYSgZ0= -go.temporal.io/sdk v1.29.1/go.mod h1:kp//DRvn3CqQVBCtjL51Oicp9wrZYB2s6row1UgzcKQ= +go.temporal.io/api v1.41.0 h1:VYzyWJjJk1jeB9urntA/t7Hiyo2tHdM5xEdtdib4EO8= +go.temporal.io/api v1.41.0/go.mod h1:1WwYUMo6lao8yl0371xWUm13paHExN5ATYT/B7QtFis= +go.temporal.io/sdk v1.30.0 h1:7jzSFZYk+tQ2kIYEP+dvrM7AW9EsCEP52JHCjVGuwbI= +go.temporal.io/sdk v1.30.0/go.mod h1:Pv45F/fVDgWKx+jhix5t/dGgqROVaI+VjPLd3CHWqq0= go.temporal.io/sdk/contrib/opentelemetry v0.6.0 h1:rNBArDj5iTUkcMwKocUShoAW59o6HdS7Nq4CTp4ldj8= go.temporal.io/sdk/contrib/opentelemetry v0.6.0/go.mod h1:Lem8VrE2ks8P+FYcRM3UphPoBr+tfM3v/Kaf0qStzSg= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -609,20 +603,20 @@ golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhS golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= -google.golang.org/api v0.203.0 h1:SrEeuwU3S11Wlscsn+LA1kb/Y5xT8uggJSkIhD08NAU= -google.golang.org/api v0.203.0/go.mod h1:BuOVyCSYEPwJb3npWvDnNmFI92f3GeRnHNkETneT3SI= +google.golang.org/api v0.204.0 h1:3PjmQQEDkR/ENVZZwIYB4W/KzYtN8OrqnNcHWpeR8E4= +google.golang.org/api v0.204.0/go.mod h1:69y8QSoKIbL9F94bWgWAq6wGqGwyjBgi2y8rAK8zLag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38 h1:Q3nlH8iSQSRUwOskjbcSMcF2jiYMNiQYZ0c2KEJLKKU= -google.golang.org/genproto v0.0.0-20241021214115-324edc3d5d38/go.mod h1:xBI+tzfqGGN2JBeSebfKXFSdBpWVQ7sLW40PTupVRm4= -google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw= -google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28 h1:KJjNNclfpIkVqrZlTWcgOOaVQ00LdBnoEaRfkUx760s= +google.golang.org/genproto v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:mt9/MofW7AWQ+Gy179ChOnvmJatV8YHUmrcedo9CIFI= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -631,8 +625,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20241025232817-cb329375b14e h1:SoMI+r+Qsp379U9BlVzrHtqAqYP3NEv9vNhYqUaAWOg= -google.golang.org/grpc/stats/opentelemetry v0.0.0-20241025232817-cb329375b14e/go.mod h1:jzYlkSMbKypzuu6xoAEijsNVo9ZeDF1u/zCfFgsx7jg= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 h1:hUfOButuEtpc0UvYiaYRbNwxVYr0mQQOWq6X8beJ9Gc= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3/go.mod h1:jzYlkSMbKypzuu6xoAEijsNVo9ZeDF1u/zCfFgsx7jg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock index 8dd801e5e9..580ff2b99c 100644 --- a/nexus/Cargo.lock +++ b/nexus/Cargo.lock @@ -95,9 +95,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -110,9 +110,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" @@ -144,9 +144,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "ar" @@ -187,7 +187,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -209,7 +209,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -220,7 +220,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -304,9 +304,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2afbd208dabc6785946d4ef2444eb1f54fe0aaf0f62f2a4f9a9e9c303aeff0be" +checksum = 
"1f4c89f1d2e0df99ccd21f98598c1e587ad78bd87ae22a74aba392b5566bb038" dependencies = [ "aws-credential-types", "aws-runtime", @@ -326,9 +326,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.47.0" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8776850becacbd3a82a4737a9375ddb5c6832a51379f24443a98e61513f852c" +checksum = "ded855583fa1d22e88fe39fd6062b062376e50a8211989e07cf5e38d52eb3453" dependencies = [ "aws-credential-types", "aws-runtime", @@ -348,9 +348,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0007b5b8004547133319b6c4e87193eee2a0bcb3e4c18c75d09febe9dab7b383" +checksum = "9177ea1192e6601ae16c7273385690d88a7ed386a00b74a6bc894d12103cd933" dependencies = [ "aws-credential-types", "aws-runtime", @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.47.0" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fffaa356e7f1c725908b75136d53207fa714e348f365671df14e95a60530ad3" +checksum = "823ef553cf36713c97453e2ddff1eb8f62be7f4523544e2a5db64caf80100f0a" dependencies = [ "aws-credential-types", "aws-runtime", @@ -493,9 +493,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.7.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e086682a53d3aa241192aa110fa8dfce98f2f5ac2ead0de84d41582c7e8fdb96" +checksum = "92165296a47a812b267b4f41032ff8069ab7ff783696d217f0994a0d7ab585cd" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -510,9 +510,9 @@ dependencies = [ [[package]] name = "aws-smithy-types" -version = "1.2.8" +version = "1.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07c9cdc179e6afbf5d391ab08c85eac817b51c87e1892a5edb5f7bbdc64314b4" +checksum = "4fbd94a32b3a7d55d3806fe27d98d3ad393050439dd05eb53ece36ec5e3d3510" dependencies = [ "base64-simd", "bytes", @@ -672,7 +672,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -731,7 +731,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "syn_derive", ] @@ -865,9 +865,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.31" +version = "1.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "baee610e9452a8f6f0a1b6194ec09ff9e2d85dea54432acdae41aa0761c95d70" dependencies = [ "jobserver", "libc", @@ -987,7 +987,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1186,7 +1186,7 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1201,13 +1201,13 @@ dependencies = [ [[package]] name = "derive-new" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad" +checksum = "2cdc8d50f426189eef89dac62fabfa0abb27d5cc008f25bf4156a0203325becc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1222,6 +1222,17 @@ dependencies = [ "subtle", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -1434,7 +1445,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -1469,9 +1480,9 @@ dependencies = [ [[package]] name = "gcp-bigquery-client" -version = "0.23.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51480b6aca9d7997b8575b7e8b68441a847235673cdd739ae576bbfc708dbd3d" +checksum = "7642bdf60deda83c29b045188527fbc8b633636e0337a65c026436e7cdb26d3b" dependencies = [ "async-stream", "async-trait", @@ -1594,9 +1605,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" dependencies = [ "allocator-api2", "equivalent", @@ -1775,7 +1786,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "hyper-util", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -1786,9 +1797,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper 1.5.0", "hyper-util", @@ -1799,9 +1810,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1839,14 +1850,143 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", ] [[package]] @@ -1866,7 +2006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -1948,6 +2088,29 @@ dependencies = [ "indexmap 2.6.0", ] +[[package]] +name = "lazy-regex" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d8e41c97e6bc7ecb552016274b99fbb5d035e8de288c582d9b933af6677bfda" +dependencies = [ + "lazy-regex-proc_macros", + "once_cell", + "regex-lite", +] + +[[package]] +name = "lazy-regex-proc_macros" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76e1d8b05d672c53cb9c7b920bbba8783845ae4f0b076e02a3db1d02c81b4163" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.87", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -1975,9 +2138,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bda4c6077b0b08da2c48b172195795498381a7c8988c9e6212a6c55c5b9bd70" +checksum = 
"8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libredox" @@ -1996,6 +2159,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "lock_api" version = "0.4.12" @@ -2024,7 +2193,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -2647,9 +2816,9 @@ dependencies = [ [[package]] name = "pgwire" -version = "0.23.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fb7a8b4570b74080587c5f3e187553375d18e72a38c72ca7f70a065972c65d" +checksum = "99e0f273b9ffa92a06b0a900c012df432de901c1854b2411cd7b27e2db165cc8" dependencies = [ "async-trait", "base64 0.22.1", @@ -2658,10 +2827,12 @@ dependencies = [ "derive-new", "futures", "hex", + "lazy-regex", "md5", "postgres-types", "rand", "ring", + "rust_decimal", "stringprep", "thiserror", "tokio", @@ -2705,7 +2876,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2795,7 +2966,7 @@ version = "0.1.0" dependencies = [ "anyhow", "pt", - "rustls 0.23.15", + "rustls 0.23.16", "tokio", "tokio-postgres", "tokio-postgres-rustls", @@ -2869,7 +3040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -2942,7 +3113,7 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.85", + "syn 2.0.87", "tempfile", ] @@ -2956,7 +3127,7 @@ dependencies = [ "itertools", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3041,7 +3212,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "socket2", "thiserror", "tokio", @@ -3058,7 +3229,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.15", + "rustls 0.23.16", "slab", "thiserror", "tinyvec", @@ -3067,10 +3238,11 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fe68c2e9e1a1234e218683dbdf9f9dfcb094113c5ac2b938dfcb9bab4c4140b" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" dependencies = [ + "cfg_aliases", "libc", "once_cell", "socket2", @@ -3192,7 +3364,7 @@ dependencies = [ "quote", "refinery-core", "regex", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3256,9 +3428,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.8" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "async-compression", "base64 0.22.1", @@ -3279,7 +3451,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.15", + "rustls 0.23.16", 
"rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -3408,9 +3580,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.38" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ "bitflags", "errno", @@ -3447,9 +3619,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.15" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fbb44d7acc4e873d613422379f69f237a1b141928c02f6bc6ccfddddc2d7993" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "log", "once_cell", @@ -3648,9 +3820,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -3666,13 +3838,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3863,9 +4035,15 @@ source = "git+https://github.com/peerdb-io/sqlparser-rs.git?branch=main#8c341b80 dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -3918,9 +4096,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.85" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", @@ -3936,7 +4114,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -3954,6 +4132,17 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "tap" version = "1.0.1" @@ -3962,9 +4151,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tar" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ff6c40d3aedb5e06b57c6f669ad17ab063dd1e63d977c6a88e7f4dfa4f04020" +checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" dependencies = [ "filetime", "libc", @@ -3985,22 +4174,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.68" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4046,6 +4235,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" version = "1.8.0" @@ -4079,7 +4278,7 @@ checksum = "8d9ef545650e79f30233c0003bcc2504d7efac6dad25fca40744de773fe2049c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4108,7 +4307,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4145,7 +4344,7 @@ checksum = "27d684bad428a0f2481f42241f821db42c54e2dc81d8c00db8536c506b0a0144" dependencies = [ "const-oid", "ring", - "rustls 0.23.15", + "rustls 0.23.16", "tokio", "tokio-postgres", "tokio-rustls 0.26.0", @@ -4179,7 +4378,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "tokio", ] @@ -4286,7 +4485,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4392,7 +4591,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", ] [[package]] @@ -4523,7 +4722,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pki-types", "serde", "serde_json", @@ -4533,9 +4732,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", "idna", @@ -4548,6 +4747,18 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -4653,7 +4864,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -4687,7 +4898,7 @@ checksum = 
"26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4965,6 +5176,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -5020,6 +5243,30 @@ dependencies = [ "lzma-sys", ] +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", +] + [[package]] name = "yup-oauth2" version = "11.0.0" @@ -5037,7 +5284,7 @@ dependencies = [ "hyper-util", "log", "percent-encoding", - "rustls 0.23.15", + "rustls 0.23.16", "rustls-pemfile 2.2.0", "seahash", "serde", @@ -5065,7 +5312,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", ] [[package]] @@ -5085,7 +5353,29 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.85", + "syn 2.0.87", +] + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", ] [[package]] diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml index 081df6671a..1131c8fd51 100644 --- a/nexus/Cargo.toml +++ b/nexus/Cargo.toml @@ -30,7 +30,7 @@ rust_decimal = { version = "1", default-features = false, features = [ ] } sqlparser = { git = "https://github.com/peerdb-io/sqlparser-rs.git", branch = "main" } tracing = "0.1" -pgwire = { version = "0.23", default-features = false, features = [ +pgwire = { version = "0.26", default-features = false, features = [ "scram", "server-api-ring", ] } diff --git a/nexus/peer-bigquery/Cargo.toml b/nexus/peer-bigquery/Cargo.toml index 8fdd68b5e5..c10964568b 
100644 --- a/nexus/peer-bigquery/Cargo.toml +++ b/nexus/peer-bigquery/Cargo.toml @@ -22,6 +22,6 @@ serde_bytes = "0.11" sqlparser.workspace = true tracing.workspace = true tokio = { version = "1.0", features = ["full"] } -gcp-bigquery-client = "0.23" +gcp-bigquery-client = "0.24" uuid = { version = "1.0", features = ["serde", "v4"] } value = { path = "../value" } diff --git a/nexus/peer-bigquery/src/lib.rs b/nexus/peer-bigquery/src/lib.rs index 797406934d..4998dbf7cc 100644 --- a/nexus/peer-bigquery/src/lib.rs +++ b/nexus/peer-bigquery/src/lib.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context; use gcp_bigquery_client::{ - model::{query_request::QueryRequest, query_response::ResultSet}, + model::{query_request::QueryRequest, query_response::QueryResponse}, yup_oauth2, Client, }; use peer_connections::PeerConnectionTracker; @@ -62,7 +62,7 @@ impl BigQueryQueryExecutor { }) } - async fn run_tracked(&self, query: &str) -> PgWireResult { + async fn run_tracked(&self, query: &str) -> PgWireResult { let mut query_req = QueryRequest::new(query); query_req.timeout_ms = Some(Duration::from_secs(120).as_millis() as i32); @@ -105,9 +105,9 @@ impl QueryExecutor for BigQueryQueryExecutor { let query = query.to_string(); tracing::info!("bq rewritten query: {}", query); - let result_set = self.run_tracked(&query).await?; + let query_response = self.run_tracked(&query).await?; - let cursor = BqRecordStream::new(result_set); + let cursor = BqRecordStream::from(query_response); tracing::info!( "retrieved {} rows for query {}", cursor.get_num_records(), @@ -220,8 +220,8 @@ impl QueryExecutor for BigQueryQueryExecutor { query.limit = Some(Expr::Value(Value::Number("0".to_owned(), false))); let query = query.to_string(); - let result_set = self.run_tracked(&query).await?; - let schema = BqSchema::from_result_set(&result_set); + let query_response = self.run_tracked(&query).await?; + let schema = BqSchema::from(&query_response); // log the schema tracing::info!("[bigquery] schema: {:?}", schema); diff --git a/nexus/peer-bigquery/src/stream.rs b/nexus/peer-bigquery/src/stream.rs index d0f7e5b5b7..4a7ceb58ba 100644 --- a/nexus/peer-bigquery/src/stream.rs +++ b/nexus/peer-bigquery/src/stream.rs @@ -8,7 +8,7 @@ use std::{ use chrono::DateTime; use futures::Stream; use gcp_bigquery_client::model::{ - field_type::FieldType, query_response::ResultSet, table_field_schema::TableFieldSchema, + field_type::FieldType, query_response::{ResultSet, QueryResponse}, table_field_schema::TableFieldSchema, }; use peer_cursor::{Record, RecordStream, Schema}; use pgwire::{ @@ -57,10 +57,9 @@ fn convert_field_type(field_type: &FieldType) -> Type { } } -impl BqSchema { - pub fn from_result_set(result_set: &ResultSet) -> Self { - let bq_schema = result_set - .query_response() +impl From<&QueryResponse> for BqSchema { + fn from(query_response: &QueryResponse) -> Self { + let bq_schema = query_response .schema .as_ref() .expect("Schema is not present"); @@ -84,24 +83,29 @@ impl BqSchema { fields: fields.clone(), } } +} +impl BqSchema { pub fn schema(&self) -> Schema { self.schema.clone() } } -impl BqRecordStream { - pub fn new(result_set: ResultSet) -> Self { - let bq_schema = BqSchema::from_result_set(&result_set); +impl From for BqRecordStream { + fn from(query_response: QueryResponse) -> Self { + let schema = BqSchema::from(&query_response); + let result_set = ResultSet::new_from_query_response(query_response); let num_records = result_set.row_count(); Self { result_set, - schema: bq_schema, + schema, num_records, } } +} 
+impl BqRecordStream { pub fn get_num_records(&self) -> usize { self.num_records } diff --git a/ui/package-lock.json b/ui/package-lock.json index 636f50209c..5d64807ea2 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -29,8 +29,8 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "lucide-react": "^0.453.0", - "material-symbols": "^0.25.0", + "lucide-react": "^0.454.0", + "material-symbols": "^0.26.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46", "next": "^14.2.14", @@ -77,9 +77,9 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.0.tgz", - "integrity": "sha512-INCKxTtbXtcNbUZ3YXutwMpEleqttcswhAdee7dhuoVrD2cnuc3PqtERBtxkX5nziX9vnBL8WXmSGwv8CuPV6g==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", "license": "MIT", "dependencies": { "@babel/helper-validator-identifier": "^7.25.9", @@ -91,12 +91,12 @@ } }, "node_modules/@babel/generator": { - "version": "7.26.0", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.0.tgz", - "integrity": "sha512-/AIkAmInnWwgEAJGQr9vY0c66Mj6kjkE2ZPB1PurTRaRAh3U+J45sAQMjQDJdh4WbR3l0x5xkimXBKyBXXAu2w==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.26.2.tgz", + "integrity": "sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw==", "license": "MIT", "dependencies": { - "@babel/parser": "^7.26.0", + "@babel/parser": "^7.26.2", "@babel/types": "^7.26.0", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", @@ -138,9 +138,9 @@ } }, "node_modules/@babel/parser": { - "version": "7.26.1", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.1.tgz", - "integrity": "sha512-reoQYNiAJreZNsJzyrDNzFQ+IQ5JFiIzAHJg9bn94S3l+4++J7RsIhNMoB+lgP/9tpmiAQqspv+xfdxTSzREOw==", + "version": "7.26.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.26.2.tgz", + "integrity": "sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ==", "license": "MIT", "dependencies": { "@babel/types": "^7.26.0" @@ -376,9 +376,9 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.0.tgz", - "integrity": "sha512-gh7PdNombP8ftL8TinYC8Xd7WEypB8EKV4PI2h0eMzndKjPCXuo2zUiZtD2Hu+MSPt02Ty2MdS788ADl9ai1rA==", + "version": "4.12.1", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", + "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==", "dev": true, "license": "MIT", "engines": { @@ -429,9 +429,9 @@ } }, "node_modules/@floating-ui/dom": { - "version": "1.6.11", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.11.tgz", - "integrity": "sha512-qkMCxSR24v2vGkhYDo/UzxfJN3D4syqSjyuTFz6C7XcpU1pASPRieNI0Kj5VP3/503mOfYiGY891ugBX1GlABQ==", + "version": "1.6.12", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.12.tgz", + "integrity": "sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==", "license": "MIT", "dependencies": { "@floating-ui/core": "^1.6.0", @@ -699,15 +699,15 @@ } }, "node_modules/@next/env": { - 
"version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.16.tgz", - "integrity": "sha512-fLrX5TfJzHCbnZ9YUSnGW63tMV3L4nSfhgOQ0iCcX21Pt+VSTDuaLsSuL8J/2XAiVA5AnzvXDpf6pMs60QxOag==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.17.tgz", + "integrity": "sha512-MCgO7VHxXo8sYR/0z+sk9fGyJJU636JyRmkjc7ZJY8Hurl8df35qG5hoAh5KMs75FLjhlEo9bb2LGe89Y/scDA==", "license": "MIT" }, "node_modules/@next/eslint-plugin-next": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.16.tgz", - "integrity": "sha512-noORwKUMkKc96MWjTOwrsUCjky0oFegHbeJ1yEnQBGbMHAaTEIgLZIIfsYF0x3a06PiS+2TXppfifR+O6VWslg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/eslint-plugin-next/-/eslint-plugin-next-14.2.17.tgz", + "integrity": "sha512-fW6/u1jjlBQrMs1ExyINehaK3B+LEW5UqdF6QYL07QK+SECkX0hnEyPMaNKj0ZFzirQ9D8jLWQ00P8oua4yx9g==", "dev": true, "license": "MIT", "dependencies": { @@ -715,9 +715,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.16.tgz", - "integrity": "sha512-uFT34QojYkf0+nn6MEZ4gIWQ5aqGF11uIZ1HSxG+cSbj+Mg3+tYm8qXYd3dKN5jqKUm5rBVvf1PBRO/MeQ6rxw==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.17.tgz", + "integrity": "sha512-WiOf5nElPknrhRMTipXYTJcUz7+8IAjOYw3vXzj3BYRcVY0hRHKWgTgQ5439EvzQyHEko77XK+yN9x9OJ0oOog==", "cpu": [ "arm64" ], @@ -731,9 +731,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.16.tgz", - "integrity": "sha512-mCecsFkYezem0QiZlg2bau3Xul77VxUD38b/auAjohMA22G9KTJneUYMv78vWoCCFkleFAhY1NIvbyjj1ncG9g==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.17.tgz", + "integrity": "sha512-29y425wYnL17cvtxrDQWC3CkXe/oRrdt8ie61S03VrpwpPRI0XsnTvtKO06XCisK4alaMnZlf8riwZIbJTaSHQ==", "cpu": [ "x64" ], @@ -747,9 +747,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.16.tgz", - "integrity": "sha512-yhkNA36+ECTC91KSyZcgWgKrYIyDnXZj8PqtJ+c2pMvj45xf7y/HrgI17hLdrcYamLfVt7pBaJUMxADtPaczHA==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.17.tgz", + "integrity": "sha512-SSHLZls3ZwNEHsc+d0ynKS+7Af0Nr8+KTUBAy9pm6xz9SHkJ/TeuEg6W3cbbcMSh6j4ITvrjv3Oi8n27VR+IPw==", "cpu": [ "arm64" ], @@ -763,9 +763,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.16.tgz", - "integrity": "sha512-X2YSyu5RMys8R2lA0yLMCOCtqFOoLxrq2YbazFvcPOE4i/isubYjkh+JCpRmqYfEuCVltvlo+oGfj/b5T2pKUA==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.17.tgz", + "integrity": "sha512-VFge37us5LNPatB4F7iYeuGs9Dprqe4ZkW7lOEJM91r+Wf8EIdViWHLpIwfdDXinvCdLl6b4VyLpEBwpkctJHA==", "cpu": [ "arm64" ], @@ -779,9 +779,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.16.tgz", - "integrity": 
"sha512-9AGcX7VAkGbc5zTSa+bjQ757tkjr6C/pKS7OK8cX7QEiK6MHIIezBLcQ7gQqbDW2k5yaqba2aDtaBeyyZh1i6Q==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.17.tgz", + "integrity": "sha512-aaQlpxUVb9RZ41adlTYVQ3xvYEfBPUC8+6rDgmQ/0l7SvK8S1YNJzPmDPX6a4t0jLtIoNk7j+nroS/pB4nx7vQ==", "cpu": [ "x64" ], @@ -795,9 +795,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.16.tgz", - "integrity": "sha512-Klgeagrdun4WWDaOizdbtIIm8khUDQJ/5cRzdpXHfkbY91LxBXeejL4kbZBrpR/nmgRrQvmz4l3OtttNVkz2Sg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.17.tgz", + "integrity": "sha512-HSyEiFaEY3ay5iATDqEup5WAfrhMATNJm8dYx3ZxL+e9eKv10XKZCwtZByDoLST7CyBmyDz+OFJL1wigyXeaoA==", "cpu": [ "x64" ], @@ -811,9 +811,9 @@ } }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.16.tgz", - "integrity": "sha512-PwW8A1UC1Y0xIm83G3yFGPiOBftJK4zukTmk7DI1CebyMOoaVpd8aSy7K6GhobzhkjYvqS/QmzcfsWG2Dwizdg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.17.tgz", + "integrity": "sha512-h5qM9Btqv87eYH8ArrnLoAHLyi79oPTP2vlGNSg4CDvUiXgi7l0+5KuEGp5pJoMhjuv9ChRdm7mRlUUACeBt4w==", "cpu": [ "arm64" ], @@ -827,9 +827,9 @@ } }, "node_modules/@next/swc-win32-ia32-msvc": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.16.tgz", - "integrity": "sha512-jhPl3nN0oKEshJBNDAo0etGMzv0j3q3VYorTSFqH1o3rwv1MQRdor27u1zhkgsHPNeY1jxcgyx1ZsCkDD1IHgg==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.17.tgz", + "integrity": "sha512-BD/G++GKSLexQjdyoEUgyo5nClU7er5rK0sE+HlEqnldJSm96CIr/+YOTT063LVTT/dUOeQsNgp5DXr86/K7/A==", "cpu": [ "ia32" ], @@ -843,9 +843,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.16.tgz", - "integrity": "sha512-OA7NtfxgirCjfqt+02BqxC3MIgM/JaGjw9tOe4fyZgPsqfseNiMPnCRP44Pfs+Gpo9zPN+SXaFsgP6vk8d571A==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.17.tgz", + "integrity": "sha512-vkQfN1+4V4KqDibkW2q0sJ6CxQuXq5l2ma3z0BRcfIqkAMZiiW67T9yCpwqJKP68QghBtPEFjPAlaqe38O6frw==", "cpu": [ "x64" ], @@ -1337,12 +1337,12 @@ } }, "node_modules/@radix-ui/react-icons": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.0.tgz", - "integrity": "sha512-jQxj/0LKgp+j9BiTXz3O3sgs26RNet2iLWmsPyRz2SIcR4q/4SbazXfnYwbAr+vLYKSfc7qxzyGQA1HLlYiuNw==", + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.1.tgz", + "integrity": "sha512-QvYompk0X+8Yjlo/Fv4McrzxohDdM5GgLHyQcPpcsPvlOSXCGFjdbuyGL5dzRbg0GpknAjQJJZzdiRK7iWVuFQ==", "license": "MIT", "peerDependencies": { - "react": "^16.x || ^17.x || ^18.x" + "react": "^16.x || ^17.x || ^18.x || ^19.x" } }, "node_modules/@radix-ui/react-id": { @@ -2211,6 +2211,28 @@ "integrity": "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==", "license": "MIT" }, + "node_modules/@types/eslint": { + "version": "9.6.1", + "resolved": 
"https://registry.npmjs.org/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://registry.npmjs.org/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, "node_modules/@types/estree": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.6.tgz", @@ -2233,9 +2255,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.8.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.8.1.tgz", - "integrity": "sha512-k6Gi8Yyo8EtrNtkHXutUu2corfDf9su95VYVP10aGYMMROM6SAItZi0w1XszA6RtWTHSVp5OeFof37w0IEqCQg==", + "version": "22.9.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.9.0.tgz", + "integrity": "sha512-vuyHg81vvWA1Z1ELfvLko2c8f34gyA0zaic0+Rllc5lbCnbSyuvb2Oxpm6TAUAC/2xZN3QGqxBNggD1nNR2AfQ==", "license": "MIT", "dependencies": { "undici-types": "~6.19.8" @@ -2288,17 +2310,17 @@ "license": "MIT" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.11.0.tgz", - "integrity": "sha512-KhGn2LjW1PJT2A/GfDpiyOfS4a8xHQv2myUagTM5+zsormOmBlYsnQ6pobJ8XxJmh6hnHwa2Mbe3fPrDJoDhbA==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.13.0.tgz", + "integrity": "sha512-nQtBLiZYMUPkclSeC3id+x4uVd1SGtHuElTxL++SfP47jR0zfkZBJHc+gL4qPsgTuypz0k8Y2GheaDYn6Gy3rg==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.11.0", - "@typescript-eslint/type-utils": "8.11.0", - "@typescript-eslint/utils": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0", + "@typescript-eslint/scope-manager": "8.13.0", + "@typescript-eslint/type-utils": "8.13.0", + "@typescript-eslint/utils": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -2322,16 +2344,16 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.11.0.tgz", - "integrity": "sha512-lmt73NeHdy1Q/2ul295Qy3uninSqi6wQI18XwSpm8w0ZbQXUpjCAWP1Vlv/obudoBiIjJVjlztjQ+d/Md98Yxg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.13.0.tgz", + "integrity": "sha512-w0xp+xGg8u/nONcGw1UXAr6cjCPU1w0XVyBs6Zqaj5eLmxkKQAByTdV/uGgNN5tVvN/kKpoQlP2cL7R+ajZZIQ==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/scope-manager": "8.11.0", - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/typescript-estree": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0", + "@typescript-eslint/scope-manager": "8.13.0", + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/typescript-estree": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0", "debug": "^4.3.4" }, "engines": { @@ -2351,14 +2373,14 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.11.0", - 
"resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.11.0.tgz", - "integrity": "sha512-Uholz7tWhXmA4r6epo+vaeV7yjdKy5QFCERMjs1kMVsLRKIrSdM6o21W2He9ftp5PP6aWOVpD5zvrvuHZC0bMQ==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.13.0.tgz", + "integrity": "sha512-XsGWww0odcUT0gJoBZ1DeulY1+jkaHUciUq4jKNv4cpInbvvrtDoyBH9rE/n2V29wQJPk8iCH1wipra9BhmiMA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0" + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2369,14 +2391,14 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.11.0.tgz", - "integrity": "sha512-ItiMfJS6pQU0NIKAaybBKkuVzo6IdnAhPFZA/2Mba/uBjuPQPet/8+zh5GtLHwmuFRShZx+8lhIs7/QeDHflOg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.13.0.tgz", + "integrity": "sha512-Rqnn6xXTR316fP4D2pohZenJnp+NwQ1mo7/JM+J1LWZENSLkJI8ID8QNtlvFeb0HnFSK94D6q0cnMX6SbE5/vA==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/typescript-estree": "8.11.0", - "@typescript-eslint/utils": "8.11.0", + "@typescript-eslint/typescript-estree": "8.13.0", + "@typescript-eslint/utils": "8.13.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -2394,9 +2416,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.11.0.tgz", - "integrity": "sha512-tn6sNMHf6EBAYMvmPUaKaVeYvhUsrE6x+bXQTxjQRp360h1giATU0WvgeEys1spbvb5R+VpNOZ+XJmjD8wOUHw==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.13.0.tgz", + "integrity": "sha512-4cyFErJetFLckcThRUFdReWJjVsPCqyBlJTi6IDEpc1GWCIIZRFxVppjWLIMcQhNGhdWJJRYFHpHoDWvMlDzng==", "dev": true, "license": "MIT", "engines": { @@ -2408,14 +2430,14 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.11.0.tgz", - "integrity": "sha512-yHC3s1z1RCHoCz5t06gf7jH24rr3vns08XXhfEqzYpd6Hll3z/3g23JRi0jM8A47UFKNc3u/y5KIMx8Ynbjohg==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.13.0.tgz", + "integrity": "sha512-v7SCIGmVsRK2Cy/LTLGN22uea6SaUIlpBcO/gnMGT/7zPtxp90bphcGf4fyrCQl3ZtiBKqVTG32hb668oIYy1g==", "dev": true, "license": "BSD-2-Clause", "dependencies": { - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/visitor-keys": "8.11.0", + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/visitor-keys": "8.13.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", @@ -2463,16 +2485,16 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.11.0.tgz", - "integrity": "sha512-CYiX6WZcbXNJV7UNB4PLDIBtSdRmRI/nb0FMyqHPTQD1rMjA0foPLaPUV39C/MxkTd/QKSeX+Gb34PPsDVC35g==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.13.0.tgz", + "integrity": "sha512-A1EeYOND6Uv250nybnLZapeXpYMl8tkzYUxqmoKAWnI4sei3ihf2XdZVd+vVOmHGcp3t+P7yRrNsyyiXTvShFQ==", "dev": true, "license": 
"MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.11.0", - "@typescript-eslint/types": "8.11.0", - "@typescript-eslint/typescript-estree": "8.11.0" + "@typescript-eslint/scope-manager": "8.13.0", + "@typescript-eslint/types": "8.13.0", + "@typescript-eslint/typescript-estree": "8.13.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2486,13 +2508,13 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.11.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.11.0.tgz", - "integrity": "sha512-EaewX6lxSjRJnc+99+dqzTeoDZUfyrA52d2/HRrkI830kgovWsmIiTfmr0NZorzqic7ga+1bS60lRBUgR3n/Bw==", + "version": "8.13.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.13.0.tgz", + "integrity": "sha512-7N/+lztJqH4Mrf0lb10R/CbI1EaAMMGyF5y0oJvFoAhafwgiRA7TXyd8TFn8FC8k5y2dTsYogg238qavRGNnlw==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.11.0", + "@typescript-eslint/types": "8.13.0", "eslint-visitor-keys": "^3.4.3" }, "engines": { @@ -2698,16 +2720,6 @@ "node": ">=0.4.0" } }, - "node_modules/acorn-import-attributes": { - "version": "1.9.5", - "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", - "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", - "dev": true, - "license": "MIT", - "peerDependencies": { - "acorn": "^8" - } - }, "node_modules/acorn-jsx": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", @@ -3259,9 +3271,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001672", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001672.tgz", - "integrity": "sha512-XhW1vRo1ob6aeK2w3rTohwTPBLse/rvjq+s3RTSBwnlZqoFFjx9cHsShJjAIbLsLjyoacaTxpLZy9v3gg6zypw==", + "version": "1.0.30001677", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001677.tgz", + "integrity": "sha512-fmfjsOlJUpMWu+mAAtZZZHz7UEwsUxIIvu1TJfO1HqFQvB/B+ii0xr9B5HpbZY/mC4XZ8SvjHJqtAY6pDPQEog==", "funding": [ { "type": "opencollective", @@ -3832,9 +3844,9 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.47", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.47.tgz", - "integrity": "sha512-zS5Yer0MOYw4rtK2iq43cJagHZ8sXN0jDHDKzB+86gSBSAI4v07S97mcq+Gs2vclAxSh1j7vOAHxSVgduiiuVQ==", + "version": "1.5.52", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.52.tgz", + "integrity": "sha512-xtoijJTZ+qeucLBDNztDOuQBE1ksqjvNjvqFoST3nGC7fSpqJ+X6BdTBaY5BHG+IhWWmpc6b/KfpeuEDupEPOQ==", "dev": true, "license": "ISC" }, @@ -3966,9 +3978,9 @@ } }, "node_modules/es-iterator-helpers": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.1.0.tgz", - "integrity": "sha512-/SurEfycdyssORP/E+bj4sEu1CWw4EmLDsHynHwSXQ7utgbrMRWW195pTrCjFgFCddf/UkYm3oqKPRq5i8bJbw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/es-iterator-helpers/-/es-iterator-helpers-1.2.0.tgz", + "integrity": "sha512-tpxqxncxnpw3c93u8n3VOzACmRFoVmWJqbWXvX/JfKbkhBw1oslgPrUfeSt2psuqyEJFD6N/9lg5i7bsKpoq+Q==", "dev": true, "license": "MIT", "dependencies": { @@ -3980,6 +3992,7 @@ "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "globalthis": "^1.0.4", + "gopd": "^1.0.1", "has-property-descriptors": 
"^1.0.2", "has-proto": "^1.0.3", "has-symbols": "^1.0.3", @@ -4134,13 +4147,13 @@ } }, "node_modules/eslint-config-next": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.16.tgz", - "integrity": "sha512-HOcnCJsyLXR7B8wmjaCgkTSpz+ijgOyAkP8OlvANvciP8PspBYFEBTmakNMxOf71fY0aKOm/blFIiKnrM4K03Q==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/eslint-config-next/-/eslint-config-next-14.2.17.tgz", + "integrity": "sha512-5tVFG/BoJ4xZwMmumBe3xcDXb2dvVEvy4BeBCXTxrl+DTHjHv687FN2qBjYx6xVH/Se7YRhsH0KoxvZkJOGRVA==", "dev": true, "license": "MIT", "dependencies": { - "@next/eslint-plugin-next": "14.2.16", + "@next/eslint-plugin-next": "14.2.17", "@rushstack/eslint-patch": "^1.3.3", "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", @@ -5988,9 +6001,9 @@ } }, "node_modules/lucide-react": { - "version": "0.453.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.453.0.tgz", - "integrity": "sha512-kL+RGZCcJi9BvJtzg2kshO192Ddy9hv3ij+cPrVPWSRzgCWCVazoQJxOjAwgK53NomL07HB7GPHW120FimjNhQ==", + "version": "0.454.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.454.0.tgz", + "integrity": "sha512-hw7zMDwykCLnEzgncEEjHeA6+45aeEzRYuKHuyRSOPkhko+J3ySGjGIzu+mmMfDFG1vazHepMaYFYHbTFAZAAQ==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" @@ -6023,9 +6036,9 @@ } }, "node_modules/material-symbols": { - "version": "0.25.1", - "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.25.1.tgz", - "integrity": "sha512-0HopmXLjRs4H99LWajFWIXAt8DpaVMf9lyhKp35HQ+ocb7JJ3eXJTJNkOwccfbJ34qIuwYDwLJQtlzheMFmizw==", + "version": "0.26.0", + "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.26.0.tgz", + "integrity": "sha512-7WefpjuZLsXjE4MHlbi7QVca9y6M45YJws8oC3l7UITfpGDxVwEddQaaqYqtGMGVRFeBw/dIxmlazR5eeZH0rg==", "license": "Apache-2.0" }, "node_modules/memoize-one": { @@ -6228,12 +6241,12 @@ "license": "MIT" }, "node_modules/next": { - "version": "14.2.16", - "resolved": "https://registry.npmjs.org/next/-/next-14.2.16.tgz", - "integrity": "sha512-LcO7WnFu6lYSvCzZoo1dB+IO0xXz5uEv52HF1IUN0IqVTUIZGHuuR10I5efiLadGt+4oZqTcNZyVVEem/TM5nA==", + "version": "14.2.17", + "resolved": "https://registry.npmjs.org/next/-/next-14.2.17.tgz", + "integrity": "sha512-hNo/Zy701DDO3nzKkPmsLRlDfNCtb1OJxFUvjGEl04u7SFa3zwC6hqsOUzMajcaEOEV8ey1GjvByvrg0Qr5AiQ==", "license": "MIT", "dependencies": { - "@next/env": "14.2.16", + "@next/env": "14.2.17", "@swc/helpers": "0.5.5", "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", @@ -6248,15 +6261,15 @@ "node": ">=18.17.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "14.2.16", - "@next/swc-darwin-x64": "14.2.16", - "@next/swc-linux-arm64-gnu": "14.2.16", - "@next/swc-linux-arm64-musl": "14.2.16", - "@next/swc-linux-x64-gnu": "14.2.16", - "@next/swc-linux-x64-musl": "14.2.16", - "@next/swc-win32-arm64-msvc": "14.2.16", - "@next/swc-win32-ia32-msvc": "14.2.16", - "@next/swc-win32-x64-msvc": "14.2.16" + "@next/swc-darwin-arm64": "14.2.17", + "@next/swc-darwin-x64": "14.2.17", + "@next/swc-linux-arm64-gnu": "14.2.17", + "@next/swc-linux-arm64-musl": "14.2.17", + "@next/swc-linux-x64-gnu": "14.2.17", + "@next/swc-linux-x64-musl": "14.2.17", + "@next/swc-win32-arm64-msvc": "14.2.17", + "@next/swc-win32-ia32-msvc": "14.2.17", + "@next/swc-win32-x64-msvc": "14.2.17" }, "peerDependencies": { 
"@opentelemetry/api": "^1.1.0", @@ -7290,9 +7303,9 @@ } }, "node_modules/recharts": { - "version": "2.13.0", - "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.13.0.tgz", - "integrity": "sha512-sbfxjWQ+oLWSZEWmvbq/DFVdeRLqqA6d0CDjKx2PkxVVdoXo16jvENCE+u/x7HxOO+/fwx//nYRwb8p8X6s/lQ==", + "version": "2.13.3", + "resolved": "https://registry.npmjs.org/recharts/-/recharts-2.13.3.tgz", + "integrity": "sha512-YDZ9dOfK9t3ycwxgKbrnDlRC4BHdjlY73fet3a0C1+qGMjXVZe6+VXmpOIIhzkje5MMEL8AN4hLIe4AMskBzlA==", "license": "MIT", "dependencies": { "clsx": "^2.0.0", @@ -8404,9 +8417,9 @@ } }, "node_modules/ts-api-utils": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", - "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.0.tgz", + "integrity": "sha512-032cPxaEKwM+GT3vA5JXNzIaizx388rhsSW79vGRNGXfRRAdEAn2mvk36PvK5HnOchyWZ7afLEXqYCvPCrzuzQ==", "dev": true, "license": "MIT", "engines": { @@ -8436,9 +8449,9 @@ } }, "node_modules/tslib": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.0.tgz", - "integrity": "sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA==", + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", "license": "0BSD" }, "node_modules/type-check": { @@ -8767,19 +8780,19 @@ } }, "node_modules/webpack": { - "version": "5.95.0", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.95.0.tgz", - "integrity": "sha512-2t3XstrKULz41MNMBF+cJ97TyHdyQ8HCt//pqErqDvNjU9YQBnZxIHa11VXsi7F3mb5/aO2tuDxdeTPdU7xu9Q==", + "version": "5.96.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.96.1.tgz", + "integrity": "sha512-l2LlBSvVZGhL4ZrPwyr8+37AunkcYj5qh8o6u2/2rzoPc8gxFJkLj1WxNgooi9pnoc06jh0BjuXnamM4qlujZA==", "dev": true, "license": "MIT", "dependencies": { - "@types/estree": "^1.0.5", + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", "@webassemblyjs/ast": "^1.12.1", "@webassemblyjs/wasm-edit": "^1.12.1", "@webassemblyjs/wasm-parser": "^1.12.1", - "acorn": "^8.7.1", - "acorn-import-attributes": "^1.9.5", - "browserslist": "^4.21.10", + "acorn": "^8.14.0", + "browserslist": "^4.24.0", "chrome-trace-event": "^1.0.2", "enhanced-resolve": "^5.17.1", "es-module-lexer": "^1.2.1", diff --git a/ui/package.json b/ui/package.json index 816cbac3a1..3f42598386 100644 --- a/ui/package.json +++ b/ui/package.json @@ -31,8 +31,8 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "lucide-react": "^0.453.0", - "material-symbols": "^0.25.0", + "lucide-react": "^0.454.0", + "material-symbols": "^0.26.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46", "next": "^14.2.14", From bd6f8a1ccaa6e7e387332ba19d41f4b4a6b93192 Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Wed, 6 Nov 2024 22:10:23 +0530 Subject: [PATCH 02/59] Create renovate.json (#2222) Will create a PR per package manager (for all packages in that manager) --- renovate.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 renovate.json diff --git a/renovate.json b/renovate.json new file mode 100644 index 0000000000..16cee01247 --- /dev/null +++ b/renovate.json @@ -0,0 
+1,29 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "local>PeerDB-io/.github:renovate-config" + ], + "packageRules": [ + { + "groupName": "{{manager}} dependencies", + "groupSlug": "{{manager}}", + "packageRules": [ + { + "groupName": "{{manager}} dependencies", + "groupSlug": "{{manager}}", + "matchPackagePatterns": [ + "*" + ] + } + ], + "separateMajorMinor": false + } + ], + "vulnerabilityAlerts": { + "enabled": true + }, + "timezone": "Etc/UTC", + "schedule": [ + "after 5pm on monday" + ] +} From 7ceb780e9d212b27ea1c5535b387e54a62fb7c41 Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Wed, 6 Nov 2024 22:17:14 +0530 Subject: [PATCH 03/59] Update renovate.json --- renovate.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/renovate.json b/renovate.json index 16cee01247..c26dcfc960 100644 --- a/renovate.json +++ b/renovate.json @@ -1,7 +1,7 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "local>PeerDB-io/.github:renovate-config" + "github>PeerDB-io/.github:renovate-config" ], "packageRules": [ { From 0d5d3e3add2542a86ea8ea16947934b1a7cb3037 Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Wed, 6 Nov 2024 22:31:42 +0530 Subject: [PATCH 04/59] Update renovate.json --- renovate.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/renovate.json b/renovate.json index c26dcfc960..cf219fa4bb 100644 --- a/renovate.json +++ b/renovate.json @@ -1,7 +1,7 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "github>PeerDB-io/.github:renovate-config" + "local>PeerDB-io/.github:renovate-config" ], "packageRules": [ { @@ -25,5 +25,8 @@ "timezone": "Etc/UTC", "schedule": [ "after 5pm on monday" + ], + "additionalReviewers": [ + "team:eng" ] } From 42e3636c1a4f37a8e2b7c15a91b2ca474418cf96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 6 Nov 2024 20:00:07 +0000 Subject: [PATCH 05/59] avoid dividing by zero when calculating page (#2228) --- flow/cmd/mirror_status.go | 9 +++++++-- flow/connectors/postgres/qrep.go | 12 ++++-------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/flow/cmd/mirror_status.go b/flow/cmd/mirror_status.go index ffd6eba459..a0c4a989e2 100644 --- a/flow/cmd/mirror_status.go +++ b/flow/cmd/mirror_status.go @@ -598,10 +598,15 @@ func (h *FlowRequestHandler) CDCBatches(ctx context.Context, req *protos.GetCDCB return nil, err } + var page int32 + if req.Limit != 0 { + page = rowsBehind/int32(req.Limit) + 1 + } + return &protos.GetCDCBatchesResponse{ CdcBatches: batches, Total: total, - Page: rowsBehind/int32(req.Limit) + 1, + Page: page, }, nil } @@ -755,7 +760,7 @@ func (h *FlowRequestHandler) ListMirrorLogs( } page := req.Page - if page == 0 { + if page == 0 && req.NumPerPage != 0 { page = rowsBehind/req.NumPerPage + 1 } diff --git a/flow/connectors/postgres/qrep.go b/flow/connectors/postgres/qrep.go index 2a65ec3534..b393a46913 100644 --- a/flow/connectors/postgres/qrep.go +++ b/flow/connectors/postgres/qrep.go @@ -84,7 +84,6 @@ func (c *PostgresConnector) getNumRowsPartitions( config *protos.QRepConfig, last *protos.QRepPartition, ) ([]*protos.QRepPartition, error) { - var err error numRowsPerPartition := int64(config.NumRowsPerPartition) quotedWatermarkColumn := QuoteIdentifier(config.WatermarkColumn) @@ -116,7 +115,7 @@ func (c *PostgresConnector) getNumRowsPartitions( } var totalRows 
pgtype.Int8 - if err = row.Scan(&totalRows); err != nil { + if err := row.Scan(&totalRows); err != nil { return nil, fmt.Errorf("failed to query for total rows: %w", err) } @@ -177,19 +176,16 @@ func (c *PostgresConnector) getNumRowsPartitions( return nil, fmt.Errorf("failed to scan row: %w", err) } - err = partitionHelper.AddPartition(start, end) - if err != nil { + if err := partitionHelper.AddPartition(start, end); err != nil { return nil, fmt.Errorf("failed to add partition: %w", err) } } - err = rows.Err() - if err != nil { + if err := rows.Err(); err != nil { return nil, fmt.Errorf("failed to read rows: %w", err) } - err = tx.Commit(ctx) - if err != nil { + if err := tx.Commit(ctx); err != nil { return nil, fmt.Errorf("failed to commit transaction: %w", err) } From ab638d5f62a2ee7f45d773ecdec47cca61596307 Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Thu, 7 Nov 2024 06:28:58 +0530 Subject: [PATCH 06/59] fix: temporal admin tools image version (#2230) Image exists, but not tested, should be fine --- docker-compose-dev.yml | 2 +- docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 2d492ca4c9..4807bde8b8 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -95,7 +95,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CLI_ADDRESS=temporal:7233 - TEMPORAL_CLI_SHOW_STACKS=1 - image: temporalio/admin-tools:1.25 + image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1 stdin_open: true tty: true entrypoint: /etc/temporal/entrypoint.sh diff --git a/docker-compose.yml b/docker-compose.yml index 860281b99c..393549f892 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -85,7 +85,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CLI_ADDRESS=temporal:7233 - TEMPORAL_CLI_SHOW_STACKS=1 - image: temporalio/admin-tools:1.25 + image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1 stdin_open: true tty: true entrypoint: /etc/temporal/entrypoint.sh From b7ca715239c4ac4fe63320eb6e04da462fdbc9f0 Mon Sep 17 00:00:00 2001 From: Martin Koppehel Date: Fri, 8 Nov 2024 17:31:28 +0100 Subject: [PATCH 07/59] Implement table rename with atomic exchange (#2229) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, peerdb creates a _resync table and then tries to run ```sql DROP TABLE IF EXISTS target_table; RENAME TABLE _resync_table TO target_table; ``` however, the problem is that this procedure breaks, once a depending dictionary is defined on these tables. In order to enable such behavior, this PR modifies the ClickhouseConnector of Flow to - check whether the database engine supports the `EXCHANGE TABLES` command (https://clickhouse.com/docs/en/sql-reference/statements/exchange) - If it does and the table already existed, runs `EXCHANGE TABLES` followed by a `DROP` which atomically swaps and therefore keeps the dictionary references - If it doesnt, there's a fallback to the old method. Currently, this will work with the Atomic() database engine. 
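On an Atomic database the cutover thus becomes a swap followed by a cleanup drop. A minimal sketch of the new sequence (`_resync_table` and `target_table` stand in for the per-mirror generated names):

```sql
-- atomically swap the freshly resynced table into place;
-- dependent materialized views and dictionaries keep their references
EXCHANGE TABLES _resync_table AND target_table;
-- the old data now lives under the _resync name, so drop it
DROP TABLE IF EXISTS _resync_table;
```

If the server reports error code 48 (not implemented), the connector falls back to the old `DROP` + `RENAME` sequence.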
--------- Co-authored-by: Philip Dubé --- flow/connectors/clickhouse/cdc.go | 69 +++++++++++++++++-------------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/flow/connectors/clickhouse/cdc.go b/flow/connectors/clickhouse/cdc.go index 1c0b651ad0..ad47a9dfed 100644 --- a/flow/connectors/clickhouse/cdc.go +++ b/flow/connectors/clickhouse/cdc.go @@ -8,7 +8,7 @@ import ( "log/slog" "strings" - _ "github.com/ClickHouse/clickhouse-go/v2" + "github.com/ClickHouse/clickhouse-go/v2" _ "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/PeerDB-io/peer-flow/connectors/utils" @@ -93,8 +93,7 @@ func (c *ClickHouseConnector) syncRecordsViaAvro( return nil, err } - err = c.ReplayTableSchemaDeltas(ctx, req.FlowJobName, req.Records.SchemaDeltas) - if err != nil { + if err := c.ReplayTableSchemaDeltas(ctx, req.FlowJobName, req.Records.SchemaDeltas); err != nil { return nil, fmt.Errorf("failed to sync schema changes: %w", err) } @@ -113,8 +112,7 @@ func (c *ClickHouseConnector) SyncRecords(ctx context.Context, req *model.SyncRe return nil, err } - err = c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID) - if err != nil { + if err := c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID); err != nil { c.logger.Error("failed to increment id", slog.Any("error", err)) return nil, err } @@ -137,15 +135,13 @@ func (c *ClickHouseConnector) ReplayTableSchemaDeltas(ctx context.Context, flowJ for _, addedColumn := range schemaDelta.AddedColumns { clickHouseColType, err := qvalue.QValueKind(addedColumn.Type).ToDWHColumnType(protos.DBType_CLICKHOUSE) if err != nil { - return fmt.Errorf("failed to convert column type %s to ClickHouse type: %w", - addedColumn.Type, err) + return fmt.Errorf("failed to convert column type %s to ClickHouse type: %w", addedColumn.Type, err) } err = c.execWithLogging(ctx, fmt.Sprintf("ALTER TABLE %s ADD COLUMN IF NOT EXISTS \"%s\" %s", schemaDelta.DstTableName, addedColumn.Name, clickHouseColType)) if err != nil { - return fmt.Errorf("failed to add column %s for table %s: %w", addedColumn.Name, - schemaDelta.DstTableName, err) + return fmt.Errorf("failed to add column %s for table %s: %w", addedColumn.Name, schemaDelta.DstTableName, err) } c.logger.Info(fmt.Sprintf("[schema delta replay] added column %s with data type %s", addedColumn.Name, addedColumn.Type), @@ -186,34 +182,47 @@ func (c *ClickHouseConnector) RenameTables( } allCols := strings.Join(columnNames, ",") - c.logger.Info(fmt.Sprintf("handling soft-deletes for table '%s'...", renameRequest.NewName)) - err = c.execWithLogging(ctx, - fmt.Sprintf("INSERT INTO %s(%s,%s) SELECT %s,true FROM %s WHERE %s = 1", - renameRequest.CurrentName, allCols, signColName, allCols, renameRequest.NewName, signColName)) - if err != nil { + c.logger.Info("handling soft-deletes for table before rename", slog.String("NewName", renameRequest.NewName)) + if err := c.execWithLogging(ctx, + fmt.Sprintf("INSERT INTO `%s`(%s,%s) SELECT %s,true FROM `%s` WHERE %s = 1", + renameRequest.CurrentName, allCols, signColName, allCols, renameRequest.NewName, signColName), + ); err != nil { return nil, fmt.Errorf("unable to handle soft-deletes for table %s: %w", renameRequest.NewName, err) } - } else { - c.logger.Info(fmt.Sprintf("table '%s' does not exist, skipping soft-deletes transfer for it", renameRequest.NewName)) - } - // drop the dst table if exists - err = c.execWithLogging(ctx, "DROP TABLE IF EXISTS "+renameRequest.NewName) - if err != nil { - return nil, fmt.Errorf("unable to 
drop table %s: %w", renameRequest.NewName, err) + // target table exists, so we can attempt to swap. In most cases, we will have Atomic engine, + // which supports a special query to exchange two tables, allowing dependent (materialized) views and dictionaries on these tables + c.logger.Info("attempting atomic exchange", + slog.String("OldName", renameRequest.CurrentName), slog.String("NewName", renameRequest.NewName)) + if err = c.execWithLogging(ctx, + fmt.Sprintf("EXCHANGE TABLES %s and %s", renameRequest.NewName, renameRequest.CurrentName), + ); err == nil { + if err := c.execWithLogging(ctx, fmt.Sprintf(dropTableIfExistsSQL, renameRequest.CurrentName)); err != nil { + return nil, fmt.Errorf("unable to drop exchanged table %s: %w", renameRequest.CurrentName, err) + } + } else if ex, ok := err.(*clickhouse.Exception); !ok || ex.Code != 48 { + // code 48 == not implemented -> move on to the fallback code, in all other error codes / types + // return, since we know/assume that the exchange would be the sensible action + return nil, fmt.Errorf("unable to exchange tables %s and %s: %w", renameRequest.NewName, renameRequest.CurrentName, err) + } } - // rename the src table to dst - err = c.execWithLogging(ctx, fmt.Sprintf("RENAME TABLE %s TO %s", - renameRequest.CurrentName, - renameRequest.NewName)) - if err != nil { - return nil, fmt.Errorf("unable to rename table %s to %s: %w", - renameRequest.CurrentName, renameRequest.NewName, err) + // either original table doesn't exist, in which case it is safe to just run rename, + // or err is set (in which case err comes from EXCHANGE TABLES) + if !originalTableExists || err != nil { + if err := c.execWithLogging(ctx, fmt.Sprintf(dropTableIfExistsSQL, renameRequest.NewName)); err != nil { + return nil, fmt.Errorf("unable to drop table %s: %w", renameRequest.NewName, err) + } + + if err := c.execWithLogging(ctx, + fmt.Sprintf("RENAME TABLE %s TO %s", renameRequest.CurrentName, renameRequest.NewName), + ); err != nil { + return nil, fmt.Errorf("unable to rename table %s to %s: %w", renameRequest.CurrentName, renameRequest.NewName, err) + } } - c.logger.Info(fmt.Sprintf("successfully renamed table '%s' to '%s'", - renameRequest.CurrentName, renameRequest.NewName)) + c.logger.Info("successfully renamed table", + slog.String("OldName", renameRequest.CurrentName), slog.String("NewName", renameRequest.NewName)) } return &protos.RenameTablesOutput{ From 1f445f821864c7b65a9d13b04d563393358c916b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Fri, 8 Nov 2024 21:46:11 +0000 Subject: [PATCH 08/59] nexus: EXECUTE peer($$query$$) (#2232) --- nexus/analyzer/src/lib.rs | 30 +++++++++++++++- nexus/catalog/src/lib.rs | 6 +++- nexus/peer-bigquery/src/lib.rs | 21 ++++++----- nexus/peer-bigquery/src/stream.rs | 4 ++- nexus/peer-cursor/src/lib.rs | 1 + nexus/peer-mysql/src/lib.rs | 6 +++- nexus/peer-postgres/src/lib.rs | 60 +++++++++++++++++-------------- nexus/peer-snowflake/src/lib.rs | 16 +++++++++ nexus/server/src/main.rs | 43 +++++++++++++--------- 9 files changed, 130 insertions(+), 57 deletions(-) diff --git a/nexus/analyzer/src/lib.rs b/nexus/analyzer/src/lib.rs index 49c05a22b4..830da627de 100644 --- a/nexus/analyzer/src/lib.rs +++ b/nexus/analyzer/src/lib.rs @@ -18,7 +18,7 @@ use qrep::process_options; use sqlparser::ast::{ self, visit_relations, visit_statements, CreateMirror::{Select, CDC}, - Expr, FetchDirection, SqlOption, Statement, + DollarQuotedString, Expr, FetchDirection, SqlOption, Statement, Value, }; mod qrep; @@ -116,6 +116,10 
@@ pub enum PeerDDL { peer_name: String, if_exists: bool, }, + ExecutePeer { + peer_name: String, + query: String, + }, CreateMirrorForCDC { if_not_exists: bool, flow_job: Box, @@ -388,6 +392,30 @@ impl StatementAnalyzer for PeerDDLAnalyzer { } } } + Statement::Execute { + name, parameters, .. + } => { + if let Some(Expr::Value(query)) = parameters.first() { + if let Some(query) = match query { + Value::DoubleQuotedString(query) + | Value::SingleQuotedString(query) + | Value::EscapedStringLiteral(query) => Some(query.clone()), + Value::DollarQuotedString(DollarQuotedString { value, .. }) => { + Some(value.clone()) + } + _ => None, + } { + Ok(Some(PeerDDL::ExecutePeer { + peer_name: name.to_string().to_lowercase(), + query: query.to_string(), + })) + } else { + Ok(None) + } + } else { + Ok(None) + } + } Statement::ExecuteMirror { mirror_name } => Ok(Some(PeerDDL::ExecuteMirrorForSelect { flow_job_name: mirror_name.to_string().to_lowercase(), })), diff --git a/nexus/catalog/src/lib.rs b/nexus/catalog/src/lib.rs index f103fce021..015c66b29d 100644 --- a/nexus/catalog/src/lib.rs +++ b/nexus/catalog/src/lib.rs @@ -201,7 +201,7 @@ impl Catalog { let stmt = self .pg .prepare_typed( - "SELECT id, name, type, options, enc_key_id FROM public.peers WHERE name = $1", + "SELECT name, type, options, enc_key_id FROM public.peers WHERE name = $1", &[], ) .await?; @@ -516,6 +516,10 @@ impl Catalog { #[async_trait::async_trait] impl QueryExecutor for Catalog { + async fn execute_raw(&self, query: &str) -> PgWireResult { + peer_postgres::pg_execute_raw(&self.pg, query).await + } + #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] async fn execute(&self, stmt: &Statement) -> PgWireResult { peer_postgres::pg_execute(&self.pg, ast::PostgresAst { peername: None }, stmt).await diff --git a/nexus/peer-bigquery/src/lib.rs b/nexus/peer-bigquery/src/lib.rs index 4998dbf7cc..18c9b145e9 100644 --- a/nexus/peer-bigquery/src/lib.rs +++ b/nexus/peer-bigquery/src/lib.rs @@ -91,6 +91,17 @@ impl BigQueryQueryExecutor { #[async_trait::async_trait] impl QueryExecutor for BigQueryQueryExecutor { + async fn execute_raw(&self, query: &str) -> PgWireResult { + let query_response = self.run_tracked(query).await?; + let cursor = BqRecordStream::from(query_response); + tracing::info!( + "retrieved {} rows for query {}", + cursor.get_num_records(), + query + ); + Ok(QueryOutput::Stream(Box::pin(cursor))) + } + #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] async fn execute(&self, stmt: &Statement) -> PgWireResult { // only support SELECT statements @@ -105,15 +116,7 @@ impl QueryExecutor for BigQueryQueryExecutor { let query = query.to_string(); tracing::info!("bq rewritten query: {}", query); - let query_response = self.run_tracked(&query).await?; - - let cursor = BqRecordStream::from(query_response); - tracing::info!( - "retrieved {} rows for query {}", - cursor.get_num_records(), - query - ); - Ok(QueryOutput::Stream(Box::pin(cursor))) + self.execute_raw(&query).await } Statement::Declare { stmts } => { if stmts.len() != 1 { diff --git a/nexus/peer-bigquery/src/stream.rs b/nexus/peer-bigquery/src/stream.rs index 4a7ceb58ba..76f2f7615d 100644 --- a/nexus/peer-bigquery/src/stream.rs +++ b/nexus/peer-bigquery/src/stream.rs @@ -8,7 +8,9 @@ use std::{ use chrono::DateTime; use futures::Stream; use gcp_bigquery_client::model::{ - field_type::FieldType, query_response::{ResultSet, QueryResponse}, table_field_schema::TableFieldSchema, + field_type::FieldType, + query_response::{QueryResponse, ResultSet}, + 
table_field_schema::TableFieldSchema, }; use peer_cursor::{Record, RecordStream, Schema}; use pgwire::{ diff --git a/nexus/peer-cursor/src/lib.rs b/nexus/peer-cursor/src/lib.rs index 3a31531f4f..306bf0dfca 100644 --- a/nexus/peer-cursor/src/lib.rs +++ b/nexus/peer-cursor/src/lib.rs @@ -46,6 +46,7 @@ pub enum QueryOutput { #[async_trait::async_trait] pub trait QueryExecutor: Send + Sync { + async fn execute_raw(&self, stmt: &str) -> PgWireResult; async fn execute(&self, stmt: &Statement) -> PgWireResult; async fn describe(&self, stmt: &Statement) -> PgWireResult>; } diff --git a/nexus/peer-mysql/src/lib.rs b/nexus/peer-mysql/src/lib.rs index 6868a53a18..831acd56d3 100644 --- a/nexus/peer-mysql/src/lib.rs +++ b/nexus/peer-mysql/src/lib.rs @@ -59,7 +59,11 @@ impl MySqlQueryExecutor { #[async_trait::async_trait] impl QueryExecutor for MySqlQueryExecutor { - // #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] + async fn execute_raw(&self, query: &str) -> PgWireResult { + let cursor = self.query(query.to_string()).await?; + Ok(QueryOutput::Stream(Box::pin(cursor))) + } + async fn execute(&self, stmt: &Statement) -> PgWireResult { // only support SELECT statements match stmt { diff --git a/nexus/peer-postgres/src/lib.rs b/nexus/peer-postgres/src/lib.rs index cb29104d87..4e9c317d25 100644 --- a/nexus/peer-postgres/src/lib.rs +++ b/nexus/peer-postgres/src/lib.rs @@ -44,6 +44,34 @@ async fn schema_from_query(client: &Client, query: &str) -> anyhow::Result PgWireResult { + // first fetch the schema as this connection will be + // short lived, only then run the query as the query + // could hold the pin on the connection for a long time. + let schema = schema_from_query(client, query).await.map_err(|e| { + tracing::error!("error getting schema: {}", e); + PgWireError::ApiError(format!("error getting schema: {}", e).into()) + })?; + + tracing::info!("[peer-postgres] rewritten query: {}", query); + // given that there could be a lot of rows returned, we + // need to use a cursor to stream the rows back to the + // client. + let stream = client + .query_raw(query, std::iter::empty::<&str>()) + .await + .map_err(|e| { + tracing::error!("error executing query: {}", e); + PgWireError::ApiError(format!("error executing query: {}", e).into()) + })?; + + // log that raw query execution has completed + tracing::info!("[peer-postgres] raw query execution completed"); + + let cursor = stream::PgRecordStream::new(stream, schema); + Ok(QueryOutput::Stream(Box::pin(cursor))) +} + pub async fn pg_execute( client: &Client, ast: ast::PostgresAst, @@ -58,33 +86,7 @@ pub async fn pg_execute( ast.rewrite_query(&mut query); let rewritten_query = query.to_string(); - // first fetch the schema as this connection will be - // short lived, only then run the query as the query - // could hold the pin on the connection for a long time. - let schema = schema_from_query(client, &rewritten_query) - .await - .map_err(|e| { - tracing::error!("error getting schema: {}", e); - PgWireError::ApiError(format!("error getting schema: {}", e).into()) - })?; - - tracing::info!("[peer-postgres] rewritten query: {}", rewritten_query); - // given that there could be a lot of rows returned, we - // need to use a cursor to stream the rows back to the - // client. 
- let stream = client - .query_raw(&rewritten_query, std::iter::empty::<&str>()) - .await - .map_err(|e| { - tracing::error!("error executing query: {}", e); - PgWireError::ApiError(format!("error executing query: {}", e).into()) - })?; - - // log that raw query execution has completed - tracing::info!("[peer-postgres] raw query execution completed"); - - let cursor = stream::PgRecordStream::new(stream, schema); - Ok(QueryOutput::Stream(Box::pin(cursor))) + pg_execute_raw(client, &rewritten_query).await } _ => { let mut rewritten_stmt = stmt.clone(); @@ -120,6 +122,10 @@ pub async fn pg_describe(client: &Client, stmt: &Statement) -> PgWireResult PgWireResult { + pg_execute_raw(&self.client, query).await + } + #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] async fn execute(&self, stmt: &Statement) -> PgWireResult { pg_execute( diff --git a/nexus/peer-snowflake/src/lib.rs b/nexus/peer-snowflake/src/lib.rs index e0af65b58c..7905be7245 100644 --- a/nexus/peer-snowflake/src/lib.rs +++ b/nexus/peer-snowflake/src/lib.rs @@ -292,6 +292,22 @@ impl SnowflakeQueryExecutor { #[async_trait::async_trait] impl QueryExecutor for SnowflakeQueryExecutor { + async fn execute_raw(&self, query: &str) -> PgWireResult { + let result_set = self + .process_query(query) + .await + .map_err(|err| PgWireError::ApiError(err.into()))?; + + let cursor = stream::SnowflakeRecordStream::new( + result_set, + self.partition_index, + self.partition_number, + self.endpoint_url.clone(), + self.auth.clone(), + ); + Ok(QueryOutput::Stream(Box::pin(cursor))) + } + #[tracing::instrument(skip(self, stmt), fields(stmt = %stmt))] async fn execute(&self, stmt: &Statement) -> PgWireResult { match stmt { diff --git a/nexus/server/src/main.rs b/nexus/server/src/main.rs index e1207066ce..ca082d6de2 100644 --- a/nexus/server/src/main.rs +++ b/nexus/server/src/main.rs @@ -105,14 +105,12 @@ impl NexusBackend { } // execute a statement on a peer - async fn execute_statement<'a>( + async fn process_execution<'a>( &self, - executor: &dyn QueryExecutor, - stmt: &sqlparser::ast::Statement, + result: QueryOutput, peer_holder: Option>, ) -> PgWireResult>> { - let res = executor.execute(stmt).await?; - match res { + match result { QueryOutput::AffectedRows(rows) => { Ok(vec![Response::Execution(Tag::new("OK").with_rows(rows))]) } @@ -413,6 +411,20 @@ impl NexusBackend { )))) } } + PeerDDL::ExecutePeer { peer_name, query } => { + let peer = self.catalog.get_peer(peer_name).await.map_err(|err| { + PgWireError::ApiError( + format!("unable to get peer config: {:?}", err).into(), + ) + })?; + let executor = self.get_peer_executor(&peer).await.map_err(|err| { + PgWireError::ApiError( + format!("unable to get peer executor: {:?}", err).into(), + ) + })?; + let res = executor.execute_raw(query).await?; + self.process_execution(res, Some(Box::new(peer))).await + } PeerDDL::DropMirror { .. 
} => self.handle_drop_mirror(&nexus_stmt).await, PeerDDL::DropPeer { if_exists, @@ -578,14 +590,8 @@ impl NexusBackend { } }; - let res = self - .execute_statement(executor.as_ref(), &stmt, peer_holder) - .await; - // log the error if execution failed - if let Err(err) = &res { - tracing::error!("query execution failed: {:?}", err); - } - res + let res = executor.execute(&stmt).await?; + self.process_execution(res, peer_holder).await } NexusStatement::PeerCursor { stmt, cursor } => { @@ -606,12 +612,13 @@ impl NexusBackend { } }; - self.execute_statement(executor.as_ref(), &stmt, None).await + let res = executor.execute(&stmt).await?; + self.process_execution(res, None).await } NexusStatement::Rollback { stmt } => { - self.execute_statement(self.catalog.as_ref(), &stmt, None) - .await + let res = self.catalog.execute(&stmt).await?; + self.process_execution(res, None).await } NexusStatement::Empty => Ok(vec![Response::EmptyQuery]), @@ -1105,7 +1112,9 @@ pub async fn main() -> anyhow::Result<()> { let catalog_config = get_catalog_config(&args).await?; if args.migrations_disabled && args.migrations_only { - return Err(anyhow::anyhow!("Invalid configuration, migrations cannot be enabled and disabled at the same time")); + return Err(anyhow::anyhow!( + "Invalid configuration, migrations cannot be enabled and disabled at the same time" + )); } if !args.migrations_disabled { From 5f431bbc15eebe2cfbc20f5fec971b1123985030 Mon Sep 17 00:00:00 2001 From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> Date: Sat, 9 Nov 2024 04:15:57 +0530 Subject: [PATCH 09/59] EXCHANGE: quote table names (#2233) --- flow/connectors/clickhouse/cdc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/connectors/clickhouse/cdc.go b/flow/connectors/clickhouse/cdc.go index ad47a9dfed..4792321ae3 100644 --- a/flow/connectors/clickhouse/cdc.go +++ b/flow/connectors/clickhouse/cdc.go @@ -195,7 +195,7 @@ func (c *ClickHouseConnector) RenameTables( c.logger.Info("attempting atomic exchange", slog.String("OldName", renameRequest.CurrentName), slog.String("NewName", renameRequest.NewName)) if err = c.execWithLogging(ctx, - fmt.Sprintf("EXCHANGE TABLES %s and %s", renameRequest.NewName, renameRequest.CurrentName), + fmt.Sprintf("EXCHANGE TABLES `%s` and `%s`", renameRequest.NewName, renameRequest.CurrentName), ); err == nil { if err := c.execWithLogging(ctx, fmt.Sprintf(dropTableIfExistsSQL, renameRequest.CurrentName)); err != nil { return nil, fmt.Errorf("unable to drop exchanged table %s: %w", renameRequest.CurrentName, err) From e05ce0724f076040c592533f5cc04f372cece0b4 Mon Sep 17 00:00:00 2001 From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> Date: Sat, 9 Nov 2024 04:26:17 +0530 Subject: [PATCH 10/59] quote table names for drop,fallback rename (#2235) --- flow/connectors/clickhouse/cdc.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/connectors/clickhouse/cdc.go b/flow/connectors/clickhouse/cdc.go index 4792321ae3..97b51a4903 100644 --- a/flow/connectors/clickhouse/cdc.go +++ b/flow/connectors/clickhouse/cdc.go @@ -20,7 +20,7 @@ import ( const ( checkIfTableExistsSQL = `SELECT exists(SELECT 1 FROM system.tables WHERE database = ? AND name = ?) AS table_exists;` - dropTableIfExistsSQL = `DROP TABLE IF EXISTS %s;` + dropTableIfExistsSQL = "DROP TABLE IF EXISTS `%s`;" ) // getRawTableName returns the raw table name for the given table identifier. 
@@ -215,7 +215,7 @@ func (c *ClickHouseConnector) RenameTables( } if err := c.execWithLogging(ctx, - fmt.Sprintf("RENAME TABLE %s TO %s", renameRequest.CurrentName, renameRequest.NewName), + fmt.Sprintf("RENAME TABLE `%s` TO `%s`", renameRequest.CurrentName, renameRequest.NewName), ); err != nil { return nil, fmt.Errorf("unable to rename table %s to %s: %w", renameRequest.CurrentName, renameRequest.NewName, err) } From 0fa358bfcc2c254c4d2bb0ca4fa8da8e6b17bfd0 Mon Sep 17 00:00:00 2001 From: Kaushik Iska Date: Fri, 8 Nov 2024 21:03:46 -0600 Subject: [PATCH 11/59] quote table names (#2234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Philip Dubé --- flow/connectors/clickhouse/cdc.go | 2 +- flow/connectors/clickhouse/clickhouse.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/connectors/clickhouse/cdc.go b/flow/connectors/clickhouse/cdc.go index 97b51a4903..3e002f5028 100644 --- a/flow/connectors/clickhouse/cdc.go +++ b/flow/connectors/clickhouse/cdc.go @@ -138,7 +138,7 @@ func (c *ClickHouseConnector) ReplayTableSchemaDeltas(ctx context.Context, flowJ return fmt.Errorf("failed to convert column type %s to ClickHouse type: %w", addedColumn.Type, err) } err = c.execWithLogging(ctx, - fmt.Sprintf("ALTER TABLE %s ADD COLUMN IF NOT EXISTS \"%s\" %s", + fmt.Sprintf("ALTER TABLE `%s` ADD COLUMN IF NOT EXISTS `%s` %s", schemaDelta.DstTableName, addedColumn.Name, clickHouseColType)) if err != nil { return fmt.Errorf("failed to add column %s for table %s: %w", addedColumn.Name, schemaDelta.DstTableName, err) diff --git a/flow/connectors/clickhouse/clickhouse.go b/flow/connectors/clickhouse/clickhouse.go index 1c7d110e37..8bb5504495 100644 --- a/flow/connectors/clickhouse/clickhouse.go +++ b/flow/connectors/clickhouse/clickhouse.go @@ -92,21 +92,21 @@ func (c *ClickHouseConnector) ValidateCheck(ctx context.Context) error { // add a column if err := c.exec(ctx, - fmt.Sprintf("ALTER TABLE %s ADD COLUMN updated_at DateTime64(9) DEFAULT now64()", validateDummyTableName), + fmt.Sprintf("ALTER TABLE `%s` ADD COLUMN updated_at DateTime64(9) DEFAULT now64()", validateDummyTableName), ); err != nil { return fmt.Errorf("failed to add column to validation table %s: %w", validateDummyTableName, err) } // rename the table if err := c.exec(ctx, - fmt.Sprintf("RENAME TABLE %s TO %s", validateDummyTableName, validateDummyTableName+"_renamed"), + fmt.Sprintf("RENAME TABLE `%s` TO `%s`", validateDummyTableName, validateDummyTableName+"_renamed"), ); err != nil { return fmt.Errorf("failed to rename validation table %s: %w", validateDummyTableName, err) } validateDummyTableName += "_renamed" // insert a row - if err := c.exec(ctx, fmt.Sprintf("INSERT INTO %s VALUES (1, now64())", validateDummyTableName)); err != nil { + if err := c.exec(ctx, fmt.Sprintf("INSERT INTO `%s` VALUES (1, now64())", validateDummyTableName)); err != nil { return fmt.Errorf("failed to insert into validation table %s: %w", validateDummyTableName, err) } From 2dff106c02f2fafcb84f07c8cb21304ad601b9ae Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Tue, 12 Nov 2024 04:59:55 +0530 Subject: [PATCH 12/59] chore(renovate): ignore mysql_async --- renovate.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/renovate.json b/renovate.json index cf219fa4bb..e053c6ed54 100644 --- a/renovate.json +++ b/renovate.json @@ -17,6 +17,11 @@ } ], "separateMajorMinor": false + }, + { + "matchPackageNames": 
["mysql_async"], + "matchManagers": ["cargo"], + "enabled": false } ], "vulnerabilityAlerts": { From e3168da63e2b5d71c440a7e4cf823c9a54226026 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 13:01:35 +0000 Subject: [PATCH 13/59] chore(deps): pin dependencies (#2245) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | alpine | final | pinDigest | -> `1e42bbe` | | [docker/dockerfile](https://redirect.github.com/moby/buildkit) | syntax | minor | `1.2` -> `1.11` | | [docker/dockerfile](https://redirect.github.com/moby/buildkit) | syntax | pinDigest | -> `865e5dd` | | golang | stage | pinDigest | -> `9f68de8` | | lukemathwalker/cargo-chef | stage | pinDigest | -> `9ba204a` | | [node](https://redirect.github.com/nodejs/node) | final | pinDigest | -> `dc8ba2f` | --- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- stacks/flow.Dockerfile | 6 +++--- stacks/peerdb-server.Dockerfile | 6 +++--- stacks/peerdb-ui.Dockerfile | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/stacks/flow.Dockerfile b/stacks/flow.Dockerfile index de9dbb0d5e..0f997777e9 100644 --- a/stacks/flow.Dockerfile +++ b/stacks/flow.Dockerfile @@ -1,6 +1,6 @@ -# syntax=docker/dockerfile:1.2 +# syntax=docker/dockerfile:1.11@sha256:10c699f1b6c8bdc8f6b4ce8974855dd8542f1768c26eb240237b8f1c9c6c9976 -FROM golang:1.23-alpine AS builder +FROM golang:1.23-alpine@sha256:9f68de83bef9e75cda99597d51778f4f5776ab8d9374e1094a3cd724401094c3 AS builder RUN apk add --no-cache gcc geos-dev musl-dev WORKDIR /root/flow @@ -18,7 +18,7 @@ WORKDIR /root/flow ENV CGO_ENABLED=1 RUN go build -ldflags="-s -w" -o /root/peer-flow -FROM alpine:3.20 AS flow-base +FROM alpine:3.20@sha256:1e42bbe2508154c9126d48c2b8a75420c3544343bf86fd041fb7527e017a4b4a AS flow-base RUN apk add --no-cache ca-certificates geos && \ adduser -s /bin/sh -D peerdb USER peerdb diff --git a/stacks/peerdb-server.Dockerfile b/stacks/peerdb-server.Dockerfile index e30dd8bfce..c4c5a2b041 100644 --- a/stacks/peerdb-server.Dockerfile +++ b/stacks/peerdb-server.Dockerfile @@ -1,6 +1,6 @@ -# syntax=docker/dockerfile:1 +# syntax=docker/dockerfile:1@sha256:865e5dd094beca432e8c0a1d5e1c465db5f998dca4e439981029b3b81fb39ed5 -FROM lukemathwalker/cargo-chef:latest-rust-alpine3.20 as chef +FROM lukemathwalker/cargo-chef:latest-rust-alpine3.20@sha256:9ba204a79235804a3a2f41467b09e499daad8bd637c72449ba30ada4070526ff as chef WORKDIR /root FROM chef as planner @@ -21,7 +21,7 @@ COPY protos /root/protos WORKDIR /root/nexus RUN cargo build --release --bin peerdb-server -FROM alpine:3.20 +FROM 
alpine:3.20@sha256:1e42bbe2508154c9126d48c2b8a75420c3544343bf86fd041fb7527e017a4b4a RUN apk add --no-cache ca-certificates postgresql-client curl iputils && \ adduser -s /bin/sh -D peerdb && \ install -d -m 0755 -o peerdb /var/log/peerdb diff --git a/stacks/peerdb-ui.Dockerfile b/stacks/peerdb-ui.Dockerfile index 8f281273bf..cd99e61a5f 100644 --- a/stacks/peerdb-ui.Dockerfile +++ b/stacks/peerdb-ui.Dockerfile @@ -1,7 +1,7 @@ -# syntax=docker/dockerfile:1.2 +# syntax=docker/dockerfile:1.11@sha256:10c699f1b6c8bdc8f6b4ce8974855dd8542f1768c26eb240237b8f1c9c6c9976 # Base stage -FROM node:22-alpine AS base +FROM node:22-alpine@sha256:dc8ba2f61dd86c44e43eb25a7812ad03c5b1b224a19fc6f77e1eb9e5669f0b82 AS base ENV NPM_CONFIG_UPDATE_NOTIFIER=false RUN apk add --no-cache openssl && \ mkdir /app && \ From cea742d6c675fb86e29ebfca8aeb049a6c72e949 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Wed, 13 Nov 2024 01:26:48 +0530 Subject: [PATCH 14/59] ClickHouse: Normalize one batch at a time (#2219) This helps with: - Memory utilisation on ClickHouse side - Observability - Debuggability --------- Co-authored-by: Kevin Biju Co-authored-by: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> --- flow/connectors/clickhouse/normalize.go | 152 +++++++++++---------- flow/connectors/external_metadata/store.go | 4 +- 2 files changed, 80 insertions(+), 76 deletions(-) diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index d65d61e9d7..2c9796fa1a 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -3,9 +3,8 @@ package connclickhouse import ( "cmp" "context" - "database/sql" - "errors" "fmt" + "log/slog" "slices" "strconv" "strings" @@ -266,22 +265,78 @@ func (c *ClickHouseConnector) NormalizeRecords( return nil, fmt.Errorf("failed to copy avro stages to destination: %w", err) } - destinationTableNames, err := c.getDistinctTableNamesInBatch( - ctx, - req.FlowJobName, - req.SyncBatchID, - normBatchID, - ) + rawTbl := c.getRawTableName(req.FlowJobName) + distinctTableNamesBatchMapping, err := c.getDistinctTableNamesInBatchRange( + ctx, req.FlowJobName, req.SyncBatchID, normBatchID) if err != nil { - c.logger.Error("[clickhouse] error while getting distinct table names in batch", "error", err) - return nil, err + return nil, fmt.Errorf("failed to get distinct table names in batch range: %w", err) } - rawTbl := c.getRawTableName(req.FlowJobName) + for batchID := normBatchID + 1; batchID <= req.SyncBatchID; batchID++ { + if err := c.syncTablesInThisBatch(ctx, req, rawTbl, batchID, distinctTableNamesBatchMapping[batchID]); err != nil { + c.logger.Error("[clickhouse] error while syncing tables in this batch", slog.Any("error", err), + slog.Int64("batchID", batchID)) + return nil, err + } + + if err := c.UpdateNormalizeBatchID(ctx, req.FlowJobName, batchID); err != nil { + c.logger.Error("[clickhouse] error while updating normalize batch id", + slog.Any("error", err), + slog.Int64("batchID", batchID)) + return nil, err + } + } + + return &model.NormalizeResponse{ + Done: true, + StartBatchID: normBatchID + 1, + EndBatchID: req.SyncBatchID, + }, nil +} + +func (c *ClickHouseConnector) getDistinctTableNamesInBatchRange( + ctx context.Context, + flowJobName string, + syncBatchID int64, + normalizeBatchID int64, +) (map[int64][]string, error) { + rawTbl := c.getRawTableName(flowJobName) + + q := fmt.Sprintf( + `SELECT DISTINCT _peerdb_batch_id,groupArray(DISTINCT 
_peerdb_destination_table_name) + FROM %s WHERE _peerdb_batch_id>%d AND _peerdb_batch_id<=%d GROUP BY _peerdb_batch_id`, + rawTbl, normalizeBatchID, syncBatchID) + rows, err := c.query(ctx, q) + if err != nil { + return nil, fmt.Errorf("error while querying raw table for distinct table names in batch: %w", err) + } + defer rows.Close() + distinctTableNamesBatchMapping := make(map[int64][]string) + for rows.Next() { + var batchID int32 + var tableNames []string + if err := rows.Scan(&batchID, &tableNames); err != nil { + return nil, fmt.Errorf("error while scanning rows: %w", err) + } + distinctTableNamesBatchMapping[int64(batchID)] = tableNames + } + if rows.Err() != nil { + return nil, fmt.Errorf("failed to read rows: %w", err) + } + + return distinctTableNamesBatchMapping, nil +} + +func (c *ClickHouseConnector) syncTablesInThisBatch( + ctx context.Context, + req *model.NormalizeRecordsRequest, + rawTableName string, + batchID int64, + destinationTableNames []string, +) error { // model the raw table data as inserts. for _, tbl := range destinationTableNames { - // SELECT projection FROM raw_table WHERE _peerdb_batch_id > normalize_batch_id AND _peerdb_batch_id <= sync_batch_id selectQuery := strings.Builder{} selectQuery.WriteString("SELECT ") @@ -300,7 +355,7 @@ func (c *ClickHouseConnector) NormalizeRecords( enablePrimaryUpdate, err := peerdbenv.PeerDBEnableClickHousePrimaryUpdate(ctx, req.Env) if err != nil { - return nil, err + return err } projection := strings.Builder{} @@ -337,7 +392,7 @@ func (c *ClickHouseConnector) NormalizeRecords( var err error clickHouseType, err = colType.ToDWHColumnType(protos.DBType_CLICKHOUSE) if err != nil { - return nil, fmt.Errorf("error while converting column type to clickhouse type: %w", err) + return fmt.Errorf("error while converting column type to clickhouse type: %w", err) } } if (schema.NullableEnabled || columnNullableEnabled) && column.Nullable && !colType.IsArray() { @@ -396,11 +451,9 @@ func (c *ClickHouseConnector) NormalizeRecords( selectQuery.WriteString(projection.String()) selectQuery.WriteString(" FROM ") - selectQuery.WriteString(rawTbl) - selectQuery.WriteString(" WHERE _peerdb_batch_id > ") - selectQuery.WriteString(strconv.FormatInt(normBatchID, 10)) - selectQuery.WriteString(" AND _peerdb_batch_id <= ") - selectQuery.WriteString(strconv.FormatInt(req.SyncBatchID, 10)) + selectQuery.WriteString(rawTableName) + selectQuery.WriteString(" WHERE _peerdb_batch_id = ") + selectQuery.WriteString(strconv.FormatInt(batchID, 10)) selectQuery.WriteString(" AND _peerdb_destination_table_name = '") selectQuery.WriteString(tbl) selectQuery.WriteString("'") @@ -415,11 +468,9 @@ func (c *ClickHouseConnector) NormalizeRecords( selectQuery.WriteString("UNION ALL SELECT ") selectQuery.WriteString(projectionUpdate.String()) selectQuery.WriteString(" FROM ") - selectQuery.WriteString(rawTbl) - selectQuery.WriteString(" WHERE _peerdb_batch_id > ") - selectQuery.WriteString(strconv.FormatInt(normBatchID, 10)) - selectQuery.WriteString(" AND _peerdb_batch_id <= ") - selectQuery.WriteString(strconv.FormatInt(req.SyncBatchID, 10)) + selectQuery.WriteString(rawTableName) + selectQuery.WriteString(" WHERE _peerdb_batch_id = ") + selectQuery.WriteString(strconv.FormatInt(batchID, 10)) selectQuery.WriteString(" AND _peerdb_destination_table_name = '") selectQuery.WriteString(tbl) selectQuery.WriteString("' AND _peerdb_record_type = 1") @@ -435,60 +486,11 @@ func (c *ClickHouseConnector) NormalizeRecords( q := insertIntoSelectQuery.String() if err := 
c.execWithLogging(ctx, q); err != nil { - return nil, fmt.Errorf("error while inserting into normalized table: %w", err) + return fmt.Errorf("error while inserting into normalized table: %w", err) } } - err = c.UpdateNormalizeBatchID(ctx, req.FlowJobName, req.SyncBatchID) - if err != nil { - c.logger.Error("[clickhouse] error while updating normalize batch id", "error", err) - return nil, err - } - - return &model.NormalizeResponse{ - Done: true, - StartBatchID: normBatchID + 1, - EndBatchID: req.SyncBatchID, - }, nil -} - -func (c *ClickHouseConnector) getDistinctTableNamesInBatch( - ctx context.Context, - flowJobName string, - syncBatchID int64, - normalizeBatchID int64, -) ([]string, error) { - rawTbl := c.getRawTableName(flowJobName) - - q := fmt.Sprintf( - `SELECT DISTINCT _peerdb_destination_table_name FROM %s WHERE _peerdb_batch_id > %d AND _peerdb_batch_id <= %d`, - rawTbl, normalizeBatchID, syncBatchID) - - rows, err := c.query(ctx, q) - if err != nil { - return nil, fmt.Errorf("error while querying raw table for distinct table names in batch: %w", err) - } - defer rows.Close() - var tableNames []string - for rows.Next() { - var tableName sql.NullString - err = rows.Scan(&tableName) - if err != nil { - return nil, fmt.Errorf("error while scanning table name: %w", err) - } - - if !tableName.Valid { - return nil, errors.New("table name is not valid") - } - - tableNames = append(tableNames, tableName.String) - } - - if rows.Err() != nil { - return nil, fmt.Errorf("failed to read rows: %w", err) - } - - return tableNames, nil + return nil } func (c *ClickHouseConnector) copyAvroStageToDestination(ctx context.Context, flowJobName string, syncBatchID int64) error { diff --git a/flow/connectors/external_metadata/store.go b/flow/connectors/external_metadata/store.go index 515b622ee1..566bf4c4c7 100644 --- a/flow/connectors/external_metadata/store.go +++ b/flow/connectors/external_metadata/store.go @@ -176,7 +176,9 @@ func (p *PostgresMetadata) UpdateNormalizeBatchID(ctx context.Context, jobName s `UPDATE `+lastSyncStateTableName+ ` SET normalize_batch_id=$2 WHERE job_name=$1`, jobName, batchID) if err != nil { - p.logger.Error("failed to update normalize batch id", slog.Any("error", err)) + p.logger.Error("failed to update normalize batch id", + slog.Any("error", err), + slog.Int64("batchID", batchID)) return err } From 6c618df2921d4e215f3cca03b8c826c491bc6aeb Mon Sep 17 00:00:00 2001 From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> Date: Wed, 13 Nov 2024 22:00:53 +0530 Subject: [PATCH 15/59] Revert "ClickHouse: Normalize one batch at a time" (#2249) Reverts PeerDB-io/peerdb#2219 seems to degrade perf --- flow/connectors/clickhouse/normalize.go | 152 ++++++++++----------- flow/connectors/external_metadata/store.go | 4 +- 2 files changed, 76 insertions(+), 80 deletions(-) diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index 2c9796fa1a..d65d61e9d7 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -3,8 +3,9 @@ package connclickhouse import ( "cmp" "context" + "database/sql" + "errors" "fmt" - "log/slog" "slices" "strconv" "strings" @@ -265,78 +266,22 @@ func (c *ClickHouseConnector) NormalizeRecords( return nil, fmt.Errorf("failed to copy avro stages to destination: %w", err) } - rawTbl := c.getRawTableName(req.FlowJobName) - distinctTableNamesBatchMapping, err := c.getDistinctTableNamesInBatchRange( - ctx, req.FlowJobName, req.SyncBatchID, normBatchID) - if err != nil { - 
return nil, fmt.Errorf("failed to get distinct table names in batch range: %w", err) - } - - for batchID := normBatchID + 1; batchID <= req.SyncBatchID; batchID++ { - if err := c.syncTablesInThisBatch(ctx, req, rawTbl, batchID, distinctTableNamesBatchMapping[batchID]); err != nil { - c.logger.Error("[clickhouse] error while syncing tables in this batch", slog.Any("error", err), - slog.Int64("batchID", batchID)) - return nil, err - } - - if err := c.UpdateNormalizeBatchID(ctx, req.FlowJobName, batchID); err != nil { - c.logger.Error("[clickhouse] error while updating normalize batch id", - slog.Any("error", err), - slog.Int64("batchID", batchID)) - return nil, err - } - } - - return &model.NormalizeResponse{ - Done: true, - StartBatchID: normBatchID + 1, - EndBatchID: req.SyncBatchID, - }, nil -} - -func (c *ClickHouseConnector) getDistinctTableNamesInBatchRange( - ctx context.Context, - flowJobName string, - syncBatchID int64, - normalizeBatchID int64, -) (map[int64][]string, error) { - rawTbl := c.getRawTableName(flowJobName) - - q := fmt.Sprintf( - `SELECT DISTINCT _peerdb_batch_id,groupArray(DISTINCT _peerdb_destination_table_name) - FROM %s WHERE _peerdb_batch_id>%d AND _peerdb_batch_id<=%d GROUP BY _peerdb_batch_id`, - rawTbl, normalizeBatchID, syncBatchID) - - rows, err := c.query(ctx, q) + destinationTableNames, err := c.getDistinctTableNamesInBatch( + ctx, + req.FlowJobName, + req.SyncBatchID, + normBatchID, + ) if err != nil { - return nil, fmt.Errorf("error while querying raw table for distinct table names in batch: %w", err) - } - defer rows.Close() - distinctTableNamesBatchMapping := make(map[int64][]string) - for rows.Next() { - var batchID int32 - var tableNames []string - if err := rows.Scan(&batchID, &tableNames); err != nil { - return nil, fmt.Errorf("error while scanning rows: %w", err) - } - distinctTableNamesBatchMapping[int64(batchID)] = tableNames - } - if rows.Err() != nil { - return nil, fmt.Errorf("failed to read rows: %w", err) + c.logger.Error("[clickhouse] error while getting distinct table names in batch", "error", err) + return nil, err } - return distinctTableNamesBatchMapping, nil -} + rawTbl := c.getRawTableName(req.FlowJobName) -func (c *ClickHouseConnector) syncTablesInThisBatch( - ctx context.Context, - req *model.NormalizeRecordsRequest, - rawTableName string, - batchID int64, - destinationTableNames []string, -) error { // model the raw table data as inserts. 
for _, tbl := range destinationTableNames { + // SELECT projection FROM raw_table WHERE _peerdb_batch_id > normalize_batch_id AND _peerdb_batch_id <= sync_batch_id selectQuery := strings.Builder{} selectQuery.WriteString("SELECT ") @@ -355,7 +300,7 @@ func (c *ClickHouseConnector) syncTablesInThisBatch( enablePrimaryUpdate, err := peerdbenv.PeerDBEnableClickHousePrimaryUpdate(ctx, req.Env) if err != nil { - return err + return nil, err } projection := strings.Builder{} @@ -392,7 +337,7 @@ func (c *ClickHouseConnector) syncTablesInThisBatch( var err error clickHouseType, err = colType.ToDWHColumnType(protos.DBType_CLICKHOUSE) if err != nil { - return fmt.Errorf("error while converting column type to clickhouse type: %w", err) + return nil, fmt.Errorf("error while converting column type to clickhouse type: %w", err) } } if (schema.NullableEnabled || columnNullableEnabled) && column.Nullable && !colType.IsArray() { @@ -451,9 +396,11 @@ func (c *ClickHouseConnector) syncTablesInThisBatch( selectQuery.WriteString(projection.String()) selectQuery.WriteString(" FROM ") - selectQuery.WriteString(rawTableName) - selectQuery.WriteString(" WHERE _peerdb_batch_id = ") - selectQuery.WriteString(strconv.FormatInt(batchID, 10)) + selectQuery.WriteString(rawTbl) + selectQuery.WriteString(" WHERE _peerdb_batch_id > ") + selectQuery.WriteString(strconv.FormatInt(normBatchID, 10)) + selectQuery.WriteString(" AND _peerdb_batch_id <= ") + selectQuery.WriteString(strconv.FormatInt(req.SyncBatchID, 10)) selectQuery.WriteString(" AND _peerdb_destination_table_name = '") selectQuery.WriteString(tbl) selectQuery.WriteString("'") @@ -468,9 +415,11 @@ func (c *ClickHouseConnector) syncTablesInThisBatch( selectQuery.WriteString("UNION ALL SELECT ") selectQuery.WriteString(projectionUpdate.String()) selectQuery.WriteString(" FROM ") - selectQuery.WriteString(rawTableName) - selectQuery.WriteString(" WHERE _peerdb_batch_id = ") - selectQuery.WriteString(strconv.FormatInt(batchID, 10)) + selectQuery.WriteString(rawTbl) + selectQuery.WriteString(" WHERE _peerdb_batch_id > ") + selectQuery.WriteString(strconv.FormatInt(normBatchID, 10)) + selectQuery.WriteString(" AND _peerdb_batch_id <= ") + selectQuery.WriteString(strconv.FormatInt(req.SyncBatchID, 10)) selectQuery.WriteString(" AND _peerdb_destination_table_name = '") selectQuery.WriteString(tbl) selectQuery.WriteString("' AND _peerdb_record_type = 1") @@ -486,11 +435,60 @@ func (c *ClickHouseConnector) syncTablesInThisBatch( q := insertIntoSelectQuery.String() if err := c.execWithLogging(ctx, q); err != nil { - return fmt.Errorf("error while inserting into normalized table: %w", err) + return nil, fmt.Errorf("error while inserting into normalized table: %w", err) } } - return nil + err = c.UpdateNormalizeBatchID(ctx, req.FlowJobName, req.SyncBatchID) + if err != nil { + c.logger.Error("[clickhouse] error while updating normalize batch id", "error", err) + return nil, err + } + + return &model.NormalizeResponse{ + Done: true, + StartBatchID: normBatchID + 1, + EndBatchID: req.SyncBatchID, + }, nil +} + +func (c *ClickHouseConnector) getDistinctTableNamesInBatch( + ctx context.Context, + flowJobName string, + syncBatchID int64, + normalizeBatchID int64, +) ([]string, error) { + rawTbl := c.getRawTableName(flowJobName) + + q := fmt.Sprintf( + `SELECT DISTINCT _peerdb_destination_table_name FROM %s WHERE _peerdb_batch_id > %d AND _peerdb_batch_id <= %d`, + rawTbl, normalizeBatchID, syncBatchID) + + rows, err := c.query(ctx, q) + if err != nil { + return nil, 
fmt.Errorf("error while querying raw table for distinct table names in batch: %w", err) + } + defer rows.Close() + var tableNames []string + for rows.Next() { + var tableName sql.NullString + err = rows.Scan(&tableName) + if err != nil { + return nil, fmt.Errorf("error while scanning table name: %w", err) + } + + if !tableName.Valid { + return nil, errors.New("table name is not valid") + } + + tableNames = append(tableNames, tableName.String) + } + + if rows.Err() != nil { + return nil, fmt.Errorf("failed to read rows: %w", err) + } + + return tableNames, nil } func (c *ClickHouseConnector) copyAvroStageToDestination(ctx context.Context, flowJobName string, syncBatchID int64) error { diff --git a/flow/connectors/external_metadata/store.go b/flow/connectors/external_metadata/store.go index 566bf4c4c7..515b622ee1 100644 --- a/flow/connectors/external_metadata/store.go +++ b/flow/connectors/external_metadata/store.go @@ -176,9 +176,7 @@ func (p *PostgresMetadata) UpdateNormalizeBatchID(ctx context.Context, jobName s `UPDATE `+lastSyncStateTableName+ ` SET normalize_batch_id=$2 WHERE job_name=$1`, jobName, batchID) if err != nil { - p.logger.Error("failed to update normalize batch id", - slog.Any("error", err), - slog.Int64("batchID", batchID)) + p.logger.Error("failed to update normalize batch id", slog.Any("error", err)) return err } From c37b057053404cf6384db8c75027580c249de2be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 13 Nov 2024 16:40:51 +0000 Subject: [PATCH 16/59] cleanup error handling, fixes 2 bugs (#2250) Searched for `if rows.Err` & removed all instances Has tendency to typo into branch referencing `err` which code expects to be result of `rows.Err()` --- flow/connectors/clickhouse/clickhouse.go | 14 ++++++-------- flow/connectors/clickhouse/normalize.go | 8 ++++---- flow/connectors/external_metadata/store.go | 9 ++++----- flow/connectors/postgres/qrep_query_executor.go | 4 ++-- flow/connectors/snowflake/qrep.go | 14 +++++--------- flow/connectors/snowflake/snowflake.go | 6 ++---- 6 files changed, 23 insertions(+), 32 deletions(-) diff --git a/flow/connectors/clickhouse/clickhouse.go b/flow/connectors/clickhouse/clickhouse.go index 8bb5504495..46bec64fbc 100644 --- a/flow/connectors/clickhouse/clickhouse.go +++ b/flow/connectors/clickhouse/clickhouse.go @@ -381,8 +381,7 @@ func (c *ClickHouseConnector) checkTablesEmptyAndEngine(ctx context.Context, tab for rows.Next() { var tableName, engine string var totalRows uint64 - err = rows.Scan(&tableName, &engine, &totalRows) - if err != nil { + if err := rows.Scan(&tableName, &engine, &totalRows); err != nil { return fmt.Errorf("failed to scan information for tables: %w", err) } if totalRows != 0 && optedForInitialLoad { @@ -393,8 +392,8 @@ func (c *ClickHouseConnector) checkTablesEmptyAndEngine(ctx context.Context, tab slog.String("table", tableName), slog.String("engine", engine)) } } - if rows.Err() != nil { - return fmt.Errorf("failed to read rows: %w", rows.Err()) + if err := rows.Err(); err != nil { + return fmt.Errorf("failed to read rows: %w", err) } return nil } @@ -418,14 +417,13 @@ func (c *ClickHouseConnector) getTableColumnsMapping(ctx context.Context, for rows.Next() { var tableName string var fieldDescription protos.FieldDescription - err = rows.Scan(&fieldDescription.Name, &fieldDescription.Type, &tableName) - if err != nil { + if err := rows.Scan(&fieldDescription.Name, &fieldDescription.Type, &tableName); err != nil { return nil, fmt.Errorf("failed to scan columns for tables: %w", err) } 
tableColumnsMapping[tableName] = append(tableColumnsMapping[tableName], &fieldDescription) } - if rows.Err() != nil { - return nil, fmt.Errorf("failed to read rows: %w", rows.Err()) + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("failed to read rows: %w", err) } return tableColumnsMapping, nil } diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index d65d61e9d7..4474c6118d 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -6,6 +6,7 @@ import ( "database/sql" "errors" "fmt" + "log/slog" "slices" "strconv" "strings" @@ -441,7 +442,7 @@ func (c *ClickHouseConnector) NormalizeRecords( err = c.UpdateNormalizeBatchID(ctx, req.FlowJobName, req.SyncBatchID) if err != nil { - c.logger.Error("[clickhouse] error while updating normalize batch id", "error", err) + c.logger.Error("[clickhouse] error while updating normalize batch id", slog.Int64("BatchID", req.SyncBatchID), slog.Any("error", err)) return nil, err } @@ -472,8 +473,7 @@ func (c *ClickHouseConnector) getDistinctTableNamesInBatch( var tableNames []string for rows.Next() { var tableName sql.NullString - err = rows.Scan(&tableName) - if err != nil { + if err := rows.Scan(&tableName); err != nil { return nil, fmt.Errorf("error while scanning table name: %w", err) } @@ -484,7 +484,7 @@ func (c *ClickHouseConnector) getDistinctTableNamesInBatch( tableNames = append(tableNames, tableName.String) } - if rows.Err() != nil { + if err := rows.Err(); err != nil { return nil, fmt.Errorf("failed to read rows: %w", err) } diff --git a/flow/connectors/external_metadata/store.go b/flow/connectors/external_metadata/store.go index 515b622ee1..f253bf2288 100644 --- a/flow/connectors/external_metadata/store.go +++ b/flow/connectors/external_metadata/store.go @@ -172,11 +172,10 @@ func (p *PostgresMetadata) FinishBatch(ctx context.Context, jobName string, sync func (p *PostgresMetadata) UpdateNormalizeBatchID(ctx context.Context, jobName string, batchID int64) error { p.logger.Info("updating normalize batch id for job", slog.Int64("batchID", batchID)) - _, err := p.pool.Exec(ctx, - `UPDATE `+lastSyncStateTableName+ - ` SET normalize_batch_id=$2 WHERE job_name=$1`, jobName, batchID) - if err != nil { - p.logger.Error("failed to update normalize batch id", slog.Any("error", err)) + if _, err := p.pool.Exec(ctx, + `UPDATE `+lastSyncStateTableName+` SET normalize_batch_id=$2 WHERE job_name=$1`, jobName, batchID, + ); err != nil { + p.logger.Error("failed to update normalize batch id", slog.Int64("batchID", batchID), slog.Any("error", err)) return err } diff --git a/flow/connectors/postgres/qrep_query_executor.go b/flow/connectors/postgres/qrep_query_executor.go index 2fa6ecd7fe..bdfa7038ba 100644 --- a/flow/connectors/postgres/qrep_query_executor.go +++ b/flow/connectors/postgres/qrep_query_executor.go @@ -115,8 +115,8 @@ func (qe *QRepQueryExecutor) ProcessRows( } // Check for any errors encountered during iteration - if rows.Err() != nil { - return nil, fmt.Errorf("row iteration failed: %w", rows.Err()) + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("row iteration failed: %w", err) } batch := &model.QRecordBatch{ diff --git a/flow/connectors/snowflake/qrep.go b/flow/connectors/snowflake/qrep.go index 96a1fa9116..5566276999 100644 --- a/flow/connectors/snowflake/qrep.go +++ b/flow/connectors/snowflake/qrep.go @@ -89,14 +89,12 @@ func (c *SnowflakeConnector) SetupQRepMetadataTables(ctx context.Context, config } stageName := 
c.getStageNameForJob(config.FlowJobName) - err = c.createStage(ctx, stageName, config) - if err != nil { + if err := c.createStage(ctx, stageName, config); err != nil { return err } if config.WriteMode.WriteType == protos.QRepWriteType_QREP_WRITE_MODE_OVERWRITE { - _, err = c.execWithLogging(ctx, "TRUNCATE TABLE "+config.DestinationTableIdentifier) - if err != nil { + if _, err := c.execWithLogging(ctx, "TRUNCATE TABLE "+config.DestinationTableIdentifier); err != nil { return fmt.Errorf("failed to TRUNCATE table before query replication: %w", err) } } @@ -224,8 +222,7 @@ func (c *SnowflakeConnector) getColsFromTable(ctx context.Context, tableName str }) } - err = rows.Err() - if err != nil { + if err := rows.Err(); err != nil { return nil, fmt.Errorf("failed to read rows: %w", err) } @@ -280,11 +277,10 @@ func (c *SnowflakeConnector) dropStage(ctx context.Context, stagingPath string, } for _, object := range page.Contents { - _, err = s3svc.DeleteObject(ctx, &s3.DeleteObjectInput{ + if _, err := s3svc.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: aws.String(s3o.Bucket), Key: object.Key, - }) - if err != nil { + }); err != nil { c.logger.Error("failed to delete objects from bucket", slog.Any("error", err)) return fmt.Errorf("failed to delete objects from bucket: %w", err) } diff --git a/flow/connectors/snowflake/snowflake.go b/flow/connectors/snowflake/snowflake.go index 124a5c65a8..7a400d78a7 100644 --- a/flow/connectors/snowflake/snowflake.go +++ b/flow/connectors/snowflake/snowflake.go @@ -259,15 +259,13 @@ func (c *SnowflakeConnector) getDistinctTableNamesInBatch( var result pgtype.Text destinationTableNames := make([]string, 0) for rows.Next() { - err = rows.Scan(&result) - if err != nil { + if err := rows.Scan(&result); err != nil { return nil, fmt.Errorf("failed to read row: %w", err) } destinationTableNames = append(destinationTableNames, result.String) } - err = rows.Err() - if err != nil { + if err := rows.Err(); err != nil { return nil, fmt.Errorf("failed to read rows: %w", err) } return destinationTableNames, nil From 19d3c15e1dcd4c093e7f567bbaadd00621759a63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 13 Nov 2024 18:40:48 +0000 Subject: [PATCH 17/59] ci: replace s3 with minio (#2241) --- .github/workflows/cleanup.yml | 15 ---- .github/workflows/flow.yml | 43 ++++++++-- docker-compose-dev.yml | 2 +- docker-compose.yml | 2 +- flow/connectors/clickhouse/clickhouse.go | 16 ++-- flow/connectors/clickhouse/normalize.go | 2 +- flow/connectors/clickhouse/qrep_avro_sync.go | 45 ++++++----- flow/connectors/clickhouse/s3_stage.go | 40 +++------- flow/connectors/utils/aws.go | 82 +++++++++----------- flow/e2e/clickhouse/clickhouse.go | 9 ++- flow/e2e/s3/qrep_flow_s3_test.go | 24 ++++-- flow/e2e/s3/s3_helper.go | 60 +++++++++----- flow/e2e/snowflake/qrep_flow_sf_test.go | 2 + 13 files changed, 182 insertions(+), 160 deletions(-) diff --git a/.github/workflows/cleanup.yml b/.github/workflows/cleanup.yml index adf7e24039..5897eae7fd 100644 --- a/.github/workflows/cleanup.yml +++ b/.github/workflows/cleanup.yml @@ -54,20 +54,5 @@ jobs: run: go run main.go working-directory: ./e2e_cleanup env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_REGION: ${{ secrets.AWS_REGION }} TEST_BQ_CREDS: ${{ github.workspace }}/bq_service_account.json TEST_SF_CREDS: ${{ github.workspace }}/snowflake_creds.json - TEST_S3_CREDS: ${{ github.workspace }}/s3_creds.json - TEST_GCS_CREDS: ${{ github.workspace 
}}/gcs_creds.json - AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} - AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} - AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} - AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} - ENABLE_SQLSERVER_TESTS: true - SQLSERVER_HOST: ${{ secrets.SQLSERVER_HOST }} - SQLSERVER_PORT: ${{ secrets.SQLSERVER_PORT }} - SQLSERVER_USER: ${{ secrets.SQLSERVER_USER }} - SQLSERVER_PASSWORD: ${{ secrets.SQLSERVER_PASSWORD }} - SQLSERVER_DB: ${{ secrets.SQLSERVER_DB }} diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index ab9366d487..2673bda3f8 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -31,6 +31,16 @@ jobs: discovery.type: single-node xpack.security.enabled: false xpack.security.enrollment.enabled: false + minio: + image: bitnami/minio:2024.11.7 + ports: + - 9999:9999 + env: + MINIO_ROOT_USER: minio + MINIO_ROOT_PASSWORD: miniosecret + MINIO_API_PORT_NUMBER: 9999 + AWS_EC2_METADATA_DISABLED: true + MINIO_DEFAULT_BUCKETS: peerdb steps: - uses: actions/checkout@v4 @@ -103,8 +113,20 @@ jobs: with: version: "latest" - - name: start clickhouse - uses: getsentry/action-clickhouse-in-ci@v1 + - uses: ubicloud/cache@v4 + id: cache-clickhouse + with: + path: ./clickhouse + key: ${{ runner.os }}-clickhouse + + - name: Install ClickHouse + if: steps.cache-clickhouse.outputs.cache-hit != 'true' + run: | + curl https://clickhouse.com | sh + + - name: Run ClickHouse + run: | + ./clickhouse server & - name: Install Temporal CLI uses: temporalio/setup-temporal@v0 @@ -119,9 +141,20 @@ jobs: go test -p 32 ./... -timeout 900s working-directory: ./flow env: - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - AWS_REGION: ${{ secrets.AWS_REGION }} + AWS_ENDPOINT_URL_S3: http://localhost:9999 + AWS_ACCESS_KEY_ID: minio + AWS_SECRET_ACCESS_KEY: miniosecret + AWS_REGION: us-east-1 + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: minio + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: miniosecret + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_REGION: us-east-1 + PEERDB_CLICKHOUSE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://localhost:9999 + PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME: peerdb + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_ACCESS_KEY_ID: minio + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_SECRET_ACCESS_KEY: miniosecret + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_REGION: us-east-1 + PEERDB_SNOWFLAKE_AWS_CREDENTIALS_AWS_ENDPOINT_URL_S3: http://localhost:9999 + PEERDB_SNOWFLAKE_AWS_S3_BUCKET_NAME: peerdb TEST_BQ_CREDS: ${{ github.workspace }}/bq_service_account.json TEST_SF_CREDS: ${{ github.workspace }}/snowflake_creds.json TEST_S3_CREDS: ${{ github.workspace }}/s3_creds.json diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 4807bde8b8..7110819257 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -209,7 +209,7 @@ services: - flow-api minio: - image: minio/minio:RELEASE.2024-07-16T23-46-41Z + image: minio/minio:RELEASE.2024-11-07T00-52-20Z volumes: - minio-data:/data ports: diff --git a/docker-compose.yml b/docker-compose.yml index 393549f892..ce4a3994ad 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -184,7 +184,7 @@ services: - flow-api minio: - image: minio/minio:RELEASE.2024-07-16T23-46-41Z + image: minio/minio:RELEASE.2024-11-07T00-52-20Z restart: unless-stopped volumes: - minio-data:/data diff --git a/flow/connectors/clickhouse/clickhouse.go b/flow/connectors/clickhouse/clickhouse.go index 46bec64fbc..4e89757014 
100644 --- a/flow/connectors/clickhouse/clickhouse.go +++ b/flow/connectors/clickhouse/clickhouse.go @@ -33,7 +33,6 @@ type ClickHouseConnector struct { logger log.Logger config *protos.ClickhouseConfig credsProvider *utils.ClickHouseS3Credentials - s3Stage *ClickHouseS3Stage } func ValidateS3(ctx context.Context, creds *utils.ClickHouseS3Credentials) error { @@ -153,12 +152,10 @@ func NewClickHouseConnector( } awsBucketPath := config.S3Path - if awsBucketPath == "" { deploymentUID := peerdbenv.PeerDBDeploymentUID() flowName, _ := ctx.Value(shared.FlowNameKey).(string) - bucketPathSuffix := fmt.Sprintf("%s/%s", - url.PathEscape(deploymentUID), url.PathEscape(flowName)) + bucketPathSuffix := fmt.Sprintf("%s/%s", url.PathEscape(deploymentUID), url.PathEscape(flowName)) // Fallback: Get S3 credentials from environment awsBucketName, err := peerdbenv.PeerDBClickHouseAWSS3BucketName(ctx, env) if err != nil { @@ -170,10 +167,7 @@ func NewClickHouseConnector( awsBucketPath = fmt.Sprintf("s3://%s/%s", awsBucketName, bucketPathSuffix) } - clickHouseS3CredentialsNew := utils.ClickHouseS3Credentials{ - Provider: credentialsProvider, - BucketPath: awsBucketPath, - } + credentials, err := credentialsProvider.Retrieve(ctx) if err != nil { return nil, err @@ -184,8 +178,10 @@ func NewClickHouseConnector( PostgresMetadata: pgMetadata, config: config, logger: logger, - credsProvider: &clickHouseS3CredentialsNew, - s3Stage: NewClickHouseS3Stage(), + credsProvider: &utils.ClickHouseS3Credentials{ + Provider: credentialsProvider, + BucketPath: awsBucketPath, + }, } if credentials.AWS.SessionToken != "" { diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index 4474c6118d..770abc7f20 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -493,7 +493,7 @@ func (c *ClickHouseConnector) getDistinctTableNamesInBatch( func (c *ClickHouseConnector) copyAvroStageToDestination(ctx context.Context, flowJobName string, syncBatchID int64) error { avroSyncMethod := c.avroSyncMethod(flowJobName) - avroFile, err := c.s3Stage.GetAvroStage(ctx, flowJobName, syncBatchID) + avroFile, err := GetAvroStage(ctx, flowJobName, syncBatchID) if err != nil { return fmt.Errorf("failed to get avro stage: %w", err) } diff --git a/flow/connectors/clickhouse/qrep_avro_sync.go b/flow/connectors/clickhouse/qrep_avro_sync.go index edbd0392c9..f8277e3aad 100644 --- a/flow/connectors/clickhouse/qrep_avro_sync.go +++ b/flow/connectors/clickhouse/qrep_avro_sync.go @@ -18,8 +18,8 @@ import ( ) type ClickHouseAvroSyncMethod struct { - config *protos.QRepConfig - connector *ClickHouseConnector + *ClickHouseConnector + config *protos.QRepConfig } func NewClickHouseAvroSyncMethod( @@ -27,22 +27,22 @@ func NewClickHouseAvroSyncMethod( connector *ClickHouseConnector, ) *ClickHouseAvroSyncMethod { return &ClickHouseAvroSyncMethod{ - config: config, - connector: connector, + ClickHouseConnector: connector, + config: config, } } func (s *ClickHouseAvroSyncMethod) CopyStageToDestination(ctx context.Context, avroFile *avro.AvroFile) error { - stagingPath := s.connector.credsProvider.BucketPath + stagingPath := s.credsProvider.BucketPath s3o, err := utils.NewS3BucketAndPrefix(stagingPath) if err != nil { return err } - endpoint := s.connector.credsProvider.Provider.GetEndpointURL() - region := s.connector.credsProvider.Provider.GetRegion() + endpoint := s.credsProvider.Provider.GetEndpointURL() + region := s.credsProvider.Provider.GetRegion() avroFileUrl := 
utils.FileURLForS3Service(endpoint, region, s3o.Bucket, avroFile.FilePath) - creds, err := s.connector.credsProvider.Provider.Retrieve(ctx) + creds, err := s.credsProvider.Provider.Retrieve(ctx) if err != nil { return err } @@ -55,7 +55,7 @@ func (s *ClickHouseAvroSyncMethod) CopyStageToDestination(ctx context.Context, a s.config.DestinationTableIdentifier, avroFileUrl, creds.AWS.AccessKeyID, creds.AWS.SecretAccessKey, sessionTokenPart) - return s.connector.database.Exec(ctx, query) + return s.database.Exec(ctx, query) } func (s *ClickHouseAvroSyncMethod) SyncRecords( @@ -67,7 +67,7 @@ func (s *ClickHouseAvroSyncMethod) SyncRecords( dstTableName := s.config.DestinationTableIdentifier schema := stream.Schema() - s.connector.logger.Info("sync function called and schema acquired", + s.logger.Info("sync function called and schema acquired", slog.String("dstTable", dstTableName)) avroSchema, err := s.getAvroSchema(dstTableName, schema) @@ -81,14 +81,13 @@ func (s *ClickHouseAvroSyncMethod) SyncRecords( return 0, err } - s.connector.logger.Info("[SyncRecords] written records to Avro file", + s.logger.Info("[SyncRecords] written records to Avro file", slog.String("dstTable", dstTableName), slog.String("avroFile", avroFile.FilePath), slog.Int("numRecords", avroFile.NumRecords), slog.Int64("syncBatchID", syncBatchID)) - err = s.connector.s3Stage.SetAvroStage(ctx, flowJobName, syncBatchID, avroFile) - if err != nil { + if err := SetAvroStage(ctx, flowJobName, syncBatchID, avroFile); err != nil { return 0, fmt.Errorf("failed to set avro stage: %w", err) } @@ -103,7 +102,7 @@ func (s *ClickHouseAvroSyncMethod) SyncQRepRecords( stream *model.QRecordStream, ) (int, error) { dstTableName := config.DestinationTableIdentifier - stagingPath := s.connector.credsProvider.BucketPath + stagingPath := s.credsProvider.BucketPath startTime := time.Now() avroSchema, err := s.getAvroSchema(dstTableName, stream.Schema()) @@ -121,13 +120,13 @@ func (s *ClickHouseAvroSyncMethod) SyncQRepRecords( return 0, err } - creds, err := s.connector.credsProvider.Provider.Retrieve(ctx) + creds, err := s.credsProvider.Provider.Retrieve(ctx) if err != nil { return 0, err } - endpoint := s.connector.credsProvider.Provider.GetEndpointURL() - region := s.connector.credsProvider.Provider.GetRegion() + endpoint := s.credsProvider.Provider.GetEndpointURL() + region := s.credsProvider.Provider.GetRegion() avroFileUrl := utils.FileURLForS3Service(endpoint, region, s3o.Bucket, avroFile.FilePath) selector := make([]string, 0, len(dstTableSchema)) for _, col := range dstTableSchema { @@ -151,13 +150,13 @@ func (s *ClickHouseAvroSyncMethod) SyncQRepRecords( config.DestinationTableIdentifier, selectorStr, selectorStr, avroFileUrl, creds.AWS.AccessKeyID, creds.AWS.SecretAccessKey, sessionTokenPart) - if err := s.connector.database.Exec(ctx, query); err != nil { - s.connector.logger.Error("Failed to insert into select for ClickHouse", slog.Any("error", err)) + if err := s.database.Exec(ctx, query); err != nil { + s.logger.Error("Failed to insert into select for ClickHouse", slog.Any("error", err)) return 0, err } - if err := s.connector.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime); err != nil { - s.connector.logger.Error("Failed to finish QRep partition", slog.Any("error", err)) + if err := s.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime); err != nil { + s.logger.Error("Failed to finish QRep partition", slog.Any("error", err)) return 0, err } @@ -182,7 +181,7 @@ func (s *ClickHouseAvroSyncMethod) 
writeToAvroFile( identifierForFile string, flowJobName string, ) (*avro.AvroFile, error) { - stagingPath := s.connector.credsProvider.BucketPath + stagingPath := s.credsProvider.BucketPath ocfWriter := avro.NewPeerDBOCFWriter(stream, avroSchema, avro.CompressZstd, protos.DBType_CLICKHOUSE) s3o, err := utils.NewS3BucketAndPrefix(stagingPath) if err != nil { @@ -191,7 +190,7 @@ func (s *ClickHouseAvroSyncMethod) writeToAvroFile( s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro.zst", s3o.Prefix, flowJobName, identifierForFile) s3AvroFileKey = strings.Trim(s3AvroFileKey, "/") - avroFile, err := ocfWriter.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, s.connector.credsProvider.Provider) + avroFile, err := ocfWriter.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, s.credsProvider.Provider) if err != nil { return nil, fmt.Errorf("failed to write records to S3: %w", err) } diff --git a/flow/connectors/clickhouse/s3_stage.go b/flow/connectors/clickhouse/s3_stage.go index b4ca7d71c2..5f5eb899a4 100644 --- a/flow/connectors/clickhouse/s3_stage.go +++ b/flow/connectors/clickhouse/s3_stage.go @@ -6,19 +6,12 @@ import ( "fmt" "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" utils "github.com/PeerDB-io/peer-flow/connectors/utils/avro" "github.com/PeerDB-io/peer-flow/peerdbenv" ) -type ClickHouseS3Stage struct{} - -func NewClickHouseS3Stage() *ClickHouseS3Stage { - return &ClickHouseS3Stage{} -} - -func (c *ClickHouseS3Stage) SetAvroStage( +func SetAvroStage( ctx context.Context, flowJobName string, syncBatchID int64, @@ -29,36 +22,36 @@ func (c *ClickHouseS3Stage) SetAvroStage( return fmt.Errorf("failed to marshal avro file: %w", err) } - conn, err := c.getConn(ctx) + conn, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) if err != nil { return fmt.Errorf("failed to get connection: %w", err) } - _, err = conn.Exec(ctx, ` + if _, err := conn.Exec(ctx, ` INSERT INTO ch_s3_stage (flow_job_name, sync_batch_id, avro_file) VALUES ($1, $2, $3) ON CONFLICT (flow_job_name, sync_batch_id) - DO UPDATE SET avro_file = $3, created_at = CURRENT_TIMESTAMP - `, flowJobName, syncBatchID, avroFileJSON) - if err != nil { + DO UPDATE SET avro_file = $3, created_at = CURRENT_TIMESTAMP`, + flowJobName, syncBatchID, avroFileJSON, + ); err != nil { return fmt.Errorf("failed to set avro stage: %w", err) } return nil } -func (c *ClickHouseS3Stage) GetAvroStage(ctx context.Context, flowJobName string, syncBatchID int64) (*utils.AvroFile, error) { - conn, err := c.getConn(ctx) +func GetAvroStage(ctx context.Context, flowJobName string, syncBatchID int64) (*utils.AvroFile, error) { + conn, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) if err != nil { return nil, fmt.Errorf("failed to get connection: %w", err) } var avroFileJSON []byte - err = conn.QueryRow(ctx, ` + if err := conn.QueryRow(ctx, ` SELECT avro_file FROM ch_s3_stage - WHERE flow_job_name = $1 AND sync_batch_id = $2 - `, flowJobName, syncBatchID).Scan(&avroFileJSON) - if err != nil { + WHERE flow_job_name = $1 AND sync_batch_id = $2`, + flowJobName, syncBatchID, + ).Scan(&avroFileJSON); err != nil { if err == pgx.ErrNoRows { return nil, fmt.Errorf("no avro stage found for flow job %s and sync batch %d", flowJobName, syncBatchID) } @@ -72,12 +65,3 @@ func (c *ClickHouseS3Stage) GetAvroStage(ctx context.Context, flowJobName string return &avroFile, nil } - -func (c *ClickHouseS3Stage) getConn(ctx context.Context) (*pgxpool.Pool, error) { - conn, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) - if err != nil { - return nil, fmt.Errorf("unable 
to create catalog connection pool: %w", err) - } - - return conn, nil -} diff --git a/flow/connectors/utils/aws.go b/flow/connectors/utils/aws.go index 89fcab7b22..73eab604ad 100644 --- a/flow/connectors/utils/aws.go +++ b/flow/connectors/utils/aws.go @@ -126,12 +126,10 @@ func (s *StaticAWSCredentialsProvider) Retrieve(ctx context.Context) (AWSCredent } func (s *StaticAWSCredentialsProvider) GetEndpointURL() string { - endpoint := "" if s.credentials.EndpointUrl != nil { - endpoint = *s.credentials.EndpointUrl + return *s.credentials.EndpointUrl } - - return endpoint + return "" } func NewStaticAWSCredentialsProvider(credentials AWSCredentials, region string) AWSCredentialsProvider { @@ -209,12 +207,9 @@ func GetAWSCredentialsProvider(ctx context.Context, connectorName string, peerCr } func FileURLForS3Service(endpoint string, region string, bucket string, filePath string) string { - // example: min.io local bucket or GCS - matches := s3CompatibleServiceEndpointPattern.MatchString(endpoint) - if matches { + if s3CompatibleServiceEndpointPattern.MatchString(endpoint) { return fmt.Sprintf("%s/%s/%s", endpoint, bucket, filePath) } - return fmt.Sprintf("https://%s.s3.%s.amazonaws.com/%s", bucket, region, filePath) } @@ -238,25 +233,17 @@ func NewS3BucketAndPrefix(s3Path string) (*S3BucketAndPrefix, error) { } type resolverV2 struct { - userProvidedEndpointUrl string + url.URL } func (r *resolverV2) ResolveEndpoint(ctx context.Context, params s3.EndpointParameters) ( smithyendpoints.Endpoint, error, ) { - if r.userProvidedEndpointUrl != "" { - u, err := url.Parse(r.userProvidedEndpointUrl) - if err != nil { - return smithyendpoints.Endpoint{}, err - } - - u.Path += "/" + *params.Bucket - return smithyendpoints.Endpoint{ - URI: *u, - }, nil - } - - return s3.NewDefaultEndpointResolverV2().ResolveEndpoint(ctx, params) + uri := r.URL + uri.Path += "/" + *params.Bucket + return smithyendpoints.Endpoint{ + URI: uri, + }, nil } func CreateS3Client(ctx context.Context, credsProvider AWSCredentialsProvider) (*s3.Client, error) { @@ -265,28 +252,35 @@ func CreateS3Client(ctx context.Context, credsProvider AWSCredentialsProvider) ( return nil, err } - s3Client := s3.NewFromConfig(aws.Config{}, func(options *s3.Options) { - options.Region = credsProvider.GetRegion() - options.Credentials = credsProvider.GetUnderlyingProvider() - - if awsCredentials.EndpointUrl != nil && *awsCredentials.EndpointUrl != "" { - options.BaseEndpoint = awsCredentials.EndpointUrl - options.EndpointResolverV2 = &resolverV2{ - userProvidedEndpointUrl: *awsCredentials.EndpointUrl, - } + options := s3.Options{ + Region: credsProvider.GetRegion(), + Credentials: credsProvider.GetUnderlyingProvider(), + } + if awsCredentials.EndpointUrl != nil && *awsCredentials.EndpointUrl != "" { + options.BaseEndpoint = awsCredentials.EndpointUrl + options.UsePathStyle = true + url, err := url.Parse(*awsCredentials.EndpointUrl) + if err != nil { + return nil, err + } + options.EndpointResolverV2 = &resolverV2{ + URL: *url, + } + if strings.Contains(*awsCredentials.EndpointUrl, "storage.googleapis.com") { // Assign custom client with our own transport options.HTTPClient = &http.Client{ Transport: &RecalculateV4Signature{ next: http.DefaultTransport, signer: v4.NewSigner(), credentials: credsProvider.GetUnderlyingProvider(), - region: credsProvider.GetRegion(), + region: options.Region, }, } } - }) - return s3Client, nil + } + + return s3.New(options), nil } // RecalculateV4Signature allow GCS over S3, removing Accept-Encoding header from sign @@ 
-314,8 +308,7 @@ func (lt *RecalculateV4Signature) RoundTrip(req *http.Request) (*http.Response, if err != nil { return nil, err } - err = lt.signer.SignHTTP(req.Context(), creds, req, v4.GetPayloadHash(req.Context()), "s3", lt.region, timeDate) - if err != nil { + if err := lt.signer.SignHTTP(req.Context(), creds, req, v4.GetPayloadHash(req.Context()), "s3", lt.region, timeDate); err != nil { return nil, err } // Reset Accept-Encoding if desired @@ -331,21 +324,20 @@ func PutAndRemoveS3(ctx context.Context, client *s3.Client, bucket string, prefi reader := strings.NewReader(time.Now().Format(time.RFC3339)) bucketName := aws.String(bucket) temporaryObjectPath := prefix + "/" + _peerDBCheck + uuid.New().String() - temporaryObjectPath = strings.TrimPrefix(temporaryObjectPath, "/") - _, putErr := client.PutObject(ctx, &s3.PutObjectInput{ + key := aws.String(strings.TrimPrefix(temporaryObjectPath, "/")) + + if _, putErr := client.PutObject(ctx, &s3.PutObjectInput{ Bucket: bucketName, - Key: aws.String(temporaryObjectPath), + Key: key, Body: reader, - }) - if putErr != nil { + }); putErr != nil { return fmt.Errorf("failed to write to bucket: %w", putErr) } - _, delErr := client.DeleteObject(ctx, &s3.DeleteObjectInput{ + if _, delErr := client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: bucketName, - Key: aws.String(temporaryObjectPath), - }) - if delErr != nil { + Key: key, + }); delErr != nil { return fmt.Errorf("failed to delete from bucket: %w", delErr) } diff --git a/flow/e2e/clickhouse/clickhouse.go b/flow/e2e/clickhouse/clickhouse.go index e1eafd6b4b..79ff2aa7bb 100644 --- a/flow/e2e/clickhouse/clickhouse.go +++ b/flow/e2e/clickhouse/clickhouse.go @@ -56,6 +56,11 @@ func (s ClickHouseSuite) Peer() *protos.Peer { } func (s ClickHouseSuite) PeerForDatabase(dbname string) *protos.Peer { + region := "" + if s.s3Helper.S3Config.Region != nil { + region = *s.s3Helper.S3Config.Region + } + ret := &protos.Peer{ Name: e2e.AddSuffix(s, dbname), Type: protos.DBType_CLICKHOUSE, @@ -67,7 +72,7 @@ func (s ClickHouseSuite) PeerForDatabase(dbname string) *protos.Peer { S3Path: s.s3Helper.BucketName, AccessKeyId: *s.s3Helper.S3Config.AccessKeyId, SecretAccessKey: *s.s3Helper.S3Config.SecretAccessKey, - Region: *s.s3Helper.S3Config.Region, + Region: region, DisableTls: true, Endpoint: s.s3Helper.S3Config.Endpoint, }, @@ -188,7 +193,7 @@ func SetupSuite(t *testing.T) ClickHouseSuite { conn, err := e2e.SetupPostgres(t, suffix) require.NoError(t, err, "failed to setup postgres") - s3Helper, err := e2e_s3.NewS3TestHelper(false) + s3Helper, err := e2e_s3.NewS3TestHelper(e2e_s3.Minio) require.NoError(t, err, "failed to setup S3") s := ClickHouseSuite{ diff --git a/flow/e2e/s3/qrep_flow_s3_test.go b/flow/e2e/s3/qrep_flow_s3_test.go index 8148715148..c52fca7a38 100644 --- a/flow/e2e/s3/qrep_flow_s3_test.go +++ b/flow/e2e/s3/qrep_flow_s3_test.go @@ -50,6 +50,7 @@ func (s PeerFlowE2ETestSuiteS3) Peer() *protos.Peer { } func TestPeerFlowE2ETestSuiteS3(t *testing.T) { + t.Skip("skipping AWS, CI credentials revoked") // TODO fix CI e2eshared.RunSuite(t, SetupSuiteS3) } @@ -57,14 +58,16 @@ func TestPeerFlowE2ETestSuiteGCS(t *testing.T) { e2eshared.RunSuite(t, SetupSuiteGCS) } +func TestPeerFlowE2ETestSuiteMinIO(t *testing.T) { + e2eshared.RunSuite(t, SetupSuiteMinIO) +} + func (s PeerFlowE2ETestSuiteS3) setupSourceTable(tableName string, rowCount int) { - err := e2e.CreateTableForQRep(s.conn.Conn(), s.suffix, tableName) - require.NoError(s.t, err) - err = e2e.PopulateSourceTable(s.conn.Conn(), s.suffix, tableName, 
rowCount) - require.NoError(s.t, err) + require.NoError(s.t, e2e.CreateTableForQRep(s.conn.Conn(), s.suffix, tableName)) + require.NoError(s.t, e2e.PopulateSourceTable(s.conn.Conn(), s.suffix, tableName, rowCount)) } -func setupSuite(t *testing.T, gcs bool) PeerFlowE2ETestSuiteS3 { +func setupSuite(t *testing.T, s3environment S3Environment) PeerFlowE2ETestSuiteS3 { t.Helper() suffix := "s3_" + strings.ToLower(shared.RandomString(8)) @@ -73,7 +76,7 @@ func setupSuite(t *testing.T, gcs bool) PeerFlowE2ETestSuiteS3 { require.Fail(t, "failed to setup postgres", err) } - helper, err := NewS3TestHelper(gcs) + helper, err := NewS3TestHelper(s3environment) if err != nil { require.Fail(t, "failed to setup S3", err) } @@ -97,12 +100,17 @@ func (s PeerFlowE2ETestSuiteS3) Teardown() { func SetupSuiteS3(t *testing.T) PeerFlowE2ETestSuiteS3 { t.Helper() - return setupSuite(t, false) + return setupSuite(t, Aws) } func SetupSuiteGCS(t *testing.T) PeerFlowE2ETestSuiteS3 { t.Helper() - return setupSuite(t, true) + return setupSuite(t, Gcs) +} + +func SetupSuiteMinIO(t *testing.T) PeerFlowE2ETestSuiteS3 { + t.Helper() + return setupSuite(t, Minio) } func (s PeerFlowE2ETestSuiteS3) Test_Complete_QRep_Flow_S3() { diff --git a/flow/e2e/s3/s3_helper.go b/flow/e2e/s3/s3_helper.go index 20ac3e9039..af6be64f94 100644 --- a/flow/e2e/s3/s3_helper.go +++ b/flow/e2e/s3/s3_helper.go @@ -24,28 +24,48 @@ type S3TestHelper struct { prefix string } -func NewS3TestHelper(switchToGCS bool) (*S3TestHelper, error) { - credsPath := os.Getenv("TEST_S3_CREDS") - bucketName := "peerdb-test-bucket" - if switchToGCS { +type S3Environment int + +const ( + Aws S3Environment = iota + Gcs + Minio +) + +func NewS3TestHelper(s3environment S3Environment) (*S3TestHelper, error) { + var config utils.S3PeerCredentials + var endpoint string + var credsPath string + var bucketName string + switch s3environment { + case Aws: + credsPath = os.Getenv("TEST_S3_CREDS") + bucketName = "peerdb-test-bucket" + case Gcs: credsPath = os.Getenv("TEST_GCS_CREDS") bucketName = "peerdb_staging" + endpoint = "https://storage.googleapis.com" + case Minio: + bucketName = "peerdb" + endpoint = os.Getenv("AWS_ENDPOINT_URL_S3") + config.AccessKeyID = os.Getenv("AWS_ACCESS_KEY_ID") + config.SecretAccessKey = os.Getenv("AWS_SECRET_ACCESS_KEY") + config.Region = os.Getenv("AWS_REGION") + default: + panic(fmt.Sprintf("invalid s3environment %d", s3environment)) } - content, err := e2eshared.ReadFileToBytes(credsPath) - if err != nil { - return nil, fmt.Errorf("failed to read file: %w", err) - } + if credsPath != "" { + content, err := e2eshared.ReadFileToBytes(credsPath) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } - var config utils.S3PeerCredentials - err = json.Unmarshal(content, &config) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal json: %w", err) - } - endpoint := "" - if switchToGCS { - endpoint = "https://storage.googleapis.com" + if err := json.Unmarshal(content, &config); err != nil { + return nil, fmt.Errorf("failed to unmarshal json: %w", err) + } } + var endpointUrlPtr *string if endpoint != "" { endpointUrlPtr = &endpoint @@ -62,6 +82,7 @@ func NewS3TestHelper(switchToGCS bool) (*S3TestHelper, error) { if err != nil { return nil, err } + prefix := fmt.Sprintf("peerdb_test/%d_%s", time.Now().Unix(), shared.RandomString(6)) return &S3TestHelper{ client, @@ -106,13 +127,10 @@ func (h *S3TestHelper) CleanUp(ctx context.Context) error { // Delete each object for _, obj := range files.Contents { - deleteInput := 
&s3.DeleteObjectInput{ + if _, err := h.client.DeleteObject(ctx, &s3.DeleteObjectInput{ Bucket: &h.BucketName, Key: obj.Key, - } - - _, err := h.client.DeleteObject(ctx, deleteInput) - if err != nil { + }); err != nil { return err } } diff --git a/flow/e2e/snowflake/qrep_flow_sf_test.go b/flow/e2e/snowflake/qrep_flow_sf_test.go index 7fed8ada0a..0f86ce7670 100644 --- a/flow/e2e/snowflake/qrep_flow_sf_test.go +++ b/flow/e2e/snowflake/qrep_flow_sf_test.go @@ -123,6 +123,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_Upsert_Simple() } func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3() { + s.t.Skip("aws s3 broken in ci") // TODO fix tc := e2e.NewTemporalClient(s.t) numRows := 10 @@ -199,6 +200,7 @@ func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_Upsert_XMIN() { } func (s PeerFlowE2ETestSuiteSF) Test_Complete_QRep_Flow_Avro_SF_S3_Integration() { + s.t.Skip("aws s3 broken in ci") // TODO fix tc := e2e.NewTemporalClient(s.t) numRows := 10 From 7c4e1b23004af279ccd1ca1f5f457b31633f709d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 13 Nov 2024 18:48:15 +0000 Subject: [PATCH 18/59] e2e: resync weird names (#2237) fixes #2236 --- flow/cmd/handler.go | 28 ++++------ flow/connectors/postgres/client.go | 5 +- flow/connectors/postgres/postgres.go | 44 +++++++--------- flow/connectors/postgres/validate.go | 6 +-- flow/e2e/clickhouse/peer_flow_ch_test.go | 66 +++++++++++++++++++----- flow/e2e/congen.go | 22 +++----- flow/e2e/test_utils.go | 6 +-- 7 files changed, 96 insertions(+), 81 deletions(-) diff --git a/flow/cmd/handler.go b/flow/cmd/handler.go index 8b30331ae8..e2d1da2e39 100644 --- a/flow/cmd/handler.go +++ b/flow/cmd/handler.go @@ -408,7 +408,7 @@ func (h *FlowRequestHandler) handleCancelWorkflow(ctx context.Context, workflowI if err != nil { slog.Error(fmt.Sprintf("unable to cancel PeerFlow workflow: %s. 
Attempting to terminate.", err.Error())) terminationReason := fmt.Sprintf("workflow %s did not cancel in time.", workflowID) - if err = h.temporalClient.TerminateWorkflow(ctx, workflowID, runID, terminationReason); err != nil { + if err := h.temporalClient.TerminateWorkflow(ctx, workflowID, runID, terminationReason); err != nil { return fmt.Errorf("unable to terminate PeerFlow workflow: %w", err) } } @@ -456,10 +456,9 @@ func (h *FlowRequestHandler) DropPeer( } var inMirror pgtype.Int8 - queryErr := h.pool.QueryRow(ctx, - "SELECT COUNT(*) FROM flows WHERE source_peer=$1 or destination_peer=$2", - peerID, peerID).Scan(&inMirror) - if queryErr != nil { + if queryErr := h.pool.QueryRow(ctx, + "SELECT COUNT(*) FROM flows WHERE source_peer=$1 or destination_peer=$1", peerID, + ).Scan(&inMirror); queryErr != nil { return nil, fmt.Errorf("failed to check for existing mirrors with peer %s: %w", req.PeerName, queryErr) } @@ -467,8 +466,7 @@ func (h *FlowRequestHandler) DropPeer( return nil, fmt.Errorf("peer %s is currently involved in an ongoing mirror", req.PeerName) } - _, delErr := h.pool.Exec(ctx, "DELETE FROM peers WHERE name = $1", req.PeerName) - if delErr != nil { + if _, delErr := h.pool.Exec(ctx, "DELETE FROM peers WHERE name = $1", req.PeerName); delErr != nil { return nil, fmt.Errorf("failed to delete peer %s from metadata table: %w", req.PeerName, delErr) } @@ -477,9 +475,8 @@ func (h *FlowRequestHandler) DropPeer( func (h *FlowRequestHandler) getWorkflowID(ctx context.Context, flowJobName string) (string, error) { q := "SELECT workflow_id FROM flows WHERE name = $1" - row := h.pool.QueryRow(ctx, q, flowJobName) var workflowID string - if err := row.Scan(&workflowID); err != nil { + if err := h.pool.QueryRow(ctx, q, flowJobName).Scan(&workflowID); err != nil { return "", fmt.Errorf("unable to get workflowID for flow job %s: %w", flowJobName, err) } @@ -507,22 +504,19 @@ func (h *FlowRequestHandler) ResyncMirror( config.Resync = true config.DoInitialSnapshot = true // validate mirror first because once the mirror is dropped, there's no going back - _, err = h.ValidateCDCMirror(ctx, &protos.CreateCDCFlowRequest{ + if _, err := h.ValidateCDCMirror(ctx, &protos.CreateCDCFlowRequest{ ConnectionConfigs: config, - }) - if err != nil { + }); err != nil { return nil, err } - err = h.shutdownFlow(ctx, req.FlowJobName, req.DropStats) - if err != nil { + if err := h.shutdownFlow(ctx, req.FlowJobName, req.DropStats); err != nil { return nil, err } - _, err = h.CreateCDCFlow(ctx, &protos.CreateCDCFlowRequest{ + if _, err := h.CreateCDCFlow(ctx, &protos.CreateCDCFlowRequest{ ConnectionConfigs: config, - }) - if err != nil { + }); err != nil { return nil, err } return &protos.ResyncMirrorResponse{}, nil diff --git a/flow/connectors/postgres/client.go b/flow/connectors/postgres/client.go index 2d480f780f..1daabbf684 100644 --- a/flow/connectors/postgres/client.go +++ b/flow/connectors/postgres/client.go @@ -376,8 +376,7 @@ func (c *PostgresConnector) createSlotAndPublication( } srcTableNames = append(srcTableNames, parsedSrcTableName.String()) } - err := c.CreatePublication(ctx, srcTableNames, publication) - if err != nil { + if err := c.CreatePublication(ctx, srcTableNames, publication); err != nil { signal.SlotCreated <- SlotCreationResult{Err: err} return } @@ -395,7 +394,7 @@ func (c *PostgresConnector) createSlotAndPublication( c.logger.Warn(fmt.Sprintf("Creating replication slot '%s'", slot)) // THIS IS NOT IN A TX! 
- if _, err = conn.Exec(ctx, "SET idle_in_transaction_session_timeout=0"); err != nil { + if _, err := conn.Exec(ctx, "SET idle_in_transaction_session_timeout=0"); err != nil { signal.SlotCreated <- SlotCreationResult{Err: fmt.Errorf("[slot] error setting idle_in_transaction_session_timeout: %w", err)} return } diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index d0087d3beb..b3161161e1 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -1096,7 +1096,7 @@ func (c *PostgresConnector) SetupReplication(ctx context.Context, signal SlotSig return } - tableNameMapping := make(map[string]model.NameAndExclude) + tableNameMapping := make(map[string]model.NameAndExclude, len(req.TableNameMapping)) for k, v := range req.TableNameMapping { tableNameMapping[k] = model.NameAndExclude{ Name: v, @@ -1110,9 +1110,9 @@ func (c *PostgresConnector) SetupReplication(ctx context.Context, signal SlotSig func (c *PostgresConnector) PullFlowCleanup(ctx context.Context, jobName string) error { // Slotname would be the job name prefixed with "peerflow_slot_" slotName := "peerflow_slot_" + jobName - _, err := c.conn.Exec(ctx, `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots - WHERE slot_name=$1`, slotName) - if err != nil { + if _, err := c.conn.Exec( + ctx, `SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name=$1`, slotName, + ); err != nil { return fmt.Errorf("error dropping replication slot: %w", err) } @@ -1122,14 +1122,14 @@ func (c *PostgresConnector) PullFlowCleanup(ctx context.Context, jobName string) // as drop publication if exists requires permissions // for a publication which we did not create via peerdb user var publicationExists bool - err = c.conn.QueryRow(ctx, "SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname=$1)", publicationName).Scan(&publicationExists) - if err != nil { + if err := c.conn.QueryRow( + ctx, "SELECT EXISTS(SELECT 1 FROM pg_publication WHERE pubname=$1)", publicationName, + ).Scan(&publicationExists); err != nil { return fmt.Errorf("error checking if publication exists: %w", err) } if publicationExists { - _, err = c.conn.Exec(ctx, "DROP PUBLICATION IF EXISTS "+publicationName) - if err != nil { + if _, err := c.conn.Exec(ctx, "DROP PUBLICATION IF EXISTS "+publicationName); err != nil { return fmt.Errorf("error dropping publication: %w", err) } } @@ -1144,9 +1144,9 @@ func (c *PostgresConnector) SyncFlowCleanup(ctx context.Context, jobName string) } defer shared.RollbackTx(syncFlowCleanupTx, c.logger) - _, err = c.execWithLoggingTx(ctx, fmt.Sprintf(dropTableIfExistsSQL, c.metadataSchema, - getRawTableIdentifier(jobName)), syncFlowCleanupTx) - if err != nil { + if _, err := c.execWithLoggingTx(ctx, + fmt.Sprintf(dropTableIfExistsSQL, c.metadataSchema, getRawTableIdentifier(jobName)), syncFlowCleanupTx, + ); err != nil { return fmt.Errorf("unable to drop raw table: %w", err) } @@ -1162,8 +1162,7 @@ func (c *PostgresConnector) SyncFlowCleanup(ctx context.Context, jobName string) } } - err = syncFlowCleanupTx.Commit(ctx) - if err != nil { + if err := syncFlowCleanupTx.Commit(ctx); err != nil { return fmt.Errorf("unable to commit transaction for sync flow cleanup: %w", err) } return nil @@ -1222,9 +1221,9 @@ func (c *PostgresConnector) HandleSlotInfo( attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))) var intervalSinceLastNormalize *time.Duration - err = alerter.CatalogPool.QueryRow(ctx, "SELECT 
now()-max(end_time) FROM peerdb_stats.cdc_batches WHERE flow_name=$1", - alertKeys.FlowName).Scan(&intervalSinceLastNormalize) - if err != nil { + if err := alerter.CatalogPool.QueryRow( + ctx, "SELECT now()-max(end_time) FROM peerdb_stats.cdc_batches WHERE flow_name=$1", alertKeys.FlowName, + ).Scan(&intervalSinceLastNormalize); err != nil { logger.Warn("failed to get interval since last normalize", slog.Any("error", err)) } // what if the first normalize errors out/hangs? @@ -1244,12 +1243,9 @@ func (c *PostgresConnector) HandleSlotInfo( } func getOpenConnectionsForUser(ctx context.Context, conn *pgx.Conn, user string) (*protos.GetOpenConnectionsForUserResult, error) { - row := conn.QueryRow(ctx, getNumConnectionsForUser, user) - // COUNT() returns BIGINT var result pgtype.Int8 - err := row.Scan(&result) - if err != nil { + if err := conn.QueryRow(ctx, getNumConnectionsForUser, user).Scan(&result); err != nil { return nil, fmt.Errorf("error while reading result row: %w", err) } @@ -1260,12 +1256,9 @@ func getOpenConnectionsForUser(ctx context.Context, conn *pgx.Conn, user string) } func getOpenReplicationConnectionsForUser(ctx context.Context, conn *pgx.Conn, user string) (*protos.GetOpenConnectionsForUserResult, error) { - row := conn.QueryRow(ctx, getNumReplicationConnections, user) - // COUNT() returns BIGINT var result pgtype.Int8 - err := row.Scan(&result) - if err != nil { + if err := conn.QueryRow(ctx, getNumReplicationConnections, user).Scan(&result); err != nil { return nil, fmt.Errorf("error while reading result row: %w", err) } @@ -1483,8 +1476,7 @@ func (c *PostgresConnector) RemoveTableEntriesFromRawTable( func (c *PostgresConnector) GetVersion(ctx context.Context) (string, error) { var version string - err := c.conn.QueryRow(ctx, "SELECT version()").Scan(&version) - if err != nil { + if err := c.conn.QueryRow(ctx, "SELECT version()").Scan(&version); err != nil { return "", err } c.logger.Info("[postgres] version", slog.String("version", version)) diff --git a/flow/connectors/postgres/validate.go b/flow/connectors/postgres/validate.go index 2b5729f679..ca9665a317 100644 --- a/flow/connectors/postgres/validate.go +++ b/flow/connectors/postgres/validate.go @@ -145,13 +145,11 @@ func (c *PostgresConnector) CheckReplicationConnectivity(ctx context.Context) er func (c *PostgresConnector) CheckPublicationCreationPermissions(ctx context.Context, srcTableNames []string) error { pubName := "_peerdb_tmp_test_publication_" + shared.RandomString(5) - err := c.CreatePublication(ctx, srcTableNames, pubName) - if err != nil { + if err := c.CreatePublication(ctx, srcTableNames, pubName); err != nil { return err } - _, err = c.conn.Exec(ctx, "DROP PUBLICATION "+pubName) - if err != nil { + if _, err := c.conn.Exec(ctx, "DROP PUBLICATION "+pubName); err != nil { return fmt.Errorf("failed to drop publication: %v", err) } return nil diff --git a/flow/e2e/clickhouse/peer_flow_ch_test.go b/flow/e2e/clickhouse/peer_flow_ch_test.go index 3cf1f97597..8b28573104 100644 --- a/flow/e2e/clickhouse/peer_flow_ch_test.go +++ b/flow/e2e/clickhouse/peer_flow_ch_test.go @@ -11,6 +11,7 @@ import ( "github.com/shopspring/decimal" "github.com/stretchr/testify/require" + "github.com/PeerDB-io/peer-flow/connectors/clickhouse" "github.com/PeerDB-io/peer-flow/e2e" "github.com/PeerDB-io/peer-flow/e2eshared" "github.com/PeerDB-io/peer-flow/generated/protos" @@ -460,11 +461,10 @@ func (s ClickHouseSuite) Test_Replident_Full_Unchanged_TOAST_Updates() { e2e.RequireEnvCanceled(s.t, env) } -// Replicate a table called 
"table" and a column with hyphen in it -func (s ClickHouseSuite) Test_Weird_Table_And_Column() { - srcTableName := "table" - srcFullName := s.attachSchemaSuffix("\"table\"") - dstTableName := "table" +func (s ClickHouseSuite) WeirdTable(tableName string) { + srcTableName := tableName + srcFullName := s.attachSchemaSuffix(fmt.Sprintf("\"%s\"", tableName)) + dstTableName := tableName _, err := s.Conn().Exec(context.Background(), fmt.Sprintf(` CREATE TABLE IF NOT EXISTS %s ( @@ -474,14 +474,12 @@ func (s ClickHouseSuite) Test_Weird_Table_And_Column() { `, srcFullName)) require.NoError(s.t, err) - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s (key) VALUES ('init'); - `, srcFullName)) + _, err = s.Conn().Exec(context.Background(), fmt.Sprintf("INSERT INTO %s (key) VALUES ('init')", srcFullName)) require.NoError(s.t, err) connectionGen := e2e.FlowConnectionGenerationConfig{ - FlowJobName: s.attachSuffix("clickhouse_test_weird_table_and_column"), - TableNameMapping: map[string]string{s.attachSchemaSuffix("table"): dstTableName}, + FlowJobName: s.attachSuffix("clickhouse_test_weird_table_" + strings.ReplaceAll(tableName, "-", "_")), + TableNameMapping: map[string]string{s.attachSchemaSuffix(tableName): dstTableName}, Destination: s.Peer().Name, } flowConnConfig := connectionGen.GenerateFlowConnectionConfigs(s.t) @@ -492,15 +490,57 @@ func (s ClickHouseSuite) Test_Weird_Table_And_Column() { e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on initial", srcTableName, dstTableName, "id,key") - _, err = s.Conn().Exec(context.Background(), fmt.Sprintf(` - INSERT INTO %s (key) VALUES ('cdc'); - `, srcFullName)) + _, err = s.Conn().Exec(context.Background(), fmt.Sprintf("INSERT INTO %s (key) VALUES ('cdc')", srcFullName)) require.NoError(s.t, err) e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on cdc", srcTableName, dstTableName, "id,key") env.Cancel() e2e.RequireEnvCanceled(s.t, env) + + env = e2e.ExecuteWorkflow(tc, shared.PeerFlowTaskQueue, peerflow.DropFlowWorkflow, &protos.DropFlowInput{ + FlowJobName: flowConnConfig.FlowJobName, + DropFlowStats: false, + FlowConnectionConfigs: flowConnConfig, + }) + e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) + // now test weird names with rename based resync + ch, err := connclickhouse.Connect(context.Background(), s.Peer().GetClickhouseConfig()) + require.NoError(s.t, err) + require.NoError(s.t, ch.Exec(context.Background(), fmt.Sprintf("DROP TABLE `%s`", dstTableName))) + require.NoError(s.t, ch.Close()) + flowConnConfig.Resync = true + env = e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + e2e.SetupCDCFlowStatusQuery(s.t, env, flowConnConfig) + e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on initial", srcTableName, dstTableName, "id,key") + env.Cancel() + e2e.RequireEnvCanceled(s.t, env) + + env = e2e.ExecuteWorkflow(tc, shared.PeerFlowTaskQueue, peerflow.DropFlowWorkflow, &protos.DropFlowInput{ + FlowJobName: flowConnConfig.FlowJobName, + DropFlowStats: false, + FlowConnectionConfigs: flowConnConfig, + }) + e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) + // now test weird names with exchange based resync + ch, err = connclickhouse.Connect(context.Background(), s.Peer().GetClickhouseConfig()) + require.NoError(s.t, err) + require.NoError(s.t, ch.Exec(context.Background(), fmt.Sprintf("TRUNCATE TABLE `%s`", dstTableName))) + require.NoError(s.t, ch.Close()) + env = e2e.ExecutePeerflow(tc, peerflow.CDCFlowWorkflow, flowConnConfig, nil) + e2e.SetupCDCFlowStatusQuery(s.t, env, flowConnConfig) 
+ e2e.EnvWaitForEqualTablesWithNames(env, s, "waiting on initial", srcTableName, dstTableName, "id,key") + env.Cancel() + e2e.RequireEnvCanceled(s.t, env) +} + +func (s ClickHouseSuite) Test_WeirdTable_Keyword() { + s.WeirdTable("table") +} + +func (s ClickHouseSuite) Test_WeirdTable_Dash() { + s.t.SkipNow() // TODO fix avro errors by sanitizing names + s.WeirdTable("table-group") } // large NUMERICs (precision >76) are mapped to String on CH, test diff --git a/flow/e2e/congen.go b/flow/e2e/congen.go index bcb0bf48f8..91c5817d40 100644 --- a/flow/e2e/congen.go +++ b/flow/e2e/congen.go @@ -18,18 +18,16 @@ import ( func cleanPostgres(conn *pgx.Conn, suffix string) error { // drop the e2e_test schema with the given suffix if it exists - _, err := conn.Exec(context.Background(), fmt.Sprintf("DROP SCHEMA IF EXISTS e2e_test_%s CASCADE", suffix)) - if err != nil { + if _, err := conn.Exec(context.Background(), fmt.Sprintf("DROP SCHEMA IF EXISTS e2e_test_%s CASCADE", suffix)); err != nil { return fmt.Errorf("failed to drop e2e_test schema: %w", err) } // drop all open slots with the given suffix - _, err = conn.Exec( + if _, err := conn.Exec( context.Background(), "SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots WHERE slot_name LIKE $1", "%_"+suffix, - ) - if err != nil { + ); err != nil { return fmt.Errorf("failed to drop replication slots: %w", err) } @@ -47,8 +45,7 @@ func cleanPostgres(conn *pgx.Conn, suffix string) error { } for _, pubName := range publications { - _, err = conn.Exec(context.Background(), "DROP PUBLICATION "+pubName) - if err != nil { + if _, err := conn.Exec(context.Background(), "DROP PUBLICATION "+pubName); err != nil { return fmt.Errorf("failed to drop publication %s: %w", pubName, err) } } @@ -65,8 +62,7 @@ func setupPostgresSchema(t *testing.T, conn *pgx.Conn, suffix string) error { } // create an e2e_test schema - _, err = setupTx.Exec(context.Background(), "SELECT pg_advisory_xact_lock(hashtext('Megaton Mile'))") - if err != nil { + if _, err := setupTx.Exec(context.Background(), "SELECT pg_advisory_xact_lock(hashtext('Megaton Mile'))"); err != nil { return fmt.Errorf("failed to get lock: %w", err) } defer func() { @@ -77,12 +73,11 @@ func setupPostgresSchema(t *testing.T, conn *pgx.Conn, suffix string) error { }() // create an e2e_test schema - _, err = setupTx.Exec(context.Background(), "CREATE SCHEMA e2e_test_"+suffix) - if err != nil { + if _, err := setupTx.Exec(context.Background(), "CREATE SCHEMA e2e_test_"+suffix); err != nil { return fmt.Errorf("failed to create e2e_test schema: %w", err) } - _, err = setupTx.Exec(context.Background(), ` + if _, err := setupTx.Exec(context.Background(), ` CREATE OR REPLACE FUNCTION random_string( int ) RETURNS TEXT as $$ SELECT string_agg(substring('0123456789bcdfghjkmnpqrstvwxyz', round(random() * 30)::integer, 1), '') FROM generate_series(1, $1); @@ -95,8 +90,7 @@ func setupPostgresSchema(t *testing.T, conn *pgx.Conn, suffix string) error { LANGUAGE 'sql' VOLATILE SET search_path = 'pg_catalog'; - `) - if err != nil { + `); err != nil { return fmt.Errorf("failed to create utility functions: %w", err) } diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index 9dadc49852..ce134f819a 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -195,10 +195,9 @@ func EnvWaitForCount( func RequireEnvCanceled(t *testing.T, env WorkflowRun) { t.Helper() EnvWaitForFinished(t, env, time.Minute) - err := env.Error() var panicErr *temporal.PanicError var canceledErr *temporal.CanceledError - if 
err == nil { + if err := env.Error(); err == nil { t.Fatal("Expected workflow to be canceled, not completed") } else if errors.As(err, &panicErr) { t.Fatalf("Workflow panic: %s %s", panicErr.Error(), panicErr.StackTrace()) @@ -217,8 +216,7 @@ func SetupCDCFlowStatusQuery(t *testing.T, env WorkflowRun, config *protos.FlowC response, err := env.Query(shared.FlowStatusQuery, config.FlowJobName) if err == nil { var status protos.FlowStatus - err = response.Get(&status) - if err != nil { + if err := response.Get(&status); err != nil { t.Fatal(err) } else if status == protos.FlowStatus_STATUS_RUNNING { return From e885a2a3aef11d94d60c4369ce0d003daf85e295 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 19:02:38 +0000 Subject: [PATCH 19/59] fix(deps): update cargo dependencies (#2242) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [aws-config](https://redirect.github.com/smithy-lang/smithy-rs) | dependencies | patch | `1.5.9` -> `1.5.10` | | [aws-sdk-kms](https://redirect.github.com/awslabs/aws-sdk-rust) | dependencies | minor | `1.49.0` -> `1.50.0` | | [cargo-deb](https://lib.rs/crates/cargo-deb) ([source](https://redirect.github.com/kornelski/cargo-deb)) | dependencies | minor | `2.7.0` -> `2.8.0` | | [serde](https://serde.rs) ([source](https://redirect.github.com/serde-rs/serde)) | dependencies | patch | `1.0.214` -> `1.0.215` | | [tokio](https://tokio.rs) ([source](https://redirect.github.com/tokio-rs/tokio)) | dependencies | patch | `1.41.0` -> `1.41.1` | --- ### Release Notes
**kornelski/cargo-deb (cargo-deb)**

### [`v2.8.0`](https://redirect.github.com/kornelski/cargo-deb/blob/HEAD/CHANGELOG.md#280)

[Compare Source](https://redirect.github.com/kornelski/cargo-deb/compare/v2.7.0...v2.8.0)

- Don't add Vcs-\* to the binary control file, since lintian doesn't like it.
- Don't generate sha256sums files, since lintian doesn't like it either.
**serde-rs/serde (serde)**

### [`v1.0.215`](https://redirect.github.com/serde-rs/serde/releases/tag/v1.0.215)

[Compare Source](https://redirect.github.com/serde-rs/serde/compare/v1.0.214...v1.0.215)

- Produce warning when multiple fields or variants have the same deserialization name ([#2855](https://redirect.github.com/serde-rs/serde/issues/2855), [#2856](https://redirect.github.com/serde-rs/serde/issues/2856), [#2857](https://redirect.github.com/serde-rs/serde/issues/2857))
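To make the serde change above concrete, here is a minimal sketch of two fields sharing one deserialization name, the situation serde_derive 1.0.215 now flags with a warning. The type and field names are hypothetical, not from this codebase:

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct PeerConfig {
    // This field deserializes from the key "host".
    #[serde(rename = "host")]
    hostname: String,
    // `alias = "host"` makes this field answer to "host" as well, so two
    // fields now share one deserialization name. serde_derive 1.0.215+
    // emits a warning here; older versions accepted it silently, letting
    // one field shadow the other.
    #[serde(alias = "host")]
    address: String,
}
```

Before this release, such a collision typically surfaced only as a confusing runtime "missing field" error during deserialization, so the derive-time warning is a useful side effect of taking this lockfile bump.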
**tokio-rs/tokio (tokio)**

### [`v1.41.1`](https://redirect.github.com/tokio-rs/tokio/releases/tag/tokio-1.41.1): Tokio v1.41.1

[Compare Source](https://redirect.github.com/tokio-rs/tokio/compare/tokio-1.41.0...tokio-1.41.1)

### 1.41.1 (Nov 7th, 2024)

##### Fixed

- metrics: fix bug with wrong number of buckets for the histogram ([#6957])
- net: display `net` requirement for `net::UdpSocket` in docs ([#6938])
- net: fix typo in `TcpStream` internal comment ([#6944])

[#6957]: https://redirect.github.com/tokio-rs/tokio/pull/6957
[#6938]: https://redirect.github.com/tokio-rs/tokio/pull/6938
[#6944]: https://redirect.github.com/tokio-rs/tokio/pull/6944
--- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- nexus/Cargo.lock | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock index 580ff2b99c..d48d10d36d 100644 --- a/nexus/Cargo.lock +++ b/nexus/Cargo.lock @@ -237,9 +237,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-config" -version = "1.5.9" +version = "1.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d6448cfb224dd6a9b9ac734f58622dd0d4751f3589f3b777345745f46b2eb14" +checksum = "9b49afaa341e8dd8577e1a2200468f98956d6eda50bcf4a53246cc00174ba924" dependencies = [ "aws-credential-types", "aws-runtime", @@ -304,9 +304,9 @@ dependencies = [ [[package]] name = "aws-sdk-kms" -version = "1.49.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4c89f1d2e0df99ccd21f98598c1e587ad78bd87ae22a74aba392b5566bb038" +checksum = "bfd059dacda4dfd5b57f2bd453fc6555f9acb496cb77508d517da24cf5d73167" dependencies = [ "aws-credential-types", "aws-runtime", @@ -326,9 +326,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ded855583fa1d22e88fe39fd6062b062376e50a8211989e07cf5e38d52eb3453" +checksum = "09677244a9da92172c8dc60109b4a9658597d4d298b188dd0018b6a66b410ca4" dependencies = [ "aws-credential-types", "aws-runtime", @@ -348,9 +348,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.49.0" +version = "1.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9177ea1192e6601ae16c7273385690d88a7ed386a00b74a6bc894d12103cd933" +checksum = "81fea2f3a8bb3bd10932ae7ad59cc59f65f270fc9183a7e91f501dc5efbef7ee" dependencies = [ "aws-credential-types", "aws-runtime", @@ -370,9 +370,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "823ef553cf36713c97453e2ddff1eb8f62be7f4523544e2a5db64caf80100f0a" +checksum = "53dcf5e7d9bd1517b8b998e170e650047cea8a2b85fe1835abe3210713e541b7" dependencies = [ "aws-credential-types", "aws-runtime", @@ -796,9 +796,9 @@ dependencies = [ [[package]] name = "cargo-deb" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acc4fba3972a758843b4908ccc47e7b376a9c2d35f05f0d8ed54b6890d0647b" +checksum = "74d0500c935971265437386796faad57064d17bf2648f3f0a7e3c8d5a631de23" dependencies = [ "ar", "cargo_toml", @@ -814,7 +814,6 @@ dependencies = [ "regex", "serde", "serde_json", - "sha2", "tar", "tempfile", "toml", @@ -2133,7 +2132,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3820,9 +3819,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -3838,9 +3837,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", @@ -4283,9 +4282,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -4971,7 +4970,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] From 58a753f71db7551ada0db516423e79957f543a77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 13 Nov 2024 19:13:05 +0000 Subject: [PATCH 20/59] nexus: ssh (#2231) --- nexus/Cargo.lock | 187 +++++++++++++++++++-------- nexus/Cargo.toml | 2 + nexus/catalog/Cargo.toml | 2 +- nexus/catalog/src/lib.rs | 4 +- nexus/parser/Cargo.toml | 2 +- nexus/peer-bigquery/Cargo.toml | 2 +- nexus/peer-connections/Cargo.toml | 2 +- nexus/peer-cursor/Cargo.toml | 2 +- nexus/peer-mysql/Cargo.toml | 2 +- nexus/peer-postgres/Cargo.toml | 7 +- nexus/peer-postgres/src/lib.rs | 16 ++- nexus/peer-snowflake/Cargo.toml | 2 +- nexus/postgres-connection/Cargo.toml | 8 +- nexus/postgres-connection/src/lib.rs | 113 +++++++++++++--- nexus/server/Cargo.toml | 2 +- stacks/peerdb-server.Dockerfile | 3 +- 16 files changed, 263 insertions(+), 93 deletions(-) diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock index d48d10d36d..cc3650b6f8 100644 --- a/nexus/Cargo.lock +++ b/nexus/Cargo.lock @@ -663,7 +663,7 @@ version = "0.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" dependencies = [ - "bitflags", + "bitflags 2.6.0", "cexpr", "clang-sys", "itertools", @@ -675,6 +675,12 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.6.0" @@ -713,9 +719,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +checksum = "f5327f6c99920069d1fe374aa743be1af0031dea9f250852cdf1ae6a0861ee24" 
dependencies = [ "borsh-derive", "cfg_aliases", @@ -723,16 +729,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +checksum = "10aedd8f1a81a8aafbfde924b0e3061cd6fedd6f6bbcfc6a76e6fd426d7bfe26" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", "syn 2.0.87", - "syn_derive", ] [[package]] @@ -1127,7 +1132,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core", + "parking_lot_core 0.9.10", ] [[package]] @@ -2018,6 +2023,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.10.1" @@ -2121,9 +2135,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -2147,9 +2161,35 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags", + "bitflags 2.6.0", + "libc", + "redox_syscall 0.5.7", +] + +[[package]] +name = "libssh2-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc8a030b787e2119a731f1951d6a773e2280c660f8ec4b0f5e1505a386e71ee" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +dependencies = [ + "cc", "libc", - "redox_syscall", + "pkg-config", + "vcpkg", ] [[package]] @@ -2339,7 +2379,7 @@ checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" dependencies = [ "base64 0.21.7", "bindgen", - "bitflags", + "bitflags 2.6.0", "btoi", "byteorder", "bytes", @@ -2495,6 +2535,18 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-sys" +version = "0.9.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "outref" version = "0.5.1" @@ -2507,6 +2559,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + [[package]] name = "parking_lot" version = "0.12.3" @@ -2514,7 +2577,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core", + "parking_lot_core 0.9.10", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", ] [[package]] @@ -2525,7 +2602,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.7", "smallvec", "windows-targets 0.52.6", ] @@ -2685,6 +2762,7 @@ dependencies = [ "serde_bytes", "serde_json", "sqlparser", + "ssh2", "tokio", "tokio-postgres", "tracing", @@ -2964,11 +3042,15 @@ name = "postgres-connection" version = "0.1.0" dependencies = [ "anyhow", + "futures-util", "pt", "rustls 0.23.16", + "ssh2", "tokio", "tokio-postgres", "tokio-postgres-rustls", + "tokio-stream", + "tokio-util", "tracing", "urlencoding", ] @@ -3051,29 +3133,6 @@ dependencies = [ "toml_edit", ] -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro2" version = "1.0.89" @@ -3180,7 +3239,7 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" dependencies = [ - "bitflags", + "bitflags 2.6.0", "memchr", "unicase", ] @@ -3314,13 +3373,22 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags", + "bitflags 2.6.0", ] [[package]] @@ -3583,7 +3651,7 @@ version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ - "bitflags", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -3794,7 +3862,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -4037,6 +4105,18 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "ssh2" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7fe461910559f6d5604c3731d00d2aafc4a83d1665922e280f42f9a168d5455" +dependencies = [ + "bitflags 1.3.2", + "libc", + "libssh2-sys", + "parking_lot 0.11.2", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" 
@@ -4104,18 +4184,6 @@ dependencies = [
  "unicode-ident",
 ]

-[[package]]
-name = "syn_derive"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b"
-dependencies = [
- "proc-macro-error",
- "proc-macro2",
- "quote",
- "syn 2.0.87",
-]
-
 [[package]]
 name = "sync_wrapper"
 version = "0.1.2"
@@ -4290,7 +4358,7 @@ dependencies = [
  "bytes",
  "libc",
  "mio 1.0.2",
- "parking_lot",
+ "parking_lot 0.12.3",
  "pin-project-lite",
  "signal-hook-registry",
  "socket2",
@@ -4322,7 +4390,7 @@ dependencies = [
  "futures-channel",
  "futures-util",
  "log",
- "parking_lot",
+ "parking_lot 0.12.3",
  "percent-encoding",
  "phf",
  "pin-project-lite",
@@ -4401,6 +4469,7 @@ checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a"
 dependencies = [
  "bytes",
  "futures-core",
+ "futures-io",
  "futures-sink",
  "pin-project-lite",
  "tokio",
@@ -4798,6 +4867,12 @@ dependencies = [
  "uuid",
 ]

+[[package]]
+name = "vcpkg"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
+
 [[package]]
 name = "version_check"
 version = "0.9.5"
@@ -4943,7 +5018,7 @@ version = "1.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d"
 dependencies = [
- "redox_syscall",
+ "redox_syscall 0.5.7",
  "wasite",
  "web-sys",
 ]
diff --git a/nexus/Cargo.toml b/nexus/Cargo.toml
index 1131c8fd51..6efea5f4b2 100644
--- a/nexus/Cargo.toml
+++ b/nexus/Cargo.toml
@@ -28,7 +28,9 @@ dashmap = "6"
 rust_decimal = { version = "1", default-features = false, features = [
   "tokio-pg",
 ] }
+ssh2 = "0.9"
 sqlparser = { git = "https://github.com/peerdb-io/sqlparser-rs.git", branch = "main" }
+tokio = { version = "1", features = ["full"] }
 tracing = "0.1"
 pgwire = { version = "0.26", default-features = false, features = [
   "scram",
diff --git a/nexus/catalog/Cargo.toml b/nexus/catalog/Cargo.toml
index 162df1da24..af7b799636 100644
--- a/nexus/catalog/Cargo.toml
+++ b/nexus/catalog/Cargo.toml
@@ -18,7 +18,7 @@ pt = { path = "../pt" }
 refinery = { version = "0.8", default-features = false, features = ["tokio-postgres"] }
 serde_json = "1.0"
 sqlparser.workspace = true
-tokio = { version = "1.13.0", features = ["full"] }
+tokio.workspace = true
 tokio-postgres = { version = "0.7.6", features = [
   "with-chrono-0_4",
   "with-serde_json-1",
diff --git a/nexus/catalog/src/lib.rs b/nexus/catalog/src/lib.rs
index 015c66b29d..d5d023e571 100644
--- a/nexus/catalog/src/lib.rs
+++ b/nexus/catalog/src/lib.rs
@@ -72,8 +72,8 @@ impl<'a> CatalogConfig<'a> {
 impl Catalog {
     pub async fn new(pt_config: pt::peerdb_peers::PostgresConfig) -> anyhow::Result<Self> {
-        let client = connect_postgres(&pt_config).await?;
-        Ok(Self { pg: client })
+        let (pg, _) = connect_postgres(&pt_config).await?;
+        Ok(Self { pg })
     }

     pub async fn run_migrations(&mut self) -> anyhow::Result<()> {
diff --git a/nexus/parser/Cargo.toml b/nexus/parser/Cargo.toml
index b6aac7d88b..45bdc558c7 100644
--- a/nexus/parser/Cargo.toml
+++ b/nexus/parser/Cargo.toml
@@ -14,5 +14,5 @@ pgwire.workspace = true
 pt = { path = "../pt" }
 rand = "0.8"
 sqlparser.workspace = true
-tokio = { version = "1", features = ["full"] }
+tokio.workspace = true
 tracing.workspace = true
diff --git a/nexus/peer-bigquery/Cargo.toml b/nexus/peer-bigquery/Cargo.toml
index c10964568b..c3cf3c18b0 100644
--- a/nexus/peer-bigquery/Cargo.toml
+++ b/nexus/peer-bigquery/Cargo.toml
@@ -21,7 +21,7 @@ serde_json = "1.0"
 serde_bytes = "0.11"
 sqlparser.workspace = true
 tracing.workspace = true
-tokio = { version = "1.0", features = ["full"] }
+tokio.workspace = true
 gcp-bigquery-client = "0.24"
 uuid = { version = "1.0", features = ["serde", "v4"] }
 value = { path = "../value" }
diff --git a/nexus/peer-connections/Cargo.toml b/nexus/peer-connections/Cargo.toml
index 8aa69c7f05..01b172c74e 100644
--- a/nexus/peer-connections/Cargo.toml
+++ b/nexus/peer-connections/Cargo.toml
@@ -9,7 +9,7 @@ edition = "2021"
 anyhow = "1.0"
 chrono.workspace = true
 deadpool-postgres = { version = "0.14", features = ["rt_tokio_1"] }
-tokio = { version = "1", features = ["full"] }
+tokio.workspace = true
 tokio-postgres = { version = "0.7.6", features = [
   "with-chrono-0_4",
   "with-serde_json-1",
diff --git a/nexus/peer-cursor/Cargo.toml b/nexus/peer-cursor/Cargo.toml
index 74a2fe9de6..3db4eecbaf 100644
--- a/nexus/peer-cursor/Cargo.toml
+++ b/nexus/peer-cursor/Cargo.toml
@@ -12,6 +12,6 @@ dashmap.workspace = true
 futures = "0.3"
 pgwire.workspace = true
 sqlparser.workspace = true
-tokio = { version = "1.0", features = ["full"] }
+tokio.workspace = true
 tracing.workspace = true
 value = { path = "../value" }
diff --git a/nexus/peer-mysql/Cargo.toml b/nexus/peer-mysql/Cargo.toml
index 2fe32d8454..a6fed50b88 100644
--- a/nexus/peer-mysql/Cargo.toml
+++ b/nexus/peer-mysql/Cargo.toml
@@ -22,6 +22,6 @@ serde_json = "1.0"
 serde_bytes = "0.11"
 sqlparser.workspace = true
 tracing.workspace = true
-tokio = { version = "1.0", features = ["full"] }
+tokio.workspace = true
 tokio-stream = "0.1"
 value = { path = "../value" }
diff --git a/nexus/peer-postgres/Cargo.toml b/nexus/peer-postgres/Cargo.toml
index 873baa2673..78b055500b 100644
--- a/nexus/peer-postgres/Cargo.toml
+++ b/nexus/peer-postgres/Cargo.toml
@@ -8,7 +8,6 @@ edition = "2021"
 [dependencies]
 anyhow = "1.0"
 async-trait = "0.1"
-rust_decimal.workspace = true
 bytes = "1.0"
 chrono.workspace = true
 futures = "0.3"
@@ -16,13 +15,15 @@ peer-cursor = { path = "../peer-cursor" }
 peer-connections = { path = "../peer-connections" }
 pgwire.workspace = true
 postgres-connection = { path = "../postgres-connection" }
+postgres-inet = "0.19.0"
 pt = { path = "../pt" }
+rust_decimal.workspace = true
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 serde_bytes = "0.11"
-postgres-inet = "0.19.0"
+ssh2.workspace = true
 sqlparser.workspace = true
-tokio = { version = "1.0", features = ["full"] }
+tokio.workspace = true
 tokio-postgres = { version = "0.7.6", features = [
   "with-chrono-0_4",
   "with-serde_json-1",
diff --git a/nexus/peer-postgres/src/lib.rs b/nexus/peer-postgres/src/lib.rs
index 4e9c317d25..36676618ac 100644
--- a/nexus/peer-postgres/src/lib.rs
+++ b/nexus/peer-postgres/src/lib.rs
@@ -16,19 +16,29 @@ pub mod stream;
 // backing store.
 pub struct PostgresQueryExecutor {
     peername: String,
-    client: Box<Client>,
+    client: Client,
+    session: Option<ssh2::Session>,
 }

 impl PostgresQueryExecutor {
     pub async fn new(peername: String, config: &PostgresConfig) -> anyhow::Result<Self> {
-        let client = postgres_connection::connect_postgres(config).await?;
+        let (client, session) = postgres_connection::connect_postgres(config).await?;
         Ok(Self {
             peername,
-            client: Box::new(client),
+            client,
+            session,
         })
     }
 }

+impl Drop for PostgresQueryExecutor {
+    fn drop(&mut self) {
+        if let Some(session) = &mut self.session {
+            session.disconnect(None, "", None).ok();
+        }
+    }
+}
+
 async fn schema_from_query(client: &Client, query: &str) -> anyhow::Result<Schema> {
     let prepared = client.prepare_typed(query, &[]).await?;
diff --git a/nexus/peer-snowflake/Cargo.toml b/nexus/peer-snowflake/Cargo.toml
index e74c543181..bb33eed01d 100644
--- a/nexus/peer-snowflake/Cargo.toml
+++ b/nexus/peer-snowflake/Cargo.toml
@@ -25,7 +25,7 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 sha2 = "0.10"
 sqlparser.workspace = true
-tokio = { version = "1.21", features = ["full"] }
+tokio.workspace = true
 tracing.workspace = true
 ureq = { version = "2", features = ["json", "charset"] }
 value = { path = "../value" }
diff --git a/nexus/postgres-connection/Cargo.toml b/nexus/postgres-connection/Cargo.toml
index 93da581099..4d981c7c66 100644
--- a/nexus/postgres-connection/Cargo.toml
+++ b/nexus/postgres-connection/Cargo.toml
@@ -7,10 +7,14 @@ edition = "2021"

 [dependencies]
 anyhow = "1"
+futures-util = { version = "0.3", default-features = false, features = ["io"] }
 pt = { path = "../pt" }
 rustls = { version = "0.23", default-features = false, features = ["ring"] }
-urlencoding = "2"
+ssh2.workspace = true
+tokio.workspace = true
 tokio-postgres = "0.7.2"
 tokio-postgres-rustls = "0.13"
-tokio = { version = "1", features = ["full"] }
+tokio-util = { version = "0.7", features = ["compat"] }
+tokio-stream = "0.1"
 tracing.workspace = true
+urlencoding = "2"
diff --git a/nexus/postgres-connection/src/lib.rs b/nexus/postgres-connection/src/lib.rs
index 7b2591687a..69dafbde6e 100644
--- a/nexus/postgres-connection/src/lib.rs
+++ b/nexus/postgres-connection/src/lib.rs
@@ -1,9 +1,12 @@
-use pt::peerdb_peers::PostgresConfig;
+use pt::peerdb_peers::{PostgresConfig, SshConfig};
 use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
 use rustls::{ClientConfig, DigitallySignedStruct, RootCertStore, SignatureScheme};
 use std::fmt::Write;
+use std::io;
 use std::sync::Arc;
+use tokio::net::UnixStream;
 use tokio_postgres_rustls::MakeRustlsConnect;
+use tokio_util::compat::FuturesAsyncReadCompatExt;

 #[derive(Copy, Clone, Debug)]
 struct NoCertificateVerification;
@@ -77,25 +80,99 @@ pub fn get_pg_connection_string(config: &PostgresConfig) -> String {
     connection_string
 }

-pub async fn connect_postgres(config: &PostgresConfig) -> anyhow::Result<tokio_postgres::Client> {
-    let connection_string = get_pg_connection_string(config);
-
-    let mut config = ClientConfig::builder()
-        .with_root_certificates(RootCertStore::empty())
-        .with_no_client_auth();
-    config
-        .dangerous()
-        .set_certificate_verifier(Arc::new(NoCertificateVerification));
-    let tls_connector = MakeRustlsConnect::new(config);
-    let (client, connection) = tokio_postgres::connect(&connection_string, tls_connector)
-        .await
-        .map_err(|e| anyhow::anyhow!("error encountered while connecting to postgres {:?}", e))?;
+pub async fn create_tunnel(
+    tcp: std::net::TcpStream,
+    ssh_config: &SshConfig,
+    remote_server: String,
+    remote_port: u16,
+) -> io::Result<(ssh2::Session, UnixStream)> {
+    let mut session = ssh2::Session::new()?;
+    session.set_tcp_stream(tcp);
+    session.set_compress(true);
+    session.handshake()?;
+    if !ssh_config.password.is_empty() {
+        session.userauth_password(&ssh_config.user, &ssh_config.password)?;
+    }
+    if !ssh_config.private_key.is_empty() {
+        session.userauth_pubkey_memory(&ssh_config.user, None, &ssh_config.private_key, None)?;
+    }
+    if !ssh_config.host_key.is_empty() {
+        let mut known_hosts = session.known_hosts()?;
+        known_hosts.read_str(&ssh_config.host_key, ssh2::KnownHostFileKind::OpenSSH)?;
+    }
+    let (mut stream1, stream2) = tokio::net::UnixStream::pair()?;
+    let channel = session.channel_direct_tcpip(remote_server.as_str(), remote_port, None)?;
+    tracing::info!(
+        "tunnel to {:}:{:} opened",
+        remote_server.as_str(),
+        remote_port
+    );

-    tokio::task::spawn(async move {
-        if let Err(e) = connection.await {
-            tracing::info!("connection error: {}", e)
+    session.set_blocking(false);
+    tokio::spawn(async move {
+        let mut channel_stream = futures_util::io::AllowStdIo::new(channel.stream(0)).compat();
+        loop {
+            if let Err(err) = tokio::io::copy_bidirectional(&mut stream1, &mut channel_stream).await
+            {
+                if err.kind() == io::ErrorKind::WouldBlock {
+                    tokio::time::sleep(std::time::Duration::new(0, 123456789)).await;
+                    continue;
+                }
+                tracing::error!(
+                    "tunnel to {:}:{:} failed: {:}",
+                    remote_server.as_str(),
+                    remote_port,
+                    err
+                );
+            }
+            break;
         }
     });

-    Ok(client)
+    Ok((session, stream2))
+}
+
+pub async fn connect_postgres(
+    config: &PostgresConfig,
+) -> anyhow::Result<(tokio_postgres::Client, Option<ssh2::Session>)> {
+    if let Some(ssh_config) = &config.ssh_config {
+        let tcp = std::net::TcpStream::connect((ssh_config.host.as_str(), ssh_config.port as u16))?;
+        tcp.set_nodelay(true)?;
+        let (session, stream) =
+            create_tunnel(tcp, ssh_config, config.host.clone(), config.port as u16).await?;
+        let (client, connection) = tokio_postgres::Config::default()
+            .user(&config.user)
+            .password(&config.password)
+            .dbname(&config.database)
+            .application_name("peerdb_nexus")
+            .connect_raw(stream, tokio_postgres::NoTls)
+            .await?;
+        tokio::task::spawn(async move {
+            if let Err(e) = connection.await {
+                tracing::info!("connection error: {}", e)
+            }
+        });
+        Ok((client, Some(session)))
+    } else {
+        let connection_string = get_pg_connection_string(config);
+
+        let mut tls_config = ClientConfig::builder()
+            .with_root_certificates(RootCertStore::empty())
+            .with_no_client_auth();
+        tls_config
+            .dangerous()
+            .set_certificate_verifier(Arc::new(NoCertificateVerification));
+        let tls_connector = MakeRustlsConnect::new(tls_config);
+        let (client, connection) = tokio_postgres::connect(&connection_string, tls_connector)
+            .await
+            .map_err(|e| {
+                anyhow::anyhow!("error encountered while connecting to postgres {:?}", e)
+            })?;
+        tokio::task::spawn(async move {
+            if let Err(e) = connection.await {
+                tracing::info!("connection error: {}", e)
+            }
+        });
+        Ok((client, None))
+    }
 }
diff --git a/nexus/server/Cargo.toml b/nexus/server/Cargo.toml
index bb0e181c42..b7f05da271 100644
--- a/nexus/server/Cargo.toml
+++ b/nexus/server/Cargo.toml
@@ -53,7 +53,7 @@ sqlparser = { workspace = true, features = ["visitor"] }
 serde_json = "1.0"
 rand = "0.8"
 time = "0.3"
-tokio = { version = "1", features = ["full"] }
+tokio.workspace = true
 tracing.workspace = true
 tracing-appender = "0.2"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
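An editorial note on the API introduced above: `connect_postgres` now yields both a `tokio_postgres::Client` and an `Option<ssh2::Session>` (`Some` only when `ssh_config` is set), and callers such as `PostgresQueryExecutor` end the tunnel explicitly with `Session::disconnect` in their `Drop` impl. A minimal sketch of a caller, under those assumptions — `query_over_tunnel` is hypothetical, not part of this patch:

```rust
// Hypothetical caller of the connect_postgres API added in this patch.
// Assumes a pt::peerdb_peers::PostgresConfig whose ssh_config is populated.
async fn query_over_tunnel(config: &pt::peerdb_peers::PostgresConfig) -> anyhow::Result<()> {
    let (client, ssh_session) = postgres_connection::connect_postgres(config).await?;

    // The client behaves like any tokio_postgres client; traffic is relayed
    // through the in-process UnixStream pair into the ssh2 channel.
    let row = client.query_one("SELECT 1", &[]).await?;
    let one: i32 = row.get(0);
    tracing::info!("tunneled query returned {one}");

    // Mirror PostgresQueryExecutor's Drop impl: close the tunnel explicitly.
    if let Some(session) = ssh_session {
        session.disconnect(None, "", None).ok();
    }
    Ok(())
}
```

The `WouldBlock` handling in the forwarding task is the price of setting the ssh2 session non-blocking: `copy_bidirectional` surfaces the error, and the loop sleeps roughly 123 ms before retrying rather than busy-spinning.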
diff --git a/stacks/peerdb-server.Dockerfile b/stacks/peerdb-server.Dockerfile
index c4c5a2b041..689e3cf5b9 100644
--- a/stacks/peerdb-server.Dockerfile
+++ b/stacks/peerdb-server.Dockerfile
@@ -9,7 +9,8 @@ WORKDIR /root/nexus
 RUN cargo chef prepare --recipe-path recipe.json

 FROM chef as builder
-RUN apk add --no-cache build-base pkgconfig curl unzip
+ENV OPENSSL_STATIC=1
+RUN apk add --no-cache build-base pkgconfig curl unzip openssl-dev openssl-libs-static
 WORKDIR /root/nexus
 COPY scripts /root/scripts
 RUN /root/scripts/install-protobuf.sh

From 9641aecdcbbdc374b6caf08744bcbc991d973bae Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Wed, 13 Nov 2024 19:20:48 +0000
Subject: [PATCH 21/59] chore(deps): pin dependencies (#2244)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR contains the following updates:

| Package | Update | Change |
|---|---|---|
| ghcr.io/peerdb-io/flow-api | pinDigest | -> `a7c7d1c` |
| ghcr.io/peerdb-io/flow-snapshot-worker | pinDigest | -> `9b6db50` |
| ghcr.io/peerdb-io/flow-worker | pinDigest | -> `3b4da65` |
| ghcr.io/peerdb-io/peerdb-server | pinDigest | -> `dfb652e` |
| ghcr.io/peerdb-io/peerdb-ui | pinDigest | -> `cb4d1db` |
| grafana/pyroscope | pinDigest | -> `319bf32` |
| minio/minio | pinDigest | -> `ac59185` |
| postgres | major | `16-alpine` -> `17-alpine` |
| [temporalio/admin-tools](https://redirect.github.com/temporalio/docker-builds) | pinDigest | -> `da0c7a7` |
| [temporalio/auto-setup](https://redirect.github.com/temporalio/docker-builds) | pinDigest | -> `b1edc1e` |
| [temporalio/ui](https://redirect.github.com/temporalio/ui-server) | minor | `2.29.1` -> `2.31.2` |

---

### Release Notes
temporalio/ui-server (temporalio/ui)

### [`v2.31.2`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.31.2)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.31.1...v2.31.2)

- 2024-10-02 - [`7a19ad5`](https://redirect.github.com/temporalio/ui-server/commit/7a19ad564) - Update version.go to v2.30.2 ([#2363](https://redirect.github.com/temporalio/ui-server/issues/2363))
- 2024-10-01 - [`1bb9b9a`](https://redirect.github.com/temporalio/ui-server/commit/1bb9b9a9a) - Set startWorkflowDisabled to false ([#2362](https://redirect.github.com/temporalio/ui-server/issues/2362))
- 2024-10-01 - [`229e732`](https://redirect.github.com/temporalio/ui-server/commit/229e732f0) - MenuItem updates ([#2361](https://redirect.github.com/temporalio/ui-server/issues/2361))
- 2024-09-30 - [`fcc8279`](https://redirect.github.com/temporalio/ui-server/commit/fcc827931) - DT-2499 - wf reset ux improvement ([#2359](https://redirect.github.com/temporalio/ui-server/issues/2359))

### [`v2.31.1`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.31.1)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.31.0...v2.31.1)

- 2024-09-30 - [`a1d2cff`](https://redirect.github.com/temporalio/ui-server/commit/a1d2cff9) - update UI server version to v2.31.1 ([#2358](https://redirect.github.com/temporalio/ui-server/issues/2358))
- 2024-09-30 - [`07d826c`](https://redirect.github.com/temporalio/ui-server/commit/07d826c8) - 2.31.2 ([#2357](https://redirect.github.com/temporalio/ui-server/issues/2357))
- 2024-09-30 - [`c9415a2`](https://redirect.github.com/temporalio/ui-server/commit/c9415a2b) - DT-1740 - update workflow reset UI for new reapply types in 1.24 ([#2014](https://redirect.github.com/temporalio/ui-server/issues/2014))
- 2024-09-30 - [`7bf4fc6`](https://redirect.github.com/temporalio/ui-server/commit/7bf4fc69) - Nexus form with searchable combobox ([#2355](https://redirect.github.com/temporalio/ui-server/issues/2355))
- 2024-09-27 - [`8ebc23d`](https://redirect.github.com/temporalio/ui-server/commit/8ebc23dc) - Move stopPropagation to on:click for links ([#2356](https://redirect.github.com/temporalio/ui-server/issues/2356))
- 2024-09-27 - [`0106d97`](https://redirect.github.com/temporalio/ui-server/commit/0106d976) - Fix differences in duration ([#2353](https://redirect.github.com/temporalio/ui-server/issues/2353))
- 2024-09-27 - [`4ac3e46`](https://redirect.github.com/temporalio/ui-server/commit/4ac3e462) - Add empty state for Upcoming Runs on schedule detail page ([#2354](https://redirect.github.com/temporalio/ui-server/issues/2354))
- 2024-09-26 - [`6292257`](https://redirect.github.com/temporalio/ui-server/commit/6292257e) - DT-2347 - support multiselect in combobox ([#2352](https://redirect.github.com/temporalio/ui-server/issues/2352))

**Full Changelog**: https://github.com/temporalio/ui-server/compare/v2.31.0...v2.31.1

### [`v2.31.0`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.31.0)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.30.3...v2.31.0)

- 2024-09-26 - [`a0123c6`](https://redirect.github.com/temporalio/ui-server/commit/a0123c610) - Update go version to 1.23 in update ui-server ([#2351](https://redirect.github.com/temporalio/ui-server/issues/2351))
- 2024-09-25 - [`43ce09c`](https://redirect.github.com/temporalio/ui-server/commit/43ce09c62) - Use 1.23 ([#2349](https://redirect.github.com/temporalio/ui-server/issues/2349))
- 2024-09-25 - [`4f9ca12`](https://redirect.github.com/temporalio/ui-server/commit/4f9ca1292) - Use go 1.23.0 ([#2348](https://redirect.github.com/temporalio/ui-server/issues/2348))
- 2024-09-25 - [`3b5b1f0`](https://redirect.github.com/temporalio/ui-server/commit/3b5b1f0d2) - Update version.go to v2.31.0 ([#2347](https://redirect.github.com/temporalio/ui-server/issues/2347))
- 2024-09-24 - [`55b45e0`](https://redirect.github.com/temporalio/ui-server/commit/55b45e01f) - 2.31.1 ([#2346](https://redirect.github.com/temporalio/ui-server/issues/2346))
- 2024-09-24 - [`a9487eb`](https://redirect.github.com/temporalio/ui-server/commit/a9487eb8c) - Small UI fixes ([#2345](https://redirect.github.com/temporalio/ui-server/issues/2345))
- 2024-09-24 - [`24c74ab`](https://redirect.github.com/temporalio/ui-server/commit/24c74ab30) - 2.31.0 ([#2344](https://redirect.github.com/temporalio/ui-server/issues/2344))
- 2024-09-24 - [`1ab17ee`](https://redirect.github.com/temporalio/ui-server/commit/1ab17eec2) - Codefreeze 09.12.24 -> Main ([#2341](https://redirect.github.com/temporalio/ui-server/issues/2341))
- 2024-09-17 - [`651f1d0`](https://redirect.github.com/temporalio/ui-server/commit/651f1d089) - 2.30.8 ([#2331](https://redirect.github.com/temporalio/ui-server/issues/2331))
- 2024-09-17 - [`6853d85`](https://redirect.github.com/temporalio/ui-server/commit/6853d85db) - Remove child workflow timeline from row ([#2330](https://redirect.github.com/temporalio/ui-server/issues/2330))
- 2024-09-12 - [`04e1fa8`](https://redirect.github.com/temporalio/ui-server/commit/04e1fa8e7) - Nexus text changes ([#2326](https://redirect.github.com/temporalio/ui-server/issues/2326))
- 2024-09-12 - [`0ffe093`](https://redirect.github.com/temporalio/ui-server/commit/0ffe09333) - Don't allow multiple status filters of the same value ([#2325](https://redirect.github.com/temporalio/ui-server/issues/2325))
- 2024-09-12 - [`292196a`](https://redirect.github.com/temporalio/ui-server/commit/292196a08) - 2.30.7 ([#2322](https://redirect.github.com/temporalio/ui-server/issues/2322))
- 2024-09-12 - [`d9d9e45`](https://redirect.github.com/temporalio/ui-server/commit/d9d9e453c) - Allow slots to override text content ([#2321](https://redirect.github.com/temporalio/ui-server/issues/2321))
- 2024-09-12 - [`1b1c817`](https://redirect.github.com/temporalio/ui-server/commit/1b1c81762) - Fix workflow counts and reactive selectedId ([#2319](https://redirect.github.com/temporalio/ui-server/issues/2319))
- 2024-09-12 - [`8d238c3`](https://redirect.github.com/temporalio/ui-server/commit/8d238c3ef) - Add != operator to conditional menu defaults ([#2320](https://redirect.github.com/temporalio/ui-server/issues/2320))
- 2024-09-11 - [`13ccfd8`](https://redirect.github.com/temporalio/ui-server/commit/13ccfd833) - Add initial tooltip to badge ([#2310](https://redirect.github.com/temporalio/ui-server/issues/2310))
- 2024-09-11 - [`83abb85`](https://redirect.github.com/temporalio/ui-server/commit/83abb8512) - Add links to workflow counts on list workflows page ([#2315](https://redirect.github.com/temporalio/ui-server/issues/2315))

### [`v2.30.3`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.30.3)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.30.2...v2.30.3)

- 2024-09-10 - [`7e73a9d`](https://redirect.github.com/temporalio/ui-server/commit/7e73a9ddd) - Update version.go to v2.30.3 ([#2318](https://redirect.github.com/temporalio/ui-server/issues/2318))
- 2024-09-10 - [`6b4c425`](https://redirect.github.com/temporalio/ui-server/commit/6b4c4258a) - 2.30.6 ([#2317](https://redirect.github.com/temporalio/ui-server/issues/2317))
- 2024-09-10 - [`25b4795`](https://redirect.github.com/temporalio/ui-server/commit/25b4795cc) - Schedule search attributes ([#2275](https://redirect.github.com/temporalio/ui-server/issues/2275))
- 2024-09-06 - [`6f842c2`](https://redirect.github.com/temporalio/ui-server/commit/6f842c23e) - Timer value ([#2313](https://redirect.github.com/temporalio/ui-server/issues/2313))
- 2024-09-05 - [`8f10db3`](https://redirect.github.com/temporalio/ui-server/commit/8f10db330) - Support IS NULL and IS NOT NULL filter search ([#2298](https://redirect.github.com/temporalio/ui-server/issues/2298))
- 2024-09-04 - [`830de68`](https://redirect.github.com/temporalio/ui-server/commit/830de6836) - Add taskQueueView store to persist worker tab on task queue page ([#2309](https://redirect.github.com/temporalio/ui-server/issues/2309))
- 2024-09-04 - [`a6d1d8a`](https://redirect.github.com/temporalio/ui-server/commit/a6d1d8a4e) - Add checks for page settings ([#2307](https://redirect.github.com/temporalio/ui-server/issues/2307))
- 2024-09-04 - [`5ce6856`](https://redirect.github.com/temporalio/ui-server/commit/5ce6856a9) - 2.30.5 ([#2306](https://redirect.github.com/temporalio/ui-server/issues/2306))

### [`v2.30.2`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.30.2)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.30.1...v2.30.2)

- 2024-09-04 - [`6847ad7`](https://redirect.github.com/temporalio/ui-server/commit/6847ad72b) - Update version.go to v2.30.2 ([#2305](https://redirect.github.com/temporalio/ui-server/issues/2305))
- 2024-09-03 - [`4eb321e`](https://redirect.github.com/temporalio/ui-server/commit/4eb321eb4) - Fix copy button styles on CodeBlock ([#2303](https://redirect.github.com/temporalio/ui-server/issues/2303))
- 2024-09-03 - [`cde49cd`](https://redirect.github.com/temporalio/ui-server/commit/cde49cd71) - reduce hr margin on side nav ([#2304](https://redirect.github.com/temporalio/ui-server/issues/2304))
- 2024-09-03 - [`e269ec8`](https://redirect.github.com/temporalio/ui-server/commit/e269ec84a) - Fix max height on PaginatedTable component ([#2302](https://redirect.github.com/temporalio/ui-server/issues/2302))
- 2024-09-03 - [`7817cdb`](https://redirect.github.com/temporalio/ui-server/commit/7817cdb13) - Bidirectional Links ([#2299](https://redirect.github.com/temporalio/ui-server/issues/2299))
- 2024-09-03 - [`6ca894d`](https://redirect.github.com/temporalio/ui-server/commit/6ca894d74) - 2.30.4 ([#2301](https://redirect.github.com/temporalio/ui-server/issues/2301))
- 2024-09-03 - [`3010499`](https://redirect.github.com/temporalio/ui-server/commit/301049978) - update paginated table to support custom page size options ([#2300](https://redirect.github.com/temporalio/ui-server/issues/2300))
- 2024-09-03 - [`1b8db39`](https://redirect.github.com/temporalio/ui-server/commit/1b8db391a) - Make split card have a nicer fit and support hrefs. ([#2295](https://redirect.github.com/temporalio/ui-server/issues/2295))
- 2024-09-03 - [`2d3c60e`](https://redirect.github.com/temporalio/ui-server/commit/2d3c60eeb) - DT-2422 - changes for billing center v2 ([#2296](https://redirect.github.com/temporalio/ui-server/issues/2296))

### [`v2.30.1`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.30.1)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.30.0...v2.30.1)

- 2024-08-29 - [`d459dc0`](https://redirect.github.com/temporalio/ui-server/commit/d459dc033) - Update version.go to v2.30.1 ([#2294](https://redirect.github.com/temporalio/ui-server/issues/2294))
- 2024-08-29 - [`cd4a83b`](https://redirect.github.com/temporalio/ui-server/commit/cd4a83b85) - 2.30.3 ([#2293](https://redirect.github.com/temporalio/ui-server/issues/2293))
- 2024-08-29 - [`7db9de7`](https://redirect.github.com/temporalio/ui-server/commit/7db9de75b) - Add select-all to code-block ([#2292](https://redirect.github.com/temporalio/ui-server/issues/2292))
- 2024-08-29 - [`81b8507`](https://redirect.github.com/temporalio/ui-server/commit/81b850754) - 2.30.2 ([#2290](https://redirect.github.com/temporalio/ui-server/issues/2290))
- 2024-08-29 - [`bcde92d`](https://redirect.github.com/temporalio/ui-server/commit/bcde92dd2) - Extra checks for attributes ([#2291](https://redirect.github.com/temporalio/ui-server/issues/2291))
- 2024-08-29 - [`b2bc572`](https://redirect.github.com/temporalio/ui-server/commit/b2bc572c7) - Add check for primary key ([#2289](https://redirect.github.com/temporalio/ui-server/issues/2289))
- 2024-08-28 - [`ce8a68d`](https://redirect.github.com/temporalio/ui-server/commit/ce8a68dde) - 2.30.1 ([#2288](https://redirect.github.com/temporalio/ui-server/issues/2288))
- 2024-08-28 - [`67ab213`](https://redirect.github.com/temporalio/ui-server/commit/67ab21384) - Add label for Next Retry on Pending Activities ([#2287](https://redirect.github.com/temporalio/ui-server/issues/2287))
- 2024-08-28 - [`cbf6f61`](https://redirect.github.com/temporalio/ui-server/commit/cbf6f61cc) - 2.30.0 ([#2285](https://redirect.github.com/temporalio/ui-server/issues/2285))
- 2024-08-28 - [`fb8eb54`](https://redirect.github.com/temporalio/ui-server/commit/fb8eb5427) - Add next retry time and refresh workflow every 10 seconds ([#2284](https://redirect.github.com/temporalio/ui-server/issues/2284))
- 2024-08-28 - [`ceac218`](https://redirect.github.com/temporalio/ui-server/commit/ceac218d5) - Remove nexus filter for now, format for time for pending events ([#2283](https://redirect.github.com/temporalio/ui-server/issues/2283))

### [`v2.30.0`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.30.0)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.29.2...v2.30.0)

- 2024-08-27 - [`458b816`](https://redirect.github.com/temporalio/ui-server/commit/458b81637) - Update version.go to v2.30.0 ([#2282](https://redirect.github.com/temporalio/ui-server/issues/2282))
- 2024-08-27 - [`6b25926`](https://redirect.github.com/temporalio/ui-server/commit/6b259260a) - Workflow Execution Page - Bring into the Light ([#2269](https://redirect.github.com/temporalio/ui-server/issues/2269))
- 2024-08-26 - [`5a99fd4`](https://redirect.github.com/temporalio/ui-server/commit/5a99fd4a3) - 2.29.4 ([#2281](https://redirect.github.com/temporalio/ui-server/issues/2281))
- 2024-08-26 - [`389cc02`](https://redirect.github.com/temporalio/ui-server/commit/389cc02fa) - add pageSizeOptions prop and add max-h-none to table wrapper ([#2280](https://redirect.github.com/temporalio/ui-server/issues/2280))
- 2024-08-26 - [`eaf2401`](https://redirect.github.com/temporalio/ui-server/commit/eaf240110) - 2.29.3 ([#2279](https://redirect.github.com/temporalio/ui-server/issues/2279))
- 2024-08-23 - [`32d26b1`](https://redirect.github.com/temporalio/ui-server/commit/32d26b177) - Remove encodingDecoded field ([#2274](https://redirect.github.com/temporalio/ui-server/issues/2274))
- 2024-08-20 - [`61ab2bf`](https://redirect.github.com/temporalio/ui-server/commit/61ab2bf44) - Fix the wrong nano conversion in timestampToDate ([#2273](https://redirect.github.com/temporalio/ui-server/issues/2273))
- 2024-08-19 - [`38fa5e3`](https://redirect.github.com/temporalio/ui-server/commit/38fa5e31c) - remove expand when slot is empty ([#2242](https://redirect.github.com/temporalio/ui-server/issues/2242))
- 2024-08-14 - [`60340c2`](https://redirect.github.com/temporalio/ui-server/commit/60340c208) - 2.29.2 ([#2267](https://redirect.github.com/temporalio/ui-server/issues/2267))
- 2024-08-13 - [`e08f5ee`](https://redirect.github.com/temporalio/ui-server/commit/e08f5ee8a) - Add API PaginatedTable component ([#2261](https://redirect.github.com/temporalio/ui-server/issues/2261))
- 2024-08-13 - [`149e011`](https://redirect.github.com/temporalio/ui-server/commit/149e0111d) - 2.29.1 ([#2266](https://redirect.github.com/temporalio/ui-server/issues/2266))
- 2024-08-13 - [`5197587`](https://redirect.github.com/temporalio/ui-server/commit/51975879c) - Pass helpText prop to Nexus Form ([#2265](https://redirect.github.com/temporalio/ui-server/issues/2265))
- 2024-08-12 - [`6b2cf56`](https://redirect.github.com/temporalio/ui-server/commit/6b2cf5640) - 2.29.0 ([#2263](https://redirect.github.com/temporalio/ui-server/issues/2263))
- 2024-08-12 - [`0df27e6`](https://redirect.github.com/temporalio/ui-server/commit/0df27e674) - Ability to pass regex to form from create/edit ([#2264](https://redirect.github.com/temporalio/ui-server/issues/2264))
- 2024-08-12 - [`d7781aa`](https://redirect.github.com/temporalio/ui-server/commit/d7781aac2) - Markdown Render - Nexus Description ([#2258](https://redirect.github.com/temporalio/ui-server/issues/2258))
- 2024-08-09 - [`a458f6f`](https://redirect.github.com/temporalio/ui-server/commit/a458f6fd9) - Remove namespace name when loading schedule ([#2259](https://redirect.github.com/temporalio/ui-server/issues/2259))
- 2024-08-07 - [`785d006`](https://redirect.github.com/temporalio/ui-server/commit/785d00669) - Nexus Callback Information ([#2256](https://redirect.github.com/temporalio/ui-server/issues/2256))
- 2024-08-05 - [`3049ea3`](https://redirect.github.com/temporalio/ui-server/commit/3049ea3d7) - 2.28.11 ([#2254](https://redirect.github.com/temporalio/ui-server/issues/2254))
- 2024-08-05 - [`771e3f1`](https://redirect.github.com/temporalio/ui-server/commit/771e3f19c) - fix text styles in navigation badge ([#2253](https://redirect.github.com/temporalio/ui-server/issues/2253))
- 2024-08-02 - [`72d623d`](https://redirect.github.com/temporalio/ui-server/commit/72d623d36) - Revert "Update base images ([#2226](https://redirect.github.com/temporalio/ui-server/issues/2226))" ([#2252](https://redirect.github.com/temporalio/ui-server/issues/2252))
- 2024-08-01 - [`fc30f9a`](https://redirect.github.com/temporalio/ui-server/commit/fc30f9a02) - Extra Test Id's for easy testing ([#2251](https://redirect.github.com/temporalio/ui-server/issues/2251))
- 2024-08-01 - [`555e018`](https://redirect.github.com/temporalio/ui-server/commit/555e018f4) - Update base images ([#2226](https://redirect.github.com/temporalio/ui-server/issues/2226))
- 2024-08-01 - [`dae9aac`](https://redirect.github.com/temporalio/ui-server/commit/dae9aac42) - 2.28.10 ([#2249](https://redirect.github.com/temporalio/ui-server/issues/2249))
- 2024-08-01 - [`6cb9798`](https://redirect.github.com/temporalio/ui-server/commit/6cb97985b) - Small UI fixes ([#2248](https://redirect.github.com/temporalio/ui-server/issues/2248))
- 2024-08-01 - [`e9b014e`](https://redirect.github.com/temporalio/ui-server/commit/e9b014ede) - Nexus Help Text and various UI updates ([#2247](https://redirect.github.com/temporalio/ui-server/issues/2247))
- 2024-08-01 - [`82030a9`](https://redirect.github.com/temporalio/ui-server/commit/82030a9b0) - change min text color ([#2245](https://redirect.github.com/temporalio/ui-server/issues/2245))
- 2024-07-31 - [`7f8f1f5`](https://redirect.github.com/temporalio/ui-server/commit/7f8f1f512) - 2.28.9 ([#2244](https://redirect.github.com/temporalio/ui-server/issues/2244))
- 2024-07-31 - [`d39dd5e`](https://redirect.github.com/temporalio/ui-server/commit/d39dd5e4e) - Text styles audit ([#2236](https://redirect.github.com/temporalio/ui-server/issues/2236))
- 2024-07-30 - [`af0d545`](https://redirect.github.com/temporalio/ui-server/commit/af0d5451b) - Dispatch ChangeSet instead of creating new EditorState on resetView in CodeBlock ([#2243](https://redirect.github.com/temporalio/ui-server/issues/2243))
- 2024-07-29 - [`5f41480`](https://redirect.github.com/temporalio/ui-server/commit/5f41480f1) - update range colors ([#2240](https://redirect.github.com/temporalio/ui-server/issues/2240))
- 2024-07-29 - [`a638bce`](https://redirect.github.com/temporalio/ui-server/commit/a638bce01) - Fix editable CodeBlock ([#2239](https://redirect.github.com/temporalio/ui-server/issues/2239))
- 2024-07-26 - [`944162d`](https://redirect.github.com/temporalio/ui-server/commit/944162d38) - Refactor encoding and decoding to use same token logic and add unit tests ([#2237](https://redirect.github.com/temporalio/ui-server/issues/2237))
- 2024-07-24 - [`d15597a`](https://redirect.github.com/temporalio/ui-server/commit/d15597ac3) - 2.28.8 ([#2234](https://redirect.github.com/temporalio/ui-server/issues/2234))
- 2024-07-24 - [`5bc4a06`](https://redirect.github.com/temporalio/ui-server/commit/5bc4a068d) - Fix alert ([#2233](https://redirect.github.com/temporalio/ui-server/issues/2233))
- 2024-07-24 - [`2b14a4d`](https://redirect.github.com/temporalio/ui-server/commit/2b14a4d3d) - 2.28.7 ([#2232](https://redirect.github.com/temporalio/ui-server/issues/2232))
- 2024-07-24 - [`793ea20`](https://redirect.github.com/temporalio/ui-server/commit/793ea2063) - Fix WorkflowTaskFailed Alert when Cause is not provided ([#2230](https://redirect.github.com/temporalio/ui-server/issues/2230))
- 2024-07-23 - [`29fcbac`](https://redirect.github.com/temporalio/ui-server/commit/29fcbacc3) - Add handleChange to bottom nav controls ([#2229](https://redirect.github.com/temporalio/ui-server/issues/2229))
- 2024-07-22 - [`88eeac1`](https://redirect.github.com/temporalio/ui-server/commit/88eeac1df) - Small CodeBlock in Modal fixes ([#2225](https://redirect.github.com/temporalio/ui-server/issues/2225))
- 2024-07-22 - [`50a5e10`](https://redirect.github.com/temporalio/ui-server/commit/50a5e10e5) - Add Chromatic for visual regression testing ([#2169](https://redirect.github.com/temporalio/ui-server/issues/2169))
- 2024-07-22 - [`a251302`](https://redirect.github.com/temporalio/ui-server/commit/a25130253) - Update spacing and font weight ([#2222](https://redirect.github.com/temporalio/ui-server/issues/2222))
- 2024-07-22 - [`a205f52`](https://redirect.github.com/temporalio/ui-server/commit/a205f523c) - Return 404 page for endpoint not found. Use endpoint name on delete confirmation and remove the need to type DELETE. ([#2224](https://redirect.github.com/temporalio/ui-server/issues/2224))
- 2024-07-19 - [`a852a06`](https://redirect.github.com/temporalio/ui-server/commit/a852a06c4) - 2.28.6 ([#2221](https://redirect.github.com/temporalio/ui-server/issues/2221))
- 2024-07-19 - [`22e0d77`](https://redirect.github.com/temporalio/ui-server/commit/22e0d77f9) - fix navigation badge styles ([#2220](https://redirect.github.com/temporalio/ui-server/issues/2220))
- 2024-07-19 - [`7f351f8`](https://redirect.github.com/temporalio/ui-server/commit/7f351f812) - Add WorkflowProperties event and group with Marker and SAUpsert into new Other group ([#2219](https://redirect.github.com/temporalio/ui-server/issues/2219))

### [`v2.29.2`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.29.2)

[Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.29.1...v2.29.2)

- 2024-07-19 - [`2d20650`](https://redirect.github.com/temporalio/ui-server/commit/2d2065099) - Update version.go to v2.29.2 ([#2218](https://redirect.github.com/temporalio/ui-server/issues/2218))
- 2024-07-18 - [`65ece25`](https://redirect.github.com/temporalio/ui-server/commit/65ece2565) - 2.28.5 ([#2217](https://redirect.github.com/temporalio/ui-server/issues/2217))
- 2024-07-18 - [`7ef4701`](https://redirect.github.com/temporalio/ui-server/commit/7ef470154) - New nexus icon ([#2216](https://redirect.github.com/temporalio/ui-server/issues/2216))
- 2024-07-17 - [`7ec7291`](https://redirect.github.com/temporalio/ui-server/commit/7ec729131) - Fix hover background on Namespace selector ([#2214](https://redirect.github.com/temporalio/ui-server/issues/2214))
- 2024-07-17 - [`c84b7df`](https://redirect.github.com/temporalio/ui-server/commit/c84b7dff0) - Update codemirror to get bug fixes ([#2213](https://redirect.github.com/temporalio/ui-server/issues/2213))
- 2024-07-16 - [`3d38441`](https://redirect.github.com/temporalio/ui-server/commit/3d38441b2) - 2.28.4 ([#2211](https://redirect.github.com/temporalio/ui-server/issues/2211))
- 2024-07-16 - [`b04945a`](https://redirect.github.com/temporalio/ui-server/commit/b04945aac) - add new components for promo nav item ([#2210](https://redirect.github.com/temporalio/ui-server/issues/2210))
- 2024-07-16 - [`118c87a`](https://redirect.github.com/temporalio/ui-server/commit/118c87a40) - 2.28.3 ([#2209](https://redirect.github.com/temporalio/ui-server/issues/2209))
- 2024-07-16 - [`c0127ad`](https://redirect.github.com/temporalio/ui-server/commit/c0127ad05) - New Nexus icon ([#2208](https://redirect.github.com/temporalio/ui-server/issues/2208))
- 2024-07-15 - [`a976552`](https://redirect.github.com/temporalio/ui-server/commit/a97655209) - Nexus Bug Bash UI updates ([#2207](https://redirect.github.com/temporalio/ui-server/issues/2207))
- 2024-07-15 - [`f418976`](https://redirect.github.com/temporalio/ui-server/commit/f418976ce) - Mobile Nav ([#2197](https://redirect.github.com/temporalio/ui-server/issues/2197))
---

### Configuration

📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined).

🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied.

♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox.

👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired.

---

- [ ] If you want to rebase/retry this PR, check this box

---

This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb).

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
 docker-compose-dev.yml | 12 ++++++------
 docker-compose.yml     | 20 ++++++++++----------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml
index 7110819257..7309472d75 100644
--- a/docker-compose-dev.yml
+++ b/docker-compose-dev.yml
@@ -39,7 +39,7 @@ x-flow-worker-env: &flow-worker-env
 services:
   catalog:
     container_name: catalog
-    image: postgres:16-alpine
+    image: postgres:17-alpine@sha256:d388be15cfb665c723da47cccdc7ea5c003ed71f700c5419bbd075033227ce1f
     command: -c config_file=/etc/postgresql.conf
     ports:
       - 9901:5432
@@ -73,7 +73,7 @@ services:
       - POSTGRES_PWD=postgres
       - POSTGRES_SEEDS=catalog
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml
-    image: temporalio/auto-setup:1.25
+    image: temporalio/auto-setup:1.25@sha256:b1edc1e20002d958c8182f2ae08dee877a125083683a627a44917683419ba6a8
     ports:
       - 7233:7233
     volumes:
@@ -83,7 +83,7 @@ services:

   pyroscope:
     container_name: pyroscope
-    image: grafana/pyroscope:latest
+    image: grafana/pyroscope:latest@sha256:319bf32ae06b67c1b9795c06ae6c3ba67e9b43382896df7a9df54cdb47a5c535
     ports:
       - 4040:4040

@@ -95,7 +95,7 @@ services:
      - TEMPORAL_ADDRESS=temporal:7233
      - TEMPORAL_CLI_ADDRESS=temporal:7233
      - TEMPORAL_CLI_SHOW_STACKS=1
-    image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1
+    image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1@sha256:da0c7a7982b571857173ab8f058e7f139b3054800abb4dcb100445d29a563ee8
     stdin_open: true
     tty: true
     entrypoint: /etc/temporal/entrypoint.sh
@@ -116,7 +116,7 @@ services:
      - TEMPORAL_ADDRESS=temporal:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
      - TEMPORAL_CSRF_COOKIE_INSECURE=true
-    image: temporalio/ui:2.29.1
+    image: temporalio/ui:2.31.2@sha256:28bb3ea5a6ea3e09f16b521f32ab727c96470f7f1e420c66a6cbfb02001a8aa2
     ports:
       - 8085:8080

@@ -209,7 +209,7 @@ services:
       - flow-api

   minio:
-    image: minio/minio:RELEASE.2024-11-07T00-52-20Z
+    image: minio/minio:RELEASE.2024-11-07T00-52-20Z@sha256:ac591851803a79aee64bc37f66d77c56b0a4b6e12d9e5356380f4105510f2332
     volumes:
       - minio-data:/data
     ports:
diff --git a/docker-compose.yml b/docker-compose.yml
index ce4a3994ad..c03c9993de 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -32,7 +32,7 @@ x-flow-worker-env: &flow-worker-env
 services:
   catalog:
     container_name: catalog
-    image: postgres:16-alpine
+    image: postgres:17-alpine@sha256:d388be15cfb665c723da47cccdc7ea5c003ed71f700c5419bbd075033227ce1f
     command: -c config_file=/etc/postgresql.conf
     restart: unless-stopped
     ports:
@@ -68,7 +68,7 @@ services:
      - POSTGRES_PWD=postgres
      - POSTGRES_SEEDS=catalog
      - DYNAMIC_CONFIG_FILE_PATH=config/dynamicconfig/development-sql.yaml
-    image: temporalio/auto-setup:1.25
+    image: temporalio/auto-setup:1.25@sha256:b1edc1e20002d958c8182f2ae08dee877a125083683a627a44917683419ba6a8
     ports:
       - 7233:7233
     volumes:
@@ -85,7 +85,7 @@ services:
      - TEMPORAL_ADDRESS=temporal:7233
      - TEMPORAL_CLI_ADDRESS=temporal:7233
      - TEMPORAL_CLI_SHOW_STACKS=1
-    image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1
+    image: temporalio/admin-tools:1.25.2-tctl-1.18.1-cli-1.1.1@sha256:da0c7a7982b571857173ab8f058e7f139b3054800abb4dcb100445d29a563ee8
     stdin_open: true
     tty: true
     entrypoint: /etc/temporal/entrypoint.sh
@@ -106,13 +106,13 @@ services:
      - TEMPORAL_ADDRESS=temporal:7233
      - TEMPORAL_CORS_ORIGINS=http://localhost:3000
      - TEMPORAL_CSRF_COOKIE_INSECURE=true
-    image: temporalio/ui:2.29.1
+    image: temporalio/ui:2.31.2@sha256:28bb3ea5a6ea3e09f16b521f32ab727c96470f7f1e420c66a6cbfb02001a8aa2
     ports:
       - 8085:8080

   flow-api:
     container_name: flow_api
-    image: ghcr.io/peerdb-io/flow-api:latest-dev
+    image: ghcr.io/peerdb-io/flow-api:latest-dev@sha256:a7c7d1cb12f618a747233556820df114acc4d2e737e487bdfdbb0051720710c9
     restart: unless-stopped
     ports:
       - 8112:8112
@@ -128,7 +128,7 @@ services:

   flow-snapshot-worker:
     container_name: flow-snapshot-worker
-    image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-dev
+    image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-dev@sha256:9b6db5039e4f73f7d205a40400683ff9ca242dbb2eee405a2e9b056e947c8666
     restart: unless-stopped
     environment:
       <<: [*catalog-config, *flow-worker-env, *minio-config]
@@ -138,7 +138,7 @@ services:

   flow-worker:
     container_name: flow-worker
-    image: ghcr.io/peerdb-io/flow-worker:latest-dev
+    image: ghcr.io/peerdb-io/flow-worker:latest-dev@sha256:3b4da6550deacd638e99592b65d9f61191ed020b8268efa52297c3101ab37c16
     restart: unless-stopped
     environment:
       <<: [*catalog-config, *flow-worker-env, *minio-config]
@@ -151,7 +151,7 @@ services:
   peerdb:
     container_name: peerdb-server
     stop_signal: SIGINT
-    image: ghcr.io/peerdb-io/peerdb-server:latest-dev
+    image: ghcr.io/peerdb-io/peerdb-server:latest-dev@sha256:dfb652eebb410198f28e720d04d17c4de0698581d214ce99337ee1efc0874ba4
     restart: unless-stopped
     environment:
       <<: *catalog-config
@@ -167,7 +167,7 @@ services:

   peerdb-ui:
     container_name: peerdb-ui
-    image: ghcr.io/peerdb-io/peerdb-ui:latest-dev
+    image: ghcr.io/peerdb-io/peerdb-ui:latest-dev@sha256:cb4d1dbd167c6bdd44e795a33b69ab5001932c7cbd8ac02126b79331022c17a0
     restart: unless-stopped
     ports:
       - 3000:3000
@@ -184,7 +184,7 @@ services:
       - flow-api

   minio:
-    image: minio/minio:RELEASE.2024-11-07T00-52-20Z
+    image: minio/minio:RELEASE.2024-11-07T00-52-20Z@sha256:ac591851803a79aee64bc37f66d77c56b0a4b6e12d9e5356380f4105510f2332
     restart: unless-stopped
     volumes:
       - minio-data:/data

From 1f969d191ea98df5933f85a7782d77e8add441cc Mon Sep 17 00:00:00 2001
From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com>
Date: Thu, 14 Nov 2024 01:06:28 +0530
Subject: [PATCH 22/59] Fix resync logic for postgres to postgres (#2240)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR fixes the intended drop-if-exists logic for _resync tables when a
Postgres to Postgres mirror is resynced, along with a casing issue.

Co-authored-by: Philip Dubé
---
 flow/connectors/postgres/postgres.go | 20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go
index b3161161e1..c8a853287e 100644
--- a/flow/connectors/postgres/postgres.go
+++ b/flow/connectors/postgres/postgres.go
@@ -891,15 +891,17 @@ func (c *PostgresConnector) SetupNormalizedTable(
 	if tableAlreadyExists {
 		c.logger.Info("[postgres] table already exists, skipping",
 			slog.String("table", tableIdentifier))
-		if config.IsResync {
-			err := c.ExecuteCommand(ctx, fmt.Sprintf(dropTableIfExistsSQL,
-				QuoteIdentifier(parsedNormalizedTable.Schema),
-				QuoteIdentifier(parsedNormalizedTable.Table)))
-			if err != nil {
-				return false, fmt.Errorf("error while dropping _resync table: %w", err)
-			}
+		if !config.IsResync {
+			return true, nil
+		}
+
+		err := c.ExecuteCommand(ctx, fmt.Sprintf(dropTableIfExistsSQL,
+			QuoteIdentifier(parsedNormalizedTable.Schema),
+			QuoteIdentifier(parsedNormalizedTable.Table)))
+		if err != nil {
+			return false, fmt.Errorf("error while dropping _resync table: %w", err)
 		}
-		return true, nil
+		c.logger.Info("[postgres] dropped resync table for resync", slog.String("resyncTable", parsedNormalizedTable.String()))
 	}

 	// convert the column names and types to Postgres types
@@ -1437,7 +1439,7 @@ func (c *PostgresConnector) RenameTables(
 	}

 	// rename the src table to dst
-	_, err = c.execWithLoggingTx(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", src, dstTable.Table), renameTablesTx)
+	_, err = c.execWithLoggingTx(ctx, fmt.Sprintf("ALTER TABLE %s RENAME TO %s", src, QuoteIdentifier(dstTable.Table)), renameTablesTx)
 	if err != nil {
 		return nil, fmt.Errorf("unable to rename table %s to %s: %w", src, dst, err)
 	}

From 0fbd7f752bfa838c6e8ee1263a3038501648920a Mon Sep 17 00:00:00 2001
From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com>
Date: Thu, 14 Nov 2024 02:10:53 +0530
Subject: [PATCH 23/59] [clickhouse] change raw table definition to use int64
 for _peerdb_batch_id (#2248)

---
 flow/connectors/clickhouse/cdc.go       | 12 ++++++------
 flow/connectors/clickhouse/normalize.go |  2 +-
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/flow/connectors/clickhouse/cdc.go b/flow/connectors/clickhouse/cdc.go
index 3e002f5028..8fae9d6f26 100644
--- a/flow/connectors/clickhouse/cdc.go
+++ b/flow/connectors/clickhouse/cdc.go
@@ -46,13 +46,13 @@ func (c *ClickHouseConnector) CreateRawTable(ctx context.Context, req *protos.Cr
 	rawTableName := c.getRawTableName(req.FlowJobName)
 	createRawTableSQL := `CREATE TABLE IF NOT EXISTS %s (
-		_peerdb_uid UUID NOT NULL,
-		_peerdb_timestamp Int64 NOT NULL,
-		_peerdb_destination_table_name String NOT NULL,
-		_peerdb_data String NOT NULL,
-		_peerdb_record_type Int NOT NULL,
+		_peerdb_uid UUID,
+		_peerdb_timestamp Int64,
+		_peerdb_destination_table_name String,
+		_peerdb_data String,
+		_peerdb_record_type Int,
 		_peerdb_match_data String,
-		_peerdb_batch_id Int,
+		_peerdb_batch_id Int64,
 		_peerdb_unchanged_toast_columns String
 	) ENGINE = MergeTree() ORDER BY (_peerdb_batch_id, _peerdb_destination_table_name);`
diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go
index 770abc7f20..d5357c9a14 100644
--- a/flow/connectors/clickhouse/normalize.go
+++ b/flow/connectors/clickhouse/normalize.go
@@ -462,7 +462,7 @@ func (c *ClickHouseConnector) getDistinctTableNamesInBatch(
 	rawTbl := c.getRawTableName(flowJobName)

 	q := fmt.Sprintf(
-		`SELECT DISTINCT _peerdb_destination_table_name FROM %s WHERE _peerdb_batch_id > %d AND _peerdb_batch_id <= %d`,
+		`SELECT DISTINCT _peerdb_destination_table_name FROM %s WHERE _peerdb_batch_id>%d AND _peerdb_batch_id<=%d`,
 		rawTbl, normalizeBatchID, syncBatchID)

 	rows, err := c.query(ctx, q)
From 713f10b7c898e82b65f6c90fff671d2884f383a4 Mon Sep 17 00:00:00 2001
From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com>
Date: Thu, 14 Nov 2024 02:44:24 +0530
Subject: [PATCH 24/59] [postgres] reduce unnecessary queries (#2214)

closes #2210
---
 flow/connectors/postgres/cdc.go               |  62 +++++++----
 flow/connectors/postgres/client.go            |   9 +-
 flow/connectors/postgres/postgres.go          |  74 ++++++++-----
 flow/connectors/postgres/qrep.go              |  21 +++-
 flow/connectors/postgres/qrep_bench_test.go   |  13 ++-
 .../postgres/qrep_query_executor.go           |  38 ++++---
 .../postgres/qrep_query_executor_test.go      | 102 ++++++------------
 flow/e2e/postgres/postgres.go                 |   5 +-
 flow/e2e/test_utils.go                        |   5 +-
 flow/shared/postgres.go                       |  14 +--
 10 files changed, 189 insertions(+), 154 deletions(-)

diff --git a/flow/connectors/postgres/cdc.go b/flow/connectors/postgres/cdc.go
index a355cfa00e..9f4bd0b966 100644
--- a/flow/connectors/postgres/cdc.go
+++ b/flow/connectors/postgres/cdc.go
@@ -67,9 +67,9 @@ func (c *PostgresConnector) NewPostgresCDCSource(cdcConfig *PostgresCDCConfig) *
 		relationMessageMapping: cdcConfig.RelationMessageMapping,
 		slot:                   cdcConfig.Slot,
 		publication:            cdcConfig.Publication,
-		childToParentRelIDMapping: cdcConfig.ChildToParentRelIDMap,
 		typeMap:                pgtype.NewMap(),
 		commitLock:             nil,
+		childToParentRelIDMapping: cdcConfig.ChildToParentRelIDMap,
 		catalogPool:            cdcConfig.CatalogPool,
 		flowJobName:            cdcConfig.FlowJobName,
 		hushWarnUnhandledMessageType: make(map[pglogrepl.MessageType]struct{}),
@@ -85,21 +85,18 @@ func GetChildToParentRelIDMap(ctx context.Context, conn *pgx.Conn) (map[uint32]u
 	WHERE parent.relkind='p';
 	`

-	rows, err := conn.Query(ctx, query, pgx.QueryExecModeSimpleProtocol)
+	rows, err := conn.Query(ctx, query)
 	if err != nil {
 		return nil, fmt.Errorf("error querying for child to parent relid map: %w", err)
 	}
-	defer rows.Close()

 	childToParentRelIDMap := make(map[uint32]uint32)
-	var parentRelID pgtype.Uint32
-	var childRelID pgtype.Uint32
-	for rows.Next() {
-		err := rows.Scan(&parentRelID, &childRelID)
-		if err != nil {
-			return nil, fmt.Errorf("error scanning child to parent relid map: %w", err)
-		}
+	var parentRelID, childRelID pgtype.Uint32
+	if _, err := pgx.ForEachRow(rows, []any{&parentRelID, &childRelID}, func() error {
 		childToParentRelIDMap[childRelID.Uint32] = parentRelID.Uint32
+		return nil
+	}); err != nil {
+		return nil, fmt.Errorf("error iterating over child to parent relid map: %w", err)
 	}

 	return childToParentRelIDMap, nil
@@ -114,6 +111,7 @@ type replProcessor[Items model.Items] interface {
 		p *PostgresCDCSource,
 		tuple *pglogrepl.TupleDataColumn,
 		col *pglogrepl.RelationMessageColumn,
+		customTypeMapping map[uint32]string,
 	) error
 }

@@ -128,6 +126,7 @@ func (pgProcessor) Process(
 	p *PostgresCDCSource,
 	tuple *pglogrepl.TupleDataColumn,
 	col *pglogrepl.RelationMessageColumn,
+	customTypeMapping map[uint32]string,
 ) error {
 	switch tuple.DataType {
 	case 'n': // null
@@ -158,13 +157,14 @@ func (qProcessor) Process(
 	p *PostgresCDCSource,
 	tuple *pglogrepl.TupleDataColumn,
 	col *pglogrepl.RelationMessageColumn,
+	customTypeMapping map[uint32]string,
 ) error {
 	switch tuple.DataType {
 	case 'n': // null
 		items.AddColumn(col.Name, qvalue.QValueNull(qvalue.QValueKindInvalid))
 	case 't': // text
 		// bytea also appears here as a hex
-		data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.TextFormatCode)
+		data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.TextFormatCode, customTypeMapping)
 		if err != nil {
 			p.logger.Error("error decoding text column data", slog.Any("error", err),
 				slog.String("columnName", col.Name), slog.Int64("dataType", int64(col.DataType)))
@@ -172,7 +172,7 @@ func (qProcessor) Process(
 		}
 		items.AddColumn(col.Name, data)
 	case 'b': // binary
-		data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.BinaryFormatCode)
+		data, err := p.decodeColumnData(tuple.Data, col.DataType, pgtype.BinaryFormatCode, customTypeMapping)
 		if err != nil {
 			return fmt.Errorf("error decoding binary column data: %w", err)
 		}
@@ -189,6 +189,7 @@ func processTuple[Items model.Items](
 	tuple *pglogrepl.TupleData,
 	rel *pglogrepl.RelationMessage,
 	exclude map[string]struct{},
+	customTypeMapping map[uint32]string,
 ) (Items, map[string]struct{}, error) {
 	// if the tuple is nil, return an empty map
 	if tuple == nil {
@@ -208,7 +209,7 @@ func processTuple[Items model.Items](
 				unchangedToastColumns = make(map[string]struct{})
 			}
 			unchangedToastColumns[rcol.Name] = struct{}{}
-		} else if err := processor.Process(items, p, tcol, rcol); err != nil {
+		} else if err := processor.Process(items, p, tcol, rcol, customTypeMapping); err != nil {
 			var none Items
 			return none, nil, err
 		}
@@ -216,7 +217,9 @@ func processTuple[Items model.Items](
 	return items, unchangedToastColumns, nil
 }

-func (p *PostgresCDCSource) decodeColumnData(data []byte, dataType uint32, formatCode int16) (qvalue.QValue, error) {
+func (p *PostgresCDCSource) decodeColumnData(data []byte, dataType uint32,
+	formatCode int16, customTypeMapping map[uint32]string,
+) (qvalue.QValue, error) {
 	var parsedData any
 	var err error
 	if dt, ok := p.typeMap.TypeForOID(dataType); ok {
@@ -260,7 +263,7 @@ func (p *PostgresCDCSource) decodeColumnData(data []byte, dataType uint32, forma
 		return retVal, nil
 	}

-	typeName, ok := p.customTypesMapping[dataType]
+	typeName, ok := customTypeMapping[dataType]
 	if ok {
 		customQKind := customTypeToQKind(typeName)
 		switch customQKind {
@@ -634,17 +637,21 @@ func processMessage[Items model.Items](
 	if err != nil {
 		return nil, fmt.Errorf("error parsing logical message: %w", err)
 	}
+	customTypeMapping, err := p.fetchCustomTypeMapping(ctx)
+	if err != nil {
+		return nil, err
+	}

 	switch msg := logicalMsg.(type) {
 	case *pglogrepl.BeginMessage:
 		logger.Debug("BeginMessage", slog.Any("FinalLSN", msg.FinalLSN), slog.Any("XID", msg.Xid))
 		p.commitLock = msg
 	case *pglogrepl.InsertMessage:
-		return processInsertMessage(p, xld.WALStart, msg, processor)
+		return processInsertMessage(p, xld.WALStart, msg, processor, customTypeMapping)
 	case *pglogrepl.UpdateMessage:
-		return processUpdateMessage(p, xld.WALStart, msg, processor)
+		return processUpdateMessage(p, xld.WALStart, msg, processor, customTypeMapping)
 	case *pglogrepl.DeleteMessage:
-		return processDeleteMessage(p, xld.WALStart, msg, processor)
+		return processDeleteMessage(p, xld.WALStart, msg, processor, customTypeMapping)
 	case *pglogrepl.CommitMessage:
 		// for a commit message, update the last checkpoint id for the record batch.
logger.Debug("CommitMessage", slog.Any("CommitLSN", msg.CommitLSN), slog.Any("TransactionEndLSN", msg.TransactionEndLSN)) @@ -694,6 +701,7 @@ func processInsertMessage[Items model.Items]( lsn pglogrepl.LSN, msg *pglogrepl.InsertMessage, processor replProcessor[Items], + customTypeMapping map[uint32]string, ) (model.Record[Items], error) { relID := p.getParentRelIDIfPartitioned(msg.RelationID) @@ -710,7 +718,7 @@ func processInsertMessage[Items model.Items]( return nil, fmt.Errorf("unknown relation id: %d", relID) } - items, _, err := processTuple(processor, p, msg.Tuple, rel, p.tableNameMapping[tableName].Exclude) + items, _, err := processTuple(processor, p, msg.Tuple, rel, p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting tuple to map: %w", err) } @@ -729,6 +737,7 @@ func processUpdateMessage[Items model.Items]( lsn pglogrepl.LSN, msg *pglogrepl.UpdateMessage, processor replProcessor[Items], + customTypeMapping map[uint32]string, ) (model.Record[Items], error) { relID := p.getParentRelIDIfPartitioned(msg.RelationID) @@ -745,13 +754,14 @@ func processUpdateMessage[Items model.Items]( return nil, fmt.Errorf("unknown relation id: %d", relID) } - oldItems, _, err := processTuple(processor, p, msg.OldTuple, rel, p.tableNameMapping[tableName].Exclude) + oldItems, _, err := processTuple(processor, p, msg.OldTuple, rel, + p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting old tuple to map: %w", err) } newItems, unchangedToastColumns, err := processTuple( - processor, p, msg.NewTuple, rel, p.tableNameMapping[tableName].Exclude) + processor, p, msg.NewTuple, rel, p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting new tuple to map: %w", err) } @@ -785,6 +795,7 @@ func processDeleteMessage[Items model.Items]( lsn pglogrepl.LSN, msg *pglogrepl.DeleteMessage, processor replProcessor[Items], + customTypeMapping map[uint32]string, ) (model.Record[Items], error) { relID := p.getParentRelIDIfPartitioned(msg.RelationID) @@ -801,7 +812,8 @@ func processDeleteMessage[Items model.Items]( return nil, fmt.Errorf("unknown relation id: %d", relID) } - items, _, err := processTuple(processor, p, msg.OldTuple, rel, p.tableNameMapping[tableName].Exclude) + items, _, err := processTuple(processor, p, msg.OldTuple, rel, + p.tableNameMapping[tableName].Exclude, customTypeMapping) if err != nil { return nil, fmt.Errorf("error converting tuple to map: %w", err) } @@ -844,6 +856,10 @@ func processRelationMessage[Items model.Items]( slog.Uint64("relId", uint64(currRel.RelationID))) return nil, nil } + customTypeMapping, err := p.fetchCustomTypeMapping(ctx) + if err != nil { + return nil, err + } // retrieve current TableSchema for table changed, mapping uses dst table name as key, need to translate source name currRelDstInfo, ok := p.tableNameMapping[currRelName] @@ -867,7 +883,7 @@ func processRelationMessage[Items model.Items]( case protos.TypeSystem_Q: qKind := p.postgresOIDToQValueKind(column.DataType) if qKind == qvalue.QValueKindInvalid { - typeName, ok := p.customTypesMapping[column.DataType] + typeName, ok := customTypeMapping[column.DataType] if ok { qKind = customTypeToQKind(typeName) } diff --git a/flow/connectors/postgres/client.go b/flow/connectors/postgres/client.go index 1daabbf684..70b0d15d1d 100644 --- a/flow/connectors/postgres/client.go +++ b/flow/connectors/postgres/client.go @@ -550,7 +550,14 @@ func (c 
*PostgresConnector) jobMetadataExists(ctx context.Context, jobName strin } func (c *PostgresConnector) MajorVersion(ctx context.Context) (shared.PGVersion, error) { - return shared.GetMajorVersion(ctx, c.conn) + if c.pgVersion == 0 { + pgVersion, err := shared.GetMajorVersion(ctx, c.conn) + if err != nil { + return 0, err + } + c.pgVersion = pgVersion + } + return c.pgVersion, nil } func (c *PostgresConnector) updateSyncMetadata(ctx context.Context, flowJobName string, lastCP int64, syncBatchID int64, diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index c8a853287e..b179e2d075 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -39,12 +39,13 @@ type PostgresConnector struct { conn *pgx.Conn replConn *pgx.Conn replState *ReplState - customTypesMapping map[uint32]string + customTypeMapping map[uint32]string hushWarnOID map[uint32]struct{} relationMessageMapping model.RelationMessageMapping connStr string metadataSchema string replLock sync.Mutex + pgVersion shared.PGVersion } type ReplState struct { @@ -87,33 +88,39 @@ func NewPostgresConnector(ctx context.Context, env map[string]string, pgConfig * return nil, fmt.Errorf("failed to create connection: %w", err) } - customTypeMap, err := shared.GetCustomDataTypes(ctx, conn) - if err != nil { - logger.Error("failed to get custom type map", slog.Any("error", err)) - return nil, fmt.Errorf("failed to get custom type map: %w", err) - } - metadataSchema := "_peerdb_internal" if pgConfig.MetadataSchema != nil { metadataSchema = *pgConfig.MetadataSchema } return &PostgresConnector{ - connStr: connectionString, + logger: logger, config: pgConfig, ssh: tunnel, conn: conn, replConn: nil, replState: nil, - replLock: sync.Mutex{}, - customTypesMapping: customTypeMap, - metadataSchema: metadataSchema, + customTypeMapping: nil, hushWarnOID: make(map[uint32]struct{}), - logger: logger, relationMessageMapping: make(model.RelationMessageMapping), + connStr: connectionString, + metadataSchema: metadataSchema, + replLock: sync.Mutex{}, + pgVersion: 0, }, nil } +func (c *PostgresConnector) fetchCustomTypeMapping(ctx context.Context) (map[uint32]string, error) { + if c.customTypeMapping == nil { + customTypeMapping, err := shared.GetCustomDataTypes(ctx, c.conn) + if err != nil { + return nil, err + } + c.customTypeMapping = customTypeMapping + } + return c.customTypeMapping, nil +} + func (c *PostgresConnector) CreateReplConn(ctx context.Context) (*pgx.Conn, error) { // create a separate connection pool for non-replication queries as replication connections cannot // be used for extended query protocol, i.e. 
prepared statements @@ -129,6 +136,7 @@ func (c *PostgresConnector) CreateReplConn(ctx context.Context) (*pgx.Conn, erro replConfig.Config.RuntimeParams["replication"] = "database" replConfig.Config.RuntimeParams["bytea_output"] = "hex" replConfig.Config.RuntimeParams["intervalstyle"] = "postgres" + replConfig.DefaultQueryExecMode = pgx.QueryExecModeSimpleProtocol conn, err := c.ssh.NewPostgresConnFromConfig(ctx, replConfig) if err != nil { @@ -168,6 +176,7 @@ func (c *PostgresConnector) MaybeStartReplication( slotName string, publicationName string, lastOffset int64, + pgVersion shared.PGVersion, ) error { if c.replState != nil && (c.replState.Offset != lastOffset || c.replState.Slot != slotName || @@ -180,7 +189,7 @@ func (c *PostgresConnector) MaybeStartReplication( } if c.replState == nil { - replicationOpts, err := c.replicationOptions(ctx, publicationName) + replicationOpts, err := c.replicationOptions(publicationName, pgVersion) if err != nil { return fmt.Errorf("error getting replication options: %w", err) } @@ -210,7 +219,8 @@ func (c *PostgresConnector) MaybeStartReplication( return nil } -func (c *PostgresConnector) replicationOptions(ctx context.Context, publicationName string) (pglogrepl.StartReplicationOptions, error) { +func (c *PostgresConnector) replicationOptions(publicationName string, pgVersion shared.PGVersion, +) (pglogrepl.StartReplicationOptions, error) { pluginArguments := append(make([]string, 0, 3), "proto_version '1'") if publicationName != "" { @@ -220,10 +230,7 @@ func (c *PostgresConnector) replicationOptions(ctx context.Context, publicationN return pglogrepl.StartReplicationOptions{}, errors.New("publication name is not set") } - pgversion, err := c.MajorVersion(ctx) - if err != nil { - return pglogrepl.StartReplicationOptions{}, err - } else if pgversion >= shared.POSTGRES_14 { + if pgVersion >= shared.POSTGRES_14 { pluginArguments = append(pluginArguments, "messages 'true'") } @@ -380,12 +387,21 @@ func pullCore[Items model.Items]( c.logger.Info("PullRecords: performed checks for slot and publication") - childToParentRelIDMap, err := GetChildToParentRelIDMap(ctx, c.conn) + // cached, since this connector is reused + pgVersion, err := c.MajorVersion(ctx) if err != nil { - return fmt.Errorf("error getting child to parent relid map: %w", err) + return err + } + var childToParentRelIDMap map[uint32]uint32 + // only initialize the map if needed, escape hatch because custom publications may not have the right setting + if req.OverridePublicationName != "" || pgVersion < shared.POSTGRES_13 { + childToParentRelIDMap, err = GetChildToParentRelIDMap(ctx, c.conn) + if err != nil { + return fmt.Errorf("error getting child to parent relid map: %w", err) + } } - if err := c.MaybeStartReplication(ctx, slotName, publicationName, req.LastOffset); err != nil { + if err := c.MaybeStartReplication(ctx, slotName, publicationName, req.LastOffset, pgVersion); err != nil { // in case of Aurora error ERROR: replication slots cannot be used on RO (Read Only) node (SQLSTATE 55000) if shared.IsSQLStateError(err, pgerrcode.ObjectNotInPrerequisiteState) && strings.Contains(err.Error(), "replication slots cannot be used on RO (Read Only) node") { @@ -396,15 +412,15 @@ func pullCore[Items model.Items]( } cdc := c.NewPostgresCDCSource(&PostgresCDCConfig{ + CatalogPool: catalogPool, SrcTableIDNameMapping: req.SrcTableIDNameMapping, - Slot: slotName, - Publication: publicationName, TableNameMapping: req.TableNameMapping, TableNameSchemaMapping: req.TableNameSchemaMapping, 
ChildToParentRelIDMap: childToParentRelIDMap, - CatalogPool: catalogPool, - FlowJobName: req.FlowJobName, RelationMessageMapping: c.relationMessageMapping, + FlowJobName: req.FlowJobName, + Slot: slotName, + Publication: publicationName, }) if err := PullCdcRecords(ctx, cdc, req, processor, &c.replLock); err != nil { @@ -766,6 +782,10 @@ func (c *PostgresConnector) getTableSchemaForTable( if err != nil { return nil, err } + customTypeMapping, err := c.fetchCustomTypeMapping(ctx) + if err != nil { + return nil, err + } relID, err := c.getRelIDForTable(ctx, schemaTable) if err != nil { @@ -811,7 +831,7 @@ func (c *PostgresConnector) getTableSchemaForTable( case protos.TypeSystem_PG: colType = c.postgresOIDToName(fieldDescription.DataTypeOID) if colType == "" { - typeName, ok := c.customTypesMapping[fieldDescription.DataTypeOID] + typeName, ok := customTypeMapping[fieldDescription.DataTypeOID] if !ok { return nil, fmt.Errorf("error getting type name for %d", fieldDescription.DataTypeOID) } @@ -820,7 +840,7 @@ func (c *PostgresConnector) getTableSchemaForTable( case protos.TypeSystem_Q: qColType := c.postgresOIDToQValueKind(fieldDescription.DataTypeOID) if qColType == qvalue.QValueKindInvalid { - typeName, ok := c.customTypesMapping[fieldDescription.DataTypeOID] + typeName, ok := customTypeMapping[fieldDescription.DataTypeOID] if ok { qColType = customTypeToQKind(typeName) } else { diff --git a/flow/connectors/postgres/qrep.go b/flow/connectors/postgres/qrep.go index b393a46913..1cd2cd5952 100644 --- a/flow/connectors/postgres/qrep.go +++ b/flow/connectors/postgres/qrep.go @@ -328,10 +328,15 @@ func corePullQRepRecords( sink QRepPullSink, ) (int, error) { partitionIdLog := slog.String(string(shared.PartitionIDKey), partition.PartitionId) + if partition.FullTablePartition { c.logger.Info("pulling full table partition", partitionIdLog) - executor := c.NewQRepQueryExecutorSnapshot(config.SnapshotName, config.FlowJobName, partition.PartitionId) - _, err := executor.ExecuteQueryIntoSink(ctx, sink, config.Query) + executor, err := c.NewQRepQueryExecutorSnapshot(ctx, config.SnapshotName, + config.FlowJobName, partition.PartitionId) + if err != nil { + return 0, fmt.Errorf("failed to create query executor: %w", err) + } + _, err = executor.ExecuteQueryIntoSink(ctx, sink, config.Query) return 0, err } c.logger.Info("Obtained ranges for partition for PullQRepStream", partitionIdLog) @@ -369,7 +374,11 @@ func corePullQRepRecords( return 0, err } - executor := c.NewQRepQueryExecutorSnapshot(config.SnapshotName, config.FlowJobName, partition.PartitionId) + executor, err := c.NewQRepQueryExecutorSnapshot(ctx, config.SnapshotName, config.FlowJobName, + partition.PartitionId) + if err != nil { + return 0, fmt.Errorf("failed to create query executor: %w", err) + } numRecords, err := executor.ExecuteQueryIntoSink(ctx, sink, query, rangeStart, rangeEnd) if err != nil { @@ -669,7 +678,11 @@ func pullXminRecordStream( queryArgs = []interface{}{strconv.FormatInt(partition.Range.Range.(*protos.PartitionRange_IntRange).IntRange.Start&0xffffffff, 10)} } - executor := c.NewQRepQueryExecutorSnapshot(config.SnapshotName, config.FlowJobName, partition.PartitionId) + executor, err := c.NewQRepQueryExecutorSnapshot(ctx, config.SnapshotName, + config.FlowJobName, partition.PartitionId) + if err != nil { + return 0, 0, fmt.Errorf("failed to create query executor: %w", err) + } numRecords, currentSnapshotXmin, err := executor.ExecuteQueryIntoSinkGettingCurrentSnapshotXmin( ctx, diff --git 
a/flow/connectors/postgres/qrep_bench_test.go b/flow/connectors/postgres/qrep_bench_test.go index d880343f43..777faf6e6f 100644 --- a/flow/connectors/postgres/qrep_bench_test.go +++ b/flow/connectors/postgres/qrep_bench_test.go @@ -4,6 +4,8 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/PeerDB-io/peer-flow/peerdbenv" ) @@ -12,13 +14,12 @@ func BenchmarkQRepQueryExecutor(b *testing.B) { ctx := context.Background() connector, err := NewPostgresConnector(ctx, nil, peerdbenv.GetCatalogPostgresConfigFromEnv(ctx)) - if err != nil { - b.Fatalf("failed to create connection: %v", err) - } + require.NoError(b, err, "error while creating connector") defer connector.Close() // Create a new QRepQueryExecutor instance - qe := connector.NewQRepQueryExecutor("test flow", "test part") + qe, err := connector.NewQRepQueryExecutor(ctx, "test flow", "test part") + require.NoError(b, err, "error while creating QRepQueryExecutor") // Run the benchmark b.ResetTimer() @@ -28,8 +29,6 @@ func BenchmarkQRepQueryExecutor(b *testing.B) { // Execute the query and process the rows _, err := qe.ExecuteAndProcessQuery(ctx, query) - if err != nil { - b.Fatalf("failed to execute query: %v", err) - } + require.NoError(b, err, "error while executing query") } } diff --git a/flow/connectors/postgres/qrep_query_executor.go b/flow/connectors/postgres/qrep_query_executor.go index bdfa7038ba..f3c915c133 100644 --- a/flow/connectors/postgres/qrep_query_executor.go +++ b/flow/connectors/postgres/qrep_query_executor.go @@ -18,24 +18,35 @@ import ( type QRepQueryExecutor struct { *PostgresConnector - logger log.Logger - snapshot string - flowJobName string - partitionID string + logger log.Logger + customTypeMapping map[uint32]string + snapshot string + flowJobName string + partitionID string } -func (c *PostgresConnector) NewQRepQueryExecutor(flowJobName string, partitionID string) *QRepQueryExecutor { - return c.NewQRepQueryExecutorSnapshot("", flowJobName, partitionID) +func (c *PostgresConnector) NewQRepQueryExecutor(ctx context.Context, + flowJobName string, partitionID string, +) (*QRepQueryExecutor, error) { + return c.NewQRepQueryExecutorSnapshot(ctx, "", flowJobName, partitionID) } -func (c *PostgresConnector) NewQRepQueryExecutorSnapshot(snapshot string, flowJobName string, partitionID string) *QRepQueryExecutor { +func (c *PostgresConnector) NewQRepQueryExecutorSnapshot(ctx context.Context, + snapshot string, flowJobName string, partitionID string, +) (*QRepQueryExecutor, error) { + customTypeMapping, err := c.fetchCustomTypeMapping(ctx) + if err != nil { + c.logger.Error("[pg_query_executor] failed to fetch custom type mapping", slog.Any("error", err)) + return nil, fmt.Errorf("failed to fetch custom type mapping: %w", err) + } return &QRepQueryExecutor{ PostgresConnector: c, snapshot: snapshot, flowJobName: flowJobName, partitionID: partitionID, logger: log.With(c.logger, slog.String(string(shared.PartitionIDKey), partitionID)), - } + customTypeMapping: customTypeMapping, + }, nil } func (qe *QRepQueryExecutor) ExecuteQuery(ctx context.Context, query string, args ...interface{}) (pgx.Rows, error) { @@ -67,7 +78,7 @@ func (qe *QRepQueryExecutor) fieldDescriptionsToSchema(fds []pgconn.FieldDescrip cname := fd.Name ctype := qe.postgresOIDToQValueKind(fd.DataTypeOID) if ctype == qvalue.QValueKindInvalid { - typeName, ok := qe.customTypesMapping[fd.DataTypeOID] + typeName, ok := qe.customTypeMapping[fd.DataTypeOID] if ok { ctype = customTypeToQKind(typeName) } else { @@ -98,6 +109,7 @@ 
func (qe *QRepQueryExecutor) fieldDescriptionsToSchema(fds []pgconn.FieldDescrip } func (qe *QRepQueryExecutor) ProcessRows( + ctx context.Context, rows pgx.Rows, fieldDescriptions []pgconn.FieldDescription, ) (*model.QRecordBatch, error) { @@ -119,8 +131,9 @@ func (qe *QRepQueryExecutor) ProcessRows( return nil, fmt.Errorf("row iteration failed: %w", err) } + schema := qe.fieldDescriptionsToSchema(fieldDescriptions) batch := &model.QRecordBatch{ - Schema: qe.fieldDescriptionsToSchema(fieldDescriptions), + Schema: schema, Records: records, } @@ -186,7 +199,8 @@ func (qe *QRepQueryExecutor) processFetchedRows( fieldDescriptions := rows.FieldDescriptions() if !stream.IsSchemaSet() { - stream.SetSchema(qe.fieldDescriptionsToSchema(fieldDescriptions)) + schema := qe.fieldDescriptionsToSchema(fieldDescriptions) + stream.SetSchema(schema) } numRows, err := qe.processRowsStream(ctx, cursorName, stream, rows, fieldDescriptions) @@ -324,7 +338,7 @@ func (qe *QRepQueryExecutor) mapRowToQRecord( for i, fd := range fds { // Check if it's a custom type first - typeName, ok := qe.customTypesMapping[fd.DataTypeOID] + typeName, ok := qe.customTypeMapping[fd.DataTypeOID] if !ok { tmp, err := qe.parseFieldFromPostgresOID(fd.DataTypeOID, values[i]) if err != nil { diff --git a/flow/connectors/postgres/qrep_query_executor_test.go b/flow/connectors/postgres/qrep_query_executor_test.go index d7932ba00e..f8f686c42f 100644 --- a/flow/connectors/postgres/qrep_query_executor_test.go +++ b/flow/connectors/postgres/qrep_query_executor_test.go @@ -1,7 +1,6 @@ package connpostgres import ( - "bytes" "context" "fmt" "testing" @@ -10,6 +9,7 @@ import ( "github.com/google/uuid" "github.com/jackc/pgx/v5" "github.com/shopspring/decimal" + "github.com/stretchr/testify/require" "github.com/PeerDB-io/peer-flow/peerdbenv" ) @@ -19,18 +19,14 @@ func setupDB(t *testing.T) (*PostgresConnector, string) { connector, err := NewPostgresConnector(context.Background(), nil, peerdbenv.GetCatalogPostgresConfigFromEnv(context.Background())) - if err != nil { - t.Fatalf("unable to create connector: %v", err) - } + require.NoError(t, err, "error while creating connector") // Create unique schema name using current time schemaName := fmt.Sprintf("schema_%d", time.Now().Unix()) // Create the schema _, err = connector.conn.Exec(context.Background(), fmt.Sprintf("CREATE SCHEMA %s;", schemaName)) - if err != nil { - t.Fatalf("unable to create schema: %v", err) - } + require.NoError(t, err, "error while creating schema") return connector, schemaName } @@ -39,9 +35,7 @@ func teardownDB(t *testing.T, conn *pgx.Conn, schemaName string) { t.Helper() _, err := conn.Exec(context.Background(), fmt.Sprintf("DROP SCHEMA %s CASCADE;", schemaName)) - if err != nil { - t.Fatalf("error while dropping schema: %v", err) - } + require.NoError(t, err, "error while dropping schema") } func TestExecuteAndProcessQuery(t *testing.T) { @@ -53,31 +47,20 @@ func TestExecuteAndProcessQuery(t *testing.T) { query := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s.test(id SERIAL PRIMARY KEY, data TEXT);", schemaName) _, err := conn.Exec(ctx, query) - if err != nil { - t.Fatalf("error while creating test table: %v", err) - } + require.NoError(t, err, "error while creating table") query = fmt.Sprintf("INSERT INTO %s.test(data) VALUES('testdata');", schemaName) _, err = conn.Exec(ctx, query) - if err != nil { - t.Fatalf("error while inserting into test table: %v", err) - } + require.NoError(t, err, "error while inserting data") - qe := connector.NewQRepQueryExecutor("test flow", 
"test part") + qe, err := connector.NewQRepQueryExecutor(ctx, "test flow", "test part") + require.NoError(t, err, "error while creating QRepQueryExecutor") query = fmt.Sprintf("SELECT * FROM %s.test;", schemaName) batch, err := qe.ExecuteAndProcessQuery(context.Background(), query) - if err != nil { - t.Fatalf("error while executing and processing query: %v", err) - } - - if len(batch.Records) != 1 { - t.Fatalf("expected 1 record, got %v", len(batch.Records)) - } - - if batch.Records[0][1].Value() != "testdata" { - t.Fatalf("expected 'testdata', got %v", batch.Records[0][0].Value()) - } + require.NoError(t, err, "error while executing query") + require.Len(t, batch.Records, 1, "expected 1 record") + require.Equal(t, "testdata", batch.Records[0][1].Value(), "expected 'testdata'") } func TestAllDataTypes(t *testing.T) { @@ -109,9 +92,7 @@ func TestAllDataTypes(t *testing.T) { );`, schemaName) _, err := conn.Exec(ctx, query) - if err != nil { - t.Fatalf("error while creating test table: %v", err) - } + require.NoError(t, err, "error while creating table") // Insert a row into the table query = fmt.Sprintf(` @@ -137,7 +118,7 @@ func TestAllDataTypes(t *testing.T) { )`, schemaName) - savedTime := time.Now() + savedTime := time.Now().UTC() savedUUID := uuid.New() _, err = conn.Exec( @@ -160,48 +141,34 @@ func TestAllDataTypes(t *testing.T) { savedTime, // col_tz4 savedTime, // col_date ) - if err != nil { - t.Fatalf("error while inserting into test table: %v", err) - } + require.NoError(t, err, "error while inserting into test table") - qe := connector.NewQRepQueryExecutor("test flow", "test part") + qe, err := connector.NewQRepQueryExecutor(ctx, "test flow", "test part") + require.NoError(t, err, "error while creating QRepQueryExecutor") // Select the row back out of the table query = fmt.Sprintf("SELECT * FROM %s.test;", schemaName) rows, err := qe.ExecuteQuery(context.Background(), query) - if err != nil { - t.Fatalf("error while executing query: %v", err) - } + require.NoError(t, err, "error while executing query") defer rows.Close() // Use rows.FieldDescriptions() to get field descriptions fieldDescriptions := rows.FieldDescriptions() - batch, err := qe.ProcessRows(rows, fieldDescriptions) - if err != nil { - t.Fatalf("failed to process rows: %v", err) - } - - if len(batch.Records) != 1 { - t.Fatalf("expected 1 record, got %v", len(batch.Records)) - } + batch, err := qe.ProcessRows(ctx, rows, fieldDescriptions) + require.NoError(t, err, "error while processing rows") + require.Len(t, batch.Records, 1, "expected 1 record") // Retrieve the results. 
record := batch.Records[0] expectedBool := true - if record[0].Value().(bool) != expectedBool { - t.Fatalf("expected %v, got %v", expectedBool, record[0].Value()) - } + require.Equal(t, expectedBool, record[0].Value(), "expected true") expectedInt4 := int32(2) - if record[1].Value().(int32) != expectedInt4 { - t.Fatalf("expected %v, got %v", expectedInt4, record[1].Value()) - } + require.Equal(t, expectedInt4, record[1].Value(), "expected 2") expectedInt8 := int64(3) - if record[2].Value().(int64) != expectedInt8 { - t.Fatalf("expected %v, got %v", expectedInt8, record[2].Value()) - } + require.Equal(t, expectedInt8, record[2].Value(), "expected 3") expectedFloat4 := float32(1.1) if record[3].Value().(float32) != expectedFloat4 { @@ -214,28 +181,21 @@ func TestAllDataTypes(t *testing.T) { } expectedText := "text" - if record[5].Value().(string) != expectedText { - t.Fatalf("expected %v, got %v", expectedText, record[5].Value()) - } + require.Equal(t, expectedText, record[5].Value(), "expected 'text'") expectedBytea := []byte("bytea") - if !bytes.Equal(record[6].Value().([]byte), expectedBytea) { - t.Fatalf("expected %v, got %v", expectedBytea, record[6].Value()) - } + require.Equal(t, expectedBytea, record[6].Value(), "expected 'bytea'") expectedJSON := `{"key":"value"}` - if record[7].Value().(string) != expectedJSON { - t.Fatalf("expected %v, got %v", expectedJSON, record[7].Value()) - } + require.Equal(t, expectedJSON, record[7].Value(), "expected '{\"key\":\"value\"}'") actualUUID := record[8].Value().([16]uint8) - if !bytes.Equal(actualUUID[:], savedUUID[:]) { - t.Fatalf("expected %v, got %v", savedUUID, actualUUID) - } + require.Equal(t, savedUUID[:], actualUUID[:], "expected savedUUID: %v", savedUUID) + actualTime := record[9].Value().(time.Time) + require.Equal(t, savedTime.Truncate(time.Second), + actualTime.Truncate(time.Second), "expected savedTime: %v", savedTime) expectedNumeric := "123.456" actualNumeric := record[10].Value().(decimal.Decimal).String() - if actualNumeric != expectedNumeric { - t.Fatalf("expected %v, got %v", expectedNumeric, actualNumeric) - } + require.Equal(t, expectedNumeric, actualNumeric, "expected 123.456") } diff --git a/flow/e2e/postgres/postgres.go b/flow/e2e/postgres/postgres.go index 37a0ace06b..ea43648f7c 100644 --- a/flow/e2e/postgres/postgres.go +++ b/flow/e2e/postgres/postgres.go @@ -54,7 +54,10 @@ func (s PeerFlowE2ETestSuitePG) DestinationTable(table string) string { func (s PeerFlowE2ETestSuitePG) GetRows(table string, cols string) (*model.QRecordBatch, error) { s.t.Helper() - pgQueryExecutor := s.conn.NewQRepQueryExecutor("testflow", "testpart") + pgQueryExecutor, err := s.conn.NewQRepQueryExecutor(context.Background(), "testflow", "testpart") + if err != nil { + return nil, err + } return pgQueryExecutor.ExecuteAndProcessQuery( context.Background(), diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index ce134f819a..c3e577a4dc 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -89,7 +89,10 @@ func EnvTrue(t *testing.T, env WorkflowRun, val bool) { } func GetPgRows(conn *connpostgres.PostgresConnector, suffix string, table string, cols string) (*model.QRecordBatch, error) { - pgQueryExecutor := conn.NewQRepQueryExecutor("testflow", "testpart") + pgQueryExecutor, err := conn.NewQRepQueryExecutor(context.Background(), "testflow", "testpart") + if err != nil { + return nil, err + } return pgQueryExecutor.ExecuteAndProcessQuery( context.Background(), diff --git a/flow/shared/postgres.go b/flow/shared/postgres.go 
index be3cf7d07d..121fb73bf4 100644
--- a/flow/shared/postgres.go
+++ b/flow/shared/postgres.go
@@ -58,17 +58,17 @@ func GetCustomDataTypes(ctx context.Context, conn *pgx.Conn) (map[uint32]string,
 		AND n.nspname NOT IN ('pg_catalog', 'information_schema');
 	`)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get custom types: %w", err)
+		return nil, fmt.Errorf("failed to get customTypeMapping: %w", err)
 	}

 	customTypeMap := map[uint32]string{}
-	for rows.Next() {
-		var typeID pgtype.Uint32
-		var typeName pgtype.Text
-		if err := rows.Scan(&typeID, &typeName); err != nil {
-			return nil, fmt.Errorf("failed to scan row: %w", err)
-		}
+	var typeID pgtype.Uint32
+	var typeName pgtype.Text
+	if _, err := pgx.ForEachRow(rows, []any{&typeID, &typeName}, func() error {
 		customTypeMap[typeID.Uint32] = typeName.String
+		return nil
+	}); err != nil {
+		return nil, fmt.Errorf("failed to scan into customTypeMapping: %w", err)
 	}
 	return customTypeMap, nil
 }

From 08f4ffd6f7da7bce6f5058a9b3e838f61a5fe6ef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philip=20Dub=C3=A9?=
Date: Thu, 14 Nov 2024 13:13:32 +0000
Subject: [PATCH 25/59] add dynamic config to adjust s3 part size (#2251)

fixes #2184, where a user with a large enough batch hit

```
failed to sync records: failed to write records to S3: failed to upload file to path s3://peerdb-cache/...... 4890f21240e1.avro.zst: upload multipart failed, upload id: OTA0ZTE5NTMtMTdiMi00MWE5LWJhY....., cause: exceeded total allowed configured MaxUploadParts (10000). Adjust PartSize to fit in this limit
```

the s3 sdk defaults to 5MiB part sizes; this user was able to fix their upload by changing that to 500MiB

Co-authored-by: joltcan
---
 flow/connectors/clickhouse/cdc.go            |  2 +-
 flow/connectors/clickhouse/qrep_avro_sync.go |  8 ++-
 flow/connectors/s3/qrep.go                   |  5 +-
 flow/connectors/snowflake/qrep_avro_sync.go  | 67 ++++++++++----------
 flow/connectors/snowflake/snowflake.go       |  5 +-
 flow/connectors/utils/avro/avro_writer.go    | 24 +++++--
 flow/peerdbenv/dynamicconf.go                | 12 ++++
 7 files changed, 76 insertions(+), 47 deletions(-)

diff --git a/flow/connectors/clickhouse/cdc.go b/flow/connectors/clickhouse/cdc.go
index 8fae9d6f26..d3eb883b46 100644
--- a/flow/connectors/clickhouse/cdc.go
+++ b/flow/connectors/clickhouse/cdc.go
@@ -88,7 +88,7 @@ func (c *ClickHouseConnector) syncRecordsViaAvro(
 	}

 	avroSyncer := c.avroSyncMethod(req.FlowJobName)
-	numRecords, err := avroSyncer.SyncRecords(ctx, stream, req.FlowJobName, syncBatchID)
+	numRecords, err := avroSyncer.SyncRecords(ctx, req.Env, stream, req.FlowJobName, syncBatchID)
 	if err != nil {
 		return nil, err
 	}
diff --git a/flow/connectors/clickhouse/qrep_avro_sync.go b/flow/connectors/clickhouse/qrep_avro_sync.go
index f8277e3aad..fa2cfe1034 100644
--- a/flow/connectors/clickhouse/qrep_avro_sync.go
+++ b/flow/connectors/clickhouse/qrep_avro_sync.go
@@ -60,6 +60,7 @@ func (s *ClickHouseAvroSyncMethod) CopyStageToDestination(ctx context.Context, a

 func (s *ClickHouseAvroSyncMethod) SyncRecords(
 	ctx context.Context,
+	env map[string]string,
 	stream *model.QRecordStream,
 	flowJobName string,
 	syncBatchID int64,
@@ -76,7 +77,7 @@ func (s *ClickHouseAvroSyncMethod) SyncRecords(
 	}

 	batchIdentifierForFile := fmt.Sprintf("%s_%d", shared.RandomString(16), syncBatchID)
-	avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, batchIdentifierForFile, flowJobName)
+	avroFile, err := s.writeToAvroFile(ctx, env, stream, avroSchema, batchIdentifierForFile, flowJobName)
 	if err != nil {
 		return 0, err
 	}
@@ -110,7 +111,7 @@ func (s *ClickHouseAvroSyncMethod) 
SyncQRepRecords( return 0, err } - avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, partition.PartitionId, config.FlowJobName) + avroFile, err := s.writeToAvroFile(ctx, config.Env, stream, avroSchema, partition.PartitionId, config.FlowJobName) if err != nil { return 0, err } @@ -176,6 +177,7 @@ func (s *ClickHouseAvroSyncMethod) getAvroSchema( func (s *ClickHouseAvroSyncMethod) writeToAvroFile( ctx context.Context, + env map[string]string, stream *model.QRecordStream, avroSchema *model.QRecordAvroSchemaDefinition, identifierForFile string, @@ -190,7 +192,7 @@ func (s *ClickHouseAvroSyncMethod) writeToAvroFile( s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro.zst", s3o.Prefix, flowJobName, identifierForFile) s3AvroFileKey = strings.Trim(s3AvroFileKey, "/") - avroFile, err := ocfWriter.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, s.credsProvider.Provider) + avroFile, err := ocfWriter.WriteRecordsToS3(ctx, env, s3o.Bucket, s3AvroFileKey, s.credsProvider.Provider) if err != nil { return nil, fmt.Errorf("failed to write records to S3: %w", err) } diff --git a/flow/connectors/s3/qrep.go b/flow/connectors/s3/qrep.go index 14c7b31ef2..9fbb485ab8 100644 --- a/flow/connectors/s3/qrep.go +++ b/flow/connectors/s3/qrep.go @@ -25,7 +25,7 @@ func (c *S3Connector) SyncQRepRecords( return 0, err } - numRecords, err := c.writeToAvroFile(ctx, stream, avroSchema, partition.PartitionId, config.FlowJobName) + numRecords, err := c.writeToAvroFile(ctx, config.Env, stream, avroSchema, partition.PartitionId, config.FlowJobName) if err != nil { return 0, err } @@ -47,6 +47,7 @@ func getAvroSchema( func (c *S3Connector) writeToAvroFile( ctx context.Context, + env map[string]string, stream *model.QRecordStream, avroSchema *model.QRecordAvroSchemaDefinition, partitionID string, @@ -60,7 +61,7 @@ func (c *S3Connector) writeToAvroFile( s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro", s3o.Prefix, jobName, partitionID) writer := avro.NewPeerDBOCFWriter(stream, avroSchema, avro.CompressNone, protos.DBType_SNOWFLAKE) - avroFile, err := writer.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, c.credentialsProvider) + avroFile, err := writer.WriteRecordsToS3(ctx, env, s3o.Bucket, s3AvroFileKey, c.credentialsProvider) if err != nil { return 0, fmt.Errorf("failed to write records to S3: %w", err) } diff --git a/flow/connectors/snowflake/qrep_avro_sync.go b/flow/connectors/snowflake/qrep_avro_sync.go index 2e37705c14..0fea54b027 100644 --- a/flow/connectors/snowflake/qrep_avro_sync.go +++ b/flow/connectors/snowflake/qrep_avro_sync.go @@ -20,8 +20,8 @@ import ( ) type SnowflakeAvroSyncHandler struct { - config *protos.QRepConfig - connector *SnowflakeConnector + *SnowflakeConnector + config *protos.QRepConfig } func NewSnowflakeAvroSyncHandler( @@ -29,13 +29,14 @@ func NewSnowflakeAvroSyncHandler( connector *SnowflakeConnector, ) *SnowflakeAvroSyncHandler { return &SnowflakeAvroSyncHandler{ - config: config, - connector: connector, + SnowflakeConnector: connector, + config: config, } } func (s *SnowflakeAvroSyncHandler) SyncRecords( ctx context.Context, + env map[string]string, dstTableSchema []*sql.ColumnType, stream *model.QRecordStream, flowJobName string, @@ -45,7 +46,7 @@ func (s *SnowflakeAvroSyncHandler) SyncRecords( schema := stream.Schema() - s.connector.logger.Info("sync function called and schema acquired", tableLog) + s.logger.Info("sync function called and schema acquired", tableLog) avroSchema, err := s.getAvroSchema(dstTableName, schema) if err != nil { @@ -53,32 +54,31 @@ func (s *SnowflakeAvroSyncHandler) 
SyncRecords( } partitionID := shared.RandomString(16) - avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, partitionID, flowJobName) + avroFile, err := s.writeToAvroFile(ctx, env, stream, avroSchema, partitionID, flowJobName) if err != nil { return 0, err } defer avroFile.Cleanup() - s.connector.logger.Info(fmt.Sprintf("written %d records to Avro file", avroFile.NumRecords), tableLog) + s.logger.Info(fmt.Sprintf("written %d records to Avro file", avroFile.NumRecords), tableLog) - stage := s.connector.getStageNameForJob(s.config.FlowJobName) - err = s.connector.createStage(ctx, stage, s.config) - if err != nil { + stage := s.getStageNameForJob(s.config.FlowJobName) + if err := s.createStage(ctx, stage, s.config); err != nil { return 0, err } - s.connector.logger.Info("Created stage " + stage) + s.logger.Info("Created stage " + stage) err = s.putFileToStage(ctx, avroFile, stage) if err != nil { return 0, err } - s.connector.logger.Info("pushed avro file to stage", tableLog) + s.logger.Info("pushed avro file to stage", tableLog) - writeHandler := NewSnowflakeAvroConsolidateHandler(s.connector, s.config, s.config.DestinationTableIdentifier, stage) + writeHandler := NewSnowflakeAvroConsolidateHandler(s.SnowflakeConnector, s.config, s.config.DestinationTableIdentifier, stage) err = writeHandler.CopyStageToDestination(ctx) if err != nil { return 0, err } - s.connector.logger.Info(fmt.Sprintf("copying records into %s from stage %s", + s.logger.Info(fmt.Sprintf("copying records into %s from stage %s", s.config.DestinationTableIdentifier, stage)) return avroFile.NumRecords, nil @@ -96,7 +96,7 @@ func (s *SnowflakeAvroSyncHandler) SyncQRepRecords( dstTableName := config.DestinationTableIdentifier schema := stream.Schema() - s.connector.logger.Info("sync function called and schema acquired", partitionLog) + s.logger.Info("sync function called and schema acquired", partitionLog) err := s.addMissingColumns(ctx, schema, dstTableSchema, dstTableName, partition) if err != nil { @@ -108,22 +108,20 @@ func (s *SnowflakeAvroSyncHandler) SyncQRepRecords( return 0, err } - avroFile, err := s.writeToAvroFile(ctx, stream, avroSchema, partition.PartitionId, config.FlowJobName) + avroFile, err := s.writeToAvroFile(ctx, config.Env, stream, avroSchema, partition.PartitionId, config.FlowJobName) if err != nil { return 0, err } defer avroFile.Cleanup() - stage := s.connector.getStageNameForJob(config.FlowJobName) + stage := s.getStageNameForJob(config.FlowJobName) - err = s.putFileToStage(ctx, avroFile, stage) - if err != nil { + if err := s.putFileToStage(ctx, avroFile, stage); err != nil { return 0, err } - s.connector.logger.Info("Put file to stage in Avro sync for snowflake", partitionLog) + s.logger.Info("Put file to stage in Avro sync for snowflake", partitionLog) - err = s.connector.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime) - if err != nil { + if err := s.FinishQRepPartition(ctx, partition, config.FlowJobName, startTime); err != nil { return 0, err } @@ -152,14 +150,14 @@ func (s *SnowflakeAvroSyncHandler) addMissingColumns( } if !hasColumn { - s.connector.logger.Info(fmt.Sprintf("adding column %s to destination table %s", + s.logger.Info(fmt.Sprintf("adding column %s to destination table %s", col.Name, dstTableName), partitionLog) colsToTypes[col.Name] = col.Type } } if len(colsToTypes) > 0 { - tx, err := s.connector.database.Begin() + tx, err := s.database.Begin() if err != nil { return fmt.Errorf("failed to begin transaction: %w", err) } @@ -173,7 +171,7 @@ func (s 
*SnowflakeAvroSyncHandler) addMissingColumns( alterTableCmd := fmt.Sprintf("ALTER TABLE %s ", dstTableName) alterTableCmd += fmt.Sprintf("ADD COLUMN IF NOT EXISTS \"%s\" %s;", upperCasedColName, sfColType) - s.connector.logger.Info(fmt.Sprintf("altering destination table %s with command `%s`", + s.logger.Info(fmt.Sprintf("altering destination table %s with command `%s`", dstTableName, alterTableCmd), partitionLog) if _, err := tx.ExecContext(ctx, alterTableCmd); err != nil { @@ -185,10 +183,10 @@ func (s *SnowflakeAvroSyncHandler) addMissingColumns( return fmt.Errorf("failed to commit transaction: %w", err) } - s.connector.logger.Info("successfully added missing columns to destination table "+ + s.logger.Info("successfully added missing columns to destination table "+ dstTableName, partitionLog) } else { - s.connector.logger.Info("no missing columns found in destination table "+dstTableName, partitionLog) + s.logger.Info("no missing columns found in destination table "+dstTableName, partitionLog) } return nil @@ -203,12 +201,13 @@ func (s *SnowflakeAvroSyncHandler) getAvroSchema( return nil, fmt.Errorf("failed to define Avro schema: %w", err) } - s.connector.logger.Info(fmt.Sprintf("Avro schema: %v\n", avroSchema)) + s.logger.Info(fmt.Sprintf("Avro schema: %v\n", avroSchema)) return avroSchema, nil } func (s *SnowflakeAvroSyncHandler) writeToAvroFile( ctx context.Context, + env map[string]string, stream *model.QRecordStream, avroSchema *model.QRecordAvroSchemaDefinition, partitionID string, @@ -223,7 +222,7 @@ func (s *SnowflakeAvroSyncHandler) writeToAvroFile( } localFilePath := fmt.Sprintf("%s/%s.avro.zst", tmpDir, partitionID) - s.connector.logger.Info("writing records to local file " + localFilePath) + s.logger.Info("writing records to local file " + localFilePath) avroFile, err := ocfWriter.WriteRecordsToAvroFile(ctx, localFilePath) if err != nil { return nil, fmt.Errorf("failed to write records to Avro file: %w", err) @@ -238,14 +237,14 @@ func (s *SnowflakeAvroSyncHandler) writeToAvroFile( } s3AvroFileKey := fmt.Sprintf("%s/%s/%s.avro.zst", s3o.Prefix, s.config.FlowJobName, partitionID) - s.connector.logger.Info("OCF: Writing records to S3", + s.logger.Info("OCF: Writing records to S3", slog.String(string(shared.PartitionIDKey), partitionID)) provider, err := utils.GetAWSCredentialsProvider(ctx, "snowflake", utils.PeerAWSCredentials{}) if err != nil { return nil, err } - avroFile, err := ocfWriter.WriteRecordsToS3(ctx, s3o.Bucket, s3AvroFileKey, provider) + avroFile, err := ocfWriter.WriteRecordsToS3(ctx, env, s3o.Bucket, s3AvroFileKey, provider) if err != nil { return nil, fmt.Errorf("failed to write records to S3: %w", err) } @@ -258,16 +257,16 @@ func (s *SnowflakeAvroSyncHandler) writeToAvroFile( func (s *SnowflakeAvroSyncHandler) putFileToStage(ctx context.Context, avroFile *avro.AvroFile, stage string) error { if avroFile.StorageLocation != avro.AvroLocalStorage { - s.connector.logger.Info("no file to put to stage") + s.logger.Info("no file to put to stage") return nil } putCmd := fmt.Sprintf("PUT file://%s @%s", avroFile.FilePath, stage) - if _, err := s.connector.database.ExecContext(ctx, putCmd); err != nil { + if _, err := s.database.ExecContext(ctx, putCmd); err != nil { return fmt.Errorf("failed to put file to stage: %w", err) } - s.connector.logger.Info(fmt.Sprintf("put file %s to stage %s", avroFile.FilePath, stage)) + s.logger.Info(fmt.Sprintf("put file %s to stage %s", avroFile.FilePath, stage)) return nil } diff --git a/flow/connectors/snowflake/snowflake.go 
b/flow/connectors/snowflake/snowflake.go index 7a400d78a7..06e3fb881e 100644 --- a/flow/connectors/snowflake/snowflake.go +++ b/flow/connectors/snowflake/snowflake.go @@ -423,8 +423,7 @@ func (c *SnowflakeConnector) SyncRecords(ctx context.Context, req *model.SyncRec return nil, err } - err = c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID) - if err != nil { + if err := c.FinishBatch(ctx, req.FlowJobName, req.SyncBatchID, res.LastSyncedCheckpointID); err != nil { return nil, err } @@ -456,7 +455,7 @@ func (c *SnowflakeConnector) syncRecordsViaAvro( return nil, err } - numRecords, err := avroSyncer.SyncRecords(ctx, destinationTableSchema, stream, req.FlowJobName) + numRecords, err := avroSyncer.SyncRecords(ctx, req.Env, destinationTableSchema, stream, req.FlowJobName) if err != nil { return nil, err } diff --git a/flow/connectors/utils/avro/avro_writer.go b/flow/connectors/utils/avro/avro_writer.go index 6f193be88b..ee72e2c28b 100644 --- a/flow/connectors/utils/avro/avro_writer.go +++ b/flow/connectors/utils/avro/avro_writer.go @@ -23,6 +23,7 @@ import ( "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/peerdbenv" "github.com/PeerDB-io/peer-flow/shared" ) @@ -187,7 +188,11 @@ func (p *peerDBOCFWriter) WriteOCF(ctx context.Context, w io.Writer) (int, error } func (p *peerDBOCFWriter) WriteRecordsToS3( - ctx context.Context, bucketName, key string, s3Creds utils.AWSCredentialsProvider, + ctx context.Context, + env map[string]string, + bucketName string, + key string, + s3Creds utils.AWSCredentialsProvider, ) (*AvroFile, error) { logger := shared.LoggerFromCtx(ctx) s3svc, err := utils.CreateS3Client(ctx, s3Creds) @@ -215,12 +220,23 @@ func (p *peerDBOCFWriter) WriteRecordsToS3( numRows, writeOcfError = p.WriteOCF(ctx, w) }() - _, err = manager.NewUploader(s3svc).Upload(ctx, &s3.PutObjectInput{ + partSize, err := peerdbenv.PeerDBS3PartSize(ctx, env) + if err != nil { + return nil, fmt.Errorf("could not get s3 part size config: %w", err) + } + + // Create the uploader using the AWS SDK v2 manager + uploader := manager.NewUploader(s3svc, func(u *manager.Uploader) { + if partSize > 0 { + u.PartSize = partSize + } + }) + + if _, err := uploader.Upload(ctx, &s3.PutObjectInput{ Bucket: aws.String(bucketName), Key: aws.String(key), Body: r, - }) - if err != nil { + }); err != nil { s3Path := "s3://" + bucketName + "/" + key logger.Error("failed to upload file", slog.Any("error", err), slog.String("s3_path", s3Path)) return nil, fmt.Errorf("failed to upload file: %w", err) diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go index 1e2f225906..4810faf0ae 100644 --- a/flow/peerdbenv/dynamicconf.go +++ b/flow/peerdbenv/dynamicconf.go @@ -116,6 +116,14 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`, ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE, TargetForSetting: protos.DynconfTarget_CLICKHOUSE, }, + { + Name: "PEERDB_S3_PART_SIZE", + Description: "S3 upload part size, may need to increase for large batches", + DefaultValue: "0", + ValueType: protos.DynconfValueType_INT, + ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE, + TargetForSetting: protos.DynconfTarget_ALL, + }, { Name: "PEERDB_QUEUE_FORCE_TOPIC_CREATION", Description: "Force auto topic creation in mirrors, applies to Kafka and PubSub mirrors", @@ -340,6 +348,10 @@ func PeerDBClickHouseAWSS3BucketName(ctx context.Context, env 
map[string]string)
 	return dynLookup(ctx, env, "PEERDB_CLICKHOUSE_AWS_S3_BUCKET_NAME")
 }

+func PeerDBS3PartSize(ctx context.Context, env map[string]string) (int64, error) {
+	return dynamicConfSigned[int64](ctx, env, "PEERDB_S3_PART_SIZE")
+}
+
 // Kafka has topic auto create as an option, auto.create.topics.enable
 // But non-dedicated cluster maybe can't set config, may want peerdb to create topic. Similar for PubSub
 func PeerDBQueueForceTopicCreation(ctx context.Context, env map[string]string) (bool, error) {

From 825c778ac43836811697ba1db09c42734c2f0b86 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philip=20Dub=C3=A9?=
Date: Thu, 14 Nov 2024 13:27:44 +0000
Subject: [PATCH 26/59] update s3 part size config description (#2252)

#2184 feedback
---
 flow/peerdbenv/dynamicconf.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go
index 4810faf0ae..17b3b48bb0 100644
--- a/flow/peerdbenv/dynamicconf.go
+++ b/flow/peerdbenv/dynamicconf.go
@@ -118,7 +118,7 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
 	},
 	{
 		Name:             "PEERDB_S3_PART_SIZE",
-		Description:      "S3 upload part size, may need to increase for large batches",
+		Description:      "S3 upload part size in bytes, may need to increase for large batches. https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html",
 		DefaultValue:     "0",
 		ValueType:        protos.DynconfValueType_INT,
 		ApplyMode:        protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,

From 1ce7723a65cbc8855b99107776ac90cb3f780fac Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philip=20Dub=C3=A9?=
Date: Thu, 14 Nov 2024 13:38:08 +0000
Subject: [PATCH 27/59] fix lint (#2253)

should really not use auto-merge so much when linting is not actually a
required check
---
 flow/peerdbenv/dynamicconf.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go
index 17b3b48bb0..1eb53d3cb2 100644
--- a/flow/peerdbenv/dynamicconf.go
+++ b/flow/peerdbenv/dynamicconf.go
@@ -117,8 +117,9 @@ DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
 		TargetForSetting: protos.DynconfTarget_CLICKHOUSE,
 	},
 	{
-		Name:             "PEERDB_S3_PART_SIZE",
-		Description:      "S3 upload part size in bytes, may need to increase for large batches. https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html",
+		Name: "PEERDB_S3_PART_SIZE",
+		Description: "S3 upload part size in bytes, may need to increase for large batches. " +
+			"https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html",
 		DefaultValue:     "0",
 		ValueType:        protos.DynconfValueType_INT,
 		ApplyMode:        protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,

From 350686f6d7e8532af1013796488f23a61cd6589e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Philip=20Dub=C3=A9?=
Date: Thu, 14 Nov 2024 13:45:52 +0000
Subject: [PATCH 28/59] Enable wal heartbeats by default (#2217)

users on versions predating PG14 should change the query or disable WAL
heartbeats
---
 flow/peerdbenv/dynamicconf.go | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go
index 1eb53d3cb2..c86c4616a1 100644
--- a/flow/peerdbenv/dynamicconf.go
+++ b/flow/peerdbenv/dynamicconf.go
@@ -68,17 +68,14 @@ var DynamicSettings = [...]*protos.DynamicSetting{
 	{
 		Name:             "PEERDB_ENABLE_WAL_HEARTBEAT",
 		Description:      "Enables WAL heartbeat to prevent replication slot lag from increasing during times of no activity",
-		DefaultValue:     "false",
+		DefaultValue:     "true",
 		ValueType:        protos.DynconfValueType_BOOL,
 		ApplyMode:        protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
 		TargetForSetting: protos.DynconfTarget_ALL,
 	},
 	{
-		Name: "PEERDB_WAL_HEARTBEAT_QUERY",
-		DefaultValue: `BEGIN;
-DROP AGGREGATE IF EXISTS PEERDB_EPHEMERAL_HEARTBEAT(float4);
-CREATE AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4) (SFUNC = float4pl, STYPE = float4);
-DROP AGGREGATE PEERDB_EPHEMERAL_HEARTBEAT(float4); END;`,
+		Name:         "PEERDB_WAL_HEARTBEAT_QUERY",
+		DefaultValue: "SELECT pg_logical_emit_message(false,'peerdb_heartbeat','')",
 		ValueType:    protos.DynconfValueType_STRING,
 		Description:  "SQL to run during each WAL heartbeat",
 		ApplyMode:    protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,

From 42b02080ff91a05b52ef5a1b02c7991a4307c968 Mon Sep 17 00:00:00 2001
From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com>
Date: Thu, 14 Nov 2024 20:26:22 +0530
Subject: [PATCH 29/59] feat: add maintenance mode for upgrades (#2211)

- Introduces Maintenance mode (status is available via dynamic config:
  `PEERDB_MAINTENANCE_MODE_ENABLED`)
- Maintenance mode consists of 2 Workflows:
  - `StartMaintenance` - for pre-upgrade, responsible for
    - Waiting for running snapshots
    - Updating dynamic config to true
    - Pausing and backing up currently running mirrors
  - `EndMaintenance` - for post-upgrade, responsible for
    - Resuming backed up mirrors
    - Updating dynamic config to false
- During the upgrade (between `Start` and `End`), mirrors cannot be
  mutated/created in any way.
- There is also an instance info API which returns `Ready`/`Maintenance`
  and can be used for UI changes later.

There are 2 ways to trigger these 2 workflows:

1. API call to flow-api
2. Running the new `maintenance` entrypoint with the respective args

A new task queue is added so that the maintenance tasks can be spun up
even during pre-upgrade hooks (from versions earlier than the one
containing this PR); this also ensures that the latest version of the
maintenance flows always runs, irrespective of the old version.
--- docker-bake.hcl | 30 ++ flow/activities/maintenance_activity.go | 284 ++++++++++++++++ flow/alerting/alerting.go | 6 +- flow/cmd/api.go | 41 +-- flow/cmd/handler.go | 66 ++++ flow/cmd/maintenance.go | 246 ++++++++++++++ flow/cmd/mirror_status.go | 15 +- flow/cmd/settings.go | 3 +- flow/cmd/validate_mirror.go | 12 + flow/cmd/worker.go | 14 +- flow/go.mod | 47 ++- flow/go.sum | 103 +++++- flow/main.go | 88 +++++ flow/peerdbenv/config.go | 6 + flow/peerdbenv/dynamicconf.go | 32 ++ flow/shared/constants.go | 5 +- flow/shared/telemetry/event_types.go | 10 +- flow/shared/worklow.go | 27 ++ flow/workflows/activities.go | 5 +- flow/workflows/maintenance_flow.go | 305 ++++++++++++++++++ flow/workflows/register.go | 3 + .../migrations/V40__maintenance_flows.sql | 29 ++ protos/flow.proto | 25 ++ protos/route.proto | 40 +++ stacks/flow.Dockerfile | 15 + stacks/peerdb-server.Dockerfile | 4 + stacks/peerdb-ui.Dockerfile | 3 + 27 files changed, 1393 insertions(+), 71 deletions(-) create mode 100644 flow/activities/maintenance_activity.go create mode 100644 flow/cmd/maintenance.go create mode 100644 flow/shared/worklow.go create mode 100644 flow/workflows/maintenance_flow.go create mode 100644 nexus/catalog/migrations/V40__maintenance_flows.sql diff --git a/docker-bake.hcl b/docker-bake.hcl index 6e6098ca14..4927cd5505 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -16,6 +16,7 @@ group "default" { "flow-worker", "flow-api", "flow-snapshot-worker", + "flow-maintenance", "peerdb-ui" ] } @@ -45,6 +46,9 @@ target "flow-snapshot-worker" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/flow-snapshot-worker:${TAG}", "${REGISTRY}/flow-snapshot-worker:${SHA_SHORT}", @@ -59,12 +63,32 @@ target "flow-worker" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/flow-worker:${TAG}", "${REGISTRY}/flow-worker:${SHA_SHORT}", ] } +target "flow-maintenance" { + context = "." + dockerfile = "stacks/flow.Dockerfile" + target = "flow-maintenance" + platforms = [ + "linux/amd64", + "linux/arm64", + ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } + tags = [ + "${REGISTRY}/flow-maintenance:${TAG}", + "${REGISTRY}/flow-maintenance:${SHA_SHORT}", + ] +} + target "peerdb" { context = "." 
dockerfile = "stacks/peerdb-server.Dockerfile" @@ -72,6 +96,9 @@ target "peerdb" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/peerdb-server:${TAG}", "${REGISTRY}/peerdb-server:${SHA_SHORT}", @@ -85,6 +112,9 @@ target "peerdb-ui" { "linux/amd64", "linux/arm64", ] + args = { + PEERDB_VERSION_SHA_SHORT = "${SHA_SHORT}" + } tags = [ "${REGISTRY}/peerdb-ui:${TAG}", "${REGISTRY}/peerdb-ui:${SHA_SHORT}", diff --git a/flow/activities/maintenance_activity.go b/flow/activities/maintenance_activity.go new file mode 100644 index 0000000000..be42cc8e56 --- /dev/null +++ b/flow/activities/maintenance_activity.go @@ -0,0 +1,284 @@ +package activities + +import ( + "context" + "fmt" + "log/slog" + "time" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "go.temporal.io/sdk/activity" + "go.temporal.io/sdk/client" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/PeerDB-io/peer-flow/alerting" + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/peerdbenv" + "github.com/PeerDB-io/peer-flow/shared" + "github.com/PeerDB-io/peer-flow/shared/telemetry" +) + +const ( + mirrorStateBackup = "backup" + mirrorStateRestored = "restore" +) + +type MaintenanceActivity struct { + CatalogPool *pgxpool.Pool + Alerter *alerting.Alerter + TemporalClient client.Client +} + +func (a *MaintenanceActivity) GetAllMirrors(ctx context.Context) (*protos.MaintenanceMirrors, error) { + rows, err := a.CatalogPool.Query(ctx, ` + select distinct on(name) + id, name, workflow_id, + created_at, coalesce(query_string, '')='' is_cdc + from flows + `) + if err != nil { + return &protos.MaintenanceMirrors{}, err + } + + maintenanceMirrorItems, err := pgx.CollectRows(rows, func(row pgx.CollectableRow) (*protos.MaintenanceMirror, error) { + var info protos.MaintenanceMirror + var createdAt time.Time + err := row.Scan(&info.MirrorId, &info.MirrorName, &info.WorkflowId, &createdAt, &info.IsCdc) + info.MirrorCreatedAt = timestamppb.New(createdAt) + return &info, err + }) + return &protos.MaintenanceMirrors{ + Mirrors: maintenanceMirrorItems, + }, err +} + +func (a *MaintenanceActivity) getMirrorStatus(ctx context.Context, mirror *protos.MaintenanceMirror) (protos.FlowStatus, error) { + return shared.GetWorkflowStatus(ctx, a.TemporalClient, mirror.WorkflowId) +} + +func (a *MaintenanceActivity) WaitForRunningSnapshots(ctx context.Context) (*protos.MaintenanceMirrors, error) { + mirrors, err := a.GetAllMirrors(ctx) + if err != nil { + return &protos.MaintenanceMirrors{}, err + } + + slog.Info("Found mirrors for snapshot check", "mirrors", mirrors, "len", len(mirrors.Mirrors)) + + for _, mirror := range mirrors.Mirrors { + lastStatus, err := a.checkAndWaitIfSnapshot(ctx, mirror, 2*time.Minute) + if err != nil { + return &protos.MaintenanceMirrors{}, err + } + slog.Info("Finished checking and waiting for snapshot", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "lastStatus", lastStatus.String()) + } + slog.Info("Finished checking and waiting for all mirrors to finish snapshot") + return mirrors, nil +} + +func (a *MaintenanceActivity) checkAndWaitIfSnapshot( + ctx context.Context, + mirror *protos.MaintenanceMirror, + logEvery time.Duration, +) (protos.FlowStatus, error) { + // In case a mirror was just kicked off, it shows up in the running state, we wait for a bit before checking for snapshot + if mirror.MirrorCreatedAt.AsTime().After(time.Now().Add(-30 
* time.Second)) { + slog.Info("Mirror was created less than 30 seconds ago, waiting for it to be ready before checking for snapshot", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId) + time.Sleep(30 * time.Second) + } + + flowStatus, err := RunEveryIntervalUntilFinish(ctx, func() (bool, protos.FlowStatus, error) { + activity.RecordHeartbeat(ctx, fmt.Sprintf("Waiting for mirror %s to finish snapshot", mirror.MirrorName)) + mirrorStatus, err := a.getMirrorStatus(ctx, mirror) + if err != nil { + return false, mirrorStatus, err + } + if mirrorStatus == protos.FlowStatus_STATUS_SNAPSHOT || mirrorStatus == protos.FlowStatus_STATUS_SETUP { + return false, mirrorStatus, nil + } + return true, mirrorStatus, nil + }, 10*time.Second, fmt.Sprintf("Waiting for mirror %s to finish snapshot", mirror.MirrorName), logEvery) + return flowStatus, err +} + +func (a *MaintenanceActivity) EnableMaintenanceMode(ctx context.Context) error { + slog.Info("Enabling maintenance mode") + return peerdbenv.UpdatePeerDBMaintenanceModeEnabled(ctx, a.CatalogPool, true) +} + +func (a *MaintenanceActivity) BackupAllPreviouslyRunningFlows(ctx context.Context, mirrors *protos.MaintenanceMirrors) error { + tx, err := a.CatalogPool.Begin(ctx) + if err != nil { + return err + } + defer shared.RollbackTx(tx, slog.Default()) + + for _, mirror := range mirrors.Mirrors { + _, err := tx.Exec(ctx, ` + insert into maintenance.maintenance_flows + (flow_id, flow_name, workflow_id, flow_created_at, is_cdc, state, from_version) + values + ($1, $2, $3, $4, $5, $6, $7) + `, mirror.MirrorId, mirror.MirrorName, mirror.WorkflowId, mirror.MirrorCreatedAt.AsTime(), mirror.IsCdc, mirrorStateBackup, + peerdbenv.PeerDBVersionShaShort()) + if err != nil { + return err + } + } + return tx.Commit(ctx) +} + +func (a *MaintenanceActivity) PauseMirrorIfRunning(ctx context.Context, mirror *protos.MaintenanceMirror) (bool, error) { + mirrorStatus, err := a.getMirrorStatus(ctx, mirror) + if err != nil { + return false, err + } + + slog.Info("Checking if mirror is running", "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "status", mirrorStatus.String()) + + if mirrorStatus != protos.FlowStatus_STATUS_RUNNING { + return false, nil + } + + slog.Info("Pausing mirror for maintenance", "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId) + + if err := model.FlowSignal.SignalClientWorkflow(ctx, a.TemporalClient, mirror.WorkflowId, "", model.PauseSignal); err != nil { + slog.Error("Error signaling mirror running to pause for maintenance", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "error", err) + return false, err + } + + return RunEveryIntervalUntilFinish(ctx, func() (bool, bool, error) { + updatedMirrorStatus, statusErr := a.getMirrorStatus(ctx, mirror) + if statusErr != nil { + return false, false, statusErr + } + activity.RecordHeartbeat(ctx, "Waiting for mirror to pause with current status "+updatedMirrorStatus.String()) + if statusErr := model.FlowSignal.SignalClientWorkflow(ctx, a.TemporalClient, mirror.WorkflowId, "", + model.PauseSignal); statusErr != nil { + return false, false, statusErr + } + if updatedMirrorStatus == protos.FlowStatus_STATUS_PAUSED { + return true, true, nil + } + return false, false, nil + }, 10*time.Second, "Waiting for mirror to pause", 30*time.Second) +} + +func (a *MaintenanceActivity) CleanBackedUpFlows(ctx context.Context) error { + _, err := a.CatalogPool.Exec(ctx, ` + update maintenance.maintenance_flows + set state = $1, + restored_at = now(), + to_version = $2 + where 
state = $3 + `, mirrorStateRestored, peerdbenv.PeerDBVersionShaShort(), mirrorStateBackup) + return err +} + +func (a *MaintenanceActivity) GetBackedUpFlows(ctx context.Context) (*protos.MaintenanceMirrors, error) { + rows, err := a.CatalogPool.Query(ctx, ` + select flow_id, flow_name, workflow_id, flow_created_at, is_cdc + from maintenance.maintenance_flows + where state = $1 + `, mirrorStateBackup) + if err != nil { + return nil, err + } + + maintenanceMirrorItems, err := pgx.CollectRows(rows, func(row pgx.CollectableRow) (*protos.MaintenanceMirror, error) { + var info protos.MaintenanceMirror + var createdAt time.Time + err := row.Scan(&info.MirrorId, &info.MirrorName, &info.WorkflowId, &createdAt, &info.IsCdc) + info.MirrorCreatedAt = timestamppb.New(createdAt) + return &info, err + }) + if err != nil { + return nil, err + } + + return &protos.MaintenanceMirrors{ + Mirrors: maintenanceMirrorItems, + }, nil +} + +func (a *MaintenanceActivity) ResumeMirror(ctx context.Context, mirror *protos.MaintenanceMirror) error { + mirrorStatus, err := a.getMirrorStatus(ctx, mirror) + if err != nil { + return err + } + + if mirrorStatus != protos.FlowStatus_STATUS_PAUSED { + slog.Error("Cannot resume mirror that is not paused", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "status", mirrorStatus.String()) + return nil + } + + // There can also be "workflow already completed" errors, what should we do in that case? + if err := model.FlowSignal.SignalClientWorkflow(ctx, a.TemporalClient, mirror.WorkflowId, "", model.NoopSignal); err != nil { + slog.Error("Error signaling mirror to resume for maintenance", + "mirror", mirror.MirrorName, "workflowId", mirror.WorkflowId, "error", err) + return err + } + return nil +} + +func (a *MaintenanceActivity) DisableMaintenanceMode(ctx context.Context) error { + slog.Info("Disabling maintenance mode") + return peerdbenv.UpdatePeerDBMaintenanceModeEnabled(ctx, a.CatalogPool, false) +} + +func (a *MaintenanceActivity) BackgroundAlerter(ctx context.Context) error { + heartbeatTicker := time.NewTicker(30 * time.Second) + defer heartbeatTicker.Stop() + + alertTicker := time.NewTicker(time.Duration(peerdbenv.PeerDBMaintenanceModeWaitAlertSeconds()) * time.Second) + defer alertTicker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-heartbeatTicker.C: + activity.RecordHeartbeat(ctx, "Maintenance Workflow is still running") + case <-alertTicker.C: + slog.Warn("Maintenance Workflow is still running") + a.Alerter.LogNonFlowWarning(ctx, telemetry.MaintenanceWait, "Waiting", "Maintenance mode is still running") + } + } +} + +func RunEveryIntervalUntilFinish[T any]( + ctx context.Context, + runFunc func() (finished bool, result T, err error), + runInterval time.Duration, + logMessage string, + logInterval time.Duration, +) (T, error) { + runTicker := time.NewTicker(runInterval) + defer runTicker.Stop() + + logTicker := time.NewTicker(logInterval) + defer logTicker.Stop() + var lastResult T + for { + select { + case <-ctx.Done(): + return lastResult, ctx.Err() + case <-runTicker.C: + finished, result, err := runFunc() + lastResult = result + if err != nil { + return lastResult, err + } + if finished { + return lastResult, err + } + case <-logTicker.C: + slog.Info(logMessage, "lastResult", lastResult) + } + } +} diff --git a/flow/alerting/alerting.go b/flow/alerting/alerting.go index e9df410f91..5f05005d14 100644 --- a/flow/alerting/alerting.go +++ b/flow/alerting/alerting.go @@ -377,10 +377,10 @@ func (a *Alerter) 
sendTelemetryMessage( } if a.snsTelemetrySender != nil { - if status, err := a.snsTelemetrySender.SendMessage(ctx, details, details, attributes); err != nil { + if response, err := a.snsTelemetrySender.SendMessage(ctx, details, details, attributes); err != nil { logger.Warn("failed to send message to snsTelemetrySender", slog.Any("error", err)) } else { - logger.Info("received status from snsTelemetrySender", slog.String("status", status)) + logger.Info("received response from snsTelemetrySender", slog.String("response", response)) } } @@ -388,7 +388,7 @@ func (a *Alerter) sendTelemetryMessage( if status, err := a.incidentIoTelemetrySender.SendMessage(ctx, details, details, attributes); err != nil { logger.Warn("failed to send message to incidentIoTelemetrySender", slog.Any("error", err)) } else { - logger.Info("received status from incident.io", slog.String("status", status)) + logger.Info("received response from incident.io", slog.String("response", status)) } } } diff --git a/flow/cmd/api.go b/flow/cmd/api.go index ca225e4292..f81f9d923d 100644 --- a/flow/cmd/api.go +++ b/flow/cmd/api.go @@ -191,24 +191,7 @@ func APIMain(ctx context.Context, args *APIServerParams) error { Logger: slog.New(shared.NewSlogHandler(slog.NewJSONHandler(os.Stdout, nil))), } - if peerdbenv.PeerDBTemporalEnableCertAuth() { - slog.Info("Using temporal certificate/key for authentication") - - certs, err := parseTemporalCertAndKey(ctx) - if err != nil { - return fmt.Errorf("unable to base64 decode certificate and key: %w", err) - } - - connOptions := client.ConnectionOptions{ - TLS: &tls.Config{ - Certificates: certs, - MinVersion: tls.VersionTLS13, - }, - } - clientOptions.ConnectionOptions = connOptions - } - - tc, err := client.Dial(clientOptions) + tc, err := setupTemporalClient(ctx, clientOptions) if err != nil { return fmt.Errorf("unable to create Temporal client: %w", err) } @@ -309,3 +292,25 @@ func APIMain(ctx context.Context, args *APIServerParams) error { return nil } + +func setupTemporalClient(ctx context.Context, clientOptions client.Options) (client.Client, error) { + if peerdbenv.PeerDBTemporalEnableCertAuth() { + slog.Info("Using temporal certificate/key for authentication") + + certs, err := parseTemporalCertAndKey(ctx) + if err != nil { + return nil, fmt.Errorf("unable to base64 decode certificate and key: %w", err) + } + + connOptions := client.ConnectionOptions{ + TLS: &tls.Config{ + Certificates: certs, + MinVersion: tls.VersionTLS13, + }, + } + clientOptions.ConnectionOptions = connOptions + } + + tc, err := client.Dial(clientOptions) + return tc, err +} diff --git a/flow/cmd/handler.go b/flow/cmd/handler.go index e2d1da2e39..6caefaf47e 100644 --- a/flow/cmd/handler.go +++ b/flow/cmd/handler.go @@ -19,6 +19,7 @@ import ( "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" + "github.com/PeerDB-io/peer-flow/peerdbenv" "github.com/PeerDB-io/peer-flow/shared" peerflow "github.com/PeerDB-io/peer-flow/workflows" ) @@ -327,6 +328,17 @@ func (h *FlowRequestHandler) FlowStateChange( ) (*protos.FlowStateChangeResponse, error) { logs := slog.String("flowJobName", req.FlowJobName) slog.Info("FlowStateChange called", logs, slog.Any("req", req)) + underMaintenance, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + slog.Error("unable to check maintenance mode", logs, slog.Any("error", err)) + return nil, fmt.Errorf("unable to load dynamic config: %w", err) + } + + if underMaintenance { + 
slog.Warn("Flow state change request denied due to maintenance", logs) + return nil, errors.New("PeerDB is under maintenance") + } + workflowID, err := h.getWorkflowID(ctx, req.FlowJobName) if err != nil { slog.Error("[flow-state-change] unable to get workflowID", logs, slog.Any("error", err)) @@ -488,6 +500,14 @@ func (h *FlowRequestHandler) ResyncMirror( ctx context.Context, req *protos.ResyncMirrorRequest, ) (*protos.ResyncMirrorResponse, error) { + underMaintenance, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + return nil, fmt.Errorf("unable to get maintenance mode status: %w", err) + } + if underMaintenance { + return nil, errors.New("PeerDB is under maintenance") + } + isCDC, err := h.isCDCFlow(ctx, req.FlowJobName) if err != nil { return nil, err @@ -521,3 +541,49 @@ func (h *FlowRequestHandler) ResyncMirror( } return &protos.ResyncMirrorResponse{}, nil } + +func (h *FlowRequestHandler) GetInstanceInfo(ctx context.Context, in *protos.InstanceInfoRequest) (*protos.InstanceInfoResponse, error) { + enabled, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + slog.Error("unable to get maintenance mode status", slog.Any("error", err)) + return &protos.InstanceInfoResponse{ + Status: protos.InstanceStatus_INSTANCE_STATUS_UNKNOWN, + }, fmt.Errorf("unable to get maintenance mode status: %w", err) + } + if enabled { + return &protos.InstanceInfoResponse{ + Status: protos.InstanceStatus_INSTANCE_STATUS_MAINTENANCE, + }, nil + } + return &protos.InstanceInfoResponse{ + Status: protos.InstanceStatus_INSTANCE_STATUS_READY, + }, nil +} + +func (h *FlowRequestHandler) Maintenance(ctx context.Context, in *protos.MaintenanceRequest) (*protos.MaintenanceResponse, error) { + taskQueueId := shared.MaintenanceFlowTaskQueue + if in.UsePeerflowTaskQueue { + taskQueueId = shared.PeerFlowTaskQueue + } + switch { + case in.Status == protos.MaintenanceStatus_MAINTENANCE_STATUS_START: + workflowRun, err := peerflow.RunStartMaintenanceWorkflow(ctx, h.temporalClient, &protos.StartMaintenanceFlowInput{}, taskQueueId) + if err != nil { + return nil, err + } + return &protos.MaintenanceResponse{ + WorkflowId: workflowRun.GetID(), + RunId: workflowRun.GetRunID(), + }, nil + case in.Status == protos.MaintenanceStatus_MAINTENANCE_STATUS_END: + workflowRun, err := peerflow.RunEndMaintenanceWorkflow(ctx, h.temporalClient, &protos.EndMaintenanceFlowInput{}, taskQueueId) + if err != nil { + return nil, err + } + return &protos.MaintenanceResponse{ + WorkflowId: workflowRun.GetID(), + RunId: workflowRun.GetRunID(), + }, nil + } + return nil, errors.New("invalid maintenance status") +} diff --git a/flow/cmd/maintenance.go b/flow/cmd/maintenance.go new file mode 100644 index 0000000000..474a67db37 --- /dev/null +++ b/flow/cmd/maintenance.go @@ -0,0 +1,246 @@ +package cmd + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log/slog" + "os" + + "github.com/aws/smithy-go/ptr" + "go.temporal.io/sdk/client" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + k8sErrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/peerdbenv" + "github.com/PeerDB-io/peer-flow/shared" + peerflow "github.com/PeerDB-io/peer-flow/workflows" +) + +type MaintenanceCLIParams struct { + TemporalHostPort string + TemporalNamespace string + Mode string + 
FlowGrpcAddress string
+    SkipIfK8sServiceMissing string
+    FlowTlsEnabled bool
+    SkipOnApiVersionMatch bool
+    SkipOnNoMirrors bool
+    UseMaintenanceTaskQueue bool
+    AssumeSkippedMaintenanceWorkflows bool
+}
+
+type StartMaintenanceResult struct {
+    SkippedReason *string `json:"skippedReason,omitempty"`
+    APIVersion string `json:"apiVersion,omitempty"`
+    CLIVersion string `json:"cliVersion,omitempty"`
+    Skipped bool `json:"skipped,omitempty"`
+}
+
+// MaintenanceMain is the entry point for the maintenance command. It requires access to a Temporal client and
+// exits after running the requested maintenance workflow.
+func MaintenanceMain(ctx context.Context, args *MaintenanceCLIParams) error {
+    slog.Info("Starting Maintenance Mode CLI")
+    clientOptions := client.Options{
+        HostPort: args.TemporalHostPort,
+        Namespace: args.TemporalNamespace,
+        Logger: slog.New(shared.NewSlogHandler(slog.NewJSONHandler(os.Stdout, nil))),
+    }
+    tc, err := setupTemporalClient(ctx, clientOptions)
+    if err != nil {
+        return fmt.Errorf("unable to create Temporal client: %w", err)
+    }
+
+    taskQueueId := shared.MaintenanceFlowTaskQueue
+    if !args.UseMaintenanceTaskQueue {
+        taskQueueId = shared.PeerFlowTaskQueue
+    }
+
+    if args.Mode == "start" {
+        if args.AssumeSkippedMaintenanceWorkflows {
+            slog.Info("Assuming maintenance workflows were skipped")
+            return WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{
+                Skipped: true,
+                SkippedReason: ptr.String("Assumed skipped by CLI Flag"),
+                CLIVersion: peerdbenv.PeerDBVersionShaShort(),
+            })
+        }
+        skipped, err := skipStartMaintenanceIfNeeded(ctx, args)
+        if err != nil {
+            return err
+        }
+        if skipped {
+            return nil
+        }
+        slog.Info("Running start maintenance workflow")
+        workflowRun, err := peerflow.RunStartMaintenanceWorkflow(ctx, tc, &protos.StartMaintenanceFlowInput{}, taskQueueId)
+        if err != nil {
+            slog.Error("Error running start maintenance workflow", "error", err)
+            return err
+        }
+        var output *protos.StartMaintenanceFlowOutput
+        if err := workflowRun.Get(ctx, &output); err != nil {
+            slog.Error("Error in start maintenance workflow", "error", err)
+            return err
+        }
+        slog.Info("Start maintenance workflow completed", "output", output)
+        return WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{
+            Skipped: false,
+            CLIVersion: peerdbenv.PeerDBVersionShaShort(),
+        })
+    } else if args.Mode == "end" {
+        if input, err := ReadLastMaintenanceOutput(ctx); input != nil || err != nil {
+            if err != nil {
+                return err
+            }
+            slog.Info("Checking if end maintenance workflow should be skipped", "input", input)
+            if input.Skipped {
+                slog.Info("Skipping end maintenance workflow as start maintenance was skipped", "reason", input.SkippedReason)
+                return nil
+            }
+        }
+        workflowRun, err := peerflow.RunEndMaintenanceWorkflow(ctx, tc, &protos.EndMaintenanceFlowInput{}, taskQueueId)
+        if err != nil {
+            slog.Error("Error running end maintenance workflow", "error", err)
+            return err
+        }
+        var output *protos.EndMaintenanceFlowOutput
+        if err := workflowRun.Get(ctx, &output); err != nil {
+            slog.Error("Error in end maintenance workflow", "error", err)
+            return err
+        }
+        slog.Info("End maintenance workflow completed", "output", output)
+    } else {
+        return fmt.Errorf("unknown maintenance mode %s", args.Mode)
+    }
+    slog.Info("Maintenance workflow completed", "mode", args.Mode)
+    return nil
+}
+
+func skipStartMaintenanceIfNeeded(ctx context.Context, args *MaintenanceCLIParams) (bool, error) {
+    if args.SkipIfK8sServiceMissing != "" {
+        slog.Info("Checking if k8s service exists",
"service", args.SkipIfK8sServiceMissing) + exists, err := CheckK8sServiceExistence(ctx, args.SkipIfK8sServiceMissing) + if err != nil { + return false, err + } + if !exists { + slog.Info("Skipping maintenance workflow due to missing k8s service", "service", args.SkipIfK8sServiceMissing) + return true, WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: true, + SkippedReason: ptr.String(fmt.Sprintf("K8s service %s missing", args.SkipIfK8sServiceMissing)), + CLIVersion: peerdbenv.PeerDBVersionShaShort(), + }) + } + } + if args.SkipOnApiVersionMatch || args.SkipOnNoMirrors { + if args.FlowGrpcAddress == "" { + return false, errors.New("flow address is required when skipping based on API") + } + slog.Info("Constructing flow client") + transportCredentials := credentials.NewTLS(&tls.Config{ + MinVersion: tls.VersionTLS12, + }) + if !args.FlowTlsEnabled { + transportCredentials = insecure.NewCredentials() + } + conn, err := grpc.NewClient(args.FlowGrpcAddress, + grpc.WithTransportCredentials(transportCredentials), + ) + if err != nil { + return false, fmt.Errorf("unable to dial grpc flow server: %w", err) + } + peerFlowClient := protos.NewFlowServiceClient(conn) + if args.SkipOnApiVersionMatch { + slog.Info("Checking if CLI version matches API version", "cliVersion", peerdbenv.PeerDBVersionShaShort()) + version, err := peerFlowClient.GetVersion(ctx, &protos.PeerDBVersionRequest{}) + if err != nil { + return false, err + } + slog.Info("Got version from flow", "version", version.Version) + if version.Version == peerdbenv.PeerDBVersionShaShort() { + slog.Info("Skipping maintenance workflow due to matching versions") + return true, WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: true, + SkippedReason: ptr.String(fmt.Sprintf("CLI version %s matches API version %s", peerdbenv.PeerDBVersionShaShort(), + version.Version)), + APIVersion: version.Version, + CLIVersion: peerdbenv.PeerDBVersionShaShort(), + }) + } + } + if args.SkipOnNoMirrors { + slog.Info("Checking if there are any mirrors") + mirrors, err := peerFlowClient.ListMirrors(ctx, &protos.ListMirrorsRequest{}) + if err != nil { + return false, err + } + slog.Info("Got mirrors from flow", "mirrors", mirrors.Mirrors) + if len(mirrors.Mirrors) == 0 { + slog.Info("Skipping maintenance workflow due to no mirrors") + return true, WriteMaintenanceOutputToCatalog(ctx, StartMaintenanceResult{ + Skipped: true, + SkippedReason: ptr.String("No mirrors found"), + }) + } + } + } + return false, nil +} + +func WriteMaintenanceOutputToCatalog(ctx context.Context, result StartMaintenanceResult) error { + pool, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) + if err != nil { + return err + } + _, err = pool.Exec(ctx, ` + insert into maintenance.start_maintenance_outputs + (cli_version, api_version, skipped, skipped_reason) + values + ($1, $2, $3, $4) + `, result.CLIVersion, result.APIVersion, result.Skipped, result.SkippedReason) + return err +} + +func ReadLastMaintenanceOutput(ctx context.Context) (*StartMaintenanceResult, error) { + pool, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) + if err != nil { + return nil, err + } + var result StartMaintenanceResult + if err := pool.QueryRow(ctx, ` + select cli_version, api_version, skipped, skipped_reason + from maintenance.start_maintenance_outputs + order by created_at desc + limit 1 + `).Scan(&result.CLIVersion, &result.APIVersion, &result.Skipped, &result.SkippedReason); err != nil { + return nil, err + } + return &result, nil +} + +func 
CheckK8sServiceExistence(ctx context.Context, serviceName string) (bool, error) { + config, err := rest.InClusterConfig() + if err != nil { + return false, err + } + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return false, err + } + _, err = clientset.CoreV1().Services(peerdbenv.GetEnvString("POD_NAMESPACE", "")).Get(ctx, serviceName, v1.GetOptions{}) + if err != nil { + if k8sErrors.IsNotFound(err) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/flow/cmd/mirror_status.go b/flow/cmd/mirror_status.go index a0c4a989e2..58cf20a80a 100644 --- a/flow/cmd/mirror_status.go +++ b/flow/cmd/mirror_status.go @@ -447,20 +447,7 @@ func (h *FlowRequestHandler) isCDCFlow(ctx context.Context, flowJobName string) } func (h *FlowRequestHandler) getWorkflowStatus(ctx context.Context, workflowID string) (protos.FlowStatus, error) { - res, err := h.temporalClient.QueryWorkflow(ctx, workflowID, "", shared.FlowStatusQuery) - if err != nil { - slog.Error(fmt.Sprintf("failed to get status in workflow with ID %s: %s", workflowID, err.Error())) - return protos.FlowStatus_STATUS_UNKNOWN, - fmt.Errorf("failed to get status in workflow with ID %s: %w", workflowID, err) - } - var state protos.FlowStatus - err = res.Get(&state) - if err != nil { - slog.Error(fmt.Sprintf("failed to get status in workflow with ID %s: %s", workflowID, err.Error())) - return protos.FlowStatus_STATUS_UNKNOWN, - fmt.Errorf("failed to get status in workflow with ID %s: %w", workflowID, err) - } - return state, nil + return shared.GetWorkflowStatus(ctx, h.temporalClient, workflowID) } func (h *FlowRequestHandler) getCDCWorkflowState(ctx context.Context, diff --git a/flow/cmd/settings.go b/flow/cmd/settings.go index 12e0728590..dd4755f4ae 100644 --- a/flow/cmd/settings.go +++ b/flow/cmd/settings.go @@ -55,8 +55,7 @@ func (h *FlowRequestHandler) PostDynamicSetting( ctx context.Context, req *protos.PostDynamicSettingRequest, ) (*protos.PostDynamicSettingResponse, error) { - _, err := h.pool.Exec(ctx, `insert into dynamic_settings (config_name, config_value) values ($1, $2) - on conflict (config_name) do update set config_value = $2`, req.Name, req.Value) + err := peerdbenv.UpdateDynamicSetting(ctx, h.pool, req.Name, req.Value) if err != nil { slog.Error("[PostDynamicConfig] failed to execute update setting", slog.Any("error", err)) return nil, err diff --git a/flow/cmd/validate_mirror.go b/flow/cmd/validate_mirror.go index 3e870aa667..83c9d2a073 100644 --- a/flow/cmd/validate_mirror.go +++ b/flow/cmd/validate_mirror.go @@ -14,6 +14,7 @@ import ( connpostgres "github.com/PeerDB-io/peer-flow/connectors/postgres" "github.com/PeerDB-io/peer-flow/connectors/utils" "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/peerdbenv" "github.com/PeerDB-io/peer-flow/shared/telemetry" ) @@ -25,6 +26,17 @@ var ( func (h *FlowRequestHandler) ValidateCDCMirror( ctx context.Context, req *protos.CreateCDCFlowRequest, ) (*protos.ValidateCDCMirrorResponse, error) { + underMaintenance, err := peerdbenv.PeerDBMaintenanceModeEnabled(ctx, nil) + if err != nil { + slog.Error("unable to check maintenance mode", slog.Any("error", err)) + return nil, fmt.Errorf("unable to load dynamic config: %w", err) + } + + if underMaintenance { + slog.Warn("Validate request denied due to maintenance", "flowName", req.ConnectionConfigs.FlowJobName) + return nil, errors.New("PeerDB is under maintenance") + } + if !req.ConnectionConfigs.Resync { mirrorExists, existCheckErr := 
h.CheckIfMirrorNameExists(ctx, req.ConnectionConfigs.FlowJobName) if existCheckErr != nil { diff --git a/flow/cmd/worker.go b/flow/cmd/worker.go index 9db97288cc..5c16376a12 100644 --- a/flow/cmd/worker.go +++ b/flow/cmd/worker.go @@ -30,6 +30,7 @@ type WorkerSetupOptions struct { TemporalMaxConcurrentWorkflowTasks int EnableProfiling bool EnableOtelMetrics bool + UseMaintenanceTaskQueue bool } type workerSetupResponse struct { @@ -124,8 +125,11 @@ func WorkerSetup(opts *WorkerSetupOptions) (*workerSetupResponse, error) { return nil, fmt.Errorf("unable to create Temporal client: %w", err) } slog.Info("Created temporal client") - - taskQueue := peerdbenv.PeerFlowTaskQueueName(shared.PeerFlowTaskQueue) + queueId := shared.PeerFlowTaskQueue + if opts.UseMaintenanceTaskQueue { + queueId = shared.MaintenanceFlowTaskQueue + } + taskQueue := peerdbenv.PeerFlowTaskQueueName(queueId) slog.Info( fmt.Sprintf("Creating temporal worker for queue %v: %v workflow workers %v activity workers", taskQueue, @@ -170,6 +174,12 @@ func WorkerSetup(opts *WorkerSetupOptions) (*workerSetupResponse, error) { OtelManager: otelManager, }) + w.RegisterActivity(&activities.MaintenanceActivity{ + CatalogPool: conn, + Alerter: alerting.NewAlerter(context.Background(), conn), + TemporalClient: c, + }) + return &workerSetupResponse{ Client: c, Worker: w, diff --git a/flow/go.mod b/flow/go.mod index b7eb9d1d65..a11ffb5a7e 100644 --- a/flow/go.mod +++ b/flow/go.mod @@ -53,22 +53,28 @@ require ( github.com/urfave/cli/v3 v3.0.0-alpha9.2 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 github.com/yuin/gopher-lua v1.1.1 - go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel v1.32.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 - go.opentelemetry.io/otel/metric v1.31.0 - go.opentelemetry.io/otel/sdk v1.31.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 + go.opentelemetry.io/otel/metric v1.32.0 + go.opentelemetry.io/otel/sdk v1.32.0 go.opentelemetry.io/otel/sdk/metric v1.31.0 + go.opentelemetry.io/otel/trace v1.32.0 go.temporal.io/api v1.41.0 go.temporal.io/sdk v1.30.0 go.temporal.io/sdk/contrib/opentelemetry v0.6.0 go.uber.org/automaxprocs v1.6.0 golang.org/x/crypto v0.28.0 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 google.golang.org/api v0.204.0 google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 google.golang.org/grpc v1.67.1 google.golang.org/protobuf v1.35.1 + k8s.io/apimachinery v0.31.2 + k8s.io/client-go v0.31.2 ) require ( @@ -105,18 +111,29 @@ require ( github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/dvsekhvalnov/jose2go v1.7.0 // indirect github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/go-control-plane v0.13.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.6 // indirect github.com/getsentry/sentry-go v0.29.1 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer 
v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lestrrat-go/blackmagic v1.0.2 // indirect @@ -124,6 +141,9 @@ require ( github.com/lestrrat-go/httprc v1.0.6 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect github.com/lestrrat-go/option v1.0.1 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mtibben/percent v0.2.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nexus-rpc/sdk-go v0.0.11 // indirect @@ -138,14 +158,23 @@ require ( github.com/segmentio/asm v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/twmb/franz-go/pkg/kmsg v1.9.0 // indirect + github.com/x448/float16 v0.8.4 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/mod v0.21.0 // indirect golang.org/x/term v0.25.0 // indirect google.golang.org/grpc/stats/opentelemetry v0.0.0-20241028142157-ada6787961b3 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/api v0.31.2 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) require ( @@ -165,7 +194,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.3 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.3 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/djherbis/buffer v1.2.0 github.com/djherbis/nio/v3 v3.0.1 github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect @@ -193,7 +222,7 @@ require ( github.com/pborman/uuid v1.2.1 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect @@ -201,8 +230,8 @@ require ( golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/net v0.30.0 // indirect 
golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect diff --git a/flow/go.sum b/flow/go.sum index 71299452d2..7a0380da03 100644 --- a/flow/go.sum +++ b/flow/go.sum @@ -180,8 +180,9 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/danieljoos/wincred v1.2.2 h1:774zMFJrqaeYCK2W57BgAem/MLi6mtSE47MB6BOJ0i0= github.com/danieljoos/wincred v1.2.2/go.mod h1:w7w4Utbrz8lqeMbDAK0lkNJUv5sAOkFi7nd/ogr0Uh8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= @@ -197,6 +198,8 @@ github.com/elastic/elastic-transport-go/v8 v8.6.0 h1:Y2S/FBjx1LlCv5m6pWAF2kDJAHo github.com/elastic/elastic-transport-go/v8 v8.6.0/go.mod h1:YLHer5cj0csTzNFXoNQ8qhtGY1GTvSqPnKWKaqQE3Hk= github.com/elastic/go-elasticsearch/v8 v8.15.0 h1:IZyJhe7t7WI3NEFdcHnf6IJXqpRf+8S8QWLtZYYyBYk= github.com/elastic/go-elasticsearch/v8 v8.15.0/go.mod h1:HCON3zj4btpqs2N1jjsAy4a/fiAul+YBP00mBH4xik8= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -211,6 +214,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc= github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc= github.com/getsentry/sentry-go v0.29.1 h1:DyZuChN8Hz3ARxGVV8ePaNXh1dQ7d76AiB117xcREwA= @@ -228,9 +233,18 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= @@ -270,6 +284,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -279,10 +295,16 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM= +github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= 
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -330,6 +352,10 @@ github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6 h1:IsMZxCuZqKuao2vNdfD82fjjgPLfyHLpR41Z88viRWs= github.com/keybase/go-keychain v0.0.0-20231219164618-57a3676c3af6/go.mod h1:3VeWNIJaW+O5xpRQbPp0Ybqu1vJd/pm7s2F473HRrkw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -341,6 +367,7 @@ github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -365,10 +392,17 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/linkedin/goavro/v2 v2.13.0 h1:L8eI8GcuciwUkt41Ej62joSZS4kKaYIUdze+6for9NU= github.com/linkedin/goavro/v2 v2.13.0/go.mod h1:KXx+erlq+RPlGSPmLF7xGo6SAbh8sCQ53x064+ioxhk= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA= github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= 
github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= @@ -377,6 +411,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/nexus-rpc/sdk-go v0.0.11 h1:qH3Us3spfp50t5ca775V1va2eE6z1zMQDZY4mvbw0CI= github.com/nexus-rpc/sdk-go v0.0.11/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA= +github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To= +github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= @@ -397,8 +435,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= @@ -428,6 +467,8 @@ github.com/slack-go/slack v0.15.0 h1:LE2lj2y9vqqiOf+qIIy0GvEoxgF1N5yLGZffmEZykt0 github.com/slack-go/slack v0.15.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw= github.com/snowflakedb/gosnowflake v1.12.0 h1:Saez8egtn5xAoVMBxFaMu9MYfAG9SS9dpAEXD1/ECIo= github.com/snowflakedb/gosnowflake v1.12.0/go.mod h1:wHfYmZi3zvtWItojesAhWWXBN7+niex2R1h/S7QCZYg= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -458,6 +499,8 @@ github.com/twpayne/go-geos v0.19.0 h1:V7vnLe7gY7JOHLTg8+2oykZOw6wpBLHVNlcnzS2FlG github.com/twpayne/go-geos v0.19.0/go.mod h1:XGpUjCtZf4Ul6BMii6KA4EmJ9JCNhVP1mohdoReopZ8= github.com/urfave/cli/v3 v3.0.0-alpha9.2 h1:CL8llQj3dGRLVQQzHxS+ZYRLanOuhyK1fXgLKD+qV+Y= github.com/urfave/cli/v3 v3.0.0-alpha9.2/go.mod h1:FnIeEMYu+ko8zP1F9Ypr3xkZMIDqW3DR92yUtY39q1Y= +github.com/x448/float16 v0.8.4 
h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= @@ -486,20 +529,26 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.5 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0 h1:FZ6ei8GFW7kyPYdxJaV2rgI6M+4tvZzhYsQ2wgyVC08= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.31.0/go.mod h1:MdEu/mC6j3D+tTEfvI15b5Ci2Fn7NneJ71YMoiS3tpI= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0 h1:ZsXq73BERAiNuuFXYqP4MR5hBrjXfMGSO+Cx7qoOZiM= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.31.0/go.mod h1:hg1zaDMpyZJuUzjFxFsRYBoccE86tM9Uf4IqNMUxvrY= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= -go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= -go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= 
+go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.temporal.io/api v1.41.0 h1:VYzyWJjJk1jeB9urntA/t7Hiyo2tHdM5xEdtdib4EO8= @@ -512,6 +561,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -554,8 +605,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -570,8 +621,8 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= @@ -579,8 +630,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 
-golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -645,6 +696,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -655,5 +708,23 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.31.2 h1:3wLBbL5Uom/8Zy98GRPXpJ254nEFpl+hwndmk9RwmL0= +k8s.io/api v0.31.2/go.mod h1:bWmGvrGPssSK1ljmLzd3pwCQ9MgoTsRCuK35u6SygUk= +k8s.io/apimachinery v0.31.2 h1:i4vUt2hPK56W6mlT7Ry+AO8eEsyxMD1U44NR22CLTYw= +k8s.io/apimachinery v0.31.2/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.2 h1:Y2F4dxU5d3AQj+ybwSMqQnpZH9F30//1ObxOKlTI9yc= +k8s.io/client-go v0.31.2/go.mod h1:NPa74jSVR/+eez2dFsEIHNa+3o09vtNaWwWwb1qSxSs= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= +k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/flow/main.go b/flow/main.go index 
4001a88912..9d499e957d 100644 --- a/flow/main.go +++ b/flow/main.go @@ -70,6 +70,60 @@ func main() { Sources: cli.EnvVars("TEMPORAL_MAX_CONCURRENT_WORKFLOW_TASKS"), } + maintenanceModeWorkflowFlag := &cli.StringFlag{ + Name: "run-maintenance-flow", + Value: "", + Usage: "Run a maintenance flow. Options are 'start' or 'end'", + Sources: cli.EnvVars("RUN_MAINTENANCE_FLOW"), + } + + maintenanceSkipOnApiVersionMatchFlag := &cli.BoolFlag{ + Name: "skip-on-api-version-match", + Value: false, + Usage: "Skip maintenance flow if the API version matches", + Sources: cli.EnvVars("MAINTENANCE_SKIP_ON_API_VERSION_MATCH"), + } + + maintenanceSkipOnNoMirrorsFlag := &cli.BoolFlag{ + Name: "skip-on-no-mirrors", + Value: false, + Usage: "Skip maintenance flow if there are no mirrors", + Sources: cli.EnvVars("MAINTENANCE_SKIP_ON_NO_MIRRORS"), + } + + flowGrpcAddressFlag := &cli.StringFlag{ + Name: "flow-grpc-address", + Value: "", + Usage: "Address of the flow gRPC server", + Sources: cli.EnvVars("FLOW_GRPC_ADDRESS"), + } + + flowTlsEnabledFlag := &cli.BoolFlag{ + Name: "flow-tls-enabled", + Value: false, + Usage: "Enable TLS for the flow gRPC server", + Sources: cli.EnvVars("FLOW_TLS_ENABLED"), + } + + useMaintenanceTaskQueueFlag := &cli.BoolFlag{ + Name: "use-maintenance-task-queue", + Value: false, + Usage: "Use the maintenance task queue for the worker", + Sources: cli.EnvVars("USE_MAINTENANCE_TASK_QUEUE"), + } + + assumedSkippedMaintenanceWorkflowsFlag := &cli.BoolFlag{ + Name: "assume-skipped-workflow", + Value: false, + Usage: "Skip running maintenance workflows and simply output to catalog", + } + + skipIfK8sServiceMissingFlag := &cli.StringFlag{ + Name: "skip-if-k8s-service-missing", + Value: "", + Usage: "Skip maintenance if the k8s service is missing, generally used during pre-upgrade hook", + } + app := &cli.Command{ Name: "PeerDB Flows CLI", Commands: []*cli.Command{ @@ -85,6 +139,7 @@ func main() { TemporalNamespace: clicmd.String("temporal-namespace"), TemporalMaxConcurrentActivities: int(clicmd.Int("temporal-max-concurrent-activities")), TemporalMaxConcurrentWorkflowTasks: int(clicmd.Int("temporal-max-concurrent-workflow-tasks")), + UseMaintenanceTaskQueue: clicmd.Bool(useMaintenanceTaskQueueFlag.Name), }) if err != nil { return err @@ -100,6 +155,7 @@ func main() { temporalNamespaceFlag, temporalMaxConcurrentActivitiesFlag, temporalMaxConcurrentWorkflowTasksFlag, + useMaintenanceTaskQueueFlag, }, }, { @@ -148,6 +204,37 @@ func main() { }) }, }, + { + Name: "maintenance", + Flags: []cli.Flag{ + temporalHostPortFlag, + temporalNamespaceFlag, + maintenanceModeWorkflowFlag, + maintenanceSkipOnApiVersionMatchFlag, + maintenanceSkipOnNoMirrorsFlag, + flowGrpcAddressFlag, + flowTlsEnabledFlag, + useMaintenanceTaskQueueFlag, + assumedSkippedMaintenanceWorkflowsFlag, + skipIfK8sServiceMissingFlag, + }, + Action: func(ctx context.Context, clicmd *cli.Command) error { + temporalHostPort := clicmd.String("temporal-host-port") + + return cmd.MaintenanceMain(ctx, &cmd.MaintenanceCLIParams{ + TemporalHostPort: temporalHostPort, + TemporalNamespace: clicmd.String(temporalNamespaceFlag.Name), + Mode: clicmd.String(maintenanceModeWorkflowFlag.Name), + SkipOnApiVersionMatch: clicmd.Bool(maintenanceSkipOnApiVersionMatchFlag.Name), + SkipOnNoMirrors: clicmd.Bool(maintenanceSkipOnNoMirrorsFlag.Name), + FlowGrpcAddress: clicmd.String(flowGrpcAddressFlag.Name), + FlowTlsEnabled: clicmd.Bool(flowTlsEnabledFlag.Name), + UseMaintenanceTaskQueue: clicmd.Bool(useMaintenanceTaskQueueFlag.Name), + 
AssumeSkippedMaintenanceWorkflows: clicmd.Bool(assumedSkippedMaintenanceWorkflowsFlag.Name),
+                        SkipIfK8sServiceMissing: clicmd.String(skipIfK8sServiceMissingFlag.Name),
+                    })
+                },
+            },
        },
    }
@@ -164,5 +251,6 @@ func main() {
    if err := app.Run(appCtx, os.Args); err != nil {
        log.Printf("error running app: %+v", err)
+        // panic after logging so the process exits with a non-zero status
+        panic(err)
    }
}
diff --git a/flow/peerdbenv/config.go b/flow/peerdbenv/config.go
index e033b87195..9aa9d2c5ed 100644
--- a/flow/peerdbenv/config.go
+++ b/flow/peerdbenv/config.go
@@ -166,3 +166,9 @@ func PeerDBRAPIRequestLoggingEnabled() bool {
    }
    return requestLoggingEnabled
}
+
+// PeerDBMaintenanceModeWaitAlertSeconds (PEERDB_MAINTENANCE_MODE_WAIT_ALERT_SECONDS) returns how long to wait
+// before alerting that PeerDB has been stuck in maintenance mode for too long
+func PeerDBMaintenanceModeWaitAlertSeconds() int {
+    return getEnvInt("PEERDB_MAINTENANCE_MODE_WAIT_ALERT_SECONDS", 600)
+}
diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go
index c86c4616a1..566c8ead11 100644
--- a/flow/peerdbenv/dynamicconf.go
+++ b/flow/peerdbenv/dynamicconf.go
@@ -8,8 +8,10 @@ import (
    "strconv"
    "time"
+    "github.com/aws/smithy-go/ptr"
    "github.com/jackc/pgx/v5"
    "github.com/jackc/pgx/v5/pgtype"
+    "github.com/jackc/pgx/v5/pgxpool"
    "golang.org/x/exp/constraints"
    "github.com/PeerDB-io/peer-flow/generated/protos"
@@ -186,6 +188,14 @@ var DynamicSettings = [...]*protos.DynamicSetting{
        ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
        TargetForSetting: protos.DynconfTarget_ALL,
    },
+    {
+        Name: "PEERDB_MAINTENANCE_MODE_ENABLED",
+        Description: "Whether PeerDB is in maintenance mode, which disables any modifications to mirrors",
+        DefaultValue: "false",
+        ValueType: protos.DynconfValueType_BOOL,
+        ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE,
+        TargetForSetting: protos.DynconfTarget_ALL,
+    },
}
var DynamicIndex = func() map[string]int {
@@ -267,6 +277,20 @@ func dynamicConfBool(ctx context.Context, env map[string]string, key string) (bo
    return value, nil
}
+func UpdateDynamicSetting(ctx context.Context, pool *pgxpool.Pool, name string, value *string) error {
+    if pool == nil {
+        var err error
+        pool, err = GetCatalogConnectionPoolFromEnv(ctx)
+        if err != nil {
+            shared.LoggerFromCtx(ctx).Error("Failed to get catalog connection pool for dynamic setting update", slog.Any("error", err))
+            return fmt.Errorf("failed to get catalog connection pool: %w", err)
+        }
+    }
+    _, err := pool.Exec(ctx, `insert into dynamic_settings (config_name, config_value) values ($1, $2)
+        on conflict (config_name) do update set config_value = $2`, name, value)
+    return err
+}
+
// PEERDB_SLOT_LAG_MB_ALERT_THRESHOLD, 0 disables slot lag alerting entirely
func PeerDBSlotLagMBAlertThreshold(ctx context.Context, env map[string]string) (uint32, error) {
    return dynamicConfUnsigned[uint32](ctx, env, "PEERDB_SLOT_LAG_MB_ALERT_THRESHOLD")
@@ -364,3 +388,11 @@ func PeerDBIntervalSinceLastNormalizeThresholdMinutes(ctx context.Context, env m
func PeerDBApplicationNamePerMirrorName(ctx context.Context, env map[string]string) (bool, error) {
    return dynamicConfBool(ctx, env, "PEERDB_APPLICATION_NAME_PER_MIRROR_NAME")
}
+
+func PeerDBMaintenanceModeEnabled(ctx context.Context, env map[string]string) (bool, error) {
+    return dynamicConfBool(ctx, env, "PEERDB_MAINTENANCE_MODE_ENABLED")
+}
+
+func UpdatePeerDBMaintenanceModeEnabled(ctx context.Context, pool *pgxpool.Pool, enabled bool) error {
+    return UpdateDynamicSetting(ctx, pool, "PEERDB_MAINTENANCE_MODE_ENABLED", ptr.String(strconv.FormatBool(enabled)))
+}
diff --git a/flow/shared/constants.go
b/flow/shared/constants.go index 2dc5a8a64e..955ecfc4b5 100644 --- a/flow/shared/constants.go +++ b/flow/shared/constants.go @@ -11,8 +11,9 @@ type ( const ( // Task Queues - PeerFlowTaskQueue TaskQueueID = "peer-flow-task-queue" - SnapshotFlowTaskQueue TaskQueueID = "snapshot-flow-task-queue" + PeerFlowTaskQueue TaskQueueID = "peer-flow-task-queue" + SnapshotFlowTaskQueue TaskQueueID = "snapshot-flow-task-queue" + MaintenanceFlowTaskQueue TaskQueueID = "maintenance-flow-task-queue" // Queries CDCFlowStateQuery = "q-cdc-flow-state" diff --git a/flow/shared/telemetry/event_types.go b/flow/shared/telemetry/event_types.go index 0d87ba3540..a68fab869f 100644 --- a/flow/shared/telemetry/event_types.go +++ b/flow/shared/telemetry/event_types.go @@ -3,7 +3,11 @@ package telemetry type EventType string const ( - CreatePeer EventType = "CreatePeer" - CreateMirror EventType = "CreateMirror" - Other EventType = "Other" + CreatePeer EventType = "CreatePeer" + CreateMirror EventType = "CreateMirror" + StartMaintenance EventType = "StartMaintenance" + EndMaintenance EventType = "EndMaintenance" + MaintenanceWait EventType = "MaintenanceWait" + + Other EventType = "Other" ) diff --git a/flow/shared/worklow.go b/flow/shared/worklow.go new file mode 100644 index 0000000000..c9cafc37e2 --- /dev/null +++ b/flow/shared/worklow.go @@ -0,0 +1,27 @@ +package shared + +import ( + "context" + "fmt" + "log/slog" + + "go.temporal.io/sdk/client" + + "github.com/PeerDB-io/peer-flow/generated/protos" +) + +func GetWorkflowStatus(ctx context.Context, temporalClient client.Client, workflowID string) (protos.FlowStatus, error) { + res, err := temporalClient.QueryWorkflow(ctx, workflowID, "", FlowStatusQuery) + if err != nil { + slog.Error("failed to query status in workflow with ID "+workflowID, slog.Any("error", err)) + return protos.FlowStatus_STATUS_UNKNOWN, + fmt.Errorf("failed to query status in workflow with ID %s: %w", workflowID, err) + } + var state protos.FlowStatus + if err := res.Get(&state); err != nil { + slog.Error("failed to get status in workflow with ID "+workflowID, slog.Any("error", err)) + return protos.FlowStatus_STATUS_UNKNOWN, + fmt.Errorf("failed to get status in workflow with ID %s: %w", workflowID, err) + } + return state, nil +} diff --git a/flow/workflows/activities.go b/flow/workflows/activities.go index 0b23d10dd1..5fe699419c 100644 --- a/flow/workflows/activities.go +++ b/flow/workflows/activities.go @@ -3,6 +3,7 @@ package peerflow import "github.com/PeerDB-io/peer-flow/activities" var ( - flowable *activities.FlowableActivity - snapshot *activities.SnapshotActivity + flowable *activities.FlowableActivity + snapshot *activities.SnapshotActivity + maintenance *activities.MaintenanceActivity ) diff --git a/flow/workflows/maintenance_flow.go b/flow/workflows/maintenance_flow.go new file mode 100644 index 0000000000..c48750a807 --- /dev/null +++ b/flow/workflows/maintenance_flow.go @@ -0,0 +1,305 @@ +package peerflow + +import ( + "context" + "log/slog" + "time" + + tEnums "go.temporal.io/api/enums/v1" + "go.temporal.io/sdk/client" + "go.temporal.io/sdk/log" + "go.temporal.io/sdk/workflow" + + "github.com/PeerDB-io/peer-flow/generated/protos" + "github.com/PeerDB-io/peer-flow/peerdbenv" + "github.com/PeerDB-io/peer-flow/shared" +) + +func getMaintenanceWorkflowOptions(workflowIDPrefix string, taskQueueId shared.TaskQueueID) client.StartWorkflowOptions { + maintenanceWorkflowOptions := client.StartWorkflowOptions{ + WorkflowIDReusePolicy: tEnums.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, + 
WorkflowIDConflictPolicy: tEnums.WORKFLOW_ID_CONFLICT_POLICY_USE_EXISTING, + TaskQueue: peerdbenv.PeerFlowTaskQueueName(taskQueueId), + ID: workflowIDPrefix, + } + if deploymentUid := peerdbenv.PeerDBDeploymentUID(); deploymentUid != "" { + maintenanceWorkflowOptions.ID += "-" + deploymentUid + } + return maintenanceWorkflowOptions +} + +// RunStartMaintenanceWorkflow is a helper function to start the StartMaintenanceWorkflow with sane defaults +func RunStartMaintenanceWorkflow( + ctx context.Context, + temporalClient client.Client, + input *protos.StartMaintenanceFlowInput, + taskQueueId shared.TaskQueueID, +) (client.WorkflowRun, error) { + workflowOptions := getMaintenanceWorkflowOptions("start-maintenance", taskQueueId) + workflowRun, err := temporalClient.ExecuteWorkflow(ctx, workflowOptions, StartMaintenanceWorkflow, input) + if err != nil { + return nil, err + } + return workflowRun, nil +} + +// RunEndMaintenanceWorkflow is a helper function to start the EndMaintenanceWorkflow with sane defaults +func RunEndMaintenanceWorkflow( + ctx context.Context, + temporalClient client.Client, + input *protos.EndMaintenanceFlowInput, + taskQueueId shared.TaskQueueID, +) (client.WorkflowRun, error) { + workflowOptions := getMaintenanceWorkflowOptions("end-maintenance", taskQueueId) + workflowRun, err := temporalClient.ExecuteWorkflow(ctx, workflowOptions, EndMaintenanceWorkflow, &protos.EndMaintenanceFlowInput{}) + if err != nil { + return nil, err + } + return workflowRun, nil +} + +func StartMaintenanceWorkflow(ctx workflow.Context, input *protos.StartMaintenanceFlowInput) (*protos.StartMaintenanceFlowOutput, error) { + logger := workflow.GetLogger(ctx) + logger.Info("Starting StartMaintenance workflow", "input", input) + defer runBackgroundAlerter(ctx)() + + maintenanceFlowOutput, err := startMaintenance(ctx, logger) + if err != nil { + slog.Error("Error in StartMaintenance workflow", "error", err) + return nil, err + } + return maintenanceFlowOutput, nil +} + +func startMaintenance(ctx workflow.Context, logger log.Logger) (*protos.StartMaintenanceFlowOutput, error) { + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + }) + + snapshotWaitCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + HeartbeatTimeout: 1 * time.Minute, + }) + waitSnapshotsFuture := workflow.ExecuteActivity(snapshotWaitCtx, + maintenance.WaitForRunningSnapshots, + ) + err := waitSnapshotsFuture.Get(snapshotWaitCtx, nil) + if err != nil { + return nil, err + } + + enableCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 5 * time.Minute, + }) + enableMaintenanceFuture := workflow.ExecuteActivity(enableCtx, maintenance.EnableMaintenanceMode) + + if err := enableMaintenanceFuture.Get(enableCtx, nil); err != nil { + return nil, err + } + + logger.Info("Waiting for all snapshot mirrors to finish snapshotting") + waitSnapshotsPostEnableFuture := workflow.ExecuteActivity(snapshotWaitCtx, + maintenance.WaitForRunningSnapshots, + ) + + if err := waitSnapshotsPostEnableFuture.Get(snapshotWaitCtx, nil); err != nil { + return nil, err + } + + mirrorsList, err := getAllMirrors(ctx) + if err != nil { + return nil, err + } + + runningMirrors, err := pauseAndGetRunningMirrors(ctx, mirrorsList, logger) + if err != nil { + return nil, err + } + + backupCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 2 * time.Minute, + }) + future := 
workflow.ExecuteActivity(backupCtx, maintenance.BackupAllPreviouslyRunningFlows, runningMirrors) + + if err := future.Get(backupCtx, nil); err != nil { + return nil, err + } + version, err := GetPeerDBVersion(ctx) + if err != nil { + return nil, err + } + logger.Info("StartMaintenance workflow completed", "version", version) + return &protos.StartMaintenanceFlowOutput{ + Version: version, + }, nil +} + +func pauseAndGetRunningMirrors( + ctx workflow.Context, + mirrorsList *protos.MaintenanceMirrors, + logger log.Logger, +) (*protos.MaintenanceMirrors, error) { + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + HeartbeatTimeout: 1 * time.Minute, + }) + selector := workflow.NewSelector(ctx) + runningMirrors := make([]bool, len(mirrorsList.Mirrors)) + for i, mirror := range mirrorsList.Mirrors { + f := workflow.ExecuteActivity( + ctx, + maintenance.PauseMirrorIfRunning, + mirror, + ) + + selector.AddFuture(f, func(f workflow.Future) { + var wasRunning bool + err := f.Get(ctx, &wasRunning) + if err != nil { + logger.Error("Error checking and pausing mirror", "mirror", mirror, "error", err) + } else { + logger.Info("Finished check and pause for mirror", "mirror", mirror, "wasRunning", wasRunning) + runningMirrors[i] = wasRunning + } + }) + } + onlyRunningMirrors := make([]*protos.MaintenanceMirror, 0, len(mirrorsList.Mirrors)) + for range mirrorsList.Mirrors { + selector.Select(ctx) + if err := ctx.Err(); err != nil { + return nil, err + } + } + for i, mirror := range mirrorsList.Mirrors { + if runningMirrors[i] { + onlyRunningMirrors = append(onlyRunningMirrors, mirror) + } + } + return &protos.MaintenanceMirrors{ + Mirrors: onlyRunningMirrors, + }, nil +} + +func getAllMirrors(ctx workflow.Context) (*protos.MaintenanceMirrors, error) { + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 2 * time.Minute, + }) + getMirrorsFuture := workflow.ExecuteActivity(ctx, maintenance.GetAllMirrors) + var mirrorsList protos.MaintenanceMirrors + err := getMirrorsFuture.Get(ctx, &mirrorsList) + return &mirrorsList, err +} + +func EndMaintenanceWorkflow(ctx workflow.Context, input *protos.EndMaintenanceFlowInput) (*protos.EndMaintenanceFlowOutput, error) { + logger := workflow.GetLogger(ctx) + logger.Info("Starting EndMaintenance workflow", "input", input) + defer runBackgroundAlerter(ctx)() + + flowOutput, err := endMaintenance(ctx, logger) + if err != nil { + slog.Error("Error in EndMaintenance workflow", "error", err) + return nil, err + } + return flowOutput, nil +} + +func endMaintenance(ctx workflow.Context, logger log.Logger) (*protos.EndMaintenanceFlowOutput, error) { + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + HeartbeatTimeout: 1 * time.Minute, + }) + + mirrorsList, err := resumeBackedUpMirrors(ctx, logger) + if err != nil { + return nil, err + } + + clearBackupsFuture := workflow.ExecuteActivity(ctx, maintenance.CleanBackedUpFlows) + if err := clearBackupsFuture.Get(ctx, nil); err != nil { + return nil, err + } + + logger.Info("Resumed backed up mirrors", "mirrors", mirrorsList) + + disableCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 5 * time.Minute, + }) + + future := workflow.ExecuteActivity(disableCtx, maintenance.DisableMaintenanceMode) + if err := future.Get(disableCtx, nil); err != nil { + return nil, err + } + logger.Info("Disabled maintenance mode") + version, err := GetPeerDBVersion(ctx) 
+ if err != nil { + return nil, err + } + + logger.Info("EndMaintenance workflow completed", "version", version) + return &protos.EndMaintenanceFlowOutput{ + Version: version, + }, nil +} + +func resumeBackedUpMirrors(ctx workflow.Context, logger log.Logger) (*protos.MaintenanceMirrors, error) { + future := workflow.ExecuteActivity(ctx, maintenance.GetBackedUpFlows) + ctx = workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ + StartToCloseTimeout: 5 * time.Minute, + }) + var mirrorsList *protos.MaintenanceMirrors + err := future.Get(ctx, &mirrorsList) + if err != nil { + return nil, err + } + + selector := workflow.NewSelector(ctx) + for _, mirror := range mirrorsList.Mirrors { + activityInput := mirror + f := workflow.ExecuteActivity( + ctx, + maintenance.ResumeMirror, + activityInput, + ) + + selector.AddFuture(f, func(f workflow.Future) { + err := f.Get(ctx, nil) + if err != nil { + logger.Error("Error resuming mirror", "mirror", mirror, "error", err) + } else { + logger.Info("Finished resuming mirror", "mirror", mirror) + } + }) + } + + for range mirrorsList.Mirrors { + selector.Select(ctx) + if err := ctx.Err(); err != nil { + return nil, err + } + } + return mirrorsList, nil +} + +// runBackgroundAlerter Alerts every few minutes regarding currently running maintenance workflows +func runBackgroundAlerter(ctx workflow.Context) workflow.CancelFunc { + activityCtx, cancelActivity := workflow.WithCancel(ctx) + alerterCtx := workflow.WithActivityOptions(activityCtx, workflow.ActivityOptions{ + StartToCloseTimeout: 24 * time.Hour, + HeartbeatTimeout: 1 * time.Minute, + }) + workflow.ExecuteActivity(alerterCtx, maintenance.BackgroundAlerter) + return cancelActivity +} + +func GetPeerDBVersion(wCtx workflow.Context) (string, error) { + activityCtx := workflow.WithLocalActivityOptions(wCtx, workflow.LocalActivityOptions{ + StartToCloseTimeout: time.Minute, + }) + getVersionActivity := func(ctx context.Context) (string, error) { + return peerdbenv.PeerDBVersionShaShort(), nil + } + var version string + future := workflow.ExecuteLocalActivity(activityCtx, getVersionActivity) + err := future.Get(activityCtx, &version) + return version, err +} diff --git a/flow/workflows/register.go b/flow/workflows/register.go index 35adf135bf..2c4b32ba3c 100644 --- a/flow/workflows/register.go +++ b/flow/workflows/register.go @@ -18,4 +18,7 @@ func RegisterFlowWorkerWorkflows(w worker.WorkflowRegistry) { w.RegisterWorkflow(GlobalScheduleManagerWorkflow) w.RegisterWorkflow(HeartbeatFlowWorkflow) w.RegisterWorkflow(RecordSlotSizeWorkflow) + + w.RegisterWorkflow(StartMaintenanceWorkflow) + w.RegisterWorkflow(EndMaintenanceWorkflow) } diff --git a/nexus/catalog/migrations/V40__maintenance_flows.sql b/nexus/catalog/migrations/V40__maintenance_flows.sql new file mode 100644 index 0000000000..e43e8eb927 --- /dev/null +++ b/nexus/catalog/migrations/V40__maintenance_flows.sql @@ -0,0 +1,29 @@ +CREATE SCHEMA IF NOT EXISTS maintenance; + +CREATE TABLE IF NOT EXISTS maintenance.maintenance_flows +( + id SERIAL PRIMARY KEY, + flow_id BIGINT NOT NULL, + flow_name TEXT NOT NULL, + workflow_id TEXT NOT NULL, + flow_created_at TIMESTAMP NOT NULL, + is_cdc BOOLEAN NOT NULL, + state TEXT NOT NULL, + restored_at TIMESTAMP, + from_version TEXT, + to_version TEXT +); + +CREATE INDEX IF NOT EXISTS idx_maintenance_flows_state ON maintenance.maintenance_flows (state); + +CREATE TABLE IF NOT EXISTS maintenance.start_maintenance_outputs +( + id SERIAL PRIMARY KEY, + api_version TEXT NOT NULL, + cli_version TEXT NOT NULL, + 
skipped BOOLEAN NOT NULL, + skipped_reason TEXT, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_start_maintenance_outputs_created_at ON maintenance.start_maintenance_outputs (created_at DESC); diff --git a/protos/flow.proto b/protos/flow.proto index d1681fd8d5..de7bf740d0 100644 --- a/protos/flow.proto +++ b/protos/flow.proto @@ -466,3 +466,28 @@ message DropFlowActivityInput { string peer_name = 2; } +message StartMaintenanceFlowInput { +} + +message StartMaintenanceFlowOutput { + string version = 1; +} + +message EndMaintenanceFlowInput { +} + +message EndMaintenanceFlowOutput { + string version = 1; +} + +message MaintenanceMirror { + int64 mirror_id = 1; + string mirror_name = 2; + string workflow_id = 3; + bool is_cdc = 4; + google.protobuf.Timestamp mirror_created_at = 5; +} + +message MaintenanceMirrors { + repeated MaintenanceMirror mirrors = 1; +} diff --git a/protos/route.proto b/protos/route.proto index 0265f221ee..1c6d38ed69 100644 --- a/protos/route.proto +++ b/protos/route.proto @@ -441,6 +441,38 @@ message ResyncMirrorRequest { message ResyncMirrorResponse { } +message PeerDBStateRequest { +} + +enum InstanceStatus { + INSTANCE_STATUS_UNKNOWN = 0; + INSTANCE_STATUS_READY = 1; + INSTANCE_STATUS_MAINTENANCE = 3; +} + +message InstanceInfoRequest { +} + +message InstanceInfoResponse { + InstanceStatus status = 1; +} + +enum MaintenanceStatus { + MAINTENANCE_STATUS_UNKNOWN = 0; + MAINTENANCE_STATUS_START = 1; + MAINTENANCE_STATUS_END = 2; +} + +message MaintenanceRequest { + MaintenanceStatus status = 1; + bool use_peerflow_task_queue = 2; +} + +message MaintenanceResponse { + string workflow_id = 1; + string run_id = 2; +} + service FlowService { rpc ValidatePeer(ValidatePeerRequest) returns (ValidatePeerResponse) { option (google.api.http) = { @@ -595,4 +627,12 @@ service FlowService { rpc ResyncMirror(ResyncMirrorRequest) returns (ResyncMirrorResponse) { option (google.api.http) = { post: "/v1/mirrors/resync", body: "*" }; } + + rpc GetInstanceInfo(InstanceInfoRequest) returns (InstanceInfoResponse) { + option (google.api.http) = { get: "/v1/instance/info" }; + } + + rpc Maintenance(MaintenanceRequest) returns (MaintenanceResponse) { + option (google.api.http) = { post: "/v1/instance/maintenance", body: "*" }; + } } diff --git a/stacks/flow.Dockerfile b/stacks/flow.Dockerfile index 0f997777e9..13fc5b0895 100644 --- a/stacks/flow.Dockerfile +++ b/stacks/flow.Dockerfile @@ -45,6 +45,8 @@ FROM flow-base AS flow-worker # Sane defaults for OpenTelemetry ENV OTEL_METRIC_EXPORT_INTERVAL=10000 ENV OTEL_EXPORTER_OTLP_COMPRESSION=gzip +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} ENTRYPOINT [\ "./peer-flow",\ @@ -52,7 +54,20 @@ ENTRYPOINT [\ ] FROM flow-base AS flow-snapshot-worker + +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} ENTRYPOINT [\ "./peer-flow",\ "snapshot-worker"\ ] + + +FROM flow-base AS flow-maintenance + +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} +ENTRYPOINT [\ + "./peer-flow",\ + "maintenance"\ + ] diff --git a/stacks/peerdb-server.Dockerfile b/stacks/peerdb-server.Dockerfile index 689e3cf5b9..3e9db5240d 100644 --- a/stacks/peerdb-server.Dockerfile +++ b/stacks/peerdb-server.Dockerfile @@ -29,4 +29,8 @@ RUN apk add --no-cache ca-certificates postgresql-client curl iputils && \ USER peerdb WORKDIR /home/peerdb COPY --from=builder --chown=peerdb /root/nexus/target/release/peerdb-server . 
+ +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} + ENTRYPOINT ["./peerdb-server"] diff --git a/stacks/peerdb-ui.Dockerfile b/stacks/peerdb-ui.Dockerfile index cd99e61a5f..def0aad72e 100644 --- a/stacks/peerdb-ui.Dockerfile +++ b/stacks/peerdb-ui.Dockerfile @@ -35,5 +35,8 @@ ENV PORT 3000 # set hostname to localhost ENV HOSTNAME "0.0.0.0" +ARG PEERDB_VERSION_SHA_SHORT +ENV PEERDB_VERSION_SHA_SHORT=${PEERDB_VERSION_SHA_SHORT} + ENTRYPOINT ["/app/entrypoint.sh"] CMD ["node", "server.js"] From 79732fdedb3200979b4df9c91befb1bce16feda1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Thu, 14 Nov 2024 21:03:36 +0000 Subject: [PATCH 30/59] PEERDB_CLICKHOUSE_MAX_INSERT_THREADS (#2255) https://clickhouse.com/docs/en/operations/settings/settings#max_insert_threads --- flow/connectors/clickhouse/clickhouse.go | 12 ++++++++++-- flow/e2e/clickhouse/clickhouse.go | 4 ++-- flow/e2e/clickhouse/peer_flow_ch_test.go | 4 ++-- flow/peerdbenv/dynamicconf.go | 12 ++++++++++++ 4 files changed, 26 insertions(+), 6 deletions(-) diff --git a/flow/connectors/clickhouse/clickhouse.go b/flow/connectors/clickhouse/clickhouse.go index 4e89757014..63ccea6937 100644 --- a/flow/connectors/clickhouse/clickhouse.go +++ b/flow/connectors/clickhouse/clickhouse.go @@ -128,7 +128,7 @@ func NewClickHouseConnector( config *protos.ClickhouseConfig, ) (*ClickHouseConnector, error) { logger := shared.LoggerFromCtx(ctx) - database, err := Connect(ctx, config) + database, err := Connect(ctx, env, config) if err != nil { return nil, fmt.Errorf("failed to open connection to ClickHouse peer: %w", err) } @@ -205,7 +205,7 @@ func NewClickHouseConnector( return connector, nil } -func Connect(ctx context.Context, config *protos.ClickhouseConfig) (clickhouse.Conn, error) { +func Connect(ctx context.Context, env map[string]string, config *protos.ClickhouseConfig) (clickhouse.Conn, error) { var tlsSetting *tls.Config if !config.DisableTls { tlsSetting = &tls.Config{MinVersion: tls.VersionTLS13} @@ -228,6 +228,13 @@ func Connect(ctx context.Context, config *protos.ClickhouseConfig) (clickhouse.C tlsSetting.RootCAs = caPool } + var settings clickhouse.Settings + if maxInsertThreads, err := peerdbenv.PeerDBClickHouseMaxInsertThreads(ctx, env); err != nil { + return nil, fmt.Errorf("failed to load max_insert_threads config: %w", err) + } else if maxInsertThreads != 0 { + settings = clickhouse.Settings{"max_insert_threads": maxInsertThreads} + } + conn, err := clickhouse.Open(&clickhouse.Options{ Addr: []string{fmt.Sprintf("%s:%d", config.Host, config.Port)}, Auth: clickhouse.Auth{ @@ -245,6 +252,7 @@ func Connect(ctx context.Context, config *protos.ClickhouseConfig) (clickhouse.C {Name: "peerdb"}, }, }, + Settings: settings, DialTimeout: 3600 * time.Second, ReadTimeout: 3600 * time.Second, }) diff --git a/flow/e2e/clickhouse/clickhouse.go b/flow/e2e/clickhouse/clickhouse.go index 79ff2aa7bb..9756761520 100644 --- a/flow/e2e/clickhouse/clickhouse.go +++ b/flow/e2e/clickhouse/clickhouse.go @@ -92,7 +92,7 @@ func (s ClickHouseSuite) Teardown() { } func (s ClickHouseSuite) GetRows(table string, cols string) (*model.QRecordBatch, error) { - ch, err := connclickhouse.Connect(context.Background(), s.Peer().GetClickhouseConfig()) + ch, err := connclickhouse.Connect(context.Background(), nil, s.Peer().GetClickhouseConfig()) if err != nil { return nil, err } @@ -203,7 +203,7 @@ func SetupSuite(t *testing.T) ClickHouseSuite { s3Helper: s3Helper, } - ch, err := 
connclickhouse.Connect(context.Background(), s.PeerForDatabase("default").GetClickhouseConfig()) + ch, err := connclickhouse.Connect(context.Background(), nil, s.PeerForDatabase("default").GetClickhouseConfig()) require.NoError(t, err, "failed to connect to clickhouse") err = ch.Exec(context.Background(), "CREATE DATABASE e2e_test_"+suffix) require.NoError(t, err, "failed to create clickhouse database") diff --git a/flow/e2e/clickhouse/peer_flow_ch_test.go b/flow/e2e/clickhouse/peer_flow_ch_test.go index 8b28573104..9c4fa2a167 100644 --- a/flow/e2e/clickhouse/peer_flow_ch_test.go +++ b/flow/e2e/clickhouse/peer_flow_ch_test.go @@ -505,7 +505,7 @@ func (s ClickHouseSuite) WeirdTable(tableName string) { }) e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) // now test weird names with rename based resync - ch, err := connclickhouse.Connect(context.Background(), s.Peer().GetClickhouseConfig()) + ch, err := connclickhouse.Connect(context.Background(), nil, s.Peer().GetClickhouseConfig()) require.NoError(s.t, err) require.NoError(s.t, ch.Exec(context.Background(), fmt.Sprintf("DROP TABLE `%s`", dstTableName))) require.NoError(s.t, ch.Close()) @@ -523,7 +523,7 @@ func (s ClickHouseSuite) WeirdTable(tableName string) { }) e2e.EnvWaitForFinished(s.t, env, 3*time.Minute) // now test weird names with exchange based resync - ch, err = connclickhouse.Connect(context.Background(), s.Peer().GetClickhouseConfig()) + ch, err = connclickhouse.Connect(context.Background(), nil, s.Peer().GetClickhouseConfig()) require.NoError(s.t, err) require.NoError(s.t, ch.Exec(context.Background(), fmt.Sprintf("TRUNCATE TABLE `%s`", dstTableName))) require.NoError(s.t, ch.Close()) diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go index 566c8ead11..f3c2de0979 100644 --- a/flow/peerdbenv/dynamicconf.go +++ b/flow/peerdbenv/dynamicconf.go @@ -172,6 +172,14 @@ var DynamicSettings = [...]*protos.DynamicSetting{ ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE, TargetForSetting: protos.DynconfTarget_CLICKHOUSE, }, + { + Name: "PEERDB_CLICKHOUSE_MAX_INSERT_THREADS", + Description: "Configures max_insert_threads setting on clickhouse for inserting into destination table. 
Setting left unset when 0", + DefaultValue: "0", + ValueType: protos.DynconfValueType_UINT, + ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE, + TargetForSetting: protos.DynconfTarget_CLICKHOUSE, + }, { Name: "PEERDB_INTERVAL_SINCE_LAST_NORMALIZE_THRESHOLD_MINUTES", Description: "Duration in minutes since last normalize to start alerting, 0 disables all alerting entirely", @@ -362,6 +370,10 @@ func PeerDBEnableClickHousePrimaryUpdate(ctx context.Context, env map[string]str return dynamicConfBool(ctx, env, "PEERDB_CLICKHOUSE_ENABLE_PRIMARY_UPDATE") } +func PeerDBClickHouseMaxInsertThreads(ctx context.Context, env map[string]string) (int64, error) { + return dynamicConfSigned[int64](ctx, env, "PEERDB_CLICKHOUSE_MAX_INSERT_THREADS") +} + func PeerDBSnowflakeMergeParallelism(ctx context.Context, env map[string]string) (int64, error) { return dynamicConfSigned[int64](ctx, env, "PEERDB_SNOWFLAKE_MERGE_PARALLELISM") } From 8c02a5eb9c4a66153554d09b92152f31bd86a2ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Thu, 14 Nov 2024 22:00:06 +0000 Subject: [PATCH 31/59] PEERDB_CLICKHOUSE_PARALLEL_NORMALIZE (#2256) distributes queries across multiple connections so normalization can be concurrent & potentially distributed across nodes uses channel since that'll allow unevenly distributed changes (one table may have much more activity than others) even out across connections --- flow/connectors/clickhouse/normalize.go | 72 +++++++++++++++++++------ flow/peerdbenv/dynamicconf.go | 24 ++++++--- 2 files changed, 74 insertions(+), 22 deletions(-) diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index d5357c9a14..fb221096c0 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -12,6 +12,9 @@ import ( "strings" "time" + "github.com/ClickHouse/clickhouse-go/v2" + "golang.org/x/sync/errgroup" + "github.com/PeerDB-io/peer-flow/datatypes" "github.com/PeerDB-io/peer-flow/generated/protos" "github.com/PeerDB-io/peer-flow/model" @@ -262,8 +265,7 @@ func (c *ClickHouseConnector) NormalizeRecords( }, nil } - err = c.copyAvroStagesToDestination(ctx, req.FlowJobName, normBatchID, req.SyncBatchID) - if err != nil { + if err := c.copyAvroStagesToDestination(ctx, req.FlowJobName, normBatchID, req.SyncBatchID); err != nil { return nil, fmt.Errorf("failed to copy avro stages to destination: %w", err) } @@ -278,9 +280,48 @@ func (c *ClickHouseConnector) NormalizeRecords( return nil, err } + enablePrimaryUpdate, err := peerdbenv.PeerDBEnableClickHousePrimaryUpdate(ctx, req.Env) + if err != nil { + return nil, err + } + + parallelNormalize, err := peerdbenv.PeerDBClickHouseParallelNormalize(ctx, req.Env) + if err != nil { + return nil, err + } + parallelNormalize = min(max(parallelNormalize, 1), len(destinationTableNames)) + if parallelNormalize > 1 { + c.logger.Info("normalizing in parallel", slog.Int("connections", parallelNormalize)) + } + + queries := make(chan string) rawTbl := c.getRawTableName(req.FlowJobName) - // model the raw table data as inserts. 
+ group, errCtx := errgroup.WithContext(ctx) + for i := range parallelNormalize { + group.Go(func() error { + var chConn clickhouse.Conn + if i == 0 { + chConn = c.database + } else { + var err error + chConn, err = Connect(errCtx, req.Env, c.config) + if err != nil { + return err + } + defer chConn.Close() + } + + for query := range queries { + c.logger.Info("normalizing batch", slog.String("query", query)) + if err := chConn.Exec(errCtx, query); err != nil { + return fmt.Errorf("error while inserting into normalized table: %w", err) + } + } + return nil + }) + } + for _, tbl := range destinationTableNames { // SELECT projection FROM raw_table WHERE _peerdb_batch_id > normalize_batch_id AND _peerdb_batch_id <= sync_batch_id selectQuery := strings.Builder{} @@ -299,11 +340,6 @@ func (c *ClickHouseConnector) NormalizeRecords( } } - enablePrimaryUpdate, err := peerdbenv.PeerDBEnableClickHousePrimaryUpdate(ctx, req.Env) - if err != nil { - return nil, err - } - projection := strings.Builder{} projectionUpdate := strings.Builder{} @@ -338,6 +374,7 @@ func (c *ClickHouseConnector) NormalizeRecords( var err error clickHouseType, err = colType.ToDWHColumnType(protos.DBType_CLICKHOUSE) if err != nil { + close(queries) return nil, fmt.Errorf("error while converting column type to clickhouse type: %w", err) } } @@ -433,15 +470,19 @@ func (c *ClickHouseConnector) NormalizeRecords( insertIntoSelectQuery.WriteString(colSelector.String()) insertIntoSelectQuery.WriteString(selectQuery.String()) - q := insertIntoSelectQuery.String() - - if err := c.execWithLogging(ctx, q); err != nil { - return nil, fmt.Errorf("error while inserting into normalized table: %w", err) + select { + case queries <- insertIntoSelectQuery.String(): + case <-errCtx.Done(): + close(queries) + return nil, ctx.Err() } } + close(queries) + if err := group.Wait(); err != nil { + return nil, err + } - err = c.UpdateNormalizeBatchID(ctx, req.FlowJobName, req.SyncBatchID) - if err != nil { + if err := c.UpdateNormalizeBatchID(ctx, req.FlowJobName, req.SyncBatchID); err != nil { c.logger.Error("[clickhouse] error while updating normalize batch id", slog.Int64("BatchID", req.SyncBatchID), slog.Any("error", err)) return nil, err } @@ -510,8 +551,7 @@ func (c *ClickHouseConnector) copyAvroStagesToDestination( ctx context.Context, flowJobName string, normBatchID, syncBatchID int64, ) error { for s := normBatchID + 1; s <= syncBatchID; s++ { - err := c.copyAvroStageToDestination(ctx, flowJobName, s) - if err != nil { + if err := c.copyAvroStageToDestination(ctx, flowJobName, s); err != nil { return fmt.Errorf("failed to copy avro stage to destination: %w", err) } } diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go index f3c2de0979..f149bf37bd 100644 --- a/flow/peerdbenv/dynamicconf.go +++ b/flow/peerdbenv/dynamicconf.go @@ -180,6 +180,14 @@ var DynamicSettings = [...]*protos.DynamicSetting{ ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE, TargetForSetting: protos.DynconfTarget_CLICKHOUSE, }, + { + Name: "PEERDB_CLICKHOUSE_PARALLEL_NORMALIZE", + Description: "Divide tables in batch into N insert selects. 
Helps distribute load to multiple nodes", + DefaultValue: "0", + ValueType: protos.DynconfValueType_INT, + ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE, + TargetForSetting: protos.DynconfTarget_CLICKHOUSE, + }, { Name: "PEERDB_INTERVAL_SINCE_LAST_NORMALIZE_THRESHOLD_MINUTES", Description: "Duration in minutes since last normalize to start alerting, 0 disables all alerting entirely", @@ -256,8 +264,8 @@ func dynamicConfSigned[T constraints.Signed](ctx context.Context, env map[string return strconv.ParseInt(value, 10, 64) }) if err != nil { - shared.LoggerFromCtx(ctx).Error("Failed to parse as int64", slog.Any("error", err)) - return 0, fmt.Errorf("failed to parse as int64: %w", err) + shared.LoggerFromCtx(ctx).Error("Failed to parse as int64", slog.String("key", key), slog.Any("error", err)) + return 0, fmt.Errorf("failed to parse %s as int64: %w", key, err) } return T(value), nil @@ -268,8 +276,8 @@ func dynamicConfUnsigned[T constraints.Unsigned](ctx context.Context, env map[st return strconv.ParseUint(value, 10, 64) }) if err != nil { - shared.LoggerFromCtx(ctx).Error("Failed to parse as uint64", slog.Any("error", err)) - return 0, fmt.Errorf("failed to parse as uint64: %w", err) + shared.LoggerFromCtx(ctx).Error("Failed to parse as uint64", slog.String("key", key), slog.Any("error", err)) + return 0, fmt.Errorf("failed to parse %s as uint64: %w", key, err) } return T(value), nil @@ -278,8 +286,8 @@ func dynamicConfUnsigned[T constraints.Unsigned](ctx context.Context, env map[st func dynamicConfBool(ctx context.Context, env map[string]string, key string) (bool, error) { value, err := dynLookupConvert(ctx, env, key, strconv.ParseBool) if err != nil { - shared.LoggerFromCtx(ctx).Error("Failed to parse bool", slog.Any("error", err)) - return false, fmt.Errorf("failed to parse bool: %w", err) + shared.LoggerFromCtx(ctx).Error("Failed to parse bool", slog.String("key", key), slog.Any("error", err)) + return false, fmt.Errorf("failed to parse %s as bool: %w", key, err) } return value, nil @@ -374,6 +382,10 @@ func PeerDBClickHouseMaxInsertThreads(ctx context.Context, env map[string]string return dynamicConfSigned[int64](ctx, env, "PEERDB_CLICKHOUSE_MAX_INSERT_THREADS") } +func PeerDBClickHouseParallelNormalize(ctx context.Context, env map[string]string) (int, error) { + return dynamicConfSigned[int](ctx, env, "PEERDB_CLICKHOUSE_PARALLEL_NORMALIZE") +} + func PeerDBSnowflakeMergeParallelism(ctx context.Context, env map[string]string) (int64, error) { return dynamicConfSigned[int64](ctx, env, "PEERDB_SNOWFLAKE_MERGE_PARALLELISM") } From 593093e522406e09fa1a30c0a30998f5761bd524 Mon Sep 17 00:00:00 2001 From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> Date: Fri, 15 Nov 2024 15:02:26 +0530 Subject: [PATCH 32/59] use disconnected ctx to ensure snapshot connection drops (#2258) Temporal has a thing for everything may close #2162 --- flow/workflows/snapshot_flow.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/flow/workflows/snapshot_flow.go b/flow/workflows/snapshot_flow.go index c8b6a3fd29..d4f494d1ff 100644 --- a/flow/workflows/snapshot_flow.go +++ b/flow/workflows/snapshot_flow.go @@ -274,6 +274,13 @@ func (s *SnapshotFlowExecution) cloneTablesWithSlot( if err != nil { return fmt.Errorf("failed to setup replication: %w", err) } + defer func() { + dCtx, cancel := workflow.NewDisconnectedContext(sessionCtx) + defer cancel() + if err := s.closeSlotKeepAlive(dCtx); err != nil { + s.logger.Error("failed to close slot keep alive", 
slog.Any("error", err)) + } + }() s.logger.Info(fmt.Sprintf("cloning %d tables in parallel", numTablesInParallel)) if err := s.cloneTables(ctx, @@ -283,13 +290,10 @@ func (s *SnapshotFlowExecution) cloneTablesWithSlot( slotInfo.SupportsTidScans, numTablesInParallel, ); err != nil { + s.logger.Error("failed to clone tables", slog.Any("error", err)) return fmt.Errorf("failed to clone tables: %w", err) } - if err := s.closeSlotKeepAlive(sessionCtx); err != nil { - return fmt.Errorf("failed to close slot keep alive: %w", err) - } - return nil } From 27996dbfc53c22ed61e43e6e31c117eb12cbbd61 Mon Sep 17 00:00:00 2001 From: Kaushik Iska Date: Fri, 15 Nov 2024 10:06:41 -0600 Subject: [PATCH 33/59] Update end time for batches where end time is not set (#2260) --- flow/connectors/utils/monitoring/monitoring.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/flow/connectors/utils/monitoring/monitoring.go b/flow/connectors/utils/monitoring/monitoring.go index 9c73970049..98a62ec65b 100644 --- a/flow/connectors/utils/monitoring/monitoring.go +++ b/flow/connectors/utils/monitoring/monitoring.go @@ -96,8 +96,10 @@ func UpdateEndTimeForCDCBatch( batchID int64, ) error { _, err := pool.Exec(ctx, - "UPDATE peerdb_stats.cdc_batches SET end_time=$1 WHERE flow_name=$2 AND batch_id=$3", - time.Now(), flowJobName, batchID) + `UPDATE peerdb_stats.cdc_batches + SET end_time = COALESCE(end_time, NOW()) + WHERE flow_name = $1 AND batch_id <= $2`, + flowJobName, batchID) if err != nil { return fmt.Errorf("error while updating batch in cdc_batch: %w", err) } From cd583155dec65a7de7dbb97f231d39ccb72b058f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Fri, 15 Nov 2024 16:24:57 +0000 Subject: [PATCH 34/59] cdc graph: fix filtering not including flow_name (#2261) --- flow/cmd/mirror_status.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/cmd/mirror_status.go b/flow/cmd/mirror_status.go index 58cf20a80a..156185054c 100644 --- a/flow/cmd/mirror_status.go +++ b/flow/cmd/mirror_status.go @@ -215,8 +215,8 @@ func (h *FlowRequestHandler) CDCGraph(ctx context.Context, req *protos.GraphRequ } rows, err := h.pool.Query(ctx, `select tm, coalesce(sum(rows_in_batch), 0) from generate_series(date_trunc($2, now() - $1::INTERVAL * 30), now(), $1::INTERVAL) tm - left join peerdb_stats.cdc_batches on start_time >= tm and start_time < tm + $1::INTERVAL - group by 1 order by 1`, req.AggregateType, truncField) + left join peerdb_stats.cdc_batches on start_time >= tm and start_time < tm + $1::INTERVAL and flow_name = $3 + group by 1 order by 1`, req.AggregateType, truncField, req.FlowJobName) if err != nil { return nil, err } From 2c52d4f4f19f94f7f12fcd3bf6c8cc9d88edca78 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Mon, 18 Nov 2024 22:58:35 +0530 Subject: [PATCH 35/59] Postgres to Postgres: Introduce support for more types (#2238) - TSTZRange: Introduce logic to construct the final string in range format - Supports JSON array - Supports JSONB and JSONB array --- .../bigquery/merge_stmt_generator.go | 2 +- flow/connectors/bigquery/qvalue_convert.go | 2 +- flow/connectors/postgres/postgres.go | 1 + flow/connectors/postgres/qvalue_convert.go | 75 +++++++++++++++++-- .../snowflake/merge_stmt_generator.go | 2 +- flow/model/qrecord_copy_from_source.go | 13 +++- flow/model/qvalue/avro_converter.go | 8 +- flow/model/qvalue/kind.go | 11 ++- flow/model/qvalue/qvalue.go | 21 +++++- 9 files changed, 121 insertions(+), 14 
deletions(-) diff --git a/flow/connectors/bigquery/merge_stmt_generator.go b/flow/connectors/bigquery/merge_stmt_generator.go index e903ef5869..5ee4f883c2 100644 --- a/flow/connectors/bigquery/merge_stmt_generator.go +++ b/flow/connectors/bigquery/merge_stmt_generator.go @@ -34,7 +34,7 @@ func (m *mergeStmtGenerator) generateFlattenedCTE(dstTable string, normalizedTab var castStmt string shortCol := m.shortColumn[column.Name] switch qvalue.QValueKind(colType) { - case qvalue.QValueKindJSON, qvalue.QValueKindHStore: + case qvalue.QValueKindJSON, qvalue.QValueKindJSONB, qvalue.QValueKindHStore: // if the type is JSON, then just extract JSON castStmt = fmt.Sprintf("CAST(PARSE_JSON(JSON_VALUE(_peerdb_data, '$.%s'),wide_number_mode=>'round') AS %s) AS `%s`", column.Name, bqTypeString, shortCol) diff --git a/flow/connectors/bigquery/qvalue_convert.go b/flow/connectors/bigquery/qvalue_convert.go index d2d9d9f0c2..aa798641ac 100644 --- a/flow/connectors/bigquery/qvalue_convert.go +++ b/flow/connectors/bigquery/qvalue_convert.go @@ -34,7 +34,7 @@ func qValueKindToBigQueryType(columnDescription *protos.FieldDescription, nullab case qvalue.QValueKindString: bqField.Type = bigquery.StringFieldType // json also is stored as string for now - case qvalue.QValueKindJSON, qvalue.QValueKindHStore: + case qvalue.QValueKindJSON, qvalue.QValueKindJSONB, qvalue.QValueKindHStore: bqField.Type = bigquery.JSONFieldType // time related case qvalue.QValueKindTimestamp, qvalue.QValueKindTimestampTZ: diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index b179e2d075..593a94aa49 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -1187,6 +1187,7 @@ func (c *PostgresConnector) SyncFlowCleanup(ctx context.Context, jobName string) if err := syncFlowCleanupTx.Commit(ctx); err != nil { return fmt.Errorf("unable to commit transaction for sync flow cleanup: %w", err) } + return nil } diff --git a/flow/connectors/postgres/qvalue_convert.go b/flow/connectors/postgres/qvalue_convert.go index d359212bdb..fe2489ed30 100644 --- a/flow/connectors/postgres/qvalue_convert.go +++ b/flow/connectors/postgres/qvalue_convert.go @@ -62,8 +62,10 @@ func (c *PostgresConnector) postgresOIDToQValueKind(recvOID uint32) qvalue.QValu return qvalue.QValueKindString case pgtype.ByteaOID: return qvalue.QValueKindBytes - case pgtype.JSONOID, pgtype.JSONBOID: + case pgtype.JSONOID: return qvalue.QValueKindJSON + case pgtype.JSONBOID: + return qvalue.QValueKindJSONB case pgtype.UUIDOID: return qvalue.QValueKindUUID case pgtype.TimeOID: @@ -104,8 +106,14 @@ func (c *PostgresConnector) postgresOIDToQValueKind(recvOID uint32) qvalue.QValu return qvalue.QValueKindArrayTimestampTZ case pgtype.TextArrayOID, pgtype.VarcharArrayOID, pgtype.BPCharArrayOID: return qvalue.QValueKindArrayString + case pgtype.JSONArrayOID: + return qvalue.QValueKindArrayJSON + case pgtype.JSONBArrayOID: + return qvalue.QValueKindArrayJSONB case pgtype.IntervalOID: return qvalue.QValueKindInterval + case pgtype.TstzrangeOID: + return qvalue.QValueKindTSTZRange default: typeName, ok := pgtype.NewMap().TypeForOID(recvOID) if !ok { @@ -161,6 +169,8 @@ func qValueKindToPostgresType(colTypeStr string) string { return "BYTEA" case qvalue.QValueKindJSON: return "JSON" + case qvalue.QValueKindJSONB: + return "JSONB" case qvalue.QValueKindHStore: return "HSTORE" case qvalue.QValueKindUUID: @@ -203,6 +213,10 @@ func qValueKindToPostgresType(colTypeStr string) string { return "BOOLEAN[]" case 
qvalue.QValueKindArrayString: return "TEXT[]" + case qvalue.QValueKindArrayJSON: + return "JSON[]" + case qvalue.QValueKindArrayJSONB: + return "JSONB[]" case qvalue.QValueKindGeography: return "GEOGRAPHY" case qvalue.QValueKindGeometry: @@ -214,12 +228,12 @@ func qValueKindToPostgresType(colTypeStr string) string { } } -func parseJSON(value interface{}) (qvalue.QValue, error) { +func parseJSON(value interface{}, isArray bool) (qvalue.QValue, error) { jsonVal, err := json.Marshal(value) if err != nil { return nil, fmt.Errorf("failed to parse JSON: %w", err) } - return qvalue.QValueJSON{Val: string(jsonVal)}, nil + return qvalue.QValueJSON{Val: string(jsonVal), IsArray: isArray}, nil } func convertToArray[T any](kind qvalue.QValueKind, value interface{}) ([]T, error) { @@ -277,6 +291,31 @@ func parseFieldFromQValueKind(qvalueKind qvalue.QValueKind, value interface{}) ( } return qvalue.QValueString{Val: string(intervalJSON)}, nil + case qvalue.QValueKindTSTZRange: + tstzrangeObject := value.(pgtype.Range[interface{}]) + lowerBoundType := tstzrangeObject.LowerType + upperBoundType := tstzrangeObject.UpperType + lowerTime, err := convertTimeRangeBound(tstzrangeObject.Lower) + if err != nil { + return nil, fmt.Errorf("[tstzrange]error for lower time bound: %w", err) + } + + upperTime, err := convertTimeRangeBound(tstzrangeObject.Upper) + if err != nil { + return nil, fmt.Errorf("[tstzrange]error for upper time bound: %w", err) + } + + lowerBracket := "[" + if lowerBoundType == pgtype.Exclusive { + lowerBracket = "(" + } + upperBracket := "]" + if upperBoundType == pgtype.Exclusive { + upperBracket = ")" + } + tstzrangeStr := fmt.Sprintf("%s%v,%v%s", + lowerBracket, lowerTime, upperTime, upperBracket) + return qvalue.QValueTSTZRange{Val: tstzrangeStr}, nil case qvalue.QValueKindDate: switch val := value.(type) { case time.Time: @@ -306,12 +345,18 @@ func parseFieldFromQValueKind(qvalueKind qvalue.QValueKind, value interface{}) ( case qvalue.QValueKindBoolean: boolVal := value.(bool) return qvalue.QValueBoolean{Val: boolVal}, nil - case qvalue.QValueKindJSON: - tmp, err := parseJSON(value) + case qvalue.QValueKindJSON, qvalue.QValueKindJSONB: + tmp, err := parseJSON(value, false) if err != nil { return nil, fmt.Errorf("failed to parse JSON: %w", err) } return tmp, nil + case qvalue.QValueKindArrayJSON, qvalue.QValueKindArrayJSONB: + tmp, err := parseJSON(value, true) + if err != nil { + return nil, fmt.Errorf("failed to parse JSON Array: %w", err) + } + return tmp, nil case qvalue.QValueKindInt16: intVal := value.(int16) return qvalue.QValueInt16{Val: intVal}, nil @@ -483,3 +528,23 @@ func customTypeToQKind(typeName string) qvalue.QValueKind { return qvalue.QValueKindString } } + +// Postgres does not like timestamps of the form 2006-01-02 15:04:05 +0000 UTC +// in tstzrange. +// convertTimeRangeBound removes the +0000 UTC part +func convertTimeRangeBound(timeBound interface{}) (string, error) { + layout := "2006-01-02 15:04:05 -0700 MST" + postgresFormat := "2006-01-02 15:04:05" + var convertedTime string + if timeBound != nil { + lowerParsed, err := time.Parse(layout, fmt.Sprint(timeBound)) + if err != nil { + return "", fmt.Errorf("unexpected lower bound value in tstzrange. 
Error: %v", err) + } + convertedTime = lowerParsed.Format(postgresFormat) + } else { + convertedTime = "" + } + + return convertedTime, nil +} diff --git a/flow/connectors/snowflake/merge_stmt_generator.go b/flow/connectors/snowflake/merge_stmt_generator.go index 3f0cfbc63a..37b4ed7bdb 100644 --- a/flow/connectors/snowflake/merge_stmt_generator.go +++ b/flow/connectors/snowflake/merge_stmt_generator.go @@ -52,7 +52,7 @@ func (m *mergeStmtGenerator) generateMergeStmt(dstTable string) (string, error) flattenedCastsSQLArray = append(flattenedCastsSQLArray, fmt.Sprintf("TO_GEOMETRY(CAST(%s:\"%s\" AS STRING),true) AS %s", toVariantColumnName, column.Name, targetColumnName)) - case qvalue.QValueKindJSON, qvalue.QValueKindHStore, qvalue.QValueKindInterval: + case qvalue.QValueKindJSON, qvalue.QValueKindJSONB, qvalue.QValueKindHStore, qvalue.QValueKindInterval: flattenedCastsSQLArray = append(flattenedCastsSQLArray, fmt.Sprintf("PARSE_JSON(CAST(%s:\"%s\" AS STRING)) AS %s", toVariantColumnName, column.Name, targetColumnName)) diff --git a/flow/model/qrecord_copy_from_source.go b/flow/model/qrecord_copy_from_source.go index 308676c5f5..d633fda999 100644 --- a/flow/model/qrecord_copy_from_source.go +++ b/flow/model/qrecord_copy_from_source.go @@ -1,6 +1,7 @@ package model import ( + "encoding/json" "errors" "fmt" "strings" @@ -82,6 +83,8 @@ func (src *QRecordCopyFromSource) Values() ([]interface{}, error) { values[i] = str case qvalue.QValueTime: values[i] = pgtype.Time{Microseconds: v.Val.UnixMicro(), Valid: true} + case qvalue.QValueTSTZRange: + values[i] = v.Val case qvalue.QValueTimestamp: values[i] = pgtype.Timestamp{Time: v.Val, Valid: true} case qvalue.QValueTimestampTZ: @@ -170,8 +173,16 @@ func (src *QRecordCopyFromSource) Values() ([]interface{}, error) { } values[i] = a case qvalue.QValueJSON: - values[i] = v.Val + if v.IsArray { + var arrayJ []interface{} + if err := json.Unmarshal([]byte(v.Value().(string)), &arrayJ); err != nil { + return nil, fmt.Errorf("failed to unmarshal JSON array: %v", err) + } + values[i] = arrayJ + } else { + values[i] = v.Value() + } // And so on for the other types... 
default: return nil, fmt.Errorf("unsupported value type %T", qValue) diff --git a/flow/model/qvalue/avro_converter.go b/flow/model/qvalue/avro_converter.go index 9738f46e8f..97d9641b6b 100644 --- a/flow/model/qvalue/avro_converter.go +++ b/flow/model/qvalue/avro_converter.go @@ -138,7 +138,9 @@ func GetAvroSchemaFromQValueKind(kind QValueKind, targetDWH protos.DBType, preci }, nil } return "string", nil - case QValueKindHStore, QValueKindJSON, QValueKindStruct: + case QValueKindTSTZRange: + return "string", nil + case QValueKindHStore, QValueKindJSON, QValueKindJSONB, QValueKindStruct: return "string", nil case QValueKindArrayFloat32: return AvroSchemaArray{ @@ -193,6 +195,8 @@ func GetAvroSchemaFromQValueKind(kind QValueKind, targetDWH protos.DBType, preci Type: "array", Items: "string", }, nil + case QValueKindArrayJSON, QValueKindArrayJSONB: + return "string", nil case QValueKindArrayString: return AvroSchemaArray{ Type: "array", @@ -315,7 +319,7 @@ func QValueToAvro(value QValue, field *QField, targetDWH protos.DBType, logger l return t, nil case QValueQChar: return c.processNullableUnion("string", string(v.Val)) - case QValueString, QValueCIDR, QValueINET, QValueMacaddr, QValueInterval: + case QValueString, QValueCIDR, QValueINET, QValueMacaddr, QValueInterval, QValueTSTZRange: if c.TargetDWH == protos.DBType_SNOWFLAKE && v.Value() != nil && (len(v.Value().(string)) > 15*1024*1024) { slog.Warn("Clearing TEXT value > 15MB for Snowflake!") diff --git a/flow/model/qvalue/kind.go b/flow/model/qvalue/kind.go index 79e8f89e40..91ab867a0e 100644 --- a/flow/model/qvalue/kind.go +++ b/flow/model/qvalue/kind.go @@ -26,10 +26,12 @@ const ( QValueKindTime QValueKind = "time" QValueKindTimeTZ QValueKind = "timetz" QValueKindInterval QValueKind = "interval" + QValueKindTSTZRange QValueKind = "tstzrange" QValueKindNumeric QValueKind = "numeric" QValueKindBytes QValueKind = "bytes" QValueKindUUID QValueKind = "uuid" QValueKindJSON QValueKind = "json" + QValueKindJSONB QValueKind = "jsonb" QValueKindHStore QValueKind = "hstore" QValueKindGeography QValueKind = "geography" QValueKindGeometry QValueKind = "geometry" @@ -51,6 +53,8 @@ const ( QValueKindArrayTimestamp QValueKind = "array_timestamp" QValueKindArrayTimestampTZ QValueKind = "array_timestamptz" QValueKindArrayBoolean QValueKind = "array_bool" + QValueKindArrayJSON QValueKind = "array_json" + QValueKindArrayJSONB QValueKind = "array_jsonb" ) func (kind QValueKind) IsArray() bool { @@ -68,6 +72,7 @@ var QValueKindToSnowflakeTypeMap = map[QValueKind]string{ QValueKindQChar: "CHAR", QValueKindString: "STRING", QValueKindJSON: "VARIANT", + QValueKindJSONB: "VARIANT", QValueKindTimestamp: "TIMESTAMP_NTZ", QValueKindTimestampTZ: "TIMESTAMP_TZ", QValueKindInterval: "VARIANT", @@ -94,6 +99,8 @@ var QValueKindToSnowflakeTypeMap = map[QValueKind]string{ QValueKindArrayTimestamp: "VARIANT", QValueKindArrayTimestampTZ: "VARIANT", QValueKindArrayBoolean: "VARIANT", + QValueKindArrayJSON: "VARIANT", + QValueKindArrayJSONB: "VARIANT", } var QValueKindToClickHouseTypeMap = map[QValueKind]string{ @@ -109,6 +116,7 @@ var QValueKindToClickHouseTypeMap = map[QValueKind]string{ QValueKindJSON: "String", QValueKindTimestamp: "DateTime64(6)", QValueKindTimestampTZ: "DateTime64(6)", + QValueKindTSTZRange: "String", QValueKindTime: "DateTime64(6)", QValueKindTimeTZ: "DateTime64(6)", QValueKindDate: "Date32", @@ -118,7 +126,6 @@ var QValueKindToClickHouseTypeMap = map[QValueKind]string{ QValueKindInvalid: "String", QValueKindHStore: "String", - // array types will be 
mapped to VARIANT
 	QValueKindArrayFloat32:     "Array(Float32)",
 	QValueKindArrayFloat64:     "Array(Float64)",
 	QValueKindArrayInt32:       "Array(Int32)",
@@ -129,6 +136,8 @@ var QValueKindToClickHouseTypeMap = map[QValueKind]string{
 	QValueKindArrayDate:        "Array(Date)",
 	QValueKindArrayTimestamp:   "Array(DateTime64(6))",
 	QValueKindArrayTimestampTZ: "Array(DateTime64(6))",
+	QValueKindArrayJSON:        "String",
+	QValueKindArrayJSONB:       "String",
 }
 
 func (kind QValueKind) ToDWHColumnType(dwhType protos.DBType) (string, error) {
diff --git a/flow/model/qvalue/qvalue.go b/flow/model/qvalue/qvalue.go
index 9b1c13f755..1277881a3d 100644
--- a/flow/model/qvalue/qvalue.go
+++ b/flow/model/qvalue/qvalue.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/google/uuid"
 	"github.com/shopspring/decimal"
-	"github.com/yuin/gopher-lua"
+	lua "github.com/yuin/gopher-lua"
 
 	"github.com/PeerDB-io/glua64"
 	"github.com/PeerDB-io/peer-flow/shared"
@@ -294,6 +294,22 @@ func (v QValueInterval) LValue(ls *lua.LState) lua.LValue {
 	return lua.LString(v.Val)
 }
 
+type QValueTSTZRange struct {
+	Val string
+}
+
+func (QValueTSTZRange) Kind() QValueKind {
+	return QValueKindTSTZRange
+}
+
+func (v QValueTSTZRange) Value() any {
+	return v.Val
+}
+
+func (v QValueTSTZRange) LValue(ls *lua.LState) lua.LValue {
+	return lua.LString(v.Val)
+}
+
 type QValueNumeric struct {
 	Val decimal.Decimal
 }
@@ -343,7 +359,8 @@ func (v QValueUUID) LValue(ls *lua.LState) lua.LValue {
 }
 
 type QValueJSON struct {
-	Val string
+	Val     string
+	IsArray bool
 }
 
 func (QValueJSON) Kind() QValueKind {

From 5ffc25c7440c6d2f7238ab091281f7e94535f36b Mon Sep 17 00:00:00 2001
From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com>
Date: Mon, 18 Nov 2024 23:08:50 +0530
Subject: [PATCH 36/59] QRep Overwrite Mode: introduce full refresh code path (#2239)

Currently overwrite mode in query replication has the following behaviour:
before new rows are synced, truncate the destination table.

This PR introduces a dynconf-gated (default false) full refresh mode which,
if enabled, has the same behaviour as above but pulls the entire data from
the source table at every wait time interval.

---
 flow/peerdbenv/dynamicconf.go      | 11 +++++++++++
 flow/workflows/local_activities.go | 14 ++++++++++++++
 flow/workflows/qrep_flow.go        | 31 ++++++++++++++++++++++--------
 3 files changed, 48 insertions(+), 8 deletions(-)

diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go
index f149bf37bd..314b365733 100644
--- a/flow/peerdbenv/dynamicconf.go
+++ b/flow/peerdbenv/dynamicconf.go
@@ -91,6 +91,13 @@ var DynamicSettings = [...]*protos.DynamicSetting{
 		ApplyMode:        protos.DynconfApplyMode_APPLY_MODE_AFTER_RESUME,
 		TargetForSetting: protos.DynconfTarget_ALL,
 	},
+	{
+		Name:         "PEERDB_FULL_REFRESH_OVERWRITE_MODE",
+		Description:  "Enables full refresh mode for query replication mirrors of overwrite type",
+		DefaultValue: "false",
+		ValueType:    protos.DynconfValueType_BOOL,
+		ApplyMode:    protos.DynconfApplyMode_APPLY_MODE_NEW_MIRROR,
+	},
 	{
 		Name:        "PEERDB_NULLABLE",
 		Description: "Propagate nullability in schema",
@@ -370,6 +377,10 @@ func PeerDBEnableParallelSyncNormalize(ctx context.Context, env map[string]strin
 	return dynamicConfBool(ctx, env, "PEERDB_ENABLE_PARALLEL_SYNC_NORMALIZE")
 }
 
+func PeerDBFullRefreshOverwriteMode(ctx context.Context, env map[string]string) (bool, error) {
+	return dynamicConfBool(ctx, env, "PEERDB_FULL_REFRESH_OVERWRITE_MODE")
+}
+
 func PeerDBNullable(ctx context.Context, env map[string]string) (bool, error) {
 	return dynamicConfBool(ctx, env, "PEERDB_NULLABLE")
 }
diff --git
a/flow/workflows/local_activities.go b/flow/workflows/local_activities.go index d163352ca2..7a3e80f240 100644 --- a/flow/workflows/local_activities.go +++ b/flow/workflows/local_activities.go @@ -29,6 +29,20 @@ func getParallelSyncNormalize(wCtx workflow.Context, logger log.Logger, env map[ return parallel } +func getQRepOverwriteFullRefreshMode(wCtx workflow.Context, logger log.Logger, env map[string]string) bool { + checkCtx := workflow.WithLocalActivityOptions(wCtx, workflow.LocalActivityOptions{ + StartToCloseTimeout: time.Minute, + }) + + getFullRefreshFuture := workflow.ExecuteLocalActivity(checkCtx, peerdbenv.PeerDBFullRefreshOverwriteMode, env) + var fullRefreshEnabled bool + if err := getFullRefreshFuture.Get(checkCtx, &fullRefreshEnabled); err != nil { + logger.Warn("Failed to check if full refresh mode is enabled", slog.Any("error", err)) + return false + } + return fullRefreshEnabled +} + func localPeerType(ctx context.Context, name string) (protos.DBType, error) { pool, err := peerdbenv.GetCatalogConnectionPoolFromEnv(ctx) if err != nil { diff --git a/flow/workflows/qrep_flow.go b/flow/workflows/qrep_flow.go index c7348eefa9..f862b4f3d6 100644 --- a/flow/workflows/qrep_flow.go +++ b/flow/workflows/qrep_flow.go @@ -32,13 +32,15 @@ type QRepPartitionFlowExecution struct { runUUID string } +var InitialLastPartition = &protos.QRepPartition{ + PartitionId: "not-applicable-partition", + Range: nil, +} + // returns a new empty QRepFlowState func newQRepFlowState() *protos.QRepFlowState { return &protos.QRepFlowState{ - LastPartition: &protos.QRepPartition{ - PartitionId: "not-applicable-partition", - Range: nil, - }, + LastPartition: InitialLastPartition, NumPartitionsProcessed: 0, NeedsResync: true, CurrentFlowStatus: protos.FlowStatus_STATUS_RUNNING, @@ -461,8 +463,10 @@ func QRepWaitForNewRowsWorkflow(ctx workflow.Context, config *protos.QRepConfig, return fmt.Errorf("error checking for new rows: %w", err) } + optedForOverwrite := config.WriteMode.WriteType == protos.QRepWriteType_QREP_WRITE_MODE_OVERWRITE + fullRefresh := optedForOverwrite && getQRepOverwriteFullRefreshMode(ctx, logger, config.Env) // If no new rows are found, continue as new - if !hasNewRows { + if !hasNewRows || fullRefresh { waitBetweenBatches := 5 * time.Second if config.WaitBetweenBatchesSeconds > 0 { waitBetweenBatches = time.Duration(config.WaitBetweenBatchesSeconds) * time.Second @@ -472,6 +476,9 @@ func QRepWaitForNewRowsWorkflow(ctx workflow.Context, config *protos.QRepConfig, return sleepErr } + if fullRefresh { + return nil + } logger.Info("QRepWaitForNewRowsWorkflow: continuing the loop") return workflow.NewContinueAsNewError(ctx, QRepWaitForNewRowsWorkflow, config, lastPartition) } @@ -545,8 +552,16 @@ func QRepFlowWorkflow( return state, err } - if !config.InitialCopyOnly && state.LastPartition != nil { - if err := q.waitForNewRows(ctx, signalChan, state.LastPartition); err != nil { + fullRefresh := false + lastPartition := state.LastPartition + if config.WriteMode.WriteType == protos.QRepWriteType_QREP_WRITE_MODE_OVERWRITE { + if fullRefresh = getQRepOverwriteFullRefreshMode(ctx, q.logger, config.Env); fullRefresh { + lastPartition = InitialLastPartition + } + } + + if !config.InitialCopyOnly && lastPartition != nil { + if err := q.waitForNewRows(ctx, signalChan, lastPartition); err != nil { return state, err } } @@ -580,7 +595,7 @@ func QRepFlowWorkflow( q.logger.Info(fmt.Sprintf("%d partitions processed", len(partitions.Partitions))) state.NumPartitionsProcessed += 
uint64(len(partitions.Partitions)) - if len(partitions.Partitions) > 0 { + if len(partitions.Partitions) > 0 && !fullRefresh { state.LastPartition = partitions.Partitions[len(partitions.Partitions)-1] } } From 9f62aec1af1e6f080ec5aacfc9fe4ac7dc2fe0f9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 19:18:17 +0000 Subject: [PATCH 37/59] fix(deps): update cargo dependencies (#2266) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [cargo-deb](https://lib.rs/crates/cargo-deb) ([source](https://redirect.github.com/kornelski/cargo-deb)) | dependencies | minor | `2.8.0` -> `2.9.0` | | [clap](https://redirect.github.com/clap-rs/clap) | dependencies | patch | `4.5.20` -> `4.5.21` | | [rustls](https://redirect.github.com/rustls/rustls) | dependencies | patch | `0.23.16` -> `0.23.17` | | [serde_json](https://redirect.github.com/serde-rs/json) | dependencies | patch | `1.0.132` -> `1.0.133` | --- ### Release Notes
**kornelski/cargo-deb (cargo-deb)**

### [`v2.9.0`](https://redirect.github.com/kornelski/cargo-deb/compare/v2.8.0...v2.9.0)

[Compare Source](https://redirect.github.com/kornelski/cargo-deb/compare/v2.8.0...v2.9.0)
**clap-rs/clap (clap)**

### [`v4.5.21`](https://redirect.github.com/clap-rs/clap/blob/HEAD/CHANGELOG.md#4521---2024-11-13)

[Compare Source](https://redirect.github.com/clap-rs/clap/compare/v4.5.20...v4.5.21)

##### Fixes

- *(parser)* Ensure defaults are filled in on error with `ignore_errors(true)`
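
As a minimal sketch of what that parser fix means in practice (the `demo` command and `--level` flag here are hypothetical, not from this PR; only `Command::ignore_errors` and `Arg::default_value` from clap itself are assumed):

```rust
use clap::{Arg, Command};

fn main() {
    // ignore_errors(true) swallows parse errors (such as the unknown
    // --bogus flag below) instead of exiting the process.
    let matches = Command::new("demo")
        .ignore_errors(true)
        .arg(Arg::new("level").long("level").default_value("info"))
        .get_matches_from(["demo", "--bogus"]);

    // Per the 4.5.21 fix, defaults are now filled in even on that error
    // path, so "level" resolves to "info" instead of being absent.
    println!("level = {:?}", matches.get_one::<String>("level"));
}
```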
**serde-rs/json (serde_json)**

### [`v1.0.133`](https://redirect.github.com/serde-rs/json/releases/tag/v1.0.133)

[Compare Source](https://redirect.github.com/serde-rs/json/compare/v1.0.132...v1.0.133)

- Implement `From<[T; N]>` for `serde_json::Value` ([#1215](https://redirect.github.com/serde-rs/json/issues/1215))
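
And a quick sketch of that new conversion (illustrative values only; the impl is the `From<[T; N]> for serde_json::Value` noted above):

```rust
use serde_json::{json, Value};

fn main() {
    // New in 1.0.133: a fixed-size array converts straight into a JSON
    // array, with no intermediate slice or Vec.
    let v = Value::from([1, 2, 3]);
    assert_eq!(v, json!([1, 2, 3]));
    println!("{v}"); // prints [1,2,3]
}
```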
--- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- nexus/Cargo.lock | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/nexus/Cargo.lock b/nexus/Cargo.lock index cc3650b6f8..f827849ff0 100644 --- a/nexus/Cargo.lock +++ b/nexus/Cargo.lock @@ -801,9 +801,9 @@ dependencies = [ [[package]] name = "cargo-deb" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d0500c935971265437386796faad57064d17bf2648f3f0a7e3c8d5a631de23" +checksum = "9103cb60c68ef7ce14a3d17c6d697e8b180356a447685784f7951074bce0b844" dependencies = [ "ar", "cargo_toml", @@ -962,9 +962,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -972,9 +972,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -1790,7 +1790,7 @@ dependencies = [ "http 1.1.0", "hyper 1.5.0", "hyper-util", - "rustls 0.23.16", + "rustls 0.23.17", "rustls-native-certs 0.8.0", "rustls-pki-types", "tokio", @@ -3044,7 +3044,7 @@ dependencies = [ "anyhow", "futures-util", "pt", - "rustls 0.23.16", + "rustls 0.23.17", "ssh2", "tokio", "tokio-postgres", @@ -3159,7 +3159,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" dependencies = [ "bytes", - "heck 0.5.0", + "heck 0.4.1", "itertools", "log", "multimap", @@ -3270,7 +3270,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.0.0", - "rustls 0.23.16", + "rustls 0.23.17", "socket2", "thiserror", "tokio", @@ -3287,7 +3287,7 @@ dependencies = [ "rand", "ring", "rustc-hash 2.0.0", - "rustls 0.23.16", + "rustls 0.23.17", "slab", "thiserror", "tinyvec", @@ -3305,7 +3305,7 @@ dependencies = [ "once_cell", "socket2", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3518,7 +3518,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.16", + "rustls 0.23.17", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -3686,9 +3686,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "7f1a745511c54ba6d4465e8d5dfbd81b45791756de28d4981af70d6dca128f1e" dependencies = [ "log", "once_cell", @@ -3916,9 +3916,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -4411,7 +4411,7 @@ checksum = "27d684bad428a0f2481f42241f821db42c54e2dc81d8c00db8536c506b0a0144" dependencies = [ "const-oid", "ring", - "rustls 0.23.16", + "rustls 0.23.17", "tokio", "tokio-postgres", "tokio-rustls 0.26.0", @@ -4445,7 +4445,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.16", + "rustls 0.23.17", "rustls-pki-types", "tokio", ] @@ -4790,7 +4790,7 @@ dependencies = [ "flate2", "log", "once_cell", - "rustls 0.23.16", + "rustls 0.23.17", "rustls-pki-types", "serde", "serde_json", @@ -5358,7 +5358,7 @@ dependencies = [ "hyper-util", "log", "percent-encoding", - "rustls 0.23.16", + "rustls 0.23.17", "rustls-pemfile 2.2.0", "seahash", "serde", From 15677364dd1041a2822948ba200cba9e15707fc5 Mon Sep 17 00:00:00 2001 From: Kaushik Iska Date: Mon, 18 Nov 2024 18:16:50 -0600 Subject: [PATCH 38/59] refactor: reduce default batch and partition sizes for improved stability (#2262) --- flow/activities/flowable_core.go | 2 +- flow/workflows/snapshot_flow.go | 2 +- ui/app/mirrors/create/helpers/cdc.ts | 12 ++++++------ ui/app/mirrors/create/helpers/common.ts | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/flow/activities/flowable_core.go b/flow/activities/flowable_core.go index db04efea30..d583044b0c 100644 --- a/flow/activities/flowable_core.go +++ b/flow/activities/flowable_core.go @@ -139,7 +139,7 @@ func syncCore[TPull connectors.CDCPullConnectorCore, TSync connectors.CDCSyncCon batchSize := options.BatchSize if batchSize == 0 { - batchSize = 1_000_000 + batchSize = 250_000 } lastOffset, err := func() (int64, error) { diff --git a/flow/workflows/snapshot_flow.go b/flow/workflows/snapshot_flow.go index d4f494d1ff..9b21b7b384 100644 --- a/flow/workflows/snapshot_flow.go +++ b/flow/workflows/snapshot_flow.go @@ -166,7 +166,7 @@ func (s *SnapshotFlowExecution) cloneTable( numWorkers = s.config.SnapshotMaxParallelWorkers } - numRowsPerPartition := uint32(500000) + numRowsPerPartition := uint32(250000) if s.config.SnapshotNumRowsPerPartition > 0 { numRowsPerPartition = s.config.SnapshotNumRowsPerPartition } diff --git a/ui/app/mirrors/create/helpers/cdc.ts b/ui/app/mirrors/create/helpers/cdc.ts index 99dd229cb3..957564d678 100644 --- a/ui/app/mirrors/create/helpers/cdc.ts +++ b/ui/app/mirrors/create/helpers/cdc.ts @@ -22,12 +22,12 @@ export const cdcSettings: MirrorSetting[] = [ setter( (curr: CDCConfig): CDCConfig => ({ ...curr, - maxBatchSize: (value as number) || 1000000, + maxBatchSize: (value as number) || 250000, }) ), - tips: 'The number of rows PeerDB will pull from source at a time. If left empty, the default value is 1,000,000 rows.', + tips: 'The number of rows PeerDB will pull from source at a time. 
If left empty, the default value is 250,000 rows.', type: 'number', - default: '1000000', + default: '250000', advanced: AdvancedSettingType.ALL, }, { @@ -78,11 +78,11 @@ export const cdcSettings: MirrorSetting[] = [ setter( (curr: CDCConfig): CDCConfig => ({ ...curr, - snapshotNumRowsPerPartition: parseInt(value as string, 10) || 1000000, + snapshotNumRowsPerPartition: parseInt(value as string, 10) || 250000, }) ), - tips: 'PeerDB splits up table data into partitions for increased performance. This setting controls the number of rows per partition. The default value is 1000000.', - default: '1000000', + tips: 'PeerDB splits up table data into partitions for increased performance. This setting controls the number of rows per partition. The default value is 250000.', + default: '250000', type: 'number', advanced: AdvancedSettingType.ALL, }, diff --git a/ui/app/mirrors/create/helpers/common.ts b/ui/app/mirrors/create/helpers/common.ts index d4ba5747ad..f29a2376c9 100644 --- a/ui/app/mirrors/create/helpers/common.ts +++ b/ui/app/mirrors/create/helpers/common.ts @@ -25,10 +25,10 @@ export const blankCDCSetting: CDCConfig = { destinationName: '', flowJobName: '', tableMappings: [], - maxBatchSize: 1000000, + maxBatchSize: 250000, doInitialSnapshot: true, publicationName: '', - snapshotNumRowsPerPartition: 1000000, + snapshotNumRowsPerPartition: 250000, snapshotMaxParallelWorkers: 4, snapshotNumTablesInParallel: 1, snapshotStagingPath: '', From 6c7c50424e6e674c6a8b06441ca987ae69fab89d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 18:13:26 +0530 Subject: [PATCH 39/59] chore(deps): update dockerfile dependencies (#2265) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | golang | stage | digest | `9f68de8` -> `c694a4d` | | lukemathwalker/cargo-chef | stage | digest | `9ba204a` -> `75f772f` | | [node](https://redirect.github.com/nodejs/node) | final | digest | `dc8ba2f` -> `b64ced2` | --- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). 
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- stacks/flow.Dockerfile | 2 +- stacks/peerdb-server.Dockerfile | 2 +- stacks/peerdb-ui.Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stacks/flow.Dockerfile b/stacks/flow.Dockerfile index 13fc5b0895..2c6f375d0e 100644 --- a/stacks/flow.Dockerfile +++ b/stacks/flow.Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1.11@sha256:10c699f1b6c8bdc8f6b4ce8974855dd8542f1768c26eb240237b8f1c9c6c9976 -FROM golang:1.23-alpine@sha256:9f68de83bef9e75cda99597d51778f4f5776ab8d9374e1094a3cd724401094c3 AS builder +FROM golang:1.23-alpine@sha256:c694a4d291a13a9f9d94933395673494fc2cc9d4777b85df3a7e70b3492d3574 AS builder RUN apk add --no-cache gcc geos-dev musl-dev WORKDIR /root/flow diff --git a/stacks/peerdb-server.Dockerfile b/stacks/peerdb-server.Dockerfile index 3e9db5240d..50c69f8076 100644 --- a/stacks/peerdb-server.Dockerfile +++ b/stacks/peerdb-server.Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1@sha256:865e5dd094beca432e8c0a1d5e1c465db5f998dca4e439981029b3b81fb39ed5 -FROM lukemathwalker/cargo-chef:latest-rust-alpine3.20@sha256:9ba204a79235804a3a2f41467b09e499daad8bd637c72449ba30ada4070526ff as chef +FROM lukemathwalker/cargo-chef:latest-rust-alpine3.20@sha256:75f772fe2d870acb77ffdb2206810cd694a6720263f94c74fcc75080963dbff5 as chef WORKDIR /root FROM chef as planner diff --git a/stacks/peerdb-ui.Dockerfile b/stacks/peerdb-ui.Dockerfile index def0aad72e..42cedca118 100644 --- a/stacks/peerdb-ui.Dockerfile +++ b/stacks/peerdb-ui.Dockerfile @@ -1,7 +1,7 @@ # syntax=docker/dockerfile:1.11@sha256:10c699f1b6c8bdc8f6b4ce8974855dd8542f1768c26eb240237b8f1c9c6c9976 # Base stage -FROM node:22-alpine@sha256:dc8ba2f61dd86c44e43eb25a7812ad03c5b1b224a19fc6f77e1eb9e5669f0b82 AS base +FROM node:22-alpine@sha256:b64ced2e7cd0a4816699fe308ce6e8a08ccba463c757c00c14cd372e3d2c763e AS base ENV NPM_CONFIG_UPDATE_NOTIFIER=false RUN apk add --no-cache openssl && \ mkdir /app && \ From b6e95aa3348174d14edb97ca4a1735b171d71ebf Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Tue, 19 Nov 2024 18:19:31 +0530 Subject: [PATCH 40/59] enable automerge for renovate --- renovate.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/renovate.json b/renovate.json index e053c6ed54..dead092799 100644 --- a/renovate.json +++ b/renovate.json @@ -16,7 +16,8 @@ ] } ], - "separateMajorMinor": false + "separateMajorMinor": false, + "automerge": true }, { "matchPackageNames": ["mysql_async"], From 140edc39fc85a0544afee0fb28530ce384905c48 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 12:58:32 +0000 Subject: [PATCH 41/59] chore(deps): update docker-compose dependencies (#2264) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Update | Change | |---|---|---| | ghcr.io/peerdb-io/flow-api | digest | `a7c7d1c` -> `e1800e4` | | ghcr.io/peerdb-io/flow-snapshot-worker | digest | `9b6db50` -> `995d426` | | ghcr.io/peerdb-io/flow-worker | digest | `3b4da65` -> `66afae0` | | ghcr.io/peerdb-io/peerdb-server | digest | `dfb652e` -> `9abc818` | | ghcr.io/peerdb-io/peerdb-ui | digest | `cb4d1db` -> `d6261dd` | | postgres | digest | `d388be1` -> `0d96245` | --- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 
🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose-dev.yml | 2 +- docker-compose.yml | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 7309472d75..9db08bbda4 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -39,7 +39,7 @@ x-flow-worker-env: &flow-worker-env services: catalog: container_name: catalog - image: postgres:17-alpine@sha256:d388be15cfb665c723da47cccdc7ea5c003ed71f700c5419bbd075033227ce1f + image: postgres:17-alpine@sha256:0d9624535618a135c5453258fd629f4963390338b11aaffb92292c12df3a6c17 command: -c config_file=/etc/postgresql.conf ports: - 9901:5432 diff --git a/docker-compose.yml b/docker-compose.yml index c03c9993de..4c59bc8896 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -32,7 +32,7 @@ x-flow-worker-env: &flow-worker-env services: catalog: container_name: catalog - image: postgres:17-alpine@sha256:d388be15cfb665c723da47cccdc7ea5c003ed71f700c5419bbd075033227ce1f + image: postgres:17-alpine@sha256:0d9624535618a135c5453258fd629f4963390338b11aaffb92292c12df3a6c17 command: -c config_file=/etc/postgresql.conf restart: unless-stopped ports: @@ -112,7 +112,7 @@ services: flow-api: container_name: flow_api - image: ghcr.io/peerdb-io/flow-api:latest-dev@sha256:a7c7d1cb12f618a747233556820df114acc4d2e737e487bdfdbb0051720710c9 + image: ghcr.io/peerdb-io/flow-api:latest-dev@sha256:e1800e45df01c20c654628f106d06b3e61fddaa71930cb044bb8a119f574eb69 restart: unless-stopped ports: - 8112:8112 @@ -128,7 +128,7 @@ services: flow-snapshot-worker: container_name: flow-snapshot-worker - image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-dev@sha256:9b6db5039e4f73f7d205a40400683ff9ca242dbb2eee405a2e9b056e947c8666 + image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-dev@sha256:995d426604275f14c59bbc198d382825832cf4f69b3f05083d746b78b28952ca restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -138,7 +138,7 @@ services: flow-worker: container_name: flow-worker - image: ghcr.io/peerdb-io/flow-worker:latest-dev@sha256:3b4da6550deacd638e99592b65d9f61191ed020b8268efa52297c3101ab37c16 + image: ghcr.io/peerdb-io/flow-worker:latest-dev@sha256:66afae04c334af2a7cdb791f8f5a025a287b88346a6f72fdbb62156a898f62f3 restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -151,7 +151,7 @@ services: peerdb: container_name: peerdb-server stop_signal: SIGINT - image: ghcr.io/peerdb-io/peerdb-server:latest-dev@sha256:dfb652eebb410198f28e720d04d17c4de0698581d214ce99337ee1efc0874ba4 + image: ghcr.io/peerdb-io/peerdb-server:latest-dev@sha256:9abc8184f8104599105cefc8f14c708cb9283e99bf904b860258a0d303bebea6 restart: unless-stopped environment: <<: *catalog-config @@ -167,7 +167,7 @@ services: peerdb-ui: container_name: peerdb-ui - image: ghcr.io/peerdb-io/peerdb-ui:latest-dev@sha256:cb4d1dbd167c6bdd44e795a33b69ab5001932c7cbd8ac02126b79331022c17a0 + image: 
ghcr.io/peerdb-io/peerdb-ui:latest-dev@sha256:d6261dd51d3f15d2a47fd690e19c87ffa2d59aeac3d3e950781fca56ab39f95c restart: unless-stopped ports: - 3000:3000 From e10e54fa0bc338b1f48bfd0beabf96ac80061ee9 Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Tue, 19 Nov 2024 21:41:38 +0530 Subject: [PATCH 42/59] chore: replace debezium in ci and test on multiple pg versions (#2269) --- .github/workflows/ci.yml | 5 +++-- .github/workflows/flow.yml | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4d98e8736d..5dbbb4ee97 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,18 +5,19 @@ on: branches: [main, release/*] pull_request: branches: [main, release/*] - paths: [nexus/**, protos/**] + paths: [nexus/**, protos/**, .github/workflows/ci.yml] jobs: build: strategy: matrix: runner: [ubicloud-standard-2-ubuntu-2204-arm] + postgres-version: [13, 14, 15, 16, 17] runs-on: ${{ matrix.runner }} timeout-minutes: 30 services: catalog_peer: - image: debezium/postgres:14-alpine + image: postgres:${{ matrix.postgres-version }}-alpine ports: - 7132:5432 env: diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index 2673bda3f8..d3168e96d8 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -11,11 +11,12 @@ jobs: strategy: matrix: runner: [ubicloud-standard-16-ubuntu-2204-arm] + postgres-version: [15, 16, 17] runs-on: ${{ matrix.runner }} timeout-minutes: 30 services: catalog: - image: imresamu/postgis:15-3.4-alpine + image: imresamu/postgis:${{ matrix.postgres-version }}-3.5-alpine ports: - 5432:5432 env: From 172d65297546379cc082dc55c30d02587e64ffdc Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Tue, 19 Nov 2024 21:48:21 +0530 Subject: [PATCH 43/59] renovate: disable major dependencies for next js --- renovate.json | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/renovate.json b/renovate.json index dead092799..b9a8ffef6a 100644 --- a/renovate.json +++ b/renovate.json @@ -23,6 +23,12 @@ "matchPackageNames": ["mysql_async"], "matchManagers": ["cargo"], "enabled": false + }, + { + "matchPackageNames": ["next"], + "matchManagers": ["npm"], + "matchUpdateTypes": ["major"], + "enabled": false } ], "vulnerabilityAlerts": { From 1e5f10f3a16d79312f03be5e65e3bc1be145f398 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 16:26:08 +0000 Subject: [PATCH 44/59] chore(deps): pin dependencies (#2267) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [actions/checkout](https://redirect.github.com/actions/checkout) | action | pinDigest | -> `11bd719` | | [bitnami/minio](https://redirect.github.com/bitnami/containers) ([source](https://redirect.github.com/bitnami/containers/tree/HEAD/bitnami/minio)) | service | pinDigest | -> `9f2d9c4` | | [bufbuild/buf-action](https://redirect.github.com/bufbuild/buf-action) | action | pinDigest | -> `3fb7035` | | [depot/bake-action](https://redirect.github.com/depot/bake-action) | action | pinDigest | -> `143e50b` | | [depot/setup-action](https://redirect.github.com/depot/setup-action) | action | pinDigest | -> `b0b1ea4` | | [docker/login-action](https://redirect.github.com/docker/login-action) | action | pinDigest | -> `9780b0c` | | elasticsearch | 
service | minor | `8.13.0` -> `8.16.0` | | [github/codeql-action](https://redirect.github.com/github/codeql-action) | action | pinDigest | -> `ea9e4e3` | | [golangci/golangci-lint-action](https://redirect.github.com/golangci/golangci-lint-action) | action | pinDigest | -> `971e284` | | [jsdaniell/create-json](https://redirect.github.com/jsdaniell/create-json) | action | pinDigest | -> `b8e77fa` | | [redpanda-data/github-action](https://redirect.github.com/redpanda-data/github-action) | action | pinDigest | -> `c68af8e` | | [temporalio/setup-temporal](https://redirect.github.com/temporalio/setup-temporal) | action | pinDigest | -> `1059a50` | | [ubicloud/cache](https://redirect.github.com/ubicloud/cache) | action | pinDigest | -> `0a97811` | | [ubicloud/rust-cache](https://redirect.github.com/ubicloud/rust-cache) | action | pinDigest | -> `69587b2` | | [ubicloud/setup-go](https://redirect.github.com/ubicloud/setup-go) | action | pinDigest | -> `35680fe` | | [wearerequired/lint-action](https://redirect.github.com/wearerequired/lint-action) | action | pinDigest | -> `548d8a7` | --- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- .github/actions/genprotos/action.yml | 6 +++--- .github/workflows/ci.yml | 8 ++++---- .github/workflows/cleanup.yml | 12 ++++++------ .github/workflows/codeql-analysis.yml | 6 +++--- .github/workflows/customer-docker.yml | 8 ++++---- .github/workflows/dev-docker.yml | 8 ++++---- .github/workflows/flow-api-client.yml | 2 +- .github/workflows/flow.yml | 24 ++++++++++++------------ .github/workflows/golang-lint.yml | 6 +++--- .github/workflows/rust-lint.yml | 2 +- .github/workflows/stable-docker.yml | 8 ++++---- .github/workflows/ui-build.yml | 2 +- .github/workflows/ui-lint.yml | 4 ++-- 13 files changed, 48 insertions(+), 48 deletions(-) diff --git a/.github/actions/genprotos/action.yml b/.github/actions/genprotos/action.yml index 84bc29d001..84dfd540f0 100644 --- a/.github/actions/genprotos/action.yml +++ b/.github/actions/genprotos/action.yml @@ -3,10 +3,10 @@ description: 'Install buf with local plugins, generate protos and cache' runs: using: "composite" steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: check cache id: cache - uses: ubicloud/cache@v4 + uses: ubicloud/cache@0a97811d53629b143a56b3c2b1f729fd11719ef7 # v4 with: path: | ./flow/generated/protos @@ -15,7 +15,7 @@ runs: key: ${{ runner.os }}-build-genprotos-${{ hashFiles('buf.gen.yaml', './protos/peers.proto', './protos/flow.proto', './protos/route.proto') }} - if: steps.cache.outputs.cache-hit != 'true' - uses: bufbuild/buf-action@v1 + uses: bufbuild/buf-action@3fb70352251376e958c4c2c92c3818de82a71c2b # v1 with: setup_only: true github_token: ${{ github.token }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5dbbb4ee97..5c86636ae9 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,7 +30,7 @@ jobs: --health-timeout 5s --health-retries 5 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -42,7 +42,7 @@ jobs: - name: setup gcp service account id: gcp-service-account - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "bq_service_account.json" json: ${{ secrets.GCP_GH_CI_PKEY }} @@ -50,13 +50,13 @@ jobs: - name: setup snowflake credentials id: sf-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "snowflake_creds.json" json: ${{ secrets.SNOWFLAKE_GH_CI_PKEY }} dir: "nexus/server/tests/assets/" - - uses: ubicloud/rust-cache@v2 + - uses: ubicloud/rust-cache@69587b2b3f26e8938580c44a643d265ed12f3119 # v2 with: workspaces: nexus diff --git a/.github/workflows/cleanup.yml b/.github/workflows/cleanup.yml index 5897eae7fd..9471872f6b 100644 --- a/.github/workflows/cleanup.yml +++ b/.github/workflows/cleanup.yml @@ -10,9 +10,9 @@ jobs: timeout-minutes: 60 steps: - name: checkout sources - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - - uses: ubicloud/setup-go@v5 + - uses: ubicloud/setup-go@35680fe0723d4a9309d4b1ac1c67e0d46eac5f24 # v5 with: go-version: '1.23.0' cache-dependency-path: e2e_cleanup/go.sum @@ -24,28 +24,28 @@ jobs: - name: setup gcp service account id: gcp-service-account - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "bq_service_account.json" json: ${{ secrets.GCP_GH_CI_PKEY }} - name: setup snowflake credentials id: sf-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "snowflake_creds.json" json: ${{ secrets.SNOWFLAKE_GH_CI_PKEY }} - name: setup S3 credentials id: s3-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "s3_creds.json" json: ${{ secrets.S3_CREDS }} - name: setup GCS credentials id: gcs-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "gcs_creds.json" json: ${{ secrets.GCS_CREDS }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 5de1d92c40..303066f119 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -34,7 +34,7 @@ jobs: build-mode: none steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -47,12 +47,12 @@ jobs: # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@ea9e4e37992a54ee68a9622e985e60c8e8f12d9f # v3 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/customer-docker.yml b/.github/workflows/customer-docker.yml index 8278ec3d27..67145512af 100644 --- a/.github/workflows/customer-docker.yml +++ b/.github/workflows/customer-docker.yml @@ -18,15 +18,15 @@ jobs: contents: read packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: depot/setup-action@v1 + - uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3 with: registry: ghcr.io username: ${{github.actor}} @@ -42,7 +42,7 @@ jobs: echo "branch=$(echo $GITHUB_REF | sed -e 's/.*customer-//')" >> $GITHUB_OUTPUT - name: Build (optionally publish) PeerDB Images - uses: depot/bake-action@v1 + uses: depot/bake-action@143e50b965398f1f5dc8463be7dde6f62b9e9c21 # v1 with: token: ${{ secrets.DEPOT_TOKEN }} files: ./docker-bake.hcl diff --git a/.github/workflows/dev-docker.yml b/.github/workflows/dev-docker.yml index 6011ec4ab4..275ad28b77 100644 --- a/.github/workflows/dev-docker.yml +++ b/.github/workflows/dev-docker.yml @@ -17,15 +17,15 @@ jobs: contents: read packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: depot/setup-action@v1 + - uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3 with: registry: ghcr.io username: ${{github.actor}} @@ -36,7 +36,7 @@ jobs: run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT - name: Build (optionally publish) PeerDB Images - uses: depot/bake-action@v1 + uses: depot/bake-action@143e50b965398f1f5dc8463be7dde6f62b9e9c21 # v1 with: token: ${{ secrets.DEPOT_TOKEN }} files: ./docker-bake.hcl diff --git a/.github/workflows/flow-api-client.yml b/.github/workflows/flow-api-client.yml index 046b377db7..5e373b2d66 100644 --- a/.github/workflows/flow-api-client.yml +++ b/.github/workflows/flow-api-client.yml @@ -9,7 +9,7 @@ jobs: permissions: contents: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos diff --git a/.github/workflows/flow.yml b/.github/workflows/flow.yml index d3168e96d8..e794512a04 100644 --- a/.github/workflows/flow.yml +++ b/.github/workflows/flow.yml @@ -25,7 +25,7 @@ jobs: POSTGRES_DB: postgres POSTGRES_INITDB_ARGS: --locale=C.UTF-8 elasticsearch: - image: elasticsearch:8.13.0 + image: elasticsearch:8.16.0@sha256:a411f7c17549209c5839b69f929de00bd91f1e2dcf08b65d5f41b122eae17f5e ports: - 9200:9200 env: @@ -33,7 +33,7 @@ jobs: xpack.security.enabled: false xpack.security.enrollment.enabled: false minio: - image: bitnami/minio:2024.11.7 
+ image: bitnami/minio:2024.11.7@sha256:9f2d9c45006a2ada1bc485e1393291ce7d54ae1a46260dd491381a4eb8b2fd47 ports: - 9999:9999 env: @@ -44,12 +44,12 @@ jobs: MINIO_DEFAULT_BUCKETS: peerdb steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: ubicloud/setup-go@v5 + - uses: ubicloud/setup-go@35680fe0723d4a9309d4b1ac1c67e0d46eac5f24 # v5 with: go-version: '1.23.0' cache-dependency-path: flow/go.sum @@ -64,35 +64,35 @@ jobs: - name: setup gcp service account id: gcp-service-account - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "bq_service_account.json" json: ${{ secrets.GCP_GH_CI_PKEY }} - name: setup snowflake credentials id: sf-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "snowflake_creds.json" json: ${{ secrets.SNOWFLAKE_GH_CI_PKEY }} - name: setup S3 credentials id: s3-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "s3_creds.json" json: ${{ secrets.S3_CREDS }} - name: setup GCS credentials id: gcs-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "gcs_creds.json" json: ${{ secrets.GCS_CREDS }} - name: setup Eventhubs credentials id: eventhubs-credentials - uses: jsdaniell/create-json@v1.2.3 + uses: jsdaniell/create-json@b8e77fa01397ca39cc4a6198cc29a3be5481afef # v1.2.3 with: name: "eh_creds.json" json: ${{ secrets.EH_CREDS }} @@ -110,11 +110,11 @@ jobs: PGPASSWORD: postgres - name: start redpanda - uses: redpanda-data/github-action@v0.1.4 + uses: redpanda-data/github-action@c68af8edc420b987e871615ca40b3a5dd70eb5b1 # v0.1.4 with: version: "latest" - - uses: ubicloud/cache@v4 + - uses: ubicloud/cache@0a97811d53629b143a56b3c2b1f729fd11719ef7 # v4 id: cache-clickhouse with: path: ./clickhouse @@ -130,7 +130,7 @@ jobs: ./clickhouse server & - name: Install Temporal CLI - uses: temporalio/setup-temporal@v0 + uses: temporalio/setup-temporal@1059a504f87e7fa2f385e3fa40d1aa7e62f1c6ca # v0 - name: run tests run: | diff --git a/.github/workflows/golang-lint.yml b/.github/workflows/golang-lint.yml index aadcfa7a57..2289eeae17 100644 --- a/.github/workflows/golang-lint.yml +++ b/.github/workflows/golang-lint.yml @@ -13,7 +13,7 @@ jobs: name: lint runs-on: [ubicloud-standard-4-ubuntu-2204-arm] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -22,12 +22,12 @@ jobs: run: | sudo apt-get update sudo apt-get install libgeos-dev - - uses: ubicloud/setup-go@v5 + - uses: ubicloud/setup-go@35680fe0723d4a9309d4b1ac1c67e0d46eac5f24 # v5 with: go-version: '1.23.0' cache: false - name: golangci-lint - uses: golangci/golangci-lint-action@v6 + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6 with: version: v1.61 working-directory: ./flow diff --git a/.github/workflows/rust-lint.yml b/.github/workflows/rust-lint.yml index b9e43c1a24..c4e2782f1c 100644 --- a/.github/workflows/rust-lint.yml +++ b/.github/workflows/rust-lint.yml @@ -16,7 +16,7 @@ jobs: runner: [ubicloud-standard-4-ubuntu-2204-arm] runs-on: ${{ matrix.runner }} steps: - - uses: actions/checkout@v4 + - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos diff --git a/.github/workflows/stable-docker.yml b/.github/workflows/stable-docker.yml index 9eabbcfb28..0056a7d9c3 100644 --- a/.github/workflows/stable-docker.yml +++ b/.github/workflows/stable-docker.yml @@ -15,22 +15,22 @@ jobs: contents: read packages: write steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos - - uses: depot/setup-action@v1 + - uses: depot/setup-action@b0b1ea4f69e92ebf5dea3f8713a1b0c37b2126a5 # v1 - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3 with: registry: ghcr.io username: ${{github.actor}} password: ${{secrets.GITHUB_TOKEN}} - name: Build (optionally publish) PeerDB Images - uses: depot/bake-action@v1 + uses: depot/bake-action@143e50b965398f1f5dc8463be7dde6f62b9e9c21 # v1 with: token: ${{ secrets.DEPOT_TOKEN }} files: ./docker-bake.hcl diff --git a/.github/workflows/ui-build.yml b/.github/workflows/ui-build.yml index feea1ffda5..7915445feb 100644 --- a/.github/workflows/ui-build.yml +++ b/.github/workflows/ui-build.yml @@ -16,7 +16,7 @@ jobs: runs-on: ${{ matrix.runner }} steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos diff --git a/.github/workflows/ui-lint.yml b/.github/workflows/ui-lint.yml index 31e2340ffb..6fb1f2b827 100644 --- a/.github/workflows/ui-lint.yml +++ b/.github/workflows/ui-lint.yml @@ -20,7 +20,7 @@ jobs: runs-on: ${{ matrix.runner }} steps: - name: checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - name: generate or hydrate protos uses: ./.github/actions/genprotos @@ -30,7 +30,7 @@ jobs: run: npm ci - name: lint - uses: wearerequired/lint-action@v2 + uses: wearerequired/lint-action@548d8a7c4b04d3553d32ed5b6e91eb171e10e7bb # v2 with: eslint: true prettier: true From 51d58ce2e344e5ddf57ce5f7ec3b936dc51f010d Mon Sep 17 00:00:00 2001 From: Kunal Gupta <39487888+iamKunalGupta@users.noreply.github.com> Date: Tue, 19 Nov 2024 22:00:19 +0530 Subject: [PATCH 45/59] feat: use latest-stable in docker-compose.yml (#2270) renovate will automatically pin the latest stable weekly to ensure that whenever docker compose up is run again after updating the repo, it will use the latest sha --- docker-compose.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 4c59bc8896..cf1ec3efe0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -112,7 +112,7 @@ services: flow-api: container_name: flow_api - image: ghcr.io/peerdb-io/flow-api:latest-dev@sha256:e1800e45df01c20c654628f106d06b3e61fddaa71930cb044bb8a119f574eb69 + image: ghcr.io/peerdb-io/flow-api:latest-stable restart: unless-stopped ports: - 8112:8112 @@ -128,7 +128,7 @@ services: flow-snapshot-worker: container_name: flow-snapshot-worker - image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-dev@sha256:995d426604275f14c59bbc198d382825832cf4f69b3f05083d746b78b28952ca + image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-stable restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -138,7 +138,7 @@ services: flow-worker: container_name: 
flow-worker - image: ghcr.io/peerdb-io/flow-worker:latest-dev@sha256:66afae04c334af2a7cdb791f8f5a025a287b88346a6f72fdbb62156a898f62f3 + image: ghcr.io/peerdb-io/flow-worker:latest-stable restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -151,7 +151,7 @@ services: peerdb: container_name: peerdb-server stop_signal: SIGINT - image: ghcr.io/peerdb-io/peerdb-server:latest-dev@sha256:9abc8184f8104599105cefc8f14c708cb9283e99bf904b860258a0d303bebea6 + image: ghcr.io/peerdb-io/peerdb-server:latest-stable restart: unless-stopped environment: <<: *catalog-config @@ -167,7 +167,7 @@ services: peerdb-ui: container_name: peerdb-ui - image: ghcr.io/peerdb-io/peerdb-ui:latest-dev@sha256:d6261dd51d3f15d2a47fd690e19c87ffa2d59aeac3d3e950781fca56ab39f95c + image: ghcr.io/peerdb-io/peerdb-ui:latest-stable restart: unless-stopped ports: - 3000:3000 From 61fe601a503cd7d96d58420783c6248a074df60b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 17:23:08 +0000 Subject: [PATCH 46/59] fix(deps): update npm dependencies (#2268) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Change | Age | Adoption | Passing | Confidence | Type | Update | |---|---|---|---|---|---|---|---| | @​radix-ui/react-icons | [`1.3.1` -> `1.3.2`](https://renovatebot.com/diffs/npm/@radix-ui%2freact-icons/1.3.1/1.3.2) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@radix-ui%2freact-icons/1.3.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@radix-ui%2freact-icons/1.3.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@radix-ui%2freact-icons/1.3.1/1.3.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@radix-ui%2freact-icons/1.3.1/1.3.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [@radix-ui/react-tooltip](https://radix-ui.com/primitives) ([source](https://redirect.github.com/radix-ui/primitives)) | [`1.1.3` -> `1.1.4`](https://renovatebot.com/diffs/npm/@radix-ui%2freact-tooltip/1.1.3/1.1.4) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@radix-ui%2freact-tooltip/1.1.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@radix-ui%2freact-tooltip/1.1.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@radix-ui%2freact-tooltip/1.1.3/1.1.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@radix-ui%2freact-tooltip/1.1.3/1.1.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [@tremor/react](https://redirect.github.com/tremorlabs/tremor) | [`3.18.3` -> `3.18.4`](https://renovatebot.com/diffs/npm/@tremor%2freact/3.18.3/3.18.4) | [![age](https://developer.mend.io/api/mc/badges/age/npm/@tremor%2freact/3.18.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/@tremor%2freact/3.18.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/@tremor%2freact/3.18.3/3.18.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/@tremor%2freact/3.18.3/3.18.4?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [autoprefixer](https://redirect.github.com/postcss/autoprefixer) | [`^10.4.20` -> `10.4.20`](https://renovatebot.com/diffs/npm/autoprefixer/10.4.20/10.4.20) | [![age](https://developer.mend.io/api/mc/badges/age/npm/autoprefixer/10.4.20?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/autoprefixer/10.4.20?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/autoprefixer/10.4.20/10.4.20?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/autoprefixer/10.4.20/10.4.20?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [copy-webpack-plugin](https://redirect.github.com/webpack-contrib/copy-webpack-plugin) | [`^12.0.2` -> `12.0.2`](https://renovatebot.com/diffs/npm/copy-webpack-plugin/12.0.2/12.0.2) | [![age](https://developer.mend.io/api/mc/badges/age/npm/copy-webpack-plugin/12.0.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/copy-webpack-plugin/12.0.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/copy-webpack-plugin/12.0.2/12.0.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/copy-webpack-plugin/12.0.2/12.0.2?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [eslint](https://eslint.org) ([source](https://redirect.github.com/eslint/eslint)) | [`^8.57.1` -> `8.57.1`](https://renovatebot.com/diffs/npm/eslint/8.57.1/8.57.1) | [![age](https://developer.mend.io/api/mc/badges/age/npm/eslint/8.57.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/eslint/8.57.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/eslint/8.57.1/8.57.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/eslint/8.57.1/8.57.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [eslint-config-next](https://nextjs.org/docs/app/building-your-application/configuring/eslint#eslint-config) ([source](https://redirect.github.com/vercel/next.js/tree/HEAD/packages/eslint-config-next)) | [`^14.2.14` -> `14.2.17`](https://renovatebot.com/diffs/npm/eslint-config-next/14.2.17/14.2.17) | [![age](https://developer.mend.io/api/mc/badges/age/npm/eslint-config-next/14.2.17?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/eslint-config-next/14.2.17?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/eslint-config-next/14.2.17/14.2.17?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/eslint-config-next/14.2.17/14.2.17?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [eslint-config-prettier](https://redirect.github.com/prettier/eslint-config-prettier) | [`^9.1.0` -> `9.1.0`](https://renovatebot.com/diffs/npm/eslint-config-prettier/9.1.0/9.1.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/eslint-config-prettier/9.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/eslint-config-prettier/9.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/eslint-config-prettier/9.1.0/9.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/eslint-config-prettier/9.1.0/9.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [less](http://lesscss.org) ([source](https://redirect.github.com/less/less.js)) | [`^4.2.0` -> `4.2.0`](https://renovatebot.com/diffs/npm/less/4.2.0/4.2.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/less/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/less/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/less/4.2.0/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/less/4.2.0/4.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [lucide-react](https://lucide.dev) ([source](https://redirect.github.com/lucide-icons/lucide/tree/HEAD/packages/lucide-react)) | [`^0.454.0` -> `^0.460.0`](https://renovatebot.com/diffs/npm/lucide-react/0.454.0/0.460.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/lucide-react/0.460.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/lucide-react/0.460.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/lucide-react/0.454.0/0.460.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/lucide-react/0.454.0/0.460.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | [material-symbols](https://marella.github.io/material-symbols/demo/) ([source](https://redirect.github.com/marella/material-symbols/tree/HEAD/material-symbols)) | [`^0.26.0` -> `^0.27.0`](https://renovatebot.com/diffs/npm/material-symbols/0.26.0/0.27.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/material-symbols/0.27.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/material-symbols/0.27.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/material-symbols/0.26.0/0.27.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/material-symbols/0.26.0/0.27.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | minor | | 
[postcss](https://postcss.org/) ([source](https://redirect.github.com/postcss/postcss)) | [`^8.4.47` -> `8.4.47`](https://renovatebot.com/diffs/npm/postcss/8.4.47/8.4.47) | [![age](https://developer.mend.io/api/mc/badges/age/npm/postcss/8.4.47?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/postcss/8.4.47?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/postcss/8.4.47/8.4.47?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/postcss/8.4.47/8.4.47?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [prettier](https://prettier.io) ([source](https://redirect.github.com/prettier/prettier)) | [`^3.3.3` -> `3.3.3`](https://renovatebot.com/diffs/npm/prettier/3.3.3/3.3.3) | [![age](https://developer.mend.io/api/mc/badges/age/npm/prettier/3.3.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/prettier/3.3.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/prettier/3.3.3/3.3.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/prettier/3.3.3/3.3.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [prettier-plugin-organize-imports](https://redirect.github.com/simonhaenisch/prettier-plugin-organize-imports) | [`^4.1.0` -> `4.1.0`](https://renovatebot.com/diffs/npm/prettier-plugin-organize-imports/4.1.0/4.1.0) | [![age](https://developer.mend.io/api/mc/badges/age/npm/prettier-plugin-organize-imports/4.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/prettier-plugin-organize-imports/4.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/prettier-plugin-organize-imports/4.1.0/4.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/prettier-plugin-organize-imports/4.1.0/4.1.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [react-select](https://redirect.github.com/JedWatson/react-select/tree/master#readme) ([source](https://redirect.github.com/JedWatson/react-select)) | [`5.8.2` -> `5.8.3`](https://renovatebot.com/diffs/npm/react-select/5.8.2/5.8.3) | [![age](https://developer.mend.io/api/mc/badges/age/npm/react-select/5.8.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/react-select/5.8.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/react-select/5.8.2/5.8.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/react-select/5.8.2/5.8.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | dependencies | patch | | [string-width](https://redirect.github.com/sindresorhus/string-width) | [`^7.2.0` -> `7.2.0`](https://renovatebot.com/diffs/npm/string-width/7.2.0/7.2.0) | 
[![age](https://developer.mend.io/api/mc/badges/age/npm/string-width/7.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/string-width/7.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/string-width/7.2.0/7.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/string-width/7.2.0/7.2.0?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [tailwindcss](https://tailwindcss.com) ([source](https://redirect.github.com/tailwindlabs/tailwindcss)) | [`^3.4.13` -> `3.4.14`](https://renovatebot.com/diffs/npm/tailwindcss/3.4.14/3.4.14) | [![age](https://developer.mend.io/api/mc/badges/age/npm/tailwindcss/3.4.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/tailwindcss/3.4.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/tailwindcss/3.4.14/3.4.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/tailwindcss/3.4.14/3.4.14?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | tailwindcss-animate | [`^1.0.7` -> `1.0.7`](https://renovatebot.com/diffs/npm/tailwindcss-animate/1.0.7/1.0.7) | [![age](https://developer.mend.io/api/mc/badges/age/npm/tailwindcss-animate/1.0.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/tailwindcss-animate/1.0.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/tailwindcss-animate/1.0.7/1.0.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/tailwindcss-animate/1.0.7/1.0.7?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [typescript](https://www.typescriptlang.org/) ([source](https://redirect.github.com/microsoft/TypeScript)) | [`^5.6.2` -> `5.6.3`](https://renovatebot.com/diffs/npm/typescript/5.6.3/5.6.3) | [![age](https://developer.mend.io/api/mc/badges/age/npm/typescript/5.6.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/typescript/5.6.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/typescript/5.6.3/5.6.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/typescript/5.6.3/5.6.3?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | | [webpack](https://redirect.github.com/webpack/webpack) | [`^5.95.0` -> `5.96.1`](https://renovatebot.com/diffs/npm/webpack/5.96.1/5.96.1) | [![age](https://developer.mend.io/api/mc/badges/age/npm/webpack/5.96.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![adoption](https://developer.mend.io/api/mc/badges/adoption/npm/webpack/5.96.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | 
[![passing](https://developer.mend.io/api/mc/badges/compatibility/npm/webpack/5.96.1/5.96.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | [![confidence](https://developer.mend.io/api/mc/badges/confidence/npm/webpack/5.96.1/5.96.1?slim=true)](https://docs.renovatebot.com/merge-confidence/) | devDependencies | pin | --- ### Release Notes
radix-ui/primitives (@​radix-ui/react-tooltip)

### [`v1.1.4`](https://redirect.github.com/radix-ui/primitives/compare/157415ed1f34c53b5afbf53a047895ed6a7f957f...d70e369c11188107319a4e11b598919251724629)

[Compare Source](https://redirect.github.com/radix-ui/primitives/compare/157415ed1f34c53b5afbf53a047895ed6a7f957f...d70e369c11188107319a4e11b598919251724629)
tremorlabs/tremor (@​tremor/react)

### [`v3.18.4`](https://redirect.github.com/tremorlabs/tremor/compare/v3.18.3...v3.18.4)

[Compare Source](https://redirect.github.com/tremorlabs/tremor/compare/v3.18.3...v3.18.4)
lucide-icons/lucide (lucide-react)

### [`v0.460.0`](https://redirect.github.com/lucide-icons/lucide/releases/tag/0.460.0): New icons 0.460.0

[Compare Source](https://redirect.github.com/lucide-icons/lucide/compare/0.459.0...0.460.0)

#### New icons 🎨

- `toilet` ([#​2141](https://redirect.github.com/lucide-icons/lucide/issues/2141)) by [@​EthanHazel](https://redirect.github.com/EthanHazel)

### [`v0.459.0`](https://redirect.github.com/lucide-icons/lucide/releases/tag/0.459.0): New icons 0.459.0

[Compare Source](https://redirect.github.com/lucide-icons/lucide/compare/0.458.0...0.459.0)

#### New icons 🎨

- `equal-approximately` ([#​2594](https://redirect.github.com/lucide-icons/lucide/issues/2594)) by [@​ksk3110](https://redirect.github.com/ksk3110)

### [`v0.458.0`](https://redirect.github.com/lucide-icons/lucide/releases/tag/0.458.0): New icons 0.458.0

[Compare Source](https://redirect.github.com/lucide-icons/lucide/compare/0.457.0...0.458.0)

#### New icons 🎨

- `cloud-alert` ([#​2611](https://redirect.github.com/lucide-icons/lucide/issues/2611)) by [@​lscheibel](https://redirect.github.com/lscheibel)

#### Modified Icons 🔨

- `drill` ([#​1919](https://redirect.github.com/lucide-icons/lucide/issues/1919)) by [@​jguddas](https://redirect.github.com/jguddas)

#### Other Changes

- feat(lucide-svelte): Aliased imports for direct imports by [@​ericfennis](https://redirect.github.com/ericfennis) in [https://github.com/lucide-icons/lucide/pull/2584](https://redirect.github.com/lucide-icons/lucide/pull/2584)

**Full Changelog**: https://github.com/lucide-icons/lucide/compare/0.457.0...0.458.0

### [`v0.457.0`](https://redirect.github.com/lucide-icons/lucide/releases/tag/0.457.0): New icons 0.457.0

[Compare Source](https://redirect.github.com/lucide-icons/lucide/compare/0.456.0...0.457.0)

#### New icons 🎨

- `laptop-minimal-check` ([#​2563](https://redirect.github.com/lucide-icons/lucide/issues/2563)) by [@​jguddas](https://redirect.github.com/jguddas)

#### Modified Icons 🔨

- `bath` ([#​2512](https://redirect.github.com/lucide-icons/lucide/issues/2512)) by [@​jamiemlaw](https://redirect.github.com/jamiemlaw)
- `cross` ([#​2578](https://redirect.github.com/lucide-icons/lucide/issues/2578)) by [@​jguddas](https://redirect.github.com/jguddas)
- `hand-platter` ([#​2326](https://redirect.github.com/lucide-icons/lucide/issues/2326)) by [@​karsa-mistmere](https://redirect.github.com/karsa-mistmere)
- `hard-hat` ([#​2559](https://redirect.github.com/lucide-icons/lucide/issues/2559)) by [@​jguddas](https://redirect.github.com/jguddas)
- `heading-4` ([#​2546](https://redirect.github.com/lucide-icons/lucide/issues/2546)) by [@​jguddas](https://redirect.github.com/jguddas)
- `puzzle` ([#​2603](https://redirect.github.com/lucide-icons/lucide/issues/2603)) by [@​jamiemlaw](https://redirect.github.com/jamiemlaw)
- `school` ([#​2598](https://redirect.github.com/lucide-icons/lucide/issues/2598)) by [@​jguddas](https://redirect.github.com/jguddas)
- `vegan` ([#​2556](https://redirect.github.com/lucide-icons/lucide/issues/2556)) by [@​jguddas](https://redirect.github.com/jguddas)

### [`v0.456.0`](https://redirect.github.com/lucide-icons/lucide/releases/tag/0.456.0): Choosing import name style 0.456.0

[Compare Source](https://redirect.github.com/lucide-icons/lucide/compare/0.455.0...0.456.0)

#### What's Changed

- ci(pull-request): Fix generate comments for empty changes by [@​ericfennis](https://redirect.github.com/ericfennis) in [https://github.com/lucide-icons/lucide/pull/2593](https://redirect.github.com/lucide-icons/lucide/pull/2593)
- feat(lucide-react, lucide-preact, lucide-react-native, lucide-solid, lucide-vue-next): Adjustable icon naming imports by [@​ericfennis](https://redirect.github.com/ericfennis) in [https://github.com/lucide-icons/lucide/pull/2328](https://redirect.github.com/lucide-icons/lucide/pull/2328)
- fix(icons): changed `glass-water` icon by [@​jguddas](https://redirect.github.com/jguddas) in [https://github.com/lucide-icons/lucide/pull/2579](https://redirect.github.com/lucide-icons/lucide/pull/2579)

### Adjustable icon naming imports

Customize import name styles for `lucide-react`, `lucide-vue`, `lucide-react-native`, `lucide-preact`, to manage autocompletion in your IDE.

1. **Turn off autocomplete in your IDE**: Add the following to your `settings.json`

   ```json
   {
     "typescript.preferences.autoImportFileExcludePatterns": [
       "lucide-react",
       "lucide-preact",
       "lucide-react-native",
       "lucide-vue-next"
     ]
   }
   ```

2. **Create a custom module declaration file**: It allows you to choose the import name style. For React:

   ```ts
   declare module "lucide-react" {
     // Prefixed import names
     export * from "lucide-react/dist/lucide-react.prefixed";
     // or
     // Suffixed import names
     export * from "lucide-react/dist/lucide-react.suffixed";
   }
   ```

   For Vue:

   ```ts
   declare module "lucide-vue-next" {
     // Prefixed import names
     export * from "lucide-vue-next/dist/lucide-vue-next.prefixed";
     // or
     // Suffixed import names
     export * from "lucide-vue-next/dist/lucide-vue-next.suffixed";
   }
   ```

### [`v0.455.0`](https://redirect.github.com/lucide-icons/lucide/releases/tag/0.455.0): New icons 0.455.0

[Compare Source](https://redirect.github.com/lucide-icons/lucide/compare/0.454.0...0.455.0)

#### New icons 🎨

- `wind-arrow-down` ([#​2554](https://redirect.github.com/lucide-icons/lucide/issues/2554)) by [@​jamiemlaw](https://redirect.github.com/jamiemlaw)

#### Modified Icons 🔨

- `file-music` ([#​2536](https://redirect.github.com/lucide-icons/lucide/issues/2536)) by [@​jguddas](https://redirect.github.com/jguddas)
- `slice` ([#​2500](https://redirect.github.com/lucide-icons/lucide/issues/2500)) by [@​jguddas](https://redirect.github.com/jguddas)
- `undo-dot` ([#​2557](https://redirect.github.com/lucide-icons/lucide/issues/2557)) by [@​jguddas](https://redirect.github.com/jguddas)
- `wind` ([#​2554](https://redirect.github.com/lucide-icons/lucide/issues/2554)) by [@​jamiemlaw](https://redirect.github.com/jamiemlaw)
marella/material-symbols (material-symbols) ### [`v0.27.0`](https://redirect.github.com/marella/material-symbols/compare/v0.26.0...v0.27.0) [Compare Source](https://redirect.github.com/marella/material-symbols/compare/v0.26.0...v0.27.0)
JedWatson/react-select (react-select) ### [`v5.8.3`](https://redirect.github.com/JedWatson/react-select/releases/tag/react-select%405.8.3) [Compare Source](https://redirect.github.com/JedWatson/react-select/compare/react-select@5.8.2...react-select@5.8.3) ##### Patch Changes - [`111efad1`](https://redirect.github.com/JedWatson/react-select/commit/111efad170a11dbae96ae436251cd028e702eb72) [#​5974](https://redirect.github.com/JedWatson/react-select/pull/5974) Thanks [@​j2ghz](https://redirect.github.com/j2ghz)! - Fix types compatibility with React 19
--- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- ui/package-lock.json | 72 ++++++++++++++++++++++---------------------- ui/package.json | 32 ++++++++++---------- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/ui/package-lock.json b/ui/package-lock.json index 5d64807ea2..f233b02b90 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -29,8 +29,8 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "lucide-react": "^0.454.0", - "material-symbols": "^0.26.0", + "lucide-react": "^0.460.0", + "material-symbols": "^0.27.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46", "next": "^14.2.14", @@ -48,20 +48,20 @@ "zod": "^3.23.8" }, "devDependencies": { - "autoprefixer": "^10.4.20", - "copy-webpack-plugin": "^12.0.2", - "eslint": "^8.57.1", - "eslint-config-next": "^14.2.14", - "eslint-config-prettier": "^9.1.0", - "less": "^4.2.0", - "postcss": "^8.4.47", - "prettier": "^3.3.3", - "prettier-plugin-organize-imports": "^4.1.0", - "string-width": "^7.2.0", - "tailwindcss": "^3.4.13", - "tailwindcss-animate": "^1.0.7", - "typescript": "^5.6.2", - "webpack": "^5.95.0" + "autoprefixer": "10.4.20", + "copy-webpack-plugin": "12.0.2", + "eslint": "8.57.1", + "eslint-config-next": "14.2.17", + "eslint-config-prettier": "9.1.0", + "less": "4.2.0", + "postcss": "8.4.47", + "prettier": "3.3.3", + "prettier-plugin-organize-imports": "4.1.0", + "string-width": "7.2.0", + "tailwindcss": "3.4.14", + "tailwindcss-animate": "1.0.7", + "typescript": "5.6.3", + "webpack": "5.96.1" } }, "node_modules/@alloc/quick-lru": { @@ -1337,12 +1337,12 @@ } }, "node_modules/@radix-ui/react-icons": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.1.tgz", - "integrity": "sha512-QvYompk0X+8Yjlo/Fv4McrzxohDdM5GgLHyQcPpcsPvlOSXCGFjdbuyGL5dzRbg0GpknAjQJJZzdiRK7iWVuFQ==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@radix-ui/react-icons/-/react-icons-1.3.2.tgz", + "integrity": "sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g==", "license": "MIT", "peerDependencies": { - "react": "^16.x || ^17.x || ^18.x || ^19.x" + "react": "^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc" } }, "node_modules/@radix-ui/react-id": { @@ -1879,9 +1879,9 @@ } }, "node_modules/@radix-ui/react-tooltip": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.3.tgz", - "integrity": "sha512-Z4w1FIS0BqVFI2c1jZvb/uDVJijJjJ2ZMuPV81oVgTZ7g3BZxobplnMVvXtFWgtozdvYJ+MFWtwkM5S2HnAong==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.1.4.tgz", + "integrity": "sha512-QpObUH/ZlpaO4YgHSaYzrLO2VuO+ZBFFgGzjMUPwtiYnAzzNNDPJeEGRrT7qNOrWm/Jr08M1vlp+vTHtnSQ0Uw==", "license": "MIT", "dependencies": { 
"@radix-ui/primitive": "1.1.0", @@ -2129,9 +2129,9 @@ } }, "node_modules/@tremor/react": { - "version": "3.18.3", - "resolved": "https://registry.npmjs.org/@tremor/react/-/react-3.18.3.tgz", - "integrity": "sha512-7QyGE2W9f2FpwH24TKy3/mqBgLl4sHZeQcXP3rxXZ8W2AUq7AVaG1+vIT3xXxISrkh7zknjWlZsuhoF8NWNVDw==", + "version": "3.18.4", + "resolved": "https://registry.npmjs.org/@tremor/react/-/react-3.18.4.tgz", + "integrity": "sha512-HDjYbuzxQIZvosGzB1j1nCSuLLRdKRHPfRmoGUyI57cesbThFzWuFHz07Sio9Vhk/ew3TKJUZPy+ljfZ3u1M4g==", "license": "Apache 2.0", "dependencies": { "@floating-ui/react": "^0.19.2", @@ -2140,7 +2140,7 @@ "date-fns": "^3.6.0", "react-day-picker": "^8.10.1", "react-transition-state": "^2.1.2", - "recharts": "^2.12.7", + "recharts": "^2.13.3", "tailwind-merge": "^2.5.2" }, "peerDependencies": { @@ -6001,9 +6001,9 @@ } }, "node_modules/lucide-react": { - "version": "0.454.0", - "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.454.0.tgz", - "integrity": "sha512-hw7zMDwykCLnEzgncEEjHeA6+45aeEzRYuKHuyRSOPkhko+J3ySGjGIzu+mmMfDFG1vazHepMaYFYHbTFAZAAQ==", + "version": "0.460.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.460.0.tgz", + "integrity": "sha512-BVtq/DykVeIvRTJvRAgCsOwaGL8Un3Bxh8MbDxMhEWlZay3T4IpEKDEpwt5KZ0KJMHzgm6jrltxlT5eXOWXDHg==", "license": "ISC", "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0-rc" @@ -6036,9 +6036,9 @@ } }, "node_modules/material-symbols": { - "version": "0.26.0", - "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.26.0.tgz", - "integrity": "sha512-7WefpjuZLsXjE4MHlbi7QVca9y6M45YJws8oC3l7UITfpGDxVwEddQaaqYqtGMGVRFeBw/dIxmlazR5eeZH0rg==", + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/material-symbols/-/material-symbols-0.27.0.tgz", + "integrity": "sha512-nRHpnw2Cz7eNl6GptgHHhkjauL0zvkYsuiqy1HBifOYCY4fdbZ/PwtdZN4RNmwA+9jQPoymvlArVPPX5nYTdZg==", "license": "Apache-2.0" }, "node_modules/memoize-one": { @@ -7165,9 +7165,9 @@ } }, "node_modules/react-select": { - "version": "5.8.2", - "resolved": "https://registry.npmjs.org/react-select/-/react-select-5.8.2.tgz", - "integrity": "sha512-a/LkOckoI62710gGPQSQqUp7A10fGbH/ya3/IR49qaq3XoBvwymgD5mJgtiHxBDsutyEQfdKNycWVh8Cg8UCjw==", + "version": "5.8.3", + "resolved": "https://registry.npmjs.org/react-select/-/react-select-5.8.3.tgz", + "integrity": "sha512-lVswnIq8/iTj1db7XCG74M/3fbGB6ZaluCzvwPGT5ZOjCdL/k0CLWhEK0vCBLuU5bHTEf6Gj8jtSvi+3v+tO1w==", "license": "MIT", "dependencies": { "@babel/runtime": "^7.12.0", diff --git a/ui/package.json b/ui/package.json index 3f42598386..d755ce8640 100644 --- a/ui/package.json +++ b/ui/package.json @@ -31,8 +31,8 @@ "@types/react": "^18.3.11", "@types/react-dom": "^18.3.0", "classnames": "^2.5.1", - "lucide-react": "^0.454.0", - "material-symbols": "^0.26.0", + "lucide-react": "^0.460.0", + "material-symbols": "^0.27.0", "moment": "^2.30.1", "moment-timezone": "^0.5.46", "next": "^14.2.14", @@ -50,19 +50,19 @@ "zod": "^3.23.8" }, "devDependencies": { - "autoprefixer": "^10.4.20", - "copy-webpack-plugin": "^12.0.2", - "eslint": "^8.57.1", - "eslint-config-next": "^14.2.14", - "eslint-config-prettier": "^9.1.0", - "less": "^4.2.0", - "postcss": "^8.4.47", - "prettier": "^3.3.3", - "prettier-plugin-organize-imports": "^4.1.0", - "string-width": "^7.2.0", - "tailwindcss": "^3.4.13", - "tailwindcss-animate": "^1.0.7", - "typescript": "^5.6.2", - "webpack": "^5.95.0" + "autoprefixer": "10.4.20", + "copy-webpack-plugin": "12.0.2", + "eslint": "8.57.1", + "eslint-config-next": 
"14.2.17", + "eslint-config-prettier": "9.1.0", + "less": "4.2.0", + "postcss": "8.4.47", + "prettier": "3.3.3", + "prettier-plugin-organize-imports": "4.1.0", + "string-width": "7.2.0", + "tailwindcss": "3.4.14", + "tailwindcss-animate": "1.0.7", + "typescript": "5.6.3", + "webpack": "5.96.1" } } From f57dd0c4ddfd02451f9985a3cb2852a646e55b96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 20 Nov 2024 00:19:16 +0000 Subject: [PATCH 47/59] Replace SyncGauge with upstream otel sdk gauges (#2272) Upstream added gauges in May: https://github.com/open-telemetry/opentelemetry-go/pull/5304 --- flow/cmd/worker.go | 5 +- flow/connectors/postgres/postgres.go | 33 ++++--- flow/otel_metrics/otel_manager.go | 4 +- flow/otel_metrics/peerdb_gauges/gauges.go | 10 ++- flow/otel_metrics/sync_gauges.go | 104 ++-------------------- 5 files changed, 38 insertions(+), 118 deletions(-) diff --git a/flow/cmd/worker.go b/flow/cmd/worker.go index 5c16376a12..cca0202ec7 100644 --- a/flow/cmd/worker.go +++ b/flow/cmd/worker.go @@ -10,6 +10,7 @@ import ( "runtime" "github.com/grafana/pyroscope-go" + "go.opentelemetry.io/otel/metric" "go.temporal.io/sdk/client" temporalotel "go.temporal.io/sdk/contrib/opentelemetry" "go.temporal.io/sdk/worker" @@ -157,8 +158,8 @@ func WorkerSetup(opts *WorkerSetupOptions) (*workerSetupResponse, error) { otelManager = &otel_metrics.OtelManager{ MetricsProvider: metricsProvider, Meter: metricsProvider.Meter("io.peerdb.flow-worker"), - Float64GaugesCache: make(map[string]*otel_metrics.Float64SyncGauge), - Int64GaugesCache: make(map[string]*otel_metrics.Int64SyncGauge), + Float64GaugesCache: make(map[string]metric.Float64Gauge), + Int64GaugesCache: make(map[string]metric.Int64Gauge), } cleanupOtelManagerFunc = func() { shutDownErr := otelManager.MetricsProvider.Shutdown(context.Background()) diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index 593a94aa49..ae0dbea52d 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -17,6 +17,7 @@ import ( "github.com/jackc/pgx/v5/pgtype" "github.com/jackc/pgx/v5/pgxpool" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" "go.temporal.io/sdk/log" "go.temporal.io/sdk/temporal" @@ -1214,11 +1215,12 @@ func (c *PostgresConnector) HandleSlotInfo( logger.Info(fmt.Sprintf("Checking %s lag for %s", alertKeys.SlotName, alertKeys.PeerName), slog.Float64("LagInMB", float64(slotInfo[0].LagInMb))) alerter.AlertIfSlotLag(ctx, alertKeys, slotInfo[0]) - slotMetricGauges.SlotLagGauge.Set(float64(slotInfo[0].LagInMb), attribute.NewSet( + slotMetricGauges.SlotLagGauge.Record(ctx, float64(slotInfo[0].LagInMb), metric.WithAttributeSet(attribute.NewSet( attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), attribute.String(otel_metrics.SlotNameKey, alertKeys.SlotName), - attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))) + attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID())), + )) // Also handles alerts for PeerDB user connections exceeding a given limit here res, err := getOpenConnectionsForUser(ctx, c.conn, c.config.User) @@ -1227,10 +1229,11 @@ func (c *PostgresConnector) HandleSlotInfo( return err } alerter.AlertIfOpenConnections(ctx, alertKeys, res) - slotMetricGauges.OpenConnectionsGauge.Set(res.CurrentOpenConnections, attribute.NewSet( + slotMetricGauges.OpenConnectionsGauge.Record(ctx, 
res.CurrentOpenConnections, metric.WithAttributeSet(attribute.NewSet( attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), - attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))) + attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()), + ))) replicationRes, err := getOpenReplicationConnectionsForUser(ctx, c.conn, c.config.User) if err != nil { @@ -1238,10 +1241,13 @@ func (c *PostgresConnector) HandleSlotInfo( return err } - slotMetricGauges.OpenReplicationConnectionsGauge.Set(replicationRes.CurrentOpenConnections, attribute.NewSet( - attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), - attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), - attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))) + slotMetricGauges.OpenReplicationConnectionsGauge.Record(ctx, replicationRes.CurrentOpenConnections, + metric.WithAttributeSet(attribute.NewSet( + attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), + attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), + attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()), + )), + ) var intervalSinceLastNormalize *time.Duration if err := alerter.CatalogPool.QueryRow( @@ -1255,10 +1261,13 @@ func (c *PostgresConnector) HandleSlotInfo( return nil } if intervalSinceLastNormalize != nil { - slotMetricGauges.IntervalSinceLastNormalizeGauge.Set(intervalSinceLastNormalize.Seconds(), attribute.NewSet( - attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), - attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), - attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()))) + slotMetricGauges.IntervalSinceLastNormalizeGauge.Record(ctx, intervalSinceLastNormalize.Seconds(), + metric.WithAttributeSet(attribute.NewSet( + attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName), + attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName), + attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()), + )), + ) alerter.AlertIfTooLongSinceLastNormalize(ctx, alertKeys, *intervalSinceLastNormalize) } diff --git a/flow/otel_metrics/otel_manager.go b/flow/otel_metrics/otel_manager.go index becf13a16f..c59adecd41 100644 --- a/flow/otel_metrics/otel_manager.go +++ b/flow/otel_metrics/otel_manager.go @@ -20,8 +20,8 @@ import ( type OtelManager struct { MetricsProvider *sdkmetric.MeterProvider Meter metric.Meter - Float64GaugesCache map[string]*Float64SyncGauge - Int64GaugesCache map[string]*Int64SyncGauge + Float64GaugesCache map[string]metric.Float64Gauge + Int64GaugesCache map[string]metric.Int64Gauge } // newOtelResource returns a resource describing this application. 
diff --git a/flow/otel_metrics/peerdb_gauges/gauges.go b/flow/otel_metrics/peerdb_gauges/gauges.go index 767aac0945..a3b7d5c3e8 100644 --- a/flow/otel_metrics/peerdb_gauges/gauges.go +++ b/flow/otel_metrics/peerdb_gauges/gauges.go @@ -1,6 +1,8 @@ package peerdb_gauges import ( + "go.opentelemetry.io/otel/metric" + "github.com/PeerDB-io/peer-flow/otel_metrics" ) @@ -12,10 +14,10 @@ const ( ) type SlotMetricGauges struct { - SlotLagGauge *otel_metrics.Float64SyncGauge - OpenConnectionsGauge *otel_metrics.Int64SyncGauge - OpenReplicationConnectionsGauge *otel_metrics.Int64SyncGauge - IntervalSinceLastNormalizeGauge *otel_metrics.Float64SyncGauge + SlotLagGauge metric.Float64Gauge + OpenConnectionsGauge metric.Int64Gauge + OpenReplicationConnectionsGauge metric.Int64Gauge + IntervalSinceLastNormalizeGauge metric.Float64Gauge } func BuildGaugeName(baseGaugeName string) string { diff --git a/flow/otel_metrics/sync_gauges.go b/flow/otel_metrics/sync_gauges.go index d2ef4924c1..e9da02c875 100644 --- a/flow/otel_metrics/sync_gauges.go +++ b/flow/otel_metrics/sync_gauges.go @@ -1,106 +1,15 @@ package otel_metrics import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" ) -type ObservationMapValue[V comparable] struct { - Value V -} - -// SyncGauge is a generic synchronous gauge that can be used to observe any type of value -// Inspired from https://github.com/open-telemetry/opentelemetry-go/issues/3984#issuecomment-1743231837 -type SyncGauge[V comparable, O metric.Observable] struct { - observableGauge O - observations sync.Map - name string -} - -func (a *SyncGauge[V, O]) Callback(ctx context.Context, observeFunc func(value V, options ...metric.ObserveOption)) error { - a.observations.Range(func(key, value interface{}) bool { - attrs := key.(attribute.Set) - val := value.(*ObservationMapValue[V]) - observeFunc(val.Value, metric.WithAttributeSet(attrs)) - // If the pointer is still same we can safely delete, else it means that the value was overwritten in parallel - a.observations.CompareAndDelete(attrs, val) - return true - }) - return nil -} - -func (a *SyncGauge[V, O]) Set(input V, attrs attribute.Set) { - val := ObservationMapValue[V]{Value: input} - a.observations.Store(attrs, &val) -} - -type Int64SyncGauge struct { - syncGauge *SyncGauge[int64, metric.Int64Observable] -} - -func (a *Int64SyncGauge) Set(input int64, attrs attribute.Set) { - if a == nil { - return - } - a.syncGauge.Set(input, attrs) -} - -func NewInt64SyncGauge(meter metric.Meter, gaugeName string, opts ...metric.Int64ObservableGaugeOption) (*Int64SyncGauge, error) { - syncGauge := &SyncGauge[int64, metric.Int64Observable]{ - name: gaugeName, - } - observableGauge, err := meter.Int64ObservableGauge(gaugeName, - append(opts, metric.WithInt64Callback(func(ctx context.Context, observer metric.Int64Observer) error { - return syncGauge.Callback(ctx, func(value int64, options ...metric.ObserveOption) { - observer.Observe(value, options...) - }) - }))...) 
- if err != nil { - return nil, fmt.Errorf("failed to create Int64SyncGauge: %w", err) - } - syncGauge.observableGauge = observableGauge - return &Int64SyncGauge{syncGauge: syncGauge}, nil -} - -type Float64SyncGauge struct { - syncGauge *SyncGauge[float64, metric.Float64Observable] -} - -func (a *Float64SyncGauge) Set(input float64, attrs attribute.Set) { - if a == nil { - return - } - a.syncGauge.Set(input, attrs) -} - -func NewFloat64SyncGauge(meter metric.Meter, gaugeName string, opts ...metric.Float64ObservableGaugeOption) (*Float64SyncGauge, error) { - syncGauge := &SyncGauge[float64, metric.Float64Observable]{ - name: gaugeName, - } - observableGauge, err := meter.Float64ObservableGauge(gaugeName, - append(opts, metric.WithFloat64Callback(func(ctx context.Context, observer metric.Float64Observer) error { - return syncGauge.Callback(ctx, func(value float64, options ...metric.ObserveOption) { - observer.Observe(value, options...) - }) - }))...) - if err != nil { - return nil, fmt.Errorf("failed to create Float64SyncGauge: %w", err) - } - syncGauge.observableGauge = observableGauge - return &Float64SyncGauge{syncGauge: syncGauge}, nil -} - -func GetOrInitInt64SyncGauge(meter metric.Meter, cache map[string]*Int64SyncGauge, name string, - opts ...metric.Int64ObservableGaugeOption, -) (*Int64SyncGauge, error) { +func GetOrInitInt64SyncGauge(meter metric.Meter, cache map[string]metric.Int64Gauge, name string, opts ...metric.Int64GaugeOption, +) (metric.Int64Gauge, error) { gauge, ok := cache[name] if !ok { var err error - gauge, err = NewInt64SyncGauge(meter, name, opts...) + gauge, err = meter.Int64Gauge(name, opts...) if err != nil { return nil, err } @@ -109,13 +18,12 @@ func GetOrInitInt64SyncGauge(meter metric.Meter, cache map[string]*Int64SyncGaug return gauge, nil } -func GetOrInitFloat64SyncGauge(meter metric.Meter, cache map[string]*Float64SyncGauge, - name string, opts ...metric.Float64ObservableGaugeOption, -) (*Float64SyncGauge, error) { +func GetOrInitFloat64SyncGauge(meter metric.Meter, cache map[string]metric.Float64Gauge, name string, opts ...metric.Float64GaugeOption, +) (metric.Float64Gauge, error) { gauge, ok := cache[name] if !ok { var err error - gauge, err = NewFloat64SyncGauge(meter, name, opts...) + gauge, err = meter.Float64Gauge(name, opts...) 
if err != nil { return nil, err } From b8e55ea6d1e26f4782ec2198190086bec817564c Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Thu, 21 Nov 2024 02:44:20 +0530 Subject: [PATCH 48/59] Introduce Completed flow status (#2274) Introduces a new flow state - `Completed` Currently this is the state for an initial load only mirror which has finished initial load Functionally tested --- flow/e2e/test_utils.go | 2 +- flow/workflows/cdc_flow.go | 2 ++ protos/flow.proto | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/flow/e2e/test_utils.go b/flow/e2e/test_utils.go index c3e577a4dc..7fb3f857da 100644 --- a/flow/e2e/test_utils.go +++ b/flow/e2e/test_utils.go @@ -221,7 +221,7 @@ func SetupCDCFlowStatusQuery(t *testing.T, env WorkflowRun, config *protos.FlowC var status protos.FlowStatus if err := response.Get(&status); err != nil { t.Fatal(err) - } else if status == protos.FlowStatus_STATUS_RUNNING { + } else if status == protos.FlowStatus_STATUS_RUNNING || status == protos.FlowStatus_STATUS_COMPLETED { return } else if counter > 30 { env.Cancel() diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go index 72e37b01fd..bd1d5459dd 100644 --- a/flow/workflows/cdc_flow.go +++ b/flow/workflows/cdc_flow.go @@ -485,6 +485,8 @@ func CDCFlowWorkflow( // if initial_copy_only is opted for, we end the flow here. if cfg.InitialSnapshotOnly { + logger.Info("initial snapshot only, ending flow") + state.CurrentFlowStatus = protos.FlowStatus_STATUS_COMPLETED return state, nil } } diff --git a/protos/flow.proto b/protos/flow.proto index de7bf740d0..42170a5630 100644 --- a/protos/flow.proto +++ b/protos/flow.proto @@ -385,6 +385,7 @@ enum FlowStatus { STATUS_SNAPSHOT = 5; STATUS_TERMINATING = 6; STATUS_TERMINATED = 7; + STATUS_COMPLETED = 8; } message CDCFlowConfigUpdate { From b5e2063d946e9cb30e6d739655ea0924ff3def2c Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Thu, 21 Nov 2024 03:08:11 +0530 Subject: [PATCH 49/59] Optimise state setting for initial load only (#2275) Set status to running after the initial load only check --- flow/workflows/cdc_flow.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/workflows/cdc_flow.go b/flow/workflows/cdc_flow.go index bd1d5459dd..0c97af9b7d 100644 --- a/flow/workflows/cdc_flow.go +++ b/flow/workflows/cdc_flow.go @@ -480,15 +480,15 @@ func CDCFlowWorkflow( } } - state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING logger.Info("executed setup flow and snapshot flow") - // if initial_copy_only is opted for, we end the flow here. 
if cfg.InitialSnapshotOnly { logger.Info("initial snapshot only, ending flow") state.CurrentFlowStatus = protos.FlowStatus_STATUS_COMPLETED return state, nil } + + state.CurrentFlowStatus = protos.FlowStatus_STATUS_RUNNING } syncFlowID := GetChildWorkflowID("sync-flow", cfg.FlowJobName, originalRunID) From 324afd01defb4f8425a173c591685f4865cbfbfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Philip=20Dub=C3=A9?= Date: Wed, 20 Nov 2024 23:40:35 +0000 Subject: [PATCH 50/59] Remove PEERDB_MAX_SYNCS_PER_CDC_FLOW (#2273) Already unused in rest of code --- flow/peerdbenv/dynamicconf.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/flow/peerdbenv/dynamicconf.go b/flow/peerdbenv/dynamicconf.go index 314b365733..b0cbe05f51 100644 --- a/flow/peerdbenv/dynamicconf.go +++ b/flow/peerdbenv/dynamicconf.go @@ -19,14 +19,6 @@ import ( ) var DynamicSettings = [...]*protos.DynamicSetting{ - { - Name: "PEERDB_MAX_SYNCS_PER_CDC_FLOW", - Description: "Experimental setting: changes number of syncs per workflow, affects frequency of replication slot disconnects", - DefaultValue: "32", - ValueType: protos.DynconfValueType_UINT, - ApplyMode: protos.DynconfApplyMode_APPLY_MODE_IMMEDIATE, - TargetForSetting: protos.DynconfTarget_ALL, - }, { Name: "PEERDB_CDC_CHANNEL_BUFFER_SIZE", Description: "Advanced setting: changes buffer size of channel PeerDB uses while streaming rows read to destination in CDC", From 2aeb13de078f3b4f118c43cb51f89fde3cfcecd0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 21 Nov 2024 11:12:49 +0000 Subject: [PATCH 51/59] chore(deps): pin dependencies (#2277) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR contains the following updates: | Package | Update | Change | |---|---|---| | ghcr.io/peerdb-io/flow-api | pinDigest | -> `53a6de3` | | ghcr.io/peerdb-io/flow-snapshot-worker | pinDigest | -> `036d609` | | ghcr.io/peerdb-io/flow-worker | pinDigest | -> `f5d4d5e` | | ghcr.io/peerdb-io/peerdb-server | pinDigest | -> `15249fc` | | ghcr.io/peerdb-io/peerdb-ui | pinDigest | -> `f4d1cdf` | | [temporalio/ui](https://redirect.github.com/temporalio/ui-server) | minor | `2.31.2` -> `2.32.0` | --- ### Release Notes
temporalio/ui-server (temporalio/ui) ### [`v2.32.0`](https://redirect.github.com/temporalio/ui-server/releases/tag/v2.32.0) [Compare Source](https://redirect.github.com/temporalio/ui-server/compare/v2.31.2...v2.32.0) - 2024-11-20 - [`10e52ae`](https://redirect.github.com/temporalio/ui-server/commit/10e52ae39) - Update version.go to v2.32.0 ([#​2435](https://redirect.github.com/temporalio/ui-server/issues/2435)) - 2024-11-20 - [`303d7f1`](https://redirect.github.com/temporalio/ui-server/commit/303d7f1b4) - KeywordList search attribute support ([#​2420](https://redirect.github.com/temporalio/ui-server/issues/2420)) - 2024-11-20 - [`49063e4`](https://redirect.github.com/temporalio/ui-server/commit/49063e442) - User Metadata ([#​2423](https://redirect.github.com/temporalio/ui-server/issues/2423)) - 2024-11-20 - [`a4b9d3c`](https://redirect.github.com/temporalio/ui-server/commit/a4b9d3c3e) - Add render route in ui-server ([#​2430](https://redirect.github.com/temporalio/ui-server/issues/2430)) - 2024-11-20 - [`532f95f`](https://redirect.github.com/temporalio/ui-server/commit/532f95fe2) - Use input name for Update attribute in event summary row ([#​2434](https://redirect.github.com/temporalio/ui-server/issues/2434)) - 2024-11-19 - [`bbaa395`](https://redirect.github.com/temporalio/ui-server/commit/bbaa395db) - Update combobox to support integration with async code ([#​2431](https://redirect.github.com/temporalio/ui-server/issues/2431)) - 2024-11-19 - [`105ad96`](https://redirect.github.com/temporalio/ui-server/commit/105ad96e9) - Bump [@​grpc/grpc-js](https://redirect.github.com/grpc/grpc-js) from 1.10.8 to 1.10.9 ([#​2200](https://redirect.github.com/temporalio/ui-server/issues/2200)) - 2024-11-19 - [`e6de85f`](https://redirect.github.com/temporalio/ui-server/commit/e6de85f82) - Bump rollup from 2.79.1 to 2.79.2 ([#​2419](https://redirect.github.com/temporalio/ui-server/issues/2419)) - 2024-11-19 - [`ce45d90`](https://redirect.github.com/temporalio/ui-server/commit/ce45d908f) - Bump vite from 5.2.14 to 5.3.6 ([#​2343](https://redirect.github.com/temporalio/ui-server/issues/2343)) - 2024-11-19 - [`71a35a0`](https://redirect.github.com/temporalio/ui-server/commit/71a35a064) - Bump axios from 1.7.2 to 1.7.4 ([#​2268](https://redirect.github.com/temporalio/ui-server/issues/2268)) - 2024-11-18 - [`f65d406`](https://redirect.github.com/temporalio/ui-server/commit/f65d406e7) - 2.32.6 ([#​2429](https://redirect.github.com/temporalio/ui-server/issues/2429)) - 2024-11-18 - [`f52de86`](https://redirect.github.com/temporalio/ui-server/commit/f52de8663) - Add href prop to Combobox to get reactive hrefs working correctly ([#​2428](https://redirect.github.com/temporalio/ui-server/issues/2428)) - 2024-11-18 - [`3c339b9`](https://redirect.github.com/temporalio/ui-server/commit/3c339b9ff) - 2.32.5 ([#​2427](https://redirect.github.com/temporalio/ui-server/issues/2427)) - 2024-11-14 - [`ea68f93`](https://redirect.github.com/temporalio/ui-server/commit/ea68f9378) - Use reactive route for namespace ([#​2426](https://redirect.github.com/temporalio/ui-server/issues/2426)) - 2024-11-13 - [`a580770`](https://redirect.github.com/temporalio/ui-server/commit/a58077007) - Permanently fix formatted utc offset test ([#​2422](https://redirect.github.com/temporalio/ui-server/issues/2422)) - 2024-11-12 - [`08dc657`](https://redirect.github.com/temporalio/ui-server/commit/08dc65724) - Make ids in event history mono for better spacing ([#​2425](https://redirect.github.com/temporalio/ui-server/issues/2425)) - 2024-11-12 - 
[`6276fd5`](https://redirect.github.com/temporalio/ui-server/commit/6276fd5a6) - Update go-oidc to v3.11 ([#​2424](https://redirect.github.com/temporalio/ui-server/issues/2424)) - 2024-11-07 - [`687ae03`](https://redirect.github.com/temporalio/ui-server/commit/687ae0305) - fix: keep extra row cell when write actions are disabled ([#​2416](https://redirect.github.com/temporalio/ui-server/issues/2416)) - 2024-11-06 - [`185075a`](https://redirect.github.com/temporalio/ui-server/commit/185075a4f) - Remove dark theme on CopyButton in CodeBlock ([#​2421](https://redirect.github.com/temporalio/ui-server/issues/2421)) - 2024-11-06 - [`34944e5`](https://redirect.github.com/temporalio/ui-server/commit/34944e595) - 2.32.4 ([#​2417](https://redirect.github.com/temporalio/ui-server/issues/2417)) - 2024-11-06 - [`ec43569`](https://redirect.github.com/temporalio/ui-server/commit/ec4356929) - Update daylight savings test ([#​2418](https://redirect.github.com/temporalio/ui-server/issues/2418)) - 2024-11-06 - [`3a423d2`](https://redirect.github.com/temporalio/ui-server/commit/3a423d2bb) - Add mobile option for playwright tests ([#​2412](https://redirect.github.com/temporalio/ui-server/issues/2412)) - 2024-11-04 - [`d540904`](https://redirect.github.com/temporalio/ui-server/commit/d54090426) - Only encode Schedule Input when added or edited ([#​2384](https://redirect.github.com/temporalio/ui-server/issues/2384)) - 2024-11-01 - [`197e238`](https://redirect.github.com/temporalio/ui-server/commit/197e238d4) - Don't update browser history on query param changes in start workflow. preserve order of query params on goto ([#​2415](https://redirect.github.com/temporalio/ui-server/issues/2415)) - 2024-11-01 - [`558ae1b`](https://redirect.github.com/temporalio/ui-server/commit/558ae1bc3) - Light/Dark mode themes for Codemirror Editor ([#​2414](https://redirect.github.com/temporalio/ui-server/issues/2414)) - 2024-10-31 - [`fa5e6b3`](https://redirect.github.com/temporalio/ui-server/commit/fa5e6b3ae) - Add Authorization-Extras if authUser idToken exists ([#​2250](https://redirect.github.com/temporalio/ui-server/issues/2250)) - 2024-10-30 - [`0090c09`](https://redirect.github.com/temporalio/ui-server/commit/0090c0945) - Use \__user_metadata query if it exists ([#​2411](https://redirect.github.com/temporalio/ui-server/issues/2411)) - 2024-10-29 - [`e80a783`](https://redirect.github.com/temporalio/ui-server/commit/e80a7839d) - Add query argument support ([#​2407](https://redirect.github.com/temporalio/ui-server/issues/2407)) - 2024-10-29 - [`39d1a21`](https://redirect.github.com/temporalio/ui-server/commit/39d1a21a3) - Get rid of empty class, not needed ([#​2410](https://redirect.github.com/temporalio/ui-server/issues/2410)) - 2024-10-24 - [`e129594`](https://redirect.github.com/temporalio/ui-server/commit/e129594c6) - Use attempt versus retry ([#​2408](https://redirect.github.com/temporalio/ui-server/issues/2408)) - 2024-10-23 - [`2cf3d6f`](https://redirect.github.com/temporalio/ui-server/commit/2cf3d6f8e) - 2.32.3 ([#​2406](https://redirect.github.com/temporalio/ui-server/issues/2406)) - 2024-10-23 - [`eeb2cca`](https://redirect.github.com/temporalio/ui-server/commit/eeb2ccad0) - Bump webpack from 5.90.3 to 5.94.0 ([#​2286](https://redirect.github.com/temporalio/ui-server/issues/2286)) - 2024-10-23 - [`eb11a09`](https://redirect.github.com/temporalio/ui-server/commit/eb11a09e4) - Encode header fields if they exist on schedule edit ([#​2405](https://redirect.github.com/temporalio/ui-server/issues/2405)) - 2024-10-22 - 
[`aacf78e`](https://redirect.github.com/temporalio/ui-server/commit/aacf78e02) - Don't use translation for default schedule column names ([#​2404](https://redirect.github.com/temporalio/ui-server/issues/2404)) - 2024-10-22 - [`f2d9742`](https://redirect.github.com/temporalio/ui-server/commit/f2d974210) - Don't omit header attribute on event details ([#​2371](https://redirect.github.com/temporalio/ui-server/issues/2371)) - 2024-10-22 - [`42ba898`](https://redirect.github.com/temporalio/ui-server/commit/42ba89818) - 2.32.2 ([#​2403](https://redirect.github.com/temporalio/ui-server/issues/2403)) - 2024-10-22 - [`e91708a`](https://redirect.github.com/temporalio/ui-server/commit/e91708a57) - Add toast instead of redirect on Start Workflow with link to workflow ([#​2402](https://redirect.github.com/temporalio/ui-server/issues/2402)) - 2024-10-22 - [`c2047bf`](https://redirect.github.com/temporalio/ui-server/commit/c2047bf1c) - Add new responsive styles to input on api pagination ([#​2333](https://redirect.github.com/temporalio/ui-server/issues/2333)) - 2024-10-22 - [`9d64fab`](https://redirect.github.com/temporalio/ui-server/commit/9d64fabce) - Create CodecServerErrorBanner component ([#​2399](https://redirect.github.com/temporalio/ui-server/issues/2399)) - 2024-10-22 - [`d5f3e76`](https://redirect.github.com/temporalio/ui-server/commit/d5f3e7690) - Make workflow action menu smaller on small screens ([#​2401](https://redirect.github.com/temporalio/ui-server/issues/2401)) - 2024-10-22 - [`7de4ab5`](https://redirect.github.com/temporalio/ui-server/commit/7de4ab598) - Fix loading state to not flash, set delay to 2 seconds ([#​2400](https://redirect.github.com/temporalio/ui-server/issues/2400)) - 2024-10-21 - [`1550c3a`](https://redirect.github.com/temporalio/ui-server/commit/1550c3afa) - 2.32.1 ([#​2398](https://redirect.github.com/temporalio/ui-server/issues/2398)) - 2024-10-21 - [`4f0d49a`](https://redirect.github.com/temporalio/ui-server/commit/4f0d49a75) - Remove logic for WorkflowUpdate to find corresponding initial event, add Nexus operation events to failed/timedout/canceled ([#​2397](https://redirect.github.com/temporalio/ui-server/issues/2397)) - 2024-10-21 - [`495efe9`](https://redirect.github.com/temporalio/ui-server/commit/495efe966) - Codec Server error banner ([#​2394](https://redirect.github.com/temporalio/ui-server/issues/2394)) - 2024-10-15 - [`8e3486f`](https://redirect.github.com/temporalio/ui-server/commit/8e3486fe5) - Fix pending activity text in timeline ([#​2392](https://redirect.github.com/temporalio/ui-server/issues/2392)) - 2024-10-14 - [`6eb3c15`](https://redirect.github.com/temporalio/ui-server/commit/6eb3c1555) - Add a second wait on start workflow for eventual consistency. 
Make all borders consistent ([#​2391](https://redirect.github.com/temporalio/ui-server/issues/2391)) - 2024-10-14 - [`ae77f01`](https://redirect.github.com/temporalio/ui-server/commit/ae77f01d0) - 2.32.0 ([#​2390](https://redirect.github.com/temporalio/ui-server/issues/2390)) - 2024-10-14 - [`69f569c`](https://redirect.github.com/temporalio/ui-server/commit/69f569cf8) - Move timestamps to the left side of row, include timestamp in compact view, remove icons, add duration/elapsed tooltips ([#​2389](https://redirect.github.com/temporalio/ui-server/issues/2389)) - 2024-10-14 - [`f7aeff7`](https://redirect.github.com/temporalio/ui-server/commit/f7aeff7d6) - Check filter value onRowFilterClick ([#​2387](https://redirect.github.com/temporalio/ui-server/issues/2387)) - 2024-10-11 - [`570ca0f`](https://redirect.github.com/temporalio/ui-server/commit/570ca0f13) - Fix retry attempt text when textAnchor is end ([#​2381](https://redirect.github.com/temporalio/ui-server/issues/2381)) - 2024-10-11 - [`605e906`](https://redirect.github.com/temporalio/ui-server/commit/605e906d7) - Show correct encoding value in PayloadInput ([#​2385](https://redirect.github.com/temporalio/ui-server/issues/2385)) - 2024-10-11 - [`7351e7e`](https://redirect.github.com/temporalio/ui-server/commit/7351e7e8d) - Use whitespace-pre-line to wrap links correctly, remove badge on links ([#​2386](https://redirect.github.com/temporalio/ui-server/issues/2386)) - 2024-10-10 - [`bcc9f02`](https://redirect.github.com/temporalio/ui-server/commit/bcc9f02de) - Explicitly check for response ok false in codeServerRequest ([#​2383](https://redirect.github.com/temporalio/ui-server/issues/2383)) - 2024-10-10 - [`241a40b`](https://redirect.github.com/temporalio/ui-server/commit/241a40b76) - Use encodeUriComponent and decodeUriComponent and add tests ([#​2382](https://redirect.github.com/temporalio/ui-server/issues/2382)) - 2024-10-09 - [`6415ada`](https://redirect.github.com/temporalio/ui-server/commit/6415adaca) - Add newTab prop to MenuItem ([#​2378](https://redirect.github.com/temporalio/ui-server/issues/2378)) - 2024-10-09 - [`f620c44`](https://redirect.github.com/temporalio/ui-server/commit/f620c44be) - Check for initial/lastEvent for eventTime ([#​2380](https://redirect.github.com/temporalio/ui-server/issues/2380)) - 2024-10-09 - [`258e83b`](https://redirect.github.com/temporalio/ui-server/commit/258e83b80) - Add logout icon ([#​2379](https://redirect.github.com/temporalio/ui-server/issues/2379)) - 2024-10-09 - [`3a0ebb0`](https://redirect.github.com/temporalio/ui-server/commit/3a0ebb096) - 2.31.3 ([#​2377](https://redirect.github.com/temporalio/ui-server/issues/2377)) - 2024-10-09 - [`0b39a94`](https://redirect.github.com/temporalio/ui-server/commit/0b39a94c8) - Add pending activity attempt count to timeline ([#​2372](https://redirect.github.com/temporalio/ui-server/issues/2372)) - 2024-10-09 - [`b0df11c`](https://redirect.github.com/temporalio/ui-server/commit/b0df11c19) - UserMenu refactoring ([#​2374](https://redirect.github.com/temporalio/ui-server/issues/2374)) - 2024-10-08 - [`17398e7`](https://redirect.github.com/temporalio/ui-server/commit/17398e79a) - DT-1495 - use description instead of tooltip on reset menu item ([#​2373](https://redirect.github.com/temporalio/ui-server/issues/2373)) - 2024-10-08 - [`f4f9bfc`](https://redirect.github.com/temporalio/ui-server/commit/f4f9bfc54) - Refactor out payload inputs to its own component and add encoding field ([#​2370](https://redirect.github.com/temporalio/ui-server/issues/2370)) - 
2024-10-04 - [`7757a34`](https://redirect.github.com/temporalio/ui-server/commit/7757a34ec) - Combobox component updates ([#​2369](https://redirect.github.com/temporalio/ui-server/issues/2369)) - 2024-10-04 - [`11742db`](https://redirect.github.com/temporalio/ui-server/commit/11742db15) - DT-2502 - handle WorkflowExecutionUpdateAdmitted events ([#​2364](https://redirect.github.com/temporalio/ui-server/issues/2364)) - 2024-10-03 - [`78eb5d4`](https://redirect.github.com/temporalio/ui-server/commit/78eb5d428) - Small responsive UI improvements for EventDetailsRow ([#​2366](https://redirect.github.com/temporalio/ui-server/issues/2366)) - 2024-10-03 - [`5b6371b`](https://redirect.github.com/temporalio/ui-server/commit/5b6371b36) - Refactor event details link, add link support for endpointId ([#​2360](https://redirect.github.com/temporalio/ui-server/issues/2360))
--- ### Configuration 📅 **Schedule**: Branch creation - "after 5pm on monday" in timezone Etc/UTC, Automerge - At any time (no schedule defined). 🚦 **Automerge**: Enabled. ♻ **Rebasing**: Whenever PR is behind base branch, or you tick the rebase/retry checkbox. 👻 **Immortal**: This PR will be recreated if closed unmerged. Get [config help](https://redirect.github.com/renovatebot/renovate/discussions) if that's undesired. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR was generated by [Mend Renovate](https://mend.io/renovate/). View the [repository job log](https://developer.mend.io/github/PeerDB-io/peerdb). Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- docker-compose-dev.yml | 2 +- docker-compose.yml | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docker-compose-dev.yml b/docker-compose-dev.yml index 9db08bbda4..98ee987b36 100644 --- a/docker-compose-dev.yml +++ b/docker-compose-dev.yml @@ -116,7 +116,7 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CORS_ORIGINS=http://localhost:3000 - TEMPORAL_CSRF_COOKIE_INSECURE=true - image: temporalio/ui:2.31.2@sha256:28bb3ea5a6ea3e09f16b521f32ab727c96470f7f1e420c66a6cbfb02001a8aa2 + image: temporalio/ui:2.32.0@sha256:82bf98dbe005a831b6bc5dc12ccd7bffd606af2032dae4821ae133caaa943d3d ports: - 8085:8080 diff --git a/docker-compose.yml b/docker-compose.yml index cf1ec3efe0..4d714e9973 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -106,13 +106,13 @@ services: - TEMPORAL_ADDRESS=temporal:7233 - TEMPORAL_CORS_ORIGINS=http://localhost:3000 - TEMPORAL_CSRF_COOKIE_INSECURE=true - image: temporalio/ui:2.31.2@sha256:28bb3ea5a6ea3e09f16b521f32ab727c96470f7f1e420c66a6cbfb02001a8aa2 + image: temporalio/ui:2.32.0@sha256:82bf98dbe005a831b6bc5dc12ccd7bffd606af2032dae4821ae133caaa943d3d ports: - 8085:8080 flow-api: container_name: flow_api - image: ghcr.io/peerdb-io/flow-api:latest-stable + image: ghcr.io/peerdb-io/flow-api:latest-stable@sha256:53a6de3d7537b4a90b4ff13d822d0a9fa3015857fc739fc2497d33f33b05dfaa restart: unless-stopped ports: - 8112:8112 @@ -128,7 +128,7 @@ services: flow-snapshot-worker: container_name: flow-snapshot-worker - image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-stable + image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-stable@sha256:036d6091e32c9d15f2738bc6aab312aa1f412f5c06c57687b497cde233b73d4c restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -138,7 +138,7 @@ services: flow-worker: container_name: flow-worker - image: ghcr.io/peerdb-io/flow-worker:latest-stable + image: ghcr.io/peerdb-io/flow-worker:latest-stable@sha256:f5d4d5e4e44336d6917e3c8b3d753c77d813d5d1e55ca7fb4d3a3d3d1d3253cc restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -151,7 +151,7 @@ services: peerdb: container_name: peerdb-server stop_signal: SIGINT - image: ghcr.io/peerdb-io/peerdb-server:latest-stable + image: ghcr.io/peerdb-io/peerdb-server:latest-stable@sha256:15249fc45b8b5384fb7a046bc73f75cc679c570a3d2fd3fd8c40c7d7e85f7eef restart: unless-stopped environment: <<: *catalog-config @@ -167,7 +167,7 @@ services: peerdb-ui: container_name: peerdb-ui - image: ghcr.io/peerdb-io/peerdb-ui:latest-stable + image: ghcr.io/peerdb-io/peerdb-ui:latest-stable@sha256:f4d1cdf966eb06f4a4a03db4b02593b44c8a37bd32143c937d3c59c2586c4bb1 restart: unless-stopped ports: - 3000:3000 From 3facb2da4c4fb94d9f1e6152a34e4e5b4eec6dc3 Mon Sep 17 00:00:00 2001 From: Kunal Gupta 
<39487888+iamKunalGupta@users.noreply.github.com> Date: Thu, 21 Nov 2024 18:45:26 +0530 Subject: [PATCH 52/59] fix: latest-stable tag in docker compose (#2279) --- docker-compose.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 4d714e9973..5448632ac1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -112,7 +112,7 @@ services: flow-api: container_name: flow_api - image: ghcr.io/peerdb-io/flow-api:latest-stable@sha256:53a6de3d7537b4a90b4ff13d822d0a9fa3015857fc739fc2497d33f33b05dfaa + image: ghcr.io/peerdb-io/flow-api:stable-v0.19.1 restart: unless-stopped ports: - 8112:8112 @@ -128,7 +128,7 @@ services: flow-snapshot-worker: container_name: flow-snapshot-worker - image: ghcr.io/peerdb-io/flow-snapshot-worker:latest-stable@sha256:036d6091e32c9d15f2738bc6aab312aa1f412f5c06c57687b497cde233b73d4c + image: ghcr.io/peerdb-io/flow-snapshot-worker:stable-v0.19.1 restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -138,7 +138,7 @@ services: flow-worker: container_name: flow-worker - image: ghcr.io/peerdb-io/flow-worker:latest-stable@sha256:f5d4d5e4e44336d6917e3c8b3d753c77d813d5d1e55ca7fb4d3a3d3d1d3253cc + image: ghcr.io/peerdb-io/flow-worker:stable-v0.19.1 restart: unless-stopped environment: <<: [*catalog-config, *flow-worker-env, *minio-config] @@ -151,7 +151,7 @@ services: peerdb: container_name: peerdb-server stop_signal: SIGINT - image: ghcr.io/peerdb-io/peerdb-server:latest-stable@sha256:15249fc45b8b5384fb7a046bc73f75cc679c570a3d2fd3fd8c40c7d7e85f7eef + image: ghcr.io/peerdb-io/peerdb-server:stable-v0.19.1 restart: unless-stopped environment: <<: *catalog-config @@ -167,7 +167,7 @@ services: peerdb-ui: container_name: peerdb-ui - image: ghcr.io/peerdb-io/peerdb-ui:latest-stable@sha256:f4d1cdf966eb06f4a4a03db4b02593b44c8a37bd32143c937d3c59c2586c4bb1 + image: ghcr.io/peerdb-io/peerdb-ui:stable-v0.19.1 restart: unless-stopped ports: - 3000:3000 From 136a28345eece0eeef638fe7445a6198347e0a53 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Thu, 21 Nov 2024 20:59:19 +0530 Subject: [PATCH 53/59] Miscellaneous fixes - add more alerting for maintainpull, fix lag graph function (#2263) - Account for NaN values in LSN function - Add alerts for failure to get postgres connector in maintainpull - Fix <= sign in slack alert log --------- Co-authored-by: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> --- flow/activities/flowable.go | 2 ++ flow/alerting/alerting.go | 2 +- ui/app/peers/[peerName]/lagGraph.tsx | 7 ++++--- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go index cc09bae0d7..8a65f5dded 100644 --- a/flow/activities/flowable.go +++ b/flow/activities/flowable.go @@ -287,11 +287,13 @@ func (a *FlowableActivity) MaintainPull( ctx = context.WithValue(ctx, shared.FlowNameKey, config.FlowJobName) srcConn, err := connectors.GetByNameAs[connectors.CDCPullConnector](ctx, config.Env, a.CatalogPool, config.SourceName) if err != nil { + a.Alerter.LogFlowError(ctx, config.FlowJobName, err) return err } defer connectors.CloseConnector(ctx, srcConn) if err := srcConn.SetupReplConn(ctx); err != nil { + a.Alerter.LogFlowError(ctx, config.FlowJobName, err) return err } diff --git a/flow/alerting/alerting.go b/flow/alerting/alerting.go index 5f05005d14..69282330d6 100644 --- a/flow/alerting/alerting.go +++ b/flow/alerting/alerting.go @@ -356,7 +356,7 @@ 
func (a *Alerter) checkAndAddAlertToCatalog(ctx context.Context, alertConfigId i return true } - logger.Info(fmt.Sprintf("Skipped sending alerts: last alert was sent at %s, which was >=%s ago", createdTimestamp.String(), dur.String())) + logger.Info(fmt.Sprintf("Skipped sending alerts: last alert was sent at %s, which was <=%s ago", createdTimestamp.String(), dur.String())) return false } diff --git a/ui/app/peers/[peerName]/lagGraph.tsx b/ui/app/peers/[peerName]/lagGraph.tsx index 87b90fa8c8..d971bee8f0 100644 --- a/ui/app/peers/[peerName]/lagGraph.tsx +++ b/ui/app/peers/[peerName]/lagGraph.tsx @@ -21,9 +21,10 @@ type LagGraphProps = { function parseLSN(lsn: string): number { if (!lsn) return 0; const [lsn1, lsn2] = lsn.split('/'); - return Number( - (BigInt(parseInt(lsn1, 16)) << BigInt(32)) | BigInt(parseInt(lsn2, 16)) - ); + const parsedLsn1 = parseInt(lsn1, 16); + const parsedLsn2 = parseInt(lsn2, 16); + if (isNaN(parsedLsn1) || isNaN(parsedLsn2)) return 0; + return Number((BigInt(parsedLsn1) << BigInt(32)) | BigInt(parsedLsn2)); } export default function LagGraph({ peerName }: LagGraphProps) { From 60c2744edd581f3880b3dc1b0210a0a6d9512b41 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Thu, 21 Nov 2024 21:17:59 +0530 Subject: [PATCH 54/59] Revamp settings page (#2280) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR opts for a card grid display for the settings page rather than a table, as the content was being cramped to fit into that tabular layout ![Screenshot 2024-11-21 at 6 49 06 PM](https://github.com/user-attachments/assets/08b43cf7-83e7-4c5d-91f2-afc4aacff2de) --- ui/app/settings/page.tsx | 236 +++++++++++++++++---------------- 1 file changed, 102 insertions(+), 134 deletions(-) diff --git a/ui/app/settings/page.tsx b/ui/app/settings/page.tsx index 7ebb1b4cd0..c1d51a2280 100644 --- a/ui/app/settings/page.tsx +++ b/ui/app/settings/page.tsx @@ -9,10 +9,7 @@ import { Button } from '@/lib/Button'; import { Icon } from '@/lib/Icon'; import { Label } from '@/lib/Label'; import { SearchField } from '@/lib/SearchField'; -import { Table, TableCell, TableRow } from '@/lib/Table'; import { TextField } from '@/lib/TextField'; -import { Tooltip } from '@/lib/Tooltip'; -import { MaterialSymbol } from 'material-symbols'; import { useEffect, useMemo, useState } from 'react'; import { ToastContainer } from 'react-toastify'; import 'react-toastify/dist/ReactToastify.css'; @@ -22,40 +19,32 @@ const ROWS_PER_PAGE = 7; const ApplyModeIconWithTooltip = ({ applyMode }: { applyMode: number }) => { let tooltipText = ''; - let iconName: MaterialSymbol = 'help'; + switch (applyMode.toString()) { case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_IMMEDIATE].toString(): tooltipText = 'Changes to this configuration will apply immediately'; - iconName = 'bolt'; break; case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_AFTER_RESUME].toString(): tooltipText = 'Changes to this configuration will apply after resume'; - iconName = 'cached'; break; case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_RESTART].toString(): tooltipText = 'Changes to this configuration will apply after server restart.'; - iconName = 'restart_alt'; break; case DynconfApplyMode[DynconfApplyMode.APPLY_MODE_NEW_MIRROR].toString(): tooltipText = 'Changes to this configuration will apply only to new mirrors'; - iconName = 'new_window'; break; default: tooltipText = 'Unknown apply mode'; - iconName = 'help'; } return (
- - - +
); }; - const DynamicSettingItem = ({ setting, onSettingUpdate, @@ -65,7 +54,7 @@ const DynamicSettingItem = ({ }) => { const [editMode, setEditMode] = useState(false); const [newValue, setNewValue] = useState(setting.value); - + const [showDescription, setShowDescription] = useState(false); const handleEdit = () => { setEditMode(true); }; @@ -130,41 +119,80 @@ const DynamicSettingItem = ({ }; return ( - - - - - - {editMode ? ( -
- setNewValue(e.target.value)} - variant='simple' - /> - +
+
+ +
+
+
+
+
+ setNewValue(e.target.value)} + variant='simple' + readOnly={!editMode} + disabled={!editMode} + /> + +
+
+ +
+
- ) : ( -
- {setting.value || 'N/A'} - +
+
- )} - - - {setting.defaultValue || 'N/A'} - - - {setting.description || 'N/A'} - - - - - + + {showDescription && ( +
+ +
+ )} +
+
+
); }; @@ -172,10 +200,7 @@ const SettingsPage = () => { const [settings, setSettings] = useState({ settings: [], }); - const [currentPage, setCurrentPage] = useState(1); const [searchQuery, setSearchQuery] = useState(''); - const [sortDir, setSortDir] = useState<'asc' | 'dsc'>('asc'); - const sortField = 'name'; const fetchSettings = async () => { const response = await fetch('/api/v1/dynamic_settings'); @@ -189,101 +214,44 @@ const SettingsPage = () => { const filteredSettings = useMemo( () => - settings.settings - .filter((setting) => - setting.name.toLowerCase().includes(searchQuery.toLowerCase()) - ) - .sort((a, b) => { - const aValue = a[sortField]; - const bValue = b[sortField]; - if (aValue < bValue) return sortDir === 'dsc' ? 1 : -1; - if (aValue > bValue) return sortDir === 'dsc' ? -1 : 1; - return 0; - }), - [settings, searchQuery, sortDir] + settings.settings.filter((setting) => + setting.name.toLowerCase().includes(searchQuery.toLowerCase()) + ), + [settings, searchQuery] ); - const totalPages = Math.ceil(filteredSettings.length / ROWS_PER_PAGE); - const displayedSettings = useMemo(() => { - const startRow = (currentPage - 1) * ROWS_PER_PAGE; - const endRow = startRow + ROWS_PER_PAGE; - return filteredSettings.slice(startRow, endRow); - }, [filteredSettings, currentPage]); - - const handlePrevPage = () => { - if (currentPage > 1) setCurrentPage(currentPage - 1); - }; - - const handleNextPage = () => { - if (currentPage < totalPages) setCurrentPage(currentPage + 1); - }; return ( -
- Settings List} - toolbar={{ - left: ( -
- - - - - - -
- ), - right: ( - setSearchQuery(e.target.value)} - /> - ), +
+ + setSearchQuery(e.target.value)} + style={{ fontSize: 13 }} + /> +
- {[ - { header: 'Configuration Name', width: '35%' }, - { header: 'Current Value', width: '10%' }, - { header: 'Default Value', width: '10%' }, - { header: 'Description', width: '35%' }, - { header: 'Apply Mode', width: '10%' }, - ].map(({ header, width }) => ( - - {header} - - ))} - - } > - {displayedSettings.map((setting) => ( + {filteredSettings.map((setting) => ( ))} -
- + +
); }; From cf14792add157c7bbceed069157510b730719131 Mon Sep 17 00:00:00 2001 From: Kevin Biju <52661649+heavycrystal@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:21:35 +0530 Subject: [PATCH 55/59] [dropflow] delete flow configs only after finish drop (#2281) --- flow/workflows/drop_flow.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/flow/workflows/drop_flow.go b/flow/workflows/drop_flow.go index 51bf0091a1..93086157d8 100644 --- a/flow/workflows/drop_flow.go +++ b/flow/workflows/drop_flow.go @@ -92,6 +92,15 @@ func DropFlowWorkflow(ctx workflow.Context, input *protos.DropFlowInput) error { } } + if input.FlowConnectionConfigs != nil { + err := executeCDCDropActivities(ctx, input) + if err != nil { + workflow.GetLogger(ctx).Error("failed to drop CDC flow", slog.Any("error", err)) + return err + } + workflow.GetLogger(ctx).Info("CDC flow dropped successfully") + } + removeFlowEntriesCtx := workflow.WithActivityOptions(ctx, workflow.ActivityOptions{ StartToCloseTimeout: 1 * time.Minute, }) @@ -103,14 +112,5 @@ func DropFlowWorkflow(ctx workflow.Context, input *protos.DropFlowInput) error { return err } - if input.FlowConnectionConfigs != nil { - err := executeCDCDropActivities(ctx, input) - if err != nil { - workflow.GetLogger(ctx).Error("failed to drop CDC flow", slog.Any("error", err)) - return err - } - workflow.GetLogger(ctx).Info("CDC flow dropped successfully") - } - return nil } From 3d813211d36989fe03d573505f4c052d63f38077 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Sun, 24 Nov 2024 20:24:01 +0530 Subject: [PATCH 56/59] CH Normalize: fix error return (#2282) This PR fixes a bug where we are returning the wrong error object, resulting in flow-worker panics --- flow/connectors/clickhouse/normalize.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go index fb221096c0..a5edb40d5a 100644 --- a/flow/connectors/clickhouse/normalize.go +++ b/flow/connectors/clickhouse/normalize.go @@ -474,7 +474,7 @@ func (c *ClickHouseConnector) NormalizeRecords( case queries <- insertIntoSelectQuery.String(): case <-errCtx.Done(): close(queries) - return nil, ctx.Err() + return nil, errCtx.Err() } } close(queries) From c0d1d8abd5f8c4a5d8d1cbe09239b72fae032bc1 Mon Sep 17 00:00:00 2001 From: Amogh Bharadwaj <65964360+Amogh-Bharadwaj@users.noreply.github.com> Date: Sun, 24 Nov 2024 22:10:35 +0530 Subject: [PATCH 57/59] Fix slot gauge (#2283) There seems to be an issue where RecordSlotSize panics on this line with a nil pointer dereference: ``` slotMetricGauges.SlotLagGauge.Record(ctx, float64(slotInfo[0].LagInMb), metric.WithAttributeSet(attribute.NewSet( ``` As far as I can see, the only thing that can be nil is `slotMetricGauges.SlotLagGauge`, so this PR adds a guard for that and the other gauges here --- flow/connectors/postgres/postgres.go | 67 +++++++++++++++++----------- 1 file changed, 42 insertions(+), 25 deletions(-) diff --git a/flow/connectors/postgres/postgres.go b/flow/connectors/postgres/postgres.go index ae0dbea52d..e685b5c128 100644 --- a/flow/connectors/postgres/postgres.go +++ b/flow/connectors/postgres/postgres.go @@ -1215,12 +1215,17 @@ func (c *PostgresConnector) HandleSlotInfo( logger.Info(fmt.Sprintf("Checking %s lag for %s", alertKeys.SlotName, alertKeys.PeerName), slog.Float64("LagInMB", float64(slotInfo[0].LagInMb))) alerter.AlertIfSlotLag(ctx, alertKeys, slotInfo[0]) -
-		attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
-		attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
-		attribute.String(otel_metrics.SlotNameKey, alertKeys.SlotName),
-		attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID())),
-	))
+
+	if slotMetricGauges.SlotLagGauge != nil {
+		slotMetricGauges.SlotLagGauge.Record(ctx, float64(slotInfo[0].LagInMb), metric.WithAttributeSet(attribute.NewSet(
+			attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
+			attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
+			attribute.String(otel_metrics.SlotNameKey, alertKeys.SlotName),
+			attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID())),
+		))
+	} else {
+		logger.Warn("warning: slotMetricGauges.SlotLagGauge is nil")
+	}
 
 	// Also handles alerts for PeerDB user connections exceeding a given limit here
 	res, err := getOpenConnectionsForUser(ctx, c.conn, c.config.User)
@@ -1229,25 +1234,33 @@
 		return err
 	}
 	alerter.AlertIfOpenConnections(ctx, alertKeys, res)
-	slotMetricGauges.OpenConnectionsGauge.Record(ctx, res.CurrentOpenConnections, metric.WithAttributeSet(attribute.NewSet(
-		attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
-		attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
-		attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
-	)))
+	if slotMetricGauges.OpenConnectionsGauge != nil {
+		slotMetricGauges.OpenConnectionsGauge.Record(ctx, res.CurrentOpenConnections, metric.WithAttributeSet(attribute.NewSet(
+			attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
+			attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
+			attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
+		)))
+	} else {
+		logger.Warn("warning: slotMetricGauges.OpenConnectionsGauge is nil")
+	}
 
 	replicationRes, err := getOpenReplicationConnectionsForUser(ctx, c.conn, c.config.User)
 	if err != nil {
 		logger.Warn("warning: failed to get current open replication connections", "error", err)
 		return err
 	}
-	slotMetricGauges.OpenReplicationConnectionsGauge.Record(ctx, replicationRes.CurrentOpenConnections,
-		metric.WithAttributeSet(attribute.NewSet(
-			attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
-			attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
-			attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
-		)),
-	)
+	if slotMetricGauges.OpenReplicationConnectionsGauge != nil {
+		slotMetricGauges.OpenReplicationConnectionsGauge.Record(ctx, replicationRes.CurrentOpenConnections,
+			metric.WithAttributeSet(attribute.NewSet(
+				attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
+				attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
+				attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
+			)),
+		)
+	} else {
+		logger.Warn("warning: slotMetricGauges.OpenReplicationConnectionsGauge is nil")
+	}
 
 	var intervalSinceLastNormalize *time.Duration
 	if err := alerter.CatalogPool.QueryRow(
@@ -1261,13 +1274,17 @@
 		return nil
 	}
 	if intervalSinceLastNormalize != nil {
-		slotMetricGauges.IntervalSinceLastNormalizeGauge.Record(ctx, intervalSinceLastNormalize.Seconds(),
-			metric.WithAttributeSet(attribute.NewSet(
-				attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
-				attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
-				attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
-			)),
-		)
+		if slotMetricGauges.IntervalSinceLastNormalizeGauge != nil {
+			slotMetricGauges.IntervalSinceLastNormalizeGauge.Record(ctx, intervalSinceLastNormalize.Seconds(),
+				metric.WithAttributeSet(attribute.NewSet(
+					attribute.String(otel_metrics.FlowNameKey, alertKeys.FlowName),
+					attribute.String(otel_metrics.PeerNameKey, alertKeys.PeerName),
+					attribute.String(otel_metrics.DeploymentUidKey, peerdbenv.PeerDBDeploymentUID()),
+				)),
+			)
+		} else {
+			logger.Warn("warning: slotMetricGauges.IntervalSinceLastNormalizeGauge is nil")
+		}
 		alerter.AlertIfTooLongSinceLastNormalize(ctx, alertKeys, *intervalSinceLastNormalize)
 	}

From 82bd46102ff5915cffaf782228c0a5d3c57d8730 Mon Sep 17 00:00:00 2001
From: Kaushik Iska
Date: Sun, 24 Nov 2024 13:19:24 -0600
Subject: [PATCH 58/59] add cause for context in ch normalize errgroup (#2285)

---
 flow/activities/flowable.go             | 8 ++++----
 flow/connectors/clickhouse/normalize.go | 5 ++++-
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/flow/activities/flowable.go b/flow/activities/flowable.go
index 8a65f5dded..8001b5344c 100644
--- a/flow/activities/flowable.go
+++ b/flow/activities/flowable.go
@@ -409,7 +409,7 @@ func (a *FlowableActivity) StartNormalize(
 	if errors.Is(err, errors.ErrUnsupported) {
 		return nil, monitoring.UpdateEndTimeForCDCBatch(ctx, a.CatalogPool, input.FlowConnectionConfigs.FlowJobName, input.SyncBatchID)
 	} else if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get normalize connector: %w", err)
 	}
 	defer connectors.CloseConnector(ctx, dstConn)
 
@@ -420,7 +420,7 @@
 	tableNameSchemaMapping, err := a.getTableNameSchemaMapping(ctx, input.FlowConnectionConfigs.FlowJobName)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get table name schema mapping: %w", err)
 	}
 
 	res, err := dstConn.NormalizeRecords(ctx, &model.NormalizeRecordsRequest{
@@ -438,13 +438,13 @@
 	}
 	dstType, err := connectors.LoadPeerType(ctx, a.CatalogPool, input.FlowConnectionConfigs.DestinationName)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get peer type: %w", err)
 	}
 	if dstType == protos.DBType_POSTGRES {
 		err = monitoring.UpdateEndTimeForCDCBatch(ctx, a.CatalogPool, input.FlowConnectionConfigs.FlowJobName, input.SyncBatchID)
 		if err != nil {
-			return nil, err
+			return nil, fmt.Errorf("failed to update end time for cdc batch: %w", err)
 		}
 	}

diff --git a/flow/connectors/clickhouse/normalize.go b/flow/connectors/clickhouse/normalize.go
index a5edb40d5a..2debe0f4d5 100644
--- a/flow/connectors/clickhouse/normalize.go
+++ b/flow/connectors/clickhouse/normalize.go
@@ -474,7 +474,10 @@ func (c *ClickHouseConnector) NormalizeRecords(
 		case queries <- insertIntoSelectQuery.String():
 		case <-errCtx.Done():
 			close(queries)
-			return nil, errCtx.Err()
+			c.logger.Error("[clickhouse] context canceled while normalizing",
+				slog.Any("error", errCtx.Err()),
+				slog.Any("cause", context.Cause(errCtx)))
+			return nil, context.Cause(errCtx)
 		}
 	}
 	close(queries)

From 4379613bbc8e2df8949bf918dc3cd11555ea62d3 Mon Sep 17 00:00:00 2001
From: Kaushik Iska
Date: Mon, 25 Nov 2024 08:37:28 -0600
Subject: [PATCH 59/59] Tag network errors as err:Net (#2287)

---
 flow/alerting/alerting.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/flow/alerting/alerting.go b/flow/alerting/alerting.go
index 69282330d6..4413b2efbd 100644 --- a/flow/alerting/alerting.go +++ b/flow/alerting/alerting.go @@ -440,6 +440,10 @@ func (a *Alerter) LogFlowError(ctx context.Context, flowName string, err error) if errors.As(err, &pgErr) { tags = append(tags, "pgcode:"+pgErr.Code) } + var netErr *net.OpError + if errors.As(err, &netErr) { + tags = append(tags, "err:Net") + } a.sendTelemetryMessage(ctx, logger, flowName, errorWithStack, telemetry.ERROR, tags...) }
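
For readers tracing the last patch: `errors.As` walks the wrapped error chain, so the `*net.OpError` check in `LogFlowError` fires even when the network failure is buried under several layers of `fmt.Errorf("...: %w", err)` wrapping. Below is a minimal, self-contained sketch of that tagging pattern using only the standard library; the `classifyError` helper and its output are illustrative stand-ins, not PeerDB APIs.

```go
package main

import (
	"errors"
	"fmt"
	"net"
	"syscall"
)

// classifyError collects tags for an error the same way the patch above
// does: errors.As unwraps %w-wrapped errors until it finds a *net.OpError.
// This helper is hypothetical; the real tag collection lives in
// Alerter.LogFlowError.
func classifyError(err error) []string {
	var tags []string
	var netErr *net.OpError
	if errors.As(err, &netErr) {
		tags = append(tags, "err:Net")
	}
	return tags
}

func main() {
	// A dial failure wrapped twice, as it might surface from a connector.
	base := &net.OpError{Op: "dial", Net: "tcp", Err: syscall.ECONNREFUSED}
	wrapped := fmt.Errorf("sync failed: %w", fmt.Errorf("flush: %w", base))

	fmt.Println(classifyError(wrapped)) // prints [err:Net]
}
```

Because the check targets the concrete `*net.OpError` type rather than matching on error strings, it stays robust as wrapping layers are added or error messages are reworded.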