diff --git a/flux/stdlib/influxdata/influxdb/rules.go b/flux/stdlib/influxdata/influxdb/rules.go
index 4b6425ac95b..8ce3a20e041 100644
--- a/flux/stdlib/influxdata/influxdb/rules.go
+++ b/flux/stdlib/influxdata/influxdb/rules.go
@@ -16,6 +16,7 @@ func init() {
 		PushDownGroupRule{},
 		PushDownReadTagKeysRule{},
 		PushDownReadTagValuesRule{},
+		SortedPivotRule{},
 	)
 }
 
@@ -100,6 +101,11 @@ func (PushDownFilterRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
 	fromNode := pn.Predecessors()[0]
 	fromSpec := fromNode.ProcedureSpec().(*ReadRangePhysSpec)
 
+	// Cannot push down when keeping empty tables.
+	if filterSpec.KeepEmptyTables {
+		return pn, false, nil
+	}
+
 	bodyExpr, ok := filterSpec.Fn.Fn.Block.Body.(semantic.Expression)
 	if !ok {
 		return pn, false, nil
@@ -460,6 +466,15 @@ func rewritePushableExpr(e semantic.Expression) (semantic.Expression, bool) {
 			e.Left, e.Right = left, right
 			return e, true
 		}
+
+	case *semantic.LogicalExpression:
+		left, lok := rewritePushableExpr(e.Left)
+		right, rok := rewritePushableExpr(e.Right)
+		if lok || rok {
+			e = e.Copy().(*semantic.LogicalExpression)
+			e.Left, e.Right = left, right
+			return e, true
+		}
 	}
 	return e, false
 }
@@ -551,3 +566,55 @@ func isPushableFieldOperator(kind ast.OperatorKind) bool {
 
 	return false
 }
+
+// SortedPivotRule is a rule that optimizes a pivot when it is directly
+// after an influxdb from.
+type SortedPivotRule struct{}
+
+func (SortedPivotRule) Name() string {
+	return "SortedPivotRule"
+}
+
+func (SortedPivotRule) Pattern() plan.Pattern {
+	return plan.Pat(universe.PivotKind, plan.Pat(ReadRangePhysKind))
+}
+
+func (SortedPivotRule) Rewrite(pn plan.Node) (plan.Node, bool, error) {
+	pivotSpec := pn.ProcedureSpec().Copy().(*universe.PivotProcedureSpec)
+	pivotSpec.IsSortedByFunc = func(cols []string, desc bool) bool {
+		if desc {
+			return false
+		}
+
+		// The only thing that disqualifies this from being
+		// sorted is if the _value column is mentioned.
+		for _, label := range cols {
+			if label == execute.DefaultTimeColLabel {
+				continue
+			} else if label == execute.DefaultValueColLabel {
+				return false
+			}
+
+			// Everything else is a tag. Even if the tag does not exist,
+			// this is still considered sorted since sorting doesn't depend
+			// on a tag existing.
+		}
+
+		// We are already sorted.
+		return true
+	}
+	pivotSpec.IsKeyColumnFunc = func(label string) bool {
+		if label == execute.DefaultTimeColLabel || label == execute.DefaultValueColLabel {
+			return false
+		}
+		// Everything else would be a tag if it existed.
+		// The transformation itself will catch if the column does not exist.
+		return true
+	}
+
+	if err := pn.ReplaceSpec(pivotSpec); err != nil {
+		return nil, false, err
+	}
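+	// Note: the spec was replaced in place, so report "no change" here;
+	// the pattern still matches the rewritten node, and reporting a change
+	// would make the planner apply this rule to it again.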
+	return pn, false, nil
+}
diff --git a/flux/stdlib/influxdata/influxdb/rules_test.go b/flux/stdlib/influxdata/influxdb/rules_test.go
index 676deda556c..e27ecabf240 100644
--- a/flux/stdlib/influxdata/influxdb/rules_test.go
+++ b/flux/stdlib/influxdata/influxdb/rules_test.go
@@ -581,6 +581,73 @@ func TestPushDownFilterRule(t *testing.T) {
 			},
 			NoChange: true,
 		},
+		{
+			Name:  `r._measurement == "cpu" and exists r.host`,
+			Rules: []plan.Rule{influxdb.PushDownFilterRule{}},
+			Before: &plantest.PlanSpec{
+				Nodes: []plan.Node{
+					plan.CreatePhysicalNode("ReadRange", &influxdb.ReadRangePhysSpec{
+						Bounds: bounds,
+					}),
+					plan.CreatePhysicalNode("filter", &universe.FilterProcedureSpec{
+						Fn: makeResolvedFilterFn(&semantic.LogicalExpression{
+							Operator: ast.AndOperator,
+							Left: &semantic.BinaryExpression{
+								Operator: ast.EqualOperator,
+								Left: &semantic.MemberExpression{
+									Object:   &semantic.IdentifierExpression{Name: "r"},
+									Property: "_measurement",
+								},
+								Right: &semantic.StringLiteral{
+									Value: "cpu",
+								},
+							},
+							Right: &semantic.UnaryExpression{
+								Operator: ast.ExistsOperator,
+								Argument: &semantic.MemberExpression{
+									Object:   &semantic.IdentifierExpression{Name: "r"},
+									Property: "host",
+								},
+							},
+						}),
+					}),
+				},
+				Edges: [][2]int{
+					{0, 1},
+				},
+			},
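+			// The rewritten filter below reflects how the rule pushes
+			// `exists r.host` down to storage: the exists check is
+			// rewritten into the comparison `r.host != ""`.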
+			After: &plantest.PlanSpec{
+				Nodes: []plan.Node{
+					plan.CreatePhysicalNode("merged_ReadRange_filter", &influxdb.ReadRangePhysSpec{
+						Bounds:    bounds,
+						FilterSet: true,
+						Filter: makeFilterFn(&semantic.LogicalExpression{
+							Operator: ast.AndOperator,
+							Left: &semantic.BinaryExpression{
+								Operator: ast.EqualOperator,
+								Left: &semantic.MemberExpression{
+									Object:   &semantic.IdentifierExpression{Name: "r"},
+									Property: "_measurement",
+								},
+								Right: &semantic.StringLiteral{
+									Value: "cpu",
+								},
+							},
+							Right: &semantic.BinaryExpression{
+								Operator: ast.NotEqualOperator,
+								Left: &semantic.MemberExpression{
+									Object:   &semantic.IdentifierExpression{Name: "r"},
+									Property: "host",
+								},
+								Right: &semantic.StringLiteral{
+									Value: "",
+								},
+							},
+						}),
+					}),
+				},
+			},
+		},
 	}
 
 	for _, tc := range tests {
diff --git a/flux/stdlib/influxdata/influxdb/to.go b/flux/stdlib/influxdata/influxdb/to.go
new file mode 100644
index 00000000000..6fecaa533a1
--- /dev/null
+++ b/flux/stdlib/influxdata/influxdb/to.go
@@ -0,0 +1,4 @@
+package influxdb
+
+// TODO(jsternberg): Implement the to method in influxdb 1.x.
+// This file is kept around so it shows up in the patch.
diff --git a/flux/stdlib/influxdata/influxdb/to_test.go b/flux/stdlib/influxdata/influxdb/to_test.go
new file mode 100644
index 00000000000..daba8c93621
--- /dev/null
+++ b/flux/stdlib/influxdata/influxdb/to_test.go
@@ -0,0 +1 @@
+package influxdb_test
diff --git a/go.mod b/go.mod
index e12ece68c82..c060936dd90 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,6 @@ require (
 	collectd.org v0.3.0
 	github.com/BurntSushi/toml v0.3.1
 	github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db
-	github.com/aws/aws-sdk-go v1.25.16 // indirect
 	github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40
 	github.com/boltdb/bolt v1.3.1
 	github.com/cespare/xxhash v1.1.0
@@ -19,7 +18,7 @@ require (
 	github.com/gogo/protobuf v1.1.1
 	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db
 	github.com/google/go-cmp v0.4.0
-	github.com/influxdata/flux v0.50.2
+	github.com/influxdata/flux v0.64.0
 	github.com/influxdata/influxql v1.0.1
 	github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6
 	github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368
@@ -30,7 +29,6 @@ require (
 	github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 // indirect
 	github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada
 	github.com/mattn/go-isatty v0.0.4
-	github.com/mattn/go-zglob v0.0.1 // indirect
 	github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae // indirect
 	github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947
 	github.com/paulbellamy/ratecounter v0.2.0
diff --git a/go.sum b/go.sum
index a07141c57e5..64d3c383ae4 100644
--- a/go.sum
+++ b/go.sum
@@ -30,45 +30,26 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
 github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
-github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
-github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
-github.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI=
-github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db h1:nxAtV4VajJDhKysp2kdcJZsq8Ss1xSA0vZTkVHHJd0E=
 github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
-github.com/apex/log v1.1.0 h1:J5rld6WVFi6NxA6m8GJ1LJqu3+GiTFIt3mYv27gdQWI=
-github.com/apex/log v1.1.0/go.mod h1:yA770aXIDQrhVOIGurT/pVdfCpSq1GQV/auzMN5fzvY=
-github.com/aws/aws-sdk-go v1.15.64/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
-github.com/aws/aws-sdk-go v1.25.16 h1:k7Fy6T/uNuLX6zuayU/TJoP7yMgGcJSkZpF7QVjwYpA=
-github.com/aws/aws-sdk-go v1.25.16/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI=
-github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI=
 github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=
 github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
 github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
 github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=
 github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
-github.com/caarlos0/ctrlc v1.0.0 h1:2DtF8GSIcajgffDFJzyG15vO+1PuBWOMUdFut7NnXhw=
-github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
-github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e h1:V9a67dfYqPLAvzk5hMQOXYJlZ4SLIXgyKIE+ZiHzgGQ=
-github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@@ -86,17 +67,9 @@ github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgo
 github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
 github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=
 github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
-github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo=
-github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
-github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
-github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
 github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8=
@@ -112,6 +85,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
 github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -137,10 +112,6 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
-github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
-github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -152,22 +123,13 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/goreleaser/goreleaser v0.94.0 h1:2CFMxMTLODjYfNOx2sADNzpgCwH9ltMqvQYtj+ntK1Q=
-github.com/goreleaser/goreleaser v0.94.0/go.mod h1:OjbYR2NhOI6AEUWCowMSBzo9nP1aRif3sYtx+rhp+Zo=
-github.com/goreleaser/nfpm v0.9.7 h1:h8RQMDztu6cW7b0/s4PGbdeMYykAbJG0UMXaWG5uBMI=
-github.com/goreleaser/nfpm v0.9.7/go.mod h1:F2yzin6cBAL9gb+mSiReuXdsfTrOQwDMsuSpULof+y4=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/changelog v1.1.0 h1:HXhmLZDrbuC+Ca5YX7g8B8cH5DmJpaOjd844d9Y7aTQ=
-github.com/influxdata/changelog v1.1.0/go.mod h1:uzpGWE/qehT8L426YuXwpMQub+a63vIINhIeEI9mnSM=
-github.com/influxdata/flux v0.50.2 h1:3qSoVZ5y1kjlQ0kPQ0sHZ8Wl0bi5NOxcIH0xGEG1GZI=
-github.com/influxdata/flux v0.50.2/go.mod h1:absI6L1dQnJhd0+NFj+Kl/BVTnm/BG1dbJIHDiDQbA4=
+github.com/influxdata/flux v0.64.0 h1:tQ1ydITwqmLM/Cuf20JRf/1ZMHkpgYv63ZbxIy8Bnvs=
+github.com/influxdata/flux v0.64.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
 github.com/influxdata/influxql v1.0.1 h1:6PGG0SunRmptIMIreNRolhQ38Sq4qDfi2dS3BS1YD8Y=
 github.com/influxdata/influxql v1.0.1/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo=
 github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e h1:/o3vQtpWJhvnIbXley4/jwzzqNeigJK9z+LZcJZ9zfM=
@@ -179,12 +141,6 @@ github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9 h1:MHTrDWmQpHq/
 github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
 github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 h1:+TUUmaFa4YD1Q+7bH9o5NCHQGPMqZCYJiNW6lIIS9z4=
 github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
@@ -197,8 +153,6 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0=
 github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
-github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e h1:RgQk53JHp/Cjunrr1WlsXSZpqXn+uREuHvUVcK82CV8=
-github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.4.0 h1:8nsMz3tWa9SWWPL60G1V6CUsf4lLjWLTNEtibhe8gh8=
 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
@@ -223,17 +177,12 @@ github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-runewidth v0.0.3 h1:a+kO+98RDGEfo6asOGMmpodZq4FNtnGP54yps8BzLR4=
 github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-sqlite3 v1.11.0 h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
-github.com/mattn/go-zglob v0.0.0-20171230104132-4959821b4817/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
-github.com/mattn/go-zglob v0.0.0-20180803001819-2ea3427bfa53/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
-github.com/mattn/go-zglob v0.0.1 h1:xsEx/XUoVlI6yXjqBK062zYhRTZltCNmYPx6v+8DNaY=
-github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
@@ -244,8 +193,6 @@ github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947 h1:oF
 github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/paulbellamy/ratecounter v0.2.0 h1:2L/RhJq+HA8gBQImDXtLPrDXK5qAj6ozWVK/zFXVJGs=
 github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
-github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=
-github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
 github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f h1:O62NGAXV0cNzBI6e7vI3zTHSTgPHsWIcS3Q4XC1/pAU=
 github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
 github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
@@ -276,8 +223,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
 github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
 github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM=
+github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
 github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -296,12 +243,9 @@ github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
-github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
@@ -310,8 +254,6 @@ github.com/tinylib/msgp v1.0.2 h1:DfdQrzQa7Yh2es9SuLkixqxuXS2SxsdYn0KbdrOGWD8=
 github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8=
 github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
-github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
 github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6 h1:YdYsPAZ2pC6Tow/nPZOPQ96O3hm/ToAkGsPLzedXERk=
 github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -361,8 +303,6 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
 golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -378,7 +318,6 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -394,13 +333,10 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181030150119-7e31e0c00fa0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -414,7 +350,6 @@ golang.org/x/sys v0.0.0-20200107162124-548cf772de50 h1:YvQ10rzcqWXLlJZ3XCUoO25sa
 golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -467,7 +402,6 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb
 google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
 google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
@@ -500,14 +434,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo=
-gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs=
-gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
-gopkg.in/src-d/go-git.v4 v4.8.1 h1:aAyBmkdE1QUUEHcP4YFCGKmsMQRAuRmUcPEQR7lOAa0=
-gopkg.in/src-d/go-git.v4 v4.8.1/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk=
-gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/patches/cursors.patch b/patches/cursors.patch
new file mode 100644
index 00000000000..2507aa278fc
--- /dev/null
+++ b/patches/cursors.patch
@@ -0,0 +1,8 @@
+diff --git b/tsdb/cursors/gen.go a/tsdb/cursors/gen.go
+index 63316e5c0..ee7a8876a 100644
+--- b/tsdb/cursors/gen.go
++++ a/tsdb/cursors/gen.go
+@@ -1,3 +1 @@
+ package cursors
+-
+-//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@arrayvalues.gen.go.tmpldata arrayvalues.gen.go.tmpl
diff --git a/patches/flux.patch b/patches/flux.patch
index 1e384739ff9..c7c1cd74f04 100644
--- a/patches/flux.patch
+++ b/patches/flux.patch
@@ -1,5 +1,5 @@
 diff --git b/flux/stdlib/influxdata/influxdb/buckets.go a/flux/stdlib/influxdata/influxdb/buckets.go
-index 4fd36f948..9ecbe4923 100644
+index 4fd36f948d..9ecbe49234 100644
 --- b/flux/stdlib/influxdata/influxdb/buckets.go
 +++ a/flux/stdlib/influxdata/influxdb/buckets.go
 @@ -2,6 +2,7 @@ package influxdb
@@ -168,7 +168,7 @@ index 4fd36f948..9ecbe4923 100644
 }
-type BucketDependencies AllBucketLookup
 diff --git b/flux/stdlib/influxdata/influxdb/dependencies.go a/flux/stdlib/influxdata/influxdb/dependencies.go
-index 3303c2758..ad9a36ab6 100644
+index 3303c27589..ad9a36ab6d 100644
 --- b/flux/stdlib/influxdata/influxdb/dependencies.go
 +++ a/flux/stdlib/influxdata/influxdb/dependencies.go
 @@ -2,13 +2,9 @@ package influxdb
...
}
 return deps, nil
diff --git b/flux/stdlib/influxdata/influxdb/from.go a/flux/stdlib/influxdata/influxdb/from.go
-index cdd6789c1..6662f54fd 100644
+index cdd6789c19..6662f54fd4 100644
--- b/flux/stdlib/influxdata/influxdb/from.go
+++ a/flux/stdlib/influxdata/influxdb/from.go
@@ -8,7 +8,6 @@ import (
 "github.com/influxdata/flux/plan"
 "github.com/influxdata/flux/semantic"
 "github.com/influxdata/flux/stdlib/influxdata/influxdb"
- platform "github.com/influxdata/influxdb"
 "github.com/pkg/errors"
)
@@ -66,31 +65,6 @@
 return FromKind
}
-// BucketsAccessed makes FromOpSpec a query.BucketAwareOperationSpec
-func (s *FromOpSpec) BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) {
-	bf := platform.BucketFilter{}
-	if s.Bucket != "" {
-		bf.Name = &s.Bucket
-	}
-	if orgID != nil {
-		bf.OrganizationID = orgID
-	}
-
-	if len(s.BucketID) > 0 {
-		if id, err := platform.IDFromString(s.BucketID); err != nil {
-			invalidID := platform.InvalidID()
-			bf.ID = &invalidID
-		} else {
-			bf.ID = id
-		}
-	}
-
-	if bf.ID != nil || bf.Name != nil {
-		readBuckets = append(readBuckets, bf)
-	}
-	return readBuckets, writeBuckets
-}
-
 type FromProcedureSpec struct {
 	Bucket   string
 	BucketID string
diff --git b/flux/stdlib/influxdata/influxdb/from_test.go a/flux/stdlib/influxdata/influxdb/from_test.go
-index dac3b13ee..daba8c936 100644
+index dac3b13eee..daba8c9362 100644
--- b/flux/stdlib/influxdata/influxdb/from_test.go
+++ a/flux/stdlib/influxdata/influxdb/from_test.go
@@ -1,192 +1 @@
...
- }
-}
diff --git b/flux/stdlib/influxdata/influxdb/metrics.go a/flux/stdlib/influxdata/influxdb/metrics.go
-index 82577e3a5..dd3cee868 100644
+index 82577e3a57..dd3cee868b 100644
--- b/flux/stdlib/influxdata/influxdb/metrics.go
+++ a/flux/stdlib/influxdata/influxdb/metrics.go
@@ -1,83 +1,3 @@
...
-}
+// Storage metrics are not implemented in the 1.x flux engine.
diff --git b/flux/stdlib/influxdata/influxdb/operators.go a/flux/stdlib/influxdata/influxdb/operators.go
-index 4203b074b..b0db49591 100644
+index 4203b074be..b0db495917 100644
--- b/flux/stdlib/influxdata/influxdb/operators.go
+++ a/flux/stdlib/influxdata/influxdb/operators.go
@@ -2,14 +2,16 @@ package influxdb
 import (
 	"context"
 	"errors"
-	"fmt"
+	"strings"
 
 	"github.com/influxdata/flux"
+	"github.com/influxdata/flux/execute"
 	"github.com/influxdata/flux/plan"
 	"github.com/influxdata/flux/semantic"
 	"github.com/influxdata/flux/values"
-	"github.com/influxdata/influxdb"
+	"github.com/influxdata/influxdb/services/meta"
+	"github.com/influxdata/influxql"
 )
 
 const (
...
 // TimeBounds implements plan.BoundsAwareProcedureSpec.
diff --git b/flux/stdlib/influxdata/influxdb/rules.go a/flux/stdlib/influxdata/influxdb/rules.go
-index 9e9e66283..4b6425ac9 100644
+index 4102a2a73e..8ce3a20e04 100644
--- b/flux/stdlib/influxdata/influxdb/rules.go
+++ a/flux/stdlib/influxdata/influxdb/rules.go
-@@ -192,6 +192,12 @@ func (rule PushDownReadTagKeysRule) Rewrite(pn plan.Node) (plan.Node, bool, erro
+@@ -198,6 +198,12 @@ func (rule PushDownReadTagKeysRule) Rewrite(pn plan.Node) (plan.Node, bool, erro
 // constructing our own replacement. We do not care about it
 // at the moment though which is why it is not in the pattern.
 
+// The tag keys mechanism doesn't know about fields so we cannot
+// push down _field comparisons in 1.x.
+if hasFieldExpr(fromSpec.Filter) {
+	return pn, false, nil
+}
+
 // The schema mutator needs to correspond to a keep call
 // on the column specified by the keys procedure.
 if len(keepSpec.Mutations) != 1 {
-@@ -221,6 +227,20 @@ func (rule PushDownReadTagKeysRule) Rewrite(pn plan.Node) (plan.Node, bool, erro
+@@ -227,6 +233,20 @@ func (rule PushDownReadTagKeysRule) Rewrite(pn plan.Node) (plan.Node, bool, erro
 }), true, nil
}
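+// hasFieldExpr reports whether the expression references the _field
+// column anywhere in its tree.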
+func hasFieldExpr(expr semantic.Expression) bool {
+	hasField := false
+	v := semantic.CreateVisitor(func(node semantic.Node) {
+		switch n := node.(type) {
+		case *semantic.MemberExpression:
+			if n.Property == "_field" {
+				hasField = true
+			}
+		}
+	})
+	semantic.Walk(v, expr)
+	return hasField
+}
+
 // PushDownReadTagValuesRule matches 'ReadRange |> keep(columns: [tag]) |> group() |> distinct(column: tag)'.
 // The 'from()' must have already been merged with 'range' and, optionally,
 // may have been merged with 'filter'.
-@@ -298,6 +318,9 @@ var invalidTagKeysForTagValues = []string{
+@@ -304,6 +324,9 @@ var invalidTagKeysForTagValues = []string{
 execute.DefaultValueColLabel,
 execute.DefaultStartColLabel,
 execute.DefaultStopColLabel,
+	// TODO(jsternberg): There just doesn't seem to be a good way to do this
+	// in the 1.x line of the release.
}
 
 // isValidTagKeyForTagValues returns true if the given key can
diff --git b/flux/stdlib/influxdata/influxdb/rules_test.go a/flux/stdlib/influxdata/influxdb/rules_test.go
-index 12514b8d8..676deda55 100644
+index b4f7b79efe..e27ecabf24 100644
--- b/flux/stdlib/influxdata/influxdb/rules_test.go
+++ a/flux/stdlib/influxdata/influxdb/rules_test.go
@@ -12,7 +12,7 @@ import (
...
 func fluxTime(t int64) flux.Time {
diff --git b/flux/stdlib/influxdata/influxdb/source.go a/flux/stdlib/influxdata/influxdb/source.go
-index 1cff76b5b..3e0d5b654 100644
+index 0ace5b7dc5..3e0d5b6542 100644
--- b/flux/stdlib/influxdata/influxdb/source.go
+++ a/flux/stdlib/influxdata/influxdb/source.go
@@ -3,7 +3,6 @@ package influxdb
...
 "github.com/influxdata/influxdb/tsdb/cursors"
)
-@@ -36,17 +32,10 @@ type Source struct {
+@@ -36,24 +32,10 @@ type Source struct {
 stats cursors.CursorStats
 
 runner runner
...
 func (s *Source) Run(ctx context.Context) {
-	labelValues := s.m.getLabelValues(ctx, s.orgID, s.op)
-	start := time.Now()
-	err := s.runner.run(ctx)
--	var err error
--	if flux.IsExperimentalTracingEnabled() {
--		span, ctxWithSpan := tracing.StartSpanFromContextWithOperationName(ctx, "source-"+s.op)
--		err = s.runner.run(ctxWithSpan)
--		span.Finish()
--	} else {
--		err = s.runner.run(ctx)
--	}
-	s.m.recordMetrics(labelValues, start)
+	err := s.runner.run(ctx)
 for _, t := range s.ts {
 	t.Finish(s.id, err)
 }
-@@ -123,10 +112,6 @@ func ReadFilterSource(id execute.DatasetID, r Reader, readSpec ReadFilterSpec, a
+@@ -130,10 +112,6 @@ func ReadFilterSource(id execute.DatasetID, r Reader, readSpec ReadFilterSpec, a
 src.reader = r
 src.readSpec = readSpec
...
 src.runner = src
 return src
}
-@@ -145,8 +130,7 @@ func (s *readFilterSource) run(ctx context.Context) error {
+@@ -152,8 +130,7 @@ func (s *readFilterSource) run(ctx context.Context) error {
}
 
 func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
...
 spec := s.(*ReadRangePhysSpec)
 
-@@ -158,18 +142,9 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut
+@@ -165,18 +142,9 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut
 }
 }
...
 if err != nil {
 	return nil, err
 }
-@@ -182,10 +157,10 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut
+@@ -189,10 +157,10 @@ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execut
 id,
 deps.Reader,
 ReadFilterSpec{
...
 },
 a,
), nil
-@@ -206,10 +181,6 @@ func ReadGroupSource(id execute.DatasetID, r Reader, readSpec ReadGroupSpec, a e
+@@ -213,10 +181,6 @@ func ReadGroupSource(id execute.DatasetID, r Reader, readSpec ReadGroupSpec, a e
 src.reader = r
 src.readSpec = readSpec
...
 src.runner = src
 return src
}
-@@ -228,8 +199,7 @@ func (s *readGroupSource) run(ctx context.Context) error {
+@@ -235,8 +199,7 @@ func (s *readGroupSource) run(ctx context.Context) error {
}
 
 func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
...
 spec := s.(*ReadGroupPhysSpec)
 
-@@ -238,15 +208,9 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
+@@ -245,15 +208,9 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
 return nil, errors.New("nil bounds passed to from")
 }
...
 if err != nil {
 	return nil, err
 }
-@@ -260,10 +224,10 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
+@@ -267,10 +224,10 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
 deps.Reader,
 ReadGroupSpec{
 	ReadFilterSpec: ReadFilterSpec{
...
 	},
 	GroupMode: ToGroupMode(spec.GroupMode),
 	GroupKeys: spec.GroupKeys,
-@@ -274,18 +238,12 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
+@@ -281,18 +238,12 @@ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute
}
 
 func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
...
 if err != nil {
 	return nil, err
 }
-@@ -301,10 +259,10 @@ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID,
+@@ -308,10 +259,10 @@ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID,
 deps.Reader,
 ReadTagKeysSpec{
 	ReadFilterSpec: ReadFilterSpec{
...
 	},
 },
 a,
-@@ -326,10 +284,6 @@ func ReadTagKeysSource(id execute.DatasetID, r Reader, readSpec ReadTagKeysSpec,
+@@ -333,10 +284,6 @@ func ReadTagKeysSource(id execute.DatasetID, r Reader, readSpec ReadTagKeysSpec,
 src.id = id
 src.alloc = a.Allocator()
...
 src.runner = src
 return src
}
-@@ -343,18 +297,12 @@ func (s *readTagKeysSource) run(ctx context.Context) error {
+@@ -350,18 +297,12 @@ func (s *readTagKeysSource) run(ctx context.Context) error {
}
 
 func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
...
 if err != nil {
 	return nil, err
 }
-@@ -370,10 +318,10 @@ func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID
+@@ -377,10 +318,10 @@ func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID
 deps.Reader,
 ReadTagValuesSpec{
 	ReadFilterSpec: ReadFilterSpec{
...
 	},
 	TagKey: spec.TagKey,
 },
-@@ -396,10 +344,6 @@ func ReadTagValuesSource(id execute.DatasetID, r Reader, readSpec ReadTagValuesS
+@@ -403,10 +344,6 @@ func ReadTagValuesSource(id execute.DatasetID, r Reader, readSpec ReadTagValuesS
 src.id = id
 src.alloc = a.Allocator()
...
 return src
}
diff --git b/flux/stdlib/influxdata/influxdb/source_test.go a/flux/stdlib/influxdata/influxdb/source_test.go
-index 1cdda0f93..daba8c936 100644
+index 1cdda0f935..daba8c9362 100644
--- b/flux/stdlib/influxdata/influxdb/source_test.go
+++ a/flux/stdlib/influxdata/influxdb/source_test.go
@@ -1,131 +1 @@
...
- }
-}
diff --git b/flux/stdlib/influxdata/influxdb/storage.go a/flux/stdlib/influxdata/influxdb/storage.go
-index 4f574fb20..727894907 100644
+index 4f574fb20e..7278949076 100644
--- b/flux/stdlib/influxdata/influxdb/storage.go
+++ a/flux/stdlib/influxdata/influxdb/storage.go
@@ -8,75 +8,33 @@ import (
...
 Bounds execute.Bounds
-
diff --git b/flux/stdlib/influxdata/influxdb/to.go a/flux/stdlib/influxdata/influxdb/to.go
index a70161bf32..6fecaa533a 100644
--- b/flux/stdlib/influxdata/influxdb/to.go
+++ a/flux/stdlib/influxdata/influxdb/to.go
@@ -1,702 +1,4 @@
 package influxdb
 
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sort"
-	"time"
-
-	"github.com/influxdata/flux"
-	"github.com/influxdata/flux/codes"
-	"github.com/influxdata/flux/compiler"
-	"github.com/influxdata/flux/execute"
-	"github.com/influxdata/flux/interpreter"
-	"github.com/influxdata/flux/plan"
-	"github.com/influxdata/flux/semantic"
-	"github.com/influxdata/flux/stdlib/influxdata/influxdb"
-	"github.com/influxdata/flux/stdlib/kafka"
-	"github.com/influxdata/flux/values"
-	platform "github.com/influxdata/influxdb"
-	"github.com/influxdata/influxdb/kit/tracing"
-	"github.com/influxdata/influxdb/models"
-	"github.com/influxdata/influxdb/query"
-	"github.com/influxdata/influxdb/storage"
-	"github.com/influxdata/influxdb/tsdb"
-)
-
-// ToKind is the kind for the `to` flux function
-const ToKind = influxdb.ToKind
-
-// TODO(jlapacik) remove this once we have execute.DefaultFieldColLabel
-const defaultFieldColLabel = "_field"
-const DefaultMeasurementColLabel = "_measurement"
-const DefaultBufferSize = 1 << 14
-
-// ToOpSpec is the flux.OperationSpec for the `to` flux function.
-type ToOpSpec struct {
-	Bucket            string                       `json:"bucket"`
-	BucketID          string                       `json:"bucketID"`
-	Org               string                       `json:"org"`
-	OrgID             string                       `json:"orgID"`
-	Host              string                       `json:"host"`
-	Token             string                       `json:"token"`
-	TimeColumn        string                       `json:"timeColumn"`
-	MeasurementColumn string                       `json:"measurementColumn"`
-	TagColumns        []string                     `json:"tagColumns"`
-	FieldFn           interpreter.ResolvedFunction `json:"fieldFn"`
-}
-
-func init() {
-	toSignature := flux.FunctionSignature(
-		map[string]semantic.PolyType{
-			"bucket":            semantic.String,
-			"bucketID":          semantic.String,
-			"org":               semantic.String,
-			"orgID":             semantic.String,
-			"host":              semantic.String,
-			"token":             semantic.String,
-			"timeColumn":        semantic.String,
-			"measurementColumn": semantic.String,
-			"tagColumns":        semantic.Array,
-			"fieldFn": semantic.NewFunctionPolyType(semantic.FunctionPolySignature{
-				Parameters: map[string]semantic.PolyType{
-					"r": semantic.Tvar(1),
-				},
-				Required: semantic.LabelSet{"r"},
-				Return:   semantic.Tvar(2),
-			}),
-		},
-		[]string{},
-	)
-
-	flux.ReplacePackageValue("influxdata/influxdb", "to", flux.FunctionValueWithSideEffect(ToKind, createToOpSpec, toSignature))
-	flux.RegisterOpSpec(ToKind, func() flux.OperationSpec { return &ToOpSpec{} })
-	plan.RegisterProcedureSpecWithSideEffect(ToKind, newToProcedure, ToKind)
-	execute.RegisterTransformation(ToKind, createToTransformation)
-}
-
-// argsReader is an interface for OperationSpec that have the same method to read args.
-type argsReader interface {
-	flux.OperationSpec
-	ReadArgs(args flux.Arguments) error
-}
-
-// ReadArgs reads the args from flux.Arguments into the op spec
-func (o *ToOpSpec) ReadArgs(args flux.Arguments) error {
-	var err error
-	var ok bool
-
-	if o.Bucket, ok, _ = args.GetString("bucket"); !ok {
-		if o.BucketID, err = args.GetRequiredString("bucketID"); err != nil {
-			return err
-		}
-	} else if o.BucketID, ok, _ = args.GetString("bucketID"); ok {
-		return &flux.Error{
-			Code: codes.Invalid,
-			Msg:  "cannot provide both `bucket` and `bucketID` parameters to the `to` function",
-		}
-	}
-
-	if o.Org, ok, _ = args.GetString("org"); !ok {
-		if o.OrgID, _, err = args.GetString("orgID"); err != nil {
-			return err
-		}
-	} else if o.OrgID, ok, _ = args.GetString("orgID"); ok {
-		return &flux.Error{
-			Code: codes.Invalid,
-			Msg:  "cannot provide both `org` and `orgID` parameters to the `to` function",
-		}
-	}
-
-	if o.Host, ok, _ = args.GetString("host"); ok {
-		if o.Token, err = args.GetRequiredString("token"); err != nil {
-			return err
-		}
-	}
-
-	if o.TimeColumn, ok, _ = args.GetString("timeColumn"); !ok {
-		o.TimeColumn = execute.DefaultTimeColLabel
-	}
-
-	if o.MeasurementColumn, ok, _ = args.GetString("measurementColumn"); !ok {
-		o.MeasurementColumn = DefaultMeasurementColLabel
-	}
-
-	if tags, ok, _ := args.GetArray("tagColumns", semantic.String); ok {
-		o.TagColumns = make([]string, tags.Len())
-		tags.Sort(func(i, j values.Value) bool {
-			return i.Str() < j.Str()
-		})
-		tags.Range(func(i int, v values.Value) {
-			o.TagColumns[i] = v.Str()
-		})
-	}
-
-	if fieldFn, ok, _ := args.GetFunction("fieldFn"); ok {
-		if o.FieldFn, err = interpreter.ResolveFunction(fieldFn); err != nil {
-			return err
-		}
-	}
-
-	return err
-}
diff --git b/flux/stdlib/influxdata/influxdb/v1/databases.go a/flux/stdlib/influxdata/influxdb/v1/databases.go
index 6a6c59a76..1779f411c 100644
--- b/flux/stdlib/influxdata/influxdb/v1/databases.go
+++ a/flux/stdlib/influxdata/influxdb/v1/databases.go
@@ -2,8 +2,8 @@ package v1
 
 import (
 	"context"
+	"errors"
 	"fmt"
-	"time"
 
 	"github.com/influxdata/flux"
 	"github.com/influxdata/flux/execute"
@@ -11,9 +11,9 @@ import (
 	"github.com/influxdata/flux/plan"
 	"github.com/influxdata/flux/stdlib/influxdata/influxdb/v1"
 	"github.com/influxdata/flux/values"
-	platform "github.com/influxdata/influxdb"
-	"github.com/influxdata/influxdb/query"
-	"github.com/pkg/errors"
+	"github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb"
+	"github.com/influxdata/influxdb/services/meta"
+	"github.com/influxdata/influxql"
 )
 
 const DatabasesKind = v1.DatabasesKind
@@ -67,9 +67,9 @@ func init() {
 }
 
 type DatabasesDecoder struct {
-	orgID     platform.ID
-	deps      *DatabasesDependencies
-	databases []*platform.DBRPMapping
+	deps      *influxdb.StorageDependencies
+	databases []meta.DatabaseInfo
+	user      meta.User
 	alloc     *memory.Allocator
 }
 
@@ -78,45 +78,13 @@ func (bd *DatabasesDecoder) Connect(ctx context.Context) error {
 }
 
 func (bd *DatabasesDecoder) Fetch(ctx context.Context) (bool, error) {
-	b, _, err := bd.deps.DBRP.FindMany(ctx, platform.DBRPMappingFilter{})
-	if err != nil {
-		return false, err
-	}
-	bd.databases = b
+	bd.databases = bd.deps.MetaClient.Databases()
 	return false, nil
 }
 
 func (bd *DatabasesDecoder) Decode(ctx context.Context) (flux.Table, error) {
-	type databaseInfo struct {
-		*platform.DBRPMapping
-		RetentionPeriod time.Duration
-	}
-
-	databases := make([]databaseInfo, 0, len(bd.databases))
-	for _, db := range bd.databases {
-		bucket, err := bd.deps.BucketLookup.FindBucketByID(ctx, db.BucketID)
-		if err != nil {
-			code := platform.ErrorCode(err)
-			if code == platform.EUnauthorized || code == platform.EForbidden {
-				continue
-			}
-			return nil, err
-		}
-		databases = append(databases, databaseInfo{
-			DBRPMapping:     db,
-			RetentionPeriod: bucket.RetentionPeriod,
-		})
-	}
-
-	if len(databases) == 0 {
-		return nil, &platform.Error{
-			Code: platform.ENotFound,
-			Msg:  "no 1.x databases found",
-		}
-	}
-
 	kb := execute.NewGroupKeyBuilder(nil)
-	kb.AddKeyValue("organizationID", values.NewString(databases[0].OrganizationID.String()))
+	kb.AddKeyValue("organizationID", values.NewString(""))
 	gk, err := kb.Build()
 	if err != nil {
 		return nil, err
@@ -160,13 +128,29 @@ func (bd *DatabasesDecoder) Decode(ctx context.Context) (flux.Table, error) {
 		return nil, err
 	}
 
-	for _, db := range databases {
-		_ = b.AppendString(0, db.OrganizationID.String())
-		_ = b.AppendString(1, db.Database)
-		_ = b.AppendString(2, db.RetentionPolicy)
-		_ = b.AppendInt(3, db.RetentionPeriod.Nanoseconds())
-		_ = b.AppendBool(4, db.Default)
-		_ = b.AppendString(5, db.BucketID.String())
+	var hasAccess func(db string) bool
+	if bd.user == nil {
+		hasAccess = func(db string) bool {
+			return true
+		}
+	} else {
+		hasAccess = func(db string) bool {
+			return bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.ReadPrivilege, db) == nil ||
+				bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.WritePrivilege, db) == nil
+		}
+	}
+
+	for _, db := range bd.databases {
+		if hasAccess(db.Name) {
+			for _, rp := range db.RetentionPolicies {
+				_ = b.AppendString(0, "")
+				_ = b.AppendString(1, db.Name)
+				_ = b.AppendString(2, rp.Name)
+				_ = b.AppendInt(3, rp.Duration.Nanoseconds())
+				_ = b.AppendBool(4, db.DefaultRetentionPolicy == rp.Name)
+				_ = b.AppendString(5, "")
+			}
+		}
 	}
 
 	return b.Table()
@@ -181,41 +165,14 @@ func createDatabasesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a
 	if !ok {
 		return nil, fmt.Errorf("invalid spec type %T", prSpec)
 	}
-	deps := GetDatabasesDependencies(a.Context())
-	req := query.RequestFromContext(a.Context())
-	if req == nil {
-		return nil, errors.New("missing request on context")
+	deps := influxdb.GetStorageDependencies(a.Context())
+	var user meta.User
+	if deps.AuthEnabled {
+		user = meta.UserFromContext(a.Context())
+		if user == nil {
+			return nil, errors.New("createDatabasesSource: no user")
+		}
 	}
-	orgID := req.OrganizationID
-
-	bd := &DatabasesDecoder{orgID: orgID, deps: &deps, alloc: a.Allocator()}
-
+	bd := &DatabasesDecoder{deps: &deps, alloc: a.Allocator(), user: user}
 	return execute.CreateSourceFromDecoder(bd, dsid, a)
 }
-
-type key int
-
-const dependenciesKey key = iota
-
-type DatabasesDependencies struct {
-	DBRP         platform.DBRPMappingService
-	BucketLookup platform.BucketService
-}
-
-func (d DatabasesDependencies) Inject(ctx context.Context) context.Context {
-	return context.WithValue(ctx, dependenciesKey, d)
-}
-
-func GetDatabasesDependencies(ctx context.Context) DatabasesDependencies {
-	return ctx.Value(dependenciesKey).(DatabasesDependencies)
-}
-
-func (d DatabasesDependencies) Validate() error {
-	if d.DBRP == nil {
-		return errors.New("missing all databases lookup dependency")
-	}
-	if d.BucketLookup == nil {
-		return errors.New("missing buckets lookup dependency")
-	}
-	return nil
-}
diff --git b/patches/flux.patch a/patches/flux.patch
index a35a722a5..743b1ddd2 100644
--- b/patches/flux.patch
+++ a/patches/flux.patch
@@ -0,0 +1,905 @@
+diff -ur a/flux/stdlib/influxdata/influxdb/buckets.go b/flux/stdlib/influxdata/influxdb/buckets.go
+--- a/flux/stdlib/influxdata/influxdb/buckets.go	2019-06-20 14:35:12.000000000 -0500
++++ b/flux/stdlib/influxdata/influxdb/buckets.go	2019-06-25 10:03:53.000000000 -0500
+@@ -1,6 +1,7 @@
+ package influxdb
+ 
+ import (
++	"errors"
+	"fmt"
+ 
+	"github.com/influxdata/flux"
+@@ -9,9 +10,8 @@
+	"github.com/influxdata/flux/plan"
+	"github.com/influxdata/flux/stdlib/influxdata/influxdb"
+	"github.com/influxdata/flux/values"
+-	platform "github.com/influxdata/influxdb"
+-	"github.com/influxdata/influxdb/query"
+-	"github.com/pkg/errors"
++	"github.com/influxdata/influxdb/services/meta"
++	"github.com/influxdata/influxql"
+ )
+ 
+ func init() {
+@@ -19,10 +19,9 @@
+ }
+ 
+ type BucketsDecoder struct {
+-	orgID   platform.ID
+-	deps    BucketDependencies
+-	buckets []*platform.Bucket
+-	alloc   *memory.Allocator
++	deps  BucketDependencies
++	alloc *memory.Allocator
++	user  meta.User
+ }
+ 
+ func (bd *BucketsDecoder) Connect() error {
+@@ -30,17 +29,12 @@
+ }
+ 
+ func (bd *BucketsDecoder) Fetch() (bool, error) {
+-	b, count := bd.deps.FindAllBuckets(bd.orgID)
+-	if count <= 0 {
+-		return false, fmt.Errorf("no buckets found in organization %v", bd.orgID)
+-	}
+-	bd.buckets = b
+	return false, nil
+ }
+ 
+ func (bd *BucketsDecoder) Decode() (flux.Table, error) {
+	kb := execute.NewGroupKeyBuilder(nil)
+-	kb.AddKeyValue("organizationID", values.NewString(bd.buckets[0].OrgID.String()))
++	kb.AddKeyValue("organizationID", values.NewString(""))
+	gk, err := kb.Build()
+	if err != nil {
+		return nil, err
+@@ -48,43 +42,54 @@
+ 
+	b := execute.NewColListTableBuilder(gk, bd.alloc)
+ 
+-	if _, err := b.AddCol(flux.ColMeta{
++	_, _ = b.AddCol(flux.ColMeta{
+		Label: "name",
+		Type:  flux.TString,
+-	}); err != nil {
+-		return nil, err
+-	}
+-	if _, err := b.AddCol(flux.ColMeta{
++	})
++	_, _ = b.AddCol(flux.ColMeta{
+		Label: "id",
+		Type:  flux.TString,
+-	}); err != nil {
+-		return nil, err
+-	}
+-	if _, err := b.AddCol(flux.ColMeta{
++	})
++	_, _ = b.AddCol(flux.ColMeta{
++		Label: "organization",
++		Type:  flux.TString,
++	})
++	_, _ = b.AddCol(flux.ColMeta{
+		Label: "organizationID",
+		Type:  flux.TString,
+-	}); err != nil {
+-		return nil, err
+-	}
+-	if _, err := b.AddCol(flux.ColMeta{
++	})
++	_, _ = b.AddCol(flux.ColMeta{
+		Label: "retentionPolicy",
+		Type:  flux.TString,
+-	}); err != nil {
+-		return nil, err
+-	}
+-	if _, err := b.AddCol(flux.ColMeta{
++	})
++	_, _ = b.AddCol(flux.ColMeta{
+		Label: "retentionPeriod",
+		Type:  flux.TInt,
+-	}); err != nil {
+-		return nil, err
+-	}
++	})
+ 
+-	for _, bucket := range bd.buckets {
+-		_ = b.AppendString(0, bucket.Name)
+-		_ = b.AppendString(1, bucket.ID.String())
+-		_ = b.AppendString(2, bucket.OrgID.String())
+-		_ = b.AppendString(3, bucket.RetentionPolicyName)
+-		_ = b.AppendInt(4, bucket.RetentionPeriod.Nanoseconds())
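++	// A database is visible when the user holds either read or write
++	// privilege on it.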
++	var hasAccess func(db string) bool
++	if bd.user == nil {
++		hasAccess = func(db string) bool {
++			return true
++		}
++	} else {
++		hasAccess = func(db string) bool {
++			return bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.ReadPrivilege, db) == nil ||
++				bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.WritePrivilege, db) == nil
++		}
++	}
++
++	for _, db := range bd.deps.MetaClient.Databases() {
++		if hasAccess(db.Name) {
++			for _, rp := range db.RetentionPolicies {
++				_ = b.AppendString(0, db.Name+"/"+rp.Name)
++				_ = b.AppendString(1, "")
++				_ = b.AppendString(2, "influxdb")
++				_ = b.AppendString(3, "")
++				_ = b.AppendString(4, rp.Name)
++				_ = b.AppendInt(5, rp.Duration.Nanoseconds())
++			}
++		}
+	}
+ 
+	return b.Table()
+@@ -103,25 +108,45 @@
+	// the dependencies used for FromKind are adequate for what we need here
-+@@ -103,25 +108,45 @@
-+ // the dependencies used for FromKind are adequate for what we need here
-+ // so there's no need to inject custom dependencies for buckets()
-+ deps := a.Dependencies()[influxdb.BucketsKind].(BucketDependencies)
-+- req := query.RequestFromContext(a.Context())
-+- if req == nil {
-+- return nil, errors.New("missing request on context")
-++
-++ var user meta.User
-++ if deps.AuthEnabled {
-++ user = meta.UserFromContext(a.Context())
-++ if user == nil {
-++ return nil, errors.New("createBucketsSource: no user")
-++ }
-+ }
-+- orgID := req.OrganizationID
-+
-+- bd := &BucketsDecoder{orgID: orgID, deps: deps, alloc: a.Allocator()}
-++ bd := &BucketsDecoder{deps: deps, alloc: a.Allocator(), user: user}
-+
-+ return execute.CreateSourceFromDecoder(bd, dsid, a)
-++
-++}
-++
-++type MetaClient interface {
-++ Databases() []meta.DatabaseInfo
-++ Database(name string) *meta.DatabaseInfo
-+ }
-+
-+-type AllBucketLookup interface {
-+- FindAllBuckets(orgID platform.ID) ([]*platform.Bucket, int)
-++type BucketDependencies struct {
-++ MetaClient MetaClient
-++ Authorizer Authorizer
-++ AuthEnabled bool
-++}
-++
-++func (d BucketDependencies) Validate() error {
-++ if d.MetaClient == nil {
-++ return errors.New("validate BucketDependencies: missing MetaClient")
-++ }
-++ if d.AuthEnabled && d.Authorizer == nil {
-++ return errors.New("validate BucketDependencies: missing Authorizer")
-++ }
-++ return nil
-+ }
-+-type BucketDependencies AllBucketLookup
-+
-+ func InjectBucketDependencies(depsMap execute.Dependencies, deps BucketDependencies) error {
-+- if deps == nil {
-+- return errors.New("missing all bucket lookup dependency")
-++ if err := deps.Validate(); err != nil {
-++ return err
-+ }
-+ depsMap[influxdb.BucketsKind] = deps
-+ return nil
-+diff -ur a/flux/stdlib/influxdata/influxdb/from.go b/flux/stdlib/influxdata/influxdb/from.go
-+--- a/flux/stdlib/influxdata/influxdb/from.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/flux/stdlib/influxdata/influxdb/from.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -8,7 +8,6 @@
-+ "github.com/influxdata/flux/plan"
-+ "github.com/influxdata/flux/semantic"
-+ "github.com/influxdata/flux/stdlib/influxdata/influxdb"
-+- platform "github.com/influxdata/influxdb"
-+ "github.com/pkg/errors"
-+ )
-+
-+@@ -66,31 +65,6 @@
-+ return FromKind
-+ }
-+
-+-// BucketsAccessed makes FromOpSpec a query.BucketAwareOperationSpec
-+-func (s *FromOpSpec) BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) {
-+- bf := platform.BucketFilter{}
-+- if s.Bucket != "" {
-+- bf.Name = &s.Bucket
-+- }
-+- if orgID != nil {
-+- bf.OrganizationID = orgID
-+- }
-+-
-+- if len(s.BucketID) > 0 {
-+- if id, err := platform.IDFromString(s.BucketID); err != nil {
-+- invalidID := platform.InvalidID()
-+- bf.ID = &invalidID
-+- } else {
-+- bf.ID = id
-+- }
-+- }
-+-
-+- if bf.ID != nil || bf.Name != nil {
-+- readBuckets = append(readBuckets, bf)
-+- }
-+- return readBuckets, writeBuckets
-+-}
-+-
-+ type FromProcedureSpec struct {
-+ Bucket string
-+ BucketID string
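createBucketsSource above only fails when auth is enabled and no user is attached to the request context. A small sketch of that context-based gate, with illustrative names only (the real code uses meta.UserFromContext):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

type ctxKey struct{}

func userFromContext(ctx context.Context) string {
	u, _ := ctx.Value(ctxKey{}).(string)
	return u
}

func createSource(ctx context.Context, authEnabled bool) (string, error) {
	var user string
	if authEnabled {
		if user = userFromContext(ctx); user == "" {
			return "", errors.New("createBucketsSource: no user")
		}
	}
	return user, nil
}

func main() {
	_, err := createSource(context.Background(), true)
	fmt.Println(err) // createBucketsSource: no user

	ctx := context.WithValue(context.Background(), ctxKey{}, "alice")
	user, _ := createSource(ctx, true)
	fmt.Println(user) // alice
}
```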
"github.com/influxdata/influxdb" -++ "github.com/influxdata/influxdb/services/meta" -++ "github.com/influxdata/influxql" -+ ) -+ -+ const ( -+@@ -79,24 +81,43 @@ -+ return ns -+ } -+ -+-func (s *ReadRangePhysSpec) LookupBucketID(ctx context.Context, orgID influxdb.ID, buckets BucketLookup) (influxdb.ID, error) { -+- // Determine bucketID -+- switch { -+- case s.Bucket != "": -+- b, ok := buckets.Lookup(ctx, orgID, s.Bucket) -+- if !ok { -+- return 0, fmt.Errorf("could not find bucket %q", s.Bucket) -++func (s *ReadRangePhysSpec) LookupDatabase(ctx context.Context, deps Dependencies, a execute.Administration) (string, string, error) { -++ if len(s.BucketID) != 0 { -++ return "", "", errors.New("cannot refer to buckets by their id in 1.x") -++ } -++ -++ var db, rp string -++ if i := strings.IndexByte(s.Bucket, '/'); i == -1 { -++ db = s.Bucket -++ } else { -++ rp = s.Bucket[i+1:] -++ db = s.Bucket[:i] -++ } -++ -++ // validate and resolve db/rp -++ di := deps.MetaClient.Database(db) -++ if di == nil { -++ return "", "", errors.New("no database") -++ } -++ -++ if deps.AuthEnabled { -++ user := meta.UserFromContext(a.Context()) -++ if user == nil { -++ return "", "", errors.New("createFromSource: no user") -+ } -+- return b, nil -+- case len(s.BucketID) != 0: -+- var b influxdb.ID -+- if err := b.DecodeFromString(s.BucketID); err != nil { -+- return 0, err -++ if err := deps.Authorizer.AuthorizeDatabase(user, influxql.ReadPrivilege, db); err != nil { -++ return "", "", err -+ } -+- return b, nil -+- default: -+- return 0, errors.New("no bucket name or id have been specified") -+ } -++ -++ if rp == "" { -++ rp = di.DefaultRetentionPolicy -++ } -++ -++ if rpi := di.RetentionPolicy(rp); rpi == nil { -++ return "", "", errors.New("invalid retention policy") -++ } -++ return db, rp, nil -+ } -+ -+ // TimeBounds implements plan.BoundsAwareProcedureSpec. -+diff -ur a/flux/stdlib/influxdata/influxdb/rules.go b/flux/stdlib/influxdata/influxdb/rules.go -+--- a/flux/stdlib/influxdata/influxdb/rules.go 2019-06-20 14:35:12.000000000 -0500 -++++ b/flux/stdlib/influxdata/influxdb/rules.go 2019-06-25 13:49:50.000000000 -0500 -+@@ -190,6 +190,12 @@ -+ // constructing our own replacement. We do not care about it -+ // at the moment though which is why it is not in the pattern. -+ -++ // The tag keys mechanism doesn't know about fields so we cannot -++ // push down _field comparisons in 1.x. -++ if hasFieldExpr(fromSpec.Filter) { -++ return pn, false, nil -++ } -++ -+ // The schema mutator needs to correspond to a keep call -+ // on the column specified by the keys procedure. -+ if len(keepSpec.Mutations) != 1 { -+@@ -219,6 +225,20 @@ -+ }), true, nil -+ } -+ -++func hasFieldExpr(expr semantic.Expression) bool { -++ hasField := false -++ v := semantic.CreateVisitor(func(node semantic.Node) { -++ switch n := node.(type) { -++ case *semantic.MemberExpression: -++ if n.Property == "_field" { -++ hasField = true -++ } -++ } -++ }) -++ semantic.Walk(v, expr) -++ return hasField -++} -++ -+ // PushDownReadTagValuesRule matches 'ReadRange |> keep(columns: [tag]) |> group() |> distinct(column: tag)'. -+ // The 'from()' must have already been merged with 'range' and, optionally, -+ // may have been merged with 'filter'. -+@@ -296,6 +316,9 @@ -+ execute.DefaultValueColLabel, -+ execute.DefaultStartColLabel, -+ execute.DefaultStopColLabel, -++ // TODO(jsternberg): There just doesn't seem to be a good way to do this -++ // in the 1.x line of the release. 
-++ "_field", -+ } -+ -+ // isValidTagKeyForTagValues returns true if the given key can -+diff -ur a/flux/stdlib/influxdata/influxdb/rules_test.go b/flux/stdlib/influxdata/influxdb/rules_test.go -+--- a/flux/stdlib/influxdata/influxdb/rules_test.go 2019-06-20 14:35:12.000000000 -0500 -++++ b/flux/stdlib/influxdata/influxdb/rules_test.go 2019-06-25 13:49:49.000000000 -0500 -+@@ -11,7 +11,7 @@ -+ "github.com/influxdata/flux/plan/plantest" -+ "github.com/influxdata/flux/semantic" -+ "github.com/influxdata/flux/stdlib/universe" -+- "github.com/influxdata/influxdb/query/stdlib/influxdata/influxdb" -++ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" -+ ) -+ -+ func fluxTime(t int64) flux.Time { -+diff -ur a/flux/stdlib/influxdata/influxdb/source.go b/flux/stdlib/influxdata/influxdb/source.go -+--- a/flux/stdlib/influxdata/influxdb/source.go 2019-06-20 14:35:12.000000000 -0500 -++++ b/flux/stdlib/influxdata/influxdb/source.go 2019-06-25 13:49:49.000000000 -0500 -+@@ -9,8 +9,6 @@ -+ "github.com/influxdata/flux/memory" -+ "github.com/influxdata/flux/plan" -+ "github.com/influxdata/flux/semantic" -+- "github.com/influxdata/influxdb/kit/tracing" -+- "github.com/influxdata/influxdb/query" -+ "github.com/influxdata/influxdb/tsdb/cursors" -+ ) -+ -+@@ -131,8 +129,7 @@ -+ } -+ -+ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { -+- span, ctx := tracing.StartSpanFromContext(a.Context()) -+- defer span.Finish() -++ ctx := a.Context() -+ -+ spec := s.(*ReadRangePhysSpec) -+ -+@@ -143,13 +140,7 @@ -+ -+ deps := a.Dependencies()[FromKind].(Dependencies) -+ -+- req := query.RequestFromContext(a.Context()) -+- if req == nil { -+- return nil, errors.New("missing request on context") -+- } -+- -+- orgID := req.OrganizationID -+- bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) -++ db, rp, err := spec.LookupDatabase(ctx, deps, a) -+ if err != nil { -+ return nil, err -+ } -+@@ -162,10 +153,10 @@ -+ id, -+ deps.Reader, -+ ReadFilterSpec{ -+- OrganizationID: orgID, -+- BucketID: bucketID, -+- Bounds: *bounds, -+- Predicate: filter, -++ Database: db, -++ RetentionPolicy: rp, -++ Bounds: *bounds, -++ Predicate: filter, -+ }, -+ a.Allocator(), -+ ), nil -+@@ -204,8 +195,7 @@ -+ } -+ -+ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) { -+- span, ctx := tracing.StartSpanFromContext(a.Context()) -+- defer span.Finish() -++ ctx := a.Context() -+ -+ spec := s.(*ReadGroupPhysSpec) -+ -+@@ -216,13 +206,7 @@ -+ -+ deps := a.Dependencies()[FromKind].(Dependencies) -+ -+- req := query.RequestFromContext(a.Context()) -+- if req == nil { -+- return nil, errors.New("missing request on context") -+- } -+- -+- orgID := req.OrganizationID -+- bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup) -++ db, rp, err := spec.LookupDatabase(ctx, deps, a) -+ if err != nil { -+ return nil, err -+ } -+@@ -236,10 +220,10 @@ -+ deps.Reader, -+ ReadGroupSpec{ -+ ReadFilterSpec: ReadFilterSpec{ -+- OrganizationID: orgID, -+- BucketID: bucketID, -+- Bounds: *bounds, -+- Predicate: filter, -++ Database: db, -++ RetentionPolicy: rp, -++ Bounds: *bounds, -++ Predicate: filter, -+ }, -+ GroupMode: ToGroupMode(spec.GroupMode), -+ GroupKeys: spec.GroupKeys, -+@@ -250,18 +234,12 @@ -+ } -+ -+ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) { -+- span, ctx := 
-+diff -ur a/flux/stdlib/influxdata/influxdb/source.go b/flux/stdlib/influxdata/influxdb/source.go
-+--- a/flux/stdlib/influxdata/influxdb/source.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/flux/stdlib/influxdata/influxdb/source.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -9,8 +9,6 @@
-+ "github.com/influxdata/flux/memory"
-+ "github.com/influxdata/flux/plan"
-+ "github.com/influxdata/flux/semantic"
-+- "github.com/influxdata/influxdb/kit/tracing"
-+- "github.com/influxdata/influxdb/query"
-+ "github.com/influxdata/influxdb/tsdb/cursors"
-+ )
-+
-+@@ -131,8 +129,7 @@
-+ }
-+
-+ func createReadFilterSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
-+- span, ctx := tracing.StartSpanFromContext(a.Context())
-+- defer span.Finish()
-++ ctx := a.Context()
-+
-+ spec := s.(*ReadRangePhysSpec)
-+
-+@@ -143,13 +140,7 @@
-+
-+ deps := a.Dependencies()[FromKind].(Dependencies)
-+
-+- req := query.RequestFromContext(a.Context())
-+- if req == nil {
-+- return nil, errors.New("missing request on context")
-+- }
-+-
-+- orgID := req.OrganizationID
-+- bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
-++ db, rp, err := spec.LookupDatabase(ctx, deps, a)
-+ if err != nil {
-+ return nil, err
-+ }
-+@@ -162,10 +153,10 @@
-+ id,
-+ deps.Reader,
-+ ReadFilterSpec{
-+- OrganizationID: orgID,
-+- BucketID: bucketID,
-+- Bounds: *bounds,
-+- Predicate: filter,
-++ Database: db,
-++ RetentionPolicy: rp,
-++ Bounds: *bounds,
-++ Predicate: filter,
-+ },
-+ a.Allocator(),
-+ ), nil
-+@@ -204,8 +195,7 @@
-+ }
-+
-+ func createReadGroupSource(s plan.ProcedureSpec, id execute.DatasetID, a execute.Administration) (execute.Source, error) {
-+- span, ctx := tracing.StartSpanFromContext(a.Context())
-+- defer span.Finish()
-++ ctx := a.Context()
-+
-+ spec := s.(*ReadGroupPhysSpec)
-+
-+@@ -216,13 +206,7 @@
-+
-+ deps := a.Dependencies()[FromKind].(Dependencies)
-+
-+- req := query.RequestFromContext(a.Context())
-+- if req == nil {
-+- return nil, errors.New("missing request on context")
-+- }
-+-
-+- orgID := req.OrganizationID
-+- bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
-++ db, rp, err := spec.LookupDatabase(ctx, deps, a)
-+ if err != nil {
-+ return nil, err
-+ }
-+@@ -236,10 +220,10 @@
-+ deps.Reader,
-+ ReadGroupSpec{
-+ ReadFilterSpec: ReadFilterSpec{
-+- OrganizationID: orgID,
-+- BucketID: bucketID,
-+- Bounds: *bounds,
-+- Predicate: filter,
-++ Database: db,
-++ RetentionPolicy: rp,
-++ Bounds: *bounds,
-++ Predicate: filter,
-+ },
-+ GroupMode: ToGroupMode(spec.GroupMode),
-+ GroupKeys: spec.GroupKeys,
-+@@ -250,18 +234,12 @@
-+ }
-+
-+ func createReadTagKeysSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
-+- span, ctx := tracing.StartSpanFromContext(a.Context())
-+- defer span.Finish()
-++ ctx := a.Context()
-+
-+ spec := prSpec.(*ReadTagKeysPhysSpec)
-+ deps := a.Dependencies()[FromKind].(Dependencies)
-+- req := query.RequestFromContext(a.Context())
-+- if req == nil {
-+- return nil, errors.New("missing request on context")
-+- }
-+- orgID := req.OrganizationID
-+
-+- bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
-++ db, rp, err := spec.LookupDatabase(ctx, deps, a)
-+ if err != nil {
-+ return nil, err
-+ }
-+@@ -277,10 +255,10 @@
-+ deps.Reader,
-+ ReadTagKeysSpec{
-+ ReadFilterSpec: ReadFilterSpec{
-+- OrganizationID: orgID,
-+- BucketID: bucketID,
-+- Bounds: *bounds,
-+- Predicate: filter,
-++ Database: db,
-++ RetentionPolicy: rp,
-++ Bounds: *bounds,
-++ Predicate: filter,
-+ },
-+ },
-+ a.Allocator(),
-+@@ -314,18 +292,12 @@
-+ }
-+
-+ func createReadTagValuesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
-+- span, ctx := tracing.StartSpanFromContext(a.Context())
-+- defer span.Finish()
-++ ctx := a.Context()
-+
-+ spec := prSpec.(*ReadTagValuesPhysSpec)
-+ deps := a.Dependencies()[FromKind].(Dependencies)
-+- req := query.RequestFromContext(a.Context())
-+- if req == nil {
-+- return nil, errors.New("missing request on context")
-+- }
-+- orgID := req.OrganizationID
-+
-+- bucketID, err := spec.LookupBucketID(ctx, orgID, deps.BucketLookup)
-++ db, rp, err := spec.LookupDatabase(ctx, deps, a)
-+ if err != nil {
-+ return nil, err
-+ }
-+@@ -341,10 +313,10 @@
-+ deps.Reader,
-+ ReadTagValuesSpec{
-+ ReadFilterSpec: ReadFilterSpec{
-+- OrganizationID: orgID,
-+- BucketID: bucketID,
-+- Bounds: *bounds,
-+- Predicate: filter,
-++ Database: db,
-++ RetentionPolicy: rp,
-++ Bounds: *bounds,
-++ Predicate: filter,
-+ },
-+ TagKey: spec.TagKey,
-+ },
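All four create*Source functions above now share the same shape: resolve the bucket to (db, rp) once, then embed the pair in the read spec. A condensed sketch of that shared pattern with illustrative types:

```go
package main

import "fmt"

// readFilterSpec mirrors the new addressing: names, not org/bucket IDs.
type readFilterSpec struct {
	database        string
	retentionPolicy string
}

func newReadFilterSpec(bucket string, lookup func(string) (string, string, error)) (readFilterSpec, error) {
	db, rp, err := lookup(bucket)
	if err != nil {
		return readFilterSpec{}, err
	}
	return readFilterSpec{database: db, retentionPolicy: rp}, nil
}

func main() {
	// A trivial stand-in for LookupDatabase.
	lookup := func(bucket string) (string, string, error) { return bucket, "autogen", nil }
	spec, _ := newReadFilterSpec("telegraf", lookup)
	fmt.Printf("%+v\n", spec) // {database:telegraf retentionPolicy:autogen}
}
```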
-+diff -ur a/flux/stdlib/influxdata/influxdb/storage.go b/flux/stdlib/influxdata/influxdb/storage.go
-+--- a/flux/stdlib/influxdata/influxdb/storage.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/flux/stdlib/influxdata/influxdb/storage.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -8,61 +8,36 @@
-+ "github.com/influxdata/flux/execute"
-+ "github.com/influxdata/flux/memory"
-+ "github.com/influxdata/flux/semantic"
-+- platform "github.com/influxdata/influxdb"
-++ "github.com/influxdata/influxdb/services/meta"
-+ "github.com/influxdata/influxdb/tsdb/cursors"
-++ "github.com/influxdata/influxql"
-+ "github.com/pkg/errors"
-+ )
-+
-+-type HostLookup interface {
-+- Hosts() []string
-+- Watch() <-chan struct{}
-+-}
-+-
-+-type BucketLookup interface {
-+- Lookup(ctx context.Context, orgID platform.ID, name string) (platform.ID, bool)
-+-}
-+-
-+-type OrganizationLookup interface {
-+- Lookup(ctx context.Context, name string) (platform.ID, bool)
-++type Authorizer interface {
-++ AuthorizeDatabase(u meta.User, priv influxql.Privilege, database string) error
-+ }
-+
-+ type Dependencies struct {
-+- Reader Reader
-+- BucketLookup BucketLookup
-+- OrganizationLookup OrganizationLookup
-++ Reader Reader
-++ MetaClient MetaClient
-++ Authorizer Authorizer
-++ AuthEnabled bool
-+ }
-+
-+ func (d Dependencies) Validate() error {
-+ if d.Reader == nil {
-+ return errors.New("missing reader dependency")
-+ }
-+- if d.BucketLookup == nil {
-+- return errors.New("missing bucket lookup dependency")
-++ if d.MetaClient == nil {
-++ return errors.New("missing meta client dependency")
-+ }
-+- if d.OrganizationLookup == nil {
-+- return errors.New("missing organization lookup dependency")
-++ if d.AuthEnabled && d.Authorizer == nil {
-++ return errors.New("validate Dependencies: missing Authorizer")
-+ }
-+ return nil
-+ }
-+
-+-type StaticLookup struct {
-+- hosts []string
-+-}
-+-
-+-func NewStaticLookup(hosts []string) StaticLookup {
-+- return StaticLookup{
-+- hosts: hosts,
-+- }
-+-}
-+-
-+-func (l StaticLookup) Hosts() []string {
-+- return l.hosts
-+-}
-+-func (l StaticLookup) Watch() <-chan struct{} {
-+- // A nil channel always blocks, since hosts never change this is appropriate.
-+- return nil
-+-}
-+-
-+ type GroupMode int
-+
-+ const (
-+@@ -85,8 +60,8 @@
-+ }
-+
-+ type ReadFilterSpec struct {
-+- OrganizationID platform.ID
-+- BucketID platform.ID
-++ Database string
-++ RetentionPolicy string
-+
-+ Bounds execute.Bounds
-+
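Dependencies.Validate only requires an Authorizer when AuthEnabled is set, so unauthenticated deployments can leave it nil. A minimal sketch of wiring one in; the funcAuthorizer adapter below is illustrative, not part of the patch:

```go
package main

import (
	"errors"
	"fmt"
)

type authorizer interface {
	authorizeDatabase(user, db string) error
}

// funcAuthorizer lets a plain function satisfy the interface.
type funcAuthorizer func(user, db string) error

func (f funcAuthorizer) authorizeDatabase(user, db string) error { return f(user, db) }

type dependencies struct {
	auth        authorizer
	authEnabled bool
}

func (d dependencies) validate() error {
	if d.authEnabled && d.auth == nil {
		return errors.New("validate Dependencies: missing Authorizer")
	}
	return nil
}

func main() {
	deps := dependencies{authEnabled: true}
	fmt.Println(deps.validate()) // validate Dependencies: missing Authorizer

	deps.auth = funcAuthorizer(func(user, db string) error { return nil })
	fmt.Println(deps.validate()) // <nil>
}
```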
-+diff -ur a/flux/stdlib/influxdata/influxdb/v1/databases.go b/flux/stdlib/influxdata/influxdb/v1/databases.go
-+--- a/flux/stdlib/influxdata/influxdb/v1/databases.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/flux/stdlib/influxdata/influxdb/v1/databases.go 2019-06-25 10:03:53.000000000 -0500
-+@@ -2,17 +2,19 @@
-+
-+ import (
-+ "context"
-++ "errors"
-+ "fmt"
-+
-+ "github.com/influxdata/flux"
-+ "github.com/influxdata/flux/execute"
-+ "github.com/influxdata/flux/memory"
-+ "github.com/influxdata/flux/plan"
-+- "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1"
-++ v1 "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1"
-+ "github.com/influxdata/flux/values"
-+- platform "github.com/influxdata/influxdb"
-+- "github.com/influxdata/influxdb/query"
-+- "github.com/pkg/errors"
-++ "github.com/influxdata/influxdb/coordinator"
-++ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb"
-++ "github.com/influxdata/influxdb/services/meta"
-++ "github.com/influxdata/influxql"
-+ )
-+
-+ const DatabasesKind = v1.DatabasesKind
-+@@ -66,9 +68,9 @@
-+ }
-+
-+ type DatabasesDecoder struct {
-+- orgID platform.ID
-+- deps *DatabasesDependencies
-+- databases []*platform.DBRPMapping
-++ deps *DatabaseDependencies
-++ databases []meta.DatabaseInfo
-++ user meta.User
-+ alloc *memory.Allocator
-+ ctx context.Context
-+ }
-+@@ -78,20 +80,13 @@
-+ }
-+
-+ func (bd *DatabasesDecoder) Fetch() (bool, error) {
-+- b, _, err := bd.deps.DBRP.FindMany(bd.ctx, platform.DBRPMappingFilter{})
-+- if err != nil {
-+- return false, err
-+- }
-+- bd.databases = b
-++ bd.databases = bd.deps.MetaClient.Databases()
-+ return false, nil
-+ }
-+
-+ func (bd *DatabasesDecoder) Decode() (flux.Table, error) {
-+ kb := execute.NewGroupKeyBuilder(nil)
-+- if len(bd.databases) == 0 {
-+- return nil, errors.New("no 1.x databases found")
-+- }
-+- kb.AddKeyValue("organizationID", values.NewString(bd.databases[0].OrganizationID.String()))
-++ kb.AddKeyValue("organizationID", values.NewString(""))
-+ gk, err := kb.Build()
-+ if err != nil {
-+ return nil, err
-+@@ -136,16 +131,28 @@
-+ return nil, err
-+ }
-+
-++ var hasAccess func(db string) bool
-++ if bd.user == nil {
-++ hasAccess = func(db string) bool {
-++ return true
-++ }
-++ } else {
-++ hasAccess = func(db string) bool {
-++ return bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.ReadPrivilege, db) == nil ||
-++ bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.WritePrivilege, db) == nil
-++ }
-++ }
-++
-+ for _, db := range bd.databases {
-+- if bucket, err := bd.deps.BucketLookup.FindBucketByID(bd.ctx, db.BucketID); err != nil {
-+- return nil, err
-+- } else {
-+- _ = b.AppendString(0, db.OrganizationID.String())
-+- _ = b.AppendString(1, db.Database)
-+- _ = b.AppendString(2, db.RetentionPolicy)
-+- _ = b.AppendInt(3, bucket.RetentionPeriod.Nanoseconds())
-+- _ = b.AppendBool(4, db.Default)
-+- _ = b.AppendString(5, db.BucketID.String())
-++ if hasAccess(db.Name) {
-++ for _, rp := range db.RetentionPolicies {
-++ _ = b.AppendString(0, "")
-++ _ = b.AppendString(1, db.Name)
-++ _ = b.AppendString(2, rp.Name)
-++ _ = b.AppendInt(3, rp.Duration.Nanoseconds())
-++ _ = b.AppendBool(4, db.DefaultRetentionPolicy == rp.Name)
-++ _ = b.AppendString(5, "")
-++ }
-+ }
-+ }
-+
-+@@ -162,34 +169,31 @@
-+ return nil, fmt.Errorf("invalid spec type %T", prSpec)
-+ }
-+
-+- // the dependencies used for FromKind are adequate for what we need here
-+- // so there's no need to inject custom dependencies for databases()
-+- deps := a.Dependencies()[DatabasesKind].(DatabasesDependencies)
-+- req := query.RequestFromContext(a.Context())
-+- if req == nil {
-+- return nil, errors.New("missing request on context")
-++ deps := a.Dependencies()[DatabasesKind].(DatabaseDependencies)
-++ var user meta.User
-++ if deps.AuthEnabled {
-++ user = meta.UserFromContext(a.Context())
-++ if user == nil {
-++ return nil, errors.New("createDatabasesSource: no user")
-++ }
-+ }
-+- orgID := req.OrganizationID
-+-
-+- bd := &DatabasesDecoder{orgID: orgID, deps: &deps, alloc: a.Allocator(), ctx: a.Context()}
-+-
-++ bd := &DatabasesDecoder{deps: &deps, alloc: a.Allocator(), ctx: a.Context(), user: user}
-+ return execute.CreateSourceFromDecoder(bd, dsid, a)
-+ }
-+
-+-type DatabasesDependencies struct {
-+- DBRP platform.DBRPMappingService
-+- BucketLookup platform.BucketService
-++type DatabaseDependencies struct {
-++ MetaClient coordinator.MetaClient
-++ Authorizer influxdb.Authorizer
-++ AuthEnabled bool
-+ }
-+
-+-func InjectDatabasesDependencies(depsMap execute.Dependencies, deps DatabasesDependencies) error {
-+- if deps.DBRP == nil {
-+- return errors.New("missing all databases lookup dependency")
-++func InjectDatabaseDependencies(depsMap execute.Dependencies, deps DatabaseDependencies) error {
-++ if deps.MetaClient == nil {
-++ return errors.New("missing meta client dependency")
-+ }
-+- if deps.BucketLookup == nil {
-+- return errors.New("missing buckets lookup dependency")
-++ if deps.AuthEnabled && deps.Authorizer == nil {
-++ return errors.New("missing authorizer with auth enabled")
-+ }
-+-
-+ depsMap[DatabasesKind] = deps
-+ return nil
-+ }
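Under the 1.x meta client, databases() now emits one row per retention policy, with the default flag set on the database's default policy. A compact sketch of that expansion (the types here are illustrative, not meta.DatabaseInfo):

```go
package main

import "fmt"

type rp struct {
	name     string
	duration int64 // retention period in nanoseconds; 0 means infinite
}

type database struct {
	name      string
	defaultRP string
	rps       []rp
}

func main() {
	dbs := []database{{
		name:      "telegraf",
		defaultRP: "autogen",
		rps:       []rp{{"autogen", 0}, {"weekly", 7 * 24 * 3600 * 1e9}},
	}}
	for _, db := range dbs {
		for _, p := range db.rps {
			fmt.Printf("db=%s rp=%s retentionPeriod=%d default=%t\n",
				db.name, p.name, p.duration, db.defaultRP == p.name)
		}
	}
}
```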
-+diff -ur a/storage/reads/group_resultset.go b/storage/reads/group_resultset.go
-+--- a/storage/reads/group_resultset.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/storage/reads/group_resultset.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -7,7 +7,6 @@
-+ "math"
-+ "sort"
-+
-+- "github.com/influxdata/influxdb/kit/tracing"
-+ "github.com/influxdata/influxdb/models"
-+ "github.com/influxdata/influxdb/storage/reads/datatypes"
-+ "github.com/influxdata/influxdb/tsdb/cursors"
-+@@ -112,16 +111,7 @@
-+ }
-+
-+ func (g *groupResultSet) sort() (int, error) {
-+- span, _ := tracing.StartSpanFromContext(g.ctx)
-+- defer span.Finish()
-+- span.LogKV("group_type", g.req.Group.String())
-+-
-+ n, err := g.sortFn(g)
-+-
-+- if err != nil {
-+- span.LogKV("rows", n)
-+- }
-+-
-+ return n, err
-+ }
-+
-+diff -ur a/storage/reads/group_resultset_test.go b/storage/reads/group_resultset_test.go
-+--- a/storage/reads/group_resultset_test.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/storage/reads/group_resultset_test.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -394,7 +394,7 @@
-+ vals[i] = gen.NewCounterByteSequenceCount(card[i])
-+ }
-+
-+- tags := gen.NewTagsValuesSequenceValues("m0", "f0", "tag", vals)
-++ tags := gen.NewTagsValuesSequenceValues("tag", vals)
-+ rows := make([]reads.SeriesRow, tags.Count())
-+ for i := range rows {
-+ tags.Next()
-+diff -ur a/storage/reads/reader.go b/storage/reads/reader.go
-+--- a/storage/reads/reader.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/storage/reads/reader.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -10,8 +10,8 @@
-+ "github.com/influxdata/flux/execute"
-+ "github.com/influxdata/flux/memory"
-+ "github.com/influxdata/flux/values"
-++ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb"
-+ "github.com/influxdata/influxdb/models"
-+- "github.com/influxdata/influxdb/query/stdlib/influxdata/influxdb"
-+ "github.com/influxdata/influxdb/storage/reads/datatypes"
-+ "github.com/influxdata/influxdb/tsdb/cursors"
-+ )
-+@@ -103,8 +103,8 @@
-+
-+ func (fi *filterIterator) Do(f func(flux.Table) error) error {
-+ src := fi.s.GetSource(
-+- uint64(fi.spec.OrganizationID),
-+- uint64(fi.spec.BucketID),
-++ fi.spec.Database,
-++ fi.spec.RetentionPolicy,
-+ )
-+
-+ // Setup read request
-+@@ -225,8 +225,8 @@
-+
-+ func (gi *groupIterator) Do(f func(flux.Table) error) error {
-+ src := gi.s.GetSource(
-+- uint64(gi.spec.OrganizationID),
-+- uint64(gi.spec.BucketID),
-++ gi.spec.Database,
-++ gi.spec.RetentionPolicy,
-+ )
-+
-+ // Setup read request
-+@@ -504,8 +504,8 @@
-+
-+ func (ti *tagKeysIterator) Do(f func(flux.Table) error) error {
-+ src := ti.s.GetSource(
-+- uint64(ti.readSpec.OrganizationID),
-+- uint64(ti.readSpec.BucketID),
-++ ti.readSpec.Database,
-++ ti.readSpec.RetentionPolicy,
-+ )
-+
-+ var req datatypes.TagKeysRequest
-+@@ -586,8 +586,8 @@
-+
-+ func (ti *tagValuesIterator) Do(f func(flux.Table) error) error {
-+ src := ti.s.GetSource(
-+- uint64(ti.readSpec.OrganizationID),
-+- uint64(ti.readSpec.BucketID),
-++ ti.readSpec.Database,
-++ ti.readSpec.RetentionPolicy,
-+ )
-+
-+ var req datatypes.TagValuesRequest
-+diff -ur a/storage/reads/store.go b/storage/reads/store.go
-+--- a/storage/reads/store.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/storage/reads/store.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -80,5 +80,5 @@
-+ TagKeys(ctx context.Context, req *datatypes.TagKeysRequest) (cursors.StringIterator, error)
-+ TagValues(ctx context.Context, req *datatypes.TagValuesRequest) (cursors.StringIterator, error)
-+
-+- GetSource(orgID, bucketID uint64) proto.Message
-++ GetSource(db, rp string) proto.Message
-+ }
-+diff -ur a/storage/reads/table.go b/storage/reads/table.go
-+--- a/storage/reads/table.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/storage/reads/table.go 2019-06-25 13:49:49.000000000 -0500
-+@@ -1,7 +1,5 @@
-+ package reads
-+
-+-//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@types.tmpldata table.gen.go.tmpl
-+-
-+ import (
-+ "sync/atomic"
-+
-+diff -ur a/tsdb/cursors/gen.go b/tsdb/cursors/gen.go
-+--- a/tsdb/cursors/gen.go 2019-06-20 14:35:12.000000000 -0500
-++++ b/tsdb/cursors/gen.go 2019-06-25 14:00:51.000000000 -0500
-+@@ -1,3 +1 @@
-+ package cursors
-+-
-+-//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@arrayvalues.gen.go.tmpldata arrayvalues.gen.go.tmpl
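The Store contract above now identifies data by database and retention policy instead of org/bucket IDs. A sketch of what an implementation's GetSource might look like; ReadSource here is a stand-in struct, not the real protobuf message the interface returns:

```go
package main

import "fmt"

// ReadSource is an illustrative placeholder for the gRPC source message.
type ReadSource struct {
	Database        string
	RetentionPolicy string
}

type store struct{}

// GetSource mirrors the new signature: GetSource(db, rp string).
func (store) GetSource(db, rp string) *ReadSource {
	return &ReadSource{Database: db, RetentionPolicy: rp}
}

func main() {
	src := store{}.GetSource("telegraf", "autogen")
	fmt.Printf("%+v\n", *src) // {Database:telegraf RetentionPolicy:autogen}
}
```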
"github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" -@@ -112,16 +111,7 @@ func (g *groupResultSet) Next() GroupCursor { - } - - func (g *groupResultSet) sort() (int, error) { -- span, _ := tracing.StartSpanFromContext(g.ctx) -- defer span.Finish() -- span.LogKV("group_type", g.req.Group.String()) - - n, err := g.sortFn(g) +-func createToOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) { +- if err := a.AddParentFromArgs(args); err != nil { +- return nil, err +- } - +- _, httpOK, err := args.GetString("url") - if err != nil { -- span.LogKV("rows", n) +- return nil, err - } - - return n, err - } - -diff --git b/storage/reads/group_resultset_test.go a/storage/reads/group_resultset_test.go -index ee13d1616..eb1fc91fc 100644 ---- b/storage/reads/group_resultset_test.go -+++ a/storage/reads/group_resultset_test.go -@@ -394,7 +394,7 @@ func BenchmarkNewGroupResultSet_GroupBy(b *testing.B) { - vals[i] = gen.NewCounterByteSequenceCount(card[i]) - } - -- tags := gen.NewTagsValuesSequenceValues("m0", "f0", "tag", vals) -+ tags := gen.NewTagsValuesSequenceValues("tag", vals) - rows := make([]reads.SeriesRow, tags.Count()) - for i := range rows { - tags.Next() -diff --git b/storage/reads/helpers_test.go a/storage/reads/helpers_test.go -index d688ae365..ff8698b89 100644 ---- b/storage/reads/helpers_test.go -+++ a/storage/reads/helpers_test.go -@@ -1,169 +1 @@ - package reads_test -- --import ( -- "context" +- _, kafkaOK, err := args.GetString("brokers") +- if err != nil { +- return nil, err +- } - -- "github.com/influxdata/influxdb/models" -- "github.com/influxdata/influxdb/pkg/data/gen" -- "github.com/influxdata/influxdb/storage/reads" -- "github.com/influxdata/influxdb/tsdb" -- "github.com/influxdata/influxdb/tsdb/cursors" --) +- var s argsReader - --type seriesGeneratorCursorIterator struct { -- g gen.SeriesGenerator -- f floatTimeValuesGeneratorCursor -- i integerTimeValuesGeneratorCursor -- u unsignedTimeValuesGeneratorCursor -- s stringTimeValuesGeneratorCursor -- b booleanTimeValuesGeneratorCursor -- cur cursors.Cursor --} -- --func (ci *seriesGeneratorCursorIterator) Next(ctx context.Context, r *cursors.CursorRequest) (cursors.Cursor, error) { -- switch ci.g.FieldType() { -- case models.Float: -- ci.f.tv = ci.g.TimeValuesGenerator() -- ci.cur = &ci.f -- case models.Integer: -- ci.i.tv = ci.g.TimeValuesGenerator() -- ci.cur = &ci.i -- case models.Unsigned: -- ci.u.tv = ci.g.TimeValuesGenerator() -- ci.cur = &ci.u -- case models.String: -- ci.s.tv = ci.g.TimeValuesGenerator() -- ci.cur = &ci.s -- case models.Boolean: -- ci.b.tv = ci.g.TimeValuesGenerator() -- ci.cur = &ci.b +- switch { +- case httpOK && kafkaOK: +- return nil, &flux.Error{ +- Code: codes.Invalid, +- Msg: "specify at most one of url, brokers in the same `to` function", +- } +- case kafkaOK: +- s = &kafka.ToKafkaOpSpec{} - default: -- panic("unreachable") +- s = &ToOpSpec{} - } -- -- return ci.cur, nil --} -- --func (ci *seriesGeneratorCursorIterator) Stats() cursors.CursorStats { -- return ci.cur.Stats() --} -- --type seriesGeneratorSeriesCursor struct { -- ci seriesGeneratorCursorIterator -- r reads.SeriesRow +- if err := s.ReadArgs(args); err != nil { +- return nil, err +- } +- return s, nil -} - --func newSeriesGeneratorSeriesCursor(g gen.SeriesGenerator) *seriesGeneratorSeriesCursor { -- s := &seriesGeneratorSeriesCursor{} -- s.ci.g = g -- s.r.Query = tsdb.CursorIterators{&s.ci} -- return s +-// Kind returns the kind for the ToOpSpec function. 
+-func (ToOpSpec) Kind() flux.OperationKind { +- return ToKind -} - --func (s *seriesGeneratorSeriesCursor) Close() {} --func (s *seriesGeneratorSeriesCursor) Err() error { return nil } -- --func (s *seriesGeneratorSeriesCursor) Next() *reads.SeriesRow { -- if s.ci.g.Next() { -- s.r.SeriesTags = s.ci.g.Tags() -- s.r.Tags = s.ci.g.Tags() -- return &s.r +-// BucketsAccessed returns the buckets accessed by the spec. +-func (o *ToOpSpec) BucketsAccessed(orgID *platform.ID) (readBuckets, writeBuckets []platform.BucketFilter) { +- bf := platform.BucketFilter{} +- if o.Bucket != "" { +- bf.Name = &o.Bucket - } -- return nil +- if o.BucketID != "" { +- id, err := platform.IDFromString(o.BucketID) +- if err == nil { +- bf.ID = id +- } +- } +- if o.Org != "" { +- bf.Org = &o.Org +- } +- if o.OrgID != "" { +- id, err := platform.IDFromString(o.OrgID) +- if err == nil { +- bf.OrganizationID = id +- } +- } +- writeBuckets = append(writeBuckets, bf) +- return readBuckets, writeBuckets -} - --type timeValuesGeneratorCursor struct { -- tv gen.TimeValuesSequence -- stats cursors.CursorStats +-// ToProcedureSpec is the procedure spec for the `to` flux function. +-type ToProcedureSpec struct { +- plan.DefaultCost +- Spec *ToOpSpec -} - --func (t timeValuesGeneratorCursor) Close() {} --func (t timeValuesGeneratorCursor) Err() error { return nil } --func (t timeValuesGeneratorCursor) Stats() cursors.CursorStats { return t.stats } -- --type floatTimeValuesGeneratorCursor struct { -- timeValuesGeneratorCursor -- a tsdb.FloatArray +-// Kind returns the kind for the procedure spec for the `to` flux function. +-func (o *ToProcedureSpec) Kind() plan.ProcedureKind { +- return ToKind -} - --func (c *floatTimeValuesGeneratorCursor) Next() *cursors.FloatArray { -- if c.tv.Next() { -- c.tv.Values().(gen.FloatValues).Copy(&c.a) -- } else { -- c.a.Timestamps = c.a.Timestamps[:0] -- c.a.Values = c.a.Values[:0] +-// Copy clones the procedure spec for `to` flux function. 
+-func (o *ToProcedureSpec) Copy() plan.ProcedureSpec { +- s := o.Spec +- res := &ToProcedureSpec{ +- Spec: &ToOpSpec{ +- Bucket: s.Bucket, +- BucketID: s.BucketID, +- Org: s.Org, +- OrgID: s.OrgID, +- Host: s.Host, +- Token: s.Token, +- TimeColumn: s.TimeColumn, +- MeasurementColumn: s.MeasurementColumn, +- TagColumns: append([]string(nil), s.TagColumns...), +- FieldFn: s.FieldFn.Copy(), +- }, - } -- c.stats.ScannedBytes += len(c.a.Values) * 8 -- c.stats.ScannedValues += c.a.Len() -- return &c.a +- return res -} - --type integerTimeValuesGeneratorCursor struct { -- timeValuesGeneratorCursor -- a tsdb.IntegerArray +-func newToProcedure(qs flux.OperationSpec, a plan.Administration) (plan.ProcedureSpec, error) { +- spec, ok := qs.(*ToOpSpec) +- if !ok && spec != nil { +- return nil, &flux.Error{ +- Code: codes.Internal, +- Msg: fmt.Sprintf("invalid spec type %T", qs), +- } +- } +- return &ToProcedureSpec{Spec: spec}, nil -} - --func (c *integerTimeValuesGeneratorCursor) Next() *cursors.IntegerArray { -- if c.tv.Next() { -- c.tv.Values().(gen.IntegerValues).Copy(&c.a) -- } else { -- c.a.Timestamps = c.a.Timestamps[:0] -- c.a.Values = c.a.Values[:0] +-func createToTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { +- s, ok := spec.(*ToProcedureSpec) +- if !ok { +- return nil, nil, &flux.Error{ +- Code: codes.Internal, +- Msg: fmt.Sprintf("invalid spec type %T", spec), +- } +- } +- cache := execute.NewTableBuilderCache(a.Allocator()) +- d := execute.NewDataset(id, mode, cache) +- deps := GetStorageDependencies(a.Context()).ToDeps +- t, err := NewToTransformation(a.Context(), d, cache, s, deps) +- if err != nil { +- return nil, nil, err - } -- c.stats.ScannedBytes += len(c.a.Values) * 8 -- c.stats.ScannedValues += c.a.Len() -- return &c.a +- return t, d, nil -} - --type unsignedTimeValuesGeneratorCursor struct { -- timeValuesGeneratorCursor -- a tsdb.UnsignedArray +-// ToTransformation is the transformation for the `to` flux function. +-type ToTransformation struct { +- Ctx context.Context +- OrgID platform.ID +- BucketID platform.ID +- d execute.Dataset +- fn *execute.RowMapFn +- cache execute.TableBuilderCache +- spec *ToProcedureSpec +- implicitTagColumns bool +- deps ToDependencies +- buf *storage.BufferedPointsWriter -} - --func (c *unsignedTimeValuesGeneratorCursor) Next() *cursors.UnsignedArray { -- if c.tv.Next() { -- c.tv.Values().(gen.UnsignedValues).Copy(&c.a) -- } else { -- c.a.Timestamps = c.a.Timestamps[:0] -- c.a.Values = c.a.Values[:0] -- } -- c.stats.ScannedBytes += len(c.a.Values) * 8 -- c.stats.ScannedValues += c.a.Len() -- return &c.a +-// RetractTable retracts the table for the transformation for the `to` flux function. +-func (t *ToTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) error { +- return t.d.RetractTable(key) -} - --type stringTimeValuesGeneratorCursor struct { -- timeValuesGeneratorCursor -- a tsdb.StringArray --} -- --func (c *stringTimeValuesGeneratorCursor) Next() *cursors.StringArray { -- if c.tv.Next() { -- c.tv.Values().(gen.StringValues).Copy(&c.a) +-// NewToTransformation returns a new *ToTransformation with the appropriate fields set. 
+-func NewToTransformation(ctx context.Context, d execute.Dataset, cache execute.TableBuilderCache, toSpec *ToProcedureSpec, deps ToDependencies) (x *ToTransformation, err error) { +- var fn *execute.RowMapFn +- //var err error +- spec := toSpec.Spec +- var bucketID, orgID *platform.ID +- if spec.FieldFn.Fn != nil { +- if fn, err = execute.NewRowMapFn(spec.FieldFn.Fn, compiler.ToScope(spec.FieldFn.Scope)); err != nil { +- return nil, err +- } +- } +- // Get organization ID +- if spec.Org != "" { +- oID, ok := deps.OrganizationLookup.Lookup(ctx, spec.Org) +- if !ok { +- return nil, &flux.Error{ +- Code: codes.NotFound, +- Msg: fmt.Sprintf("failed to look up organization %q", spec.Org), +- } +- } +- orgID = &oID +- } else if spec.OrgID != "" { +- if orgID, err = platform.IDFromString(spec.OrgID); err != nil { +- return nil, err +- } - } else { -- c.a.Timestamps = c.a.Timestamps[:0] -- c.a.Values = c.a.Values[:0] +- // No org or orgID provided as an arg, use the orgID from the context +- req := query.RequestFromContext(ctx) +- if req == nil { +- return nil, errors.New("missing request on context") +- } +- orgID = &req.OrganizationID +- } +- +- // Get bucket ID +- if spec.Bucket != "" { +- bID, ok := deps.BucketLookup.Lookup(ctx, *orgID, spec.Bucket) +- if !ok { +- return nil, &flux.Error{ +- Code: codes.NotFound, +- Msg: fmt.Sprintf("failed to look up bucket %q in org %q", spec.Bucket, spec.Org), +- } +- } +- bucketID = &bID +- } else if bucketID, err = platform.IDFromString(spec.BucketID); err != nil { +- return nil, &flux.Error{ +- Code: codes.Invalid, +- Msg: "invalid bucket id", +- Err: err, +- } - } -- for _, v := range c.a.Values { -- c.stats.ScannedBytes += len(v) +- if orgID == nil || bucketID == nil { +- return nil, &flux.Error{ +- Code: codes.Unknown, +- Msg: "You must specify org and bucket", +- } - } -- c.stats.ScannedValues += c.a.Len() -- return &c.a +- return &ToTransformation{ +- Ctx: ctx, +- OrgID: *orgID, +- BucketID: *bucketID, +- d: d, +- fn: fn, +- cache: cache, +- spec: toSpec, +- implicitTagColumns: spec.TagColumns == nil, +- deps: deps, +- buf: storage.NewBufferedPointsWriter(DefaultBufferSize, deps.PointsWriter), +- }, nil -} - --type booleanTimeValuesGeneratorCursor struct { -- timeValuesGeneratorCursor -- a tsdb.BooleanArray +-// Process does the actual work for the ToTransformation. +-func (t *ToTransformation) Process(id execute.DatasetID, tbl flux.Table) error { +- if t.implicitTagColumns { +- +- // If no tag columns are specified, by default we exclude +- // _field and _value from being tag columns. +- excludeColumns := map[string]bool{ +- execute.DefaultValueColLabel: true, +- defaultFieldColLabel: true, +- } +- +- // If a field function is specified then we exclude any column that +- // is referenced in the function expression from being a tag column. +- if t.spec.Spec.FieldFn.Fn != nil { +- recordParam := t.spec.Spec.FieldFn.Fn.Block.Parameters.List[0].Key.Name +- exprNode := t.spec.Spec.FieldFn.Fn +- colVisitor := newFieldFunctionVisitor(recordParam, tbl.Cols()) +- +- // Walk the field function expression and record which columns +- // are referenced. None of these columns will be used as tag columns. 
+- semantic.Walk(colVisitor, exprNode) +- excludeColumns = colVisitor.captured +- } +- +- addTagsFromTable(t.spec.Spec, tbl, excludeColumns) +- } +- return writeTable(t.Ctx, t, tbl) -} - --func (c *booleanTimeValuesGeneratorCursor) Next() *cursors.BooleanArray { -- if c.tv.Next() { -- c.tv.Values().(gen.BooleanValues).Copy(&c.a) -- } else { -- c.a.Timestamps = c.a.Timestamps[:0] -- c.a.Values = c.a.Values[:0] -- } -- c.stats.ScannedBytes += len(c.a.Values) -- c.stats.ScannedValues += c.a.Len() -- return &c.a --} -diff --git b/storage/reads/reader.go a/storage/reads/reader.go -index f9765dbe1..38acd4157 100644 ---- b/storage/reads/reader.go -+++ a/storage/reads/reader.go -@@ -10,8 +10,8 @@ import ( - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/memory" - "github.com/influxdata/flux/values" -+ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/models" -- "github.com/influxdata/influxdb/query/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" - ) -@@ -103,8 +103,8 @@ func (fi *filterIterator) Statistics() cursors.CursorStats { return fi.stats } - - func (fi *filterIterator) Do(f func(flux.Table) error) error { - src := fi.s.GetSource( -- uint64(fi.spec.OrganizationID), -- uint64(fi.spec.BucketID), -+ fi.spec.Database, -+ fi.spec.RetentionPolicy, - ) - - // Setup read request -@@ -225,8 +225,8 @@ func (gi *groupIterator) Statistics() cursors.CursorStats { return gi.stats } - - func (gi *groupIterator) Do(f func(flux.Table) error) error { - src := gi.s.GetSource( -- uint64(gi.spec.OrganizationID), -- uint64(gi.spec.BucketID), -+ gi.spec.Database, -+ gi.spec.RetentionPolicy, - ) - - // Setup read request -@@ -504,8 +504,8 @@ type tagKeysIterator struct { - - func (ti *tagKeysIterator) Do(f func(flux.Table) error) error { - src := ti.s.GetSource( -- uint64(ti.readSpec.OrganizationID), -- uint64(ti.readSpec.BucketID), -+ ti.readSpec.Database, -+ ti.readSpec.RetentionPolicy, - ) - - var req datatypes.TagKeysRequest -@@ -586,8 +586,8 @@ type tagValuesIterator struct { - - func (ti *tagValuesIterator) Do(f func(flux.Table) error) error { - src := ti.s.GetSource( -- uint64(ti.readSpec.OrganizationID), -- uint64(ti.readSpec.BucketID), -+ ti.readSpec.Database, -+ ti.readSpec.RetentionPolicy, - ) - - var req datatypes.TagValuesRequest -diff --git b/storage/reads/reader_test.go a/storage/reads/reader_test.go -index 4d660b3ff..8cc72783a 100644 ---- b/storage/reads/reader_test.go -+++ a/storage/reads/reader_test.go -@@ -9,8 +9,8 @@ import ( - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/memory" -+ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/mock" -- "github.com/influxdata/influxdb/query/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/storage/reads" - "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" -diff --git b/storage/reads/response_writer_test.go a/storage/reads/response_writer_test.go -index 0916c822b..0abaf7544 100644 ---- b/storage/reads/response_writer_test.go -+++ a/storage/reads/response_writer_test.go -@@ -1,21 +1,12 @@ - package reads_test - - import ( -- "context" -- "errors" - "fmt" - "reflect" -- "strings" - "testing" -- "time" - -- "github.com/influxdata/influxdb" - "github.com/influxdata/influxdb/mock" -- 
"github.com/influxdata/influxdb/pkg/data/gen" -- "github.com/influxdata/influxdb/pkg/testing/assert" - "github.com/influxdata/influxdb/storage/reads" -- "github.com/influxdata/influxdb/storage/reads/datatypes" -- "github.com/influxdata/influxdb/tsdb" - "github.com/influxdata/influxdb/tsdb/cursors" - "google.golang.org/grpc/metadata" - ) -@@ -132,403 +123,3 @@ func TestResponseWriter_WriteGroupResultSet_Stats(t *testing.T) { - t.Errorf("expected scanned-bytes '%v' but got '%v'", []string{fmt.Sprint(scannedBytes)}, gotTrailer.Get("scanned-bytes")) - } - } +-// fieldFunctionVisitor implements semantic.Visitor. +-// fieldFunctionVisitor is used to walk the the field function expression +-// of the `to` operation and to record all referenced columns. This visitor +-// is only used when no tag columns are provided as input to the `to` func. +-type fieldFunctionVisitor struct { +- columns map[string]bool +- visited map[semantic.Node]bool +- captured map[string]bool +- rowParam string +-} - --var ( -- org = influxdb.ID(0xff00ff00) -- bucket = influxdb.ID(0xcc00cc00) -- orgBucketID = tsdb.EncodeName(org, bucket) --) +-func newFieldFunctionVisitor(rowParam string, cols []flux.ColMeta) *fieldFunctionVisitor { +- columns := make(map[string]bool, len(cols)) +- for _, col := range cols { +- columns[col.Label] = true +- } +- return &fieldFunctionVisitor{ +- columns: columns, +- visited: make(map[semantic.Node]bool, len(cols)), +- captured: make(map[string]bool, len(cols)), +- rowParam: rowParam, +- } +-} - --func makeTypedSeries(m, prefix, field string, val interface{}, valueCount int, counts ...int) gen.SeriesGenerator { -- spec := gen.TimeSequenceSpec{Count: valueCount, Start: time.Unix(0, 0), Delta: time.Second} -- ts := gen.NewTimestampSequenceFromSpec(spec) -- var vg gen.TimeValuesSequence -- switch val := val.(type) { -- case float64: -- vg = gen.NewTimeFloatValuesSequence(spec.Count, ts, gen.NewFloatConstantValuesSequence(val)) -- case int64: -- vg = gen.NewTimeIntegerValuesSequence(spec.Count, ts, gen.NewIntegerConstantValuesSequence(val)) -- case int: -- vg = gen.NewTimeIntegerValuesSequence(spec.Count, ts, gen.NewIntegerConstantValuesSequence(int64(val))) -- case uint64: -- vg = gen.NewTimeUnsignedValuesSequence(spec.Count, ts, gen.NewUnsignedConstantValuesSequence(val)) -- case string: -- vg = gen.NewTimeStringValuesSequence(spec.Count, ts, gen.NewStringConstantValuesSequence(val)) -- case bool: -- vg = gen.NewTimeBooleanValuesSequence(spec.Count, ts, gen.NewBooleanConstantValuesSequence(val)) -- default: -- panic(fmt.Sprintf("unexpected type %T", val)) -- } -- -- return gen.NewSeriesGenerator(orgBucketID, []byte(field), vg, gen.NewTagsValuesSequenceCounts(m, field, prefix, counts)) --} -- --type sendSummary struct { -- groupCount int -- seriesCount int -- floatCount int -- integerCount int -- unsignedCount int -- stringCount int -- booleanCount int --} -- --func (ss *sendSummary) makeSendFunc() func(*datatypes.ReadResponse) error { -- return func(r *datatypes.ReadResponse) error { -- for i := range r.Frames { -- d := r.Frames[i].Data -- switch p := d.(type) { -- case *datatypes.ReadResponse_Frame_FloatPoints: -- ss.floatCount += len(p.FloatPoints.Values) -- case *datatypes.ReadResponse_Frame_IntegerPoints: -- ss.integerCount += len(p.IntegerPoints.Values) -- case *datatypes.ReadResponse_Frame_UnsignedPoints: -- ss.unsignedCount += len(p.UnsignedPoints.Values) -- case *datatypes.ReadResponse_Frame_StringPoints: -- ss.stringCount += len(p.StringPoints.Values) -- case 
*datatypes.ReadResponse_Frame_BooleanPoints: -- ss.booleanCount += len(p.BooleanPoints.Values) -- case *datatypes.ReadResponse_Frame_Series: -- ss.seriesCount++ -- case *datatypes.ReadResponse_Frame_Group: -- ss.groupCount++ -- default: -- panic("unexpected") +-// A field function is of the form `(r) => { Function Body }`, and it returns an object +-// mapping field keys to values for each row r of the input. Visit records every column +-// that is referenced in `Function Body`. These columns are either directly or indirectly +-// used as value columns and as such need to be recorded so as not to be used as tag columns. +-func (v *fieldFunctionVisitor) Visit(node semantic.Node) semantic.Visitor { +- if v.visited[node] { +- return v +- } +- if member, ok := node.(*semantic.MemberExpression); ok { +- if obj, ok := member.Object.(*semantic.IdentifierExpression); ok { +- if obj.Name == v.rowParam && v.columns[member.Property] { +- v.captured[member.Property] = true - } - } -- return nil - } +- v.visited[node] = true +- return v -} - --func TestResponseWriter_WriteResultSet(t *testing.T) { -- t.Run("normal", func(t *testing.T) { -- t.Run("all types one series each", func(t *testing.T) { -- exp := sendSummary{ -- seriesCount: 5, -- floatCount: 500, -- integerCount: 400, -- unsignedCount: 300, -- stringCount: 200, -- booleanCount: 100, -- } -- var ss sendSummary +-func (v *fieldFunctionVisitor) Done(node semantic.Node) {} +- +-func addTagsFromTable(spec *ToOpSpec, table flux.Table, exclude map[string]bool) { +- if cap(spec.TagColumns) < len(table.Cols()) { +- spec.TagColumns = make([]string, 0, len(table.Cols())) +- } else { +- spec.TagColumns = spec.TagColumns[:0] +- } +- for _, column := range table.Cols() { +- if column.Type == flux.TString && !exclude[column.Label] { +- spec.TagColumns = append(spec.TagColumns, column.Label) +- } +- } +- sort.Strings(spec.TagColumns) +-} - -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) +-// UpdateWatermark updates the watermark for the transformation for the `to` flux function. +-func (t *ToTransformation) UpdateWatermark(id execute.DatasetID, pt execute.Time) error { +- return t.d.UpdateWatermark(pt) +-} - -- var gens []gen.SeriesGenerator +-// UpdateProcessingTime updates the processing time for the transformation for the `to` flux function. +-func (t *ToTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { +- return t.d.UpdateProcessingTime(pt) +-} - -- gens = append(gens, makeTypedSeries("m0", "t", "ff", 3.3, exp.floatCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "if", 100, exp.integerCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "uf", uint64(25), exp.unsignedCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "sf", "foo", exp.stringCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "bf", false, exp.booleanCount, 1)) +-// Finish is called after the `to` flux function's transformation is done processing. +-func (t *ToTransformation) Finish(id execute.DatasetID, err error) { +- if err == nil { +- err = t.buf.Flush(t.Ctx) +- } +- t.d.Finish(err) +-} - -- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- err := w.WriteResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +-// ToDependencies contains the dependencies for executing the `to` function. 
+-type ToDependencies struct { +- BucketLookup BucketLookup +- OrganizationLookup OrganizationLookup +- PointsWriter storage.PointsWriter +-} - -- assert.Equal(t, ss, exp) -- }) -- t.Run("multi-series floats", func(t *testing.T) { -- exp := sendSummary{ -- seriesCount: 5, -- floatCount: 8600, -- } -- var ss sendSummary -- -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) -- -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "f0", 3.3, 2000, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f1", 5.3, 1500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f2", 5.3, 2500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f3", -2.2, 900, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f4", -9.2, 1700, 1)) -- -- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- err := w.WriteResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +-// Validate returns an error if any required field is unset. +-func (d ToDependencies) Validate() error { +- if d.BucketLookup == nil { +- return errors.New("missing bucket lookup dependency") +- } +- if d.OrganizationLookup == nil { +- return errors.New("missing organization lookup dependency") +- } +- if d.PointsWriter == nil { +- return errors.New("missing points writer dependency") +- } +- return nil +-} - -- assert.Equal(t, ss, exp) -- }) +-type Stats struct { +- NRows int +- Latest time.Time +- Earliest time.Time +- NFields int +- NTags int +-} - -- t.Run("multi-series strings", func(t *testing.T) { -- exp := sendSummary{ -- seriesCount: 4, -- stringCount: 6900, -- } -- var ss sendSummary -- -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) -- -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 1500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 300), 2500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s3", strings.Repeat("ddd", 200), 900, 1)) -- -- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- err := w.WriteResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +-func (s Stats) Update(o Stats) { +- s.NRows += o.NRows +- if s.Latest.IsZero() || o.Latest.Unix() > s.Latest.Unix() { +- s.Latest = o.Latest +- } - -- assert.Equal(t, ss, exp) -- }) +- if s.Earliest.IsZero() || o.Earliest.Unix() < s.Earliest.Unix() { +- s.Earliest = o.Earliest +- } - -- t.Run("writer doesn't send series with no values", func(t *testing.T) { -- exp := sendSummary{ -- seriesCount: 2, -- stringCount: 3700, -- } -- var ss sendSummary -- -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) -- -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 0, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 100), 1700, 1)) -- cur := 
newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) -- -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- err := w.WriteResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +- if o.NFields > s.NFields { +- s.NFields = o.NFields +- } - -- assert.Equal(t, ss, exp) -- }) -- }) +- if o.NTags > s.NTags { +- s.NTags = o.NTags +- } +-} - -- t.Run("error conditions", func(t *testing.T) { -- t.Run("writer returns stream error", func(t *testing.T) { -- exp := errors.New("no write") +-func writeTable(ctx context.Context, t *ToTransformation, tbl flux.Table) (err error) { +- span, ctx := tracing.StartSpanFromContext(ctx) +- defer span.Finish() - -- stream := mock.NewResponseStream() -- stream.SendFunc = func(r *datatypes.ReadResponse) error { return exp } -- w := reads.NewResponseWriter(stream, 0) +- spec := t.spec.Spec - -- cur := newSeriesGeneratorSeriesCursor(makeTypedSeries("m0", "t", "f0", strings.Repeat("0", 1000), 2000, 1)) -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- _ = w.WriteResultSet(rs) -- assert.Equal(t, w.Err(), exp) -- }) -- }) +- // cache tag columns +- columns := tbl.Cols() +- isTag := make([]bool, len(columns)) +- for i, col := range columns { +- tagIdx := sort.SearchStrings(spec.TagColumns, col.Label) +- isTag[i] = tagIdx < len(spec.TagColumns) && spec.TagColumns[tagIdx] == col.Label +- } +- // do time +- timeColLabel := spec.TimeColumn +- timeColIdx := execute.ColIdx(timeColLabel, columns) - -- t.Run("issues", func(t *testing.T) { -- t.Run("short write", func(t *testing.T) { -- t.Run("single string series", func(t *testing.T) { -- exp := sendSummary{seriesCount: 1, stringCount: 1020} -- var ss sendSummary +- if timeColIdx < 0 { +- return &flux.Error{ +- Code: codes.Invalid, +- Msg: "no time column detected", +- } +- } +- if columns[timeColIdx].Type != flux.TTime { +- return &flux.Error{ +- Code: codes.Invalid, +- Msg: fmt.Sprintf("column %s of type %s is not of type %s", timeColLabel, columns[timeColIdx].Type, flux.TTime), +- } +- } - -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) +- // prepare field function if applicable and record the number of values to write per row +- if spec.FieldFn.Fn != nil { +- if err = t.fn.Prepare(columns); err != nil { +- return err +- } - -- cur := newSeriesGeneratorSeriesCursor(makeTypedSeries("m0", "t", "f0", strings.Repeat("0", 1000), exp.stringCount, 1)) -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- err := w.WriteResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +- } - -- assert.Equal(t, ss, exp) -- }) +- builder, new := t.cache.TableBuilder(tbl.Key()) +- if new { +- if err := execute.AddTableCols(tbl, builder); err != nil { +- return err +- } +- } - -- t.Run("single float series", func(t *testing.T) { -- exp := sendSummary{seriesCount: 1, floatCount: 50500} -- var ss sendSummary +- measurementStats := make(map[string]Stats) +- measurementName := "" +- return tbl.Do(func(er flux.ColReader) error { +- var pointTime time.Time +- var points models.Points +- var tags models.Tags +- kv := make([][]byte, 2, er.Len()*2+2) // +2 for field key, value +- var fieldValues values.Object +- for i := 0; i < er.Len(); i++ { +- measurementName = "" +- fields := make(models.Fields) +- // leave space for measurement key, value at start, in an effort to 
+- // keep kv sorted +- kv = kv[:2] +- // Gather the timestamp and the tags. +- for j, col := range er.Cols() { +- switch { +- case col.Label == spec.MeasurementColumn: +- measurementName = string(er.Strings(j).Value(i)) +- kv[0] = models.MeasurementTagKeyBytes +- kv[1] = er.Strings(j).Value(i) +- case col.Label == timeColLabel: +- pointTime = execute.ValueForRow(er, i, j).Time().Time() +- case isTag[j]: +- if col.Type != flux.TString { +- return errors.New("invalid type for tag column") +- } +- // TODO(docmerlin): instead of doing this sort of thing, it would be nice if we had a way that allocated a lot less. +- kv = append(kv, []byte(col.Label), er.Strings(j).Value(i)) +- } +- } - -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) +- if pointTime.IsZero() { +- return &flux.Error{ +- Code: codes.Invalid, +- Msg: "timestamp missing from block", +- } +- } - -- cur := newSeriesGeneratorSeriesCursor(makeTypedSeries("m0", "t", "f0", 5.5, exp.floatCount, 1)) -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- err := w.WriteResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) +- if measurementName == "" { +- return &flux.Error{ +- Code: codes.Invalid, +- Msg: fmt.Sprintf("no column with label %s exists", spec.MeasurementColumn), +- } +- } +- +- if spec.FieldFn.Fn == nil { +- if fieldValues, err = defaultFieldMapping(er, i); err != nil { +- return err - } -- w.Flush() +- } else if fieldValues, err = t.fn.Eval(t.Ctx, i, er); err != nil { +- return err +- } - -- assert.Equal(t, ss, exp) +- fieldValues.Range(func(k string, v values.Value) { +- if v.IsNull() { +- fields[k] = nil +- return +- } +- switch v.Type() { +- case semantic.Float: +- fields[k] = v.Float() +- case semantic.Int: +- fields[k] = v.Int() +- case semantic.UInt: +- fields[k] = v.UInt() +- case semantic.String: +- fields[k] = v.Str() +- case semantic.Time: +- fields[k] = v.Time() +- case semantic.Bool: +- fields[k] = v.Bool() +- } - }) - -- t.Run("multi series", func(t *testing.T) { -- exp := sendSummary{seriesCount: 2, stringCount: 3700} -- var ss sendSummary +- mstats := Stats{ +- NRows: 1, +- Latest: pointTime, +- Earliest: pointTime, +- NFields: len(fields), +- NTags: len(kv) / 2, +- } +- _, ok := measurementStats[measurementName] +- if !ok { +- measurementStats[measurementName] = mstats +- } else { +- measurementStats[measurementName].Update(mstats) +- } - -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) +- name := tsdb.EncodeNameString(t.OrgID, t.BucketID) +- +- fieldNames := make([]string, 0, len(fields)) +- for k := range fields { +- fieldNames = append(fieldNames, k) +- } +- sort.Strings(fieldNames) - -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 1000), 2200, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 1000), 1500, 1)) +- for _, k := range fieldNames { +- v := fields[k] +- // append field tag key and field key +- kvf := append(kv, models.FieldKeyTagKeyBytes, []byte(k)) +- tags, _ = models.NewTagsKeyValues(tags, kvf...) 
- -- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) -- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) -- err := w.WriteResultSet(rs) +- pt, err := models.NewPoint(name, tags, models.Fields{k: v}, pointTime) - if err != nil { -- t.Fatalf("unexpected err: %v", err) +- return err - } -- w.Flush() +- points = append(points, pt) +- } - -- assert.Equal(t, ss, exp) -- }) -- }) +- if err := execute.AppendRecord(i, er, builder); err != nil { +- return err +- } +- } +- +- return t.buf.WritePoints(ctx, points) - }) -} - --func TestResponseWriter_WriteGroupResultSet(t *testing.T) { -- t.Run("normal", func(t *testing.T) { -- t.Run("all types one series each", func(t *testing.T) { -- exp := sendSummary{ -- groupCount: 1, -- seriesCount: 5, -- floatCount: 500, -- integerCount: 400, -- unsignedCount: 300, -- stringCount: 200, -- booleanCount: 100, -- } -- var ss sendSummary -- -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) -- -- newCursor := func() (cursor reads.SeriesCursor, e error) { -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "ff", 3.3, exp.floatCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "if", 100, exp.integerCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "uf", uint64(25), exp.unsignedCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "sf", "foo", exp.stringCount, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "bf", false, exp.booleanCount, 1)) -- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil -- } +-func defaultFieldMapping(er flux.ColReader, row int) (values.Object, error) { +- fieldColumnIdx := execute.ColIdx(defaultFieldColLabel, er.Cols()) +- valueColumnIdx := execute.ColIdx(execute.DefaultValueColLabel, er.Cols()) - -- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) -- err := w.WriteGroupResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +- if fieldColumnIdx < 0 { +- return nil, &flux.Error{ +- Code: codes.Invalid, +- Msg: "table has no _field column", +- } +- } - -- assert.Equal(t, ss, exp) -- }) -- t.Run("multi-series floats", func(t *testing.T) { -- exp := sendSummary{ -- groupCount: 1, -- seriesCount: 5, -- floatCount: 8600, -- } -- var ss sendSummary -- -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) -- -- newCursor := func() (cursor reads.SeriesCursor, e error) { -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "f0", 3.3, 2000, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f1", 5.3, 1500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f2", 5.3, 2500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f3", -2.2, 900, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "f4", -9.2, 1700, 1)) -- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil -- } +- if valueColumnIdx < 0 { +- return nil, &flux.Error{ +- Code: codes.Invalid, +- Msg: "table has no _value column", +- } +- } - -- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) -- err := w.WriteGroupResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +- value := execute.ValueForRow(er, row, valueColumnIdx) - -- 
assert.Equal(t, ss, exp) -- }) +- fieldValueMapping := values.NewObject() +- field := execute.ValueForRow(er, row, fieldColumnIdx) +- fieldValueMapping.Set(field.Str(), value) - -- t.Run("multi-series strings", func(t *testing.T) { -- exp := sendSummary{ -- groupCount: 1, -- seriesCount: 4, -- stringCount: 6900, -- } -- var ss sendSummary -- -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) -- -- newCursor := func() (cursor reads.SeriesCursor, e error) { -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 1500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 300), 2500, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s3", strings.Repeat("ddd", 200), 900, 1)) -- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil -- } +- return fieldValueMapping, nil +-} ++// TODO(jsternberg): Implement the to method in influxdb 1.x. ++// This file is kept around so it shows up in the patch. +diff --git b/flux/stdlib/influxdata/influxdb/to_test.go a/flux/stdlib/influxdata/influxdb/to_test.go +index 8afc9128a8..daba8c9362 100644 +--- b/flux/stdlib/influxdata/influxdb/to_test.go ++++ a/flux/stdlib/influxdata/influxdb/to_test.go +@@ -1,853 +1 @@ + package influxdb_test - -- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) -- err := w.WriteGroupResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +-import ( +- "context" +- "fmt" +- "testing" - -- assert.Equal(t, ss, exp) -- }) +- "github.com/google/go-cmp/cmp" +- "github.com/influxdata/flux" +- "github.com/influxdata/flux/ast" +- "github.com/influxdata/flux/dependencies/dependenciestest" +- "github.com/influxdata/flux/execute" +- "github.com/influxdata/flux/execute/executetest" +- "github.com/influxdata/flux/interpreter" +- "github.com/influxdata/flux/querytest" +- "github.com/influxdata/flux/semantic" +- "github.com/influxdata/flux/values/valuestest" +- platform "github.com/influxdata/influxdb" +- "github.com/influxdata/influxdb/mock" +- "github.com/influxdata/influxdb/models" +- _ "github.com/influxdata/influxdb/query/builtin" +- pquerytest "github.com/influxdata/influxdb/query/querytest" +- "github.com/influxdata/influxdb/query/stdlib/influxdata/influxdb" +- "github.com/influxdata/influxdb/tsdb" +-) - -- t.Run("writer doesn't send series with no values", func(t *testing.T) { -- exp := sendSummary{ -- groupCount: 1, -- seriesCount: 2, -- stringCount: 3700, -- } -- var ss sendSummary -- -- stream := mock.NewResponseStream() -- stream.SendFunc = ss.makeSendFunc() -- w := reads.NewResponseWriter(stream, 0) -- -- newCursor := func() (cursor reads.SeriesCursor, e error) { -- var gens []gen.SeriesGenerator -- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 0, 1)) -- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 100), 1700, 1)) -- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil -- } +-func TestTo_Query(t *testing.T) { +- tests := []querytest.NewQueryTestCase{ +- { +- Name: "from with database with range", +- Raw: `from(bucket:"mydb") |> to(bucket:"series1", org:"fred", 
host:"localhost", token:"auth-token", fieldFn: (r) => ({ col: r.col }) )`, +- Want: &flux.Spec{ +- Operations: []*flux.Operation{ +- { +- ID: "influxDBFrom0", +- Spec: &influxdb.FromOpSpec{ +- Bucket: "mydb", +- }, +- }, +- { +- ID: "to1", +- Spec: &influxdb.ToOpSpec{ +- Bucket: "series1", +- Org: "fred", +- Host: "localhost", +- Token: "auth-token", +- TimeColumn: execute.DefaultTimeColLabel, +- MeasurementColumn: influxdb.DefaultMeasurementColLabel, +- FieldFn: interpreter.ResolvedFunction{ +- Scope: valuestest.NowScope(), +- Fn: &semantic.FunctionExpression{ +- Block: &semantic.FunctionBlock{ +- Parameters: &semantic.FunctionParameters{ +- List: []*semantic.FunctionParameter{ +- { +- Key: &semantic.Identifier{Name: "r"}, +- }, +- }, +- }, +- Body: &semantic.ObjectExpression{ +- Properties: []*semantic.Property{ +- { +- Key: &semantic.Identifier{Name: "col"}, +- Value: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "col", +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- Edges: []flux.Edge{ +- {Parent: "influxDBFrom0", Child: "to1"}, +- }, +- }, +- }, +- } +- for _, tc := range tests { +- tc := tc +- t.Run(tc.Name, func(t *testing.T) { +- t.Parallel() +- querytest.NewQueryTestHelper(t, tc) +- }) +- } +-} - -- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) -- err := w.WriteGroupResultSet(rs) -- if err != nil { -- t.Fatalf("unexpected err: %v", err) -- } -- w.Flush() +-func TestToOpSpec_BucketsAccessed(t *testing.T) { +- bucketName := "my_bucket" +- bucketIDString := "ddddccccbbbbaaaa" +- bucketID, err := platform.IDFromString(bucketIDString) +- if err != nil { +- t.Fatal(err) +- } +- orgName := "my_org" +- orgIDString := "aaaabbbbccccdddd" +- orgID, err := platform.IDFromString(orgIDString) +- if err != nil { +- t.Fatal(err) +- } +- tests := []pquerytest.BucketsAccessedTestCase{ +- { +- Name: "from() with bucket and to with org and bucket", +- Raw: fmt.Sprintf(`from(bucket:"%s") |> to(bucket:"%s", org:"%s")`, bucketName, bucketName, orgName), +- WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, +- WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, Org: &orgName}}, +- }, +- { +- Name: "from() with bucket and to with orgID and bucket", +- Raw: fmt.Sprintf(`from(bucket:"%s") |> to(bucket:"%s", orgID:"%s")`, bucketName, bucketName, orgIDString), +- WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, +- WantWriteBuckets: &[]platform.BucketFilter{{Name: &bucketName, OrganizationID: orgID}}, +- }, +- { +- Name: "from() with bucket and to with orgID and bucketID", +- Raw: fmt.Sprintf(`from(bucket:"%s") |> to(bucketID:"%s", orgID:"%s")`, bucketName, bucketIDString, orgIDString), +- WantReadBuckets: &[]platform.BucketFilter{{Name: &bucketName}}, +- WantWriteBuckets: &[]platform.BucketFilter{{ID: bucketID, OrganizationID: orgID}}, +- }, +- } - -- assert.Equal(t, ss, exp) +- for _, tc := range tests { +- tc := tc +- t.Run(tc.Name, func(t *testing.T) { +- t.Parallel() +- pquerytest.BucketsAccessedTestHelper(t, tc) - }) -- }) +- } -} -diff --git b/storage/reads/store.go a/storage/reads/store.go -index 8918794b3..655d12d21 100644 ---- b/storage/reads/store.go -+++ a/storage/reads/store.go -@@ -80,5 +80,5 @@ type Store interface { - TagKeys(ctx context.Context, req *datatypes.TagKeysRequest) (cursors.StringIterator, error) - TagValues(ctx context.Context, req *datatypes.TagValuesRequest) (cursors.StringIterator, error) - -- 
GetSource(orgID, bucketID uint64) proto.Message -+ GetSource(db, rp string) proto.Message - } -diff --git b/storage/reads/table.go a/storage/reads/table.go -index 32784a538..3aa7485e2 100644 ---- b/storage/reads/table.go -+++ a/storage/reads/table.go -@@ -1,7 +1,5 @@ - package reads - --//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@types.tmpldata table.gen.go.tmpl - - import ( - "errors" - "sync/atomic" -diff --git b/tsdb/cursors/gen.go a/tsdb/cursors/gen.go -index 63316e5c0..ee7a8876a 100644 ---- b/tsdb/cursors/gen.go -+++ a/tsdb/cursors/gen.go -@@ -1,3 +1 @@ - package cursors -- --//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@arrayvalues.gen.go.tmpldata arrayvalues.gen.go.tmpl +-func TestTo_Process(t *testing.T) { +- oid, _ := mock.OrganizationLookup{}.Lookup(context.Background(), "my-org") +- bid, _ := mock.BucketLookup{}.Lookup(context.Background(), oid, "my-bucket") +- type wanted struct { +- result *mock.PointsWriter +- tables []*executetest.Table +- } +- testCases := []struct { +- name string +- spec *influxdb.ToProcedureSpec +- data []flux.Table +- want wanted +- }{ +- { +- name: "default case", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- MeasurementColumn: "_measurement", +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_start", Type: flux.TTime}, +- {Label: "_stop", Type: flux.TTime}, +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(0), execute.Time(100), execute.Time(11), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "b", "_value", 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(31), "a", "_value", 3.0}, +- {execute.Time(0), execute.Time(100), execute.Time(41), "c", "_value", 4.0}, +- }, +- })}, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `a _value=2 11 +-a _value=2 21 +-b _value=1 21 +-a _value=3 31 +-c _value=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_start", Type: flux.TTime}, +- {Label: "_stop", Type: flux.TTime}, +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(0), execute.Time(100), execute.Time(11), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "b", "_value", 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(31), "a", "_value", 3.0}, +- {execute.Time(0), execute.Time(100), execute.Time(41), "c", "_value", 4.0}, +- }, +- }}, +- }, +- }, +- { +- name: "default with heterogeneous tag columns", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- MeasurementColumn: "_measurement", +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "tag1", Type: flux.TString}, +- {Label: 
"tag2", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- KeyCols: []string{"_measurement", "tag1", "tag2", "_field"}, +- Data: [][]interface{}{ +- {execute.Time(11), "a", "a", "aa", "_value", 2.0}, +- {execute.Time(21), "a", "a", "bb", "_value", 2.0}, +- {execute.Time(21), "a", "b", "cc", "_value", 1.0}, +- {execute.Time(31), "a", "a", "dd", "_value", 3.0}, +- {execute.Time(41), "a", "c", "ee", "_value", 4.0}, +- }, +- }), +- executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "tagA", Type: flux.TString}, +- {Label: "tagB", Type: flux.TString}, +- {Label: "tagC", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- KeyCols: []string{"_measurement", "tagA", "tagB", "tagC", "_field"}, +- Data: [][]interface{}{ +- {execute.Time(11), "b", "a", "aa", "ff", "_value", 2.0}, +- {execute.Time(21), "b", "a", "bb", "gg", "_value", 2.0}, +- {execute.Time(21), "b", "b", "cc", "hh", "_value", 1.0}, +- {execute.Time(31), "b", "a", "dd", "ii", "_value", 3.0}, +- {execute.Time(41), "b", "c", "ee", "jj", "_value", 4.0}, +- }, +- }), +- }, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `a,tag1=a,tag2=aa _value=2 11 +-a,tag1=a,tag2=bb _value=2 21 +-a,tag1=b,tag2=cc _value=1 21 +-a,tag1=a,tag2=dd _value=3 31 +-a,tag1=c,tag2=ee _value=4 41 +-b,tagA=a,tagB=aa,tagC=ff _value=2 11 +-b,tagA=a,tagB=bb,tagC=gg _value=2 21 +-b,tagA=b,tagB=cc,tagC=hh _value=1 21 +-b,tagA=a,tagB=dd,tagC=ii _value=3 31 +-b,tagA=c,tagB=ee,tagC=jj _value=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "tag1", Type: flux.TString}, +- {Label: "tag2", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- KeyCols: []string{"_measurement", "tag1", "tag2", "_field"}, +- Data: [][]interface{}{ +- {execute.Time(11), "a", "a", "aa", "_value", 2.0}, +- {execute.Time(21), "a", "a", "bb", "_value", 2.0}, +- {execute.Time(21), "a", "b", "cc", "_value", 1.0}, +- {execute.Time(31), "a", "a", "dd", "_value", 3.0}, +- {execute.Time(41), "a", "c", "ee", "_value", 4.0}, +- }, +- }, +- { +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "tagA", Type: flux.TString}, +- {Label: "tagB", Type: flux.TString}, +- {Label: "tagC", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- KeyCols: []string{"_measurement", "tagA", "tagB", "tagC", "_field"}, +- Data: [][]interface{}{ +- {execute.Time(11), "b", "a", "aa", "ff", "_value", 2.0}, +- {execute.Time(21), "b", "a", "bb", "gg", "_value", 2.0}, +- {execute.Time(21), "b", "b", "cc", "hh", "_value", 1.0}, +- {execute.Time(31), "b", "a", "dd", "ii", "_value", 3.0}, +- {execute.Time(41), "b", "c", "ee", "jj", "_value", 4.0}, +- }, +- }, +- }, +- }, +- }, +- { +- name: "no _measurement with multiple tag columns", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- MeasurementColumn: "tag1", +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "tag1", 
Type: flux.TString}, +- {Label: "tag2", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "a", "aa", "_value", 2.0}, +- {execute.Time(21), "a", "bb", "_value", 2.0}, +- {execute.Time(21), "b", "cc", "_value", 1.0}, +- {execute.Time(31), "a", "dd", "_value", 3.0}, +- {execute.Time(41), "c", "ee", "_value", 4.0}, +- }, +- })}, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `a,tag2=aa _value=2 11 +-a,tag2=bb _value=2 21 +-b,tag2=cc _value=1 21 +-a,tag2=dd _value=3 31 +-c,tag2=ee _value=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "tag1", Type: flux.TString}, +- {Label: "tag2", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "a", "aa", "_value", 2.0}, +- {execute.Time(21), "a", "bb", "_value", 2.0}, +- {execute.Time(21), "b", "cc", "_value", 1.0}, +- {execute.Time(31), "a", "dd", "_value", 3.0}, +- {execute.Time(41), "c", "ee", "_value", 4.0}, +- }, +- }}, +- }, +- }, +- { +- name: "explicit tags", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- TagColumns: []string{"tag1", "tag2"}, +- MeasurementColumn: "_measurement", +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- {Label: "tag1", Type: flux.TString}, +- {Label: "tag2", Type: flux.TString}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "m", "_value", 2.0, "a", "aa"}, +- {execute.Time(21), "m", "_value", 2.0, "a", "bb"}, +- {execute.Time(21), "m", "_value", 1.0, "b", "cc"}, +- {execute.Time(31), "m", "_value", 3.0, "a", "dd"}, +- {execute.Time(41), "m", "_value", 4.0, "c", "ee"}, +- }, +- })}, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `m,tag1=a,tag2=aa _value=2 11 +-m,tag1=a,tag2=bb _value=2 21 +-m,tag1=b,tag2=cc _value=1 21 +-m,tag1=a,tag2=dd _value=3 31 +-m,tag1=c,tag2=ee _value=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- {Label: "tag1", Type: flux.TString}, +- {Label: "tag2", Type: flux.TString}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "m", "_value", 2.0, "a", "aa"}, +- {execute.Time(21), "m", "_value", 2.0, "a", "bb"}, +- {execute.Time(21), "m", "_value", 1.0, "b", "cc"}, +- {execute.Time(31), "m", "_value", 3.0, "a", "dd"}, +- {execute.Time(41), "m", "_value", 4.0, "c", "ee"}, +- }, +- }}, +- }, +- }, +- { +- name: "explicit field function", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- MeasurementColumn: "_measurement", +- FieldFn: interpreter.ResolvedFunction{ +- Scope: valuestest.NowScope(), +- Fn: &semantic.FunctionExpression{ +- Block: &semantic.FunctionBlock{ +- Parameters: &semantic.FunctionParameters{ +- List: []*semantic.FunctionParameter{ +- { +- Key: &semantic.Identifier{Name: "r"}, +- }, +- }, +- }, +- Body: &semantic.ObjectExpression{ +- 
Properties: []*semantic.Property{ +- { +- Key: &semantic.Identifier{Name: "temperature"}, +- Value: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "temperature", +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "temperature", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "a", 2.0}, +- {execute.Time(21), "a", 2.0}, +- {execute.Time(21), "b", 1.0}, +- {execute.Time(31), "a", 3.0}, +- {execute.Time(41), "c", 4.0}, +- }, +- })}, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `a temperature=2 11 +-a temperature=2 21 +-b temperature=1 21 +-a temperature=3 31 +-c temperature=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "temperature", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "a", 2.0}, +- {execute.Time(21), "a", 2.0}, +- {execute.Time(21), "b", 1.0}, +- {execute.Time(31), "a", 3.0}, +- {execute.Time(41), "c", 4.0}, +- }, +- }}, +- }, +- }, +- { +- name: "infer tags from complex field function", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- MeasurementColumn: "tag", +- FieldFn: interpreter.ResolvedFunction{ +- Scope: valuestest.NowScope(), +- Fn: &semantic.FunctionExpression{ +- Block: &semantic.FunctionBlock{ +- Parameters: &semantic.FunctionParameters{ +- List: []*semantic.FunctionParameter{ +- { +- Key: &semantic.Identifier{Name: "r"}, +- }, +- }, +- }, +- Body: &semantic.ObjectExpression{ +- Properties: []*semantic.Property{ +- { +- Key: &semantic.Identifier{Name: "day"}, +- Value: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "day", +- }, +- }, +- { +- Key: &semantic.Identifier{Name: "temperature"}, +- Value: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "temperature", +- }, +- }, +- { +- Key: &semantic.Identifier{Name: "humidity"}, +- Value: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "humidity", +- }, +- }, +- { +- Key: &semantic.Identifier{Name: "ratio"}, +- Value: &semantic.BinaryExpression{ +- Operator: ast.DivisionOperator, +- Left: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "temperature", +- }, +- Right: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "humidity", +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "day", Type: flux.TString}, +- {Label: "tag", Type: flux.TString}, +- {Label: "temperature", Type: flux.TFloat}, +- {Label: "humidity", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "Monday", "a", 2.0, 1.0}, +- {execute.Time(21), "Tuesday", "a", 2.0, 2.0}, +- {execute.Time(21), "Wednesday", "b", 1.0, 4.0}, +- {execute.Time(31), "Thursday", "a", 3.0, 3.0}, +- {execute.Time(41), "Friday", "c", 4.0, 5.0}, +- }, +- })}, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `a 
day="Monday",humidity=1,ratio=2,temperature=2 11 +-a day="Tuesday",humidity=2,ratio=1,temperature=2 21 +-b day="Wednesday",humidity=4,ratio=0.25,temperature=1 21 +-a day="Thursday",humidity=3,ratio=1,temperature=3 31 +-c day="Friday",humidity=5,ratio=0.8,temperature=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "day", Type: flux.TString}, +- {Label: "tag", Type: flux.TString}, +- {Label: "temperature", Type: flux.TFloat}, +- {Label: "humidity", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "Monday", "a", 2.0, 1.0}, +- {execute.Time(21), "Tuesday", "a", 2.0, 2.0}, +- {execute.Time(21), "Wednesday", "b", 1.0, 4.0}, +- {execute.Time(31), "Thursday", "a", 3.0, 3.0}, +- {execute.Time(41), "Friday", "c", 4.0, 5.0}, +- }, +- }}, +- }, +- }, +- { +- name: "explicit tag columns, multiple values in field function, and extra columns", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- MeasurementColumn: "tag1", +- TagColumns: []string{"tag2"}, +- FieldFn: interpreter.ResolvedFunction{ +- Scope: valuestest.NowScope(), +- Fn: &semantic.FunctionExpression{ +- Block: &semantic.FunctionBlock{ +- Parameters: &semantic.FunctionParameters{ +- List: []*semantic.FunctionParameter{ +- { +- Key: &semantic.Identifier{Name: "r"}, +- }, +- }, +- }, +- Body: &semantic.ObjectExpression{ +- Properties: []*semantic.Property{ +- { +- Key: &semantic.Identifier{Name: "temperature"}, +- Value: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "temperature", +- }, +- }, +- { +- Key: &semantic.Identifier{Name: "humidity"}, +- Value: &semantic.MemberExpression{ +- Object: &semantic.IdentifierExpression{Name: "r"}, +- Property: "humidity", +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_start", Type: flux.TTime}, +- {Label: "_stop", Type: flux.TTime}, +- {Label: "_time", Type: flux.TTime}, +- {Label: "tag1", Type: flux.TString}, +- {Label: "tag2", Type: flux.TString}, +- {Label: "other-string-column", Type: flux.TString}, +- {Label: "temperature", Type: flux.TFloat}, +- {Label: "humidity", Type: flux.TInt}, +- {Label: "other-value-column", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(0), execute.Time(100), execute.Time(11), "a", "d", "misc", 2.0, int64(50), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "a", "d", "misc", 2.0, int64(50), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "b", "d", "misc", 1.0, int64(50), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(31), "a", "e", "misc", 3.0, int64(60), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(41), "c", "e", "misc", 4.0, int64(65), 1.0}, +- }, +- })}, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `a,tag2=d humidity=50i,temperature=2 11 +-a,tag2=d humidity=50i,temperature=2 21 +-b,tag2=d humidity=50i,temperature=1 21 +-a,tag2=e humidity=60i,temperature=3 31 +-c,tag2=e humidity=65i,temperature=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_start", Type: flux.TTime}, +- {Label: "_stop", Type: flux.TTime}, +- {Label: "_time", Type: flux.TTime}, +- {Label: "tag1", Type: flux.TString}, +- {Label: "tag2", Type: flux.TString}, +- {Label: "other-string-column", Type: flux.TString}, 
+- {Label: "temperature", Type: flux.TFloat}, +- {Label: "humidity", Type: flux.TInt}, +- {Label: "other-value-column", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(0), execute.Time(100), execute.Time(11), "a", "d", "misc", 2.0, int64(50), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "a", "d", "misc", 2.0, int64(50), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "b", "d", "misc", 1.0, int64(50), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(31), "a", "e", "misc", 3.0, int64(60), 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(41), "c", "e", "misc", 4.0, int64(65), 1.0}, +- }, +- }}, +- }, +- }, +- { +- name: "multiple _field", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- MeasurementColumn: "_measurement", +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_start", Type: flux.TTime}, +- {Label: "_stop", Type: flux.TTime}, +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(0), execute.Time(100), execute.Time(11), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "b", "_value", 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(31), "a", "_hello", 3.0}, +- {execute.Time(0), execute.Time(100), execute.Time(41), "c", "_hello", 4.0}, +- }, +- })}, +- want: wanted{ +- result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `a _value=2 11 +-a _value=2 21 +-b _value=1 21 +-a _hello=3 31 +-c _hello=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_start", Type: flux.TTime}, +- {Label: "_stop", Type: flux.TTime}, +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- }, +- Data: [][]interface{}{ +- {execute.Time(0), execute.Time(100), execute.Time(11), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "a", "_value", 2.0}, +- {execute.Time(0), execute.Time(100), execute.Time(21), "b", "_value", 1.0}, +- {execute.Time(0), execute.Time(100), execute.Time(31), "a", "_hello", 3.0}, +- {execute.Time(0), execute.Time(100), execute.Time(41), "c", "_hello", 4.0}, +- }, +- }}, +- }, +- }, +- { +- name: "unordered tags", +- spec: &influxdb.ToProcedureSpec{ +- Spec: &influxdb.ToOpSpec{ +- Org: "my-org", +- Bucket: "my-bucket", +- TimeColumn: "_time", +- TagColumns: []string{"tag1", "tag2"}, +- MeasurementColumn: "_measurement", +- }, +- }, +- data: []flux.Table{executetest.MustCopyTable(&executetest.Table{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- {Label: "tag2", Type: flux.TString}, +- {Label: "tag1", Type: flux.TString}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "m", "_value", 2.0, "aa", "a"}, +- {execute.Time(21), "m", "_value", 2.0, "bb", "a"}, +- {execute.Time(21), "m", "_value", 1.0, "cc", "b"}, +- {execute.Time(31), "m", "_value", 3.0, "dd", "a"}, +- {execute.Time(41), "m", "_value", 4.0, "ee", "c"}, +- }, +- })}, +- want: wanted{ +- 
result: &mock.PointsWriter{ +- Points: mockPoints(oid, bid, `m,tag1=a,tag2=aa _value=2 11 +-m,tag1=a,tag2=bb _value=2 21 +-m,tag1=b,tag2=cc _value=1 21 +-m,tag1=a,tag2=dd _value=3 31 +-m,tag1=c,tag2=ee _value=4 41`), +- }, +- tables: []*executetest.Table{{ +- ColMeta: []flux.ColMeta{ +- {Label: "_time", Type: flux.TTime}, +- {Label: "_measurement", Type: flux.TString}, +- {Label: "_field", Type: flux.TString}, +- {Label: "_value", Type: flux.TFloat}, +- {Label: "tag2", Type: flux.TString}, +- {Label: "tag1", Type: flux.TString}, +- }, +- Data: [][]interface{}{ +- {execute.Time(11), "m", "_value", 2.0, "aa", "a"}, +- {execute.Time(21), "m", "_value", 2.0, "bb", "a"}, +- {execute.Time(21), "m", "_value", 1.0, "cc", "b"}, +- {execute.Time(31), "m", "_value", 3.0, "dd", "a"}, +- {execute.Time(41), "m", "_value", 4.0, "ee", "c"}, +- }, +- }}, +- }, +- }, +- } +- +- for _, tc := range testCases { +- tc := tc +- t.Run(tc.name, func(t *testing.T) { +- deps := influxdb.Dependencies{ +- FluxDeps: dependenciestest.Default(), +- StorageDeps: influxdb.StorageDependencies{ +- ToDeps: mockDependencies(), +- }, +- } +- executetest.ProcessTestHelper( +- t, +- tc.data, +- tc.want.tables, +- nil, +- func(d execute.Dataset, c execute.TableBuilderCache) execute.Transformation { +- ctx := deps.Inject(context.Background()) +- newT, err := influxdb.NewToTransformation(ctx, d, c, tc.spec, deps.StorageDeps.ToDeps) +- if err != nil { +- t.Error(err) +- } +- return newT +- }, +- ) +- pw := deps.StorageDeps.ToDeps.PointsWriter.(*mock.PointsWriter) +- if len(pw.Points) != len(tc.want.result.Points) { +- t.Errorf("Expected result values to have length of %d but got %d", len(tc.want.result.Points), len(pw.Points)) +- } +- +- gotStr := pointsToStr(pw.Points) +- wantStr := pointsToStr(tc.want.result.Points) +- +- if !cmp.Equal(gotStr, wantStr) { +- t.Errorf("got other than expected %s", cmp.Diff(gotStr, wantStr)) +- } +- }) +- } +-} +- +-func mockDependencies() influxdb.ToDependencies { +- return influxdb.ToDependencies{ +- BucketLookup: mock.BucketLookup{}, +- OrganizationLookup: mock.OrganizationLookup{}, +- PointsWriter: new(mock.PointsWriter), +- } +-} +- +-func pointsToStr(points []models.Point) string { +- outStr := "" +- for _, x := range points { +- outStr += x.String() + "\n" +- } +- return outStr +-} +- +-func mockPoints(org, bucket platform.ID, pointdata string) []models.Point { +- name := tsdb.EncodeName(org, bucket) +- points, err := models.ParsePoints([]byte(pointdata), name[:]) +- if err != nil { +- return nil +- } +- return points +-} +diff --git b/flux/stdlib/influxdata/influxdb/v1/databases.go a/flux/stdlib/influxdata/influxdb/v1/databases.go +index 6a6c59a76c..1779f411c5 100644 +--- b/flux/stdlib/influxdata/influxdb/v1/databases.go ++++ a/flux/stdlib/influxdata/influxdb/v1/databases.go +@@ -2,8 +2,8 @@ package v1 + + import ( + "context" ++ "errors" + "fmt" +- "time" + + "github.com/influxdata/flux" + "github.com/influxdata/flux/execute" +@@ -11,9 +11,9 @@ import ( + "github.com/influxdata/flux/plan" + "github.com/influxdata/flux/stdlib/influxdata/influxdb/v1" + "github.com/influxdata/flux/values" +- platform "github.com/influxdata/influxdb" +- "github.com/influxdata/influxdb/query" +- "github.com/pkg/errors" ++ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" ++ "github.com/influxdata/influxdb/services/meta" ++ "github.com/influxdata/influxql" + ) + + const DatabasesKind = v1.DatabasesKind +@@ -67,9 +67,9 @@ func init() { + } + + type DatabasesDecoder struct { +- orgID platform.ID +- 
deps *DatabasesDependencies +- databases []*platform.DBRPMapping ++ deps *influxdb.StorageDependencies ++ databases []meta.DatabaseInfo ++ user meta.User + alloc *memory.Allocator + } + +@@ -78,45 +78,13 @@ func (bd *DatabasesDecoder) Connect(ctx context.Context) error { + } + + func (bd *DatabasesDecoder) Fetch(ctx context.Context) (bool, error) { +- b, _, err := bd.deps.DBRP.FindMany(ctx, platform.DBRPMappingFilter{}) +- if err != nil { +- return false, err +- } +- bd.databases = b ++ bd.databases = bd.deps.MetaClient.Databases() + return false, nil + } + + func (bd *DatabasesDecoder) Decode(ctx context.Context) (flux.Table, error) { +- type databaseInfo struct { +- *platform.DBRPMapping +- RetentionPeriod time.Duration +- } +- +- databases := make([]databaseInfo, 0, len(bd.databases)) +- for _, db := range bd.databases { +- bucket, err := bd.deps.BucketLookup.FindBucketByID(ctx, db.BucketID) +- if err != nil { +- code := platform.ErrorCode(err) +- if code == platform.EUnauthorized || code == platform.EForbidden { +- continue +- } +- return nil, err +- } +- databases = append(databases, databaseInfo{ +- DBRPMapping: db, +- RetentionPeriod: bucket.RetentionPeriod, +- }) +- } +- +- if len(databases) == 0 { +- return nil, &platform.Error{ +- Code: platform.ENotFound, +- Msg: "no 1.x databases found", +- } +- } +- + kb := execute.NewGroupKeyBuilder(nil) +- kb.AddKeyValue("organizationID", values.NewString(databases[0].OrganizationID.String())) ++ kb.AddKeyValue("organizationID", values.NewString("")) + gk, err := kb.Build() + if err != nil { + return nil, err +@@ -160,13 +128,29 @@ func (bd *DatabasesDecoder) Decode(ctx context.Context) (flux.Table, error) { + return nil, err + } + +- for _, db := range databases { +- _ = b.AppendString(0, db.OrganizationID.String()) +- _ = b.AppendString(1, db.Database) +- _ = b.AppendString(2, db.RetentionPolicy) +- _ = b.AppendInt(3, db.RetentionPeriod.Nanoseconds()) +- _ = b.AppendBool(4, db.Default) +- _ = b.AppendString(5, db.BucketID.String()) ++ var hasAccess func(db string) bool ++ if bd.user == nil { ++ hasAccess = func(db string) bool { ++ return true ++ } ++ } else { ++ hasAccess = func(db string) bool { ++ return bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.ReadPrivilege, db) == nil || ++ bd.deps.Authorizer.AuthorizeDatabase(bd.user, influxql.WritePrivilege, db) == nil ++ } ++ } ++ ++ for _, db := range bd.databases { ++ if hasAccess(db.Name) { ++ for _, rp := range db.RetentionPolicies { ++ _ = b.AppendString(0, "") ++ _ = b.AppendString(1, db.Name) ++ _ = b.AppendString(2, rp.Name) ++ _ = b.AppendInt(3, rp.Duration.Nanoseconds()) ++ _ = b.AppendBool(4, db.DefaultRetentionPolicy == rp.Name) ++ _ = b.AppendString(5, "") ++ } ++ } + } + + return b.Table() +@@ -181,41 +165,14 @@ func createDatabasesSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a + if !ok { + return nil, fmt.Errorf("invalid spec type %T", prSpec) + } +- deps := GetDatabasesDependencies(a.Context()) +- req := query.RequestFromContext(a.Context()) +- if req == nil { +- return nil, errors.New("missing request on context") ++ deps := influxdb.GetStorageDependencies(a.Context()) ++ var user meta.User ++ if deps.AuthEnabled { ++ user = meta.UserFromContext(a.Context()) ++ if user == nil { ++ return nil, errors.New("createDatabasesSource: no user") ++ } + } +- orgID := req.OrganizationID +- +- bd := &DatabasesDecoder{orgID: orgID, deps: &deps, alloc: a.Allocator()} +- ++ bd := &DatabasesDecoder{deps: &deps, alloc: a.Allocator(), user: user} + return 
execute.CreateSourceFromDecoder(bd, dsid, a) + } +- +-type key int +- +-const dependenciesKey key = iota +- +-type DatabasesDependencies struct { +- DBRP platform.DBRPMappingService +- BucketLookup platform.BucketService +-} +- +-func (d DatabasesDependencies) Inject(ctx context.Context) context.Context { +- return context.WithValue(ctx, dependenciesKey, d) +-} +- +-func GetDatabasesDependencies(ctx context.Context) DatabasesDependencies { +- return ctx.Value(dependenciesKey).(DatabasesDependencies) +-} +- +-func (d DatabasesDependencies) Validate() error { +- if d.DBRP == nil { +- return errors.New("missing all databases lookup dependency") +- } +- if d.BucketLookup == nil { +- return errors.New("missing buckets lookup dependency") +- } +- return nil +-} diff --git a/patches/storage.patch b/patches/storage.patch new file mode 100644 index 00000000000..c4fbee4b1bc --- /dev/null +++ b/patches/storage.patch @@ -0,0 +1,923 @@ +diff --git b/storage/reads/datatypes/gen.go a/storage/reads/datatypes/gen.go +index 6df6b5c4a7..54b5a9699b 100644 +--- b/storage/reads/datatypes/gen.go ++++ a/storage/reads/datatypes/gen.go +@@ -1,3 +1 @@ + package datatypes +- +-//go:generate protoc -I ../../../internal -I . --plugin ../../../scripts/protoc-gen-gogofaster --gogofaster_out=Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,plugins=grpc:. storage_common.proto predicate.proto +diff --git b/storage/reads/flux_reader.go a/storage/reads/flux_reader.go +index bb98c2918c..e10bf3eae7 100644 +--- b/storage/reads/flux_reader.go ++++ a/storage/reads/flux_reader.go +@@ -10,8 +10,8 @@ import ( + "github.com/influxdata/flux/execute" + "github.com/influxdata/flux/memory" + "github.com/influxdata/flux/values" ++ "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" + "github.com/influxdata/influxdb/models" +- "github.com/influxdata/influxdb/query/stdlib/influxdata/influxdb" + "github.com/influxdata/influxdb/storage/reads/datatypes" + "github.com/influxdata/influxdb/tsdb/cursors" + ) +@@ -106,8 +106,8 @@ func (fi *filterIterator) Statistics() cursors.CursorStats { return fi.stats } + + func (fi *filterIterator) Do(f func(flux.Table) error) error { + src := fi.s.GetSource( +- uint64(fi.spec.OrganizationID), +- uint64(fi.spec.BucketID), ++ fi.spec.Database, ++ fi.spec.RetentionPolicy, + ) + + // Setup read request +@@ -230,8 +230,8 @@ func (gi *groupIterator) Statistics() cursors.CursorStats { return gi.stats } + + func (gi *groupIterator) Do(f func(flux.Table) error) error { + src := gi.s.GetSource( +- uint64(gi.spec.OrganizationID), +- uint64(gi.spec.BucketID), ++ gi.spec.Database, ++ gi.spec.RetentionPolicy, + ) + + // Setup read request +@@ -510,8 +510,8 @@ type tagKeysIterator struct { + + func (ti *tagKeysIterator) Do(f func(flux.Table) error) error { + src := ti.s.GetSource( +- uint64(ti.readSpec.OrganizationID), +- uint64(ti.readSpec.BucketID), ++ ti.readSpec.Database, ++ ti.readSpec.RetentionPolicy, + ) + + var req datatypes.TagKeysRequest +@@ -592,8 +592,8 @@ type tagValuesIterator struct { + + func (ti *tagValuesIterator) Do(f func(flux.Table) error) error { + src := ti.s.GetSource( +- uint64(ti.readSpec.OrganizationID), +- uint64(ti.readSpec.BucketID), ++ ti.readSpec.Database, ++ ti.readSpec.RetentionPolicy, + ) + + var req datatypes.TagValuesRequest +diff --git b/storage/reads/flux_table.go a/storage/reads/flux_table.go +index 58a586c777..952073c314 100644 +--- b/storage/reads/flux_table.go ++++ a/storage/reads/flux_table.go +@@ 
-1,7 +1,5 @@ + package reads + +-//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@types.tmpldata flux_table.gen.go.tmpl +- + import ( + "errors" + "sync/atomic" +diff --git b/storage/reads/flux_table_test.go a/storage/reads/flux_table_test.go +index 620d0e37d7..ff8698b893 100644 +--- b/storage/reads/flux_table_test.go ++++ a/storage/reads/flux_table_test.go +@@ -1,174 +1 @@ + package reads_test +- +-import ( +- "context" +- "io/ioutil" +- "math" +- "math/rand" +- "os" +- "path/filepath" +- "testing" +- "time" +- +- "github.com/influxdata/flux" +- "github.com/influxdata/flux/execute" +- "github.com/influxdata/flux/memory" +- "github.com/influxdata/flux/values" +- "github.com/influxdata/influxdb/cmd/influxd/generate" +- "github.com/influxdata/influxdb/mock" +- "github.com/influxdata/influxdb/models" +- "github.com/influxdata/influxdb/pkg/data/gen" +- "github.com/influxdata/influxdb/query/stdlib/influxdata/influxdb" +- "github.com/influxdata/influxdb/storage" +- "github.com/influxdata/influxdb/storage/reads" +- "github.com/influxdata/influxdb/storage/readservice" +- "go.uber.org/zap/zaptest" +-) +- +-func BenchmarkReadFilter(b *testing.B) { +- idgen := mock.NewMockIDGenerator() +- tagsSpec := &gen.TagsSpec{ +- Tags: []*gen.TagValuesSpec{ +- { +- TagKey: "t0", +- Values: func() gen.CountableSequence { +- return gen.NewCounterByteSequence("a-%d", 0, 5) +- }, +- }, +- { +- TagKey: "t1", +- Values: func() gen.CountableSequence { +- return gen.NewCounterByteSequence("b-%d", 0, 1000) +- }, +- }, +- }, +- } +- spec := gen.Spec{ +- OrgID: idgen.ID(), +- BucketID: idgen.ID(), +- Measurements: []gen.MeasurementSpec{ +- { +- Name: "m0", +- TagsSpec: tagsSpec, +- FieldValuesSpec: &gen.FieldValuesSpec{ +- Name: "f0", +- TimeSequenceSpec: gen.TimeSequenceSpec{ +- Count: math.MaxInt32, +- Delta: time.Minute, +- }, +- DataType: models.Float, +- Values: func(spec gen.TimeSequenceSpec) gen.TimeValuesSequence { +- r := rand.New(rand.NewSource(10)) +- return gen.NewTimeFloatValuesSequence( +- spec.Count, +- gen.NewTimestampSequenceFromSpec(spec), +- gen.NewFloatRandomValuesSequence(0, 90, r), +- ) +- }, +- }, +- }, +- { +- Name: "m0", +- TagsSpec: tagsSpec, +- FieldValuesSpec: &gen.FieldValuesSpec{ +- Name: "f1", +- TimeSequenceSpec: gen.TimeSequenceSpec{ +- Count: math.MaxInt32, +- Delta: time.Minute, +- }, +- DataType: models.Float, +- Values: func(spec gen.TimeSequenceSpec) gen.TimeValuesSequence { +- r := rand.New(rand.NewSource(11)) +- return gen.NewTimeFloatValuesSequence( +- spec.Count, +- gen.NewTimestampSequenceFromSpec(spec), +- gen.NewFloatRandomValuesSequence(0, 180, r), +- ) +- }, +- }, +- }, +- { +- Name: "m0", +- TagsSpec: tagsSpec, +- FieldValuesSpec: &gen.FieldValuesSpec{ +- Name: "f1", +- TimeSequenceSpec: gen.TimeSequenceSpec{ +- Count: math.MaxInt32, +- Delta: time.Minute, +- }, +- DataType: models.Float, +- Values: func(spec gen.TimeSequenceSpec) gen.TimeValuesSequence { +- r := rand.New(rand.NewSource(12)) +- return gen.NewTimeFloatValuesSequence( +- spec.Count, +- gen.NewTimestampSequenceFromSpec(spec), +- gen.NewFloatRandomValuesSequence(10, 10000, r), +- ) +- }, +- }, +- }, +- }, +- } +- tr := gen.TimeRange{ +- Start: mustParseTime("2019-11-25T00:00:00Z"), +- End: mustParseTime("2019-11-26T00:00:00Z"), +- } +- sg := gen.NewSeriesGeneratorFromSpec(&spec, tr) +- benchmarkRead(b, sg, func(r influxdb.Reader) error { +- mem := &memory.Allocator{} +- tables, err := r.ReadFilter(context.Background(), influxdb.ReadFilterSpec{ +- OrganizationID: spec.OrgID, +- BucketID: 
spec.BucketID, +- Bounds: execute.Bounds{ +- Start: values.ConvertTime(tr.Start), +- Stop: values.ConvertTime(tr.End), +- }, +- }, mem) +- if err != nil { +- return err +- } +- return tables.Do(func(table flux.Table) error { +- table.Done() +- return nil +- }) +- }) +-} +- +-func benchmarkRead(b *testing.B, sg gen.SeriesGenerator, f func(r influxdb.Reader) error) { +- logger := zaptest.NewLogger(b) +- rootDir, err := ioutil.TempDir("", "storage-reads-test") +- if err != nil { +- b.Fatal(err) +- } +- defer func() { _ = os.RemoveAll(rootDir) }() +- +- generator := generate.Generator{} +- if _, err := generator.Run(context.Background(), rootDir, sg); err != nil { +- b.Fatal(err) +- } +- +- enginePath := filepath.Join(rootDir, "engine") +- engine := storage.NewEngine(enginePath, storage.NewConfig()) +- engine.WithLogger(logger) +- +- if err := engine.Open(context.Background()); err != nil { +- b.Fatal(err) +- } +- reader := reads.NewReader(readservice.NewStore(engine)) +- +- b.ResetTimer() +- b.ReportAllocs() +- for i := 0; i < b.N; i++ { +- if err := f(reader); err != nil { +- b.Fatal(err) +- } +- } +-} +- +-func mustParseTime(s string) time.Time { +- ts, err := time.Parse(time.RFC3339, s) +- if err != nil { +- panic(err) +- } +- return ts +-} +diff --git b/storage/reads/gen.go a/storage/reads/gen.go +index 9e80e93ba6..8eee6fe0b5 100644 +--- b/storage/reads/gen.go ++++ a/storage/reads/gen.go +@@ -1,4 +1 @@ + package reads +- +-//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@array_cursor.gen.go.tmpldata array_cursor.gen.go.tmpl +-//go:generate env GO111MODULE=on go run github.com/benbjohnson/tmpl -data=@array_cursor.gen.go.tmpldata response_writer.gen.go.tmpl +diff --git b/storage/reads/group_resultset.go a/storage/reads/group_resultset.go +index 24766cff67..21e0e2b4c9 100644 +--- b/storage/reads/group_resultset.go ++++ a/storage/reads/group_resultset.go +@@ -7,7 +7,6 @@ import ( + "math" + "sort" + +- "github.com/influxdata/influxdb/kit/tracing" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/storage/reads/datatypes" + "github.com/influxdata/influxdb/tsdb/cursors" +@@ -112,16 +111,7 @@ func (g *groupResultSet) Next() GroupCursor { + } + + func (g *groupResultSet) sort() (int, error) { +- span, _ := tracing.StartSpanFromContext(g.ctx) +- defer span.Finish() +- span.LogKV("group_type", g.req.Group.String()) +- + n, err := g.sortFn(g) +- +- if err != nil { +- span.LogKV("rows", n) +- } +- + return n, err + } + +diff --git b/storage/reads/group_resultset_test.go a/storage/reads/group_resultset_test.go +index ee13d16167..eb1fc91fc3 100644 +--- b/storage/reads/group_resultset_test.go ++++ a/storage/reads/group_resultset_test.go +@@ -394,7 +394,7 @@ func BenchmarkNewGroupResultSet_GroupBy(b *testing.B) { + vals[i] = gen.NewCounterByteSequenceCount(card[i]) + } + +- tags := gen.NewTagsValuesSequenceValues("m0", "f0", "tag", vals) ++ tags := gen.NewTagsValuesSequenceValues("tag", vals) + rows := make([]reads.SeriesRow, tags.Count()) + for i := range rows { + tags.Next() +diff --git b/storage/reads/helpers_test.go a/storage/reads/helpers_test.go +index d688ae3658..ff8698b893 100644 +--- b/storage/reads/helpers_test.go ++++ a/storage/reads/helpers_test.go +@@ -1,169 +1 @@ + package reads_test +- +-import ( +- "context" +- +- "github.com/influxdata/influxdb/models" +- "github.com/influxdata/influxdb/pkg/data/gen" +- "github.com/influxdata/influxdb/storage/reads" +- "github.com/influxdata/influxdb/tsdb" +- "github.com/influxdata/influxdb/tsdb/cursors" 
+-) +- +-type seriesGeneratorCursorIterator struct { +- g gen.SeriesGenerator +- f floatTimeValuesGeneratorCursor +- i integerTimeValuesGeneratorCursor +- u unsignedTimeValuesGeneratorCursor +- s stringTimeValuesGeneratorCursor +- b booleanTimeValuesGeneratorCursor +- cur cursors.Cursor +-} +- +-func (ci *seriesGeneratorCursorIterator) Next(ctx context.Context, r *cursors.CursorRequest) (cursors.Cursor, error) { +- switch ci.g.FieldType() { +- case models.Float: +- ci.f.tv = ci.g.TimeValuesGenerator() +- ci.cur = &ci.f +- case models.Integer: +- ci.i.tv = ci.g.TimeValuesGenerator() +- ci.cur = &ci.i +- case models.Unsigned: +- ci.u.tv = ci.g.TimeValuesGenerator() +- ci.cur = &ci.u +- case models.String: +- ci.s.tv = ci.g.TimeValuesGenerator() +- ci.cur = &ci.s +- case models.Boolean: +- ci.b.tv = ci.g.TimeValuesGenerator() +- ci.cur = &ci.b +- default: +- panic("unreachable") +- } +- +- return ci.cur, nil +-} +- +-func (ci *seriesGeneratorCursorIterator) Stats() cursors.CursorStats { +- return ci.cur.Stats() +-} +- +-type seriesGeneratorSeriesCursor struct { +- ci seriesGeneratorCursorIterator +- r reads.SeriesRow +-} +- +-func newSeriesGeneratorSeriesCursor(g gen.SeriesGenerator) *seriesGeneratorSeriesCursor { +- s := &seriesGeneratorSeriesCursor{} +- s.ci.g = g +- s.r.Query = tsdb.CursorIterators{&s.ci} +- return s +-} +- +-func (s *seriesGeneratorSeriesCursor) Close() {} +-func (s *seriesGeneratorSeriesCursor) Err() error { return nil } +- +-func (s *seriesGeneratorSeriesCursor) Next() *reads.SeriesRow { +- if s.ci.g.Next() { +- s.r.SeriesTags = s.ci.g.Tags() +- s.r.Tags = s.ci.g.Tags() +- return &s.r +- } +- return nil +-} +- +-type timeValuesGeneratorCursor struct { +- tv gen.TimeValuesSequence +- stats cursors.CursorStats +-} +- +-func (t timeValuesGeneratorCursor) Close() {} +-func (t timeValuesGeneratorCursor) Err() error { return nil } +-func (t timeValuesGeneratorCursor) Stats() cursors.CursorStats { return t.stats } +- +-type floatTimeValuesGeneratorCursor struct { +- timeValuesGeneratorCursor +- a tsdb.FloatArray +-} +- +-func (c *floatTimeValuesGeneratorCursor) Next() *cursors.FloatArray { +- if c.tv.Next() { +- c.tv.Values().(gen.FloatValues).Copy(&c.a) +- } else { +- c.a.Timestamps = c.a.Timestamps[:0] +- c.a.Values = c.a.Values[:0] +- } +- c.stats.ScannedBytes += len(c.a.Values) * 8 +- c.stats.ScannedValues += c.a.Len() +- return &c.a +-} +- +-type integerTimeValuesGeneratorCursor struct { +- timeValuesGeneratorCursor +- a tsdb.IntegerArray +-} +- +-func (c *integerTimeValuesGeneratorCursor) Next() *cursors.IntegerArray { +- if c.tv.Next() { +- c.tv.Values().(gen.IntegerValues).Copy(&c.a) +- } else { +- c.a.Timestamps = c.a.Timestamps[:0] +- c.a.Values = c.a.Values[:0] +- } +- c.stats.ScannedBytes += len(c.a.Values) * 8 +- c.stats.ScannedValues += c.a.Len() +- return &c.a +-} +- +-type unsignedTimeValuesGeneratorCursor struct { +- timeValuesGeneratorCursor +- a tsdb.UnsignedArray +-} +- +-func (c *unsignedTimeValuesGeneratorCursor) Next() *cursors.UnsignedArray { +- if c.tv.Next() { +- c.tv.Values().(gen.UnsignedValues).Copy(&c.a) +- } else { +- c.a.Timestamps = c.a.Timestamps[:0] +- c.a.Values = c.a.Values[:0] +- } +- c.stats.ScannedBytes += len(c.a.Values) * 8 +- c.stats.ScannedValues += c.a.Len() +- return &c.a +-} +- +-type stringTimeValuesGeneratorCursor struct { +- timeValuesGeneratorCursor +- a tsdb.StringArray +-} +- +-func (c *stringTimeValuesGeneratorCursor) Next() *cursors.StringArray { +- if c.tv.Next() { +- c.tv.Values().(gen.StringValues).Copy(&c.a) +- } else 
{ +- c.a.Timestamps = c.a.Timestamps[:0] +- c.a.Values = c.a.Values[:0] +- } +- for _, v := range c.a.Values { +- c.stats.ScannedBytes += len(v) +- } +- c.stats.ScannedValues += c.a.Len() +- return &c.a +-} +- +-type booleanTimeValuesGeneratorCursor struct { +- timeValuesGeneratorCursor +- a tsdb.BooleanArray +-} +- +-func (c *booleanTimeValuesGeneratorCursor) Next() *cursors.BooleanArray { +- if c.tv.Next() { +- c.tv.Values().(gen.BooleanValues).Copy(&c.a) +- } else { +- c.a.Timestamps = c.a.Timestamps[:0] +- c.a.Values = c.a.Values[:0] +- } +- c.stats.ScannedBytes += len(c.a.Values) +- c.stats.ScannedValues += c.a.Len() +- return &c.a +-} +diff --git b/storage/reads/response_writer_test.go a/storage/reads/response_writer_test.go +index 0916c822b2..0abaf75443 100644 +--- b/storage/reads/response_writer_test.go ++++ a/storage/reads/response_writer_test.go +@@ -1,21 +1,12 @@ + package reads_test + + import ( +- "context" +- "errors" + "fmt" + "reflect" +- "strings" + "testing" +- "time" + +- "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/mock" +- "github.com/influxdata/influxdb/pkg/data/gen" +- "github.com/influxdata/influxdb/pkg/testing/assert" + "github.com/influxdata/influxdb/storage/reads" +- "github.com/influxdata/influxdb/storage/reads/datatypes" +- "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/cursors" + "google.golang.org/grpc/metadata" + ) +@@ -132,403 +123,3 @@ func TestResponseWriter_WriteGroupResultSet_Stats(t *testing.T) { + t.Errorf("expected scanned-bytes '%v' but got '%v'", []string{fmt.Sprint(scannedBytes)}, gotTrailer.Get("scanned-bytes")) + } + } +- +-var ( +- org = influxdb.ID(0xff00ff00) +- bucket = influxdb.ID(0xcc00cc00) +- orgBucketID = tsdb.EncodeName(org, bucket) +-) +- +-func makeTypedSeries(m, prefix, field string, val interface{}, valueCount int, counts ...int) gen.SeriesGenerator { +- spec := gen.TimeSequenceSpec{Count: valueCount, Start: time.Unix(0, 0), Delta: time.Second} +- ts := gen.NewTimestampSequenceFromSpec(spec) +- var vg gen.TimeValuesSequence +- switch val := val.(type) { +- case float64: +- vg = gen.NewTimeFloatValuesSequence(spec.Count, ts, gen.NewFloatConstantValuesSequence(val)) +- case int64: +- vg = gen.NewTimeIntegerValuesSequence(spec.Count, ts, gen.NewIntegerConstantValuesSequence(val)) +- case int: +- vg = gen.NewTimeIntegerValuesSequence(spec.Count, ts, gen.NewIntegerConstantValuesSequence(int64(val))) +- case uint64: +- vg = gen.NewTimeUnsignedValuesSequence(spec.Count, ts, gen.NewUnsignedConstantValuesSequence(val)) +- case string: +- vg = gen.NewTimeStringValuesSequence(spec.Count, ts, gen.NewStringConstantValuesSequence(val)) +- case bool: +- vg = gen.NewTimeBooleanValuesSequence(spec.Count, ts, gen.NewBooleanConstantValuesSequence(val)) +- default: +- panic(fmt.Sprintf("unexpected type %T", val)) +- } +- +- return gen.NewSeriesGenerator(orgBucketID, []byte(field), vg, gen.NewTagsValuesSequenceCounts(m, field, prefix, counts)) +-} +- +-type sendSummary struct { +- groupCount int +- seriesCount int +- floatCount int +- integerCount int +- unsignedCount int +- stringCount int +- booleanCount int +-} +- +-func (ss *sendSummary) makeSendFunc() func(*datatypes.ReadResponse) error { +- return func(r *datatypes.ReadResponse) error { +- for i := range r.Frames { +- d := r.Frames[i].Data +- switch p := d.(type) { +- case *datatypes.ReadResponse_Frame_FloatPoints: +- ss.floatCount += len(p.FloatPoints.Values) +- case *datatypes.ReadResponse_Frame_IntegerPoints: +- ss.integerCount += 
len(p.IntegerPoints.Values) +- case *datatypes.ReadResponse_Frame_UnsignedPoints: +- ss.unsignedCount += len(p.UnsignedPoints.Values) +- case *datatypes.ReadResponse_Frame_StringPoints: +- ss.stringCount += len(p.StringPoints.Values) +- case *datatypes.ReadResponse_Frame_BooleanPoints: +- ss.booleanCount += len(p.BooleanPoints.Values) +- case *datatypes.ReadResponse_Frame_Series: +- ss.seriesCount++ +- case *datatypes.ReadResponse_Frame_Group: +- ss.groupCount++ +- default: +- panic("unexpected") +- } +- } +- return nil +- } +-} +- +-func TestResponseWriter_WriteResultSet(t *testing.T) { +- t.Run("normal", func(t *testing.T) { +- t.Run("all types one series each", func(t *testing.T) { +- exp := sendSummary{ +- seriesCount: 5, +- floatCount: 500, +- integerCount: 400, +- unsignedCount: 300, +- stringCount: 200, +- booleanCount: 100, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- var gens []gen.SeriesGenerator +- +- gens = append(gens, makeTypedSeries("m0", "t", "ff", 3.3, exp.floatCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "if", 100, exp.integerCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "uf", uint64(25), exp.unsignedCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "sf", "foo", exp.stringCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "bf", false, exp.booleanCount, 1)) +- +- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) +- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- err := w.WriteResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- t.Run("multi-series floats", func(t *testing.T) { +- exp := sendSummary{ +- seriesCount: 5, +- floatCount: 8600, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "f0", 3.3, 2000, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f1", 5.3, 1500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f2", 5.3, 2500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f3", -2.2, 900, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f4", -9.2, 1700, 1)) +- +- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) +- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- err := w.WriteResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- +- t.Run("multi-series strings", func(t *testing.T) { +- exp := sendSummary{ +- seriesCount: 4, +- stringCount: 6900, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 1500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 300), 2500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s3", strings.Repeat("ddd", 200), 900, 1)) +- +- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) +- rs := 
reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- err := w.WriteResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- +- t.Run("writer doesn't send series with no values", func(t *testing.T) { +- exp := sendSummary{ +- seriesCount: 2, +- stringCount: 3700, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 0, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 100), 1700, 1)) +- cur := newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) +- +- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- err := w.WriteResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- }) +- +- t.Run("error conditions", func(t *testing.T) { +- t.Run("writer returns stream error", func(t *testing.T) { +- exp := errors.New("no write") +- +- stream := mock.NewResponseStream() +- stream.SendFunc = func(r *datatypes.ReadResponse) error { return exp } +- w := reads.NewResponseWriter(stream, 0) +- +- cur := newSeriesGeneratorSeriesCursor(makeTypedSeries("m0", "t", "f0", strings.Repeat("0", 1000), 2000, 1)) +- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- _ = w.WriteResultSet(rs) +- assert.Equal(t, w.Err(), exp) +- }) +- }) +- +- t.Run("issues", func(t *testing.T) { +- t.Run("short write", func(t *testing.T) { +- t.Run("single string series", func(t *testing.T) { +- exp := sendSummary{seriesCount: 1, stringCount: 1020} +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- cur := newSeriesGeneratorSeriesCursor(makeTypedSeries("m0", "t", "f0", strings.Repeat("0", 1000), exp.stringCount, 1)) +- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- err := w.WriteResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- +- t.Run("single float series", func(t *testing.T) { +- exp := sendSummary{seriesCount: 1, floatCount: 50500} +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- cur := newSeriesGeneratorSeriesCursor(makeTypedSeries("m0", "t", "f0", 5.5, exp.floatCount, 1)) +- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- err := w.WriteResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- +- t.Run("multi series", func(t *testing.T) { +- exp := sendSummary{seriesCount: 2, stringCount: 3700} +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 1000), 2200, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 1000), 1500, 1)) +- +- cur := 
newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)) +- rs := reads.NewFilteredResultSet(context.Background(), &datatypes.ReadFilterRequest{}, cur) +- err := w.WriteResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- }) +- }) +-} +- +-func TestResponseWriter_WriteGroupResultSet(t *testing.T) { +- t.Run("normal", func(t *testing.T) { +- t.Run("all types one series each", func(t *testing.T) { +- exp := sendSummary{ +- groupCount: 1, +- seriesCount: 5, +- floatCount: 500, +- integerCount: 400, +- unsignedCount: 300, +- stringCount: 200, +- booleanCount: 100, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- newCursor := func() (cursor reads.SeriesCursor, e error) { +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "ff", 3.3, exp.floatCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "if", 100, exp.integerCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "uf", uint64(25), exp.unsignedCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "sf", "foo", exp.stringCount, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "bf", false, exp.booleanCount, 1)) +- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil +- } +- +- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) +- err := w.WriteGroupResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- t.Run("multi-series floats", func(t *testing.T) { +- exp := sendSummary{ +- groupCount: 1, +- seriesCount: 5, +- floatCount: 8600, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- newCursor := func() (cursor reads.SeriesCursor, e error) { +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "f0", 3.3, 2000, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f1", 5.3, 1500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f2", 5.3, 2500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f3", -2.2, 900, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "f4", -9.2, 1700, 1)) +- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil +- } +- +- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) +- err := w.WriteGroupResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- +- t.Run("multi-series strings", func(t *testing.T) { +- exp := sendSummary{ +- groupCount: 1, +- seriesCount: 4, +- stringCount: 6900, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- newCursor := func() (cursor reads.SeriesCursor, e error) { +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 1500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 300), 2500, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s3", strings.Repeat("ddd", 200), 900, 1)) 
+- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil +- } +- +- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) +- err := w.WriteGroupResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- +- t.Run("writer doesn't send series with no values", func(t *testing.T) { +- exp := sendSummary{ +- groupCount: 1, +- seriesCount: 2, +- stringCount: 3700, +- } +- var ss sendSummary +- +- stream := mock.NewResponseStream() +- stream.SendFunc = ss.makeSendFunc() +- w := reads.NewResponseWriter(stream, 0) +- +- newCursor := func() (cursor reads.SeriesCursor, e error) { +- var gens []gen.SeriesGenerator +- gens = append(gens, makeTypedSeries("m0", "t", "s0", strings.Repeat("aaa", 100), 2000, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s1", strings.Repeat("bbb", 200), 0, 1)) +- gens = append(gens, makeTypedSeries("m0", "t", "s2", strings.Repeat("ccc", 100), 1700, 1)) +- return newSeriesGeneratorSeriesCursor(gen.NewMergedSeriesGenerator(gens)), nil +- } +- +- rs := reads.NewGroupResultSet(context.Background(), &datatypes.ReadGroupRequest{Group: datatypes.GroupNone}, newCursor) +- err := w.WriteGroupResultSet(rs) +- if err != nil { +- t.Fatalf("unexpected err: %v", err) +- } +- w.Flush() +- +- assert.Equal(t, ss, exp) +- }) +- }) +-} +diff --git b/storage/reads/store.go a/storage/reads/store.go +index 8918794b37..655d12d21c 100644 +--- b/storage/reads/store.go ++++ a/storage/reads/store.go +@@ -80,5 +80,5 @@ type Store interface { + TagKeys(ctx context.Context, req *datatypes.TagKeysRequest) (cursors.StringIterator, error) + TagValues(ctx context.Context, req *datatypes.TagValuesRequest) (cursors.StringIterator, error) + +- GetSource(orgID, bucketID uint64) proto.Message ++ GetSource(db, rp string) proto.Message + } diff --git a/storage/reads/datatypes/gen.go b/storage/reads/datatypes/gen.go new file mode 100644 index 00000000000..54b5a9699b0 --- /dev/null +++ b/storage/reads/datatypes/gen.go @@ -0,0 +1 @@ +package datatypes diff --git a/storage/reads/flux_predicate.go b/storage/reads/flux_predicate.go new file mode 100644 index 00000000000..1895d710059 --- /dev/null +++ b/storage/reads/flux_predicate.go @@ -0,0 +1,183 @@ +package reads + +import ( + "fmt" + + "github.com/influxdata/flux/ast" + "github.com/influxdata/flux/semantic" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/storage/reads/datatypes" + "github.com/pkg/errors" +) + +const ( + fieldKey = "_field" + measurementKey = "_measurement" + valueKey = "_value" +) + +func toStoragePredicate(f *semantic.FunctionExpression) (*datatypes.Predicate, error) { + if f.Block.Parameters == nil || len(f.Block.Parameters.List) != 1 { + return nil, errors.New("storage predicate functions must have exactly one parameter") + } + + root, err := toStoragePredicateHelper(f.Block.Body.(semantic.Expression), f.Block.Parameters.List[0].Key.Name) + if err != nil { + return nil, err + } + + return &datatypes.Predicate{ + Root: root, + }, nil +} + +func toStoragePredicateHelper(n semantic.Expression, objectName string) (*datatypes.Node, error) { + switch n := n.(type) { + case *semantic.LogicalExpression: + left, err := toStoragePredicateHelper(n.Left, objectName) + if err != nil { + return nil, errors.Wrap(err, "left hand side") + } + right, err := toStoragePredicateHelper(n.Right, objectName) + if err != nil { + return nil, errors.Wrap(err, "right hand side") + 
} + children := []*datatypes.Node{left, right} + switch n.Operator { + case ast.AndOperator: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeLogicalExpression, + Value: &datatypes.Node_Logical_{Logical: datatypes.LogicalAnd}, + Children: children, + }, nil + case ast.OrOperator: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeLogicalExpression, + Value: &datatypes.Node_Logical_{Logical: datatypes.LogicalOr}, + Children: children, + }, nil + default: + return nil, fmt.Errorf("unknown logical operator %v", n.Operator) + } + case *semantic.BinaryExpression: + left, err := toStoragePredicateHelper(n.Left, objectName) + if err != nil { + return nil, errors.Wrap(err, "left hand side") + } + right, err := toStoragePredicateHelper(n.Right, objectName) + if err != nil { + return nil, errors.Wrap(err, "right hand side") + } + children := []*datatypes.Node{left, right} + op, err := toComparisonOperator(n.Operator) + if err != nil { + return nil, err + } + return &datatypes.Node{ + NodeType: datatypes.NodeTypeComparisonExpression, + Value: &datatypes.Node_Comparison_{Comparison: op}, + Children: children, + }, nil + case *semantic.StringLiteral: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_StringValue{ + StringValue: n.Value, + }, + }, nil + case *semantic.IntegerLiteral: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_IntegerValue{ + IntegerValue: n.Value, + }, + }, nil + case *semantic.BooleanLiteral: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_BooleanValue{ + BooleanValue: n.Value, + }, + }, nil + case *semantic.FloatLiteral: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_FloatValue{ + FloatValue: n.Value, + }, + }, nil + case *semantic.RegexpLiteral: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_RegexValue{ + RegexValue: n.Value.String(), + }, + }, nil + case *semantic.MemberExpression: + // Sanity check that the object is the objectName identifier + if ident, ok := n.Object.(*semantic.IdentifierExpression); !ok || ident.Name != objectName { + return nil, fmt.Errorf("unknown object %q", n.Object) + } + switch n.Property { + case fieldKey: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeTagRef, + Value: &datatypes.Node_TagRefValue{ + TagRefValue: models.FieldKeyTagKey, + }, + }, nil + case measurementKey: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeTagRef, + Value: &datatypes.Node_TagRefValue{ + TagRefValue: models.MeasurementTagKey, + }, + }, nil + case valueKey: + return &datatypes.Node{ + NodeType: datatypes.NodeTypeFieldRef, + Value: &datatypes.Node_FieldRefValue{ + FieldRefValue: valueKey, + }, + }, nil + + } + return &datatypes.Node{ + NodeType: datatypes.NodeTypeTagRef, + Value: &datatypes.Node_TagRefValue{ + TagRefValue: n.Property, + }, + }, nil + case *semantic.DurationLiteral: + return nil, errors.New("duration literals not supported in storage predicates") + case *semantic.DateTimeLiteral: + return nil, errors.New("time literals not supported in storage predicates") + default: + return nil, fmt.Errorf("unsupported semantic expression type %T", n) + } +} + +func toComparisonOperator(o ast.OperatorKind) (datatypes.Node_Comparison, error) { + switch o { + case ast.EqualOperator: + return datatypes.ComparisonEqual, nil + case ast.NotEqualOperator: + return datatypes.ComparisonNotEqual, nil + case ast.RegexpMatchOperator: + return 
datatypes.ComparisonRegex, nil + case ast.NotRegexpMatchOperator: + return datatypes.ComparisonNotRegex, nil + case ast.StartsWithOperator: + return datatypes.ComparisonStartsWith, nil + case ast.LessThanOperator: + return datatypes.ComparisonLess, nil + case ast.LessThanEqualOperator: + return datatypes.ComparisonLessEqual, nil + case ast.GreaterThanOperator: + return datatypes.ComparisonGreater, nil + case ast.GreaterThanEqualOperator: + return datatypes.ComparisonGreaterEqual, nil + default: + return 0, fmt.Errorf("unknown operator %v", o) + } +} diff --git a/storage/reads/reader.go b/storage/reads/flux_reader.go similarity index 94% rename from storage/reads/reader.go rename to storage/reads/flux_reader.go index 38acd4157b8..e10bf3eae7d 100644 --- a/storage/reads/reader.go +++ b/storage/reads/flux_reader.go @@ -36,6 +36,7 @@ func (r *storeReader) ReadFilter(ctx context.Context, spec influxdb.ReadFilterSp ctx: ctx, s: r.s, spec: spec, + cache: newTagsCache(0), alloc: alloc, }, nil } @@ -45,6 +46,7 @@ func (r *storeReader) ReadGroup(ctx context.Context, spec influxdb.ReadGroupSpec ctx: ctx, s: r.s, spec: spec, + cache: newTagsCache(0), alloc: alloc, }, nil } @@ -96,6 +98,7 @@ type filterIterator struct { s Store spec influxdb.ReadFilterSpec stats cursors.CursorStats + cache *tagsCache alloc *memory.Allocator } @@ -137,7 +140,7 @@ func (fi *filterIterator) Do(f func(flux.Table) error) error { return nil } - return fi.handleRead(filterDuplicateTables(f), rs) + return fi.handleRead(f, rs) } func (fi *filterIterator) handleRead(f func(flux.Table) error, rs ResultSet) error { @@ -155,6 +158,7 @@ func (fi *filterIterator) handleRead(f func(flux.Table) error, rs ResultSet) err cur.Close() } rs.Close() + fi.cache.Release() }() READ: @@ -171,19 +175,19 @@ READ: switch typedCur := cur.(type) { case cursors.IntegerArrayCursor: cols, defs := determineTableColsForSeries(rs.Tags(), flux.TInt) - table = newIntegerTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.alloc) + table = newIntegerTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) case cursors.FloatArrayCursor: cols, defs := determineTableColsForSeries(rs.Tags(), flux.TFloat) - table = newFloatTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.alloc) + table = newFloatTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) case cursors.UnsignedArrayCursor: cols, defs := determineTableColsForSeries(rs.Tags(), flux.TUInt) - table = newUnsignedTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.alloc) + table = newUnsignedTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) case cursors.BooleanArrayCursor: cols, defs := determineTableColsForSeries(rs.Tags(), flux.TBool) - table = newBooleanTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.alloc) + table = newBooleanTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) case cursors.StringArrayCursor: cols, defs := determineTableColsForSeries(rs.Tags(), flux.TString) - table = newStringTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.alloc) + table = newStringTable(done, typedCur, bnds, key, cols, rs.Tags(), defs, fi.cache, fi.alloc) default: panic(fmt.Sprintf("unreachable: %T", typedCur)) } @@ -218,6 +222,7 @@ type groupIterator struct { s Store spec influxdb.ReadGroupSpec stats cursors.CursorStats + cache *tagsCache alloc *memory.Allocator } @@ -267,7 +272,7 @@ func (gi *groupIterator) Do(f func(flux.Table) error) error { if rs == nil { return nil } - return 
gi.handleRead(filterDuplicateTables(f), rs) + return gi.handleRead(f, rs) } func (gi *groupIterator) handleRead(f func(flux.Table) error, rs GroupResultSet) error { @@ -289,6 +294,7 @@ func (gi *groupIterator) handleRead(f func(flux.Table) error, rs GroupResultSet) gc.Close() } rs.Close() + gi.cache.Release() }() gc = rs.Next() @@ -313,19 +319,19 @@ READ: switch typedCur := cur.(type) { case cursors.IntegerArrayCursor: cols, defs := determineTableColsForGroup(gc.Keys(), flux.TInt) - table = newIntegerGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.alloc) + table = newIntegerGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) case cursors.FloatArrayCursor: cols, defs := determineTableColsForGroup(gc.Keys(), flux.TFloat) - table = newFloatGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.alloc) + table = newFloatGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) case cursors.UnsignedArrayCursor: cols, defs := determineTableColsForGroup(gc.Keys(), flux.TUInt) - table = newUnsignedGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.alloc) + table = newUnsignedGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) case cursors.BooleanArrayCursor: cols, defs := determineTableColsForGroup(gc.Keys(), flux.TBool) - table = newBooleanGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.alloc) + table = newBooleanGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) case cursors.StringArrayCursor: cols, defs := determineTableColsForGroup(gc.Keys(), flux.TString) - table = newStringGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.alloc) + table = newStringGroupTable(done, gc, typedCur, bnds, key, cols, gc.Tags(), defs, gi.cache, gi.alloc) default: panic(fmt.Sprintf("unreachable: %T", typedCur)) } @@ -648,27 +654,3 @@ func (ti *tagValuesIterator) handleRead(f func(flux.Table) error, rs cursors.Str func (ti *tagValuesIterator) Statistics() cursors.CursorStats { return cursors.CursorStats{} } - -type duplicateFilter struct { - f func(tbl flux.Table) error - seen *execute.GroupLookup -} - -func filterDuplicateTables(f func(tbl flux.Table) error) func(tbl flux.Table) error { - filter := &duplicateFilter{ - f: f, - seen: execute.NewGroupLookup(), - } - return filter.Process -} - -func (df *duplicateFilter) Process(tbl flux.Table) error { - // Identify duplicate keys within the cursor. - if _, ok := df.seen.Lookup(tbl.Key()); ok { - // Discard this table. - tbl.Done() - return nil - } - df.seen.Set(tbl.Key(), true) - return df.f(tbl) -} diff --git a/storage/reads/table.gen.go b/storage/reads/flux_table.gen.go similarity index 95% rename from storage/reads/table.gen.go rename to storage/reads/flux_table.gen.go index d4289aadcb6..a4e6f2bb366 100644 --- a/storage/reads/table.gen.go +++ b/storage/reads/flux_table.gen.go @@ -2,7 +2,7 @@ // https://github.com/benbjohnson/tmpl // // DO NOT EDIT! 
-// Source: table.gen.go.tmpl +// Source: flux_table.gen.go.tmpl package reads @@ -37,10 +37,11 @@ func newFloatTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *floatTable { t := &floatTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), cur: cur, } t.readTags(tags) @@ -113,10 +114,11 @@ func newFloatGroupTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *floatGroupTable { t := &floatGroupTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), gc: gc, cur: cur, } @@ -220,10 +222,11 @@ func newIntegerTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *integerTable { t := &integerTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), cur: cur, } t.readTags(tags) @@ -296,10 +299,11 @@ func newIntegerGroupTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *integerGroupTable { t := &integerGroupTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), gc: gc, cur: cur, } @@ -403,10 +407,11 @@ func newUnsignedTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *unsignedTable { t := &unsignedTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), cur: cur, } t.readTags(tags) @@ -479,10 +484,11 @@ func newUnsignedGroupTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *unsignedGroupTable { t := &unsignedGroupTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), gc: gc, cur: cur, } @@ -586,10 +592,11 @@ func newStringTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *stringTable { t := &stringTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), cur: cur, } t.readTags(tags) @@ -662,10 +669,11 @@ func newStringGroupTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *stringGroupTable { t := &stringGroupTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), gc: gc, cur: cur, } @@ -769,10 +777,11 @@ func newBooleanTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *booleanTable { t := &booleanTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), cur: cur, } t.readTags(tags) @@ -845,10 +854,11 @@ func newBooleanGroupTable( cols []flux.ColMeta, tags models.Tags, defs [][]byte, + cache *tagsCache, alloc *memory.Allocator, ) *booleanGroupTable { t := &booleanGroupTable{ - table: newTable(done, bounds, key, cols, defs, alloc), + table: newTable(done, bounds, key, cols, defs, cache, alloc), gc: gc, cur: cur, } diff --git a/storage/reads/table.go b/storage/reads/flux_table.go similarity index 92% rename from storage/reads/table.go rename to 
storage/reads/flux_table.go
index 3aa7485e230..952073c3147 100644
--- a/storage/reads/table.go
+++ b/storage/reads/flux_table.go
@@ -29,6 +29,7 @@ type table struct {
 	err error
 
 	cancelled, used int32
+	cache           *tagsCache
 	alloc           *memory.Allocator
 }
 
@@ -38,6 +39,7 @@ func newTable(
 	key flux.GroupKey,
 	cols []flux.ColMeta,
 	defs [][]byte,
+	cache *tagsCache,
 	alloc *memory.Allocator,
 ) table {
 	return table{
@@ -47,6 +49,7 @@
 		tags:  make([][]byte, len(cols)),
 		defs:  defs,
 		cols:  cols,
+		cache: cache,
 		alloc: alloc,
 	}
 }
@@ -195,30 +198,15 @@ func (t *table) appendTags(cr *colReader) {
 	for j := range t.cols {
 		v := t.tags[j]
 		if v != nil {
-			b := arrow.NewStringBuilder(t.alloc)
-			b.Reserve(cr.l)
-			b.ReserveData(cr.l * len(v))
-			for i := 0; i < cr.l; i++ {
-				b.Append(v)
-			}
-			cr.cols[j] = b.NewArray()
-			b.Release()
+			cr.cols[j] = t.cache.GetTag(string(v), cr.l, t.alloc)
 		}
 	}
 }
 
 // appendBounds fills the colBufs for the time bounds
 func (t *table) appendBounds(cr *colReader) {
-	bounds := []execute.Time{t.bounds.Start, t.bounds.Stop}
-	for j := range []int{startColIdx, stopColIdx} {
-		b := arrow.NewIntBuilder(t.alloc)
-		b.Reserve(cr.l)
-		for i := 0; i < cr.l; i++ {
-			b.UnsafeAppend(int64(bounds[j]))
-		}
-		cr.cols[j] = b.NewArray()
-		b.Release()
-	}
+	start, stop := t.cache.GetBounds(t.bounds, cr.l, t.alloc)
+	cr.cols[startColIdx], cr.cols[stopColIdx] = start, stop
 }
 
 func (t *table) closeDone() {
diff --git a/storage/reads/flux_table_test.go b/storage/reads/flux_table_test.go
new file mode 100644
index 00000000000..ff8698b8939
--- /dev/null
+++ b/storage/reads/flux_table_test.go
@@ -0,0 +1 @@
+package reads_test
diff --git a/storage/reads/flux_tags_cache.go b/storage/reads/flux_tags_cache.go
new file mode 100644
index 00000000000..30a59c4d156
--- /dev/null
+++ b/storage/reads/flux_tags_cache.go
@@ -0,0 +1,214 @@
+package reads
+
+import (
+	"container/list"
+
+	"github.com/apache/arrow/go/arrow"
+	"github.com/apache/arrow/go/arrow/array"
+	"github.com/apache/arrow/go/arrow/memory"
+	"github.com/influxdata/flux/execute"
+)
+
+// defaultMaxLengthForTagsCache is the default maximum number of
+// tag values that will be memoized when retrieving tags from the
+// tags cache.
+const defaultMaxLengthForTagsCache = 100
+
+type tagsCache struct {
+	// startColumn is a special slot for holding the start column.
+	startColumn *array.Int64
+
+	// stopColumn is a special slot for holding the stop column.
+	stopColumn *array.Int64
+
+	// tags holds cached arrays for various tag values.
+	// An LRU keeps track of the least recently used item in the
+	// cache so that it can be ejected. An LRU is used here because
+	// we cannot be certain whether a tag value will be used again,
+	// and we do not want to retain a reference to an array that may
+	// have already been released. An LRU is a good fit since the
+	// value it ejects is the one least likely to be used again.
+	//
+	// The performance gain from reusing arrays for tag values
+	// depends on the order of the tags coming out of storage.
+	// It is possible for a value to be ejected from the cache
+	// before it would have been reused.
+	//
+	// The map is keyed by the tag values, not the tag keys.
+	// An array can be shared between two different tag keys that
+	// have the same value.
+	tags      map[string]*list.Element
+	lru       *list.List
+	maxLength int
+}
+
+// newTagsCache creates a tags cache that retains the last sz
+// entries. If sz is zero, the default is used.
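+//
+// A tags cache is created per read iterator (ReadFilter and ReadGroup
+// each construct one with newTagsCache(0)) and is not guarded by a
+// lock, so it should not be shared across goroutines, and Release
+// must be called when the iterator finishes. A rough usage sketch,
+// assuming an Arrow allocator mem:
+//
+//	cache := newTagsCache(0)
+//	defer cache.Release()
+//	col := cache.GetTag("us-west", 1024, mem)  // builds and caches the column
+//	col2 := cache.GetTag("us-west", 512, mem)  // slices the cached column
+//	col.Release()
+//	col2.Release()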
+func newTagsCache(sz int) *tagsCache {
+	return &tagsCache{maxLength: sz}
+}
+
+// GetBounds will return arrays that match the bounds.
+// If an array that is within the cache works with the bounds
+// and can be sliced to the length, a reference to it will be
+// returned.
+func (c *tagsCache) GetBounds(b execute.Bounds, l int, mem memory.Allocator) (start *array.Int64, stop *array.Int64) {
+	if c == nil {
+		start = c.createBounds(b.Start, l, mem)
+		stop = c.createBounds(b.Stop, l, mem)
+		return start, stop
+	}
+
+	if c.startColumn != nil {
+		start = c.getOrReplaceBounds(&c.startColumn, b.Start, l, mem)
+	} else {
+		start = c.createBounds(b.Start, l, mem)
+		start.Retain()
+		c.startColumn = start
+	}
+
+	if c.stopColumn != nil {
+		stop = c.getOrReplaceBounds(&c.stopColumn, b.Stop, l, mem)
+	} else {
+		stop = c.createBounds(b.Stop, l, mem)
+		stop.Retain()
+		c.stopColumn = stop
+	}
+
+	return start, stop
+}
+
+// getOrReplaceBounds will get or replace an array of timestamps
+// and return a new reference to it.
+func (c *tagsCache) getOrReplaceBounds(arr **array.Int64, ts execute.Time, l int, mem memory.Allocator) *array.Int64 {
+	if (*arr).Len() < l {
+		(*arr).Release()
+		*arr = c.createBounds(ts, l, mem)
+		(*arr).Retain()
+		return *arr
+	} else if (*arr).Len() == l {
+		(*arr).Retain()
+		return *arr
+	}
+
+	// The cached array is longer than the desired length,
+	// so we can slice it to the requested length.
+	// NewSlice will automatically create a new reference to the
+	// passed in array so we do not need to manually retain.
+	data := array.NewSliceData((*arr).Data(), 0, int64(l))
+	vs := array.NewInt64Data(data)
+	data.Release()
+	return vs
+}
+
+func (c *tagsCache) createBounds(ts execute.Time, l int, mem memory.Allocator) *array.Int64 {
+	b := array.NewInt64Builder(mem)
+	b.Resize(l)
+	for i := 0; i < l; i++ {
+		b.Append(int64(ts))
+	}
+	return b.NewInt64Array()
+}
+
+// GetTag returns a binary arrow array that contains the value
+// repeated l times. If an array with the same value and a length
+// greater than or equal to l exists in the cache,
+// a reference to the data will be retained and returned.
+// Otherwise, the allocator will be used to construct a new column.
+func (c *tagsCache) GetTag(value string, l int, mem memory.Allocator) *array.Binary {
+	if l == 0 {
+		return c.createTag(value, l, mem)
+	}
+
+	if elem, ok := c.tags[value]; ok {
+		return c.getOrReplaceTag(elem, value, l, mem)
+	}
+
+	arr := c.createTag(value, l, mem)
+	if c.lru == nil {
+		c.lru = list.New()
+	}
+	if c.tags == nil {
+		c.tags = make(map[string]*list.Element)
+	}
+	c.tags[value] = c.lru.PushFront(arr)
+	c.maintainLRU()
+	arr.Retain()
+	return arr
+}
+
+func (c *tagsCache) getOrReplaceTag(elem *list.Element, value string, l int, mem memory.Allocator) *array.Binary {
+	// Move this element to the front of the lru.
+	c.lru.MoveBefore(elem, c.lru.Front())
+
+	// Determine if the array can be reused.
+	arr := elem.Value.(*array.Binary)
+	if arr.Len() < l {
+		// Create a new array with the appropriate length since
+		// this one cannot be reused here.
+		arr.Release()
+		arr = c.createTag(value, l, mem)
+		elem.Value = arr
+		arr.Retain()
+		return arr
+	} else if arr.Len() == l {
+		arr.Retain()
+		return arr
+	}
+
+	// The cached array is longer than the desired length,
+	// so we can slice it to the requested length.
+	// Slice will automatically create a new reference to the
+	// passed in array so we do not need to manually retain.
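+	// As with getOrReplaceBounds, the caller receives its own
+	// reference via the slice, while the cache keeps its reference
+	// to the full-length array until the entry is ejected or
+	// Release is called.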
+ data := array.NewSliceData(arr.Data(), 0, int64(l)) + vs := array.NewBinaryData(data) + data.Release() + return vs +} + +// maintainLRU will ensure the lru cache maintains the appropriate +// length by ejecting the least recently used value from the cache +// until the cache is the appropriate size. +func (c *tagsCache) maintainLRU() { + max := c.maxLength + if max == 0 { + max = defaultMaxLengthForTagsCache + } + if c.lru.Len() <= max { + return + } + arr := c.lru.Remove(c.lru.Back()).(*array.Binary) + value := arr.ValueString(0) + delete(c.tags, value) + arr.Release() +} + +func (c *tagsCache) createTag(value string, l int, mem memory.Allocator) *array.Binary { + b := array.NewBinaryBuilder(mem, arrow.BinaryTypes.String) + b.Resize(l) + b.ReserveData(l * len(value)) + for i := 0; i < l; i++ { + b.AppendString(value) + } + return b.NewBinaryArray() +} + +// Release will release all references to cached tag columns. +func (c *tagsCache) Release() { + if c.startColumn != nil { + c.startColumn.Release() + c.startColumn = nil + } + + if c.stopColumn != nil { + c.stopColumn.Release() + c.stopColumn = nil + } + + for _, elem := range c.tags { + elem.Value.(*array.Binary).Release() + } + c.tags = nil + c.lru = nil +} diff --git a/storage/reads/gen.go b/storage/reads/gen.go new file mode 100644 index 00000000000..8eee6fe0b5c --- /dev/null +++ b/storage/reads/gen.go @@ -0,0 +1 @@ +package reads diff --git a/storage/reads/group_resultset.go b/storage/reads/group_resultset.go index 5d6ca33a454..21e0e2b4c9d 100644 --- a/storage/reads/group_resultset.go +++ b/storage/reads/group_resultset.go @@ -23,7 +23,7 @@ type groupResultSet struct { keys [][]byte nilSort []byte rgc groupByCursor - km keyMerger + km KeyMerger newCursorFn func() (SeriesCursor, error) nextGroupFn func(c *groupResultSet) GroupCursor @@ -38,7 +38,7 @@ type GroupOption func(g *groupResultSet) // other value func GroupOptionNilSortLo() GroupOption { return func(g *groupResultSet) { - g.nilSort = nilSortLo + g.nilSort = NilSortLo } } @@ -48,7 +48,7 @@ func NewGroupResultSet(ctx context.Context, req *datatypes.ReadGroupRequest, new req: req, agg: req.Aggregate, keys: make([][]byte, len(req.GroupKeys)), - nilSort: nilSortHi, + nilSort: NilSortHi, newCursorFn: newCursorFn, } @@ -89,13 +89,13 @@ func NewGroupResultSet(ctx context.Context, req *datatypes.ReadGroupRequest, new return g } -// nilSort values determine the lexicographical order of nil values in the +// NilSort values determine the lexicographical order of nil values in the // partition key var ( // nil sorts lowest - nilSortLo = []byte{0x00} + NilSortLo = []byte{0x00} // nil sorts highest - nilSortHi = []byte{0xff} // sort nil values + NilSortHi = []byte{0xff} ) func (g *groupResultSet) Err() error { return nil } @@ -161,7 +161,7 @@ func groupNoneNextGroup(g *groupResultSet) GroupCursor { mb: g.mb, agg: g.agg, cur: cur, - keys: g.km.get(), + keys: g.km.Get(), } } @@ -174,13 +174,13 @@ func groupNoneSort(g *groupResultSet) (int, error) { } allTime := g.req.Hints.HintSchemaAllTime() - g.km.clear() + g.km.Clear() n := 0 row := cur.Next() for row != nil { if allTime || g.seriesHasPoints(row) { n++ - g.km.mergeTagKeys(row.Tags) + g.km.MergeTagKeys(row.Tags) } row = cur.Next() } @@ -195,16 +195,16 @@ func groupByNextGroup(g *groupResultSet) GroupCursor { g.rgc.vals[i] = row.Tags.Get(g.keys[i]) } - g.km.clear() + g.km.Clear() rowKey := row.SortKey j := g.i for j < len(g.rows) && bytes.Equal(rowKey, g.rows[j].SortKey) { - g.km.mergeTagKeys(g.rows[j].Tags) + 
g.km.MergeTagKeys(g.rows[j].Tags) j++ } g.rgc.reset(g.rows[g.i:j]) - g.rgc.keys = g.km.get() + g.rgc.keys = g.km.Get() g.i = j if j == len(g.rows) { diff --git a/storage/reads/eval.go b/storage/reads/influxql_eval.go similarity index 100% rename from storage/reads/eval.go rename to storage/reads/influxql_eval.go diff --git a/storage/reads/expr.go b/storage/reads/influxql_expr.go similarity index 100% rename from storage/reads/expr.go rename to storage/reads/influxql_expr.go diff --git a/storage/reads/influxql_predicate.go b/storage/reads/influxql_predicate.go new file mode 100644 index 00000000000..7aeb8f78a7e --- /dev/null +++ b/storage/reads/influxql_predicate.go @@ -0,0 +1,274 @@ +package reads + +import ( + "regexp" + + "github.com/influxdata/influxdb/storage/reads/datatypes" + "github.com/influxdata/influxql" + "github.com/pkg/errors" +) + +const ( + fieldRef = "$" +) + +// NodeToExpr transforms a predicate node to an influxql.Expr. +func NodeToExpr(node *datatypes.Node, remap map[string]string) (influxql.Expr, error) { + v := &nodeToExprVisitor{remap: remap} + WalkNode(v, node) + if err := v.Err(); err != nil { + return nil, err + } + + if len(v.exprs) > 1 { + return nil, errors.New("invalid expression") + } + + if len(v.exprs) == 0 { + return nil, nil + } + + // TODO(edd): It would be preferable if RewriteRegexConditions was a + // package level function in influxql. + stmt := &influxql.SelectStatement{ + Condition: v.exprs[0], + } + stmt.RewriteRegexConditions() + return stmt.Condition, nil +} + +type nodeToExprVisitor struct { + remap map[string]string + exprs []influxql.Expr + err error +} + +func (v *nodeToExprVisitor) Visit(n *datatypes.Node) NodeVisitor { + if v.err != nil { + return nil + } + + switch n.NodeType { + case datatypes.NodeTypeLogicalExpression: + if len(n.Children) > 1 { + op := influxql.AND + if n.GetLogical() == datatypes.LogicalOr { + op = influxql.OR + } + + WalkNode(v, n.Children[0]) + if v.err != nil { + return nil + } + + for i := 1; i < len(n.Children); i++ { + WalkNode(v, n.Children[i]) + if v.err != nil { + return nil + } + + if len(v.exprs) >= 2 { + lhs, rhs := v.pop2() + v.exprs = append(v.exprs, &influxql.BinaryExpr{LHS: lhs, Op: op, RHS: rhs}) + } + } + + return nil + } + + case datatypes.NodeTypeParenExpression: + if len(n.Children) != 1 { + v.err = errors.New("parenExpression expects one child") + return nil + } + + WalkNode(v, n.Children[0]) + if v.err != nil { + return nil + } + + if len(v.exprs) > 0 { + v.exprs = append(v.exprs, &influxql.ParenExpr{Expr: v.pop()}) + } + + return nil + + case datatypes.NodeTypeComparisonExpression: + WalkChildren(v, n) + + if len(v.exprs) < 2 { + v.err = errors.New("comparisonExpression expects two children") + return nil + } + + lhs, rhs := v.pop2() + + be := &influxql.BinaryExpr{LHS: lhs, RHS: rhs} + switch n.GetComparison() { + case datatypes.ComparisonEqual: + be.Op = influxql.EQ + case datatypes.ComparisonNotEqual: + be.Op = influxql.NEQ + case datatypes.ComparisonStartsWith: + // TODO(sgc): rewrite to anchored RE, as index does not support startsWith yet + v.err = errors.New("startsWith not implemented") + return nil + case datatypes.ComparisonRegex: + be.Op = influxql.EQREGEX + case datatypes.ComparisonNotRegex: + be.Op = influxql.NEQREGEX + case datatypes.ComparisonLess: + be.Op = influxql.LT + case datatypes.ComparisonLessEqual: + be.Op = influxql.LTE + case datatypes.ComparisonGreater: + be.Op = influxql.GT + case datatypes.ComparisonGreaterEqual: + be.Op = influxql.GTE + default: + v.err = 
errors.New("invalid comparison operator") + return nil + } + + v.exprs = append(v.exprs, be) + + return nil + + case datatypes.NodeTypeTagRef: + ref := n.GetTagRefValue() + if v.remap != nil { + if nk, ok := v.remap[ref]; ok { + ref = nk + } + } + + v.exprs = append(v.exprs, &influxql.VarRef{Val: ref, Type: influxql.Tag}) + return nil + + case datatypes.NodeTypeFieldRef: + v.exprs = append(v.exprs, &influxql.VarRef{Val: fieldRef}) + return nil + + case datatypes.NodeTypeLiteral: + switch val := n.Value.(type) { + case *datatypes.Node_StringValue: + v.exprs = append(v.exprs, &influxql.StringLiteral{Val: val.StringValue}) + + case *datatypes.Node_RegexValue: + // TODO(sgc): consider hashing the RegexValue and cache compiled version + re, err := regexp.Compile(val.RegexValue) + if err != nil { + v.err = err + } + v.exprs = append(v.exprs, &influxql.RegexLiteral{Val: re}) + return nil + + case *datatypes.Node_IntegerValue: + v.exprs = append(v.exprs, &influxql.IntegerLiteral{Val: val.IntegerValue}) + + case *datatypes.Node_UnsignedValue: + v.exprs = append(v.exprs, &influxql.UnsignedLiteral{Val: val.UnsignedValue}) + + case *datatypes.Node_FloatValue: + v.exprs = append(v.exprs, &influxql.NumberLiteral{Val: val.FloatValue}) + + case *datatypes.Node_BooleanValue: + v.exprs = append(v.exprs, &influxql.BooleanLiteral{Val: val.BooleanValue}) + + default: + v.err = errors.New("unexpected literal type") + return nil + } + + return nil + + default: + return v + } + return nil +} + +func (v *nodeToExprVisitor) Err() error { + return v.err +} + +func (v *nodeToExprVisitor) pop() influxql.Expr { + if len(v.exprs) == 0 { + panic("stack empty") + } + + var top influxql.Expr + top, v.exprs = v.exprs[len(v.exprs)-1], v.exprs[:len(v.exprs)-1] + return top +} + +func (v *nodeToExprVisitor) pop2() (influxql.Expr, influxql.Expr) { + if len(v.exprs) < 2 { + panic("stack empty") + } + + rhs := v.exprs[len(v.exprs)-1] + lhs := v.exprs[len(v.exprs)-2] + v.exprs = v.exprs[:len(v.exprs)-2] + return lhs, rhs +} + +func IsTrueBooleanLiteral(expr influxql.Expr) bool { + b, ok := expr.(*influxql.BooleanLiteral) + if ok { + return b.Val + } + return false +} + +func RewriteExprRemoveFieldValue(expr influxql.Expr) influxql.Expr { + return influxql.RewriteExpr(expr, func(expr influxql.Expr) influxql.Expr { + if be, ok := expr.(*influxql.BinaryExpr); ok { + if ref, ok := be.LHS.(*influxql.VarRef); ok { + if ref.Val == fieldRef { + return &influxql.BooleanLiteral{Val: true} + } + } + } + + return expr + }) +} + +type hasRefs struct { + refs []string + found []bool +} + +func (v *hasRefs) allFound() bool { + for _, val := range v.found { + if !val { + return false + } + } + return true +} + +func (v *hasRefs) Visit(node influxql.Node) influxql.Visitor { + if v.allFound() { + return nil + } + + if n, ok := node.(*influxql.VarRef); ok { + for i, r := range v.refs { + if !v.found[i] && r == n.Val { + v.found[i] = true + if v.allFound() { + return nil + } + } + } + } + return v +} + +func HasFieldValueKey(expr influxql.Expr) bool { + refs := hasRefs{refs: []string{fieldRef}, found: make([]bool, 1)} + influxql.Walk(&refs, expr) + return refs.found[0] +} diff --git a/storage/reads/influxql_predicate_test.go b/storage/reads/influxql_predicate_test.go new file mode 100644 index 00000000000..d8e0bc13ae9 --- /dev/null +++ b/storage/reads/influxql_predicate_test.go @@ -0,0 +1,92 @@ +package reads_test + +import ( + "testing" + + "github.com/influxdata/influxdb/storage/reads" + "github.com/influxdata/influxdb/storage/reads/datatypes" +) + 
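+// Both predicates below contain a field reference: a bare
+// `_value < 3000` comparison, and the same comparison AND-ed with a
+// `_measurement == "cpu"` tag match. NodeToExpr rewrites field
+// references as the special "$" variable, which HasFieldValueKey
+// then detects by walking the resulting influxql expression.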
+func TestHasFieldValueKey(t *testing.T) { + predicates := []*datatypes.Node{ + { + NodeType: datatypes.NodeTypeComparisonExpression, + Value: &datatypes.Node_Comparison_{ + Comparison: datatypes.ComparisonLess, + }, + Children: []*datatypes.Node{ + { + NodeType: datatypes.NodeTypeFieldRef, + Value: &datatypes.Node_FieldRefValue{ + FieldRefValue: "_value", + }, + }, + { + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_IntegerValue{ + IntegerValue: 3000, + }, + }, + }, + }, + { + NodeType: datatypes.NodeTypeLogicalExpression, + Value: &datatypes.Node_Logical_{ + Logical: datatypes.LogicalAnd, + }, + Children: []*datatypes.Node{ + { + NodeType: datatypes.NodeTypeComparisonExpression, + Value: &datatypes.Node_Comparison_{ + Comparison: datatypes.ComparisonEqual, + }, + Children: []*datatypes.Node{ + { + NodeType: datatypes.NodeTypeTagRef, + Value: &datatypes.Node_TagRefValue{ + TagRefValue: "_measurement", + }, + }, + { + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_StringValue{ + StringValue: "cpu", + }, + }, + }, + }, + { + NodeType: datatypes.NodeTypeComparisonExpression, + Value: &datatypes.Node_Comparison_{ + Comparison: datatypes.ComparisonLess, + }, + Children: []*datatypes.Node{ + { + NodeType: datatypes.NodeTypeFieldRef, + Value: &datatypes.Node_FieldRefValue{ + FieldRefValue: "_value", + }, + }, + { + NodeType: datatypes.NodeTypeLiteral, + Value: &datatypes.Node_IntegerValue{ + IntegerValue: 3000, + }, + }, + }, + }, + }, + }, + } + for _, predicate := range predicates { + t.Run("", func(t *testing.T) { + expr, err := reads.NodeToExpr(predicate, nil) + if err != nil { + t.Fatalf("unexpected error converting predicate to InfluxQL expression: %v", err) + } + if !reads.HasFieldValueKey(expr) { + t.Fatalf("did not find a field reference in %v", expr) + } + }) + } +} diff --git a/storage/reads/keymerger.go b/storage/reads/keymerger.go index 5b82b8672dc..ed5501d94e5 100644 --- a/storage/reads/keymerger.go +++ b/storage/reads/keymerger.go @@ -8,13 +8,13 @@ import ( ) // tagsKeyMerger is responsible for determining a merged set of tag keys -type keyMerger struct { +type KeyMerger struct { i int tmp [][]byte keys [2][][]byte } -func (km *keyMerger) clear() { +func (km *KeyMerger) Clear() { km.i = 0 km.keys[0] = km.keys[0][:0] if km.tmp != nil { @@ -25,17 +25,17 @@ func (km *keyMerger) clear() { } } -func (km *keyMerger) get() [][]byte { return km.keys[km.i&1] } +func (km *KeyMerger) Get() [][]byte { return km.keys[km.i&1] } -func (km *keyMerger) String() string { +func (km *KeyMerger) String() string { var s []string - for _, k := range km.get() { + for _, k := range km.Get() { s = append(s, string(k)) } return strings.Join(s, ",") } -func (km *keyMerger) mergeTagKeys(tags models.Tags) { +func (km *KeyMerger) MergeTagKeys(tags models.Tags) { if cap(km.tmp) < len(tags) { km.tmp = make([][]byte, len(tags)) } else { @@ -46,10 +46,10 @@ func (km *keyMerger) mergeTagKeys(tags models.Tags) { km.tmp[i] = tags[i].Key } - km.mergeKeys(km.tmp) + km.MergeKeys(km.tmp) } -func (km *keyMerger) mergeKeys(in [][]byte) { +func (km *KeyMerger) MergeKeys(in [][]byte) { keys := km.keys[km.i&1] i, j := 0, 0 for i < len(keys) && j < len(in) && bytes.Equal(keys[i], in[j]) { diff --git a/storage/reads/keymerger_test.go b/storage/reads/keymerger_test.go index 8f230acb761..88144055285 100644 --- a/storage/reads/keymerger_test.go +++ b/storage/reads/keymerger_test.go @@ -56,12 +56,12 @@ func TestKeyMerger_MergeTagKeys(t *testing.T) { }, } - var km keyMerger + var km KeyMerger for _, tt 
:= range tests { t.Run(tt.name, func(t *testing.T) { - km.clear() + km.Clear() for _, tags := range tt.tags { - km.mergeTagKeys(tags) + km.MergeTagKeys(tags) } if got := km.String(); !cmp.Equal(got, tt.exp) { @@ -120,12 +120,12 @@ func TestKeyMerger_MergeKeys(t *testing.T) { }, } - var km keyMerger + var km KeyMerger for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - km.clear() + km.Clear() for _, keys := range tt.keys { - km.mergeKeys(keys) + km.MergeKeys(keys) } if got := km.String(); !cmp.Equal(got, tt.exp) { @@ -158,12 +158,12 @@ func BenchmarkKeyMerger_MergeKeys(b *testing.B) { b.Run(strconv.Itoa(n), func(b *testing.B) { b.ResetTimer() - var km keyMerger + var km KeyMerger for i := 0; i < b.N; i++ { for j := 0; j < n; j++ { - km.mergeKeys(keys[rand.Int()%len(keys)]) + km.MergeKeys(keys[rand.Int()%len(keys)]) } - km.clear() + km.Clear() } }) } @@ -192,12 +192,12 @@ func BenchmarkKeyMerger_MergeTagKeys(b *testing.B) { b.Run(strconv.Itoa(n), func(b *testing.B) { b.ResetTimer() - var km keyMerger + var km KeyMerger for i := 0; i < b.N; i++ { for j := 0; j < n; j++ { - km.mergeTagKeys(tags[rand.Int()%len(tags)]) + km.MergeTagKeys(tags[rand.Int()%len(tags)]) } - km.clear() + km.Clear() } }) } diff --git a/storage/reads/merge.go b/storage/reads/merge.go deleted file mode 100644 index da0032b6192..00000000000 --- a/storage/reads/merge.go +++ /dev/null @@ -1,311 +0,0 @@ -package reads - -import ( - "container/heap" - - "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/tsdb/cursors" -) - -type sequenceResultSet struct { - items []ResultSet - rs ResultSet - err error - stats cursors.CursorStats -} - -// NewSequenceResultSet combines results into a single ResultSet, -// draining each ResultSet in order before moving to the next. -func NewSequenceResultSet(results []ResultSet) ResultSet { - if len(results) == 0 { - return nil - } else if len(results) == 1 { - return results[0] - } - - rs := &sequenceResultSet{items: results} - rs.pop() - return rs -} - -func (r *sequenceResultSet) Err() error { return r.err } - -func (r *sequenceResultSet) Close() { - if r.rs != nil { - r.rs.Close() - r.rs = nil - } - - for _, rs := range r.items { - rs.Close() - } - r.items = nil -} - -func (r *sequenceResultSet) pop() bool { - if r.rs != nil { - r.rs.Close() - r.rs = nil - } - - if len(r.items) > 0 { - r.rs = r.items[0] - r.items[0] = nil - r.items = r.items[1:] - return true - } - - return false -} - -func (r *sequenceResultSet) Next() bool { -RETRY: - if r.rs != nil { - if r.rs.Next() { - return true - } - - err := r.rs.Err() - stats := r.rs.Stats() - if err != nil { - r.err = err - r.Close() - return false - } - - r.stats.Add(stats) - - if r.pop() { - goto RETRY - } - } - - return false -} - -func (r *sequenceResultSet) Cursor() cursors.Cursor { - return r.rs.Cursor() -} - -func (r *sequenceResultSet) Tags() models.Tags { - return r.rs.Tags() -} - -func (r *sequenceResultSet) Stats() cursors.CursorStats { - return r.stats -} - -type mergedResultSet struct { - heap resultSetHeap - err error - first bool - stats cursors.CursorStats -} - -// NewMergedResultSet combines the results into a single ResultSet, -// producing keys in ascending lexicographical order. It requires -// all input results are ordered. 
-func NewMergedResultSet(results []ResultSet) ResultSet { - if len(results) == 0 { - return nil - } else if len(results) == 1 { - return results[0] - } - - mrs := &mergedResultSet{first: true} - mrs.heap.init(results) - return mrs -} - -func (r *mergedResultSet) Err() error { return r.err } - -func (r *mergedResultSet) Close() { - for _, rs := range r.heap.items { - rs.Close() - } - r.heap.items = nil -} - -func (r *mergedResultSet) Next() bool { - if len(r.heap.items) == 0 { - return false - } - - if !r.first { - top := r.heap.items[0] - if top.Next() { - heap.Fix(&r.heap, 0) - return true - } - err := top.Err() - stats := top.Stats() - top.Close() - heap.Pop(&r.heap) - if err != nil { - r.err = err - r.Close() - return false - } - - r.stats.Add(stats) - - return len(r.heap.items) > 0 - } - - r.first = false - return true -} - -func (r *mergedResultSet) Cursor() cursors.Cursor { - return r.heap.items[0].Cursor() -} - -func (r *mergedResultSet) Tags() models.Tags { - return r.heap.items[0].Tags() -} - -func (r *mergedResultSet) Stats() cursors.CursorStats { - return r.stats -} - -type resultSetHeap struct { - items []ResultSet -} - -func (h *resultSetHeap) init(results []ResultSet) { - if cap(h.items) < len(results) { - h.items = make([]ResultSet, 0, len(results)) - } else { - h.items = h.items[:0] - } - - for _, rs := range results { - if rs.Next() { - h.items = append(h.items, rs) - } else { - rs.Close() - } - } - heap.Init(h) -} - -func (h *resultSetHeap) Less(i, j int) bool { - return models.CompareTags(h.items[i].Tags(), h.items[j].Tags()) == -1 -} - -func (h *resultSetHeap) Len() int { - return len(h.items) -} - -func (h *resultSetHeap) Swap(i, j int) { - h.items[i], h.items[j] = h.items[j], h.items[i] -} - -func (h *resultSetHeap) Push(x interface{}) { - panic("not implemented") -} - -func (h *resultSetHeap) Pop() interface{} { - n := len(h.items) - item := h.items[n-1] - h.items[n-1] = nil - h.items = h.items[:n-1] - return item -} - -// MergedStringIterator merges multiple storage.StringIterators into one. -// It sorts and deduplicates adjacent values, so the output is sorted iff all inputs are sorted. -// If all inputs are not sorted, then output order and deduplication are undefined and unpleasant. -type MergedStringIterator struct { - heap stringIteratorHeap - nextValue string - stats cursors.CursorStats -} - -// API compatibility -var _ cursors.StringIterator = (*MergedStringIterator)(nil) - -func NewMergedStringIterator(iterators []cursors.StringIterator) *MergedStringIterator { - nonEmptyIterators := make([]cursors.StringIterator, 0, len(iterators)) - var stats cursors.CursorStats - - for _, iterator := range iterators { - // All iterators must be Next()'d so that their Value() methods return a meaningful value, and sort properly. - if iterator.Next() { - nonEmptyIterators = append(nonEmptyIterators, iterator) - } else { - stats.Add(iterator.Stats()) - } - } - - msi := &MergedStringIterator{ - heap: stringIteratorHeap{iterators: nonEmptyIterators}, - stats: stats, - } - heap.Init(&msi.heap) - - return msi -} - -func (msi *MergedStringIterator) Next() bool { - for msi.heap.Len() > 0 { - iterator := msi.heap.iterators[0] - - haveNext := false - if proposedNextValue := iterator.Value(); proposedNextValue != msi.nextValue { // Skip dupes. 
- msi.nextValue = proposedNextValue - haveNext = true - } - - if iterator.Next() { - // iterator.Value() has changed, so re-order that iterator within the heap - heap.Fix(&msi.heap, 0) - } else { - // iterator is drained, so count the stats and remove it from the heap - msi.stats.Add(iterator.Stats()) - heap.Pop(&msi.heap) - } - - if haveNext { - return true - } - } - - return false -} - -func (msi *MergedStringIterator) Value() string { - return msi.nextValue -} - -func (msi *MergedStringIterator) Stats() cursors.CursorStats { - return msi.stats -} - -type stringIteratorHeap struct { - iterators []cursors.StringIterator -} - -func (h stringIteratorHeap) Len() int { - return len(h.iterators) -} - -func (h stringIteratorHeap) Less(i, j int) bool { - return h.iterators[i].Value() < h.iterators[j].Value() -} - -func (h *stringIteratorHeap) Swap(i, j int) { - h.iterators[i], h.iterators[j] = h.iterators[j], h.iterators[i] -} - -func (h *stringIteratorHeap) Push(x interface{}) { - h.iterators = append(h.iterators, x.(cursors.StringIterator)) -} - -func (h *stringIteratorHeap) Pop() interface{} { - n := len(h.iterators) - item := h.iterators[n-1] - h.iterators[n-1] = nil - h.iterators = h.iterators[:n-1] - return item -} diff --git a/storage/reads/merge_test.go b/storage/reads/merge_test.go deleted file mode 100644 index cf2a2386541..00000000000 --- a/storage/reads/merge_test.go +++ /dev/null @@ -1,269 +0,0 @@ -package reads_test - -import ( - "reflect" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/storage/reads" - "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" -) - -func newStreamSeries(v ...string) *sliceStreamReader { - var frames []datatypes.ReadResponse_Frame - for _, s := range v { - frames = append(frames, seriesF(Float, s)) - } - return newStreamReader(response(frames...)) -} - -func TestNewSequenceResultSet(t *testing.T) { - tests := []struct { - name string - streams []*sliceStreamReader - exp string - }{ - { - name: "outer inner", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val01", "m0,tag0=val02"), - newStreamSeries("m0,tag0=val00", "m0,tag0=val03"), - }, - exp: `series: _m=m0,tag0=val01 - cursor:Float -series: _m=m0,tag0=val02 - cursor:Float -series: _m=m0,tag0=val00 - cursor:Float -series: _m=m0,tag0=val03 - cursor:Float -`, - }, - { - name: "sequential", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val00", "m0,tag0=val01"), - newStreamSeries("m0,tag0=val02", "m0,tag0=val03"), - }, - exp: `series: _m=m0,tag0=val00 - cursor:Float -series: _m=m0,tag0=val01 - cursor:Float -series: _m=m0,tag0=val02 - cursor:Float -series: _m=m0,tag0=val03 - cursor:Float -`, - }, - { - name: "single resultset", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val00", "m0,tag0=val01", "m0,tag0=val02", "m0,tag0=val03"), - }, - exp: `series: _m=m0,tag0=val00 - cursor:Float -series: _m=m0,tag0=val01 - cursor:Float -series: _m=m0,tag0=val02 - cursor:Float -series: _m=m0,tag0=val03 - cursor:Float -`, - }, - { - name: "single series ordered", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val00"), - newStreamSeries("m0,tag0=val01"), - newStreamSeries("m0,tag0=val02"), - newStreamSeries("m0,tag0=val03"), - }, - exp: `series: _m=m0,tag0=val00 - cursor:Float -series: _m=m0,tag0=val01 - cursor:Float -series: _m=m0,tag0=val02 - cursor:Float -series: _m=m0,tag0=val03 - cursor:Float -`, - }, - { - name: "single series random order", - streams: 
[]*sliceStreamReader{ - newStreamSeries("m0,tag0=val02"), - newStreamSeries("m0,tag0=val03"), - newStreamSeries("m0,tag0=val00"), - newStreamSeries("m0,tag0=val01"), - }, - exp: `series: _m=m0,tag0=val02 - cursor:Float -series: _m=m0,tag0=val03 - cursor:Float -series: _m=m0,tag0=val00 - cursor:Float -series: _m=m0,tag0=val01 - cursor:Float -`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rss := make([]reads.ResultSet, len(tt.streams)) - for i := range tt.streams { - rss[i] = reads.NewResultSetStreamReader(tt.streams[i]) - } - - rs := reads.NewSequenceResultSet(rss) - sb := new(strings.Builder) - ResultSetToString(sb, rs) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -func TestNewMergedResultSet(t *testing.T) { - exp := `series: _m=m0,tag0=val00 - cursor:Float -series: _m=m0,tag0=val01 - cursor:Float -series: _m=m0,tag0=val02 - cursor:Float -series: _m=m0,tag0=val03 - cursor:Float -` - - tests := []struct { - name string - streams []*sliceStreamReader - exp string - }{ - { - name: "outer inner", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val01", "m0,tag0=val02"), - newStreamSeries("m0,tag0=val00", "m0,tag0=val03"), - }, - exp: exp, - }, - { - name: "sequential", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val00", "m0,tag0=val01"), - newStreamSeries("m0,tag0=val02", "m0,tag0=val03"), - }, - exp: exp, - }, - { - name: "interleaved", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val01", "m0,tag0=val03"), - newStreamSeries("m0,tag0=val00", "m0,tag0=val02"), - }, - exp: exp, - }, - { - name: "single resultset", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val00", "m0,tag0=val01", "m0,tag0=val02", "m0,tag0=val03"), - }, - exp: exp, - }, - { - name: "single series ordered", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val00"), - newStreamSeries("m0,tag0=val01"), - newStreamSeries("m0,tag0=val02"), - newStreamSeries("m0,tag0=val03"), - }, - exp: exp, - }, - { - name: "single series random order", - streams: []*sliceStreamReader{ - newStreamSeries("m0,tag0=val02"), - newStreamSeries("m0,tag0=val03"), - newStreamSeries("m0,tag0=val00"), - newStreamSeries("m0,tag0=val01"), - }, - exp: exp, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rss := make([]reads.ResultSet, len(tt.streams)) - for i := range tt.streams { - rss[i] = reads.NewResultSetStreamReader(tt.streams[i]) - } - - rs := reads.NewMergedResultSet(rss) - sb := new(strings.Builder) - ResultSetToString(sb, rs) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - }) - } -} - -func TestNewMergedStringIterator(t *testing.T) { - tests := []struct { - name string - iterators []cursors.StringIterator - expectedValues []string - }{ - { - name: "simple", - iterators: []cursors.StringIterator{ - newMockStringIterator(1, 2, "bar", "foo"), - }, - expectedValues: []string{"bar", "foo"}, - }, - { - name: "duplicates", - iterators: []cursors.StringIterator{ - newMockStringIterator(1, 10, "c"), - newMockStringIterator(10, 100, "b", "b"), // This kind of duplication is not explicitly documented, but works. 
- newMockStringIterator(1, 10, "a", "c"), - newMockStringIterator(1, 10, "b", "d"), - newMockStringIterator(1, 10, "0", "a", "b", "e"), - }, - expectedValues: []string{"0", "a", "b", "c", "d", "e"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m := reads.NewMergedStringIterator(tt.iterators) - - // Expect no stats before any iteration - var expectStats cursors.CursorStats - if !reflect.DeepEqual(expectStats, m.Stats()) { - t.Errorf("expected %+v, got %+v", expectStats, m.Stats()) - } - - var gotValues []string - for m.Next() { - gotValues = append(gotValues, m.Value()) - } - if !reflect.DeepEqual(tt.expectedValues, gotValues) { - t.Errorf("expected %v, got %v", tt.expectedValues, gotValues) - } - for _, iterator := range tt.iterators { - expectStats.Add(iterator.Stats()) - } - if !reflect.DeepEqual(expectStats, m.Stats()) { - t.Errorf("expected %+v, got %+v", expectStats, m.Stats()) - } - }) - } -} diff --git a/storage/reads/mergegroupresultset.go b/storage/reads/mergegroupresultset.go deleted file mode 100644 index 66ec0a0da39..00000000000 --- a/storage/reads/mergegroupresultset.go +++ /dev/null @@ -1,259 +0,0 @@ -package reads - -import ( - "bytes" - "sort" - - "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/pkg/slices" -) - -// groupNoneMergedGroupResultSet produces a single GroupCursor, merging all -// GroupResultSet#Keys -type groupNoneMergedGroupResultSet struct { - g []GroupResultSet - gc groupNoneMergedGroupCursor - done bool -} - -// Returns a GroupResultSet that merges results using the datatypes.GroupNone -// strategy. Each source GroupResultSet in g must be configured using the -// GroupNone strategy or the results are undefined. -// -// The GroupNone strategy must merge the partition key and tag keys -// from each source GroupResultSet when producing its -func NewGroupNoneMergedGroupResultSet(g []GroupResultSet) GroupResultSet { - if len(g) == 0 { - return nil - } else if len(g) == 1 { - return g[0] - } - - grs := &groupNoneMergedGroupResultSet{ - g: g, - gc: groupNoneMergedGroupCursor{ - mergedResultSet: mergedResultSet{first: true}, - }, - } - - var km keyMerger - results := make([]ResultSet, 0, len(g)) - for _, rs := range g { - if gc := rs.Next(); gc != nil { - results = append(results, gc) - km.mergeKeys(gc.Keys()) - } else if rs.Err() != nil { - grs.done = true - grs.gc.err = rs.Err() - results = nil - break - } - } - - if len(results) > 0 { - grs.gc.keys = km.get() - grs.gc.heap.init(results) - } - - return grs -} - -func (r *groupNoneMergedGroupResultSet) Next() GroupCursor { - if !r.done { - r.done = true - return &r.gc - } - return nil -} - -func (r *groupNoneMergedGroupResultSet) Err() error { return r.gc.err } - -func (r *groupNoneMergedGroupResultSet) Close() { - r.gc.Close() - for _, grs := range r.g { - grs.Close() - } - r.g = nil -} - -type groupNoneMergedGroupCursor struct { - mergedResultSet - keys [][]byte -} - -func (r *groupNoneMergedGroupCursor) Keys() [][]byte { - return r.keys -} - -func (r *groupNoneMergedGroupCursor) PartitionKeyVals() [][]byte { - return nil -} - -// groupByMergedGroupResultSet implements the GroupBy strategy. -type groupByMergedGroupResultSet struct { - items []*groupCursorItem - alt []*groupCursorItem - groupCursors []GroupCursor - resultSets []ResultSet - nilVal []byte - err error - - km models.TagKeysSet - gc groupByMergedGroupCursor -} - -// Returns a GroupResultSet that merges results using the datatypes.GroupBy -// strategy. 
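
Editor's note: NewGroupNoneMergedGroupResultSet above folds every source cursor's Keys() into a single sorted union via keyMerger before exposing the one merged group. A sketch of that union step on plain strings; mergeTagKeys is a hypothetical stand-in for keyMerger, which operates on [][]byte.

package main

import "fmt"

// mergeTagKeys returns the sorted union of two sorted key sets, the way
// keyMerger folds Keys() from each source group cursor.
func mergeTagKeys(a, b []string) []string {
	out := make([]string, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		switch {
		case a[0] < b[0]:
			out, a = append(out, a[0]), a[1:]
		case a[0] > b[0]:
			out, b = append(out, b[0]), b[1:]
		default: // equal: keep one copy
			out, a, b = append(out, a[0]), a[1:], b[1:]
		}
	}
	out = append(out, a...)
	return append(out, b...)
}

func main() {
	// Mirrors the "merge tagKey schemas" cases in the removed tests:
	// m0,tag0 merged with m0,tag1,tag2 gives m0,tag0,tag1,tag2.
	fmt.Println(mergeTagKeys(
		[]string{"m0", "tag0"},
		[]string{"m0", "tag1", "tag2"},
	)) // [m0 tag0 tag1 tag2]
}
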
Each source GroupResultSet in g must be configured using the -// GroupBy strategy with the same GroupKeys or the results are undefined. -func NewGroupByMergedGroupResultSet(g []GroupResultSet) GroupResultSet { - if len(g) == 0 { - return nil - } else if len(g) == 1 { - return g[0] - } - - grs := &groupByMergedGroupResultSet{} - grs.nilVal = nilSortHi - grs.groupCursors = make([]GroupCursor, 0, len(g)) - grs.resultSets = make([]ResultSet, 0, len(g)) - grs.items = make([]*groupCursorItem, 0, len(g)) - grs.alt = make([]*groupCursorItem, 0, len(g)) - for _, rs := range g { - grs.items = append(grs.items, &groupCursorItem{grs: rs}) - } - - return grs -} - -// next determines the cursors for the next partition key. -func (r *groupByMergedGroupResultSet) next() { - r.alt = r.alt[:0] - for i, item := range r.items { - if item.gc == nil { - item.gc = item.grs.Next() - if item.gc != nil { - r.alt = append(r.alt, item) - } else { - r.err = item.grs.Err() - item.grs.Close() - } - } else { - // append remaining non-nil cursors - r.alt = append(r.alt, r.items[i:]...) - break - } - } - - r.items, r.alt = r.alt, r.items - if len(r.items) == 0 { - r.groupCursors = r.groupCursors[:0] - r.resultSets = r.resultSets[:0] - return - } - - if r.err != nil { - r.Close() - return - } - - sort.Slice(r.items, func(i, j int) bool { - return comparePartitionKey(r.items[i].gc.PartitionKeyVals(), r.items[j].gc.PartitionKeyVals(), r.nilVal) == -1 - }) - - r.groupCursors = r.groupCursors[:1] - r.resultSets = r.resultSets[:1] - - first := r.items[0].gc - r.groupCursors[0] = first - r.resultSets[0] = first - r.items[0].gc = nil - - for i := 1; i < len(r.items); i++ { - if slices.CompareSlice(first.PartitionKeyVals(), r.items[i].gc.PartitionKeyVals()) == 0 { - r.groupCursors = append(r.groupCursors, r.items[i].gc) - r.resultSets = append(r.resultSets, r.items[i].gc) - r.items[i].gc = nil - } - } -} - -func (r *groupByMergedGroupResultSet) Next() GroupCursor { - r.next() - if len(r.groupCursors) == 0 { - return nil - } - - r.gc.first = true - r.gc.heap.init(r.resultSets) - - r.km.Clear() - for i := range r.groupCursors { - r.km.UnionBytes(r.groupCursors[i].Keys()) - } - - r.gc.keys = append(r.gc.keys[:0], r.km.KeysBytes()...) 
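
Editor's note: next() above orders the pending group cursors by partition key, substituting nilSortHi for empty values so they sort last, then batches every cursor whose key equals the smallest. A simplified sketch of that selection; nilSortHi's concrete value is assumed here, and the real code checks batch membership with slices.CompareSlice rather than re-running the nil-substituted compare.

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// nilSortHi is assumed; the real sentinel only needs to sort after
// every legitimate tag value.
var nilSortHi = []byte{0xff, 0xff}

// compareKey mirrors comparePartitionKey: empty elements are replaced
// by nilSortHi so groups with missing tag values sort last.
func compareKey(a, b [][]byte) int {
	for i := 0; i < len(a) && i < len(b); i++ {
		av, bv := a[i], b[i]
		if len(av) == 0 {
			av = nilSortHi
		}
		if len(bv) == 0 {
			bv = nilSortHi
		}
		if v := bytes.Compare(av, bv); v != 0 {
			return v
		}
	}
	return len(a) - len(b) // the shorter key sorts first
}

// nextBatch picks every key equal to the smallest pending key, the way
// next() gathers all cursors that share the next partition key.
func nextBatch(keys [][][]byte) [][][]byte {
	sort.Slice(keys, func(i, j int) bool { return compareKey(keys[i], keys[j]) < 0 })
	n := 1
	for n < len(keys) && compareKey(keys[0], keys[n]) == 0 {
		n++
	}
	return keys[:n]
}

func main() {
	batch := nextBatch([][][]byte{
		{nil, []byte("val20")}, // partition key ",val20"
		{[]byte("val00"), nil}, // partition key "val00,"
		{[]byte("val00"), nil}, // same key from another source
	})
	fmt.Println(len(batch)) // 2: both "val00," cursors are merged first
}
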
- r.gc.vals = r.groupCursors[0].PartitionKeyVals() - return &r.gc -} - -func (r *groupByMergedGroupResultSet) Err() error { return r.err } - -func (r *groupByMergedGroupResultSet) Close() { - r.gc.Close() - for _, grs := range r.items { - if grs.gc != nil { - grs.gc.Close() - } - grs.grs.Close() - } - r.items = nil - r.alt = nil -} - -type groupByMergedGroupCursor struct { - mergedResultSet - keys [][]byte - vals [][]byte -} - -func (r *groupByMergedGroupCursor) Keys() [][]byte { - return r.keys -} - -func (r *groupByMergedGroupCursor) PartitionKeyVals() [][]byte { - return r.vals -} - -type groupCursorItem struct { - grs GroupResultSet - gc GroupCursor -} - -func comparePartitionKey(a, b [][]byte, nilVal []byte) int { - i := 0 - for i < len(a) && i < len(b) { - av, bv := a[i], b[i] - if len(av) == 0 { - av = nilVal - } - if len(bv) == 0 { - bv = nilVal - } - if v := bytes.Compare(av, bv); v == 0 { - i++ - continue - } else { - return v - } - } - - if i < len(b) { - // b is longer, so assume a is less - return -1 - } else if i < len(a) { - // a is longer, so assume b is less - return 1 - } else { - return 0 - } -} diff --git a/storage/reads/mergegroupresultset_test.go b/storage/reads/mergegroupresultset_test.go deleted file mode 100644 index fbcefa928b0..00000000000 --- a/storage/reads/mergegroupresultset_test.go +++ /dev/null @@ -1,266 +0,0 @@ -package reads_test - -import ( - "errors" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/influxdb/storage/reads" - "github.com/influxdata/influxdb/storage/reads/datatypes" -) - -func newGroupNoneStreamSeries(tagKeys string, v ...string) *sliceStreamReader { - var frames []datatypes.ReadResponse_Frame - frames = append(frames, groupF(tagKeys, "")) - for _, s := range v { - frames = append(frames, seriesF(Float, s)) - } - return newStreamReader(response(frames...)) -} - -func TestNewGroupNoneMergedGroupResultSet(t *testing.T) { - exp := `group: - tag key : m0,tag0,tag1,tag2 - partition key: - series: _m=m0,tag0=val00 - cursor:Float - series: _m=m0,tag0=val01 - cursor:Float - series: _m=m0,tag1=val10 - cursor:Float - series: _m=m0,tag2=val20 - cursor:Float -` - - tests := []struct { - name string - streams []*sliceStreamReader - exp string - }{ - { - name: "merge tagKey schemas series total order", - streams: []*sliceStreamReader{ - newGroupNoneStreamSeries("m0,tag0", "m0,tag0=val00", "m0,tag0=val01"), - newGroupNoneStreamSeries("m0,tag1,tag2", "m0,tag1=val10", "m0,tag2=val20"), - }, - exp: exp, - }, - { - name: "merge tagKey schemas series mixed", - streams: []*sliceStreamReader{ - newGroupNoneStreamSeries("m0,tag0,tag2", "m0,tag0=val01", "m0,tag2=val20"), - newGroupNoneStreamSeries("m0,tag0,tag1", "m0,tag0=val00", "m0,tag1=val10"), - }, - exp: exp, - }, - { - name: "merge single group schemas ordered", - streams: []*sliceStreamReader{ - newGroupNoneStreamSeries("m0,tag0", "m0,tag0=val00"), - newGroupNoneStreamSeries("m0,tag0", "m0,tag0=val01"), - newGroupNoneStreamSeries("m0,tag1", "m0,tag1=val10"), - newGroupNoneStreamSeries("m0,tag2", "m0,tag2=val20"), - }, - exp: exp, - }, - { - name: "merge single group schemas unordered", - streams: []*sliceStreamReader{ - newGroupNoneStreamSeries("m0,tag2", "m0,tag2=val20"), - newGroupNoneStreamSeries("m0,tag0", "m0,tag0=val00"), - newGroupNoneStreamSeries("m0,tag1", "m0,tag1=val10"), - newGroupNoneStreamSeries("m0,tag0", "m0,tag0=val01"), - }, - exp: exp, - }, - { - name: "merge single group", - streams: []*sliceStreamReader{ - newGroupNoneStreamSeries("m0,tag0,tag1,tag2", 
"m0,tag0=val00", "m0,tag0=val01", "m0,tag1=val10", "m0,tag2=val20"), - }, - exp: exp, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - grss := make([]reads.GroupResultSet, len(tt.streams)) - for i := range tt.streams { - grss[i] = reads.NewGroupResultSetStreamReader(tt.streams[i]) - } - - grs := reads.NewGroupNoneMergedGroupResultSet(grss) - sb := new(strings.Builder) - GroupResultSetToString(sb, grs) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(strings.Split(got, "\n"), strings.Split(tt.exp, "\n"))) - } - - grs.Close() - }) - } -} - -func TestGroupNoneMergedGroupResultSet_ErrNoData(t *testing.T) { - exp := "no data" - streams := []reads.StreamReader{ - newGroupNoneStreamSeries("m0,tag2", "m0,tag2=val20"), - errStreamReader(exp), - } - - grss := make([]reads.GroupResultSet, len(streams)) - for i := range streams { - grss[i] = reads.NewGroupResultSetStreamReader(streams[i]) - } - - grs := reads.NewGroupNoneMergedGroupResultSet(grss) - if got := grs.Next(); got != nil { - t.Errorf("expected nil") - } - - if got, expErr := grs.Err(), errors.New(exp); !cmp.Equal(got, expErr, cmp.Comparer(errCmp)) { - t.Errorf("unexpected error; -got/+exp\n%s", cmp.Diff(got, expErr, cmp.Transformer("err", errTr))) - } -} - -func TestGroupNoneMergedGroupResultSet_ErrStreamNoData(t *testing.T) { - streams := []reads.StreamReader{ - newGroupNoneStreamSeries("m0,tag2", "m0,tag2=val20"), - &emptyStreamReader{}, - } - - grss := make([]reads.GroupResultSet, len(streams)) - for i := range streams { - grss[i] = reads.NewGroupResultSetStreamReader(streams[i]) - } - - grs := reads.NewGroupNoneMergedGroupResultSet(grss) - if got := grs.Next(); got != nil { - t.Errorf("expected nil") - } - - if got, expErr := grs.Err(), reads.ErrStreamNoData; !cmp.Equal(got, expErr, cmp.Comparer(errCmp)) { - t.Errorf("unexpected error; -got/+exp\n%s", cmp.Diff(got, expErr, cmp.Transformer("err", errTr))) - } -} - -func groupByF(tagKeys, parKeys string, v ...string) datatypes.ReadResponse { - var frames []datatypes.ReadResponse_Frame - frames = append(frames, groupF(tagKeys, parKeys)) - for _, s := range v { - frames = append(frames, seriesF(Float, s)) - } - return response(frames...) 
-} - -func TestNewGroupByMergedGroupResultSet(t *testing.T) { - exp := `group: - tag key : _m,tag0,tag1 - partition key: val00, - series: _m=aaa,tag0=val00 - cursor:Float - series: _m=cpu,tag0=val00,tag1=val10 - cursor:Float - series: _m=cpu,tag0=val00,tag1=val11 - cursor:Float - series: _m=cpu,tag0=val00,tag1=val12 - cursor:Float -group: - tag key : _m,tag0 - partition key: val01, - series: _m=aaa,tag0=val01 - cursor:Float -group: - tag key : _m,tag1,tag2 - partition key: ,val20 - series: _m=mem,tag1=val10,tag2=val20 - cursor:Float - series: _m=mem,tag1=val11,tag2=val20 - cursor:Float -group: - tag key : _m,tag1,tag2 - partition key: ,val21 - series: _m=mem,tag1=val11,tag2=val21 - cursor:Float -` - tests := []struct { - name string - streams []*sliceStreamReader - exp string - }{ - { - streams: []*sliceStreamReader{ - newStreamReader( - groupByF("_m,tag0,tag1", "val00,", "aaa,tag0=val00", "cpu,tag0=val00,tag1=val11"), - groupByF("_m,tag1,tag2", ",val20", "mem,tag1=val10,tag2=val20"), - groupByF("_m,tag1,tag2", ",val21", "mem,tag1=val11,tag2=val21"), - ), - newStreamReader( - groupByF("_m,tag0,tag1", "val00,", "cpu,tag0=val00,tag1=val10", "cpu,tag0=val00,tag1=val12"), - groupByF("_m,tag0", "val01,", "aaa,tag0=val01"), - ), - newStreamReader( - groupByF("_m,tag1,tag2", ",val20", "mem,tag1=val11,tag2=val20"), - ), - }, - exp: exp, - }, - { - streams: []*sliceStreamReader{ - newStreamReader( - groupByF("_m,tag1,tag2", ",val20", "mem,tag1=val10,tag2=val20"), - groupByF("_m,tag1,tag2", ",val21", "mem,tag1=val11,tag2=val21"), - ), - newStreamReader( - groupByF("_m,tag1,tag2", ",val20", "mem,tag1=val11,tag2=val20"), - ), - newStreamReader( - groupByF("_m,tag0,tag1", "val00,", "cpu,tag0=val00,tag1=val10", "cpu,tag0=val00,tag1=val12"), - groupByF("_m,tag0", "val01,", "aaa,tag0=val01"), - ), - newStreamReader( - groupByF("_m,tag0,tag1", "val00,", "aaa,tag0=val00", "cpu,tag0=val00,tag1=val11"), - ), - }, - exp: exp, - }, - { - name: "does merge keys", - streams: []*sliceStreamReader{ - newStreamReader( - groupByF("_m,tag1", "val00,", "aaa,tag0=val00", "cpu,tag0=val00,tag1=val11"), - groupByF("_m,tag2", ",val20", "mem,tag1=val10,tag2=val20"), - groupByF("_m,tag1,tag2", ",val21", "mem,tag1=val11,tag2=val21"), - ), - newStreamReader( - groupByF("_m,tag0,tag1", "val00,", "cpu,tag0=val00,tag1=val10", "cpu,tag0=val00,tag1=val12"), - groupByF("_m,tag0", "val01,", "aaa,tag0=val01"), - ), - newStreamReader( - groupByF("_m,tag1", ",val20", "mem,tag1=val11,tag2=val20"), - ), - }, - exp: exp, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - grss := make([]reads.GroupResultSet, len(tt.streams)) - for i := range tt.streams { - grss[i] = reads.NewGroupResultSetStreamReader(tt.streams[i]) - } - - grs := reads.NewGroupByMergedGroupResultSet(grss) - sb := new(strings.Builder) - GroupResultSetToString(sb, grs, SkipNilCursor()) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(strings.Split(got, "\n"), strings.Split(tt.exp, "\n"))) - } - - grs.Close() - }) - } -} diff --git a/storage/reads/predicate.go b/storage/reads/predicate.go index f70d3cda971..99249f099b0 100644 --- a/storage/reads/predicate.go +++ b/storage/reads/predicate.go @@ -2,23 +2,9 @@ package reads import ( "bytes" - "fmt" - "regexp" "strconv" - "github.com/influxdata/flux/ast" - "github.com/influxdata/flux/semantic" - "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxql" - 
"github.com/pkg/errors" -) - -const ( - fieldKey = "_field" - measurementKey = "_measurement" - valueKey = "_value" - fieldRef = "$" ) // NodeVisitor can be called by Walk to traverse the Node hierarchy. @@ -154,430 +140,3 @@ func (v *predicateExpressionPrinter) Visit(n *datatypes.Node) NodeVisitor { return v } } - -func toStoragePredicate(f *semantic.FunctionExpression) (*datatypes.Predicate, error) { - if f.Block.Parameters == nil || len(f.Block.Parameters.List) != 1 { - return nil, errors.New("storage predicate functions must have exactly one parameter") - } - - root, err := toStoragePredicateHelper(f.Block.Body.(semantic.Expression), f.Block.Parameters.List[0].Key.Name) - if err != nil { - return nil, err - } - - return &datatypes.Predicate{ - Root: root, - }, nil -} - -func toStoragePredicateHelper(n semantic.Expression, objectName string) (*datatypes.Node, error) { - switch n := n.(type) { - case *semantic.LogicalExpression: - left, err := toStoragePredicateHelper(n.Left, objectName) - if err != nil { - return nil, errors.Wrap(err, "left hand side") - } - right, err := toStoragePredicateHelper(n.Right, objectName) - if err != nil { - return nil, errors.Wrap(err, "right hand side") - } - children := []*datatypes.Node{left, right} - switch n.Operator { - case ast.AndOperator: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.LogicalAnd}, - Children: children, - }, nil - case ast.OrOperator: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeLogicalExpression, - Value: &datatypes.Node_Logical_{Logical: datatypes.LogicalOr}, - Children: children, - }, nil - default: - return nil, fmt.Errorf("unknown logical operator %v", n.Operator) - } - case *semantic.BinaryExpression: - left, err := toStoragePredicateHelper(n.Left, objectName) - if err != nil { - return nil, errors.Wrap(err, "left hand side") - } - right, err := toStoragePredicateHelper(n.Right, objectName) - if err != nil { - return nil, errors.Wrap(err, "right hand side") - } - children := []*datatypes.Node{left, right} - op, err := toComparisonOperator(n.Operator) - if err != nil { - return nil, err - } - return &datatypes.Node{ - NodeType: datatypes.NodeTypeComparisonExpression, - Value: &datatypes.Node_Comparison_{Comparison: op}, - Children: children, - }, nil - case *semantic.StringLiteral: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: n.Value, - }, - }, nil - case *semantic.IntegerLiteral: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_IntegerValue{ - IntegerValue: n.Value, - }, - }, nil - case *semantic.BooleanLiteral: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_BooleanValue{ - BooleanValue: n.Value, - }, - }, nil - case *semantic.FloatLiteral: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_FloatValue{ - FloatValue: n.Value, - }, - }, nil - case *semantic.RegexpLiteral: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_RegexValue{ - RegexValue: n.Value.String(), - }, - }, nil - case *semantic.MemberExpression: - // Sanity check that the object is the objectName identifier - if ident, ok := n.Object.(*semantic.IdentifierExpression); !ok || ident.Name != objectName { - return nil, fmt.Errorf("unknown object %q", n.Object) - } - switch n.Property { - case fieldKey: - return &datatypes.Node{ - NodeType: 
datatypes.NodeTypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: models.FieldKeyTagKey, - }, - }, nil - case measurementKey: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: models.MeasurementTagKey, - }, - }, nil - case valueKey: - return &datatypes.Node{ - NodeType: datatypes.NodeTypeFieldRef, - Value: &datatypes.Node_FieldRefValue{ - FieldRefValue: valueKey, - }, - }, nil - - } - return &datatypes.Node{ - NodeType: datatypes.NodeTypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: n.Property, - }, - }, nil - case *semantic.DurationLiteral: - return nil, errors.New("duration literals not supported in storage predicates") - case *semantic.DateTimeLiteral: - return nil, errors.New("time literals not supported in storage predicates") - default: - return nil, fmt.Errorf("unsupported semantic expression type %T", n) - } -} - -func toComparisonOperator(o ast.OperatorKind) (datatypes.Node_Comparison, error) { - switch o { - case ast.EqualOperator: - return datatypes.ComparisonEqual, nil - case ast.NotEqualOperator: - return datatypes.ComparisonNotEqual, nil - case ast.RegexpMatchOperator: - return datatypes.ComparisonRegex, nil - case ast.NotRegexpMatchOperator: - return datatypes.ComparisonNotRegex, nil - case ast.StartsWithOperator: - return datatypes.ComparisonStartsWith, nil - case ast.LessThanOperator: - return datatypes.ComparisonLess, nil - case ast.LessThanEqualOperator: - return datatypes.ComparisonLessEqual, nil - case ast.GreaterThanOperator: - return datatypes.ComparisonGreater, nil - case ast.GreaterThanEqualOperator: - return datatypes.ComparisonGreaterEqual, nil - default: - return 0, fmt.Errorf("unknown operator %v", o) - } -} - -// NodeToExpr transforms a predicate node to an influxql.Expr. -func NodeToExpr(node *datatypes.Node, remap map[string]string) (influxql.Expr, error) { - v := &nodeToExprVisitor{remap: remap} - WalkNode(v, node) - if err := v.Err(); err != nil { - return nil, err - } - - if len(v.exprs) > 1 { - return nil, errors.New("invalid expression") - } - - if len(v.exprs) == 0 { - return nil, nil - } - - // TODO(edd): It would be preferable if RewriteRegexConditions was a - // package level function in influxql. 
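
Editor's note: the member-expression arm of toStoragePredicateHelper above maps _measurement and _field onto the reserved measurement/field tag keys and _value onto a field ref; any other property is treated as an ordinary tag. As a worked example, the flux predicate r._measurement == "cpu" converts to the node tree below. This is only a sketch of the output shape, built with the datatypes and models identifiers already used in this patch.

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/storage/reads/datatypes"
)

func main() {
	// r._measurement == "cpu": the member expression becomes a tag ref
	// on the reserved measurement tag key, the literal a string node.
	pred := &datatypes.Predicate{
		Root: &datatypes.Node{
			NodeType: datatypes.NodeTypeComparisonExpression,
			Value:    &datatypes.Node_Comparison_{Comparison: datatypes.ComparisonEqual},
			Children: []*datatypes.Node{
				{
					NodeType: datatypes.NodeTypeTagRef,
					Value:    &datatypes.Node_TagRefValue{TagRefValue: models.MeasurementTagKey},
				},
				{
					NodeType: datatypes.NodeTypeLiteral,
					Value:    &datatypes.Node_StringValue{StringValue: "cpu"},
				},
			},
		},
	}
	fmt.Println(pred.Root.NodeType)
}
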
- stmt := &influxql.SelectStatement{ - Condition: v.exprs[0], - } - stmt.RewriteRegexConditions() - return stmt.Condition, nil -} - -type nodeToExprVisitor struct { - remap map[string]string - exprs []influxql.Expr - err error -} - -func (v *nodeToExprVisitor) Visit(n *datatypes.Node) NodeVisitor { - if v.err != nil { - return nil - } - - switch n.NodeType { - case datatypes.NodeTypeLogicalExpression: - if len(n.Children) > 1 { - op := influxql.AND - if n.GetLogical() == datatypes.LogicalOr { - op = influxql.OR - } - - WalkNode(v, n.Children[0]) - if v.err != nil { - return nil - } - - for i := 1; i < len(n.Children); i++ { - WalkNode(v, n.Children[i]) - if v.err != nil { - return nil - } - - if len(v.exprs) >= 2 { - lhs, rhs := v.pop2() - v.exprs = append(v.exprs, &influxql.BinaryExpr{LHS: lhs, Op: op, RHS: rhs}) - } - } - - return nil - } - - case datatypes.NodeTypeParenExpression: - if len(n.Children) != 1 { - v.err = errors.New("parenExpression expects one child") - return nil - } - - WalkNode(v, n.Children[0]) - if v.err != nil { - return nil - } - - if len(v.exprs) > 0 { - v.exprs = append(v.exprs, &influxql.ParenExpr{Expr: v.pop()}) - } - - return nil - - case datatypes.NodeTypeComparisonExpression: - WalkChildren(v, n) - - if len(v.exprs) < 2 { - v.err = errors.New("comparisonExpression expects two children") - return nil - } - - lhs, rhs := v.pop2() - - be := &influxql.BinaryExpr{LHS: lhs, RHS: rhs} - switch n.GetComparison() { - case datatypes.ComparisonEqual: - be.Op = influxql.EQ - case datatypes.ComparisonNotEqual: - be.Op = influxql.NEQ - case datatypes.ComparisonStartsWith: - // TODO(sgc): rewrite to anchored RE, as index does not support startsWith yet - v.err = errors.New("startsWith not implemented") - return nil - case datatypes.ComparisonRegex: - be.Op = influxql.EQREGEX - case datatypes.ComparisonNotRegex: - be.Op = influxql.NEQREGEX - case datatypes.ComparisonLess: - be.Op = influxql.LT - case datatypes.ComparisonLessEqual: - be.Op = influxql.LTE - case datatypes.ComparisonGreater: - be.Op = influxql.GT - case datatypes.ComparisonGreaterEqual: - be.Op = influxql.GTE - default: - v.err = errors.New("invalid comparison operator") - return nil - } - - v.exprs = append(v.exprs, be) - - return nil - - case datatypes.NodeTypeTagRef: - ref := n.GetTagRefValue() - if v.remap != nil { - if nk, ok := v.remap[ref]; ok { - ref = nk - } - } - - v.exprs = append(v.exprs, &influxql.VarRef{Val: ref, Type: influxql.Tag}) - return nil - - case datatypes.NodeTypeFieldRef: - v.exprs = append(v.exprs, &influxql.VarRef{Val: fieldRef}) - return nil - - case datatypes.NodeTypeLiteral: - switch val := n.Value.(type) { - case *datatypes.Node_StringValue: - v.exprs = append(v.exprs, &influxql.StringLiteral{Val: val.StringValue}) - - case *datatypes.Node_RegexValue: - // TODO(sgc): consider hashing the RegexValue and cache compiled version - re, err := regexp.Compile(val.RegexValue) - if err != nil { - v.err = err - } - v.exprs = append(v.exprs, &influxql.RegexLiteral{Val: re}) - return nil - - case *datatypes.Node_IntegerValue: - v.exprs = append(v.exprs, &influxql.IntegerLiteral{Val: val.IntegerValue}) - - case *datatypes.Node_UnsignedValue: - v.exprs = append(v.exprs, &influxql.UnsignedLiteral{Val: val.UnsignedValue}) - - case *datatypes.Node_FloatValue: - v.exprs = append(v.exprs, &influxql.NumberLiteral{Val: val.FloatValue}) - - case *datatypes.Node_BooleanValue: - v.exprs = append(v.exprs, &influxql.BooleanLiteral{Val: val.BooleanValue}) - - default: - v.err = errors.New("unexpected 
literal type") - return nil - } - - return nil - - default: - return v - } - return nil -} - -func (v *nodeToExprVisitor) Err() error { - return v.err -} - -func (v *nodeToExprVisitor) pop() influxql.Expr { - if len(v.exprs) == 0 { - panic("stack empty") - } - - var top influxql.Expr - top, v.exprs = v.exprs[len(v.exprs)-1], v.exprs[:len(v.exprs)-1] - return top -} - -func (v *nodeToExprVisitor) pop2() (influxql.Expr, influxql.Expr) { - if len(v.exprs) < 2 { - panic("stack empty") - } - - rhs := v.exprs[len(v.exprs)-1] - lhs := v.exprs[len(v.exprs)-2] - v.exprs = v.exprs[:len(v.exprs)-2] - return lhs, rhs -} - -func IsTrueBooleanLiteral(expr influxql.Expr) bool { - b, ok := expr.(*influxql.BooleanLiteral) - if ok { - return b.Val - } - return false -} - -func RewriteExprRemoveFieldValue(expr influxql.Expr) influxql.Expr { - return influxql.RewriteExpr(expr, func(expr influxql.Expr) influxql.Expr { - if be, ok := expr.(*influxql.BinaryExpr); ok { - if ref, ok := be.LHS.(*influxql.VarRef); ok { - if ref.Val == fieldRef { - return &influxql.BooleanLiteral{Val: true} - } - } - } - - return expr - }) -} - -type hasRefs struct { - refs []string - found []bool -} - -func (v *hasRefs) allFound() bool { - for _, val := range v.found { - if !val { - return false - } - } - return true -} - -func (v *hasRefs) Visit(node influxql.Node) influxql.Visitor { - if v.allFound() { - return nil - } - - if n, ok := node.(*influxql.VarRef); ok { - for i, r := range v.refs { - if !v.found[i] && r == n.Val { - v.found[i] = true - if v.allFound() { - return nil - } - } - } - } - return v -} - -func HasFieldValueKey(expr influxql.Expr) bool { - refs := hasRefs{refs: []string{fieldRef}, found: make([]bool, 1)} - influxql.Walk(&refs, expr) - return refs.found[0] -} diff --git a/storage/reads/predicate_test.go b/storage/reads/predicate_test.go index c300e663470..dcdeeaa9152 100644 --- a/storage/reads/predicate_test.go +++ b/storage/reads/predicate_test.go @@ -56,87 +56,3 @@ func TestPredicateToExprString(t *testing.T) { }) } } - -func TestHasFieldValueKey(t *testing.T) { - predicates := []*datatypes.Node{ - { - NodeType: datatypes.NodeTypeComparisonExpression, - Value: &datatypes.Node_Comparison_{ - Comparison: datatypes.ComparisonLess, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.NodeTypeFieldRef, - Value: &datatypes.Node_FieldRefValue{ - FieldRefValue: "_value", - }, - }, - { - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_IntegerValue{ - IntegerValue: 3000, - }, - }, - }, - }, - { - NodeType: datatypes.NodeTypeLogicalExpression, - Value: &datatypes.Node_Logical_{ - Logical: datatypes.LogicalAnd, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.NodeTypeComparisonExpression, - Value: &datatypes.Node_Comparison_{ - Comparison: datatypes.ComparisonEqual, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.NodeTypeTagRef, - Value: &datatypes.Node_TagRefValue{ - TagRefValue: "_measurement", - }, - }, - { - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_StringValue{ - StringValue: "cpu", - }, - }, - }, - }, - { - NodeType: datatypes.NodeTypeComparisonExpression, - Value: &datatypes.Node_Comparison_{ - Comparison: datatypes.ComparisonLess, - }, - Children: []*datatypes.Node{ - { - NodeType: datatypes.NodeTypeFieldRef, - Value: &datatypes.Node_FieldRefValue{ - FieldRefValue: "_value", - }, - }, - { - NodeType: datatypes.NodeTypeLiteral, - Value: &datatypes.Node_IntegerValue{ - IntegerValue: 3000, - }, - }, - }, - }, - }, - }, - } - for _, predicate := 
range predicates { - t.Run("", func(t *testing.T) { - expr, err := reads.NodeToExpr(predicate, nil) - if err != nil { - t.Fatalf("unexpected error converting predicate to InfluxQL expression: %v", err) - } - if !reads.HasFieldValueKey(expr) { - t.Fatalf("did not find a field reference in %v", expr) - } - }) - } -} diff --git a/storage/reads/reader_test.go b/storage/reads/reader_test.go deleted file mode 100644 index 8cc72783ab9..00000000000 --- a/storage/reads/reader_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package reads_test - -import ( - "context" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/influxdata/flux" - "github.com/influxdata/flux/execute" - "github.com/influxdata/flux/execute/executetest" - "github.com/influxdata/flux/memory" - "github.com/influxdata/influxdb/flux/stdlib/influxdata/influxdb" - "github.com/influxdata/influxdb/mock" - "github.com/influxdata/influxdb/storage/reads" - "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" -) - -func TestDuplicateKeys_ReadFilter(t *testing.T) { - closed := 0 - - s := mock.NewStoreReader() - s.ReadFilterFunc = func(ctx context.Context, req *datatypes.ReadFilterRequest) (reads.ResultSet, error) { - inputs := make([]cursors.Cursor, 2) - inputs[0] = func() cursors.Cursor { - called := false - cur := mock.NewFloatArrayCursor() - cur.NextFunc = func() *cursors.FloatArray { - if called { - return &cursors.FloatArray{} - } - called = true - return &cursors.FloatArray{ - Timestamps: []int64{0}, - Values: []float64{1.0}, - } - } - cur.CloseFunc = func() { - closed++ - } - return cur - }() - inputs[1] = func() cursors.Cursor { - called := false - cur := mock.NewIntegerArrayCursor() - cur.NextFunc = func() *cursors.IntegerArray { - if called { - return &cursors.IntegerArray{} - } - called = true - return &cursors.IntegerArray{ - Timestamps: []int64{10}, - Values: []int64{1}, - } - } - cur.CloseFunc = func() { - closed++ - } - return cur - }() - - idx := -1 - rs := mock.NewResultSet() - rs.NextFunc = func() bool { - idx++ - return idx < len(inputs) - } - rs.CursorFunc = func() cursors.Cursor { - return inputs[idx] - } - rs.CloseFunc = func() { - idx = len(inputs) - } - return rs, nil - } - - r := reads.NewReader(s) - ti, err := r.ReadFilter(context.Background(), influxdb.ReadFilterSpec{ - Bounds: execute.Bounds{ - Start: execute.Time(0), - Stop: execute.Time(30), - }, - }, &memory.Allocator{}) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - var got []*executetest.Table - if err := ti.Do(func(tbl flux.Table) error { - t, err := executetest.ConvertTable(tbl) - if err != nil { - return err - } - got = append(got, t) - return nil - }); err != nil { - t.Fatalf("unexpected error: %s", err) - } - - want := []*executetest.Table{ - { - ColMeta: []flux.ColMeta{ - {Label: "_start", Type: flux.TTime}, - {Label: "_stop", Type: flux.TTime}, - {Label: "_time", Type: flux.TTime}, - {Label: "_value", Type: flux.TFloat}, - }, - KeyCols: []string{"_start", "_stop"}, - Data: [][]interface{}{ - {execute.Time(0), execute.Time(30), execute.Time(0), 1.0}, - }, - }, - } - for _, tbl := range want { - tbl.Normalize() - } - if !cmp.Equal(want, got) { - t.Fatalf("unexpected output:\n%s", cmp.Diff(want, got)) - } - - if want, got := closed, 2; want != got { - t.Fatalf("unexpected count of closed cursors -want/+got:\n\t- %d\n\t+ %d", want, got) - } -} diff --git a/storage/reads/readstate_string.go b/storage/reads/readstate_string.go deleted file mode 100644 index 19d34e236b7..00000000000 --- 
a/storage/reads/readstate_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=readState -trimprefix=state"; DO NOT EDIT. - -package reads - -import "strconv" - -const _readState_name = "ReadGroupReadSeriesReadPointsReadFloatPointsReadIntegerPointsReadUnsignedPointsReadBooleanPointsReadStringPointsReadErrDone" - -var _readState_index = [...]uint8{0, 9, 19, 29, 44, 61, 79, 96, 112, 119, 123} - -func (i readState) String() string { - if i >= readState(len(_readState_index)-1) { - return "readState(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _readState_name[_readState_index[i]:_readState_index[i+1]] -} diff --git a/storage/reads/resultset_lineprotocol.go b/storage/reads/resultset_lineprotocol.go new file mode 100644 index 00000000000..f821aa6b88e --- /dev/null +++ b/storage/reads/resultset_lineprotocol.go @@ -0,0 +1,129 @@ +package reads + +import ( + "errors" + "io" + "strconv" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb/cursors" +) + +// ResultSetToLineProtocol transforms rs to line protocol and writes the +// output to wr. +func ResultSetToLineProtocol(wr io.Writer, rs ResultSet) (err error) { + defer rs.Close() + + line := make([]byte, 0, 4096) + for rs.Next() { + tags := rs.Tags() + name := tags.Get(models.MeasurementTagKeyBytes) + field := tags.Get(models.FieldKeyTagKeyBytes) + if len(name) == 0 || len(field) == 0 { + return errors.New("missing measurement / field") + } + + line = append(line[:0], name...) + if tags.Len() > 2 { + tags = tags[1 : len(tags)-1] // strip the first and last tags, which are the measurement and field keys + line = tags.AppendHashKey(line) + } + + line = append(line, ' ') + line = append(line, field...) + line = append(line, '=') + err = cursorToLineProtocol(wr, line, rs.Cursor()) + if err != nil { + return err + } + } + + return rs.Err() +} + +func cursorToLineProtocol(wr io.Writer, line []byte, cur cursors.Cursor) error { + var newLine = []byte{'\n'} + + switch ccur := cur.(type) { + case cursors.IntegerArrayCursor: + for { + a := ccur.Next() + if a.Len() > 0 { + for i := range a.Timestamps { + buf := strconv.AppendInt(line, a.Values[i], 10) + buf = append(buf, 'i', ' ') + buf = strconv.AppendInt(buf, a.Timestamps[i], 10) + wr.Write(buf) + wr.Write(newLine) + } + } else { + break + } + } + case cursors.FloatArrayCursor: + for { + a := ccur.Next() + if a.Len() > 0 { + for i := range a.Timestamps { + buf := strconv.AppendFloat(line, a.Values[i], 'f', -1, 64) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, a.Timestamps[i], 10) + wr.Write(buf) + wr.Write(newLine) + } + } else { + break + } + } + case cursors.UnsignedArrayCursor: + for { + a := ccur.Next() + if a.Len() > 0 { + for i := range a.Timestamps { + buf := strconv.AppendUint(line, a.Values[i], 10) + buf = append(buf, 'u', ' ') + buf = strconv.AppendInt(buf, a.Timestamps[i], 10) + wr.Write(buf) + wr.Write(newLine) + } + } else { + break + } + } + case cursors.BooleanArrayCursor: + for { + a := ccur.Next() + if a.Len() > 0 { + for i := range a.Timestamps { + buf := strconv.AppendBool(line, a.Values[i]) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, a.Timestamps[i], 10) + wr.Write(buf) + wr.Write(newLine) + } + } else { + break + } + } + case cursors.StringArrayCursor: + for { + a := ccur.Next() + if a.Len() > 0 { + for i := range a.Timestamps { + buf := strconv.AppendQuote(line, a.Values[i]) + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, a.Timestamps[i], 10) + wr.Write(buf) + wr.Write(newLine) + } + } else { 
+ break + } + } + default: + panic("unreachable") + } + + cur.Close() + return cur.Err() +} diff --git a/storage/reads/series_cursor_test.go b/storage/reads/series_cursor_test.go new file mode 100644 index 00000000000..8eee6fe0b5c --- /dev/null +++ b/storage/reads/series_cursor_test.go @@ -0,0 +1 @@ +package reads diff --git a/storage/reads/store_test.go b/storage/reads/store_test.go index 62fa108fa3c..432e0b34f92 100644 --- a/storage/reads/store_test.go +++ b/storage/reads/store_test.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/influxdb/tsdb/cursors" ) -func CursorToString(wr io.Writer, cur cursors.Cursor, opts ...optionFn) { +func cursorToString(wr io.Writer, cur cursors.Cursor) { switch ccur := cur.(type) { case cursors.IntegerArrayCursor: fmt.Fprintln(wr, "Integer") @@ -87,10 +87,6 @@ func CursorToString(wr io.Writer, cur cursors.Cursor, opts ...optionFn) { const nilVal = "" -var ( - nilValBytes = []byte(nilVal) -) - func joinString(b [][]byte) string { s := make([]string, len(b)) for i := range b { @@ -104,14 +100,14 @@ func joinString(b [][]byte) string { return strings.Join(s, ",") } -func TagsToString(wr io.Writer, tags models.Tags, opts ...optionFn) { +func tagsToString(wr io.Writer, tags models.Tags, opts ...optionFn) { if k := tags.HashKey(); len(k) > 0 { fmt.Fprintf(wr, "%s", string(k[1:])) } fmt.Fprintln(wr) } -func ResultSetToString(wr io.Writer, rs reads.ResultSet, opts ...optionFn) { +func resultSetToString(wr io.Writer, rs reads.ResultSet, opts ...optionFn) { var po PrintOptions for _, o := range opts { o(&po) @@ -122,7 +118,7 @@ func ResultSetToString(wr io.Writer, rs reads.ResultSet, opts ...optionFn) { for rs.Next() { fmt.Fprint(wr, "series: ") - TagsToString(wr, rs.Tags()) + tagsToString(wr, rs.Tags()) cur := rs.Cursor() if po.SkipNilCursor && cur == nil { @@ -137,7 +133,7 @@ func ResultSetToString(wr io.Writer, rs reads.ResultSet, opts ...optionFn) { goto LOOP } - CursorToString(wr, cur) + cursorToString(wr, cur) LOOP: iw.Indent(-2) } @@ -154,7 +150,7 @@ func GroupResultSetToString(wr io.Writer, rs reads.GroupResultSet, opts ...optio fmt.Fprintf(wr, "tag key : %s\n", joinString(gc.Keys())) fmt.Fprintf(wr, "partition key: %s\n", joinString(gc.PartitionKeyVals())) iw.Indent(2) - ResultSetToString(wr, gc, opts...) + resultSetToString(wr, gc, opts...) iw.Indent(-4) gc = rs.Next() } diff --git a/storage/reads/stream_reader.gen.go b/storage/reads/stream_reader.gen.go deleted file mode 100644 index 31b61b692f3..00000000000 --- a/storage/reads/stream_reader.gen.go +++ /dev/null @@ -1,249 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! 
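
Editor's note: a typical call site for the new ResultSetToLineProtocol just hands it a writer and a result set; the function closes the result set itself via its deferred Close. A sketch under the assumption that store satisfies this package's Store interface, whose ReadFilter signature matches the mock used in the removed reader test.

package example

import (
	"context"
	"io"

	"github.com/influxdata/influxdb/storage/reads"
	"github.com/influxdata/influxdb/storage/reads/datatypes"
)

// dumpFiltered writes every series of a filtered read as line protocol.
func dumpFiltered(ctx context.Context, store reads.Store, req *datatypes.ReadFilterRequest, wr io.Writer) error {
	rs, err := store.ReadFilter(ctx, req)
	if err != nil {
		return err
	}
	// ResultSetToLineProtocol closes rs before returning.
	return reads.ResultSetToLineProtocol(wr, rs)
}
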
-// Source: stream_reader.gen.go.tmpl - -package reads - -import ( - "fmt" - - "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" -) - -type floatCursorStreamReader struct { - fr *frameReader - a cursors.FloatArray -} - -func (c *floatCursorStreamReader) Close() { - for c.fr.state == stateReadFloatPoints { - c.readFrame() - } -} - -func (c *floatCursorStreamReader) Err() error { return c.fr.err } - -func (c *floatCursorStreamReader) Next() *cursors.FloatArray { - if c.fr.state == stateReadFloatPoints { - c.readFrame() - } - return &c.a -} - -func (c *floatCursorStreamReader) readFrame() { - c.a.Timestamps = nil - c.a.Values = nil - - if f := c.fr.peekFrame(); f != nil { - switch ff := f.Data.(type) { - case *datatypes.ReadResponse_Frame_FloatPoints: - c.a.Timestamps = ff.FloatPoints.Timestamps - c.a.Values = ff.FloatPoints.Values - c.fr.nextFrame() - - case *datatypes.ReadResponse_Frame_Series: - c.fr.state = stateReadSeries - - case *datatypes.ReadResponse_Frame_Group: - c.fr.state = stateReadGroup - - default: - c.fr.setErr(fmt.Errorf("floatCursorStreamReader: unexpected frame type %T", f.Data)) - } - } -} - -func (c *floatCursorStreamReader) Stats() cursors.CursorStats { - return c.fr.stats.Stats() -} - -type integerCursorStreamReader struct { - fr *frameReader - a cursors.IntegerArray -} - -func (c *integerCursorStreamReader) Close() { - for c.fr.state == stateReadIntegerPoints { - c.readFrame() - } -} - -func (c *integerCursorStreamReader) Err() error { return c.fr.err } - -func (c *integerCursorStreamReader) Next() *cursors.IntegerArray { - if c.fr.state == stateReadIntegerPoints { - c.readFrame() - } - return &c.a -} - -func (c *integerCursorStreamReader) readFrame() { - c.a.Timestamps = nil - c.a.Values = nil - - if f := c.fr.peekFrame(); f != nil { - switch ff := f.Data.(type) { - case *datatypes.ReadResponse_Frame_IntegerPoints: - c.a.Timestamps = ff.IntegerPoints.Timestamps - c.a.Values = ff.IntegerPoints.Values - c.fr.nextFrame() - - case *datatypes.ReadResponse_Frame_Series: - c.fr.state = stateReadSeries - - case *datatypes.ReadResponse_Frame_Group: - c.fr.state = stateReadGroup - - default: - c.fr.setErr(fmt.Errorf("integerCursorStreamReader: unexpected frame type %T", f.Data)) - } - } -} - -func (c *integerCursorStreamReader) Stats() cursors.CursorStats { - return c.fr.stats.Stats() -} - -type unsignedCursorStreamReader struct { - fr *frameReader - a cursors.UnsignedArray -} - -func (c *unsignedCursorStreamReader) Close() { - for c.fr.state == stateReadUnsignedPoints { - c.readFrame() - } -} - -func (c *unsignedCursorStreamReader) Err() error { return c.fr.err } - -func (c *unsignedCursorStreamReader) Next() *cursors.UnsignedArray { - if c.fr.state == stateReadUnsignedPoints { - c.readFrame() - } - return &c.a -} - -func (c *unsignedCursorStreamReader) readFrame() { - c.a.Timestamps = nil - c.a.Values = nil - - if f := c.fr.peekFrame(); f != nil { - switch ff := f.Data.(type) { - case *datatypes.ReadResponse_Frame_UnsignedPoints: - c.a.Timestamps = ff.UnsignedPoints.Timestamps - c.a.Values = ff.UnsignedPoints.Values - c.fr.nextFrame() - - case *datatypes.ReadResponse_Frame_Series: - c.fr.state = stateReadSeries - - case *datatypes.ReadResponse_Frame_Group: - c.fr.state = stateReadGroup - - default: - c.fr.setErr(fmt.Errorf("unsignedCursorStreamReader: unexpected frame type %T", f.Data)) - } - } -} - -func (c *unsignedCursorStreamReader) Stats() cursors.CursorStats { - return c.fr.stats.Stats() -} - -type 
stringCursorStreamReader struct { - fr *frameReader - a cursors.StringArray -} - -func (c *stringCursorStreamReader) Close() { - for c.fr.state == stateReadStringPoints { - c.readFrame() - } -} - -func (c *stringCursorStreamReader) Err() error { return c.fr.err } - -func (c *stringCursorStreamReader) Next() *cursors.StringArray { - if c.fr.state == stateReadStringPoints { - c.readFrame() - } - return &c.a -} - -func (c *stringCursorStreamReader) readFrame() { - c.a.Timestamps = nil - c.a.Values = nil - - if f := c.fr.peekFrame(); f != nil { - switch ff := f.Data.(type) { - case *datatypes.ReadResponse_Frame_StringPoints: - c.a.Timestamps = ff.StringPoints.Timestamps - c.a.Values = ff.StringPoints.Values - c.fr.nextFrame() - - case *datatypes.ReadResponse_Frame_Series: - c.fr.state = stateReadSeries - - case *datatypes.ReadResponse_Frame_Group: - c.fr.state = stateReadGroup - - default: - c.fr.setErr(fmt.Errorf("stringCursorStreamReader: unexpected frame type %T", f.Data)) - } - } -} - -func (c *stringCursorStreamReader) Stats() cursors.CursorStats { - return c.fr.stats.Stats() -} - -type booleanCursorStreamReader struct { - fr *frameReader - a cursors.BooleanArray -} - -func (c *booleanCursorStreamReader) Close() { - for c.fr.state == stateReadBooleanPoints { - c.readFrame() - } -} - -func (c *booleanCursorStreamReader) Err() error { return c.fr.err } - -func (c *booleanCursorStreamReader) Next() *cursors.BooleanArray { - if c.fr.state == stateReadBooleanPoints { - c.readFrame() - } - return &c.a -} - -func (c *booleanCursorStreamReader) readFrame() { - c.a.Timestamps = nil - c.a.Values = nil - - if f := c.fr.peekFrame(); f != nil { - switch ff := f.Data.(type) { - case *datatypes.ReadResponse_Frame_BooleanPoints: - c.a.Timestamps = ff.BooleanPoints.Timestamps - c.a.Values = ff.BooleanPoints.Values - c.fr.nextFrame() - - case *datatypes.ReadResponse_Frame_Series: - c.fr.state = stateReadSeries - - case *datatypes.ReadResponse_Frame_Group: - c.fr.state = stateReadGroup - - default: - c.fr.setErr(fmt.Errorf("booleanCursorStreamReader: unexpected frame type %T", f.Data)) - } - } -} - -func (c *booleanCursorStreamReader) Stats() cursors.CursorStats { - return c.fr.stats.Stats() -} diff --git a/storage/reads/stream_reader.go b/storage/reads/stream_reader.go deleted file mode 100644 index d30b95e7267..00000000000 --- a/storage/reads/stream_reader.go +++ /dev/null @@ -1,455 +0,0 @@ -package reads - -import ( - "errors" - "fmt" - "io" - "strconv" - - "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -var ( - // ErrPartitionKeyOrder means the partition keys for a - // GroupResultSetStreamReader were incorrectly ordered. - ErrPartitionKeyOrder = errors.New("invalid partition key order") - - // ErrStreamNoData means the StreamReader repeatedly returned no data - // when calling Recv - ErrStreamNoData = errors.New("peekFrame: no data") -) - -// peekFrameRetries specifies the number of times peekFrame will -// retry before returning ErrStreamNoData when StreamReader.Recv -// returns an empty result. -const peekFrameRetries = 2 - -type StreamReader interface { - Recv() (*datatypes.ReadResponse, error) -} - -// statistics is the interface which wraps the Stats method. 
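
Editor's note: each of these generated cursor stream readers follows the same array-cursor contract: Next() returns batches until an empty array signals the end, after which Err() reports any stream error. A minimal consumer, written against cursors.FloatArrayCursor as returned by ResultSetStreamReader.Cursor() for float series.

package example

import "github.com/influxdata/influxdb/tsdb/cursors"

// sumFloats drains a float cursor the way these readers are meant to be
// consumed: Next() until an empty array, then Close() and check Err().
func sumFloats(cur cursors.FloatArrayCursor) (float64, error) {
	defer cur.Close()
	var total float64
	for {
		a := cur.Next()
		if a.Len() == 0 {
			break
		}
		for _, v := range a.Values {
			total += v
		}
	}
	return total, cur.Err()
}
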
-type statistics interface { - Stats() cursors.CursorStats -} - -var zeroStatistics statistics = &emptyStatistics{} - -type emptyStatistics struct{} - -func (*emptyStatistics) Stats() cursors.CursorStats { - return cursors.CursorStats{} -} - -type StreamClient interface { - StreamReader - grpc.ClientStream -} - -// StorageReadClient adapts a grpc client to implement the cursors.Statistics -// interface and read the statistics from the gRPC trailer. -type StorageReadClient struct { - client StreamClient - trailer metadata.MD -} - -// NewStorageReadClient returns a new StorageReadClient which implements -// StreamReader and reads the gRPC trailer to return CursorStats. -func NewStorageReadClient(client StreamClient) *StorageReadClient { - return &StorageReadClient{client: client} -} - -func (rc *StorageReadClient) Recv() (res *datatypes.ReadResponse, err error) { - res, err = rc.client.Recv() - if err != nil { - rc.trailer = rc.client.Trailer() - } - return res, err -} - -func (rc *StorageReadClient) Stats() (stats cursors.CursorStats) { - for _, s := range rc.trailer.Get("scanned-bytes") { - v, err := strconv.Atoi(s) - if err != nil { - continue - } - stats.ScannedBytes += v - } - for _, s := range rc.trailer.Get("scanned-values") { - v, err := strconv.Atoi(s) - if err != nil { - continue - } - stats.ScannedValues += v - } - return stats -} - -type ResultSetStreamReader struct { - fr frameReader - cur cursorReaders - - tags models.Tags - prev models.Tags -} - -func NewResultSetStreamReader(stream StreamReader) *ResultSetStreamReader { - r := &ResultSetStreamReader{fr: frameReader{s: stream, state: stateReadSeries}} - r.fr.init() - r.cur.setFrameReader(&r.fr) - return r -} - -func (r *ResultSetStreamReader) Err() error { return r.fr.err } -func (r *ResultSetStreamReader) Close() { r.fr.state = stateDone } -func (r *ResultSetStreamReader) Cursor() cursors.Cursor { return r.cur.cursor() } -func (r *ResultSetStreamReader) Stats() cursors.CursorStats { - return r.fr.stats.Stats() -} - -// Peek reads the next frame on the underlying stream-reader -// if there is one -func (r *ResultSetStreamReader) Peek() { - r.fr.peekFrame() -} - -func (r *ResultSetStreamReader) Next() bool { - if r.fr.state == stateReadSeries { - return r.readSeriesFrame() - } - - if r.fr.state == stateDone || r.fr.state == stateReadErr { - return false - } - - r.fr.setErr(fmt.Errorf("expected reader in state %v, was in state %v", stateReadSeries, r.fr.state)) - - return false -} - -func (r *ResultSetStreamReader) readSeriesFrame() bool { - f := r.fr.peekFrame() - if f == nil { - return false - } - r.fr.nextFrame() - - if sf, ok := f.Data.(*datatypes.ReadResponse_Frame_Series); ok { - r.fr.state = stateReadPoints - - r.prev, r.tags = r.tags, r.prev - - if cap(r.tags) < len(sf.Series.Tags) { - r.tags = make(models.Tags, len(sf.Series.Tags)) - } else { - r.tags = r.tags[:len(sf.Series.Tags)] - } - - for i := range sf.Series.Tags { - r.tags[i].Key = sf.Series.Tags[i].Key - r.tags[i].Value = sf.Series.Tags[i].Value - } - - r.cur.nextType = sf.Series.DataType - - return true - } else { - r.fr.setErr(fmt.Errorf("expected series frame, got %T", f.Data)) - } - - return false -} - -func (r *ResultSetStreamReader) Tags() models.Tags { - return r.tags -} - -type GroupResultSetStreamReader struct { - fr frameReader - gc groupCursorStreamReader -} - -func NewGroupResultSetStreamReader(stream StreamReader) *GroupResultSetStreamReader { - r := &GroupResultSetStreamReader{fr: frameReader{s: stream, state: stateReadGroup}} - r.fr.init() - 
r.gc.fr = &r.fr - r.gc.cur.setFrameReader(&r.fr) - return r -} - -func (r *GroupResultSetStreamReader) Err() error { return r.fr.err } - -// Peek reads the next frame on the underlying stream-reader -// if there is one -func (r *GroupResultSetStreamReader) Peek() { - r.fr.peekFrame() -} - -func (r *GroupResultSetStreamReader) Next() GroupCursor { - if r.fr.state == stateReadGroup { - return r.readGroupFrame() - } - - if r.fr.state == stateDone || r.fr.state == stateReadErr { - return nil - } - - r.fr.setErr(fmt.Errorf("expected reader in state %v, was in state %v", stateReadGroup, r.fr.state)) - - return nil -} - -func (r *GroupResultSetStreamReader) readGroupFrame() GroupCursor { - f := r.fr.peekFrame() - if f == nil { - return nil - } - r.fr.nextFrame() - - if sf, ok := f.Data.(*datatypes.ReadResponse_Frame_Group); ok { - r.fr.state = stateReadSeries - - if cap(r.gc.tagKeys) < len(sf.Group.TagKeys) { - r.gc.tagKeys = make([][]byte, len(sf.Group.TagKeys)) - } else { - r.gc.tagKeys = r.gc.tagKeys[:len(sf.Group.TagKeys)] - } - copy(r.gc.tagKeys, sf.Group.TagKeys) - - r.gc.partitionKeyVals, r.gc.prevKey = r.gc.prevKey, r.gc.partitionKeyVals - - if cap(r.gc.partitionKeyVals) < len(sf.Group.PartitionKeyVals) { - r.gc.partitionKeyVals = make([][]byte, len(sf.Group.PartitionKeyVals)) - } else { - r.gc.partitionKeyVals = r.gc.partitionKeyVals[:len(sf.Group.PartitionKeyVals)] - } - - copy(r.gc.partitionKeyVals, sf.Group.PartitionKeyVals) - - if comparePartitionKey(r.gc.partitionKeyVals, r.gc.prevKey, nilSortHi) == 1 || r.gc.prevKey == nil { - return &r.gc - } - - r.fr.setErr(ErrPartitionKeyOrder) - } else { - r.fr.setErr(fmt.Errorf("expected group frame, got %T", f.Data)) - } - - return nil -} - -func (r *GroupResultSetStreamReader) Close() { - r.fr.state = stateDone -} - -type groupCursorStreamReader struct { - fr *frameReader - cur cursorReaders - - tagKeys [][]byte - partitionKeyVals [][]byte - prevKey [][]byte - tags models.Tags -} - -func (gc *groupCursorStreamReader) Err() error { return gc.fr.err } -func (gc *groupCursorStreamReader) Tags() models.Tags { return gc.tags } -func (gc *groupCursorStreamReader) Keys() [][]byte { return gc.tagKeys } -func (gc *groupCursorStreamReader) PartitionKeyVals() [][]byte { return gc.partitionKeyVals } -func (gc *groupCursorStreamReader) Cursor() cursors.Cursor { return gc.cur.cursor() } -func (gc *groupCursorStreamReader) Stats() cursors.CursorStats { - return gc.fr.stats.Stats() -} - -func (gc *groupCursorStreamReader) Next() bool { - if gc.fr.state == stateReadSeries { - return gc.readSeriesFrame() - } - - if gc.fr.state == stateDone || gc.fr.state == stateReadErr || gc.fr.state == stateReadGroup { - return false - } - - gc.fr.setErr(fmt.Errorf("expected reader in state %v, was in state %v", stateReadSeries, gc.fr.state)) - - return false -} - -func (gc *groupCursorStreamReader) readSeriesFrame() bool { - f := gc.fr.peekFrame() - if f == nil { - return false - } - - if sf, ok := f.Data.(*datatypes.ReadResponse_Frame_Series); ok { - gc.fr.nextFrame() - gc.fr.state = stateReadPoints - - if cap(gc.tags) < len(sf.Series.Tags) { - gc.tags = make(models.Tags, len(sf.Series.Tags)) - } else { - gc.tags = gc.tags[:len(sf.Series.Tags)] - } - - for i := range sf.Series.Tags { - gc.tags[i].Key = sf.Series.Tags[i].Key - gc.tags[i].Value = sf.Series.Tags[i].Value - } - - gc.cur.nextType = sf.Series.DataType - - return true - } else if _, ok := f.Data.(*datatypes.ReadResponse_Frame_Group); ok { - gc.fr.state = stateReadGroup - return false - } - - 
gc.fr.setErr(fmt.Errorf("expected series frame, got %T", f.Data)) - - return false -} - -func (gc *groupCursorStreamReader) Close() { -RETRY: - if gc.fr.state == stateReadPoints { - cur := gc.Cursor() - if cur != nil { - cur.Close() - } - } - - if gc.fr.state == stateReadSeries { - gc.readSeriesFrame() - goto RETRY - } -} - -type readState byte - -const ( - stateReadGroup readState = iota - stateReadSeries - stateReadPoints - stateReadFloatPoints - stateReadIntegerPoints - stateReadUnsignedPoints - stateReadBooleanPoints - stateReadStringPoints - stateReadErr - stateDone -) - -type frameReader struct { - s StreamReader - stats statistics - state readState - buf []datatypes.ReadResponse_Frame - p int - err error -} - -func (r *frameReader) init() { - if stats, ok := r.s.(statistics); ok { - r.stats = stats - } else { - r.stats = zeroStatistics - } -} - -func (r *frameReader) peekFrame() *datatypes.ReadResponse_Frame { - retries := peekFrameRetries - -RETRY: - if r.p < len(r.buf) { - f := &r.buf[r.p] - return f - } - - r.p = 0 - r.buf = nil - res, err := r.s.Recv() - if err == nil { - if res != nil { - r.buf = res.Frames - } - if retries > 0 { - retries-- - goto RETRY - } - - r.setErr(ErrStreamNoData) - } else if err == io.EOF { - r.state = stateDone - } else { - r.setErr(err) - } - return nil -} - -func (r *frameReader) nextFrame() { r.p++ } - -func (r *frameReader) setErr(err error) { - r.err = err - r.state = stateReadErr -} - -type cursorReaders struct { - fr *frameReader - nextType datatypes.ReadResponse_DataType - - cc cursors.Cursor - - f floatCursorStreamReader - i integerCursorStreamReader - u unsignedCursorStreamReader - b booleanCursorStreamReader - s stringCursorStreamReader -} - -func (cur *cursorReaders) setFrameReader(fr *frameReader) { - cur.fr = fr - cur.f.fr = fr - cur.i.fr = fr - cur.u.fr = fr - cur.b.fr = fr - cur.s.fr = fr -} - -func (cur *cursorReaders) cursor() cursors.Cursor { - cur.cc = nil - if cur.fr.state != stateReadPoints { - cur.fr.setErr(fmt.Errorf("expected reader in state %v, was in state %v", stateReadPoints, cur.fr.state)) - return cur.cc - } - - switch cur.nextType { - case datatypes.DataTypeFloat: - cur.fr.state = stateReadFloatPoints - cur.cc = &cur.f - - case datatypes.DataTypeInteger: - cur.fr.state = stateReadIntegerPoints - cur.cc = &cur.i - - case datatypes.DataTypeUnsigned: - cur.fr.state = stateReadUnsignedPoints - cur.cc = &cur.u - - case datatypes.DataTypeBoolean: - cur.fr.state = stateReadBooleanPoints - cur.cc = &cur.b - - case datatypes.DataTypeString: - cur.fr.state = stateReadStringPoints - cur.cc = &cur.s - - default: - cur.fr.setErr(fmt.Errorf("unexpected data type, %d", cur.nextType)) - } - - return cur.cc -} diff --git a/storage/reads/stream_reader_gen_test.go b/storage/reads/stream_reader_gen_test.go deleted file mode 100644 index 085a90f36ec..00000000000 --- a/storage/reads/stream_reader_gen_test.go +++ /dev/null @@ -1,163 +0,0 @@ -// Generated by tmpl -// https://github.com/benbjohnson/tmpl -// -// DO NOT EDIT! 
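
Editor's note: frameReader.peekFrame above tolerates at most peekFrameRetries consecutive empty responses before failing with ErrStreamNoData, and treats io.EOF as a clean end of stream. The retry shape reduced to a standalone sketch; recv and the int batches are hypothetical stand-ins for StreamReader.Recv and response frames.

package main

import (
	"errors"
	"fmt"
	"io"
)

var errNoData = errors.New("peekFrame: no data")

// peek pulls the next non-empty batch from recv, retrying empty
// responses a bounded number of times, as frameReader.peekFrame does.
func peek(recv func() ([]int, error), retries int) ([]int, error) {
	for {
		batch, err := recv()
		if err == io.EOF {
			return nil, io.EOF // stream finished cleanly
		}
		if err != nil {
			return nil, err
		}
		if len(batch) > 0 {
			return batch, nil
		}
		if retries == 0 {
			return nil, errNoData // too many empty responses
		}
		retries--
	}
}

func main() {
	responses := [][]int{nil, nil, {1, 2}}
	i := 0
	recv := func() ([]int, error) {
		if i >= len(responses) {
			return nil, io.EOF
		}
		r := responses[i]
		i++
		return r, nil
	}
	// Two empty responses are absorbed by the retry budget of 2.
	fmt.Println(peek(recv, 2)) // [1 2] <nil>
}
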
-// Source: stream_reader_gen_test.go.tmpl - -package reads_test - -import ( - "sort" - - "github.com/influxdata/influxdb/storage/reads/datatypes" -) - -type FloatPoints datatypes.ReadResponse_FloatPointsFrame - -func (a FloatPoints) Len() int { return len(a.Timestamps) } - -func (a FloatPoints) Less(i, j int) bool { return a.Timestamps[i] < a.Timestamps[j] } - -func (a FloatPoints) Swap(i, j int) { - a.Timestamps[i], a.Timestamps[j] = a.Timestamps[j], a.Timestamps[i] - a.Values[i], a.Values[j] = a.Values[j], a.Values[i] -} - -type floatS map[int64]float64 - -func floatF(points floatS) datatypes.ReadResponse_Frame { - var block FloatPoints - for t, v := range points { - block.Timestamps = append(block.Timestamps, t) - block.Values = append(block.Values, v) - } - - sort.Sort(block) - pointsFrame := datatypes.ReadResponse_FloatPointsFrame(block) - - return datatypes.ReadResponse_Frame{ - Data: &datatypes.ReadResponse_Frame_FloatPoints{ - FloatPoints: &pointsFrame, - }, - } -} - -type IntegerPoints datatypes.ReadResponse_IntegerPointsFrame - -func (a IntegerPoints) Len() int { return len(a.Timestamps) } - -func (a IntegerPoints) Less(i, j int) bool { return a.Timestamps[i] < a.Timestamps[j] } - -func (a IntegerPoints) Swap(i, j int) { - a.Timestamps[i], a.Timestamps[j] = a.Timestamps[j], a.Timestamps[i] - a.Values[i], a.Values[j] = a.Values[j], a.Values[i] -} - -type integerS map[int64]int64 - -func integerF(points integerS) datatypes.ReadResponse_Frame { - var block IntegerPoints - for t, v := range points { - block.Timestamps = append(block.Timestamps, t) - block.Values = append(block.Values, v) - } - - sort.Sort(block) - pointsFrame := datatypes.ReadResponse_IntegerPointsFrame(block) - - return datatypes.ReadResponse_Frame{ - Data: &datatypes.ReadResponse_Frame_IntegerPoints{ - IntegerPoints: &pointsFrame, - }, - } -} - -type UnsignedPoints datatypes.ReadResponse_UnsignedPointsFrame - -func (a UnsignedPoints) Len() int { return len(a.Timestamps) } - -func (a UnsignedPoints) Less(i, j int) bool { return a.Timestamps[i] < a.Timestamps[j] } - -func (a UnsignedPoints) Swap(i, j int) { - a.Timestamps[i], a.Timestamps[j] = a.Timestamps[j], a.Timestamps[i] - a.Values[i], a.Values[j] = a.Values[j], a.Values[i] -} - -type unsignedS map[int64]uint64 - -func unsignedF(points unsignedS) datatypes.ReadResponse_Frame { - var block UnsignedPoints - for t, v := range points { - block.Timestamps = append(block.Timestamps, t) - block.Values = append(block.Values, v) - } - - sort.Sort(block) - pointsFrame := datatypes.ReadResponse_UnsignedPointsFrame(block) - - return datatypes.ReadResponse_Frame{ - Data: &datatypes.ReadResponse_Frame_UnsignedPoints{ - UnsignedPoints: &pointsFrame, - }, - } -} - -type StringPoints datatypes.ReadResponse_StringPointsFrame - -func (a StringPoints) Len() int { return len(a.Timestamps) } - -func (a StringPoints) Less(i, j int) bool { return a.Timestamps[i] < a.Timestamps[j] } - -func (a StringPoints) Swap(i, j int) { - a.Timestamps[i], a.Timestamps[j] = a.Timestamps[j], a.Timestamps[i] - a.Values[i], a.Values[j] = a.Values[j], a.Values[i] -} - -type stringS map[int64]string - -func stringF(points stringS) datatypes.ReadResponse_Frame { - var block StringPoints - for t, v := range points { - block.Timestamps = append(block.Timestamps, t) - block.Values = append(block.Values, v) - } - - sort.Sort(block) - pointsFrame := datatypes.ReadResponse_StringPointsFrame(block) - - return datatypes.ReadResponse_Frame{ - Data: &datatypes.ReadResponse_Frame_StringPoints{ - StringPoints: 
diff --git a/storage/reads/stream_reader_test.go b/storage/reads/stream_reader_test.go
deleted file mode 100644
index d8d6f798584..00000000000
--- a/storage/reads/stream_reader_test.go
+++ /dev/null
@@ -1,819 +0,0 @@
-package reads_test
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"strings"
-	"testing"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/influxdata/influxdb/models"
-	"github.com/influxdata/influxdb/storage/reads"
-	"github.com/influxdata/influxdb/storage/reads/datatypes"
-)
-
-func errCmp(x, y error) bool {
-	if x == nil {
-		return y == nil
-	}
-	if y == nil {
-		return false
-	}
-	return x.Error() == y.Error()
-}
-
-func errTr(x error) string {
-	if x == nil {
-		return ""
-	}
-	return x.Error()
-}
-
-func TestNewResultSetStreamReader(t *testing.T) {
-	tests := []struct {
-		name   string
-		stream *sliceStreamReader
-		exp    string
-		expErr error
-	}{
-		{
-			name: "float series",
-			stream: newStreamReader(
-				response(
-					seriesF(Float, "cpu,tag0=val0"),
-					floatF(floatS{
-						0: 1.0,
-						1: 2.0,
-						2: 3.0,
-					}),
-					seriesF(Float, "cpu,tag0=val1"),
-					floatF(floatS{
-						10: 11.0,
-						11: 12.0,
-						12: 13.0,
-					}),
-				),
-			),
-			exp: `series: _m=cpu,tag0=val0
- cursor:Float
- 0 | 1.00
- 1 | 2.00
- 2 | 3.00
-series: _m=cpu,tag0=val1
- cursor:Float
- 10 | 11.00
- 11 | 12.00
- 12 | 13.00
-`,
-		},
-
-		{
-			name: "some empty series",
-			stream: newStreamReader(
-				response(
-					seriesF(Float, "cpu,tag0=val0"),
-					seriesF(Float, "cpu,tag0=val1"),
-					floatF(floatS{
-						10: 11.0,
-						11: 12.0,
-						12: 13.0,
-					}),
-				),
-			),
-			exp: `series: _m=cpu,tag0=val0
- cursor:Float
-series: _m=cpu,tag0=val1
- cursor:Float
- 10 | 11.00
- 11 | 12.00
- 12 | 13.00
-`,
-		},
-
-		{
-			name: "all data types",
-			stream: newStreamReader(
-				response(
-					seriesF(Boolean, "cpu,tag0=booleans"),
-					booleanF(booleanS{
-						3: false,
-						4: true,
-						5: true,
-					}),
-					seriesF(Float, "cpu,tag0=floats"),
-					floatF(floatS{
-						0: 1.0,
-						1: 2.0,
-						2: 3.0,
-					}),
-					seriesF(Integer, "cpu,tag0=integers"),
-					integerF(integerS{
-						1: 1,
-						2: 2,
-						3: 3,
-					}),
-					seriesF(String, "cpu,tag0=strings"),
-					stringF(stringS{
-						33: "thing 1",
-						34: "thing 2",
-						35: "things",
-					}),
-					seriesF(Unsigned, "cpu,tag0=unsigned"),
-					unsignedF(unsignedS{
-						2: 55,
-						3: 56,
-						4: 57,
-					}),
-				),
-			),
-			exp: `series: _m=cpu,tag0=booleans
- cursor:Boolean
- 3 | false
- 4 | true
- 5 | true
-series: _m=cpu,tag0=floats
- cursor:Float
- 0 | 1.00
- 1 | 2.00
- 2 | 3.00
-series: _m=cpu,tag0=integers
- cursor:Integer
- 1 | 1
- 2 | 2
- 3 | 3
-series: _m=cpu,tag0=strings
- cursor:String
- 33 | thing 1
- 34 | thing 2
- 35 | things
-series: _m=cpu,tag0=unsigned
- cursor:Unsigned
- 2 | 55
- 3 | 56
- 4 | 57
-`,
-		},
"invalid_points_no_series", - stream: newStreamReader( - response( - floatF(floatS{0: 1.0}), - ), - ), - expErr: errors.New("expected series frame, got *datatypes.ReadResponse_Frame_FloatPoints"), - }, - - { - name: "no points frames", - stream: newStreamReader( - response( - seriesF(Boolean, "cpu,tag0=booleans"), - seriesF(Float, "cpu,tag0=floats"), - seriesF(Integer, "cpu,tag0=integers"), - seriesF(String, "cpu,tag0=strings"), - seriesF(Unsigned, "cpu,tag0=unsigned"), - ), - ), - exp: `series: _m=cpu,tag0=booleans - cursor:Boolean -series: _m=cpu,tag0=floats - cursor:Float -series: _m=cpu,tag0=integers - cursor:Integer -series: _m=cpu,tag0=strings - cursor:String -series: _m=cpu,tag0=unsigned - cursor:Unsigned -`, - }, - - { - name: "invalid_group_frame", - stream: newStreamReader( - response( - groupF("tag0", "val0"), - floatF(floatS{0: 1.0}), - ), - ), - expErr: errors.New("expected series frame, got *datatypes.ReadResponse_Frame_Group"), - }, - - { - name: "invalid_multiple_data_types", - stream: newStreamReader( - response( - seriesF(Float, "cpu,tag0=val0"), - floatF(floatS{0: 1.0}), - integerF(integerS{0: 1}), - ), - ), - exp: `series: _m=cpu,tag0=val0 - cursor:Float - 0 | 1.00 - cursor err: floatCursorStreamReader: unexpected frame type *datatypes.ReadResponse_Frame_IntegerPoints -`, - expErr: errors.New("floatCursorStreamReader: unexpected frame type *datatypes.ReadResponse_Frame_IntegerPoints"), - }, - - { - name: "some empty frames", - stream: newStreamReader( - response( - seriesF(Float, "cpu,tag0=val0"), - ), - response( - floatF(floatS{ - 0: 1.0, - 1: 2.0, - 2: 3.0, - }), - ), - response(), - response( - seriesF(Float, "cpu,tag0=val1"), - ), - response(), - response( - floatF(floatS{ - 10: 11.0, - 11: 12.0, - 12: 13.0, - }), - ), - response(), - ), - exp: `series: _m=cpu,tag0=val0 - cursor:Float - 0 | 1.00 - 1 | 2.00 - 2 | 3.00 -series: _m=cpu,tag0=val1 - cursor:Float - 10 | 11.00 - 11 | 12.00 - 12 | 13.00 -`, - }, - - { - name: "last frame empty", - stream: newStreamReader( - response( - seriesF(Float, "cpu,tag0=val0"), - floatF(floatS{ - 0: 1.0, - 1: 2.0, - 2: 3.0, - }), - ), - response(), - ), - exp: `series: _m=cpu,tag0=val0 - cursor:Float - 0 | 1.00 - 1 | 2.00 - 2 | 3.00 -`, - }, - - { - name: "ErrUnexpectedEOF", - stream: newStreamReader( - response( - seriesF(Float, "cpu,tag0=val0"), - ), - response( - floatF(floatS{ - 0: 1.0, - 1: 2.0, - 2: 3.0, - }), - ), - response(), - response(), - response(), - ), - exp: `series: _m=cpu,tag0=val0 - cursor:Float - 0 | 1.00 - 1 | 2.00 - 2 | 3.00 - cursor err: peekFrame: no data -`, - expErr: reads.ErrStreamNoData, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rs := reads.NewResultSetStreamReader(tt.stream) - - // ensure a peek doesn't effect the end result - rs.Peek() - - sb := new(strings.Builder) - ResultSetToString(sb, rs) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - - if got := rs.Err(); !cmp.Equal(got, tt.expErr, cmp.Comparer(errCmp)) { - t.Errorf("unexpected error; -got/+exp\n%s", cmp.Diff(got, tt.expErr, cmp.Transformer("err", errTr))) - } - }) - } -} - -func TestNewResultSetStreamReader_SkipSeriesCursors(t *testing.T) { - stream := newStreamReader( - response( - seriesF(Float, "cpu,tag0=floats"), - floatF(floatS{0: 1.0}), - seriesF(Integer, "cpu,tag0=integers"), - integerF(integerS{1: 1}), - seriesF(Unsigned, "cpu,tag0=unsigned"), - unsignedF(unsignedS{2: 55}), - ), - ) - - expSeries := 
[]string{"_m=cpu,tag0=floats", "_m=cpu,tag0=integers", "_m=cpu,tag0=unsigned"} - - rs := reads.NewResultSetStreamReader(stream) - for i := 0; i < 3; i++ { - if got := rs.Next(); !cmp.Equal(got, true) { - t.Errorf("expected true") - } - - sb := new(strings.Builder) - TagsToString(sb, rs.Tags()) - if got := strings.TrimSpace(sb.String()); !cmp.Equal(got, expSeries[i]) { - t.Errorf("unexpected tags; -got/+exp\n%s", cmp.Diff(got, expSeries[i])) - } - - cur := rs.Cursor() - if cur == nil { - t.Errorf("expected cursor") - } - - cur.Close() - } - - if got := rs.Next(); !cmp.Equal(got, false) { - t.Errorf("expected false") - } - rs.Close() -} - -func TestNewGroupResultSetStreamReader(t *testing.T) { - tests := []struct { - name string - stream *sliceStreamReader - exp string - expErr error - }{ - { - name: "groups none no series no points", - stream: newStreamReader( - response( - groupF("tag0,tag1", ""), - ), - ), - exp: `group: - tag key : tag0,tag1 - partition key: -`, - }, - { - name: "groups none series no points", - stream: newStreamReader( - response( - groupF("_m,tag0", ""), - seriesF(Float, "cpu,tag0=floats"), - seriesF(Integer, "cpu,tag0=integers"), - seriesF(Unsigned, "cpu,tag0=unsigned"), - ), - ), - exp: `group: - tag key : _m,tag0 - partition key: - series: _m=cpu,tag0=floats - cursor:Float - series: _m=cpu,tag0=integers - cursor:Integer - series: _m=cpu,tag0=unsigned - cursor:Unsigned -`, - }, - { - name: "groups none series points", - stream: newStreamReader( - response( - groupF("_m,tag0", ""), - seriesF(Float, "cpu,tag0=floats"), - floatF(floatS{0: 0.0, 1: 1.0, 2: 2.0}), - seriesF(Integer, "cpu,tag0=integers"), - integerF(integerS{10: 10, 20: 20, 30: 30}), - seriesF(Unsigned, "cpu,tag0=unsigned"), - unsignedF(unsignedS{100: 100, 200: 200, 300: 300}), - ), - ), - exp: `group: - tag key : _m,tag0 - partition key: - series: _m=cpu,tag0=floats - cursor:Float - 0 | 0.00 - 1 | 1.00 - 2 | 2.00 - series: _m=cpu,tag0=integers - cursor:Integer - 10 | 10 - 20 | 20 - 30 | 30 - series: _m=cpu,tag0=unsigned - cursor:Unsigned - 100 | 100 - 200 | 200 - 300 | 300 -`, - }, - { - name: "groups by no series no points", - stream: newStreamReader( - response( - groupF("tag00,tag10", "val00,val10"), - groupF("tag00,tag10", "val00,val11"), - groupF("tag00,tag10", "val01,val10"), - groupF("tag00,tag10", "val01,val11"), - ), - ), - exp: `group: - tag key : tag00,tag10 - partition key: val00,val10 -group: - tag key : tag00,tag10 - partition key: val00,val11 -group: - tag key : tag00,tag10 - partition key: val01,val10 -group: - tag key : tag00,tag10 - partition key: val01,val11 -`, - }, - { - name: "groups by series no points", - stream: newStreamReader( - response( - groupF("_m,tag0", "cpu,val0"), - seriesF(Float, "cpu,tag0=val0"), - seriesF(Float, "cpu,tag0=val0,tag1=val0"), - groupF("_m,tag0", "cpu,val1"), - seriesF(Float, "cpu,tag0=val1"), - seriesF(Float, "cpu,tag0=val1,tag1=val0"), - ), - ), - exp: `group: - tag key : _m,tag0 - partition key: cpu,val0 - series: _m=cpu,tag0=val0 - cursor:Float - series: _m=cpu,tag0=val0,tag1=val0 - cursor:Float -group: - tag key : _m,tag0 - partition key: cpu,val1 - series: _m=cpu,tag0=val1 - cursor:Float - series: _m=cpu,tag0=val1,tag1=val0 - cursor:Float -`, - }, - { - name: "missing group frame", - stream: newStreamReader( - response( - seriesF(Float, "cpu,tag0=val0"), - ), - ), - expErr: errors.New("expected group frame, got *datatypes.ReadResponse_Frame_Series"), - }, - { - name: "incorrect points frame data type", - stream: newStreamReader( - response( - 
groupF("_m,tag0", "cpu,val0"), - seriesF(Float, "cpu,tag0=val0"), - integerF(integerS{0: 1}), - ), - ), - exp: `group: - tag key : _m,tag0 - partition key: cpu,val0 - series: _m=cpu,tag0=val0 - cursor:Float - cursor err: floatCursorStreamReader: unexpected frame type *datatypes.ReadResponse_Frame_IntegerPoints -`, - expErr: errors.New("floatCursorStreamReader: unexpected frame type *datatypes.ReadResponse_Frame_IntegerPoints"), - }, - { - name: "partition key order", - stream: newStreamReader( - response( - groupF("_m,tag0", "cpu,val1"), - groupF("_m,tag0", "cpu,val0"), - ), - ), - exp: `group: - tag key : _m,tag0 - partition key: cpu,val1 -`, - expErr: reads.ErrPartitionKeyOrder, - }, - { - name: "partition key order", - stream: newStreamReader( - response( - groupF("_m", "cpu,"), - groupF("_m,tag0", "cpu,val0"), - ), - ), - exp: `group: - tag key : _m - partition key: cpu, -`, - expErr: reads.ErrPartitionKeyOrder, - }, - { - name: "partition key order", - stream: newStreamReader( - response( - groupF("_m,tag0", ",val0"), - groupF("_m,tag0", "cpu,val0"), - ), - ), - exp: `group: - tag key : _m,tag0 - partition key: ,val0 -`, - expErr: reads.ErrPartitionKeyOrder, - }, - { - name: "partition key order", - stream: newStreamReader( - response( - groupF("_m,tag0", "cpu,val0"), - groupF("_m,tag0", "cpu,val1"), - groupF("_m,tag0", ","), - groupF("_m,tag0", ",val0"), - ), - ), - exp: `group: - tag key : _m,tag0 - partition key: cpu,val0 -group: - tag key : _m,tag0 - partition key: cpu,val1 -group: - tag key : _m,tag0 - partition key: , -`, - expErr: reads.ErrPartitionKeyOrder, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rs := reads.NewGroupResultSetStreamReader(tt.stream) - - // ensure a peek doesn't effect the end result - rs.Peek() - - sb := new(strings.Builder) - GroupResultSetToString(sb, rs) - - if got := sb.String(); !cmp.Equal(got, tt.exp) { - t.Errorf("unexpected value; -got/+exp\n%s", cmp.Diff(got, tt.exp)) - } - - if got := rs.Err(); !cmp.Equal(got, tt.expErr, cmp.Comparer(errCmp)) { - t.Errorf("unexpected error; -got/+exp\n%s", cmp.Diff(got, tt.expErr, cmp.Transformer("err", errTr))) - } - }) - } -} - -func joinB(b [][]byte) string { - return string(bytes.Join(b, []byte(","))) -} - -func TestNewGroupResultSetStreamReader_SkipGroupCursors(t *testing.T) { - stream := newStreamReader( - response( - groupF("_m,tag0", "cpu,val0"), - seriesF(Float, "cpu,tag0=val0"), - floatF(floatS{0: 1.0}), - groupF("_m,tag0", "cpu,val1"), - seriesF(Integer, "cpu,tag0=val1,tag1=val0"), - integerF(integerS{1: 1}), - seriesF(Integer, "cpu,tag0=val1,tag1=val1"), - unsignedF(unsignedS{2: 55}), - ), - ) - - type expGroup struct { - tagKeys string - parKeys string - series []string - } - - t.Run("skip series cursors", func(t *testing.T) { - exp := []expGroup{ - { - tagKeys: "_m,tag0", - parKeys: "cpu,val0", - series: []string{"_m=cpu,tag0=val0"}, - }, - { - tagKeys: "_m,tag0", - parKeys: "cpu,val1", - series: []string{"_m=cpu,tag0=val1,tag1=val0", "_m=cpu,tag0=val1,tag1=val1"}, - }, - } - - stream.reset() - grs := reads.NewGroupResultSetStreamReader(stream) - - for i := range exp { - rs := grs.Next() - if rs == nil { - t.Errorf("expected group cursor") - } - - if got := joinB(rs.Keys()); !cmp.Equal(got, exp[i].tagKeys) { - t.Errorf("unexpected group keys; -got/+exp\n%s", cmp.Diff(got, exp[i].tagKeys)) - } - if got := joinB(rs.PartitionKeyVals()); !cmp.Equal(got, exp[i].parKeys) { - t.Errorf("unexpected group keys; -got/+exp\n%s", cmp.Diff(got, exp[i].parKeys)) - } - - for j := 
-
-func TestNewGroupResultSetStreamReader_SkipGroupCursors(t *testing.T) {
-	stream := newStreamReader(
-		response(
-			groupF("_m,tag0", "cpu,val0"),
-			seriesF(Float, "cpu,tag0=val0"),
-			floatF(floatS{0: 1.0}),
-			groupF("_m,tag0", "cpu,val1"),
-			seriesF(Integer, "cpu,tag0=val1,tag1=val0"),
-			integerF(integerS{1: 1}),
-			seriesF(Integer, "cpu,tag0=val1,tag1=val1"),
-			unsignedF(unsignedS{2: 55}),
-		),
-	)
-
-	type expGroup struct {
-		tagKeys string
-		parKeys string
-		series  []string
-	}
-
-	t.Run("skip series cursors", func(t *testing.T) {
-		exp := []expGroup{
-			{
-				tagKeys: "_m,tag0",
-				parKeys: "cpu,val0",
-				series:  []string{"_m=cpu,tag0=val0"},
-			},
-			{
-				tagKeys: "_m,tag0",
-				parKeys: "cpu,val1",
-				series:  []string{"_m=cpu,tag0=val1,tag1=val0", "_m=cpu,tag0=val1,tag1=val1"},
-			},
-		}
-
-		stream.reset()
-		grs := reads.NewGroupResultSetStreamReader(stream)
-
-		for i := range exp {
-			rs := grs.Next()
-			if rs == nil {
-				t.Errorf("expected group cursor")
-			}
-
-			if got := joinB(rs.Keys()); !cmp.Equal(got, exp[i].tagKeys) {
-				t.Errorf("unexpected group keys; -got/+exp\n%s", cmp.Diff(got, exp[i].tagKeys))
-			}
-			if got := joinB(rs.PartitionKeyVals()); !cmp.Equal(got, exp[i].parKeys) {
-				t.Errorf("unexpected partition keys; -got/+exp\n%s", cmp.Diff(got, exp[i].parKeys))
-			}
-
-			for j := range exp[i].series {
-				if got := rs.Next(); !cmp.Equal(got, true) {
-					t.Errorf("expected true")
-				}
-
-				sb := new(strings.Builder)
-				TagsToString(sb, rs.Tags())
-				if got := strings.TrimSpace(sb.String()); !cmp.Equal(got, exp[i].series[j]) {
-					t.Errorf("unexpected tags; -got/+exp\n%s", cmp.Diff(got, exp[i].series[j]))
-				}
-
-				cur := rs.Cursor()
-				if cur == nil {
-					t.Errorf("expected cursor")
-				}
-
-				cur.Close()
-			}
-
-			if got := rs.Next(); !cmp.Equal(got, false) {
-				t.Errorf("expected false")
-			}
-			rs.Close()
-		}
-
-		if rs := grs.Next(); rs != nil {
-			t.Errorf("unexpected group cursor")
-		}
-
-		grs.Close()
-	})
-
-	t.Run("skip series", func(t *testing.T) {
-		exp := []expGroup{
-			{
-				tagKeys: "_m,tag0",
-				parKeys: "cpu,val0",
-			},
-			{
-				tagKeys: "_m,tag0",
-				parKeys: "cpu,val1",
-			},
-		}
-
-		stream.reset()
-		grs := reads.NewGroupResultSetStreamReader(stream)
-
-		for i := range exp {
-			rs := grs.Next()
-			if rs == nil {
-				t.Errorf("expected group cursor")
-			}
-
-			if got := joinB(rs.Keys()); !cmp.Equal(got, exp[i].tagKeys) {
-				t.Errorf("unexpected group keys; -got/+exp\n%s", cmp.Diff(got, exp[i].tagKeys))
-			}
-			if got := joinB(rs.PartitionKeyVals()); !cmp.Equal(got, exp[i].parKeys) {
-				t.Errorf("unexpected partition keys; -got/+exp\n%s", cmp.Diff(got, exp[i].parKeys))
-			}
-			rs.Close()
-		}
-
-		if rs := grs.Next(); rs != nil {
-			t.Errorf("unexpected group cursor")
-		}
-
-		grs.Close()
-	})
-
-}
-
-func response(f ...datatypes.ReadResponse_Frame) datatypes.ReadResponse {
-	return datatypes.ReadResponse{Frames: f}
-}
-
-type sliceStreamReader struct {
-	res []datatypes.ReadResponse
-	p   int
-}
-
-func newStreamReader(res ...datatypes.ReadResponse) *sliceStreamReader {
-	return &sliceStreamReader{res: res}
-}
-
-func (r *sliceStreamReader) reset() { r.p = 0 }
-
-func (r *sliceStreamReader) Recv() (*datatypes.ReadResponse, error) {
-	if r.p < len(r.res) {
-		res := &r.res[r.p]
-		r.p++
-		return res, nil
-	}
-	return nil, io.EOF
-}
-
-func (r *sliceStreamReader) String() string {
-	return ""
-}
-
-// errStreamReader is a reads.StreamReader that always returns an error.
-type errStreamReader string
-
-func (e errStreamReader) Recv() (*datatypes.ReadResponse, error) {
-	return nil, errors.New(string(e))
-}
-
-// emptyStreamReader is a reads.StreamReader that returns no data.
-type emptyStreamReader struct{}
-
-func (s *emptyStreamReader) Recv() (*datatypes.ReadResponse, error) {
-	return nil, nil
-}
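These three stubs are the whole test-double surface: sliceStreamReader replays canned responses, errStreamReader fails every Recv, and emptyStreamReader returns neither data nor an error, which is exactly what eventually trips the peekFrameRetries bound. If the compiler should police that they keep satisfying the interface from the deleted stream_reader.go, the conventional assertions would be:

```go
// Hypothetical compile-time checks (not in the original file);
// reads.StreamReader is the interface these stubs stand in for.
var (
	_ reads.StreamReader = (*sliceStreamReader)(nil)
	_ reads.StreamReader = errStreamReader("")
	_ reads.StreamReader = (*emptyStreamReader)(nil)
)
```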
-
-func groupF(tagKeys string, partitionKeyVals string) datatypes.ReadResponse_Frame {
-	var pk [][]byte
-	if partitionKeyVals != "" {
-		pk = bytes.Split([]byte(partitionKeyVals), []byte(","))
-		for i := range pk {
-			if bytes.Equal(pk[i], nilValBytes) {
-				pk[i] = nil
-			}
-		}
-	}
-	return datatypes.ReadResponse_Frame{
-		Data: &datatypes.ReadResponse_Frame_Group{
-			Group: &datatypes.ReadResponse_GroupFrame{
-				TagKeys:          bytes.Split([]byte(tagKeys), []byte(",")),
-				PartitionKeyVals: pk,
-			},
-		},
-	}
-}
-
-const (
-	Float    = datatypes.DataTypeFloat
-	Integer  = datatypes.DataTypeInteger
-	Unsigned = datatypes.DataTypeUnsigned
-	Boolean  = datatypes.DataTypeBoolean
-	String   = datatypes.DataTypeString
-)
-
-func seriesF(dt datatypes.ReadResponse_DataType, measurement string) datatypes.ReadResponse_Frame {
-	name, tags := models.ParseKeyBytes([]byte(measurement))
-	tags.Set([]byte("_m"), name)
-	t := make([]datatypes.Tag, len(tags))
-	for i, tag := range tags {
-		t[i].Key = tag.Key
-		t[i].Value = tag.Value
-	}
-
-	return datatypes.ReadResponse_Frame{
-		Data: &datatypes.ReadResponse_Frame_Series{
-			Series: &datatypes.ReadResponse_SeriesFrame{
-				DataType: dt,
-				Tags:     t,
-			},
-		},
-	}
-}
diff --git a/storage/reads/string_iterator_reader.go b/storage/reads/string_iterator_reader.go
deleted file mode 100644
index cf76eb4f730..00000000000
--- a/storage/reads/string_iterator_reader.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package reads
-
-import (
-	"github.com/influxdata/influxdb/storage/reads/datatypes"
-	"github.com/influxdata/influxdb/tsdb/cursors"
-)
-
-type StringValuesStreamReader interface {
-	Recv() (*datatypes.StringValuesResponse, error)
-}
-
-type StringIteratorStreamReader struct {
-	stream   StringValuesStreamReader
-	response *datatypes.StringValuesResponse
-	i        int
-
-	err error
-}
-
-// API compatibility
-var _ cursors.StringIterator = (*StringIteratorStreamReader)(nil)
-
-func NewStringIteratorStreamReader(stream StringValuesStreamReader) *StringIteratorStreamReader {
-	return &StringIteratorStreamReader{
-		stream: stream,
-	}
-}
-
-func (r *StringIteratorStreamReader) Err() error {
-	return r.err
-}
-
-func (r *StringIteratorStreamReader) Next() bool {
-	if r.err != nil {
-		return false
-	}
-
-	if r.response == nil || len(r.response.Values)-1 <= r.i {
-		r.response, r.err = r.stream.Recv()
-		if r.err != nil {
-			return false
-		}
-		r.i = 0
-
-	} else {
-		r.i++
-	}
-
-	return len(r.response.Values) > r.i
-}
-
-func (r *StringIteratorStreamReader) Value() string {
-	if len(r.response.Values) > r.i {
-		return string(r.response.Values[r.i])
-	}
-
-	// Better than panic.
-	return ""
-}
-
-func (r *StringIteratorStreamReader) Stats() cursors.CursorStats {
-	return cursors.CursorStats{}
-}
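Two behaviors of StringIteratorStreamReader are easy to miss. Next advances within the current response and only calls Recv when the batch is exhausted (or on the first call), so batch boundaries are invisible to the caller; and because a normal end of stream is reported by Recv as an error, Err() is left holding io.EOF after a clean drain, which callers presumably must treat as success. A consumer sketch, assuming `it` comes from reads.NewStringIteratorStreamReader:

```go
for it.Next() {
	fmt.Println(it.Value())
}
if err := it.Err(); err != nil && err != io.EOF {
	return err // only non-EOF Recv errors are real failures
}
```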
- return "" -} - -func (r *StringIteratorStreamReader) Stats() cursors.CursorStats { - return cursors.CursorStats{} -} diff --git a/storage/reads/string_iterator_reader_test.go b/storage/reads/string_iterator_reader_test.go deleted file mode 100644 index 8b59cc2a396..00000000000 --- a/storage/reads/string_iterator_reader_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package reads_test - -import ( - "io" - "reflect" - "testing" - - "github.com/influxdata/influxdb/storage/reads" - "github.com/influxdata/influxdb/storage/reads/datatypes" - "github.com/influxdata/influxdb/tsdb/cursors" -) - -type mockStringIterator struct { - values []string - nextValue *string - stats cursors.CursorStats -} - -func newMockStringIterator(scannedValues, scannedBytes int, values ...string) *mockStringIterator { - return &mockStringIterator{ - values: values, - stats: cursors.CursorStats{ - ScannedValues: scannedValues, - ScannedBytes: scannedBytes, - }, - } -} - -func (si *mockStringIterator) Next() bool { - if len(si.values) > 0 { - si.nextValue = &si.values[0] - si.values = si.values[1:] - return true - } - si.nextValue = nil - return false -} - -func (si *mockStringIterator) Value() string { - if si.nextValue != nil { - return *si.nextValue - } - - // Better than panic. - return "" -} - -func (si *mockStringIterator) Stats() cursors.CursorStats { - if len(si.values) > 0 { - return cursors.CursorStats{} - } - return si.stats -} - -type mockStringValuesStreamReader struct { - responses []*datatypes.StringValuesResponse -} - -func newMockStringValuesStreamReader(responseValuess ...[]string) *mockStringValuesStreamReader { - responses := make([]*datatypes.StringValuesResponse, len(responseValuess)) - for i := range responseValuess { - responses[i] = &datatypes.StringValuesResponse{ - Values: make([][]byte, len(responseValuess[i])), - } - for j := range responseValuess[i] { - responses[i].Values[j] = []byte(responseValuess[i][j]) - } - } - return &mockStringValuesStreamReader{ - responses: responses, - } -} - -func (r *mockStringValuesStreamReader) Recv() (*datatypes.StringValuesResponse, error) { - if len(r.responses) > 0 { - tr := r.responses[0] - r.responses = r.responses[1:] - return tr, nil - } - - return nil, io.EOF -} - -func TestStringIteratorStreamReader(t *testing.T) { - tests := []struct { - name string - responseValuess [][]string // []string is the values from one response - expectReadValues []string - }{ - { - name: "simple", - responseValuess: [][]string{{"foo", "bar"}}, - expectReadValues: []string{"foo", "bar"}, - }, - { - name: "no deduplication expected", - responseValuess: [][]string{{"foo", "bar", "bar"}, {"foo"}}, - expectReadValues: []string{"foo", "bar", "bar", "foo"}, - }, - { - name: "not as simple", - responseValuess: [][]string{{"foo", "bar", "baz"}, {"qux"}, {"more"}}, - expectReadValues: []string{"foo", "bar", "baz", "qux", "more"}, - }, - } - - for _, tt := range tests { - stream := newMockStringValuesStreamReader(tt.responseValuess...) 
diff --git a/storage/reads/string_iterator_writer.go b/storage/reads/string_iterator_writer.go
deleted file mode 100644
index 38e3e7e662a..00000000000
--- a/storage/reads/string_iterator_writer.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package reads
-
-import (
-	"github.com/influxdata/influxdb/storage/reads/datatypes"
-	"github.com/influxdata/influxdb/tsdb/cursors"
-)
-
-type StringIteratorStream interface {
-	Send(*datatypes.StringValuesResponse) error
-}
-
-type StringIteratorWriter struct {
-	stream StringIteratorStream
-	res    *datatypes.StringValuesResponse
-	err    error
-
-	sz int // estimated size in bytes for pending write
-	vc int // total value count
-}
-
-func NewStringIteratorWriter(stream StringIteratorStream) *StringIteratorWriter {
-	siw := &StringIteratorWriter{
-		stream: stream,
-		res: &datatypes.StringValuesResponse{
-			Values: nil,
-		},
-	}
-
-	return siw
-}
-
-func (w *StringIteratorWriter) Err() error {
-	return w.err
-}
-
-func (w *StringIteratorWriter) WrittenN() int {
-	return w.vc
-}
-
-func (w *StringIteratorWriter) WriteStringIterator(si cursors.StringIterator) error {
-	if si == nil {
-		return nil
-	}
-
-	for si.Next() {
-		v := si.Value()
-		if v == "" {
-			// no value, no biggie
-			continue
-		}
-
-		w.res.Values = append(w.res.Values, []byte(v))
-		w.sz += len(v)
-		w.vc++
-	}
-
-	return nil
-}
-
-func (w *StringIteratorWriter) Flush() {
-	if w.err != nil || w.sz == 0 {
-		return
-	}
-
-	w.sz, w.vc = 0, 0
-
-	if w.err = w.stream.Send(w.res); w.err != nil {
-		return
-	}
-
-	for i := range w.res.Values {
-		w.res.Values[i] = nil
-	}
-	w.res.Values = w.res.Values[:0]
-}
diff --git a/storage/reads/string_iterator_writer_test.go b/storage/reads/string_iterator_writer_test.go
deleted file mode 100644
index d068614810d..00000000000
--- a/storage/reads/string_iterator_writer_test.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package reads_test
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/influxdata/influxdb/storage/reads"
-	"github.com/influxdata/influxdb/storage/reads/datatypes"
-)
-
-type mockStringValuesStream struct {
-	responsesSent []*datatypes.StringValuesResponse
-}
-
-func (s *mockStringValuesStream) Send(response *datatypes.StringValuesResponse) error {
-	responseCopy := &datatypes.StringValuesResponse{
-		Values: make([][]byte, len(response.Values)),
-	}
-	for i := range response.Values {
-		responseCopy.Values[i] = response.Values[i]
-	}
-	s.responsesSent = append(s.responsesSent, responseCopy)
-	return nil
-}
-
-func TestStringIteratorWriter(t *testing.T) {
-	mockStream := &mockStringValuesStream{}
-	w := reads.NewStringIteratorWriter(mockStream)
-	si := newMockStringIterator(1, 2, "foo", "bar")
-	err := w.WriteStringIterator(si)
-	if err != nil {
-		t.Fatal(err)
-	}
-	w.Flush()
-
-	var got []string
-	for _, response := range mockStream.responsesSent {
-		for _, v := range response.Values {
-			got = append(got, string(v))
-		}
-	}
-
-	expect := []string{"foo", "bar"}
-	if !reflect.DeepEqual(expect, got) {
-		t.Errorf("expected %v got %v", expect, got)
-	}
-}
-
-func TestStringIteratorWriter_Nil(t *testing.T) {
-	w := reads.NewStringIteratorWriter(&mockStringValuesStream{})
-	err := w.WriteStringIterator(nil)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	w.Flush()
-}
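The writer half buffers everything handed to WriteStringIterator and only hits the wire on Flush. Note three details visible in the code above: empty strings are silently dropped, the Values backing array is recycled after a successful Send, and Flush zeroes the vc counter before sending, so WrittenN() must be read before Flush. A hypothetical server-side sketch, where `stream` is a StringIteratorStream and `it` a cursors.StringIterator from the storage engine:

```go
w := reads.NewStringIteratorWriter(stream)
if err := w.WriteStringIterator(it); err != nil {
	return err
}
n := w.WrittenN() // read before Flush, which resets the counter
w.Flush()
if err := w.Err(); err != nil {
	return err
}
log.Printf("sent %d string values", n)
```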
diff --git a/storage/reads/viewer.go b/storage/reads/viewer.go
new file mode 100644
index 00000000000..8eee6fe0b5c
--- /dev/null
+++ b/storage/reads/viewer.go
@@ -0,0 +1 @@
+package reads