From 2970769e384fcc504beb1832887abf5fd14cd896 Mon Sep 17 00:00:00 2001
From: iguazio-deploy
Date: Tue, 4 Jun 2019 20:52:32 +0000
Subject: [PATCH 1/2] Updated TSDB to v0.9.2

---
 .../mattn/go-colorable/colorable_windows.go | 208 ++-
 vendor/github.com/mattn/go-colorable/go.mod | 3 +
 vendor/github.com/mattn/go-colorable/go.sum | 4 +
 vendor/github.com/mattn/go-isatty/go.mod | 3 +
 vendor/github.com/mattn/go-isatty/go.sum | 2 +
 .../mattn/go-isatty/isatty_appengine.go | 15 -
 .../github.com/mattn/go-isatty/isatty_bsd.go | 6 +
 .../mattn/go-isatty/isatty_linux.go | 20 +-
 .../mattn/go-isatty/isatty_linux_ppc64x.go | 19 -
 .../mattn/go-isatty/isatty_others.go | 11 +-
 .../mattn/go-isatty/isatty_solaris.go | 6 +
 vendor/github.com/nuclio/errors/errors.go | 285 +++++
 vendor/github.com/nuclio/errors/go.mod | 3 +
 vendor/github.com/nuclio/errors/go.sum | 0
 .../v3io/v3io-go/pkg/dataplane/container.go | 127 ++
 .../v3io/v3io-go/pkg/dataplane/context.go | 24 +
 .../v3io-go/pkg/dataplane/http/container.go | 226 ++++
 .../v3io-go/pkg/dataplane/http/context.go | 1118 +++++++++++++++++
 .../v3io-go/pkg/dataplane/http/headers.go | 69 +
 .../v3io-go/pkg/dataplane/http/session.go | 50 +
 .../v3io/v3io-go/pkg/dataplane/item.go | 55 +
 .../v3io/v3io-go/pkg/dataplane/itemscursor.go | 137 ++
 .../v3io-go/pkg/dataplane/requestresponse.go | 79 ++
 .../v3io/v3io-go/pkg/dataplane/session.go | 23 +
 .../v3io/v3io-go/pkg/dataplane/types.go | 292 +++++
 .../v3io/v3io-go/pkg/errors/errors.go | 30 +
 .../v3io/v3io-tsdb/pkg/aggregate/aggregate.go | 16 +
 .../v3io/v3io-tsdb/pkg/appender/appender.go | 6 +-
 .../v3io/v3io-tsdb/pkg/appender/ingest.go | 5 +-
 .../v3io/v3io-tsdb/pkg/appender/store.go | 7 +-
 .../v3io/v3io-tsdb/pkg/config/config.go | 16 +
 .../v3io/v3io-tsdb/pkg/partmgr/partmgr.go | 291 ++++-
 .../v3io/v3io-tsdb/pkg/pquerier/frames.go | 27 +-
 .../v3io/v3io-tsdb/pkg/pquerier/querier.go | 6 +-
 .../v3io/v3io-tsdb/pkg/pquerier/select.go | 7 +-
 .../v3io/v3io-tsdb/pkg/pquerier/sql_parser.go | 37 +-
 .../v3io/v3io-tsdb/pkg/pquerier/types.go | 13 +-
 .../v3io/v3io-tsdb/pkg/querier/querier.go | 6 +-
 .../v3io/v3io-tsdb/pkg/querier/seriesset.go | 4 +-
 .../v3io/v3io-tsdb/pkg/tsdb/schema/schema.go | 2 +-
 .../v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go | 95 +-
 .../v3io/v3io-tsdb/pkg/utils/asynciter.go | 11 +-
 .../v3io/v3io-tsdb/pkg/utils/container.go | 49 +-
 .../v3io/v3io-tsdb/pkg/utils/misc.go | 4 +-
 .../github.com/valyala/fasthttp/.travis.yml | 2 +-
 vendor/github.com/valyala/fasthttp/README.md | 1 +
 vendor/github.com/valyala/fasthttp/args.go | 112 +-
 .../github.com/valyala/fasthttp/bytesconv.go | 23 +-
 .../valyala/fasthttp/bytesconv_32.go | 1 -
 .../valyala/fasthttp/bytesconv_64.go | 1 -
 vendor/github.com/valyala/fasthttp/client.go | 185 ++-
 vendor/github.com/valyala/fasthttp/cookie.go | 60 +
 .../fasthttputil/inmemory_listener.go | 22 +-
 vendor/github.com/valyala/fasthttp/fs.go | 4 +-
 vendor/github.com/valyala/fasthttp/header.go | 115 +-
 vendor/github.com/valyala/fasthttp/http.go | 64 +-
 vendor/github.com/valyala/fasthttp/nocopy.go | 4 +-
 vendor/github.com/valyala/fasthttp/server.go | 243 +++-
 vendor/github.com/valyala/fasthttp/strings.go | 16 +-
 .../github.com/valyala/fasthttp/tcpdialer.go | 251 ++--
 vendor/github.com/valyala/fasthttp/timer.go | 14 +-
 .../github.com/valyala/fasthttp/workerpool.go | 2 +-
 62 files changed, 3977 insertions(+), 560 deletions(-)
 create mode 100644 vendor/github.com/mattn/go-colorable/go.mod
 create mode 100644 vendor/github.com/mattn/go-colorable/go.sum
 create mode 100644 vendor/github.com/mattn/go-isatty/go.mod
 create mode 100644
vendor/github.com/mattn/go-isatty/go.sum delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go create mode 100644 vendor/github.com/nuclio/errors/errors.go create mode 100644 vendor/github.com/nuclio/errors/go.mod create mode 100644 vendor/github.com/nuclio/errors/go.sum create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/http/headers.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/http/session.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/item.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/itemscursor.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/requestresponse.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/session.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go create mode 100644 vendor/github.com/v3io/v3io-go/pkg/errors/errors.go diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go index e17a5474e98..404e10ca02b 100644 --- a/vendor/github.com/mattn/go-colorable/colorable_windows.go +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -29,6 +29,15 @@ const ( backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) ) +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + type wchar uint16 type short int16 type dword uint32 @@ -69,14 +78,17 @@ var ( procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") ) // Writer provide colorable Writer to the console type Writer struct { - out io.Writer - handle syscall.Handle - oldattr word - oldpos coord + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer } // NewColorable return new instance of Writer which handle escape sequence from File. 
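The go-colorable hunks here extend the Windows Writer with an alternate screen-buffer handle and a buffer for partially received escape sequences; the package's public entry points are unchanged. For orientation only, a minimal usage sketch of how downstream code typically consumes the vendored go-colorable and go-isatty packages (this is illustrative, not part of this patch, and assumes only their documented APIs):

package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	// On Windows, NewColorableStdout returns a writer that translates ANSI
	// escape sequences into console API calls; on other platforms it simply
	// wraps os.Stdout.
	out := colorable.NewColorableStdout()

	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Fprintln(out, "\x1b[32mcolored output on a terminal\x1b[0m")
	} else {
		fmt.Fprintln(out, "plain output when piped")
	}
}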
@@ -407,7 +419,18 @@ func (w *Writer) Write(data []byte) (n int, err error) { var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) - er := bytes.NewReader(data) + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } var bw [1]byte loop: for { @@ -425,29 +448,55 @@ loop: break loop } - if c2 == ']' { - if err := doTitleSequence(er); err != nil { + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { break loop } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() continue - } - if c2 != 0x5b { + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: continue } + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + var buf bytes.Buffer var m byte - for { - c, err := er.ReadByte() - if err != nil { - break loop - } + for i, c := range w.rest.Bytes()[2:] { if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() break } buf.Write([]byte(string(c))) } + if m == 0 { + break loop + } switch m { case 'A': @@ -455,61 +504,64 @@ loop: if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'B': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'C': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'D': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), 
*(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'E': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y += short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'F': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y -= short(n) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'G': n, err = strconv.Atoi(buf.String()) if err != nil { continue } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = short(n - 1) - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'H', 'f': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) if buf.Len() > 0 { token := strings.Split(buf.String(), ";") switch len(token) { @@ -534,7 +586,7 @@ loop: } else { csbi.cursorPosition.y = 0 } - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'J': n := 0 if buf.Len() > 0 { @@ -545,20 +597,20 @@ loop: } var count, written dword var cursor coord - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) case 1: cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.window.top-csbi.cursorPosition.y)*csbi.size.x) + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) case 2: cursor = coord{x: csbi.window.left, y: csbi.window.top} - count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) } - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), 
uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'K': n := 0 if buf.Len() > 0 { @@ -567,28 +619,28 @@ loop: continue } } - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) var cursor coord var count, written dword switch n { case 0: - cursor = coord{x: csbi.cursorPosition.x + 1, y: csbi.cursorPosition.y} - count = dword(csbi.size.x - csbi.cursorPosition.x - 1) + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) case 1: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} count = dword(csbi.size.x - csbi.cursorPosition.x) case 2: - cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} count = dword(csbi.size.x) } - procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) - procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'm': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) attr := csbi.attributes cs := buf.String() if cs == "" { - procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) continue } token := strings.Split(cs, ";") @@ -627,6 +679,21 @@ loop: attr |= n256foreAttr[n256] i += 2 } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } } else { attr = attr & (w.oldattr & backgroundMask) } @@ -654,6 +721,21 @@ loop: attr |= n256backAttr[n256] i += 2 } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } } else { attr = attr & (w.oldattr & foregroundMask) } @@ -685,38 +767,52 @@ loop: attr |= backgroundBlue } } - 
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) } } case 'h': var ci consoleCursorInfo cs := buf.String() if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } } case 'l': var ci consoleCursorInfo cs := buf.String() if cs == "5>" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 1 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?25" { - procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 0 - procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } } case 's': - procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) w.oldpos = csbi.cursorPosition case 'u': - procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) } } diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 00000000000..9d9f4248541 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require github.com/mattn/go-isatty v0.0.5 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 00000000000..2c12960ec73 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 
00000000000..f310320c33f --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 00000000000..426c8973c0e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go deleted file mode 100644 index 9584a98842e..00000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on on appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} - -// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 42f2514d133..07e93039dbe 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -16,3 +16,9 @@ func IsTerminal(fd uintptr) bool { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go index 7384cf99167..e004038ee70 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_linux.go +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -1,18 +1,18 @@ // +build linux -// +build !appengine,!ppc64,!ppc64le +// +build !appengine package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false } diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go b/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go deleted file mode 100644 index 44e5d213021..00000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_linux_ppc64x.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build linux -// +build ppc64 ppc64le - -package isatty - -import ( - "unsafe" - - syscall "golang.org/x/sys/unix" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index 9d8b4a59961..f02849c56f2 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,9 +1,14 @@ -// +build !windows -// +build !appengine +// +build appengine js package isatty -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// IsTerminal returns true if the file descriptor is terminal which +// is always false on js and appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 // terminal. This is also always false on this environment. func IsCygwinTerminal(fd uintptr) bool { return false diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go index 1f0c6bf53dc..bdd5c79a07f 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_solaris.go +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -14,3 +14,9 @@ func IsTerminal(fd uintptr) bool { err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) return err == nil } + +// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/nuclio/errors/errors.go b/vendor/github.com/nuclio/errors/errors.go new file mode 100644 index 00000000000..dc70312e108 --- /dev/null +++ b/vendor/github.com/nuclio/errors/errors.go @@ -0,0 +1,285 @@ +// Package errors provides an api similar to github.com/nuclio/nuclio/pkg/errors +// However we don't carry stack trace around for performance +// (see https://github.com/pkg/errors/issues/124) +package errors + +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error +// %+v extended format. 
Will print stack trace of errors + +import ( + "bytes" + "fmt" + "io" + "os" + "runtime" + "strings" +) + +var ( + // ShowLineInfo sets if we collect location information (file, line) + // (getting location information makes creating error slower ~550ns vs 2ns) + ShowLineInfo bool +) + +// Error implements error interface with call stack +type Error struct { + message string + cause error + fileName string + lineNumber int +} + +func init() { + ShowLineInfo = len(os.Getenv("NUCLIO_NO_ERROR_LINE_INFO")) == 0 +} + +// caller return the caller informatin (file, line) +// Note this is sensitive to where it's called +func caller() (string, int) { + pcs := make([]uintptr, 1) + // skip 3 levels to get to the caller + n := runtime.Callers(3, pcs) + if n == 0 { + return "", 0 + } + + pc := pcs[0] - 1 + fn := runtime.FuncForPC(pc) + if fn == nil { + return "", 0 + } + + return fn.FileLine(pc) +} + +// New returns a new error +func New(message string) error { + err := &Error{message: message} + if ShowLineInfo { + err.fileName, err.lineNumber = caller() + } + return err +} + +// Errorf returns a new Error +func Errorf(format string, args ...interface{}) error { + err := &Error{message: fmt.Sprintf(format, args...)} + if ShowLineInfo { + err.fileName, err.lineNumber = caller() + } + return err +} + +// Wrap returns a new error with err as cause, if err is nil will return nil +func Wrap(err error, message string) error { + if err == nil { + return nil + } + + errObj := &Error{ + message: message, + cause: err, + } + + if ShowLineInfo { + errObj.fileName, errObj.lineNumber = caller() + } + return errObj +} + +// Wrapf returns a new error with err as cause, if err is nil will return nil +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + + message := fmt.Sprintf(format, args...) 
+ errObj := &Error{ + message: message, + cause: err, + } + if ShowLineInfo { + errObj.fileName, errObj.lineNumber = caller() + } + return errObj +} + +// Error is the string representation of the error +func (err *Error) Error() string { + return err.message +} + +// Cause returns the cause of the error +func (err *Error) Cause() error { + return err.cause +} + +func asError(err error) *Error { + errObj, ok := err.(*Error) + if !ok { + return nil + } + return errObj +} + +// LineInfo info returns the location (file, line) where the error was created +func (err *Error) LineInfo() (string, int) { + return err.fileName, err.lineNumber +} + +// reverse reverses a slice in place +func reverse(slice []error) { + for left, right := 0, len(slice)-1; left < right; left, right = left+1, right-1 { + slice[left], slice[right] = slice[right], slice[left] + } +} + +// GetErrorStack return stack of messges (oldest on top) +// if n == -1 returns the whole stack +func GetErrorStack(err error, depth int) []error { + errors := []error{err} + + errObj := asError(err) + if errObj == nil { + return errors + } + + for errObj = asError(errObj.cause); errObj != nil; errObj = asError(errObj.cause) { + errors = append(errors, errObj) + } + + reverse(errors) + if depth > 0 { + if depth > len(errors) { + depth = len(errors) + } + errors = errors[:depth] + } + return errors +} + +// GetErrorStackString returns the error stack as a string +func GetErrorStackString(err error, depth int) string { + buffer := bytes.Buffer{} + + PrintErrorStack(&buffer, err, depth) + + return buffer.String() +} + +// PrintErrorStack prints the error stack into out up to depth levels +// If n == 1 then prints the whole stack +func PrintErrorStack(out io.Writer, err error, depth int) { + if err == nil { + return + } + + pathLen := 40 + + stack := GetErrorStack(err, depth) + errObj := asError(stack[0]) + + if errObj != nil && errObj.lineNumber != 0 { + cause := errObj.Error() + if errObj.cause != nil { + cause = errObj.cause.Error() + } + + fmt.Fprintf(out, "\nError - %s", cause) // nolint: errcheck + fmt.Fprintf(out, "\n %s:%d\n", trimPath(errObj.fileName, pathLen), errObj.lineNumber) // nolint: errcheck + } else { + fmt.Fprintf(out, "\nError - %s", stack[0].Error()) // nolint: errcheck + } + + fmt.Fprintf(out, "\nCall stack:") // nolint: errcheck + + for _, e := range stack { + errObj := asError(e) + fmt.Fprintf(out, "\n%s", e.Error()) // nolint: errcheck + if errObj != nil && errObj.lineNumber != 0 { + fmt.Fprintf(out, "\n %s:%d", trimPath(errObj.fileName, pathLen), errObj.lineNumber) // nolint: errcheck + } + } + + out.Write([]byte{'\n'}) // nolint: errcheck +} + +// Cause is the cause of the error +func Cause(err error) error { + var cause error + + if err == nil { + return nil + } + + errAsError := asError(err) + if errAsError != nil { + cause = errAsError.cause + } + + // treat the err as simply an error + if cause == nil { + cause = err + } + + return cause +} + +// RootCause is the cause of the error +func RootCause(err error) error { + currentErr := err + for { + cause := Cause(currentErr) + + // if there's a cause go deeper + if cause == nil || cause == currentErr { + break + } + + currentErr = cause + } + + return currentErr +} + +// sumLengths return sum of lengths of strings +func sumLengths(parts []string) int { + total := 0 + for _, s := range parts { + total += len(s) + } + return total +} + +// trimPath shortens fileName to be at most size characters +func trimPath(fileName string, size int) string { + if len(fileName) <= 
size { + return fileName + } + + // We'd like to cut at directory boundary + parts := strings.Split(fileName, "/") + for sumLengths(parts) > size && len(parts) > 1 { + parts = parts[1:] + } + + return ".../" + strings.Join(parts, "/") +} + +// Format formats an error +func (err *Error) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + PrintErrorStack(s, err, -1) + } + fallthrough + case 's': + fmt.Fprintf(s, err.Error()) // nolint: errcheck + case 'q': + fmt.Fprintf(s, "%q", err.Error()) // nolint: errcheck + } +} diff --git a/vendor/github.com/nuclio/errors/go.mod b/vendor/github.com/nuclio/errors/go.mod new file mode 100644 index 00000000000..c242fb09f86 --- /dev/null +++ b/vendor/github.com/nuclio/errors/go.mod @@ -0,0 +1,3 @@ +module github.com/nuclio/errors + +go 1.12 diff --git a/vendor/github.com/nuclio/errors/go.sum b/vendor/github.com/nuclio/errors/go.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go new file mode 100644 index 00000000000..05157edfb04 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/container.go @@ -0,0 +1,127 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v3io + +// A container interface allows perform actions against a container +type Container interface { + + // + // Container + // + + // GetContainers + GetContainers(*GetContainersInput, interface{}, chan *Response) (*Request, error) + + // GetContainersSync + GetContainersSync(*GetContainersInput) (*Response, error) + + // GetContainers + GetContainerContents(*GetContainerContentsInput, interface{}, chan *Response) (*Request, error) + + // GetContainerContentsSync + GetContainerContentsSync(*GetContainerContentsInput) (*Response, error) + + // + // Object + // + + // GetObject + GetObject(*GetObjectInput, interface{}, chan *Response) (*Request, error) + + // GetObjectSync + GetObjectSync(*GetObjectInput) (*Response, error) + + // PutObject + PutObject(*PutObjectInput, interface{}, chan *Response) (*Request, error) + + // PutObjectSync + PutObjectSync(*PutObjectInput) error + + // DeleteObject + DeleteObject(*DeleteObjectInput, interface{}, chan *Response) (*Request, error) + + // DeleteObjectSync + DeleteObjectSync(*DeleteObjectInput) error + + // + // KV + // + + // GetItem + GetItem(*GetItemInput, interface{}, chan *Response) (*Request, error) + + // GetItemSync + GetItemSync(*GetItemInput) (*Response, error) + + // GetItems + GetItems(*GetItemsInput, interface{}, chan *Response) (*Request, error) + + // GetItemSync + GetItemsSync(*GetItemsInput) (*Response, error) + + // PutItem + PutItem(*PutItemInput, interface{}, chan *Response) (*Request, error) + + // PutItemSync + PutItemSync(*PutItemInput) error + + // PutItems + PutItems(*PutItemsInput, interface{}, chan *Response) (*Request, error) + + // PutItemsSync + PutItemsSync(*PutItemsInput) (*Response, error) + + // UpdateItem + UpdateItem(*UpdateItemInput, interface{}, chan *Response) (*Request, error) + + // UpdateItemSync + UpdateItemSync(*UpdateItemInput) error + + // + // Stream + // + + // CreateStream + CreateStream(*CreateStreamInput, interface{}, chan *Response) (*Request, error) + + // CreateStreamSync + CreateStreamSync(*CreateStreamInput) error + + // DeleteStream + DeleteStream(*DeleteStreamInput, interface{}, chan *Response) (*Request, error) + + // DeleteStreamSync + DeleteStreamSync(*DeleteStreamInput) error + + // SeekShard + SeekShard(*SeekShardInput, interface{}, chan *Response) (*Request, error) + + // SeekShardSync + SeekShardSync(*SeekShardInput) (*Response, error) + + // PutRecords + PutRecords(*PutRecordsInput, interface{}, chan *Response) (*Request, error) + + // PutRecordsSync + PutRecordsSync(*PutRecordsInput) (*Response, error) + + // GetRecords + GetRecords(*GetRecordsInput, interface{}, chan *Response) (*Request, error) + + // GetRecordsSync + GetRecordsSync(*GetRecordsInput) (*Response, error) +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go new file mode 100644 index 00000000000..5a9455df838 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/context.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +type Context interface { + Container + + // create a new session + NewSession(*NewSessionInput) (Session, error) +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go new file mode 100644 index 00000000000..370a39dff59 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/container.go @@ -0,0 +1,226 @@ +package v3iohttp + +import ( + "github.com/v3io/v3io-go/pkg/dataplane" + + "github.com/nuclio/logger" +) + +type container struct { + logger logger.Logger + session *session + containerName string +} + +func newContainer(parentLogger logger.Logger, + session *session, + containerName string) (v3io.Container, error) { + + return &container{ + logger: parentLogger.GetChild("container"), + session: session, + containerName: containerName, + }, nil +} + +func (c *container) populateInputFields(input *v3io.DataPlaneInput) { + input.ContainerName = c.containerName + input.AuthenticationToken = c.session.authenticationToken + input.AccessKey = c.session.accessKey +} + +// GetItem +func (c *container) GetItem(getItemInput *v3io.GetItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getItemInput.DataPlaneInput) + return c.session.context.GetItem(getItemInput, context, responseChan) +} + +// GetItemSync +func (c *container) GetItemSync(getItemInput *v3io.GetItemInput) (*v3io.Response, error) { + c.populateInputFields(&getItemInput.DataPlaneInput) + return c.session.context.GetItemSync(getItemInput) +} + +// GetItems +func (c *container) GetItems(getItemsInput *v3io.GetItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getItemsInput.DataPlaneInput) + return c.session.context.GetItems(getItemsInput, context, responseChan) +} + +// GetItemSync +func (c *container) GetItemsSync(getItemsInput *v3io.GetItemsInput) (*v3io.Response, error) { + c.populateInputFields(&getItemsInput.DataPlaneInput) + return c.session.context.GetItemsSync(getItemsInput) +} + +// PutItem +func (c *container) PutItem(putItemInput *v3io.PutItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&putItemInput.DataPlaneInput) + return c.session.context.PutItem(putItemInput, context, responseChan) +} + +// PutItemSync +func (c *container) PutItemSync(putItemInput *v3io.PutItemInput) error { + c.populateInputFields(&putItemInput.DataPlaneInput) + return c.session.context.PutItemSync(putItemInput) +} + +// PutItems +func (c *container) PutItems(putItemsInput *v3io.PutItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&putItemsInput.DataPlaneInput) + return c.session.context.PutItems(putItemsInput, context, responseChan) +} + +// PutItemsSync +func (c *container) PutItemsSync(putItemsInput *v3io.PutItemsInput) (*v3io.Response, error) { + c.populateInputFields(&putItemsInput.DataPlaneInput) + return c.session.context.PutItemsSync(putItemsInput) +} + +// UpdateItem +func (c *container) UpdateItem(updateItemInput *v3io.UpdateItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&updateItemInput.DataPlaneInput) + return c.session.context.UpdateItem(updateItemInput, context, responseChan) 
+} + +// UpdateItemSync +func (c *container) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { + c.populateInputFields(&updateItemInput.DataPlaneInput) + return c.session.context.UpdateItemSync(updateItemInput) +} + +// GetObject +func (c *container) GetObject(getObjectInput *v3io.GetObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getObjectInput.DataPlaneInput) + return c.session.context.GetObject(getObjectInput, context, responseChan) +} + +// GetObjectSync +func (c *container) GetObjectSync(getObjectInput *v3io.GetObjectInput) (*v3io.Response, error) { + c.populateInputFields(&getObjectInput.DataPlaneInput) + return c.session.context.GetObjectSync(getObjectInput) +} + +// PutObject +func (c *container) PutObject(putObjectInput *v3io.PutObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&putObjectInput.DataPlaneInput) + return c.session.context.PutObject(putObjectInput, context, responseChan) +} + +// PutObjectSync +func (c *container) PutObjectSync(putObjectInput *v3io.PutObjectInput) error { + c.populateInputFields(&putObjectInput.DataPlaneInput) + return c.session.context.PutObjectSync(putObjectInput) +} + +// DeleteObject +func (c *container) DeleteObject(deleteObjectInput *v3io.DeleteObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&deleteObjectInput.DataPlaneInput) + return c.session.context.DeleteObject(deleteObjectInput, context, responseChan) +} + +// DeleteObjectSync +func (c *container) DeleteObjectSync(deleteObjectInput *v3io.DeleteObjectInput) error { + c.populateInputFields(&deleteObjectInput.DataPlaneInput) + return c.session.context.DeleteObjectSync(deleteObjectInput) +} + +// GetContainers +func (c *container) GetContainers(getContainersInput *v3io.GetContainersInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getContainersInput.DataPlaneInput) + return c.session.context.GetContainers(getContainersInput, context, responseChan) +} + +// GetContainersSync +func (c *container) GetContainersSync(getContainersInput *v3io.GetContainersInput) (*v3io.Response, error) { + c.populateInputFields(&getContainersInput.DataPlaneInput) + return c.session.context.GetContainersSync(getContainersInput) +} + +// GetContainers +func (c *container) GetContainerContents(getContainerContentsInput *v3io.GetContainerContentsInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getContainerContentsInput.DataPlaneInput) + return c.session.context.GetContainerContents(getContainerContentsInput, context, responseChan) +} + +// GetContainerContentsSync +func (c *container) GetContainerContentsSync(getContainerContentsInput *v3io.GetContainerContentsInput) (*v3io.Response, error) { + c.populateInputFields(&getContainerContentsInput.DataPlaneInput) + return c.session.context.GetContainerContentsSync(getContainerContentsInput) +} + +// CreateStream +func (c *container) CreateStream(createStreamInput *v3io.CreateStreamInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&createStreamInput.DataPlaneInput) + return c.session.context.CreateStream(createStreamInput, context, responseChan) +} + +// CreateStreamSync +func (c *container) CreateStreamSync(createStreamInput *v3io.CreateStreamInput) error { + 
c.populateInputFields(&createStreamInput.DataPlaneInput) + return c.session.context.CreateStreamSync(createStreamInput) +} + +// DeleteStream +func (c *container) DeleteStream(deleteStreamInput *v3io.DeleteStreamInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&deleteStreamInput.DataPlaneInput) + return c.session.context.DeleteStream(deleteStreamInput, context, responseChan) +} + +// DeleteStreamSync +func (c *container) DeleteStreamSync(deleteStreamInput *v3io.DeleteStreamInput) error { + c.populateInputFields(&deleteStreamInput.DataPlaneInput) + return c.session.context.DeleteStreamSync(deleteStreamInput) +} + +// SeekShard +func (c *container) SeekShard(seekShardInput *v3io.SeekShardInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&seekShardInput.DataPlaneInput) + return c.session.context.SeekShard(seekShardInput, context, responseChan) +} + +// SeekShardSync +func (c *container) SeekShardSync(seekShardInput *v3io.SeekShardInput) (*v3io.Response, error) { + c.populateInputFields(&seekShardInput.DataPlaneInput) + return c.session.context.SeekShardSync(seekShardInput) +} + +// PutRecords +func (c *container) PutRecords(putRecordsInput *v3io.PutRecordsInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&putRecordsInput.DataPlaneInput) + return c.session.context.PutRecords(putRecordsInput, context, responseChan) +} + +// PutRecordsSync +func (c *container) PutRecordsSync(putRecordsInput *v3io.PutRecordsInput) (*v3io.Response, error) { + c.populateInputFields(&putRecordsInput.DataPlaneInput) + return c.session.context.PutRecordsSync(putRecordsInput) +} + +// GetRecords +func (c *container) GetRecords(getRecordsInput *v3io.GetRecordsInput, context interface{}, responseChan chan *v3io.Response) (*v3io.Request, error) { + c.populateInputFields(&getRecordsInput.DataPlaneInput) + return c.session.context.GetRecords(getRecordsInput, context, responseChan) +} + +// GetRecordsSync +func (c *container) GetRecordsSync(getRecordsInput *v3io.GetRecordsInput) (*v3io.Response, error) { + c.populateInputFields(&getRecordsInput.DataPlaneInput) + return c.session.context.GetRecordsSync(getRecordsInput) +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go new file mode 100644 index 00000000000..69c980ca92e --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/context.go @@ -0,0 +1,1118 @@ +package v3iohttp + +import ( + "bytes" + "crypto/tls" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "net" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/errors" + + "github.com/nuclio/errors" + "github.com/nuclio/logger" + "github.com/valyala/fasthttp" +) + +// TODO: Request should have a global pool +var requestID uint64 + +type context struct { + logger logger.Logger + requestChan chan *v3io.Request + httpClient *fasthttp.HostClient + clusterEndpoints []string + numWorkers int +} + +func NewContext(parentLogger logger.Logger, newContextInput *v3io.NewContextInput) (v3io.Context, error) { + var hosts []string + var httpEndpointFound, httpsEndpointFound bool + + if len(newContextInput.ClusterEndpoints) == 0 { + return nil, errors.New("Zero cluster endpoints provided") + } + + // iterate over endpoints 
which contain scheme + for _, clusterEndpoint := range newContextInput.ClusterEndpoints { + + // Return a clearer error if an empty cluster endpoint is provided. + if clusterEndpoint == "" { + return nil, errors.New("Cluster endpoint may not be empty") + } + + parsedClusterEndpoint, err := url.Parse(clusterEndpoint) + if err != nil { + return nil, err + } + switch parsedClusterEndpoint.Scheme { + case "http": + httpEndpointFound = true + case "https": + httpsEndpointFound = true + default: + return nil, errors.Errorf("Unsupported endpoint scheme: %s", parsedClusterEndpoint.Scheme) + } + hosts = append(hosts, parsedClusterEndpoint.Host) + } + + if httpEndpointFound && httpsEndpointFound { + return nil, errors.New("Cannot create a context with a mix of HTTP and HTTPS endpoints.") + } + + requestChanLen := newContextInput.RequestChanLen + if requestChanLen == 0 { + requestChanLen = 1024 + } + + numWorkers := newContextInput.NumWorkers + if numWorkers == 0 { + numWorkers = 8 + } + + tlsConfig := newContextInput.TlsConfig + if tlsConfig == nil { + tlsConfig = &tls.Config{InsecureSkipVerify: true} + } + + dialTimeout := newContextInput.DialTimeout + if dialTimeout == 0 { + dialTimeout = fasthttp.DefaultDialTimeout + } + dialFunction := func(addr string) (net.Conn, error) { + return fasthttp.DialTimeout(addMissingPort(addr, httpsEndpointFound), dialTimeout) + } + newContext := &context{ + logger: parentLogger.GetChild("context.http"), + httpClient: &fasthttp.HostClient{ + Addr: strings.Join(hosts, ","), + IsTLS: httpsEndpointFound, + TLSConfig: tlsConfig, + Dial: dialFunction, + }, + clusterEndpoints: newContextInput.ClusterEndpoints, + requestChan: make(chan *v3io.Request, requestChanLen), + numWorkers: numWorkers, + } + + for workerIndex := 0; workerIndex < numWorkers; workerIndex++ { + go newContext.workerEntry(workerIndex) + } + + return newContext, nil +} + +// Code from fasthttp library https://github.com/valyala/fasthttp/blob/ea427d2f448aa8abc0b139f638e80184d4b23d9d/client.go#L1596 +// For some reason, this code is skipped when a custom dial function is provided. 
+func addMissingPort(addr string, isTLS bool) string { + n := strings.Index(addr, ":") + if n >= 0 { + return addr + } + port := 80 + if isTLS { + port = 443 + } + return fmt.Sprintf("%s:%d", addr, port) +} + +// create a new session +func (c *context) NewSession(newSessionInput *v3io.NewSessionInput) (v3io.Session, error) { + return newSession(c.logger, + c, + newSessionInput.Username, + newSessionInput.Password, + newSessionInput.AccessKey) +} + +// GetContainers +func (c *context) GetContainers(getContainersInput *v3io.GetContainersInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getContainersInput, context, responseChan) +} + +// GetContainersSync +func (c *context) GetContainersSync(getContainersInput *v3io.GetContainersInput) (*v3io.Response, error) { + return c.sendRequestAndXMLUnmarshal( + &getContainersInput.DataPlaneInput, + http.MethodGet, + "", + "", + nil, + nil, + &v3io.GetContainersOutput{}) +} + +// GetContainers +func (c *context) GetContainerContents(getContainerContentsInput *v3io.GetContainerContentsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getContainerContentsInput, context, responseChan) +} + +// GetContainerContentsSync +func (c *context) GetContainerContentsSync(getContainerContentsInput *v3io.GetContainerContentsInput) (*v3io.Response, error) { + getContainerContentOutput := v3io.GetContainerContentsOutput{} + + query := "" + if getContainerContentsInput.Path != "" { + query += "prefix=" + getContainerContentsInput.Path + } + + return c.sendRequestAndXMLUnmarshal(&getContainerContentsInput.DataPlaneInput, + http.MethodGet, + "", + query, + nil, + nil, + &getContainerContentOutput) +} + +// GetItem +func (c *context) GetItem(getItemInput *v3io.GetItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getItemInput, context, responseChan) +} + +// GetItemSync +func (c *context) GetItemSync(getItemInput *v3io.GetItemInput) (*v3io.Response, error) { + + // no need to marshal, just sprintf + body := fmt.Sprintf(`{"AttributesToGet": "%s"}`, strings.Join(getItemInput.AttributeNames, ",")) + + response, err := c.sendRequest(&getItemInput.DataPlaneInput, + http.MethodPut, + getItemInput.Path, + "", + getItemHeaders, + []byte(body), + false) + + if err != nil { + return nil, err + } + + // ad hoc structure that contains response + item := struct { + Item map[string]map[string]interface{} + }{} + + c.logger.DebugWithCtx(getItemInput.Ctx, "Body", "body", string(response.Body())) + + // unmarshal the body + err = json.Unmarshal(response.Body(), &item) + if err != nil { + return nil, err + } + + // decode the response + attributes, err := c.decodeTypedAttributes(item.Item) + if err != nil { + return nil, err + } + + // attach the output to the response + response.Output = &v3io.GetItemOutput{Item: attributes} + + return response, nil +} + +// GetItems +func (c *context) GetItems(getItemsInput *v3io.GetItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getItemsInput, context, responseChan) +} + +// GetItemSync +func (c *context) GetItemsSync(getItemsInput *v3io.GetItemsInput) (*v3io.Response, error) { + + // create GetItem Body + body := map[string]interface{}{ + "AttributesToGet": strings.Join(getItemsInput.AttributeNames, ","), + } + + if getItemsInput.Filter != "" { + 
body["FilterExpression"] = getItemsInput.Filter + } + + if getItemsInput.Marker != "" { + body["Marker"] = getItemsInput.Marker + } + + if getItemsInput.ShardingKey != "" { + body["ShardingKey"] = getItemsInput.ShardingKey + } + + if getItemsInput.Limit != 0 { + body["Limit"] = getItemsInput.Limit + } + + if getItemsInput.TotalSegments != 0 { + body["TotalSegment"] = getItemsInput.TotalSegments + body["Segment"] = getItemsInput.Segment + } + + if getItemsInput.SortKeyRangeStart != "" { + body["SortKeyRangeStart"] = getItemsInput.SortKeyRangeStart + } + + if getItemsInput.SortKeyRangeEnd != "" { + body["SortKeyRangeEnd"] = getItemsInput.SortKeyRangeEnd + } + + marshalledBody, err := json.Marshal(body) + if err != nil { + return nil, err + } + + response, err := c.sendRequest(&getItemsInput.DataPlaneInput, + "PUT", + getItemsInput.Path, + "", + getItemsHeaders, + marshalledBody, + false) + + if err != nil { + return nil, err + } + + c.logger.DebugWithCtx(getItemsInput.Ctx, "Body", "body", string(response.Body())) + + getItemsResponse := struct { + Items []map[string]map[string]interface{} + NextMarker string + LastItemIncluded string + }{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &getItemsResponse) + if err != nil { + return nil, err + } + + //validate getItems response to avoid infinite loop + if getItemsResponse.LastItemIncluded != "TRUE" && (getItemsResponse.NextMarker == "" || getItemsResponse.NextMarker == getItemsInput.Marker) { + errMsg := fmt.Sprintf("Invalid getItems response: lastItemIncluded=false and nextMarker='%s', "+ + "startMarker='%s', probably due to object size bigger than 2M. Query is: %+v", getItemsResponse.NextMarker, getItemsInput.Marker, getItemsInput) + c.logger.Warn(errMsg) + } + + getItemsOutput := v3io.GetItemsOutput{ + NextMarker: getItemsResponse.NextMarker, + Last: getItemsResponse.LastItemIncluded == "TRUE", + } + + // iterate through the items and decode them + for _, typedItem := range getItemsResponse.Items { + + item, err := c.decodeTypedAttributes(typedItem) + if err != nil { + return nil, err + } + + getItemsOutput.Items = append(getItemsOutput.Items, item) + } + + // attach the output to the response + response.Output = &getItemsOutput + + return response, nil +} + +// PutItem +func (c *context) PutItem(putItemInput *v3io.PutItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putItemInput, context, responseChan) +} + +// PutItemSync +func (c *context) PutItemSync(putItemInput *v3io.PutItemInput) error { + + // prepare the query path + _, err := c.putItem(&putItemInput.DataPlaneInput, + putItemInput.Path, + putItemFunctionName, + putItemInput.Attributes, + putItemInput.Condition, + putItemHeaders, + nil) + + return err +} + +// PutItems +func (c *context) PutItems(putItemsInput *v3io.PutItemsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putItemsInput, context, responseChan) +} + +// PutItemsSync +func (c *context) PutItemsSync(putItemsInput *v3io.PutItemsInput) (*v3io.Response, error) { + + response := c.allocateResponse() + if response == nil { + return nil, errors.New("Failed to allocate response") + } + + putItemsOutput := v3io.PutItemsOutput{ + Success: true, + } + + for itemKey, itemAttributes := range putItemsInput.Items { + + // try to post the item + _, err := c.putItem(&putItemsInput.DataPlaneInput, + putItemsInput.Path+"/"+itemKey, + 
putItemFunctionName, + itemAttributes, + putItemsInput.Condition, + putItemHeaders, + nil) + + // if there was an error, shove it to the list of errors + if err != nil { + + // create the map to hold the errors since at least one exists + if putItemsOutput.Errors == nil { + putItemsOutput.Errors = map[string]error{} + } + + putItemsOutput.Errors[itemKey] = err + + // clear success, since at least one error exists + putItemsOutput.Success = false + } + } + + response.Output = &putItemsOutput + + return response, nil +} + +// UpdateItem +func (c *context) UpdateItem(updateItemInput *v3io.UpdateItemInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(updateItemInput, context, responseChan) +} + +// UpdateItemSync +func (c *context) UpdateItemSync(updateItemInput *v3io.UpdateItemInput) error { + var err error + + if updateItemInput.Attributes != nil { + + // specify update mode as part of body. "Items" will be injected + body := map[string]interface{}{ + "UpdateMode": "CreateOrReplaceAttributes", + } + + _, err = c.putItem(&updateItemInput.DataPlaneInput, + updateItemInput.Path, + putItemFunctionName, + updateItemInput.Attributes, + updateItemInput.Condition, + putItemHeaders, + body) + + } else if updateItemInput.Expression != nil { + + _, err = c.updateItemWithExpression(&updateItemInput.DataPlaneInput, + updateItemInput.Path, + updateItemFunctionName, + *updateItemInput.Expression, + updateItemInput.Condition, + updateItemHeaders) + } + + return err +} + +// GetObject +func (c *context) GetObject(getObjectInput *v3io.GetObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getObjectInput, context, responseChan) +} + +// GetObjectSync +func (c *context) GetObjectSync(getObjectInput *v3io.GetObjectInput) (*v3io.Response, error) { + return c.sendRequest(&getObjectInput.DataPlaneInput, + http.MethodGet, + getObjectInput.Path, + "", + nil, + nil, + false) +} + +// PutObject +func (c *context) PutObject(putObjectInput *v3io.PutObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putObjectInput, context, responseChan) +} + +// PutObjectSync +func (c *context) PutObjectSync(putObjectInput *v3io.PutObjectInput) error { + _, err := c.sendRequest(&putObjectInput.DataPlaneInput, + http.MethodPut, + putObjectInput.Path, + "", + nil, + putObjectInput.Body, + true) + + return err +} + +// DeleteObject +func (c *context) DeleteObject(deleteObjectInput *v3io.DeleteObjectInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(deleteObjectInput, context, responseChan) +} + +// DeleteObjectSync +func (c *context) DeleteObjectSync(deleteObjectInput *v3io.DeleteObjectInput) error { + _, err := c.sendRequest(&deleteObjectInput.DataPlaneInput, + http.MethodDelete, + deleteObjectInput.Path, + "", + nil, + nil, + true) + + return err +} + +// CreateStream +func (c *context) CreateStream(createStreamInput *v3io.CreateStreamInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(createStreamInput, context, responseChan) +} + +// CreateStreamSync +func (c *context) CreateStreamSync(createStreamInput *v3io.CreateStreamInput) error { + body := fmt.Sprintf(`{"ShardCount": %d, "RetentionPeriodHours": %d}`, + createStreamInput.ShardCount, + createStreamInput.RetentionPeriodHours) 
+ + _, err := c.sendRequest(&createStreamInput.DataPlaneInput, + http.MethodPost, + createStreamInput.Path, + "", + createStreamHeaders, + []byte(body), + true) + + return err +} + +// DeleteStream +func (c *context) DeleteStream(deleteStreamInput *v3io.DeleteStreamInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(deleteStreamInput, context, responseChan) +} + +// DeleteStreamSync +func (c *context) DeleteStreamSync(deleteStreamInput *v3io.DeleteStreamInput) error { + + // get all shards in the stream + response, err := c.GetContainerContentsSync(&v3io.GetContainerContentsInput{ + DataPlaneInput: deleteStreamInput.DataPlaneInput, + Path: deleteStreamInput.Path, + }) + + if err != nil { + return err + } + + defer response.Release() + + // delete the shards one by one + // TODO: paralellize + for _, content := range response.Output.(*v3io.GetContainerContentsOutput).Contents { + + // TODO: handle error - stop deleting? return multiple errors? + c.DeleteObjectSync(&v3io.DeleteObjectInput{ // nolint: errcheck + DataPlaneInput: deleteStreamInput.DataPlaneInput, + Path: "/" + content.Key, + }) + } + + // delete the actual stream + return c.DeleteObjectSync(&v3io.DeleteObjectInput{ + DataPlaneInput: deleteStreamInput.DataPlaneInput, + Path: "/" + path.Dir(deleteStreamInput.Path) + "/", + }) +} + +// SeekShard +func (c *context) SeekShard(seekShardInput *v3io.SeekShardInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(seekShardInput, context, responseChan) +} + +// SeekShardSync +func (c *context) SeekShardSync(seekShardInput *v3io.SeekShardInput) (*v3io.Response, error) { + var buffer bytes.Buffer + + buffer.WriteString(`{"Type": "`) + buffer.WriteString(seekShardsInputTypeToString[seekShardInput.Type]) + buffer.WriteString(`"`) + + if seekShardInput.Type == v3io.SeekShardInputTypeSequence { + buffer.WriteString(`, "StartingSequenceNumber": `) + buffer.WriteString(strconv.Itoa(seekShardInput.StartingSequenceNumber)) + } else if seekShardInput.Type == v3io.SeekShardInputTypeTime { + buffer.WriteString(`, "TimestampSec": `) + buffer.WriteString(strconv.Itoa(seekShardInput.Timestamp)) + buffer.WriteString(`, "TimestampNSec": 0`) + } + + buffer.WriteString(`}`) + + response, err := c.sendRequest(&seekShardInput.DataPlaneInput, + http.MethodPut, + seekShardInput.Path, + "", + seekShardsHeaders, + buffer.Bytes(), + false) + if err != nil { + return nil, err + } + + seekShardOutput := v3io.SeekShardOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &seekShardOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &seekShardOutput + + return response, nil +} + +// PutRecords +func (c *context) PutRecords(putRecordsInput *v3io.PutRecordsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(putRecordsInput, context, responseChan) +} + +// PutRecordsSync +func (c *context) PutRecordsSync(putRecordsInput *v3io.PutRecordsInput) (*v3io.Response, error) { + + // TODO: set this to an initial size through heuristics? 
+ // This function encodes manually + var buffer bytes.Buffer + + buffer.WriteString(`{"Records": [`) + + for recordIdx, record := range putRecordsInput.Records { + buffer.WriteString(`{"Data": "`) + buffer.WriteString(base64.StdEncoding.EncodeToString(record.Data)) + buffer.WriteString(`"`) + + if record.ClientInfo != nil { + buffer.WriteString(`,"ClientInfo": "`) + buffer.WriteString(base64.StdEncoding.EncodeToString(record.ClientInfo)) + buffer.WriteString(`"`) + } + + if record.ShardID != nil { + buffer.WriteString(`, "ShardId": `) + buffer.WriteString(strconv.Itoa(*record.ShardID)) + } + + if record.PartitionKey != "" { + buffer.WriteString(`, "PartitionKey": `) + buffer.WriteString(`"` + record.PartitionKey + `"`) + } + + // add comma if not last + if recordIdx != len(putRecordsInput.Records)-1 { + buffer.WriteString(`}, `) + } else { + buffer.WriteString(`}`) + } + } + + buffer.WriteString(`]}`) + str := buffer.String() + fmt.Println(str) + + response, err := c.sendRequest(&putRecordsInput.DataPlaneInput, + http.MethodPost, + putRecordsInput.Path, + "", + putRecordsHeaders, + buffer.Bytes(), + false) + if err != nil { + return nil, err + } + + putRecordsOutput := v3io.PutRecordsOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &putRecordsOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &putRecordsOutput + + return response, nil +} + +// GetRecords +func (c *context) GetRecords(getRecordsInput *v3io.GetRecordsInput, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + return c.sendRequestToWorker(getRecordsInput, context, responseChan) +} + +// GetRecordsSync +func (c *context) GetRecordsSync(getRecordsInput *v3io.GetRecordsInput) (*v3io.Response, error) { + body := fmt.Sprintf(`{"Location": "%s", "Limit": %d}`, + getRecordsInput.Location, + getRecordsInput.Limit) + + response, err := c.sendRequest(&getRecordsInput.DataPlaneInput, + http.MethodPut, + getRecordsInput.Path, + "", + getRecordsHeaders, + []byte(body), + false) + if err != nil { + return nil, err + } + + getRecordsOutput := v3io.GetRecordsOutput{} + + // unmarshal the body into an ad hoc structure + err = json.Unmarshal(response.Body(), &getRecordsOutput) + if err != nil { + return nil, err + } + + // set the output in the response + response.Output = &getRecordsOutput + + return response, nil +} + +func (c *context) putItem(dataPlaneInput *v3io.DataPlaneInput, + path string, + functionName string, + attributes map[string]interface{}, + condition string, + headers map[string]string, + body map[string]interface{}) (*v3io.Response, error) { + + // iterate over all attributes and encode them with their types + typedAttributes, err := c.encodeTypedAttributes(attributes) + if err != nil { + return nil, err + } + + // create an empty body if the user didn't pass anything + if body == nil { + body = map[string]interface{}{} + } + + // set item in body (use what the user passed as a base) + body["Item"] = typedAttributes + + if condition != "" { + body["ConditionExpression"] = condition + } + + jsonEncodedBodyContents, err := json.Marshal(body) + if err != nil { + return nil, err + } + + return c.sendRequest(dataPlaneInput, + http.MethodPut, + path, + "", + headers, + jsonEncodedBodyContents, + false) +} + +func (c *context) updateItemWithExpression(dataPlaneInput *v3io.DataPlaneInput, + path string, + functionName string, + expression string, + condition string, + headers map[string]string) 
(*v3io.Response, error) { + + body := map[string]interface{}{ + "UpdateExpression": expression, + "UpdateMode": "CreateOrReplaceAttributes", + } + + if condition != "" { + body["ConditionExpression"] = condition + } + + jsonEncodedBodyContents, err := json.Marshal(body) + if err != nil { + return nil, err + } + + return c.sendRequest(dataPlaneInput, + http.MethodPost, + path, + "", + headers, + jsonEncodedBodyContents, + false) +} + +func (c *context) sendRequestAndXMLUnmarshal(dataPlaneInput *v3io.DataPlaneInput, + method string, + path string, + query string, + headers map[string]string, + body []byte, + output interface{}) (*v3io.Response, error) { + + response, err := c.sendRequest(dataPlaneInput, method, path, query, headers, body, false) + if err != nil { + return nil, err + } + + // unmarshal the body into the output + err = xml.Unmarshal(response.Body(), output) + if err != nil { + response.Release() + + return nil, err + } + + // set output in response + response.Output = output + + return response, nil +} + +func (c *context) sendRequest(dataPlaneInput *v3io.DataPlaneInput, + method string, + path string, + query string, + headers map[string]string, + body []byte, + releaseResponse bool) (*v3io.Response, error) { + + var success bool + var statusCode int + var err error + + if dataPlaneInput.ContainerName == "" { + return nil, errors.New("ContainerName must not be empty") + } + + request := fasthttp.AcquireRequest() + response := c.allocateResponse() + + uri, err := c.buildRequestURI(dataPlaneInput.ContainerName, query, path) + if err != nil { + return nil, err + } + uriStr := uri.String() + + // init request + request.SetRequestURI(uriStr) + request.Header.SetMethod(method) + request.SetBody(body) + + // check if we need to an an authorization header + if len(dataPlaneInput.AuthenticationToken) > 0 { + request.Header.Set("Authorization", dataPlaneInput.AuthenticationToken) + } + + if len(dataPlaneInput.AccessKey) > 0 { + request.Header.Set("X-v3io-session-key", dataPlaneInput.AccessKey) + } + + for headerName, headerValue := range headers { + request.Header.Add(headerName, headerValue) + } + + c.logger.DebugWithCtx(dataPlaneInput.Ctx, + "Tx", + "uri", uriStr, + "method", method, + "body", string(request.Body())) + + if dataPlaneInput.Timeout <= 0 { + err = c.httpClient.Do(request, response.HTTPResponse) + } else { + err = c.httpClient.DoTimeout(request, response.HTTPResponse, dataPlaneInput.Timeout) + } + + if err != nil { + goto cleanup + } + + statusCode = response.HTTPResponse.StatusCode() + + c.logger.DebugWithCtx(dataPlaneInput.Ctx, + "Rx", + "statusCode", statusCode, + "body", string(response.HTTPResponse.Body())) + + // did we get a 2xx response? 
+ success = statusCode >= 200 && statusCode < 300 + + // make sure we got expected status + if !success { + err = v3ioerrors.NewErrorWithStatusCode(fmt.Errorf("Failed %s with status %d", method, statusCode), statusCode) + goto cleanup + } + +cleanup: + + // we're done with the request - the response must be released by the user + // unless there's an error + fasthttp.ReleaseRequest(request) + + if err != nil { + response.Release() + return nil, err + } + + // if the user doesn't need the response, release it + if releaseResponse { + response.Release() + return nil, nil + } + + return response, nil +} + +func (c *context) buildRequestURI(containerName string, query string, pathStr string) (*url.URL, error) { + uri, err := url.Parse(c.clusterEndpoints[0]) + if err != nil { + return nil, errors.Wrapf(err, "Failed to parse cluster endpoint URL %s", c.clusterEndpoints[0]) + } + uri.Path = path.Clean(path.Join("/", containerName, pathStr)) + if strings.HasSuffix(pathStr, "/") { + uri.Path += "/" // retain trailing slash + } + uri.RawQuery = query + return uri, nil +} + +func (c *context) allocateResponse() *v3io.Response { + return &v3io.Response{ + HTTPResponse: fasthttp.AcquireResponse(), + } +} + +// {"age": 30, "name": "foo"} -> {"age": {"N": 30}, "name": {"S": "foo"}} +func (c *context) encodeTypedAttributes(attributes map[string]interface{}) (map[string]map[string]interface{}, error) { + typedAttributes := make(map[string]map[string]interface{}) + + for attributeName, attributeValue := range attributes { + typedAttributes[attributeName] = make(map[string]interface{}) + switch value := attributeValue.(type) { + default: + return nil, fmt.Errorf("Unexpected attribute type for %s: %T", attributeName, reflect.TypeOf(attributeValue)) + case int: + typedAttributes[attributeName]["N"] = strconv.Itoa(value) + case int64: + typedAttributes[attributeName]["N"] = strconv.FormatInt(value, 10) + // this is a tmp bypass to the fact Go maps Json numbers to float64 + case float64: + typedAttributes[attributeName]["N"] = strconv.FormatFloat(value, 'E', -1, 64) + case string: + typedAttributes[attributeName]["S"] = value + case []byte: + typedAttributes[attributeName]["B"] = base64.StdEncoding.EncodeToString(value) + case bool: + typedAttributes[attributeName]["BOOL"] = value + } + } + + return typedAttributes, nil +} + +// {"age": {"N": 30}, "name": {"S": "foo"}} -> {"age": 30, "name": "foo"} +func (c *context) decodeTypedAttributes(typedAttributes map[string]map[string]interface{}) (map[string]interface{}, error) { + var err error + attributes := map[string]interface{}{} + + for attributeName, typedAttributeValue := range typedAttributes { + + typeError := func(attributeName string, attributeType string, value interface{}) error { + return errors.Errorf("Stated attribute type '%s' for attribute '%s' did not match actual attribute type '%T'", attributeType, attributeName, value) + } + + // try to parse as number + if value, ok := typedAttributeValue["N"]; ok { + numberValue, ok := value.(string) + if !ok { + return nil, typeError(attributeName, "N", value) + } + + // try int + if intValue, err := strconv.Atoi(numberValue); err != nil { + + // try float + floatValue, err := strconv.ParseFloat(numberValue, 64) + if err != nil { + return nil, fmt.Errorf("Value for %s is not int or float: %s", attributeName, numberValue) + } + + // save as float + attributes[attributeName] = floatValue + } else { + attributes[attributeName] = intValue + } + } else if value, ok := typedAttributeValue["S"]; ok { + stringValue, 
ok := value.(string) + if !ok { + return nil, typeError(attributeName, "S", value) + } + + attributes[attributeName] = stringValue + } else if value, ok := typedAttributeValue["B"]; ok { + byteSliceValue, ok := value.(string) + if !ok { + return nil, typeError(attributeName, "B", value) + } + + attributes[attributeName], err = base64.StdEncoding.DecodeString(byteSliceValue) + if err != nil { + return nil, err + } + } else if value, ok := typedAttributeValue["BOOL"]; ok { + boolValue, ok := value.(bool) + if !ok { + return nil, typeError(attributeName, "BOOL", value) + } + + attributes[attributeName] = boolValue + } + } + + return attributes, nil +} + +func (c *context) sendRequestToWorker(input interface{}, + context interface{}, + responseChan chan *v3io.Response) (*v3io.Request, error) { + id := atomic.AddUint64(&requestID, 1) + + // create a request/response (TODO: from pool) + requestResponse := &v3io.RequestResponse{ + Request: v3io.Request{ + ID: id, + Input: input, + Context: context, + ResponseChan: responseChan, + SendTimeNanoseconds: time.Now().UnixNano(), + }, + } + + // point to container + requestResponse.Request.RequestResponse = requestResponse + + // send the request to the request channel + c.requestChan <- &requestResponse.Request + + return &requestResponse.Request, nil +} + +func (c *context) workerEntry(workerIndex int) { + for { + var response *v3io.Response + var err error + + // read a request + request := <-c.requestChan + + // according to the input type + switch typedInput := request.Input.(type) { + case *v3io.PutObjectInput: + err = c.PutObjectSync(typedInput) + case *v3io.GetObjectInput: + response, err = c.GetObjectSync(typedInput) + case *v3io.DeleteObjectInput: + err = c.DeleteObjectSync(typedInput) + case *v3io.GetItemInput: + response, err = c.GetItemSync(typedInput) + case *v3io.GetItemsInput: + response, err = c.GetItemsSync(typedInput) + case *v3io.PutItemInput: + err = c.PutItemSync(typedInput) + case *v3io.PutItemsInput: + response, err = c.PutItemsSync(typedInput) + case *v3io.UpdateItemInput: + err = c.UpdateItemSync(typedInput) + case *v3io.CreateStreamInput: + err = c.CreateStreamSync(typedInput) + case *v3io.DeleteStreamInput: + err = c.DeleteStreamSync(typedInput) + case *v3io.GetRecordsInput: + response, err = c.GetRecordsSync(typedInput) + case *v3io.PutRecordsInput: + response, err = c.PutRecordsSync(typedInput) + case *v3io.SeekShardInput: + response, err = c.SeekShardSync(typedInput) + case *v3io.GetContainersInput: + response, err = c.GetContainersSync(typedInput) + case *v3io.GetContainerContentsInput: + response, err = c.GetContainerContentsSync(typedInput) + default: + c.logger.ErrorWith("Got unexpected request type", "type", reflect.TypeOf(request.Input).String()) + } + + // TODO: have the sync interfaces somehow use the pre-allocated response + if response != nil { + request.RequestResponse.Response = *response + } + + response = &request.RequestResponse.Response + + response.ID = request.ID + response.Error = err + response.RequestResponse = request.RequestResponse + response.Context = request.Context + + // write to response channel + request.ResponseChan <- &request.RequestResponse.Response + } +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/headers.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/headers.go new file mode 100644 index 00000000000..bad2efaa48e --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/headers.go @@ -0,0 +1,69 @@ +package v3iohttp + +// function names +const ( + 
putItemFunctionName = "PutItem" + updateItemFunctionName = "UpdateItem" + getItemFunctionName = "GetItem" + getItemsFunctionName = "GetItems" + createStreamFunctionName = "CreateStream" + putRecordsFunctionName = "PutRecords" + getRecordsFunctionName = "GetRecords" + seekShardsFunctionName = "SeekShard" +) + +// headers for put item +var putItemHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": putItemFunctionName, +} + +// headers for update item +var updateItemHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": updateItemFunctionName, +} + +// headers for update item +var getItemHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getItemFunctionName, +} + +// headers for update item +var getItemsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getItemsFunctionName, +} + +// headers for create stream +var createStreamHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": createStreamFunctionName, +} + +// headers for put records +var putRecordsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": putRecordsFunctionName, +} + +// headers for put records +var getRecordsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": getRecordsFunctionName, +} + +// headers for seek records +var seekShardsHeaders = map[string]string{ + "Content-Type": "application/json", + "X-v3io-function": seekShardsFunctionName, +} + +// map between SeekShardInputType and its encoded counterpart +var seekShardsInputTypeToString = [...]string{ + "TIME", + "SEQUENCE", + "LATEST", + "EARLIEST", +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/session.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/session.go new file mode 100644 index 00000000000..799d6433629 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/http/session.go @@ -0,0 +1,50 @@ +package v3iohttp + +import ( + "encoding/base64" + "fmt" + + "github.com/v3io/v3io-go/pkg/dataplane" + + "github.com/nuclio/logger" +) + +type session struct { + logger logger.Logger + context *context + authenticationToken string + accessKey string +} + +func newSession(parentLogger logger.Logger, + context *context, + username string, + password string, + accessKey string) (v3io.Session, error) { + + authenticationToken := "" + if username != "" && password != "" { + authenticationToken = GenerateAuthenticationToken(username, password) + } + + return &session{ + logger: parentLogger.GetChild("session"), + context: context, + authenticationToken: authenticationToken, + accessKey: accessKey, + }, nil +} + +// NewContainer creates a container +func (s *session) NewContainer(newContainerInput *v3io.NewContainerInput) (v3io.Container, error) { + return newContainer(s.logger, s, newContainerInput.ContainerName) +} + +func GenerateAuthenticationToken(username string, password string) string { + + // generate token for basic authentication + usernameAndPassword := fmt.Sprintf("%s:%s", username, password) + encodedUsernameAndPassword := base64.StdEncoding.EncodeToString([]byte(usernameAndPassword)) + + return "Basic " + encodedUsernameAndPassword +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/item.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/item.go new file mode 100644 index 00000000000..5fb6743da65 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/item.go @@ -0,0 +1,55 
@@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +import ( + "strconv" + + "github.com/v3io/v3io-go/pkg/errors" +) + +type Item map[string]interface{} + +func (i Item) GetField(name string) interface{} { + return i[name] +} + +func (i Item) GetFieldInt(name string) (int, error) { + switch typedField := i[name].(type) { + case int: + return typedField, nil + case float64: + return int(typedField), nil + case string: + return strconv.Atoi(typedField) + default: + return 0, v3ioerrors.ErrInvalidTypeConversion + } +} + +func (i Item) GetFieldString(name string) (string, error) { + switch typedField := i[name].(type) { + case int: + return strconv.Itoa(typedField), nil + case float64: + return strconv.FormatFloat(typedField, 'E', -1, 64), nil + case string: + return typedField, nil + default: + return "", v3ioerrors.ErrInvalidTypeConversion + } +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/itemscursor.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/itemscursor.go new file mode 100644 index 00000000000..c2e8e4f0455 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/itemscursor.go @@ -0,0 +1,137 @@ +package v3io + +type ItemsCursor struct { + currentItem Item + currentError error + currentResponse *Response + nextMarker string + moreItemsExist bool + itemIndex int + items []Item + getItemsInput *GetItemsInput + container Container +} + +func NewItemsCursor(container Container, getItemsInput *GetItemsInput) (*ItemsCursor, error) { + newItemsCursor := &ItemsCursor{ + container: container, + getItemsInput: getItemsInput, + } + + response, err := container.GetItemsSync(getItemsInput) + if err != nil { + return nil, err + } + + newItemsCursor.setResponse(response) + + return newItemsCursor, nil +} + +// Err returns the last error +func (ic *ItemsCursor) Err() error { + return ic.currentError +} + +// Release releases a cursor and its underlying resources +func (ic *ItemsCursor) Release() { + if ic.currentResponse != nil { + ic.currentResponse.Release() + } +} + +// Next gets the next matching item. this may potentially block as this lazy loads items from the collection +func (ic *ItemsCursor) NextSync() bool { + item, err := ic.NextItemSync() + + if item == nil || err != nil { + return false + } + + return true +} + +// NextItem gets the next matching item. this may potentially block as this lazy loads items from the collection +func (ic *ItemsCursor) NextItemSync() (Item, error) { + + // are there any more items left in the previous response we received? + if ic.itemIndex < len(ic.items) { + ic.currentItem = ic.items[ic.itemIndex] + ic.currentError = nil + + // next time we'll give next item + ic.itemIndex++ + + return ic.currentItem, nil + } + + // are there any more items up stream? 
+ if !ic.moreItemsExist { + ic.currentError = nil + return nil, nil + } + + // get the previous request input and modify it with the marker + ic.getItemsInput.Marker = ic.nextMarker + + // invoke get items + newResponse, err := ic.container.GetItemsSync(ic.getItemsInput) + if err != nil { + return nil, err + } + + // release the previous response + ic.currentResponse.Release() + + // set the new response - read all the sub information from it + ic.setResponse(newResponse) + + // and recurse into next now that we repopulated response + return ic.NextItemSync() +} + +// gets all items +func (ic *ItemsCursor) AllSync() ([]Item, error) { + var items []Item + + for ic.NextSync() { + items = append(items, ic.GetItem()) + } + + if ic.Err() != nil { + return nil, ic.Err() + } + + return items, nil +} + +func (ic *ItemsCursor) GetField(name string) interface{} { + return ic.currentItem[name] +} + +func (ic *ItemsCursor) GetFieldInt(name string) (int, error) { + return ic.currentItem.GetFieldInt(name) +} + +func (ic *ItemsCursor) GetFieldString(name string) (string, error) { + return ic.currentItem.GetFieldString(name) +} + +func (ic *ItemsCursor) GetFields() map[string]interface{} { + return ic.currentItem +} + +func (ic *ItemsCursor) GetItem() Item { + return ic.currentItem +} + +func (ic *ItemsCursor) setResponse(response *Response) { + ic.currentResponse = response + + getItemsOutput := response.Output.(*GetItemsOutput) + + ic.moreItemsExist = !getItemsOutput.Last + ic.nextMarker = getItemsOutput.NextMarker + ic.items = getItemsOutput.Items + ic.itemIndex = 0 +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/requestresponse.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/requestresponse.go new file mode 100644 index 00000000000..955cbec7e59 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/requestresponse.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +import "github.com/valyala/fasthttp" + +type Request struct { + ID uint64 + + // holds the input (e.g. 
ListBucketInput, GetItemInput) + Input interface{} + + // a user supplied context + Context interface{} + + // the channel to which the response must be posted + ResponseChan chan *Response + + // pointer to container + RequestResponse *RequestResponse + + // Request time + SendTimeNanoseconds int64 +} + +type Response struct { + + // hold a decoded output, if any + Output interface{} + + // Equal to the ID of request + ID uint64 + + // holds the error for async responses + Error error + + // a user supplied context + Context interface{} + + // pointer to container + RequestResponse *RequestResponse + + // HTTP + HTTPResponse *fasthttp.Response +} + +func (r *Response) Release() { + if r.HTTPResponse != nil { + fasthttp.ReleaseResponse(r.HTTPResponse) + } +} + +func (r *Response) Body() []byte { + return r.HTTPResponse.Body() +} + +func (r *Response) Request() *Request { + return &r.RequestResponse.Request +} + +// holds both a request and response +type RequestResponse struct { + Request Request + Response Response +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/session.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/session.go new file mode 100644 index 00000000000..7d67e43e7b0 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/session.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v3io + +type Session interface { + + // NewContainer creates a container + NewContainer(*NewContainerInput) (Container, error) +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go b/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go new file mode 100644 index 00000000000..b2e9a4988e5 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/dataplane/types.go @@ -0,0 +1,292 @@ +/* +Copyright 2018 The v3io Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v3io + +import ( + "context" + "crypto/tls" + "encoding/xml" + "time" +) + +// +// Control plane +// + +type NewContextInput struct { + ClusterEndpoints []string + NumWorkers int + RequestChanLen int + TlsConfig *tls.Config + DialTimeout time.Duration +} + +type NewSessionInput struct { + Username string + Password string + AccessKey string +} + +type NewContainerInput struct { + ContainerName string +} + +// +// Data plane +// + +type DataPlaneInput struct { + Ctx context.Context + ContainerName string + AuthenticationToken string + AccessKey string + Timeout time.Duration +} + +type DataPlaneOutput struct { + ctx context.Context +} + +// +// Container +// + +type GetContainerContentsInput struct { + DataPlaneInput + Path string +} + +type Content struct { + XMLName xml.Name `xml:"Contents"` + Key string `xml:"Key"` + Size int `xml:"Size"` + LastSequenceID int `xml:"LastSequenceId"` + ETag string `xml:"ETag"` + LastModified string `xml:"LastModified"` +} + +type CommonPrefix struct { + CommonPrefixes xml.Name `xml:"CommonPrefixes"` + Prefix string `xml:"Prefix"` +} + +type GetContainerContentsOutput struct { + BucketName xml.Name `xml:"ListBucketResult"` + Name string `xml:"Name"` + NextMarker string `xml:"NextMarker"` + MaxKeys string `xml:"MaxKeys"` + Contents []Content `xml:"Contents"` + CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` +} + +type GetContainersInput struct { + DataPlaneInput +} + +type GetContainersOutput struct { + DataPlaneOutput + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Owner interface{} `xml:"Owner"` + Results Containers `xml:"Buckets"` +} + +type Containers struct { + Name xml.Name `xml:"Buckets"` + Containers []ContainerInfo `xml:"Bucket"` +} + +type ContainerInfo struct { + BucketName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` + CreationDate string `xml:"CreationDate"` + ID int `xml:"Id"` +} + +// +// Object +// + +type GetObjectInput struct { + DataPlaneInput + Path string + Offset int + NumBytes int +} + +type PutObjectInput struct { + DataPlaneInput + Path string + Offset int + Body []byte +} + +type DeleteObjectInput struct { + DataPlaneInput + Path string +} + +// +// KV +// + +type PutItemInput struct { + DataPlaneInput + Path string + Condition string + Attributes map[string]interface{} +} + +type PutItemsInput struct { + DataPlaneInput + Path string + Condition string + Items map[string]map[string]interface{} +} + +type PutItemsOutput struct { + DataPlaneOutput + Success bool + Errors map[string]error +} + +type UpdateItemInput struct { + DataPlaneInput + Path string + Attributes map[string]interface{} + Expression *string + Condition string +} + +type GetItemInput struct { + DataPlaneInput + Path string + AttributeNames []string +} + +type GetItemOutput struct { + DataPlaneOutput + Item Item +} + +type GetItemsInput struct { + DataPlaneInput + Path string + AttributeNames []string + Filter string + Marker string + ShardingKey string + Limit int + Segment int + TotalSegments int + SortKeyRangeStart string + SortKeyRangeEnd string +} + +type GetItemsOutput struct { + DataPlaneOutput + Last bool + NextMarker string + Items []Item +} + +// +// Stream +// + +type StreamRecord struct { + ShardID *int + Data []byte + ClientInfo []byte + PartitionKey string +} + +type SeekShardInputType int + +const ( + SeekShardInputTypeTime SeekShardInputType = iota + SeekShardInputTypeSequence + SeekShardInputTypeLatest + SeekShardInputTypeEarliest +) + +type CreateStreamInput struct { + DataPlaneInput + Path string + ShardCount int + 
RetentionPeriodHours int +} + +type DeleteStreamInput struct { + DataPlaneInput + Path string +} + +type PutRecordsInput struct { + DataPlaneInput + Path string + Records []*StreamRecord +} + +type PutRecordResult struct { + SequenceNumber int + ShardID int `json:"ShardId"` + ErrorCode int + ErrorMessage string +} + +type PutRecordsOutput struct { + DataPlaneOutput + FailedRecordCount int + Records []PutRecordResult +} + +type SeekShardInput struct { + DataPlaneInput + Path string + Type SeekShardInputType + StartingSequenceNumber int + Timestamp int +} + +type SeekShardOutput struct { + DataPlaneOutput + Location string +} + +type GetRecordsInput struct { + DataPlaneInput + Path string + Location string + Limit int +} + +type GetRecordsResult struct { + ArrivalTimeSec int + ArrivalTimeNSec int + SequenceNumber int + ClientInfo []byte + PartitionKey string + Data []byte +} + +type GetRecordsOutput struct { + DataPlaneOutput + NextLocation string + MSecBehindLatest int + RecordsBehindLatest int + Records []GetRecordsResult +} diff --git a/vendor/github.com/v3io/v3io-go/pkg/errors/errors.go b/vendor/github.com/v3io/v3io-go/pkg/errors/errors.go new file mode 100644 index 00000000000..0f474736cf8 --- /dev/null +++ b/vendor/github.com/v3io/v3io-go/pkg/errors/errors.go @@ -0,0 +1,30 @@ +package v3ioerrors + +import ( + "errors" + "fmt" +) + +var ErrInvalidTypeConversion = errors.New("Invalid type conversion") + +var ErrTimeout = errors.New("Timed out") + +type ErrorWithStatusCode struct { + error + statusCode int +} + +func NewErrorWithStatusCode(err error, statusCode int) ErrorWithStatusCode { + return ErrorWithStatusCode{ + error: err, + statusCode: statusCode, + } +} + +func (e ErrorWithStatusCode) StatusCode() int { + return e.statusCode +} + +func (e ErrorWithStatusCode) Error() string { + return fmt.Sprintf("%s (%d response code)", e.error.Error(), e.statusCode) +} diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go b/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go index 68794091c52..122adfa7132 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/aggregate/aggregate.go @@ -295,6 +295,22 @@ func IsCountAggregate(aggr AggrType) bool { return aggr == aggrTypeCount } func HasAggregates(mask AggrType) bool { return mask != aggrTypeNone } +func AggregateMaskToString(mask AggrType) string { + var output strings.Builder + aggCount := 0 + for _, raw := range rawAggregates { + if mask&raw == raw { + if aggCount != 0 { + output.WriteString(",") + } + output.WriteString(aggrToString[raw]) + aggCount++ + } + } + + return output.String() +} + func ToAttrName(aggr AggrType) string { return config.AggregateAttrPrefix + aggr.String() } diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go b/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go index 0e172a0cc0b..203801441b0 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/appender/appender.go @@ -27,7 +27,7 @@ import ( "github.com/nuclio/logger" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/config" "github.com/v3io/v3io-tsdb/pkg/partmgr" @@ -109,7 +109,7 @@ type MetricsCache struct { cfg *config.V3ioConfig partitionMngr *partmgr.PartitionManager mtx sync.RWMutex - container *v3io.Container + container v3io.Container logger logger.Logger 
started bool @@ -132,7 +132,7 @@ type MetricsCache struct { performanceReporter *performance.MetricReporter } -func NewMetricsCache(container *v3io.Container, logger logger.Logger, cfg *config.V3ioConfig, +func NewMetricsCache(container v3io.Container, logger logger.Logger, cfg *config.V3ioConfig, partMngr *partmgr.PartitionManager) *MetricsCache { newCache := MetricsCache{container: container, logger: logger, cfg: cfg, partitionMngr: partMngr} diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go b/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go index a81a1a9988e..1bbf3b4e203 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/appender/ingest.go @@ -27,7 +27,8 @@ import ( "time" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/errors" ) // Start event loops for handling metric updates (appends and Get/Update DB responses) @@ -298,7 +299,7 @@ func (mc *MetricsCache) handleResponse(metric *MetricState, resp *v3io.Response, // Metrics with too many update errors go into Error state metric.retryCount++ - if e, hasStatusCode := resp.Error.(v3io.ErrorWithStatusCode); hasStatusCode && e.StatusCode() != http.StatusServiceUnavailable { + if e, hasStatusCode := resp.Error.(v3ioerrors.ErrorWithStatusCode); hasStatusCode && e.StatusCode() != http.StatusServiceUnavailable { mc.logger.ErrorWith(fmt.Sprintf("Chunk update failed with status code %d.", e.StatusCode())) setError(mc, metric, errors.Wrap(resp.Error, fmt.Sprintf("Chunk update failed due to status code %d.", e.StatusCode()))) clear() diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go b/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go index 949008694d0..4c6ad3ae4c7 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/appender/store.go @@ -28,7 +28,7 @@ import ( "time" "github.com/nuclio/logger" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/chunkenc" @@ -42,7 +42,10 @@ const maxLateArrivalInterval = 59 * 60 * 1000 // Max late arrival of 59min // Create a chunk store with two chunks (current, previous) func NewChunkStore(logger logger.Logger, labelNames []string, aggrsOnly bool) *chunkStore { - store := chunkStore{logger: logger} + store := chunkStore{ + logger: logger, + lastTid: -1, + } if !aggrsOnly { store.chunks[0] = &attrAppender{} store.chunks[1] = &attrAppender{} diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go b/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go index e53810d6f6f..56bbe3c27a2 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/config/config.go @@ -436,4 +436,20 @@ func initDefaults(cfg *V3ioConfig) { if cfg.DisableNginxMitigation == nil { cfg.DisableNginxMitigation = &defaultDisableNginxMitigation } + + if cfg.WebApiEndpoint == "" { + cfg.WebApiEndpoint = os.Getenv("V3IO_API") + } + + if cfg.AccessKey == "" { + cfg.AccessKey = os.Getenv("V3IO_ACCESS_KEY") + } + + if cfg.Username == "" { + cfg.Username = os.Getenv("V3IO_USERNAME") + } + + if cfg.Password == "" { + cfg.Password = os.Getenv("V3IO_PASSWORD") + } } diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go b/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go index 
0532fd8df50..087b9d8409d 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/partmgr/partmgr.go @@ -25,20 +25,22 @@ import ( "fmt" "math" "path" + "sort" "strconv" "strings" "sync" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/config" + "github.com/v3io/v3io-tsdb/pkg/tsdb/schema" "github.com/v3io/v3io-tsdb/pkg/utils" ) // Create a new partition manager -func NewPartitionMngr(schemaConfig *config.Schema, cont *v3io.Container, v3ioConfig *config.V3ioConfig) (*PartitionManager, error) { +func NewPartitionMngr(schemaConfig *config.Schema, cont v3io.Container, v3ioConfig *config.V3ioConfig) (*PartitionManager, error) { currentPartitionInterval, err := utils.Str2duration(schemaConfig.PartitionSchemaInfo.PartitionerInterval) if err != nil { return nil, err @@ -51,43 +53,6 @@ func NewPartitionMngr(schemaConfig *config.Schema, cont *v3io.Container, v3ioCon return newMngr, nil } -// Create and initialize a new partition -func NewDBPartition(pmgr *PartitionManager, startTime int64, path string) (*DBPartition, error) { - rollupTime, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.AggregationGranularity) - if err != nil { - return nil, err - } - partitionInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.PartitionerInterval) - if err != nil { - return nil, err - } - chunkInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.ChunckerInterval) - if err != nil { - return nil, err - } - newPart := DBPartition{ - manager: pmgr, - path: path, - startTime: startTime, - partitionInterval: partitionInterval, - chunkInterval: chunkInterval, - prefix: "", - retentionDays: pmgr.schemaConfig.PartitionSchemaInfo.SampleRetention, - rollupTime: rollupTime, - } - - aggrType, _, err := aggregate.AggregatesFromStringListWithCount(pmgr.schemaConfig.PartitionSchemaInfo.Aggregates) - if err != nil { - return nil, err - } - newPart.defaultRollups = aggrType - if rollupTime != 0 { - newPart.rollupBuckets = int(math.Ceil(float64(partitionInterval) / float64(rollupTime))) - } - - return &newPart, nil -} - type PartitionManager struct { mtx sync.RWMutex schemaConfig *config.Schema @@ -96,11 +61,15 @@ type PartitionManager struct { headPartition *DBPartition partitions []*DBPartition cyclic bool - container *v3io.Container + container v3io.Container currentPartitionInterval int64 //TODO update on schema changes v3ioConfig *config.V3ioConfig } +func (p *PartitionManager) GetPartitionsTablePath() string { + return path.Join(p.Path(), "partitions") +} + func (p *PartitionManager) Path() string { return p.v3ioConfig.TablePath } @@ -129,19 +98,25 @@ func (p *PartitionManager) TimeToPart(t int64) (*DBPartition, error) { } else { if t >= p.headPartition.startTime { if (t - p.headPartition.startTime) >= p.currentPartitionInterval { - _, err := p.createAndUpdatePartition(p.headPartition.startTime + p.currentPartitionInterval) + _, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) if err != nil { return nil, err } - return p.TimeToPart(t) - } else { - return p.headPartition, nil } + return p.headPartition, nil } else { // Iterate backwards; ignore the last element as it's the head partition for i := len(p.partitions) - 2; i >= 0; i-- { if t >= p.partitions[i].startTime { - return p.partitions[i], nil 
+ if t < p.partitions[i].GetEndTime() { + return p.partitions[i], nil + } else { + part, err := p.createAndUpdatePartition(p.currentPartitionInterval * (t / p.currentPartitionInterval)) + if err != nil { + return nil, err + } + return part, nil + } } } head := p.headPartition @@ -160,39 +135,71 @@ func (p *PartitionManager) createAndUpdatePartition(t int64) (*DBPartition, erro return nil, err } p.currentPartitionInterval = partition.partitionInterval + + schemaPartition := &config.Partition{StartTime: partition.startTime, SchemaInfo: p.schemaConfig.PartitionSchemaInfo} if p.headPartition == nil || time > p.headPartition.startTime { p.headPartition = partition p.partitions = append(p.partitions, partition) + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions, schemaPartition) } else { for i, part := range p.partitions { if part.startTime > time { p.partitions = append(p.partitions, nil) copy(p.partitions[i+1:], p.partitions[i:]) p.partitions[i] = partition + + p.schemaConfig.Partitions = append(p.schemaConfig.Partitions, nil) + copy(p.schemaConfig.Partitions[i+1:], p.schemaConfig.Partitions[i:]) + p.schemaConfig.Partitions[i] = schemaPartition break } } } - p.schemaConfig.Partitions = append(p.schemaConfig.Partitions, &config.Partition{StartTime: partition.startTime, SchemaInfo: p.schemaConfig.PartitionSchemaInfo}) + err = p.updateSchema() return partition, err } -func (p *PartitionManager) updateSchema() (err error) { +func (p *PartitionManager) updateSchema() error { + var outerError error metricReporter := performance.ReporterInstanceFromConfig(p.v3ioConfig) metricReporter.WithTimer("UpdateSchemaTimer", func() { + // updating schema version and copying partitions to kv table. + p.schemaConfig.TableSchemaInfo.Version = schema.Version + data, err := json.Marshal(p.schemaConfig) if err != nil { - err = errors.Wrap(err, "Failed to update a new partition in the schema file.") + outerError = errors.Wrap(err, "Failed to update a new partition in the schema file.") return } if p.container != nil { // Tests use case only - err = p.container.Sync.PutObject(&v3io.PutObjectInput{Path: path.Join(p.Path(), config.SchemaConfigFileName), Body: data}) + err = p.container.PutObjectSync(&v3io.PutObjectInput{Path: path.Join(p.Path(), config.SchemaConfigFileName), Body: data}) + if err != nil { + outerError = err + return + } + items := make(map[string]map[string]interface{}, len(p.partitions)) + for _, part := range p.partitions { + items[strconv.FormatInt(part.startTime, 10)] = part.ToMap() + } + + input := &v3io.PutItemsInput{Path: p.GetPartitionsTablePath(), Items: items} + resp, err := p.container.PutItemsSync(input) + + if err != nil { + outerError = errors.Wrap(err, "failed to update partitions table.") + return + } + output := resp.Output.(*v3io.PutItemsOutput) + if !output.Success { + outerError = fmt.Errorf("got one or more errors, err: %v", output.Errors) + return + } } }) - return + return outerError } func (p *PartitionManager) DeletePartitionsFromSchema(partitionsToDelete []*DBPartition) error { @@ -214,6 +221,17 @@ func (p *PartitionManager) DeletePartitionsFromSchema(partitionsToDelete []*DBPa } } + + // Delete from partitions KV table + if p.container != nil { // Tests use case only + for _, partToDelete := range partitionsToDelete { + err := p.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path.Join(p.GetPartitionsTablePath(), strconv.FormatInt(partToDelete.startTime, 10))}) + if err != nil { + return err + } + } + } + return p.updateSchema() } @@ -229,7 +247,7 @@ func 
(p *PartitionManager) ReadAndUpdateSchema() (err error) { err = errors.Wrap(err, "Failed to create timer ReadAndUpdateSchemaTimer.") return } - schemaInfoResp, err := p.container.Sync.GetItem(&v3io.GetItemInput{Path: fullPath, AttributeNames: []string{"__mtime_secs", "__mtime_nsecs"}}) + schemaInfoResp, err := p.container.GetItemSync(&v3io.GetItemInput{Path: fullPath, AttributeNames: []string{"__mtime_secs", "__mtime_nsecs"}}) if err != nil { err = errors.Wrapf(err, "Failed to read schema at path '%s'.", fullPath) } @@ -248,20 +266,23 @@ func (p *PartitionManager) ReadAndUpdateSchema() (err error) { p.schemaMtimeNanosecs = mtimeNsecs metricReporter.WithTimer("ReadAndUpdateSchemaTimer", func() { - resp, err := p.container.Sync.GetObject(&v3io.GetObjectInput{Path: fullPath}) - if err != nil { - err = errors.Wrapf(err, "Failed to read schema at path '%s'.", fullPath) + resp, innerError := p.container.GetObjectSync(&v3io.GetObjectInput{Path: fullPath}) + if innerError != nil { + err = errors.Wrapf(innerError, "Failed to read schema at path '%s'.", fullPath) + return } schema := &config.Schema{} - err = json.Unmarshal(resp.Body(), schema) - if err != nil { - err = errors.Wrapf(err, "Failed to unmarshal schema at path '%s'.", fullPath) + innerError = json.Unmarshal(resp.Body(), schema) + if innerError != nil { + err = errors.Wrapf(innerError, "Failed to unmarshal schema at path '%s'.", fullPath) + return } p.schemaConfig = schema - err = p.updatePartitionsFromSchema(schema) - if err != nil { - err = errors.Wrapf(err, "Failed to update partitions from schema at path '%s'.", fullPath) + innerError = p.updatePartitionsFromSchema(schema) + if innerError != nil { + err = errors.Wrapf(innerError, "Failed to update partitions from schema at path '%s'.", fullPath) + return } }) } @@ -269,6 +290,14 @@ func (p *PartitionManager) ReadAndUpdateSchema() (err error) { } func (p *PartitionManager) updatePartitionsFromSchema(schema *config.Schema) error { + if schema.TableSchemaInfo.Version == 3 { + return p.newLoadPartitions() + } + + return p.oldLoadPartitions(schema) +} + +func (p *PartitionManager) oldLoadPartitions(schema *config.Schema) error { p.partitions = []*DBPartition{} for _, part := range schema.Partitions { partPath := path.Join(p.Path(), strconv.FormatInt(part.StartTime/1000, 10)) + "/" @@ -286,6 +315,51 @@ func (p *PartitionManager) updatePartitionsFromSchema(schema *config.Schema) err return nil } +func (p *PartitionManager) newLoadPartitions() error { + if p.container == nil { // Tests use case only + return nil + } + + getItems := &v3io.GetItemsInput{Path: p.GetPartitionsTablePath() + "/", + AttributeNames: []string{"*"}} + + logger, err := utils.NewLogger(p.v3ioConfig.LogLevel) + if err != nil { + return err + } + iter, err := utils.NewAsyncItemsCursor(p.container, getItems, p.v3ioConfig.QryWorkers, []string{}, logger) + if err != nil { + return err + } + + p.partitions = []*DBPartition{} + for iter.Next() { + startTime := iter.GetField(config.ObjectNameAttrName).(string) + intStartTime, err := strconv.ParseInt(startTime, 10, 64) + if err != nil { + return errors.Wrapf(err, "invalid partition name '%v'", startTime) + } + + partPath := path.Join(p.Path(), strconv.FormatInt(intStartTime/1000, 10)) + "/" + newPart, err := NewDBPartitionFromMap(p, intStartTime, partPath, iter.GetItem()) + if err != nil { + return err + } + p.partitions = append(p.partitions, newPart) + if p.headPartition == nil { + p.headPartition = newPart + } else if p.headPartition.startTime < newPart.startTime { + 
p.headPartition = newPart + } + } + + sort.SliceStable(p.partitions, func(i, j int) bool { + return p.partitions[i].startTime < p.partitions[j].startTime + }) + + return nil +} + //if inclusive is true than partial partitions (not fully in range) will be retireved as well func (p *PartitionManager) PartsForRange(mint, maxt int64, inclusive bool) []*DBPartition { var parts []*DBPartition @@ -310,6 +384,93 @@ type DBPartition struct { rollupBuckets int // Total number of aggregation buckets per partition } +// Create and initialize a new partition +func NewDBPartition(pmgr *PartitionManager, startTime int64, path string) (*DBPartition, error) { + rollupTime, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.AggregationGranularity) + if err != nil { + return nil, err + } + partitionInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.PartitionerInterval) + if err != nil { + return nil, err + } + chunkInterval, err := utils.Str2duration(pmgr.schemaConfig.PartitionSchemaInfo.ChunckerInterval) + if err != nil { + return nil, err + } + newPart := DBPartition{ + manager: pmgr, + path: path, + startTime: startTime, + partitionInterval: partitionInterval, + chunkInterval: chunkInterval, + prefix: "", + retentionDays: pmgr.schemaConfig.PartitionSchemaInfo.SampleRetention, + rollupTime: rollupTime, + } + + aggrType, _, err := aggregate.AggregatesFromStringListWithCount(pmgr.schemaConfig.PartitionSchemaInfo.Aggregates) + if err != nil { + return nil, err + } + newPart.defaultRollups = aggrType + if rollupTime != 0 { + newPart.rollupBuckets = int(math.Ceil(float64(partitionInterval) / float64(rollupTime))) + } + + return &newPart, nil +} + +// Create and initialize a new partition +func NewDBPartitionFromMap(pmgr *PartitionManager, startTime int64, path string, item v3io.Item) (*DBPartition, error) { + rollupTime, err := item.GetFieldInt("rollupTime") + if err != nil { + return nil, fmt.Errorf("failed to parse rollupTime for partition: %v, rollup: %v", startTime, item.GetField("rollupTime")) + } + + partitionInterval, err := item.GetFieldInt("partitionInterval") + if err != nil { + return nil, fmt.Errorf("failed to parse partitionInterval for partition: %v, interval: %v", startTime, item.GetField("partitionInterval")) + } + + chunkInterval, err := item.GetFieldInt("chunkInterval") + if err != nil { + return nil, fmt.Errorf("failed to parse chunk Interval for partition: %v, interval: %v", startTime, item.GetField("chunkInterval")) + } + + retention, err := item.GetFieldInt("retentionDays") + if err != nil { + return nil, errors.Wrapf(err, "failed to parse retention days for partition: %v, retention: %v", startTime, item.GetField("retentionDays")) + } + + stringAggregates, err := item.GetFieldString("aggregates") + if err != nil { + return nil, errors.Wrapf(err, "failed to parse aggregates for partition: %v, aggregates: %v", startTime, item.GetField("aggregates")) + } + mask, _, err := aggregate.AggregatesFromStringListWithCount(strings.Split(stringAggregates, ",")) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse aggregates from string for partition: %v, aggregates: %v", startTime, stringAggregates) + } + + newPart := DBPartition{ + manager: pmgr, + path: path, + startTime: startTime, + partitionInterval: int64(partitionInterval), + chunkInterval: int64(chunkInterval), + prefix: "", + retentionDays: retention, + rollupTime: int64(rollupTime), + defaultRollups: mask, + } + + if rollupTime != 0 { + newPart.rollupBuckets = 
int(math.Ceil(float64(partitionInterval) / float64(rollupTime))) + } + + return &newPart, nil +} + func (p *DBPartition) PreAggregates() []config.PreAggregate { return p.manager.GetConfig().TableSchemaInfo.PreAggregates } @@ -485,6 +646,16 @@ func (p *DBPartition) GetHashingBuckets() int { return p.manager.schemaConfig.TableSchemaInfo.ShardingBucketsCount } +func (p *DBPartition) ToMap() map[string]interface{} { + attributes := make(map[string]interface{}, 5) + attributes["aggregates"] = aggregate.AggregateMaskToString(p.AggrType()) + attributes["rollupTime"] = p.rollupTime + attributes["chunkInterval"] = p.chunkInterval + attributes["partitionInterval"] = p.partitionInterval + attributes["retentionDays"] = p.retentionDays + return attributes +} + // Convert a time in milliseconds to day and hour integers func TimeToDHM(tmilli int64) (int, int) { t := int(tmilli / 1000) diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go index ce7746d0ef1..2f0d4c4b65f 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/frames.go @@ -101,7 +101,7 @@ func (fi *frameIterator) Err() error { } // data frame, holds multiple value columns and an index (time) column -func NewDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Labels, hash uint64, isRawQuery, getAllMetrics bool, columnSize int, useServerAggregates, showAggregateLabel bool) (*dataFrame, error) { +func NewDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Labels, hash uint64, isRawQuery bool, columnSize int, useServerAggregates, showAggregateLabel bool) (*dataFrame, error) { df := &dataFrame{lset: lset, hash: hash, isRawSeries: isRawQuery, showAggregateLabel: showAggregateLabel} // is raw query if isRawQuery { @@ -114,13 +114,14 @@ func NewDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Label df.metricToCountColumn = map[string]Column{} df.metrics = map[string]struct{}{} df.nonEmptyRowsIndicators = make([]bool, columnSize) - // In case user wanted all metrics, save the template for every metric. - // Once we know what metrics we have we will create Columns out of the column Templates - if getAllMetrics { - df.columnsTemplates = columnsSpec - } else { - for i, col := range columnsSpec { - df.metrics[col.metric] = struct{}{} + + i := 0 + for _, col := range columnsSpec { + // In case user wanted all metrics, save the template for every metric. 
+ // Once we know what metrics we have we will create Columns out of the column Templates + if col.isWildcard() { + df.columnsTemplates = append(df.columnsTemplates, col) + } else { column, err := createColumn(col, columnSize, useServerAggregates) if err != nil { return nil, err @@ -130,12 +131,12 @@ func NewDataFrame(columnsSpec []columnMeta, indexColumn Column, lset utils.Label } df.columns = append(df.columns, column) df.columnByName[col.getColumnName()] = i + i++ } - - for _, col := range df.columns { - if !col.GetColumnSpec().isConcrete() { - fillDependantColumns(col, df) - } + } + for _, col := range df.columns { + if !col.GetColumnSpec().isConcrete() { + fillDependantColumns(col, df) } } } diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go index b284ce6713f..ed221871357 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/querier.go @@ -10,7 +10,7 @@ import ( "github.com/nuclio/logger" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/config" "github.com/v3io/v3io-tsdb/pkg/partmgr" @@ -18,7 +18,7 @@ import ( ) // Create a new Querier interface -func NewV3ioQuerier(container *v3io.Container, logger logger.Logger, +func NewV3ioQuerier(container v3io.Container, logger logger.Logger, cfg *config.V3ioConfig, partMngr *partmgr.PartitionManager) *V3ioQuerier { newQuerier := V3ioQuerier{ container: container, @@ -32,7 +32,7 @@ func NewV3ioQuerier(container *v3io.Container, logger logger.Logger, type V3ioQuerier struct { logger logger.Logger - container *v3io.Container + container v3io.Container cfg *config.V3ioConfig partitionMngr *partmgr.PartitionManager performanceReporter *performance.MetricReporter diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go index 32da950faf0..6e6158251a6 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/select.go @@ -11,7 +11,7 @@ import ( "github.com/nuclio/logger" "github.com/pkg/errors" "github.com/v3io/frames" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/chunkenc" "github.com/v3io/v3io-tsdb/pkg/config" @@ -23,7 +23,7 @@ const defaultToleranceFactor = 2 type selectQueryContext struct { logger logger.Logger - container *v3io.Container + container v3io.Container workers int v3ioConfig *config.V3ioConfig @@ -32,7 +32,6 @@ type selectQueryContext struct { columnsSpec []columnMeta columnsSpecByMetric map[string][]columnMeta - isAllMetrics bool totalColumns int isCrossSeriesAggregate bool @@ -347,7 +346,6 @@ func (queryCtx *selectQueryContext) processQueryResults(query *partQuery) error lset, hash, queryCtx.isRawQuery(), - queryCtx.isAllMetrics, queryCtx.getResultBucketsSize(), results.IsServerAggregates(), queryCtx.showAggregateLabel) @@ -410,7 +408,6 @@ func (queryCtx *selectQueryContext) createColumnSpecs() ([]columnMeta, map[strin } columnsSpecByMetric[col.Metric] = append(columnsSpecByMetric[col.Metric], colMeta) columnsSpec = append(columnsSpec, colMeta) - queryCtx.isAllMetrics = queryCtx.isAllMetrics || col.Metric == "" } // Adding hidden columns if needed diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go 
b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go index 9a8fe5f7066..67b7a717053 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/sql_parser.go @@ -4,6 +4,7 @@ import ( "fmt" "strings" + "github.com/pkg/errors" "github.com/v3io/v3io-tsdb/pkg/utils" "github.com/xwb1989/sqlparser" ) @@ -44,7 +45,10 @@ func ParseQuery(sql string) (*SelectParams, string, error) { switch expr := col.Expr.(type) { case *sqlparser.FuncExpr: - parseFuncExpr(expr, &currCol) + err := parseFuncExpr(expr, &currCol) + if err != nil { + return nil, "", err + } case *sqlparser.ColName: currCol.Metric = removeBackticks(sqlparser.String(expr.Name)) default: @@ -70,6 +74,11 @@ func ParseQuery(sql string) (*SelectParams, string, error) { selectParams.GroupBy = strings.TrimPrefix(sqlparser.String(slct.GroupBy), " group by ") } + err = validateColumnNames(selectParams) + if err != nil { + return nil, "", err + } + return selectParams, fromTable, nil } @@ -109,6 +118,10 @@ func parseFuncExpr(expr *sqlparser.FuncExpr, destCol *RequestedColumn) error { parseFuncExpr(innerExpr, destCol) } } + + if destCol.Metric == "" && destCol.Alias != "" { + return errors.New("cannot alias a wildcard") + } } return nil @@ -139,3 +152,25 @@ func parseFilter(originalFilter string) (string, error) { func removeBackticks(origin string) string { return strings.Replace(origin, "`", "", -1) } + +func validateColumnNames(params *SelectParams) error { + names := make(map[string]bool) + requestedMetrics := make(map[string]bool) + + for _, column := range params.RequestedColumns { + columnName := column.GetColumnName() + if names[columnName] { + return fmt.Errorf("column name '%v' appears more than once in select query", columnName) + } + names[columnName] = true + requestedMetrics[column.Metric] = true + } + + for _, column := range params.RequestedColumns { + if column.Alias != "" && requestedMetrics[column.Alias] { + return fmt.Errorf("cannot use a metric name as an alias, alias: %v", column.Alias) + } + } + + return nil +} diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go index bb239726b71..e3e9b7be3e0 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/pquerier/types.go @@ -51,6 +51,17 @@ func (col *RequestedColumn) GetFunction() string { return strings.TrimSuffix(col.Function, aggregate.CrossSeriesSuffix) } +func (col *RequestedColumn) GetColumnName() string { + if col.Alias != "" { + return col.Alias + } + // If no aggregations are requested (raw down sampled data) + if col.Function == "" { + return col.Metric + } + return fmt.Sprintf("%v(%v)", col.Function, col.Metric) +} + type columnMeta struct { metric string alias string @@ -62,7 +73,7 @@ type columnMeta struct { } // if a user specifies he wants all metrics -func (c *columnMeta) isWildcard() bool { return c.metric == "*" } +func (c *columnMeta) isWildcard() bool { return c.metric == "" } // Concrete Column = has real data behind it, Virtual column = described as a function on top of concrete columns func (c columnMeta) isConcrete() bool { return c.function == 0 || aggregate.IsRawAggregate(c.function) } diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go b/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go index ccec3e25e88..9487978916a 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go +++ 
b/vendor/github.com/v3io/v3io-tsdb/pkg/querier/querier.go @@ -27,7 +27,7 @@ import ( "github.com/nuclio/logger" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/internal/pkg/performance" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/config" @@ -36,7 +36,7 @@ import ( ) // Create a new Querier interface -func NewV3ioQuerier(container *v3io.Container, logger logger.Logger, mint, maxt int64, +func NewV3ioQuerier(container v3io.Container, logger logger.Logger, mint, maxt int64, cfg *config.V3ioConfig, partMngr *partmgr.PartitionManager) *V3ioQuerier { newQuerier := V3ioQuerier{ container: container, @@ -51,7 +51,7 @@ func NewV3ioQuerier(container *v3io.Container, logger logger.Logger, mint, maxt type V3ioQuerier struct { logger logger.Logger - container *v3io.Container + container v3io.Container cfg *config.V3ioConfig mint, maxt int64 partitionMngr *partmgr.PartitionManager diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go b/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go index 5c3326099ee..16c41f7bf29 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/querier/seriesset.go @@ -22,7 +22,7 @@ package querier import ( "github.com/nuclio/logger" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" "github.com/v3io/v3io-tsdb/pkg/aggregate" "github.com/v3io/v3io-tsdb/pkg/config" "github.com/v3io/v3io-tsdb/pkg/partmgr" @@ -53,7 +53,7 @@ type V3ioSeriesSet struct { // Get relevant items and attributes from the TSDB and create an iterator // TODO: get items per partition + merge, per partition calc attrs -func (s *V3ioSeriesSet) getItems(partition *partmgr.DBPartition, name, filter string, container *v3io.Container, workers int) error { +func (s *V3ioSeriesSet) getItems(partition *partmgr.DBPartition, name, filter string, container v3io.Container, workers int) error { path := partition.GetTablePath() shardingKeys := []string{} diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go b/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go index f769f4b664b..0b034302bd2 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/schema/schema.go @@ -13,7 +13,7 @@ import ( ) const ( - Version = 2 + Version = 3 ) func NewSchema(v3ioCfg *config.V3ioConfig, samplesIngestionRate, aggregationGranularity, aggregatesList string, crossLabelSets string) (*config.Schema, error) { diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go b/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go index 89bfa5d6ca0..9a670b210b1 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/tsdb/v3iotsdb.go @@ -31,7 +31,8 @@ import ( "github.com/nuclio/logger" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/dataplane/http" "github.com/v3io/v3io-tsdb/pkg/appender" "github.com/v3io/v3io-tsdb/pkg/config" "github.com/v3io/v3io-tsdb/pkg/partmgr" @@ -41,19 +42,23 @@ import ( "github.com/v3io/v3io-tsdb/pkg/utils" ) +const defaultHttpTimeout = 30 * time.Second + type V3ioAdapter struct { startTimeMargin int64 logger logger.Logger - container *v3io.Container + container v3io.Container + HttpTimeout time.Duration MetricsCache *appender.MetricsCache cfg *config.V3ioConfig partitionMngr 
*partmgr.PartitionManager } -func CreateTSDB(v3iocfg *config.V3ioConfig, schema *config.Schema) error { +func CreateTSDB(cfg *config.V3ioConfig, schema *config.Schema) error { - lgr, _ := utils.NewLogger(v3iocfg.LogLevel) - container, err := utils.CreateContainer(lgr, v3iocfg) + lgr, _ := utils.NewLogger(cfg.LogLevel) + httpTimeout := parseHttpTimeout(cfg, lgr) + container, err := utils.CreateContainer(lgr, cfg, httpTimeout) if err != nil { return errors.Wrap(err, "Failed to create a data container.") } @@ -63,24 +68,39 @@ func CreateTSDB(v3iocfg *config.V3ioConfig, schema *config.Schema) error { return errors.Wrap(err, "Failed to marshal the TSDB schema file.") } - path := pathUtil.Join(v3iocfg.TablePath, config.SchemaConfigFileName) + dataPlaneInput := v3io.DataPlaneInput{Timeout: httpTimeout} + + path := pathUtil.Join(cfg.TablePath, config.SchemaConfigFileName) // Check whether the config file already exists, and abort if it does - _, err = container.Sync.GetObject(&v3io.GetObjectInput{Path: path}) + _, err = container.GetObjectSync(&v3io.GetObjectInput{Path: path, DataPlaneInput: dataPlaneInput}) if err == nil { - return fmt.Errorf("A TSDB table already exists at path '" + v3iocfg.TablePath + "'.") + return fmt.Errorf("A TSDB table already exists at path '" + cfg.TablePath + "'.") } - err = container.Sync.PutObject(&v3io.PutObjectInput{Path: path, Body: data}) + err = container.PutObjectSync(&v3io.PutObjectInput{Path: path, Body: data, DataPlaneInput: dataPlaneInput}) if err != nil { - return errors.Wrapf(err, "Failed to create a TSDB schema at path '%s'.", - pathUtil.Join(v3iocfg.WebApiEndpoint, v3iocfg.Container, path)) + return errors.Wrapf(err, "Failed to create a TSDB schema at path '%s/%s/%s'.", cfg.WebApiEndpoint, cfg.Container, path) } return err } +func parseHttpTimeout(cfg *config.V3ioConfig, logger logger.Logger) time.Duration { + if cfg.HttpTimeout == "" { + return defaultHttpTimeout + } else { + timeout, err := time.ParseDuration(cfg.HttpTimeout) + if err != nil { + logger.Warn("Failed to parse httpTimeout '%s'. Defaulting to %d millis.", cfg.HttpTimeout, defaultHttpTimeout/time.Millisecond) + return defaultHttpTimeout + } else { + return timeout + } + } +} + // Create a new TSDB adapter, similar to Prometheus TSDB adapter but with a few // extensions. The Prometheus compliant adapter is found under /promtsdb. 
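Editor's note: the new parseHttpTimeout helper above falls back to the 30-second default whenever the configured httpTimeout string is empty or fails to parse. A standalone sketch of the same parse-with-fallback pattern, independent of the V3ioConfig and logger types:

package main

import (
	"fmt"
	"time"
)

const defaultHTTPTimeout = 30 * time.Second

// parseTimeoutOrDefault mirrors parseHttpTimeout's behaviour: an empty or
// malformed duration string yields the default timeout.
func parseTimeoutOrDefault(raw string) time.Duration {
	if raw == "" {
		return defaultHTTPTimeout
	}
	timeout, err := time.ParseDuration(raw)
	if err != nil {
		fmt.Printf("failed to parse timeout %q, defaulting to %v\n", raw, defaultHTTPTimeout)
		return defaultHTTPTimeout
	}
	return timeout
}

func main() {
	fmt.Println(parseTimeoutOrDefault(""))     // 30s
	fmt.Println(parseTimeoutOrDefault("90s"))  // 1m30s
	fmt.Println(parseTimeoutOrDefault("oops")) // falls back to 30s
}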
-func NewV3ioAdapter(cfg *config.V3ioConfig, container *v3io.Container, logger logger.Logger) (*V3ioAdapter, error) { +func NewV3ioAdapter(cfg *config.V3ioConfig, container v3io.Container, logger logger.Logger) (*V3ioAdapter, error) { var err error newV3ioAdapter := V3ioAdapter{} @@ -94,10 +114,12 @@ func NewV3ioAdapter(cfg *config.V3ioConfig, container *v3io.Container, logger lo } } + newV3ioAdapter.HttpTimeout = parseHttpTimeout(cfg, logger) + if container != nil { newV3ioAdapter.container = container } else { - newV3ioAdapter.container, err = utils.CreateContainer(newV3ioAdapter.logger, cfg) + newV3ioAdapter.container, err = utils.CreateContainer(newV3ioAdapter.logger, cfg, newV3ioAdapter.HttpTimeout) if err != nil { return nil, errors.Wrap(err, "Failed to create V3IO data container") } @@ -108,25 +130,18 @@ func NewV3ioAdapter(cfg *config.V3ioConfig, container *v3io.Container, logger lo return &newV3ioAdapter, err } -func NewContainer(v3ioUrl string, numWorkers int, accessKey string, username string, password string, containerName string, logger logger.Logger) (*v3io.Container, error) { - ctx, err := v3io.NewContext(logger, v3ioUrl, numWorkers) +func NewContainer(v3ioUrl string, numWorkers int, accessKey string, username string, password string, containerName string, logger logger.Logger) (v3io.Container, error) { + ctx, err := v3iohttp.NewContext(logger, &v3io.NewContextInput{ClusterEndpoints: []string{v3ioUrl}, NumWorkers: numWorkers}) if err != nil { return nil, err } - // Create session - accessKey will take precedence over user/password if exists - sessionConfig := &v3io.SessionConfig{ - Username: username, - Password: password, - Label: "tsdb", - SessionKey: accessKey, - } - session, err := ctx.NewSessionFromConfig(sessionConfig) + session, err := ctx.NewSession(&v3io.NewSessionInput{Username: username, Password: password, AccessKey: accessKey}) if err != nil { - return nil, errors.Wrap(err, "Failed to create a session.") + return nil, errors.Wrap(err, "Failed to create session.") } - container, err := session.NewContainer(containerName) + container, err := session.NewContainer(&v3io.NewContainerInput{ContainerName: containerName}) if err != nil { return nil, err } @@ -141,14 +156,14 @@ func (a *V3ioAdapter) GetLogger(child string) logger.Logger { return a.logger.GetChild(child) } -func (a *V3ioAdapter) GetContainer() (*v3io.Container, string) { +func (a *V3ioAdapter) GetContainer() (v3io.Container, string) { return a.container, a.cfg.TablePath } func (a *V3ioAdapter) connect() error { - fullpath := pathUtil.Join(a.cfg.WebApiEndpoint, a.cfg.Container, a.cfg.TablePath) - resp, err := a.container.Sync.GetObject(&v3io.GetObjectInput{Path: pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName)}) + fullpath := fmt.Sprintf("%s/%s/%s", a.cfg.WebApiEndpoint, a.cfg.Container, a.cfg.TablePath) + resp, err := a.container.GetObjectSync(&v3io.GetObjectInput{Path: pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName)}) if err != nil { if utils.IsNotExistsError(err) { return errors.Errorf("No TSDB schema file found at '%s'.", fullpath) @@ -161,11 +176,12 @@ func (a *V3ioAdapter) connect() error { tableSchema := config.Schema{} err = json.Unmarshal(resp.Body(), &tableSchema) if err != nil { - return errors.Wrapf(err, "Failed to unmarshal the TSDB schema at '%s'.", fullpath) + return errors.Wrapf(err, "Failed to unmarshal the TSDB schema at '%s', got: %v .", fullpath, string(resp.Body())) } + // in order to support backward compatibility we do not fail on version mismatch and only 
logging warning if tableSchema.TableSchemaInfo.Version != schema.Version { - return errors.Errorf("Table Schema version mismatch - existing table schema version is %d while the tsdb library version is %d! Make sure to create the table with same library version", + a.logger.Warn("Table Schema version mismatch - existing table schema version is %d while the tsdb library version is %d! Make sure to create the table with same library version", tableSchema.TableSchemaInfo.Version, schema.Version) } @@ -240,7 +256,7 @@ func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64 return errors.Wrapf(err, "Failed to delete partition '%s'.", part.GetTablePath()) } // Delete the Directory object - err = a.container.Sync.DeleteObject(&v3io.DeleteObjectInput{Path: part.GetTablePath()}) + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: part.GetTablePath()}) if err != nil && !ignoreErrors { return errors.Wrapf(err, "Failed to delete partition object '%s'.", part.GetTablePath()) } @@ -258,7 +274,7 @@ func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64 return errors.Wrap(err, "Failed to delete the metric-names table.") } // Delete the Directory object - err = a.container.Sync.DeleteObject(&v3io.DeleteObjectInput{Path: path}) + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) if err != nil && !ignoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) @@ -266,15 +282,26 @@ func (a *V3ioAdapter) DeleteDB(deleteAll bool, ignoreErrors bool, fromTime int64 } } if deleteAll { + // Delete Schema file schemaPath := pathUtil.Join(a.cfg.TablePath, config.SchemaConfigFileName) a.logger.Info("Delete the TSDB configuration at '%s'.", schemaPath) - err := a.container.Sync.DeleteObject(&v3io.DeleteObjectInput{Path: schemaPath}) + err := a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: schemaPath}) if err != nil && !ignoreErrors { return errors.New("The configuration at '" + schemaPath + "' cannot be deleted or doesn't exist.") } + + // Delete Partitions directory + partitionsKvPath := a.partitionMngr.GetPartitionsTablePath() + "/" + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: partitionsKvPath}) + if err != nil && !ignoreErrors { + if !utils.IsNotExistsError(err) { + return errors.Wrapf(err, "Failed to delete partitions kv table '%s'.", partitionsKvPath) + } + } + // Delete the Directory object path := a.cfg.TablePath + "/" - err = a.container.Sync.DeleteObject(&v3io.DeleteObjectInput{Path: path}) + err = a.container.DeleteObjectSync(&v3io.DeleteObjectInput{Path: path}) if err != nil && !ignoreErrors { if !utils.IsNotExistsError(err) { return errors.Wrapf(err, "Failed to delete table object '%s'.", path) diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go b/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go index 5d1d949a248..3551c821fe2 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/utils/asynciter.go @@ -27,7 +27,8 @@ import ( "github.com/nuclio/logger" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/errors" "github.com/v3io/v3io-tsdb/pkg/config" ) @@ -44,7 +45,7 @@ type AsyncItemsCursor struct { itemIndex int items []v3io.Item input *v3io.GetItemsInput - container *v3io.Container + container v3io.Container logger logger.Logger responseChan chan *v3io.Response @@ -54,9 +55,7 
@@ type AsyncItemsCursor struct { Cnt int } -func NewAsyncItemsCursor( - container *v3io.Container, input *v3io.GetItemsInput, - workers int, shardingKeys []string, logger logger.Logger) (*AsyncItemsCursor, error) { +func NewAsyncItemsCursor(container v3io.Container, input *v3io.GetItemsInput, workers int, shardingKeys []string, logger logger.Logger) (*AsyncItemsCursor, error) { // TODO: use workers from Context.numWorkers (if no ShardingKey) if workers == 0 || input.ShardingKey != "" { @@ -167,7 +166,7 @@ func (ic *AsyncItemsCursor) processResponse() error { defer resp.Release() // Ignore 404s - if e, hasErrorCode := resp.Error.(v3io.ErrorWithStatusCode); hasErrorCode && e.StatusCode() == http.StatusNotFound { + if e, hasErrorCode := resp.Error.(v3ioerrors.ErrorWithStatusCode); hasErrorCode && e.StatusCode() == http.StatusNotFound { ic.logger.Debug("Got 404 - error: %v, request: %v", resp.Error, resp.Request().Input) ic.lastShards++ return nil diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go b/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go index f3f035f8049..1a76bb4b160 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/utils/container.go @@ -28,12 +28,11 @@ import ( "github.com/nuclio/logger" "github.com/nuclio/zap" "github.com/pkg/errors" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/dataplane" + "github.com/v3io/v3io-go/pkg/dataplane/http" "github.com/v3io/v3io-tsdb/pkg/config" ) -const defaultHttpTimeout = 30 * time.Second - func NewLogger(level string) (logger.Logger, error) { var logLevel nucliozap.Level switch level { @@ -56,41 +55,29 @@ func NewLogger(level string) (logger.Logger, error) { return log, nil } -func CreateContainer(logger logger.Logger, cfg *config.V3ioConfig) (*v3io.Container, error) { - // Create context - context, err := v3io.NewContext(logger, cfg.WebApiEndpoint, cfg.Workers) +func CreateContainer(logger logger.Logger, cfg *config.V3ioConfig, httpTimeout time.Duration) (v3io.Container, error) { + newContextInput := &v3io.NewContextInput{ + ClusterEndpoints: []string{cfg.WebApiEndpoint}, + NumWorkers: cfg.Workers, + DialTimeout: httpTimeout, + } + context, err := v3iohttp.NewContext(logger, newContextInput) if err != nil { return nil, errors.Wrap(err, "Failed to create a V3IO TSDB client.") } - if cfg.HttpTimeout == "" { - context.Sync.Timeout = defaultHttpTimeout - } else { - timeout, err := time.ParseDuration(cfg.HttpTimeout) - if err != nil { - logger.Warn("Failed to parse httpTimeout '%s'. 
Defaulting to %d millis.", cfg.HttpTimeout, defaultHttpTimeout/time.Millisecond) - context.Sync.Timeout = defaultHttpTimeout - } else { - context.Sync.Timeout = timeout - } - } - - // Create session - sessionConfig := &v3io.SessionConfig{ - Username: cfg.Username, - Password: cfg.Password, - Label: "tsdb", - SessionKey: cfg.AccessKey, - } - session, err := context.NewSessionFromConfig(sessionConfig) + session, err := context.NewSession(&v3io.NewSessionInput{ + Username: cfg.Username, + Password: cfg.Password, + AccessKey: cfg.AccessKey, + }) if err != nil { - return nil, errors.Wrap(err, "Failed to create a session.") + return nil, errors.Wrap(err, "Failed to create session.") } - // Create the container - container, err := session.NewContainer(cfg.Container) + container, err := session.NewContainer(&v3io.NewContainerInput{ContainerName: cfg.Container}) if err != nil { - return nil, errors.Wrap(err, "Failed to create a container.") + return nil, errors.Wrap(err, "Failed to create container.") } return container, nil @@ -107,7 +94,7 @@ func AsInt64Array(val []byte) []uint64 { return array } -func DeleteTable(logger logger.Logger, container *v3io.Container, path, filter string, workers int) error { +func DeleteTable(logger logger.Logger, container v3io.Container, path, filter string, workers int) error { input := v3io.GetItemsInput{Path: path, AttributeNames: []string{config.ObjectNameAttrName}, Filter: filter} iter, err := NewAsyncItemsCursor(container, &input, workers, []string{}, logger) if err != nil { diff --git a/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go b/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go index 6e2dae72292..d8e7f04cdb1 100644 --- a/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go +++ b/vendor/github.com/v3io/v3io-tsdb/pkg/utils/misc.go @@ -6,7 +6,7 @@ import ( "net/http" "strings" - "github.com/v3io/v3io-go-http" + "github.com/v3io/v3io-go/pkg/errors" ) func IsUndefined(value float64) bool { @@ -25,7 +25,7 @@ func FloatToNormalizedScientificStr(val float64) string { } func IsNotExistsError(err error) bool { - errorWithStatusCode, ok := err.(v3io.ErrorWithStatusCode) + errorWithStatusCode, ok := err.(v3ioerrors.ErrorWithStatusCode) if !ok { // error of different type return false diff --git a/vendor/github.com/valyala/fasthttp/.travis.yml b/vendor/github.com/valyala/fasthttp/.travis.yml index fca54a82a6d..104ead04588 100644 --- a/vendor/github.com/valyala/fasthttp/.travis.yml +++ b/vendor/github.com/valyala/fasthttp/.travis.yml @@ -2,7 +2,7 @@ language: go go: - tip - - 1.11 + - 1.11.x - 1.10.x - 1.9.x diff --git a/vendor/github.com/valyala/fasthttp/README.md b/vendor/github.com/valyala/fasthttp/README.md index 1a8bff836be..5fcb6398d05 100644 --- a/vendor/github.com/valyala/fasthttp/README.md +++ b/vendor/github.com/valyala/fasthttp/README.md @@ -493,6 +493,7 @@ uintBuf := fasthttp.AppendUint(nil, 1234) websocket implementation for fasthttp. * [fasthttpsession](https://github.com/phachon/fasthttpsession) - a fast and powerful session package for fasthttp servers. * [atreugo](https://github.com/savsgio/atreugo) - Micro-framework to make simple the use of routing and middlewares. + * [kratgo](https://github.com/savsgio/kratgo) - Simple, lightweight and ultra-fast HTTP Cache to speed up your websites. 
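Editor's note: the migration from v3io-go-http to the v3io-go dataplane package (see the CreateContainer and NewContainer changes above) replaces the old Sync sub-client with explicit NewContextInput/NewSessionInput/NewContainerInput structs and *Sync request methods that take a per-request DataPlaneInput. A compile-only sketch of the new flow; the package name, worker count, timeout and parameters are illustrative, and a real logger must be supplied by the caller:

package v3ioexample

import (
	"time"

	"github.com/nuclio/logger"
	v3io "github.com/v3io/v3io-go/pkg/dataplane"
	v3iohttp "github.com/v3io/v3io-go/pkg/dataplane/http"
)

// newContainer wires up context -> session -> container with the new API.
func newContainer(log logger.Logger, endpoint, username, password, accessKey, containerName string) (v3io.Container, error) {
	ctx, err := v3iohttp.NewContext(log, &v3io.NewContextInput{
		ClusterEndpoints: []string{endpoint},
		NumWorkers:       8,
		DialTimeout:      30 * time.Second,
	})
	if err != nil {
		return nil, err
	}
	session, err := ctx.NewSession(&v3io.NewSessionInput{
		Username:  username,
		Password:  password,
		AccessKey: accessKey,
	})
	if err != nil {
		return nil, err
	}
	return session.NewContainer(&v3io.NewContainerInput{ContainerName: containerName})
}

// readObject shows the per-request timeout passed through DataPlaneInput.
func readObject(container v3io.Container, path string) ([]byte, error) {
	resp, err := container.GetObjectSync(&v3io.GetObjectInput{
		Path:           path,
		DataPlaneInput: v3io.DataPlaneInput{Timeout: 30 * time.Second},
	})
	if err != nil {
		return nil, err
	}
	defer resp.Release()
	return resp.Body(), nil
}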
# FAQ diff --git a/vendor/github.com/valyala/fasthttp/args.go b/vendor/github.com/valyala/fasthttp/args.go index 4d9b35ee064..e5865cd2c73 100644 --- a/vendor/github.com/valyala/fasthttp/args.go +++ b/vendor/github.com/valyala/fasthttp/args.go @@ -4,11 +4,17 @@ import ( "bytes" "errors" "io" + "sort" "sync" "github.com/valyala/bytebufferpool" ) +const ( + argsNoValue = true + argsHasValue = false +) + // AcquireArgs returns an empty Args object from the pool. // // The returned Args may be returned to the pool with ReleaseArgs @@ -17,7 +23,7 @@ func AcquireArgs() *Args { return argsPool.Get().(*Args) } -// ReleaseArgs returns the object acquired via AquireArgs to the pool. +// ReleaseArgs returns the object acquired via AcquireArgs to the pool. // // Do not access the released Args object, otherwise data races may occur. func ReleaseArgs(a *Args) { @@ -45,8 +51,9 @@ type Args struct { } type argsKV struct { - key []byte - value []byte + key []byte + value []byte + noValue bool } // Reset clears query args. @@ -109,14 +116,29 @@ func (a *Args) QueryString() []byte { return a.buf } +// Sort sorts Args by key and then value using 'f' as comparison function. +// +// For example args.Sort(bytes.Compare) +func (a *Args) Sort(f func(x, y []byte) int) { + sort.SliceStable(a.args, func(i, j int) bool { + n := f(a.args[i].key, a.args[j].key) + if n == 0 { + return f(a.args[i].value, a.args[j].value) == -1 + } + return n == -1 + }) +} + // AppendBytes appends query string to dst and returns the extended dst. func (a *Args) AppendBytes(dst []byte) []byte { for i, n := 0, len(a.args); i < n; i++ { kv := &a.args[i] dst = AppendQuotedArg(dst, kv.key) - if len(kv.value) > 0 { + if !kv.noValue { dst = append(dst, '=') - dst = AppendQuotedArg(dst, kv.value) + if len(kv.value) > 0 { + dst = AppendQuotedArg(dst, kv.value) + } } if i+1 < n { dst = append(dst, '&') @@ -147,48 +169,74 @@ func (a *Args) DelBytes(key []byte) { // // Multiple values for the same key may be added. func (a *Args) Add(key, value string) { - a.args = appendArg(a.args, key, value) + a.args = appendArg(a.args, key, value, argsHasValue) } // AddBytesK adds 'key=value' argument. // // Multiple values for the same key may be added. func (a *Args) AddBytesK(key []byte, value string) { - a.args = appendArg(a.args, b2s(key), value) + a.args = appendArg(a.args, b2s(key), value, argsHasValue) } // AddBytesV adds 'key=value' argument. // // Multiple values for the same key may be added. func (a *Args) AddBytesV(key string, value []byte) { - a.args = appendArg(a.args, key, b2s(value)) + a.args = appendArg(a.args, key, b2s(value), argsHasValue) } // AddBytesKV adds 'key=value' argument. // // Multiple values for the same key may be added. func (a *Args) AddBytesKV(key, value []byte) { - a.args = appendArg(a.args, b2s(key), b2s(value)) + a.args = appendArg(a.args, b2s(key), b2s(value), argsHasValue) +} + +// AddNoValue adds only 'key' as argument without the '='. +// +// Multiple values for the same key may be added. +func (a *Args) AddNoValue(key string) { + a.args = appendArg(a.args, key, "", argsNoValue) +} + +// AddBytesKNoValue adds only 'key' as argument without the '='. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesKNoValue(key []byte) { + a.args = appendArg(a.args, b2s(key), "", argsNoValue) } // Set sets 'key=value' argument. func (a *Args) Set(key, value string) { - a.args = setArg(a.args, key, value) + a.args = setArg(a.args, key, value, argsHasValue) } // SetBytesK sets 'key=value' argument. 
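Editor's note: the args.go changes below add value-less arguments (a key serialized without '=') and a stable Sort helper that takes a comparison function over raw bytes. A small usage sketch built only from the API shown in this hunk:

package main

import (
	"bytes"
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	args := fasthttp.AcquireArgs()
	defer fasthttp.ReleaseArgs(args)

	args.Add("zeta", "1")
	args.Add("alpha", "2")
	args.AddNoValue("flag") // serialized as just "flag", with no '='

	// Sort by key and then value using bytes.Compare, as the Sort docs suggest.
	args.Sort(bytes.Compare)

	fmt.Println(string(args.QueryString())) // alpha=2&flag&zeta=1
}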
func (a *Args) SetBytesK(key []byte, value string) { - a.args = setArg(a.args, b2s(key), value) + a.args = setArg(a.args, b2s(key), value, argsHasValue) } // SetBytesV sets 'key=value' argument. func (a *Args) SetBytesV(key string, value []byte) { - a.args = setArg(a.args, key, b2s(value)) + a.args = setArg(a.args, key, b2s(value), argsHasValue) } // SetBytesKV sets 'key=value' argument. func (a *Args) SetBytesKV(key, value []byte) { - a.args = setArgBytes(a.args, key, value) + a.args = setArgBytes(a.args, key, value, argsHasValue) +} + +// SetNoValue sets only 'key' as argument without the '='. +// +// Only key in argumemt, like key1&key2 +func (a *Args) SetNoValue(key string) { + a.args = setArg(a.args, key, "", argsNoValue) +} + +// SetBytesKNoValue sets 'key' argument. +func (a *Args) SetBytesKNoValue(key []byte) { + a.args = setArg(a.args, b2s(key), "", argsNoValue) } // Peek returns query arg value for the given key. @@ -322,7 +370,12 @@ func copyArgs(dst, src []argsKV) []argsKV { dstKV := &dst[i] srcKV := &src[i] dstKV.key = append(dstKV.key[:0], srcKV.key...) - dstKV.value = append(dstKV.value[:0], srcKV.value...) + if srcKV.noValue { + dstKV.value = dstKV.value[:0] + } else { + dstKV.value = append(dstKV.value[:0], srcKV.value...) + } + dstKV.noValue = srcKV.noValue } return dst } @@ -345,31 +398,41 @@ func delAllArgs(args []argsKV, key string) []argsKV { return args } -func setArgBytes(h []argsKV, key, value []byte) []argsKV { - return setArg(h, b2s(key), b2s(value)) +func setArgBytes(h []argsKV, key, value []byte, noValue bool) []argsKV { + return setArg(h, b2s(key), b2s(value), noValue) } -func setArg(h []argsKV, key, value string) []argsKV { +func setArg(h []argsKV, key, value string, noValue bool) []argsKV { n := len(h) for i := 0; i < n; i++ { kv := &h[i] if key == string(kv.key) { - kv.value = append(kv.value[:0], value...) + if noValue { + kv.value = kv.value[:0] + } else { + kv.value = append(kv.value[:0], value...) + } + kv.noValue = noValue return h } } - return appendArg(h, key, value) + return appendArg(h, key, value, noValue) } -func appendArgBytes(h []argsKV, key, value []byte) []argsKV { - return appendArg(h, b2s(key), b2s(value)) +func appendArgBytes(h []argsKV, key, value []byte, noValue bool) []argsKV { + return appendArg(h, b2s(key), b2s(value), noValue) } -func appendArg(args []argsKV, key, value string) []argsKV { +func appendArg(args []argsKV, key, value string, noValue bool) []argsKV { var kv *argsKV args, kv = allocArg(args) kv.key = append(kv.key[:0], key...) - kv.value = append(kv.value[:0], value...) + if noValue { + kv.value = kv.value[:0] + } else { + kv.value = append(kv.value[:0], value...) 
+ } + kv.noValue = noValue return args } @@ -425,6 +488,7 @@ func (s *argsScanner) next(kv *argsKV) bool { if len(s.b) == 0 { return false } + kv.noValue = argsHasValue isKey := true k := 0 @@ -440,6 +504,7 @@ func (s *argsScanner) next(kv *argsKV) bool { if isKey { kv.key = decodeArgAppend(kv.key[:0], s.b[:i]) kv.value = kv.value[:0] + kv.noValue = argsNoValue } else { kv.value = decodeArgAppend(kv.value[:0], s.b[k:i]) } @@ -451,6 +516,7 @@ func (s *argsScanner) next(kv *argsKV) bool { if isKey { kv.key = decodeArgAppend(kv.key[:0], s.b) kv.value = kv.value[:0] + kv.noValue = argsNoValue } else { kv.value = decodeArgAppend(kv.value[:0], s.b[k:]) } diff --git a/vendor/github.com/valyala/fasthttp/bytesconv.go b/vendor/github.com/valyala/fasthttp/bytesconv.go index 6292957dbb8..8c0e1545d18 100644 --- a/vendor/github.com/valyala/fasthttp/bytesconv.go +++ b/vendor/github.com/valyala/fasthttp/bytesconv.go @@ -183,7 +183,8 @@ func parseUintBuf(b []byte) (int, int, error) { } return v, i, nil } - if i >= maxIntChars { + // Test for overflow. + if v*10 < v { return -1, i, errTooLongInt } v = 10*v + int(k) @@ -434,23 +435,3 @@ func appendQuotedPath(dst, src []byte) []byte { } return dst } - -// EqualBytesStr returns true if string(b) == s. -// -// This function has no performance benefits comparing to string(b) == s. -// It is left here for backwards compatibility only. -// -// Deprecated: may be deleted soon. -func EqualBytesStr(b []byte, s string) bool { - return string(b) == s -} - -// AppendBytesStr appends src to dst and returns the extended dst. -// -// This function has no performance benefits comparing to append(dst, src...). -// It is left here for backwards compatibility only. -// -// Deprecated: may be deleted soon. -func AppendBytesStr(dst []byte, src string) []byte { - return append(dst, src...) -} diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_32.go b/vendor/github.com/valyala/fasthttp/bytesconv_32.go index 14377547443..7fd6f5f12b9 100644 --- a/vendor/github.com/valyala/fasthttp/bytesconv_32.go +++ b/vendor/github.com/valyala/fasthttp/bytesconv_32.go @@ -3,6 +3,5 @@ package fasthttp const ( - maxIntChars = 9 maxHexIntChars = 7 ) diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_64.go b/vendor/github.com/valyala/fasthttp/bytesconv_64.go index 09d07ef10c8..edf7309c2ba 100644 --- a/vendor/github.com/valyala/fasthttp/bytesconv_64.go +++ b/vendor/github.com/valyala/fasthttp/bytesconv_64.go @@ -3,6 +3,5 @@ package fasthttp const ( - maxIntChars = 18 maxHexIntChars = 15 ) diff --git a/vendor/github.com/valyala/fasthttp/client.go b/vendor/github.com/valyala/fasthttp/client.go index 64be717ada9..89e98082df9 100644 --- a/vendor/github.com/valyala/fasthttp/client.go +++ b/vendor/github.com/valyala/fasthttp/client.go @@ -60,6 +60,11 @@ func Do(req *Request, resp *Response) error { // // It is recommended obtaining req and resp via AcquireRequest // and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try using a Client and setting a ReadTimeout. func DoTimeout(req *Request, resp *Response, timeout time.Duration) error { return defaultClient.DoTimeout(req, resp, timeout) } @@ -154,6 +159,10 @@ type Client struct { // Default client name is used if not set. 
Name string + // NoDefaultUserAgentHeader when set to true, causes the default + // User-Agent header to be excluded from the Request. + NoDefaultUserAgentHeader bool + // Callback for establishing new connections to hosts. // // Default Dial is used if not set. @@ -184,6 +193,11 @@ type Client struct { // after DefaultMaxIdleConnDuration. MaxIdleConnDuration time.Duration + // Maximum number of attempts for idempotent calls + // + // DefaultMaxIdemponentCallAttempts is used if not set. + MaxIdemponentCallAttempts int + // Per-connection buffer size for responses' reading. // This also limits the maximum header size. // @@ -307,6 +321,11 @@ func (c *Client) Post(dst []byte, url string, postArgs *Args) (statusCode int, b // // It is recommended obtaining req and resp via AcquireRequest // and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try setting a ReadTimeout. func (c *Client) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { return clientDoTimeout(req, resp, timeout, c) } @@ -389,12 +408,14 @@ func (c *Client) Do(req *Request, resp *Response) error { hc = &HostClient{ Addr: addMissingPort(string(host), isTLS), Name: c.Name, + NoDefaultUserAgentHeader: c.NoDefaultUserAgentHeader, Dial: c.Dial, DialDualStack: c.DialDualStack, IsTLS: isTLS, TLSConfig: c.TLSConfig, MaxConns: c.MaxConnsPerHost, MaxIdleConnDuration: c.MaxIdleConnDuration, + MaxIdemponentCallAttempts: c.MaxIdemponentCallAttempts, ReadBufferSize: c.ReadBufferSize, WriteBufferSize: c.WriteBufferSize, ReadTimeout: c.ReadTimeout, @@ -418,11 +439,15 @@ func (c *Client) Do(req *Request, resp *Response) error { func (c *Client) mCleaner(m map[string]*HostClient) { mustStop := false + for { - t := time.Now() c.mLock.Lock() for k, v := range m { - if t.Sub(v.LastUseTime()) > time.Minute { + v.connsLock.Lock() + shouldRemove := v.connsCount == 0 + v.connsLock.Unlock() + + if shouldRemove { delete(m, k) } } @@ -491,6 +516,10 @@ type HostClient struct { // Client name. Used in User-Agent request header. Name string + // NoDefaultUserAgentHeader when set to true, causes the default + // User-Agent header to be excluded from the Request. + NoDefaultUserAgentHeader bool + // Callback for establishing new connection to the host. // // Default Dial is used if not set. @@ -515,6 +544,9 @@ type HostClient struct { // Maximum number of connections which may be established to all hosts // listed in Addr. // + // You can change this value while the HostClient is being used + // using HostClient.SetMaxConns(value) + // // DefaultMaxConnsPerHost is used if not set. 
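Editor's note: the DoTimeout warnings added in this hunk point out that a timed-out call keeps running in the background and can fill up the connection pool, and recommend a Client with ReadTimeout instead. A hedged sketch combining that advice with the new NoDefaultUserAgentHeader and MaxIdemponentCallAttempts fields; the URL and values are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	client := &fasthttp.Client{
		Name:                      "tsdb-example",
		NoDefaultUserAgentHeader:  true,             // drop the default User-Agent header
		ReadTimeout:               10 * time.Second, // bounds how long a response read may take
		MaxIdemponentCallAttempts: 3,                // retry budget for idempotent requests
	}

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	req.SetRequestURI("http://example.com/")
	if err := client.Do(req, resp); err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("status:", resp.StatusCode())
}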
MaxConns int @@ -598,7 +630,7 @@ type HostClient struct { readerPool sync.Pool writerPool sync.Pool - pendingRequests uint64 + pendingRequests int32 connsCleanerRun bool } @@ -608,9 +640,6 @@ type clientConn struct { createdTime time.Time lastUseTime time.Time - - lastReadDeadlineTime time.Time - lastWriteDeadlineTime time.Time } var startTimeUnix = time.Now().Unix() @@ -724,7 +753,7 @@ func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDo } }() - tc := acquireTimer(timeout) + tc := AcquireTimer(timeout) select { case resp := <-ch: ReleaseRequest(req) @@ -736,7 +765,7 @@ func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDo body = dst err = ErrTimeout } - releaseTimer(tc) + ReleaseTimer(tc) return statusCode, body, err } @@ -770,9 +799,24 @@ func doRequestFollowRedirects(req *Request, dst []byte, url string, c clientDoer resp.keepBodyBuffer = true oldBody := bodyBuf.B bodyBuf.B = dst + scheme := req.uri.Scheme() + req.schemaUpdate = false redirectsCount := 0 for { + // In case redirect to different scheme + if redirectsCount > 0 && !bytes.Equal(scheme, req.uri.Scheme()) { + if strings.HasPrefix(url, string(strHTTPS)) { + req.isTLS = true + req.uri.SetSchemeBytes(strHTTPS) + } else { + req.isTLS = false + req.uri.SetSchemeBytes(strHTTP) + } + scheme = req.uri.Scheme() + req.schemaUpdate = true + } + req.parsedURI = false req.Header.host = req.Header.host[:0] req.SetRequestURI(url) @@ -886,6 +930,11 @@ func ReleaseResponse(resp *Response) { // // It is recommended obtaining req and resp via AcquireRequest // and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try setting a ReadTimeout. func (c *HostClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { return clientDoTimeout(req, resp, timeout, c) } @@ -936,6 +985,9 @@ func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c client req.copyToSkipBody(reqCopy) swapRequestBody(req, reqCopy) respCopy := AcquireResponse() + // Not calling resp.copyToSkipBody(respCopy) here to avoid + // unexpected messing with headers + respCopy.SkipBody = resp.SkipBody // Note that the request continues execution on ErrTimeout until // client-specific ReadTimeout exceeds. This helps limiting load @@ -944,11 +996,20 @@ func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c client // Without this 'hack' the load on slow host could exceed MaxConns* // concurrent requests, since timed out requests on client side // usually continue execution on the host. 
+ + var cleanup int32 go func() { - ch <- c.Do(reqCopy, respCopy) + errDo := c.Do(reqCopy, respCopy) + if atomic.LoadInt32(&cleanup) == 1 { + ReleaseResponse(respCopy) + ReleaseRequest(reqCopy) + errorChPool.Put(chv) + } else { + ch <- errDo + } }() - tc := acquireTimer(timeout) + tc := AcquireTimer(timeout) var err error select { case err = <-ch: @@ -961,9 +1022,10 @@ func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c client ReleaseRequest(reqCopy) errorChPool.Put(chv) case <-tc.C: + atomic.StoreInt32(&cleanup, 1) err = ErrTimeout } - releaseTimer(tc) + ReleaseTimer(tc) return err } @@ -993,7 +1055,7 @@ func (c *HostClient) Do(req *Request, resp *Response) error { } attempts := 0 - atomic.AddUint64(&c.pendingRequests, 1) + atomic.AddInt32(&c.pendingRequests, 1) for { retry, err = c.do(req, resp) if err == nil || !retry { @@ -1017,7 +1079,7 @@ func (c *HostClient) Do(req *Request, resp *Response) error { break } } - atomic.AddUint64(&c.pendingRequests, ^uint64(0)) + atomic.AddInt32(&c.pendingRequests, -1) if err == io.EOF { err = ErrConnectionClosed @@ -1031,7 +1093,7 @@ func (c *HostClient) Do(req *Request, resp *Response) error { // This function may be used for balancing load among multiple HostClient // instances. func (c *HostClient) PendingRequests() int { - return int(atomic.LoadUint64(&c.pendingRequests)) + return int(atomic.LoadInt32(&c.pendingRequests)) } func isIdempotent(req *Request) bool { @@ -1068,23 +1130,31 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) // so the GC may reclaim these resources (e.g. response body). resp.Reset() + // If we detected a redirect to another schema + if req.schemaUpdate { + c.IsTLS = bytes.Equal(req.URI().Scheme(), strHTTPS) + c.Addr = addMissingPort(string(req.Host()), c.IsTLS) + c.addrIdx = 0 + c.addrs = nil + req.schemaUpdate = false + req.SetConnectionClose() + } + cc, err := c.acquireConn() if err != nil { return false, err } conn := cc.c + resp.parseNetConn(conn) + if c.WriteTimeout > 0 { - // Optimization: update write deadline only if more than 25% - // of the last write deadline exceeded. - // See https://github.com/golang/go/issues/15133 for details. + // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details currentTime := time.Now() - if currentTime.Sub(cc.lastWriteDeadlineTime) > (c.WriteTimeout >> 2) { - if err = conn.SetWriteDeadline(currentTime.Add(c.WriteTimeout)); err != nil { - c.closeConn(cc) - return true, err - } - cc.lastWriteDeadlineTime = currentTime + if err = conn.SetWriteDeadline(currentTime.Add(c.WriteTimeout)); err != nil { + c.closeConn(cc) + return true, err } } @@ -1116,16 +1186,12 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) c.releaseWriter(bw) if c.ReadTimeout > 0 { - // Optimization: update read deadline only if more than 25% - // of the last read deadline exceeded. - // See https://github.com/golang/go/issues/15133 for details. 
+ // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details currentTime := time.Now() - if currentTime.Sub(cc.lastReadDeadlineTime) > (c.ReadTimeout >> 2) { - if err = conn.SetReadDeadline(currentTime.Add(c.ReadTimeout)); err != nil { - c.closeConn(cc) - return true, err - } - cc.lastReadDeadlineTime = currentTime + if err = conn.SetReadDeadline(currentTime.Add(c.ReadTimeout)); err != nil { + c.closeConn(cc) + return true, err } } @@ -1140,7 +1206,9 @@ func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) if err = resp.ReadLimitBody(br, c.MaxResponseBodySize); err != nil { c.releaseReader(br) c.closeConn(cc) - return true, err + // Don't retry in case of ErrBodyTooLarge since we will just get the same again. + retry := err != ErrBodyTooLarge + return retry, err } c.releaseReader(br) @@ -1175,6 +1243,12 @@ var ( "Make sure the server returns 'Connection: close' response header before closing the connection") ) +func (c *HostClient) SetMaxConns(newMaxConns int) { + c.connsLock.Lock() + c.MaxConns = newMaxConns + c.connsLock.Unlock() +} + func (c *HostClient) acquireConn() (*clientConn, error) { var cc *clientConn createConn := false @@ -1507,7 +1581,7 @@ func (c *HostClient) getClientName() []byte { var clientName []byte if v == nil { clientName = []byte(c.Name) - if len(clientName) == 0 { + if len(clientName) == 0 && !c.NoDefaultUserAgentHeader { clientName = defaultUserAgent } c.clientName.Store(clientName) @@ -1674,6 +1748,11 @@ type pipelineWork struct { // // It is recommended obtaining req and resp via AcquireRequest // and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try setting a ReadTimeout. func (c *PipelineClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { return c.DoDeadline(req, resp, time.Now().Add(timeout)) } @@ -1979,8 +2058,6 @@ func (c *pipelineConnClient) writer(conn net.Conn, stopCh <-chan struct{}) error w *pipelineWork err error - - lastWriteDeadlineTime time.Time ) close(instantTimerCh) for { @@ -2012,18 +2089,16 @@ func (c *pipelineConnClient) writer(conn net.Conn, stopCh <-chan struct{}) error continue } + w.resp.parseNetConn(conn) + if writeTimeout > 0 { - // Optimization: update write deadline only if more than 25% - // of the last write deadline exceeded. - // See https://github.com/golang/go/issues/15133 for details. 
+ // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details currentTime := time.Now() - if currentTime.Sub(lastWriteDeadlineTime) > (writeTimeout >> 2) { - if err = conn.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil { - w.err = err - w.done <- struct{}{} - return err - } - lastWriteDeadlineTime = currentTime + if err = conn.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err } } if err = w.req.Write(bw); err != nil { @@ -2077,8 +2152,6 @@ func (c *pipelineConnClient) reader(conn net.Conn, stopCh <-chan struct{}) error var ( w *pipelineWork err error - - lastReadDeadlineTime time.Time ) for { select { @@ -2094,17 +2167,13 @@ func (c *pipelineConnClient) reader(conn net.Conn, stopCh <-chan struct{}) error } if readTimeout > 0 { - // Optimization: update read deadline only if more than 25% - // of the last read deadline exceeded. - // See https://github.com/golang/go/issues/15133 for details. + // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details currentTime := time.Now() - if currentTime.Sub(lastReadDeadlineTime) > (readTimeout >> 2) { - if err = conn.SetReadDeadline(currentTime.Add(readTimeout)); err != nil { - w.err = err - w.done <- struct{}{} - return err - } - lastReadDeadlineTime = currentTime + if err = conn.SetReadDeadline(currentTime.Add(readTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err } } if err = w.resp.Read(br); err != nil { diff --git a/vendor/github.com/valyala/fasthttp/cookie.go b/vendor/github.com/valyala/fasthttp/cookie.go index d5abef2f97b..8137643c24b 100644 --- a/vendor/github.com/valyala/fasthttp/cookie.go +++ b/vendor/github.com/valyala/fasthttp/cookie.go @@ -18,6 +18,21 @@ var ( CookieExpireUnlimited = zeroTime ) +// CookieSameSite is an enum for the mode in which the SameSite flag should be set for the given cookie. +// See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details. +type CookieSameSite int + +const ( + // CookieSameSiteDisabled removes the SameSite flag + CookieSameSiteDisabled CookieSameSite = iota + // CookieSameSiteDefaultMode sets the SameSite flag + CookieSameSiteDefaultMode + // CookieSameSiteLaxMode sets the SameSite flag with the "Lax" parameter + CookieSameSiteLaxMode + // CookieSameSiteStrictMode sets the SameSite flag with the "Strict" parameter + CookieSameSiteStrictMode +) + // AcquireCookie returns an empty Cookie object from the pool. // // The returned object may be returned back to the pool with ReleaseCookie. @@ -58,6 +73,7 @@ type Cookie struct { httpOnly bool secure bool + sameSite CookieSameSite bufKV argsKV buf []byte @@ -74,6 +90,7 @@ func (c *Cookie) CopyTo(src *Cookie) { c.path = append(c.path[:0], src.path...) c.httpOnly = src.httpOnly c.secure = src.secure + c.sameSite = src.sameSite } // HTTPOnly returns true if the cookie is http only. @@ -96,6 +113,16 @@ func (c *Cookie) SetSecure(secure bool) { c.secure = secure } +// SameSite returns the SameSite mode. +func (c *Cookie) SameSite() CookieSameSite { + return c.sameSite +} + +// SetSameSite sets the cookie's SameSite flag to the given value. +func (c *Cookie) SetSameSite(mode CookieSameSite) { + c.sameSite = mode +} + // Path returns cookie path. 
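Editor's note: cookie.go gains a SameSite mode on the Cookie type; once set, AppendBytes emits the corresponding "SameSite[=Lax|Strict]" attribute. A brief usage sketch; key and value are placeholders and the printed order of attributes is indicative only:

package main

import (
	"fmt"

	"github.com/valyala/fasthttp"
)

func main() {
	c := fasthttp.AcquireCookie()
	defer fasthttp.ReleaseCookie(c)

	c.SetKey("session")
	c.SetValue("abc123")
	c.SetHTTPOnly(true)
	c.SetSecure(true)
	c.SetSameSite(fasthttp.CookieSameSiteLaxMode) // adds "; SameSite=Lax"

	// Prints something like: session=abc123; HttpOnly; secure; SameSite=Lax
	fmt.Println(c.String())
}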
func (c *Cookie) Path() []byte { return c.path @@ -209,6 +236,7 @@ func (c *Cookie) Reset() { c.path = c.path[:0] c.httpOnly = false c.secure = false + c.sameSite = CookieSameSiteDisabled } // AppendBytes appends cookie representation to dst and returns @@ -246,6 +274,21 @@ func (c *Cookie) AppendBytes(dst []byte) []byte { dst = append(dst, ';', ' ') dst = append(dst, strCookieSecure...) } + switch c.sameSite { + case CookieSameSiteDefaultMode: + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSameSite...) + case CookieSameSiteLaxMode: + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSameSite...) + dst = append(dst, '=') + dst = append(dst, strCookieSameSiteLax...) + case CookieSameSiteStrictMode: + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSameSite...) + dst = append(dst, '=') + dst = append(dst, strCookieSameSiteStrict...) + } return dst } @@ -330,6 +373,21 @@ func (c *Cookie) ParseBytes(src []byte) error { if caseInsensitiveCompare(strCookiePath, kv.key) { c.path = append(c.path[:0], kv.value...) } + + case 's': // "samesite" + if caseInsensitiveCompare(strCookieSameSite, kv.key) { + // Case insensitive switch on first char + switch kv.value[0] | 0x20 { + case 'l': // "lax" + if caseInsensitiveCompare(strCookieSameSiteLax, kv.value) { + c.sameSite = CookieSameSiteLaxMode + } + case 's': // "strict" + if caseInsensitiveCompare(strCookieSameSiteStrict, kv.value) { + c.sameSite = CookieSameSiteStrictMode + } + } + } } } else if len(kv.value) != 0 { @@ -343,6 +401,8 @@ func (c *Cookie) ParseBytes(src []byte) error { case 's': // "secure" if caseInsensitiveCompare(strCookieSecure, kv.value) { c.secure = true + } else if caseInsensitiveCompare(strCookieSameSite, kv.value) { + c.sameSite = CookieSameSiteDefaultMode } } } // else empty or no match diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go index 80aca3fbcb9..1b1a5f3666c 100644 --- a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go @@ -13,13 +13,18 @@ import ( type InmemoryListener struct { lock sync.Mutex closed bool - conns chan net.Conn + conns chan acceptConn +} + +type acceptConn struct { + conn net.Conn + accepted chan struct{} } // NewInmemoryListener returns new in-memory dialer<->net.Listener. func NewInmemoryListener() *InmemoryListener { return &InmemoryListener{ - conns: make(chan net.Conn, 1024), + conns: make(chan acceptConn, 1024), } } @@ -33,7 +38,8 @@ func (ln *InmemoryListener) Accept() (net.Conn, error) { if !ok { return nil, fmt.Errorf("InmemoryListener is already closed: use of closed network connection") } - return c, nil + close(c.accepted) + return c.conn, nil } // Close implements net.Listener's Close. @@ -59,8 +65,9 @@ func (ln *InmemoryListener) Addr() net.Addr { } } -// Dial creates new client<->server connection, enqueues server side -// of the connection to Accept and returns client side of the connection. +// Dial creates new client<->server connection. +// Just like a real Dial it only returns once the server +// has accepted the connection. // // It is safe calling Dial from concurrently running goroutines. 
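Editor's note: the fasthttputil.InmemoryListener change below makes Dial behave like a real dialer, returning only once the server side has accepted the connection. A minimal sketch of the in-memory client/server pairing (handler body and URL are illustrative):

package main

import (
	"fmt"
	"net"

	"github.com/valyala/fasthttp"
	"github.com/valyala/fasthttp/fasthttputil"
)

func main() {
	ln := fasthttputil.NewInmemoryListener()

	// Serve over the in-memory listener; Dial below will not return
	// until this server has accepted the connection.
	go func() {
		_ = fasthttp.Serve(ln, func(ctx *fasthttp.RequestCtx) {
			ctx.WriteString("hello from in-memory server")
		})
	}()

	client := &fasthttp.Client{
		// Route all client connections through the in-memory listener.
		Dial: func(addr string) (net.Conn, error) { return ln.Dial() },
	}

	statusCode, body, err := client.Get(nil, "http://inmemory/")
	if err != nil {
		panic(err)
	}
	fmt.Println(statusCode, string(body))
}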
func (ln *InmemoryListener) Dial() (net.Conn, error) { @@ -68,8 +75,11 @@ func (ln *InmemoryListener) Dial() (net.Conn, error) { cConn := pc.Conn1() sConn := pc.Conn2() ln.lock.Lock() + accepted := make(chan struct{}) if !ln.closed { - ln.conns <- sConn + ln.conns <- acceptConn{sConn, accepted} + // Wait until the connection has been accepted. + <-accepted } else { sConn.Close() cConn.Close() diff --git a/vendor/github.com/valyala/fasthttp/fs.go b/vendor/github.com/valyala/fasthttp/fs.go index 7a73a795dd8..1e9b4ab193c 100644 --- a/vendor/github.com/valyala/fasthttp/fs.go +++ b/vendor/github.com/valyala/fasthttp/fs.go @@ -943,7 +943,7 @@ func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool) } fm := make(map[string]os.FileInfo, len(fileinfos)) - var filenames []string + filenames := make([]string, 0, len(fileinfos)) for _, fi := range fileinfos { name := fi.Name() if strings.HasSuffix(name, h.compressedFileSuffix) { @@ -958,7 +958,7 @@ func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool) base.CopyTo(&u) u.Update(string(u.Path()) + "/") - sort.Sort(sort.StringSlice(filenames)) + sort.Strings(filenames) for _, name := range filenames { u.Update(name) pathEscaped := html.EscapeString(string(u.Path())) diff --git a/vendor/github.com/valyala/fasthttp/header.go b/vendor/github.com/valyala/fasthttp/header.go index e00743e327b..190ac3238bb 100644 --- a/vendor/github.com/valyala/fasthttp/header.go +++ b/vendor/github.com/valyala/fasthttp/header.go @@ -72,6 +72,10 @@ type RequestHeader struct { cookies []argsKV rawHeaders []byte + + // stores an immutable copy of headers as they were received from the + // wire. + rawHeadersCopy []byte } // SetContentRange sets 'Content-Range: bytes startPos-endPos/contentLength' @@ -217,7 +221,7 @@ func (h *ResponseHeader) SetContentLength(contentLength int) { h.SetConnectionClose() value = strIdentity } - h.h = setArgBytes(h.h, strTransferEncoding, value) + h.h = setArgBytes(h.h, strTransferEncoding, value, argsHasValue) } } @@ -264,7 +268,7 @@ func (h *RequestHeader) SetContentLength(contentLength int) { h.h = delAllArgsBytes(h.h, strTransferEncoding) } else { h.contentLengthBytes = h.contentLengthBytes[:0] - h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) } } @@ -722,6 +726,7 @@ func (h *RequestHeader) CopyTo(dst *RequestHeader) { dst.cookiesCollected = h.cookiesCollected dst.rawHeaders = append(dst.rawHeaders[:0], h.rawHeaders...) dst.rawHeadersParsed = h.rawHeadersParsed + dst.rawHeadersCopy = append(dst.rawHeadersCopy[:0], h.rawHeadersCopy...) } // VisitAll calls f for each header. @@ -775,6 +780,8 @@ func (h *RequestHeader) VisitAllCookie(f func(key, value []byte)) { // // f must not retain references to key and/or value after returning. // Copy key and/or value contents before returning if you need retaining them. +// +// To get the headers in order they were received use VisitAllInOrder. func (h *RequestHeader) VisitAll(f func(key, value []byte)) { h.parseRawHeaders() host := h.Host() @@ -804,6 +811,25 @@ func (h *RequestHeader) VisitAll(f func(key, value []byte)) { } } +// VisitAllInOrder calls f for each header in the order they were received. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +// +// This function is slightly slower than VisitAll because it has to reparse the +// raw headers to get the order. 
+func (h *RequestHeader) VisitAllInOrder(f func(key, value []byte)) { + h.parseRawHeaders() + var s headerScanner + s.b = h.rawHeaders + s.disableNormalizing = h.disableNormalizing + for s.next() { + if len(s.key) > 0 { + f(s.key, s.value) + } + } +} + // Del deletes header with the given key. func (h *ResponseHeader) Del(key string) { k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) @@ -874,7 +900,7 @@ func (h *RequestHeader) del(key []byte) { // Use Set for setting a single header for the given key. func (h *ResponseHeader) Add(key, value string) { k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - h.h = appendArg(h.h, b2s(k), value) + h.h = appendArg(h.h, b2s(k), value, argsHasValue) } // AddBytesK adds the given 'key: value' header. @@ -957,14 +983,14 @@ func (h *ResponseHeader) SetCanonical(key, value []byte) { h.SetConnectionClose() } else { h.ResetConnectionClose() - h.h = setArgBytes(h.h, key, value) + h.h = setArgBytes(h.h, key, value, argsHasValue) } case "Transfer-Encoding": // Transfer-Encoding is managed automatically. case "Date": // Date is managed automatically. default: - h.h = setArgBytes(h.h, key, value) + h.h = setArgBytes(h.h, key, value, argsHasValue) } } @@ -972,14 +998,14 @@ func (h *ResponseHeader) SetCanonical(key, value []byte) { // // It is save re-using the cookie after the function returns. func (h *ResponseHeader) SetCookie(cookie *Cookie) { - h.cookies = setArgBytes(h.cookies, cookie.Key(), cookie.Cookie()) + h.cookies = setArgBytes(h.cookies, cookie.Key(), cookie.Cookie(), argsHasValue) } // SetCookie sets 'key: value' cookies. func (h *RequestHeader) SetCookie(key, value string) { h.parseRawHeaders() h.collectCookies() - h.cookies = setArg(h.cookies, key, value) + h.cookies = setArg(h.cookies, key, value, argsHasValue) } // SetCookieBytesK sets 'key: value' cookies. @@ -1058,7 +1084,7 @@ func (h *RequestHeader) DelAllCookies() { // Use Set for setting a single header for the given key. func (h *RequestHeader) Add(key, value string) { k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) - h.h = appendArg(h.h, b2s(k), value) + h.h = appendArg(h.h, b2s(k), value, argsHasValue) } // AddBytesK adds the given 'key: value' header. @@ -1142,12 +1168,12 @@ func (h *RequestHeader) SetCanonical(key, value []byte) { h.SetConnectionClose() } else { h.ResetConnectionClose() - h.h = setArgBytes(h.h, key, value) + h.h = setArgBytes(h.h, key, value, argsHasValue) } case "Transfer-Encoding": // Transfer-Encoding is managed automatically. default: - h.h = setArgBytes(h.h, key, value) + h.h = setArgBytes(h.h, key, value, argsHasValue) } } @@ -1370,6 +1396,11 @@ func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { } } + if n == 1 { + // We didn't read a single byte. + return errNothingRead + } + return fmt.Errorf("error when reading request headers: %s", err) } b = mustPeekBuffered(r) @@ -1519,6 +1550,20 @@ func (h *RequestHeader) Header() []byte { return h.bufKV.value } +// RawHeaders returns raw header key/value bytes. +// +// Depending on server configuration, header keys may be normalized to +// capital-case in place. +// +// This copy is set aside during parsing, so empty slice is returned for all +// cases where parsing did not happen. Similarly, request line is not stored +// during parsing and can not be returned. +// +// The slice is not safe to use after the handler returns. +func (h *RequestHeader) RawHeaders() []byte { + return h.rawHeadersCopy +} + // String returns request header representation. 
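Editor's note: header.go now keeps an immutable copy of the raw request headers (RawHeaders) and adds VisitAllInOrder, which replays them in wire order at the cost of reparsing. A sketch that parses a raw request and walks its headers in order; the request bytes are made up:

package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/valyala/fasthttp"
)

func main() {
	raw := "GET /metrics HTTP/1.1\r\n" +
		"Host: example.com\r\n" +
		"X-First: 1\r\n" +
		"X-Second: 2\r\n" +
		"\r\n"

	var req fasthttp.Request
	if err := req.Read(bufio.NewReader(strings.NewReader(raw))); err != nil {
		panic(err)
	}

	// Headers are visited in the order they appeared on the wire.
	req.Header.VisitAllInOrder(func(key, value []byte) {
		fmt.Printf("%s: %s\n", key, value)
	})

	// The untouched copy set aside during parsing.
	fmt.Printf("raw copy: %d bytes\n", len(req.Header.RawHeaders()))
}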
func (h *RequestHeader) String() string { return string(h.Header()) @@ -1540,10 +1585,9 @@ func (h *RequestHeader) AppendBytes(dst []byte) []byte { } userAgent := h.UserAgent() - if len(userAgent) == 0 { - userAgent = defaultUserAgent + if len(userAgent) > 0 { + dst = appendHeaderLine(dst, strUserAgent, userAgent) } - dst = appendHeaderLine(dst, strUserAgent, userAgent) host := h.Host() if len(host) > 0 { @@ -1616,18 +1660,20 @@ func (h *RequestHeader) parse(buf []byte) (int, error) { } var n int + var rawHeaders []byte + rawHeaders, n, err = readRawHeaders(h.rawHeaders[:0], buf[m:]) + if err != nil { + return 0, err + } + h.rawHeadersCopy = append(h.rawHeadersCopy[:0], rawHeaders...) if !h.ignoreBody() || h.noHTTP11 { n, err = h.parseHeaders(buf[m:]) if err != nil { return 0, err } + h.rawHeaders = append(h.rawHeaders[:0], buf[m:m+n]...) h.rawHeadersParsed = true } else { - var rawHeaders []byte - rawHeaders, n, err = readRawHeaders(h.rawHeaders[:0], buf[m:]) - if err != nil { - return 0, err - } h.rawHeaders = rawHeaders } return m + n, nil @@ -1771,7 +1817,7 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { if caseInsensitiveCompare(s.key, strContentType) { h.contentType = append(h.contentType[:0], s.value...) continue - } + } if caseInsensitiveCompare(s.key, strContentLength) { if h.contentLength != -1 { if h.contentLength, err = parseContentLength(s.value); err != nil { @@ -1787,7 +1833,7 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { h.connectionClose = true } else { h.connectionClose = false - h.h = appendArgBytes(h.h, s.key, s.value) + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } continue } @@ -1806,12 +1852,12 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { if caseInsensitiveCompare(s.key, strTransferEncoding) { if !bytes.Equal(s.value, strIdentity) { h.contentLength = -1 - h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) } continue } } - h.h = appendArgBytes(h.h, s.key, s.value) + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } } if s.err != nil { @@ -1823,13 +1869,13 @@ func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { h.contentLengthBytes = h.contentLengthBytes[:0] } if h.contentLength == -2 && !h.ConnectionUpgrade() && !h.mustSkipContentLength() { - h.h = setArgBytes(h.h, strTransferEncoding, strIdentity) + h.h = setArgBytes(h.h, strTransferEncoding, strIdentity, argsHasValue) h.connectionClose = true } if h.noHTTP11 && !h.connectionClose { // close connection for non-http/1.1 response unless 'Connection: keep-alive' is set. 
v := peekArgBytes(h.h, strConnection) - h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) } return len(buf) - len(s.b), nil @@ -1875,7 +1921,7 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { h.connectionClose = true } else { h.connectionClose = false - h.h = appendArgBytes(h.h, s.key, s.value) + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } continue } @@ -1883,13 +1929,13 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { if caseInsensitiveCompare(s.key, strTransferEncoding) { if !bytes.Equal(s.value, strIdentity) { h.contentLength = -1 - h.h = setArgBytes(h.h, strTransferEncoding, strChunked) + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) } continue } } } - h.h = appendArgBytes(h.h, s.key, s.value) + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) } if s.err != nil { h.connectionClose = true @@ -1902,10 +1948,9 @@ func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { if h.noHTTP11 && !h.connectionClose { // close connection for non-http/1.1 request unless 'Connection: keep-alive' is set. v := peekArgBytes(h.h, strConnection) - h.connectionClose = !hasHeaderValue(v, strKeepAlive) && !hasHeaderValue(v, strKeepAliveCamelCase) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) } - - return len(buf) - len(s.b), nil + return s.hLen, nil } func (h *RequestHeader) parseRawHeaders() { @@ -1956,6 +2001,9 @@ type headerScanner struct { value []byte err error + // hLen stores header subslice len + hLen int + disableNormalizing bool } @@ -1963,10 +2011,12 @@ func (s *headerScanner) next() bool { bLen := len(s.b) if bLen >= 2 && s.b[0] == '\r' && s.b[1] == '\n' { s.b = s.b[2:] + s.hLen += 2 return false } if bLen >= 1 && s.b[0] == '\n' { s.b = s.b[1:] + s.hLen++ return false } n := bytes.IndexByte(s.b, ':') @@ -1980,6 +2030,7 @@ func (s *headerScanner) next() bool { for len(s.b) > n && s.b[n] == ' ' { n++ } + s.hLen += n s.b = s.b[n:] n = bytes.IndexByte(s.b, '\n') if n < 0 { @@ -1987,6 +2038,7 @@ func (s *headerScanner) next() bool { return false } s.value = s.b[:n] + s.hLen += n + 1 s.b = s.b[n+1:] if n > 0 && s.value[n-1] == '\r' { @@ -2034,7 +2086,7 @@ func hasHeaderValue(s, value []byte) bool { var vs headerValueScanner vs.b = s for vs.next() { - if bytes.Equal(vs.value, value) { + if caseInsensitiveCompare(vs.value, value) { return true } } @@ -2121,6 +2173,7 @@ func AppendNormalizedHeaderKeyBytes(dst, key []byte) []byte { var ( errNeedMore = errors.New("need more data: cannot find trailing lf") errSmallBuffer = errors.New("small read buffer. Increase ReadBufferSize") + errNothingRead = errors.New("read timeout with nothing read") ) // ErrSmallBuffer is returned when the provided buffer size is too small diff --git a/vendor/github.com/valyala/fasthttp/http.go b/vendor/github.com/valyala/fasthttp/http.go index f8c38fc531c..10dc4654e84 100644 --- a/vendor/github.com/valyala/fasthttp/http.go +++ b/vendor/github.com/valyala/fasthttp/http.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "mime/multipart" + "net" "os" "sync" @@ -44,6 +45,9 @@ type Request struct { keepBodyBuffer bool isTLS bool + + // To detect scheme changes in redirects + schemaUpdate bool } // Response represents HTTP response. 
@@ -72,6 +76,11 @@ type Response struct { SkipBody bool keepBodyBuffer bool + + // Remote TCPAddr from concurrently net.Conn + raddr net.Addr + // Local TCPAddr from concurrently net.Conn + laddr net.Addr } // SetHost sets host for the request. @@ -278,6 +287,23 @@ func (w *requestBodyWriter) Write(p []byte) (int, error) { return len(p), nil } +func (resp *Response) parseNetConn(conn net.Conn) { + resp.raddr = conn.RemoteAddr() + resp.laddr = conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. The Addr returned is shared +// by all invocations of RemoteAddr, so do not modify it. +func (resp *Response) RemoteAddr() net.Addr { + return resp.raddr +} + +// LocalAddr returns the local network address. The Addr returned is shared +// by all invocations of LocalAddr, so do not modify it. +func (resp *Response) LocalAddr() net.Addr { + return resp.laddr +} + // Body returns response body. // // The returned body is valid until the response modification. @@ -624,6 +650,8 @@ func (resp *Response) copyToSkipBody(dst *Response) { dst.Reset() resp.Header.CopyTo(&dst.Header) dst.SkipBody = resp.SkipBody + dst.raddr = resp.raddr + dst.laddr = resp.laddr } func swapRequestBody(a, b *Request) { @@ -744,7 +772,7 @@ func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error { // marshal files for k, fvv := range f.File { for _, fv := range fvv { - vw, err := mw.CreateFormFile(k, fv.Filename) + vw, err := mw.CreatePart(fv.Header) if err != nil { return fmt.Errorf("cannot create form file %q (%q): %s", k, fv.Filename, err) } @@ -817,6 +845,8 @@ func (resp *Response) Reset() { resp.Header.Reset() resp.resetSkipHeader() resp.SkipBody = false + resp.raddr = nil + resp.laddr = nil } func (resp *Response) resetSkipHeader() { @@ -844,7 +874,9 @@ func (req *Request) Read(r *bufio.Reader) error { const defaultMaxInMemoryFileSize = 16 * 1024 * 1024 -var errGetOnly = errors.New("non-GET request received") +// ErrGetOnly is returned when server expects only GET requests, +// but some other type of request came (Server.GetOnly option is true). +var ErrGetOnly = errors.New("non-GET request received") // ReadLimitBody reads request from the given r, limiting the body size. // @@ -878,7 +910,7 @@ func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool return err } if getOnly && !req.Header.IsGet() { - return errGetOnly + return ErrGetOnly } if req.MayContinue() { @@ -984,7 +1016,6 @@ func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { bodyBuf.Reset() bodyBuf.B, err = readBody(r, resp.Header.ContentLength(), maxBodySize, bodyBuf.B) if err != nil { - resp.Reset() return err } resp.Header.SetContentLength(len(bodyBuf.B)) @@ -1649,6 +1680,11 @@ func appendBodyFixedSize(r *bufio.Reader, dst []byte, n int) ([]byte, error) { } } +// ErrBrokenChunk is returned when server receives a broken chunked body (Transfer-Encoding: chunked). 
+type ErrBrokenChunk struct { + error +} + func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) { if len(dst) > 0 { panic("BUG: expected zero-length buffer") @@ -1668,7 +1704,9 @@ func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, erro return dst, err } if !bytes.Equal(dst[len(dst)-strCRLFLen:], strCRLF) { - return dst, fmt.Errorf("cannot find crlf at the end of chunk") + return dst, ErrBrokenChunk{ + error: fmt.Errorf("cannot find crlf at the end of chunk"), + } } dst = dst[:len(dst)-strCRLFLen] if chunkSize == 0 { @@ -1685,23 +1723,31 @@ func parseChunkSize(r *bufio.Reader) (int, error) { for { c, err := r.ReadByte() if err != nil { - return -1, fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err) + return -1, ErrBrokenChunk{ + error: fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err), + } } // Skip any trailing whitespace after chunk size. if c == ' ' { continue } if c != '\r' { - return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r') + return -1, ErrBrokenChunk{ + error: fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r'), + } } break } c, err := r.ReadByte() if err != nil { - return -1, fmt.Errorf("cannot read '\n' char at the end of chunk size: %s", err) + return -1, ErrBrokenChunk{ + error: fmt.Errorf("cannot read '\n' char at the end of chunk size: %s", err), + } } if c != '\n' { - return -1, fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\n') + return -1, ErrBrokenChunk{ + error: fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\n'), + } } return n, nil } diff --git a/vendor/github.com/valyala/fasthttp/nocopy.go b/vendor/github.com/valyala/fasthttp/nocopy.go index 32af52e4382..8e9b89a4197 100644 --- a/vendor/github.com/valyala/fasthttp/nocopy.go +++ b/vendor/github.com/valyala/fasthttp/nocopy.go @@ -4,6 +4,8 @@ package fasthttp // so `go vet` gives a warning if this struct is copied. // // See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details. +// and also: https://stackoverflow.com/questions/52494458/nocopy-minimal-example type noCopy struct{} -func (*noCopy) Lock() {} +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} diff --git a/vendor/github.com/valyala/fasthttp/server.go b/vendor/github.com/valyala/fasthttp/server.go index 3c185cd1d90..5fcf20f5f02 100644 --- a/vendor/github.com/valyala/fasthttp/server.go +++ b/vendor/github.com/valyala/fasthttp/server.go @@ -2,6 +2,7 @@ package fasthttp import ( "bufio" + "context" "crypto/tls" "errors" "fmt" @@ -134,6 +135,9 @@ func ListenAndServeTLSEmbed(addr string, certData, keyData []byte, handler Reque // must be limited. type RequestHandler func(ctx *RequestCtx) +// ServeHandler must process tls.Config.NextProto negotiated requests. +type ServeHandler func(c net.Conn) error + // Server implements HTTP server. // // Default Server settings should satisfy the majority of Server users. @@ -147,8 +151,22 @@ type Server struct { noCopy noCopy // Handler for processing incoming requests. + // + // Take into account that no `panic` recovery is done by `fasthttp` (thus any `panic` will take down the entire server). + // Instead the user should use `recover` to handle these situations. Handler RequestHandler + // ErrorHandler for returning a response in case of an error while receiving or parsing the request. 
+ // + // The following is a non-exhaustive list of errors that can be expected as argument: + // * io.EOF + // * io.ErrUnexpectedEOF + // * ErrGetOnly + // * ErrSmallBuffer + // * ErrBodyTooLarge + // * ErrBrokenChunks + ErrorHandler func(ctx *RequestCtx, err error) + // Server name for sending in response headers. // // Default server name is used if left blank. @@ -285,6 +303,11 @@ type Server struct { // * cONTENT-lenGTH -> Content-Length DisableHeaderNamesNormalizing bool + // SleepWhenConcurrencyLimitsExceeded is a duration to be slept of if + // the concurrency limit in exceeded (default [when is 0]: don't sleep + // and accept new connections immidiatelly). + SleepWhenConcurrencyLimitsExceeded time.Duration + // NoDefaultServerHeader, when set to true, causes the default Server header // to be excluded from the Response. // @@ -311,7 +334,8 @@ type Server struct { // By default standard logger from log package is used. Logger Logger - tlsConfig *tls.Config + tlsConfig *tls.Config + nextProtos map[string]ServeHandler concurrency uint32 concurrencyCh chan struct{} @@ -330,6 +354,7 @@ type Server struct { mu sync.Mutex open int32 stop int32 + done chan struct{} } // TimeoutHandler creates RequestHandler, which returns StatusRequestTimeout @@ -542,6 +567,7 @@ func (ctx *RequestCtx) VisitUserValues(visitor func([]byte, interface{})) { } type connTLSer interface { + Handshake() error ConnectionState() tls.ConnectionState } @@ -576,6 +602,15 @@ func (ctx *RequestCtx) TLSConnectionState() *tls.ConnectionState { return &state } +// Conn returns a reference to the underlying net.Conn. +// +// WARNING: Only use this method if you know what you are doing! +// +// Reading from or writing to the returned connection will end badly! +func (ctx *RequestCtx) Conn() net.Conn { + return ctx.c +} + type firstByteReader struct { c net.Conn ch byte @@ -993,6 +1028,7 @@ func (ctx *RequestCtx) SuccessString(contentType, body string) { // * StatusFound (302) // * StatusSeeOther (303) // * StatusTemporaryRedirect (307) +// * StatusPermanentRedirect (308) // // All other statusCode values are replaced by StatusFound (302). // @@ -1021,6 +1057,7 @@ func (ctx *RequestCtx) Redirect(uri string, statusCode int) { // * StatusFound (302) // * StatusSeeOther (303) // * StatusTemporaryRedirect (307) +// * StatusPermanentRedirect (308) // // All other statusCode values are replaced by StatusFound (302). // @@ -1045,7 +1082,8 @@ func (ctx *RequestCtx) redirect(uri []byte, statusCode int) { func getRedirectStatusCode(statusCode int) int { if statusCode == StatusMovedPermanently || statusCode == StatusFound || - statusCode == StatusSeeOther || statusCode == StatusTemporaryRedirect { + statusCode == StatusSeeOther || statusCode == StatusTemporaryRedirect || + statusCode == StatusPermanentRedirect { return statusCode } return StatusFound @@ -1242,6 +1280,29 @@ func (ctx *RequestCtx) TimeoutErrorWithResponse(resp *Response) { ctx.timeoutResponse = respCopy } +// NextProto adds nph to be processed when key is negotiated when TLS +// connection is established. +// +// This function can only be called before the server is started. 
+func (s *Server) NextProto(key string, nph ServeHandler) { + if s.nextProtos == nil { + s.nextProtos = make(map[string]ServeHandler) + } + s.configTLS() + s.tlsConfig.NextProtos = append(s.tlsConfig.NextProtos, key) + s.nextProtos[key] = nph +} + +func (s *Server) getNextProto(c net.Conn) (proto string, err error) { + if tlsConn, ok := c.(connTLSer); ok { + err = tlsConn.Handshake() + if err == nil { + proto = tlsConn.ConnectionState().NegotiatedProtocol + } + } + return +} + // tcpKeepAliveListener sets TCP keep-alive timeouts on accepted // connections. It's used by ListenAndServe, ListenAndServeTLS and // ListenAndServeTLSEmbed so dead TCP connections (e.g. closing laptop mid-download) @@ -1414,13 +1475,7 @@ func (s *Server) AppendCert(certFile, keyFile string) error { return fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err) } - if s.tlsConfig == nil { - s.tlsConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, - PreferServerCipherSuites: true, - } - return nil - } + s.configTLS() s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) return nil @@ -1438,16 +1493,18 @@ func (s *Server) AppendCertEmbed(certData, keyData []byte) error { len(certData), len(keyData), err) } + s.configTLS() + + s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) + return nil +} + +func (s *Server) configTLS() { if s.tlsConfig == nil { s.tlsConfig = &tls.Config{ - Certificates: []tls.Certificate{cert}, PreferServerCipherSuites: true, } - return nil } - - s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) - return nil } // DefaultConcurrency is the maximum number of concurrent connections @@ -1471,6 +1528,7 @@ func (s *Server) Serve(ln net.Listener) error { } s.ln = ln + s.done = make(chan struct{}) } s.mu.Unlock() @@ -1485,6 +1543,13 @@ func (s *Server) Serve(ln net.Listener) error { } wp.Start() + // Count our waiting to accept a connection as an open connection. + // This way we can't get into any weird state where just after accepting + // a connection Shutdown is called which reads open as 0 because it isn't + // incremented yet. + atomic.AddInt32(&s.open, 1) + defer atomic.AddInt32(&s.open, -1) + for { if c, err = acceptConn(s, ln, &lastPerIPErrorTime); err != nil { wp.Stop() @@ -1513,7 +1578,11 @@ func (s *Server) Serve(ln net.Listener) error { // // There is a hope other servers didn't reach their // concurrency limits yet :) - time.Sleep(100 * time.Millisecond) + // + // See also: https://github.com/valyala/fasthttp/pull/485#discussion_r239994990 + if s.SleepWhenConcurrencyLimitsExceeded > 0 { + time.Sleep(s.SleepWhenConcurrencyLimitsExceeded) + } } c = nil } @@ -1541,6 +1610,10 @@ func (s *Server) Shutdown() error { return err } + if s.done != nil { + close(s.done) + } + // Closing the listener will make Serve() call Stop on the worker pool. // Setting .stop to 1 will make serveConn() break out of its loop. // Now we just have to wait until all workers are done. @@ -1680,6 +1753,21 @@ func (s *Server) ServeConn(c net.Conn) error { var errHijacked = errors.New("connection has been hijacked") +// GetCurrentConcurrency returns a number of currently served +// connections. +// +// This function is intended be used by monitoring systems +func (s *Server) GetCurrentConcurrency() uint32 { + return atomic.LoadUint32(&s.concurrency) +} + +// GetOpenConnectionsCount returns a number of opened connections. 
+// +// This function is intended be used by monitoring systems +func (s *Server) GetOpenConnectionsCount() int32 { + return atomic.LoadInt32(&s.open) - 1 +} + func (s *Server) getConcurrency() int { n := s.Concurrency if n <= 0 { @@ -1703,6 +1791,15 @@ const DefaultMaxRequestBodySize = 4 * 1024 * 1024 func (s *Server) serveConn(c net.Conn) error { defer atomic.AddInt32(&s.open, -1) + if proto, err := s.getNextProto(c); err != nil { + return err + } else { + handler, ok := s.nextProtos[proto] + if ok { + return handler(c) + } + } + var serverName []byte if !s.NoDefaultServerHeader { serverName = s.getServerName() @@ -1734,11 +1831,6 @@ func (s *Server) serveConn(c net.Conn) error { isHTTP11 bool ) for { - if atomic.LoadInt32(&s.stop) == 1 { - err = nil - break - } - connRequestNum++ ctx.time = currentTime @@ -1767,11 +1859,11 @@ func (s *Server) serveConn(c net.Conn) error { } // reading Headers and Body err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly) - if br.Buffered() > 0 { + if err == nil { // If we read any bytes off the wire, we're active. s.setState(c, StateActive) } - if br.Buffered() == 0 || err != nil { + if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { releaseReader(s, br) br = nil } @@ -1783,8 +1875,15 @@ func (s *Server) serveConn(c net.Conn) error { if err != nil { if err == io.EOF { err = nil + } else if connRequestNum > 1 && err == errNothingRead { + // This is not the first request and we haven't read a single byte + // of a new request yet. This means it's just a keep-alive connection + // closing down either because the remote closed it or because + // or a read timeout on our side. Either way just close the connection + // and don't return any error response. + err = nil } else { - bw = writeErrorResponse(bw, ctx, serverName, err) + bw = s.writeErrorResponse(bw, ctx, serverName, err) } break } @@ -1798,23 +1897,25 @@ func (s *Server) serveConn(c net.Conn) error { } bw.Write(strResponseContinue) err = bw.Flush() - releaseWriter(s, bw) - bw = nil if err != nil { break } + if s.ReduceMemoryUsage { + releaseWriter(s, bw) + bw = nil + } // Read request body. if br == nil { br = acquireReader(ctx) } err = ctx.Request.ContinueReadBody(br, maxRequestBodySize) - if br.Buffered() == 0 || err != nil { + if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { releaseReader(s, br) br = nil } if err != nil { - bw = writeErrorResponse(bw, ctx, serverName, err) + bw = s.writeErrorResponse(bw, ctx, serverName, err) break } } @@ -1879,21 +1980,27 @@ func (s *Server) serveConn(c net.Conn) error { break } - if br == nil || connectionClose { + // Only flush the writer if we don't have another request in the pipeline. + // This is a big of an ugly optimization for https://www.techempower.com/benchmarks/ + // This benchmark will send 16 pipelined requests. It is faster to pack as many responses + // in a TCP packet and send it back at once than waiting for a flush every request. + // In real world circumstances this behaviour could be argued as being wrong. 
+ if br == nil || br.Buffered() == 0 || connectionClose { err = bw.Flush() - releaseWriter(s, bw) - bw = nil if err != nil { break } - if connectionClose { - break - } + } + if connectionClose { + break + } + if s.ReduceMemoryUsage { + releaseWriter(s, bw) + bw = nil } if hijackHandler != nil { - var hjr io.Reader - hjr = c + var hjr io.Reader = c if br != nil { hjr = br br = nil @@ -1903,11 +2010,11 @@ func (s *Server) serveConn(c net.Conn) error { } if bw != nil { err = bw.Flush() - releaseWriter(s, bw) - bw = nil if err != nil { break } + releaseWriter(s, bw) + bw = nil } c.SetReadDeadline(zeroTime) c.SetWriteDeadline(zeroTime) @@ -1919,6 +2026,11 @@ func (s *Server) serveConn(c net.Conn) error { currentTime = time.Now() s.setState(c, StateIdle) + + if atomic.LoadInt32(&s.stop) == 1 { + err = nil + break + } } if br != nil { @@ -2190,6 +2302,51 @@ func (ctx *RequestCtx) Init(req *Request, remoteAddr net.Addr, logger Logger) { req.CopyTo(&ctx.Request) } +// Deadline returns the time when work done on behalf of this context +// should be canceled. Deadline returns ok==false when no deadline is +// set. Successive calls to Deadline return the same results. +// +// This method always returns 0, false and is only present to make +// RequestCtx implement the context interface. +func (ctx *RequestCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +// Done returns a channel that's closed when work done on behalf of this +// context should be canceled. Done may return nil if this context can +// never be canceled. Successive calls to Done return the same value. +func (ctx *RequestCtx) Done() <-chan struct{} { + return ctx.s.done +} + +// Err returns a non-nil error value after Done is closed, +// successive calls to Err return the same error. +// If Done is not yet closed, Err returns nil. +// If Done is closed, Err returns a non-nil error explaining why: +// Canceled if the context was canceled (via server Shutdown) +// or DeadlineExceeded if the context's deadline passed. +func (ctx *RequestCtx) Err() error { + select { + case <-ctx.s.done: + return context.Canceled + default: + return nil + } +} + +// Value returns the value associated with this context for key, or nil +// if no value is associated with key. Successive calls to Value with +// the same key returns the same result. +// +// This method is present to make RequestCtx implement the context interface. 
+// This method is the same as calling ctx.UserValue(key) +func (ctx *RequestCtx) Value(key interface{}) interface{} { + if keyString, ok := key.(string); ok { + return ctx.UserValue(keyString) + } + return nil +} + var fakeServer = &Server{ // Initialize concurrencyCh for TimeoutHandler concurrencyCh: make(chan struct{}, DefaultConcurrency), @@ -2263,12 +2420,22 @@ func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) { serverDate.Load(), len(msg), msg) } -func writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, serverName []byte, err error) *bufio.Writer { +func defaultErrorHandler(ctx *RequestCtx, err error) { if _, ok := err.(*ErrSmallBuffer); ok { ctx.Error("Too big request header", StatusRequestHeaderFieldsTooLarge) } else { ctx.Error("Error when parsing request", StatusBadRequest) } +} + +func (s *Server) writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, serverName []byte, err error) *bufio.Writer { + errorHandler := defaultErrorHandler + if s.ErrorHandler != nil { + errorHandler = s.ErrorHandler + } + + errorHandler(ctx, err) + if serverName != nil { ctx.Response.Header.SetServerBytes(serverName) } diff --git a/vendor/github.com/valyala/fasthttp/strings.go b/vendor/github.com/valyala/fasthttp/strings.go index afd70b40087..6d832310d75 100644 --- a/vendor/github.com/valyala/fasthttp/strings.go +++ b/vendor/github.com/valyala/fasthttp/strings.go @@ -53,18 +53,20 @@ var ( strRange = []byte("Range") strContentRange = []byte("Content-Range") - strCookieExpires = []byte("expires") - strCookieDomain = []byte("domain") - strCookiePath = []byte("path") - strCookieHTTPOnly = []byte("HttpOnly") - strCookieSecure = []byte("secure") - strCookieMaxAge = []byte("max-age") + strCookieExpires = []byte("expires") + strCookieDomain = []byte("domain") + strCookiePath = []byte("path") + strCookieHTTPOnly = []byte("HttpOnly") + strCookieSecure = []byte("secure") + strCookieMaxAge = []byte("max-age") + strCookieSameSite = []byte("SameSite") + strCookieSameSiteLax = []byte("Lax") + strCookieSameSiteStrict = []byte("Strict") strClose = []byte("close") strGzip = []byte("gzip") strDeflate = []byte("deflate") strKeepAlive = []byte("keep-alive") - strKeepAliveCamelCase = []byte("Keep-Alive") strUpgrade = []byte("Upgrade") strChunked = []byte("chunked") strIdentity = []byte("identity") diff --git a/vendor/github.com/valyala/fasthttp/tcpdialer.go b/vendor/github.com/valyala/fasthttp/tcpdialer.go index e31fd75857a..6a5cd3a1f5a 100644 --- a/vendor/github.com/valyala/fasthttp/tcpdialer.go +++ b/vendor/github.com/valyala/fasthttp/tcpdialer.go @@ -33,7 +33,7 @@ import ( // * foo.bar:80 // * aaa.com:8080 func Dial(addr string) (net.Conn, error) { - return getDialer(DefaultDialTimeout, false)(addr) + return defaultDialer.Dial(addr) } // DialTimeout dials the given TCP addr using tcp4 using the given timeout. @@ -58,7 +58,7 @@ func Dial(addr string) (net.Conn, error) { // * foo.bar:80 // * aaa.com:8080 func DialTimeout(addr string, timeout time.Duration) (net.Conn, error) { - return getDialer(timeout, false)(addr) + return defaultDialer.DialTimeout(addr, timeout) } // DialDualStack dials the given TCP addr using both tcp4 and tcp6. 
@@ -86,7 +86,7 @@ func DialTimeout(addr string, timeout time.Duration) (net.Conn, error) { // * foo.bar:80 // * aaa.com:8080 func DialDualStack(addr string) (net.Conn, error) { - return getDialer(DefaultDialTimeout, true)(addr) + return defaultDialer.DialDualStack(addr) } // DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6 @@ -112,45 +112,22 @@ func DialDualStack(addr string) (net.Conn, error) { // * foo.bar:80 // * aaa.com:8080 func DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) { - return getDialer(timeout, true)(addr) -} - -func getDialer(timeout time.Duration, dualStack bool) DialFunc { - if timeout <= 0 { - timeout = DefaultDialTimeout - } - timeoutRounded := int(timeout.Seconds()*10 + 9) - - m := dialMap - if dualStack { - m = dialDualStackMap - } - - dialMapLock.Lock() - d := m[timeoutRounded] - if d == nil { - dialer := dialerStd - if dualStack { - dialer = dialerDualStack - } - d = dialer.NewDial(timeout) - m[timeoutRounded] = d - } - dialMapLock.Unlock() - return d + return defaultDialer.DialDualStackTimeout(addr, timeout) } var ( - dialerStd = &tcpDialer{} - dialerDualStack = &tcpDialer{DualStack: true} - - dialMap = make(map[int]DialFunc) - dialDualStackMap = make(map[int]DialFunc) - dialMapLock sync.Mutex + defaultDialer = &TCPDialer{Concurrency: 1000} ) -type tcpDialer struct { - DualStack bool +// TCPDialer contains options to control a group of Dial calls. +type TCPDialer struct { + // Concurrency controls the maximum number of concurrent Dails + // that can be performed using this object. + // Setting this to 0 means unlimited. + // + // WARNING: This can only be changed before the first Dial. + // Changes made after the first Dial will not affect anything. + Concurrency int tcpAddrsLock sync.Mutex tcpAddrsMap map[string]*tcpAddrEntry @@ -160,41 +137,145 @@ type tcpDialer struct { once sync.Once } -const maxDialConcurrency = 1000 +// Dial dials the given TCP addr using tcp4. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialTimeout for customizing dial timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) Dial(addr string) (net.Conn, error) { + return d.dial(addr, false, DefaultDialTimeout) +} + +// DialTimeout dials the given TCP addr using tcp4 using the given timeout. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. 
+// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) DialTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return d.dial(addr, false, timeout) +} + +// DialDualStack dials the given TCP addr using both tcp4 and tcp6. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialDualStackTimeout for custom dial +// timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) DialDualStack(addr string) (net.Conn, error) { + return d.dial(addr, true, DefaultDialTimeout) +} + +// DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6 +// using the given timeout. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. 
Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return d.dial(addr, true, timeout) +} -func (d *tcpDialer) NewDial(timeout time.Duration) DialFunc { +func (d *TCPDialer) dial(addr string, dualStack bool, timeout time.Duration) (net.Conn, error) { d.once.Do(func() { - d.concurrencyCh = make(chan struct{}, maxDialConcurrency) + if d.Concurrency > 0 { + d.concurrencyCh = make(chan struct{}, d.Concurrency) + } d.tcpAddrsMap = make(map[string]*tcpAddrEntry) go d.tcpAddrsClean() }) - return func(addr string) (net.Conn, error) { - addrs, idx, err := d.getTCPAddrs(addr) - if err != nil { - return nil, err - } - network := "tcp4" - if d.DualStack { - network = "tcp" - } + addrs, idx, err := d.getTCPAddrs(addr, dualStack) + if err != nil { + return nil, err + } + network := "tcp4" + if dualStack { + network = "tcp" + } - var conn net.Conn - n := uint32(len(addrs)) - deadline := time.Now().Add(timeout) - for n > 0 { - conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh) - if err == nil { - return conn, nil - } - if err == ErrDialTimeout { - return nil, err - } - idx++ - n-- + var conn net.Conn + n := uint32(len(addrs)) + deadline := time.Now().Add(timeout) + for n > 0 { + conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh) + if err == nil { + return conn, nil } - return nil, err + if err == ErrDialTimeout { + return nil, err + } + idx++ + n-- } + return nil, err } func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) { @@ -203,28 +284,24 @@ func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyC return nil, ErrDialTimeout } - select { - case concurrencyCh <- struct{}{}: - default: - tc := acquireTimer(timeout) - isTimeout := false + if concurrencyCh != nil { select { case concurrencyCh <- struct{}{}: - case <-tc.C: - isTimeout = true - } - releaseTimer(tc) - if isTimeout { - return nil, ErrDialTimeout + default: + tc := AcquireTimer(timeout) + isTimeout := false + select { + case concurrencyCh <- struct{}{}: + case <-tc.C: + isTimeout = true + } + ReleaseTimer(tc) + if isTimeout { + return nil, ErrDialTimeout + } } } - timeout = -time.Since(deadline) - if timeout <= 0 { - <-concurrencyCh - return nil, ErrDialTimeout - } - chv := dialResultChanPool.Get() if chv == nil { chv = make(chan dialResult, 1) @@ -234,7 +311,9 @@ func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyC var dr dialResult dr.conn, dr.err = net.DialTCP(network, nil, addr) ch <- dr - <-concurrencyCh + if concurrencyCh != nil { + <-concurrencyCh + } }() var ( @@ -242,7 +321,7 @@ func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyC err error ) - tc := acquireTimer(timeout) + tc := AcquireTimer(timeout) select { case dr := <-ch: conn = dr.conn @@ -251,7 +330,7 @@ func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyC case <-tc.C: err = ErrDialTimeout } - releaseTimer(tc) + ReleaseTimer(tc) return conn, err } @@ -282,7 +361,7 @@ type tcpAddrEntry struct { // by Dial* functions. 
const DefaultDNSCacheDuration = time.Minute -func (d *tcpDialer) tcpAddrsClean() { +func (d *TCPDialer) tcpAddrsClean() { expireDuration := 2 * DefaultDNSCacheDuration for { time.Sleep(time.Second) @@ -298,7 +377,7 @@ func (d *tcpDialer) tcpAddrsClean() { } } -func (d *tcpDialer) getTCPAddrs(addr string) ([]net.TCPAddr, uint32, error) { +func (d *TCPDialer) getTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, uint32, error) { d.tcpAddrsLock.Lock() e := d.tcpAddrsMap[addr] if e != nil && !e.pending && time.Since(e.resolveTime) > DefaultDNSCacheDuration { @@ -308,7 +387,7 @@ func (d *tcpDialer) getTCPAddrs(addr string) ([]net.TCPAddr, uint32, error) { d.tcpAddrsLock.Unlock() if e == nil { - addrs, err := resolveTCPAddrs(addr, d.DualStack) + addrs, err := resolveTCPAddrs(addr, dualStack) if err != nil { d.tcpAddrsLock.Lock() e = d.tcpAddrsMap[addr] diff --git a/vendor/github.com/valyala/fasthttp/timer.go b/vendor/github.com/valyala/fasthttp/timer.go index bb12acb7e5e..4e919384ed4 100644 --- a/vendor/github.com/valyala/fasthttp/timer.go +++ b/vendor/github.com/valyala/fasthttp/timer.go @@ -26,7 +26,12 @@ func stopTimer(t *time.Timer) { } } -func acquireTimer(timeout time.Duration) *time.Timer { +// AcquireTimer returns a time.Timer from the pool and updates it to +// send the current time on its channel after at least timeout. +// +// The returned Timer may be returned to the pool with ReleaseTimer +// when no longer needed. This allows reducing GC load. +func AcquireTimer(timeout time.Duration) *time.Timer { v := timerPool.Get() if v == nil { return time.NewTimer(timeout) @@ -36,7 +41,12 @@ func acquireTimer(timeout time.Duration) *time.Timer { return t } -func releaseTimer(t *time.Timer) { +// ReleaseTimer returns the time.Timer acquired via AcquireTimer to the pool +// and prevents the Timer from firing. +// +// Do not access the released time.Timer or read from it's channel otherwise +// data races may occur. +func ReleaseTimer(t *time.Timer) { stopTimer(t) timerPool.Put(t) } diff --git a/vendor/github.com/valyala/fasthttp/workerpool.go b/vendor/github.com/valyala/fasthttp/workerpool.go index 0dd581ac952..bfd297c31eb 100644 --- a/vendor/github.com/valyala/fasthttp/workerpool.go +++ b/vendor/github.com/valyala/fasthttp/workerpool.go @@ -16,7 +16,7 @@ import ( type workerPool struct { // Function for serving server connections. // It must leave c unclosed. - WorkerFunc func(c net.Conn) error + WorkerFunc ServeHandler MaxWorkersCount int From a8e2d6d7eddf16e4699d95ca8794e723463a2a6b Mon Sep 17 00:00:00 2001 From: Gal Topper Date: Tue, 11 Jun 2019 22:32:00 +0300 Subject: [PATCH 2/2] IG-12303: Deleted references to v3io-go-http. Ran go mod vendor. (#44) * Deleted references to v3io-go-http. Ran go mod vendor. 
* v3io-tsdb 0.9.0 -> v0.9.2 --- go.mod | 5 +- go.sum | 18 + storage/tsdb/promtsdb.go | 5 +- storage/tsdb/v3io.go | 2 +- .../github.com/v3io/v3io-go-http/.gitignore | 15 - vendor/github.com/v3io/v3io-go-http/LICENSE | 201 ------ vendor/github.com/v3io/v3io-go-http/README.md | 1 - .../github.com/v3io/v3io-go-http/container.go | 144 ---- .../github.com/v3io/v3io-go-http/context.go | 116 ---- vendor/github.com/v3io/v3io-go-http/error.go | 27 - vendor/github.com/v3io/v3io-go-http/item.go | 35 - .../v3io/v3io-go-http/itemscursor.go | 11 - .../github.com/v3io/v3io-go-http/session.go | 72 -- .../v3io/v3io-go-http/synccontainer.go | 629 ------------------ .../v3io/v3io-go-http/synccontext.go | 36 - .../v3io/v3io-go-http/syncitemscursor.go | 143 ---- .../v3io/v3io-go-http/syncsession.go | 146 ---- vendor/github.com/v3io/v3io-go-http/types.go | 276 -------- vendor/github.com/v3io/v3io-go-http/utils.go | 11 - vendor/golang.org/x/sys/unix/syscall_linux.go | 14 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 6 + .../x/sys/unix/ztypes_linux_amd64.go | 6 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 6 + .../x/sys/unix/ztypes_linux_arm64.go | 6 + .../x/sys/unix/ztypes_linux_mips.go | 6 + .../x/sys/unix/ztypes_linux_mips64.go | 6 + .../x/sys/unix/ztypes_linux_mips64le.go | 6 + .../x/sys/unix/ztypes_linux_mipsle.go | 6 + .../x/sys/unix/ztypes_linux_ppc64.go | 6 + .../x/sys/unix/ztypes_linux_ppc64le.go | 6 + .../x/sys/unix/ztypes_linux_riscv64.go | 6 + .../x/sys/unix/ztypes_linux_s390x.go | 6 + .../x/sys/unix/ztypes_linux_sparc64.go | 6 + .../x/sys/windows/security_windows.go | 2 +- vendor/modules.txt | 20 +- 35 files changed, 127 insertions(+), 1880 deletions(-) delete mode 100644 vendor/github.com/v3io/v3io-go-http/.gitignore delete mode 100644 vendor/github.com/v3io/v3io-go-http/LICENSE delete mode 100644 vendor/github.com/v3io/v3io-go-http/README.md delete mode 100644 vendor/github.com/v3io/v3io-go-http/container.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/context.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/error.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/item.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/itemscursor.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/session.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/synccontainer.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/synccontext.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/syncitemscursor.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/syncsession.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/types.go delete mode 100644 vendor/github.com/v3io/v3io-go-http/utils.go diff --git a/go.mod b/go.mod index 18fd4ac7929..c6f3b7d2500 100644 --- a/go.mod +++ b/go.mod @@ -85,8 +85,7 @@ require ( github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04 github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff // indirect github.com/stretchr/testify v1.3.0 - github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2 - github.com/v3io/v3io-tsdb v0.9.0 + github.com/v3io/v3io-tsdb v0.9.2 go.opencensus.io v0.19.2 // indirect golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 @@ -115,5 +114,3 @@ replace labix.org/v2/mgo => github.com/go-mgo/mgo v0.0.0-20180705113738-7446a034 replace launchpad.net/gocheck => github.com/go-check/check v0.0.0-20180628173108-788fd78401277ebd861206a03c884797c6ec5541 replace github.com/v3io/frames => github.com/v3io/frames 
v0.0.0-20190328123118-1dad1ff610509e7b087d9cd390ed1b452caecf15 - -replace github.com/v3io/v3io-tsdb => github.com/v3io/v3io-tsdb v0.9.0 diff --git a/go.sum b/go.sum index 41bc6b0623e..cceddd114dd 100644 --- a/go.sum +++ b/go.sum @@ -207,9 +207,13 @@ github.com/lightstep/lightstep-tracer-go v0.15.6 h1:D0GGa7afJ7GcQvu5as6ssLEEKYXv github.com/lightstep/lightstep-tracer-go v0.15.6/go.mod h1:6AMpwZpsyCFwSovxzM78e+AsYxE8sGwiM6C3TytaWeI= github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -237,7 +241,10 @@ github.com/montanaflynn/stats v0.5.0 h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLg github.com/montanaflynn/stats v0.5.0/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nuclio/errors v0.0.1 h1:JoADBDnhRKjW05Npu5CLS27Peo7gx+QZcNrLwINV6UY= +github.com/nuclio/errors v0.0.1/go.mod h1:it2rUqDarIL8PasLYZo0Q1Ebsx4NRPM+OyYYakgNyrQ= github.com/nuclio/logger v0.0.0-20180410162335-ccc5ab971395/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= +github.com/nuclio/logger v0.0.0-20190303161055-fc1e4b16d127/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= github.com/nuclio/logger v0.0.1 h1:e+vT/Ug65RC+u0QX2J+lq3P57ZBwJ1ZA6Q2LCEcViwE= github.com/nuclio/logger v0.0.1/go.mod h1:ttazNAqTxKjQ7XrGDZxecumGa9KCIuJh88gzFY1mRXo= github.com/nuclio/nuclio-sdk-go v0.0.0-20190205170814-3b507fbd0324/go.mod h1:NqMgotiF6Y0Ho4+i5AvJhH3FRKAyL4IMaMv/eoUOkKQ= @@ -269,6 +276,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8 h1:1N/m7VjDY1Pd30Uwv6bLttZVFQm3n8RUK9Ylf2J+4a4= github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8/go.mod h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= +github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8 h1:WqLgmr/wj9TO5Sc6oYPQRAJBxuHE0NTeuVeFnT+FZVo= +github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8/go.mod h1:6FWOCx06uh50GClv8S2cfk3asqTJs3qq3ZNRtLZE77I= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea 
h1:sKwxy1H95npauwu8vtF95vG/syrL0p8fSZo/XlDg5gk= @@ -343,14 +352,21 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/v3io/frames v0.0.0-20190328123118-1dad1ff610509e7b087d9cd390ed1b452caecf15 h1:Y/ABiIlS3tqLHsj0DZkkpMyvdoCmU1nwgQ0On6n6imk= github.com/v3io/frames v0.0.0-20190328123118-1dad1ff610509e7b087d9cd390ed1b452caecf15/go.mod h1:6aKW4Wl4A+gQhXH0JRCVOLgwvcrLyk+fqEpemuie094= +github.com/v3io/v3io-go v0.0.0-20180415000000-1486c75b0e590a14580f7d9b6cef7a944a231ca7 h1:J+ps6exCjowNidrtawSQglJQpKrJ6v8UjBVTNrRTpMs= +github.com/v3io/v3io-go v0.0.0-20180415000000-1486c75b0e590a14580f7d9b6cef7a944a231ca7/go.mod h1:MHc+d/Jg/y8lV4B9sgwTvuS3tEE9wS+kqtU0+D0Sr78= github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2 h1:NJc63wM25iS+ci5z7LVwjWD4QM0QpTQw/fovKzatss0= github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2/go.mod h1:GXYcR9MxgfbE3BJdkXki5EclvtS8Nxu2RQNLA8hMMog= +github.com/v3io/v3io-tsdb v0.0.0-20190328071546-4e85f3df2d205fc7368d54184bb2ceff949ab4bd/go.mod h1:A+5yKC16QxLf+Fy5v7VvIxSw+jwsKHLhUS7dCYFDLAA= github.com/v3io/v3io-tsdb v0.9.0 h1:HcopU5LpwAipvQ7D7jBZ2nrMNKycygttKBjnRwNzwZA= github.com/v3io/v3io-tsdb v0.9.0/go.mod h1:5GOd2S8a0EtKkQAjp7Ke3o+oxZKdUMJFVEU+mk9ltdE= +github.com/v3io/v3io-tsdb v0.9.2 h1:9taOa/SrAA/cWMgTLO8M12FNqgH4SyhrBzKzdCRp/Z0= +github.com/v3io/v3io-tsdb v0.9.2/go.mod h1:cPLq5KvhxzdvRaVzy8HTDxousSFUaSRBRdhtd131JgU= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.0.0 h1:BwIoZQbBsTo3v2F5lz5Oy3TlTq4wLKTLV260EVTEWco= github.com/valyala/fasthttp v1.0.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/fasthttp v1.2.0 h1:dzZJf2IuMiclVjdw0kkT+f9u4YdrapbNyGAN47E/qnk= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ= github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY= @@ -412,6 +428,8 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/time v0.0.0-20170424234030-8be79e1e0910 h1:bCMaBn7ph495H+x72gEvgcv+mDRd9dElbzo/mVCMxX4= diff --git a/storage/tsdb/promtsdb.go b/storage/tsdb/promtsdb.go index e6e8a2598a1..bddaa2e0ee3 100644 --- a/storage/tsdb/promtsdb.go +++ b/storage/tsdb/promtsdb.go @@ -9,7 +9,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/prometheus/pkg/labels" 
"github.com/prometheus/prometheus/storage" - "github.com/v3io/v3io-go-http" "github.com/v3io/v3io-tsdb/pkg/aggregate" tsdbAppender "github.com/v3io/v3io-tsdb/pkg/appender" "github.com/v3io/v3io-tsdb/pkg/config" @@ -25,7 +24,7 @@ type V3ioPromAdapter struct { useV3ioAggregations bool // Indicate whether or not to use v3io aggregations by default (passed from prometheus.yml) } -func NewV3ioProm(cfg *config.V3ioConfig, container *v3io.Container, logger logger.Logger) (*V3ioPromAdapter, error) { +func NewV3ioProm(cfg *config.V3ioConfig, logger logger.Logger) (*V3ioPromAdapter, error) { if logger == nil { newLogger, err := utils.NewLogger(cfg.LogLevel) @@ -35,7 +34,7 @@ func NewV3ioProm(cfg *config.V3ioConfig, container *v3io.Container, logger logge logger = newLogger } - adapter, err := tsdb.NewV3ioAdapter(cfg, container, logger) + adapter, err := tsdb.NewV3ioAdapter(cfg, nil, logger) newAdapter := V3ioPromAdapter{db: adapter, logger: logger.GetChild("v3io-prom-adapter")} return &newAdapter, err } diff --git a/storage/tsdb/v3io.go b/storage/tsdb/v3io.go index 7dd216c592b..e67bb7d4e84 100644 --- a/storage/tsdb/v3io.go +++ b/storage/tsdb/v3io.go @@ -233,7 +233,7 @@ func (s *ReadyStorage) createV3ioPromAdapater(configPath string) (*V3ioPromAdapt s.logger.Log("msg", "Creating v3io adapter", "config", string(jsonLoadedConfig)) } - adapter, err := NewV3ioProm(loadedConfig, nil, nil) + adapter, err := NewV3ioProm(loadedConfig, nil) adapter.SetUseV3ioAggregations(s.useV3ioAggregations) if err != nil { return nil, nil, err diff --git a/vendor/github.com/v3io/v3io-go-http/.gitignore b/vendor/github.com/v3io/v3io-go-http/.gitignore deleted file mode 100644 index 59f1f3e8618..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ -.idea/ diff --git a/vendor/github.com/v3io/v3io-go-http/LICENSE b/vendor/github.com/v3io/v3io-go-http/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/v3io/v3io-go-http/README.md b/vendor/github.com/v3io/v3io-go-http/README.md deleted file mode 100644 index aedb669f124..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/README.md +++ /dev/null @@ -1 +0,0 @@ -# v3io diff --git a/vendor/github.com/v3io/v3io-go-http/container.go b/vendor/github.com/v3io/v3io-go-http/container.go deleted file mode 100644 index 5c713761c3f..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/container.go +++ /dev/null @@ -1,144 +0,0 @@ -package v3io - -import ( - "sync/atomic" - "time" - - "github.com/nuclio/logger" -) - -type Container struct { - logger logger.Logger - session *Session - Sync *SyncContainer -} - -func newContainer(parentLogger logger.Logger, session *Session, alias string) (*Container, error) { - newSyncContainer, err := newSyncContainer(parentLogger, session.Sync, alias) - if err != nil { - return nil, err - } - - return &Container{ - logger: parentLogger.GetChild(alias), - session: session, - Sync: newSyncContainer, - }, nil -} - -func (c *Container) ListAll(input *ListAllInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) ListBucket(input *ListBucketInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) GetObject(input *GetObjectInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) DeleteObject(input *DeleteObjectInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) PutObject(input *PutObjectInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) GetItem(input *GetItemInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) GetItems(input *GetItemsInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) PutItem(input *PutItemInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) PutItems(input *PutItemsInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) UpdateItem(input *UpdateItemInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) CreateStream(input *CreateStreamInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) DeleteStream(input *DeleteStreamInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) SeekShard(input *SeekShardInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) PutRecords(input *PutRecordsInput, - context interface{}, - responseChan chan 
*Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) GetRecords(input *GetRecordsInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - return c.sendRequest(input, context, responseChan) -} - -func (c *Container) sendRequest(input interface{}, - context interface{}, - responseChan chan *Response) (*Request, error) { - id := atomic.AddUint64(&requestID, 1) - - // create a request/response (TODO: from pool) - requestResponse := &RequestResponse{ - Request: Request{ - ID: id, - container: c, - Input: input, - Context: context, - responseChan: responseChan, - SendTimeNanoseconds: time.Now().UnixNano(), - }, - } - - // point to container - requestResponse.Request.requestResponse = requestResponse - - if err := c.session.sendRequest(&requestResponse.Request); err != nil { - return nil, err - } - - return &requestResponse.Request, nil -} diff --git a/vendor/github.com/v3io/v3io-go-http/context.go b/vendor/github.com/v3io/v3io-go-http/context.go deleted file mode 100644 index f62dd286a55..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/context.go +++ /dev/null @@ -1,116 +0,0 @@ -package v3io - -import ( - "github.com/nuclio/logger" -) - -type Context struct { - logger logger.Logger - Sync *SyncContext - requestChan chan *Request - numWorkers int -} - -type SessionConfig struct { - Username string - Password string - Label string - SessionKey string -} - -func NewContext(parentLogger logger.Logger, clusterURL string, numWorkers int) (*Context, error) { - newSyncContext, err := newSyncContext(parentLogger, clusterURL) - if err != nil { - return nil, err - } - - newContext := &Context{ - logger: parentLogger.GetChild("v3io"), - Sync: newSyncContext, - requestChan: make(chan *Request, 1024), - numWorkers: numWorkers, - } - - for workerIndex := 0; workerIndex < numWorkers; workerIndex++ { - go newContext.workerEntry(workerIndex) - } - - return newContext, nil -} - -func (c *Context) NewSession(username string, password string, label string) (*Session, error) { - return newSession(c.logger, c, username, password, label, "") -} - -func (c *Context) NewSessionFromConfig(sc *SessionConfig) (*Session, error) { - return newSession(c.logger, c, sc.Username, sc.Password, sc.Label, sc.SessionKey) -} - -func (c *Context) sendRequest(request *Request) error { - - // send the request to the request channel - c.requestChan <- request - - return nil -} - -func (c *Context) workerEntry(workerIndex int) { - for { - var response *Response - var err error - - // read a request - request := <-c.requestChan - - // according to the input type - switch typedInput := request.Input.(type) { - case *ListAllInput: - response, err = request.session.Sync.ListAll() - case *ListBucketInput: - response, err = request.container.Sync.ListBucket(typedInput) - case *GetObjectInput: - response, err = request.container.Sync.GetObject(typedInput) - case *PutObjectInput: - err = request.container.Sync.PutObject(typedInput) - case *DeleteObjectInput: - err = request.container.Sync.DeleteObject(typedInput) - case *GetItemInput: - response, err = request.container.Sync.GetItem(typedInput) - case *GetItemsInput: - response, err = request.container.Sync.GetItems(typedInput) - case *PutItemInput: - err = request.container.Sync.PutItem(typedInput) - case *PutItemsInput: - response, err = request.container.Sync.PutItems(typedInput) - case *UpdateItemInput: - err = request.container.Sync.UpdateItem(typedInput) - case *CreateStreamInput: - err = 
request.container.Sync.CreateStream(typedInput) - case *DeleteStreamInput: - err = request.container.Sync.DeleteStream(typedInput) - case *SeekShardInput: - response, err = request.container.Sync.SeekShard(typedInput) - case *PutRecordsInput: - response, err = request.container.Sync.PutRecords(typedInput) - case *GetRecordsInput: - response, err = request.container.Sync.GetRecords(typedInput) - default: - c.logger.ErrorWith("Got unexpected request type", "request", request) - } - - // TODO: have the sync interfaces somehow use the pre-allocated response - if response != nil { - request.requestResponse.Response = *response - } - - response = &request.requestResponse.Response - - response.ID = request.ID - response.Error = err - response.requestResponse = request.requestResponse - response.Context = request.Context - - // write to response channel - request.responseChan <- &request.requestResponse.Response - } -} diff --git a/vendor/github.com/v3io/v3io-go-http/error.go b/vendor/github.com/v3io/v3io-go-http/error.go deleted file mode 100644 index 7732e31cce6..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/error.go +++ /dev/null @@ -1,27 +0,0 @@ -package v3io - -import ( - "fmt" - - "errors" -) - -// ErrorWithStatusCode is an error that holds a status code -type ErrorWithStatusCode struct { - error - statusCode int - message string -} - -// NewErrorWithStatusCode creates an error that holds a status code -func NewErrorWithStatusCode(statusCode int, format string, args ...interface{}) ErrorWithStatusCode { - return ErrorWithStatusCode{ - error: errors.New(fmt.Sprintf(format, args...)), - statusCode: statusCode, - } -} - -// StatusCode returns the status code of the error -func (e *ErrorWithStatusCode) StatusCode() int { - return e.statusCode -} diff --git a/vendor/github.com/v3io/v3io-go-http/item.go b/vendor/github.com/v3io/v3io-go-http/item.go deleted file mode 100644 index 0cbc1737170..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/item.go +++ /dev/null @@ -1,35 +0,0 @@ -package v3io - -import "strconv" - -type Item map[string]interface{} - -func (i Item) GetField(name string) interface{} { - return i[name] -} - -func (i Item) GetFieldInt(name string) (int, error) { - switch typedField := i[name].(type) { - case int: - return typedField, nil - case float64: - return int(typedField), nil - case string: - return strconv.Atoi(typedField) - default: - return 0, ErrInvalidTypeConversion - } -} - -func (i Item) GetFieldString(name string) (string, error) { - switch typedField := i[name].(type) { - case int: - return strconv.Itoa(typedField), nil - case float64: - return strconv.FormatFloat(typedField, 'E', -1, 64), nil - case string: - return typedField, nil - default: - return "", ErrInvalidTypeConversion - } -} diff --git a/vendor/github.com/v3io/v3io-go-http/itemscursor.go b/vendor/github.com/v3io/v3io-go-http/itemscursor.go deleted file mode 100644 index 57f13b49e0f..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/itemscursor.go +++ /dev/null @@ -1,11 +0,0 @@ -package v3io - -type ItemsCursor struct { - Sync *SyncItemsCursor -} - -func newItemsCursor(container *Container, input *GetItemsInput, response *Response) *ItemsCursor { - return &ItemsCursor{} -} - -// TODO: support Next and All() for async as well diff --git a/vendor/github.com/v3io/v3io-go-http/session.go b/vendor/github.com/v3io/v3io-go-http/session.go deleted file mode 100644 index 764df4bf7af..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/session.go +++ /dev/null @@ -1,72 +0,0 @@ -package v3io - 
-import ( - "sync/atomic" - - "github.com/nuclio/logger" -) - -// TODO: Request should have a global pool -var requestID uint64 = 0 - -type Session struct { - Sync *SyncSession - logger logger.Logger - context *Context -} - -func newSession(parentLogger logger.Logger, - context *Context, - username string, - password string, - label string, - sessionKey string) (*Session, error) { - - newSyncSession, err := newSyncSession(parentLogger, context.Sync, username, password, label, sessionKey) - if err != nil { - return nil, err - } - - return &Session{ - logger: parentLogger.GetChild("session"), - context: context, - Sync: newSyncSession, - }, nil -} - -func (s *Session) NewContainer(alias string) (*Container, error) { - return newContainer(s.logger, s, alias) -} - -func (s *Session) ListAll(input *ListAllInput, - context interface{}, - responseChan chan *Response) (*Request, error) { - - id := atomic.AddUint64(&requestID, 1) - requestResponse := &RequestResponse{ - Request: Request{ - ID: id, - Input: input, - Context: context, - responseChan: responseChan, - }, - } - - // point to container - requestResponse.Request.requestResponse = requestResponse - - if err := s.sendRequest(&requestResponse.Request); err != nil { - return nil, err - } - - return &requestResponse.Request, nil -} - -func (s *Session) sendRequest(request *Request) error { - - // set session - request.session = s - - // delegate to context - return s.context.sendRequest(request) -} diff --git a/vendor/github.com/v3io/v3io-go-http/synccontainer.go b/vendor/github.com/v3io/v3io-go-http/synccontainer.go deleted file mode 100644 index f52b99bf9da..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/synccontainer.go +++ /dev/null @@ -1,629 +0,0 @@ -package v3io - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "path" - "reflect" - "strconv" - "strings" - - "github.com/nuclio/logger" -) - -// function names -const ( - setObjectFunctionName = "ObjectSet" - putItemFunctionName = "PutItem" - updateItemFunctionName = "UpdateItem" - getItemFunctionName = "GetItem" - getItemsFunctionName = "GetItems" - createStreamFunctionName = "CreateStream" - putRecordsFunctionName = "PutRecords" - getRecordsFunctionName = "GetRecords" - seekShardsFunctionName = "SeekShard" -) - -// headers for set object -var setObjectHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": setObjectFunctionName, -} - -// headers for put item -var putItemHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": putItemFunctionName, -} - -// headers for update item -var updateItemHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": updateItemFunctionName, -} - -// headers for update item -var getItemHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": getItemFunctionName, -} - -// headers for update item -var getItemsHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": getItemsFunctionName, -} - -// headers for create stream -var createStreamHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": createStreamFunctionName, -} - -// headers for put records -var putRecordsHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": putRecordsFunctionName, -} - -// headers for put records -var getRecordsHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": getRecordsFunctionName, -} - 
-// headers for seek records -var seekShardsHeaders = map[string]string{ - "Content-Type": "application/json", - "X-v3io-function": seekShardsFunctionName, -} - -// map between SeekShardInputType and its encoded counterpart -var seekShardsInputTypeToString = [...]string{ - "TIME", - "SEQUENCE", - "LATEST", - "EARLIEST", -} - -type SyncContainer struct { - logger logger.Logger - session *SyncSession - alias string - uriPrefix string -} - -func newSyncContainer(parentLogger logger.Logger, session *SyncSession, alias string) (*SyncContainer, error) { - return &SyncContainer{ - logger: parentLogger.GetChild(alias), - session: session, - alias: alias, - uriPrefix: fmt.Sprintf("http://%s/%s", session.context.clusterURL, alias), - }, nil -} - -func (sc *SyncContainer) ListBucket(input *ListBucketInput) (*Response, error) { - output := ListBucketOutput{} - - // prepare the query path - fullPath := sc.uriPrefix - if input.Path != "" { - fullPath += "?prefix=" + input.Path - } - - return sc.session.sendRequestAndXMLUnmarshal("GET", fullPath, nil, nil, &output) -} - -func (sc *SyncContainer) GetObject(input *GetObjectInput) (*Response, error) { - response, err := sc.session.sendRequest("GET", sc.getPathURI(input.Path), nil, nil, false) - if err != nil { - return nil, err - } - - return response, nil -} - -func (sc *SyncContainer) DeleteObject(input *DeleteObjectInput) error { - _, err := sc.session.sendRequest("DELETE", sc.getPathURI(input.Path), nil, nil, true) - if err != nil { - return err - } - - return nil -} - -func (sc *SyncContainer) PutObject(input *PutObjectInput) error { - _, err := sc.session.sendRequest("PUT", sc.getPathURI(input.Path), nil, input.Body, true) - if err != nil { - return err - } - - return nil -} - -func (sc *SyncContainer) GetItem(input *GetItemInput) (*Response, error) { - - // no need to marshal, just sprintf - body := fmt.Sprintf(`{"AttributesToGet": "%s"}`, strings.Join(input.AttributeNames, ",")) - - response, err := sc.session.sendRequest("PUT", sc.getPathURI(input.Path), getItemHeaders, []byte(body), false) - if err != nil { - return nil, err - } - - // ad hoc structure that contains response - item := struct { - Item map[string]map[string]string - }{} - - sc.logger.DebugWith("Body", "body", string(response.Body())) - - // unmarshal the body - err = json.Unmarshal(response.Body(), &item) - if err != nil { - return nil, err - } - - // decode the response - attributes, err := sc.decodeTypedAttributes(item.Item) - if err != nil { - return nil, err - } - - // attach the output to the response - response.Output = &GetItemOutput{attributes} - - return response, nil -} - -func (sc *SyncContainer) GetItems(input *GetItemsInput) (*Response, error) { - - // create GetItem Body - body := map[string]interface{}{ - "AttributesToGet": strings.Join(input.AttributeNames, ","), - } - - if input.Filter != "" { - body["FilterExpression"] = input.Filter - } - - if input.Marker != "" { - body["Marker"] = input.Marker - } - - if input.ShardingKey != "" { - body["ShardingKey"] = input.ShardingKey - } - - if input.Limit != 0 { - body["Limit"] = input.Limit - } - - if input.TotalSegments != 0 { - body["TotalSegment"] = input.TotalSegments - body["Segment"] = input.Segment - } - - if input.SortKeyRangeStart != "" { - body["SortKeyRangeStart"] = input.SortKeyRangeStart - } - - if input.SortKeyRangeEnd != "" { - body["SortKeyRangeEnd"] = input.SortKeyRangeEnd - } - - marshalledBody, err := json.Marshal(body) - if err != nil { - return nil, err - } - - response, err := 
sc.session.sendRequest("PUT", - sc.getPathURI(input.Path), - getItemsHeaders, - []byte(marshalledBody), - false) - - if err != nil { - return nil, err - } - - sc.logger.DebugWith("Body", "body", string(response.Body())) - - getItemsResponse := struct { - Items []map[string]map[string]string - NextMarker string - LastItemIncluded string - }{} - - // unmarshal the body into an ad hoc structure - err = json.Unmarshal(response.Body(), &getItemsResponse) - if err != nil { - return nil, err - } - - //validate getItems response to avoid infinite loop - if getItemsResponse.LastItemIncluded != "TRUE" && (getItemsResponse.NextMarker == "" || getItemsResponse.NextMarker == input.Marker) { - errMsg := fmt.Sprintf("Invalid getItems response: lastItemIncluded=false and nextMarker='%s', "+ - "startMarker='%s', probably due to object size bigger than 2M. Query is: %+v", getItemsResponse.NextMarker, input.Marker, input) - sc.logger.Warn(errMsg) - } - - getItemsOutput := GetItemsOutput{ - NextMarker: getItemsResponse.NextMarker, - Last: getItemsResponse.LastItemIncluded == "TRUE", - } - - // iterate through the items and decode them - for _, typedItem := range getItemsResponse.Items { - - item, err := sc.decodeTypedAttributes(typedItem) - if err != nil { - return nil, err - } - - getItemsOutput.Items = append(getItemsOutput.Items, item) - } - - // attach the output to the response - response.Output = &getItemsOutput - - return response, nil -} - -func (sc *SyncContainer) GetItemsCursor(input *GetItemsInput) (*SyncItemsCursor, error) { - return newSyncItemsCursor(sc, input) -} - -func (sc *SyncContainer) PutItem(input *PutItemInput) error { - - // prepare the query path - _, err := sc.putItem(input.Path, putItemFunctionName, input.Attributes, input.Condition, putItemHeaders, nil) - return err -} - -func (sc *SyncContainer) PutItems(input *PutItemsInput) (*Response, error) { - response := allocateResponse() - if response == nil { - return nil, errors.New("Failed to allocate response") - } - - putItemsOutput := PutItemsOutput{ - Success: true, - } - - for itemKey, itemAttributes := range input.Items { - - // try to post the item - _, err := sc.putItem( - input.Path+"/"+itemKey, putItemFunctionName, itemAttributes, input.Condition, putItemHeaders, nil) - - // if there was an error, shove it to the list of errors - if err != nil { - - // create the map to hold the errors since at least one exists - if putItemsOutput.Errors == nil { - putItemsOutput.Errors = map[string]error{} - } - - putItemsOutput.Errors[itemKey] = err - - // clear success, since at least one error exists - putItemsOutput.Success = false - } - } - - response.Output = &putItemsOutput - - return response, nil -} - -func (sc *SyncContainer) UpdateItem(input *UpdateItemInput) error { - var err error - - if input.Attributes != nil { - - // specify update mode as part of body. 
"Items" will be injected - body := map[string]interface{}{ - "UpdateMode": "CreateOrReplaceAttributes", - } - - _, err = sc.putItem(input.Path, putItemFunctionName, input.Attributes, input.Condition, putItemHeaders, body) - - } else if input.Expression != nil { - - _, err = sc.updateItemWithExpression( - input.Path, updateItemFunctionName, *input.Expression, input.Condition, updateItemHeaders) - } - - return err -} - -func (sc *SyncContainer) CreateStream(input *CreateStreamInput) error { - body := fmt.Sprintf(`{"ShardCount": %d, "RetentionPeriodHours": %d}`, - input.ShardCount, - input.RetentionPeriodHours) - - _, err := sc.session.sendRequest("POST", sc.getPathURI(input.Path), createStreamHeaders, []byte(body), true) - if err != nil { - return err - } - - return nil -} - -func (sc *SyncContainer) DeleteStream(input *DeleteStreamInput) error { - - // get all shards in the stream - response, err := sc.ListBucket(&ListBucketInput{ - Path: input.Path, - }) - - if err != nil { - return err - } - - defer response.Release() - - // delete the shards one by one - for _, content := range response.Output.(*ListBucketOutput).Contents { - - // TODO: handle error - stop deleting? return multiple errors? - sc.DeleteObject(&DeleteObjectInput{ - Path: content.Key, - }) - } - - // delete the actual stream - return sc.DeleteObject(&DeleteObjectInput{ - Path: path.Dir(input.Path) + "/", - }) -} - -func (sc *SyncContainer) PutRecords(input *PutRecordsInput) (*Response, error) { - - // TODO: set this to an initial size through heuristics? - // This function encodes manually - var buffer bytes.Buffer - - buffer.WriteString(`{"Records": [`) - - for recordIdx, record := range input.Records { - buffer.WriteString(`{"Data": "`) - buffer.WriteString(base64.StdEncoding.EncodeToString(record.Data)) - buffer.WriteString(`"`) - - if record.ClientInfo != nil { - buffer.WriteString(`,"ClientInfo": "`) - buffer.WriteString(base64.StdEncoding.EncodeToString(record.ClientInfo)) - buffer.WriteString(`"`) - } - - if record.ShardID != nil { - buffer.WriteString(`, "ShardId": `) - buffer.WriteString(strconv.Itoa(*record.ShardID)) - } - - if record.PartitionKey != "" { - buffer.WriteString(`, "PartitionKey": `) - buffer.WriteString(`"` + record.PartitionKey + `"`) - } - - // add comma if not last - if recordIdx != len(input.Records)-1 { - buffer.WriteString(`}, `) - } else { - buffer.WriteString(`}`) - } - } - - buffer.WriteString(`]}`) - str := string(buffer.Bytes()) - fmt.Println(str) - - response, err := sc.session.sendRequest("POST", sc.getPathURI(input.Path), putRecordsHeaders, buffer.Bytes(), false) - if err != nil { - return nil, err - } - - putRecordsOutput := PutRecordsOutput{} - - // unmarshal the body into an ad hoc structure - err = json.Unmarshal(response.Body(), &putRecordsOutput) - if err != nil { - return nil, err - } - - // set the output in the response - response.Output = &putRecordsOutput - - return response, nil -} - -func (sc *SyncContainer) SeekShard(input *SeekShardInput) (*Response, error) { - var buffer bytes.Buffer - - buffer.WriteString(`{"Type": "`) - buffer.WriteString(seekShardsInputTypeToString[input.Type]) - buffer.WriteString(`"`) - - if input.Type == SeekShardInputTypeSequence { - buffer.WriteString(`, "StartingSequenceNumber": `) - buffer.WriteString(strconv.Itoa(input.StartingSequenceNumber)) - } else if input.Type == SeekShardInputTypeTime { - buffer.WriteString(`, "TimestampSec": `) - buffer.WriteString(strconv.Itoa(input.Timestamp)) - buffer.WriteString(`, "TimestampNSec": 0`) - } - - 
buffer.WriteString(`}`) - - response, err := sc.session.sendRequest("POST", sc.getPathURI(input.Path), seekShardsHeaders, buffer.Bytes(), false) - if err != nil { - return nil, err - } - - seekShardOutput := SeekShardOutput{} - - // unmarshal the body into an ad hoc structure - err = json.Unmarshal(response.Body(), &seekShardOutput) - if err != nil { - return nil, err - } - - // set the output in the response - response.Output = &seekShardOutput - - return response, nil -} - -func (sc *SyncContainer) GetRecords(input *GetRecordsInput) (*Response, error) { - body := fmt.Sprintf(`{"Location": "%s", "Limit": %d}`, - input.Location, - input.Limit) - - response, err := sc.session.sendRequest("POST", sc.getPathURI(input.Path), getRecordsHeaders, []byte(body), false) - if err != nil { - return nil, err - } - - getRecordsOutput := GetRecordsOutput{} - - // unmarshal the body into an ad hoc structure - err = json.Unmarshal(response.Body(), &getRecordsOutput) - if err != nil { - return nil, err - } - - // set the output in the response - response.Output = &getRecordsOutput - - return response, nil -} - -func (sc *SyncContainer) putItem(path string, - functionName string, - attributes map[string]interface{}, - condition string, - headers map[string]string, - body map[string]interface{}) (*Response, error) { - - // iterate over all attributes and encode them with their types - typedAttributes, err := sc.encodeTypedAttributes(attributes) - if err != nil { - return nil, err - } - - // create an empty body if the user didn't pass anything - if body == nil { - body = map[string]interface{}{} - } - - // set item in body (use what the user passed as a base) - body["Item"] = typedAttributes - - if condition != "" { - body["ConditionExpression"] = condition - } - - jsonEncodedBodyContents, err := json.Marshal(body) - if err != nil { - return nil, err - } - - return sc.session.sendRequest("PUT", sc.getPathURI(path), headers, jsonEncodedBodyContents, false) -} - -func (sc *SyncContainer) updateItemWithExpression(path string, - functionName string, - expression string, - condition string, - headers map[string]string) (*Response, error) { - - body := map[string]interface{}{ - "UpdateExpression": expression, - "UpdateMode": "CreateOrReplaceAttributes", - } - - if condition != "" { - body["ConditionExpression"] = condition - } - - jsonEncodedBodyContents, err := json.Marshal(body) - if err != nil { - return nil, err - } - - return sc.session.sendRequest("POST", sc.getPathURI(path), headers, jsonEncodedBodyContents, false) -} - -// {"age": 30, "name": "foo"} -> {"age": {"N": 30}, "name": {"S": "foo"}} -func (sc *SyncContainer) encodeTypedAttributes(attributes map[string]interface{}) (map[string]map[string]string, error) { - typedAttributes := make(map[string]map[string]string) - - for attributeName, attributeValue := range attributes { - typedAttributes[attributeName] = make(map[string]string) - switch value := attributeValue.(type) { - default: - return nil, fmt.Errorf("Unexpected attribute type for %s: %T", attributeName, reflect.TypeOf(attributeValue)) - case int: - typedAttributes[attributeName]["N"] = strconv.Itoa(value) - // this is a tmp bypass to the fact Go maps Json numbers to float64 - case float64: - typedAttributes[attributeName]["N"] = strconv.FormatFloat(value, 'E', -1, 64) - case string: - typedAttributes[attributeName]["S"] = value - case []byte: - typedAttributes[attributeName]["B"] = base64.StdEncoding.EncodeToString(value) - } - } - - return typedAttributes, nil -} - -// {"age": {"N": 30}, 
"name": {"S": "foo"}} -> {"age": 30, "name": "foo"} -func (sc *SyncContainer) decodeTypedAttributes(typedAttributes map[string]map[string]string) (map[string]interface{}, error) { - var err error - attributes := map[string]interface{}{} - - for attributeName, typedAttributeValue := range typedAttributes { - - // try to parse as number - if numberValue, ok := typedAttributeValue["N"]; ok { - - // try int - if intValue, err := strconv.Atoi(numberValue); err != nil { - - // try float - floatValue, err := strconv.ParseFloat(numberValue, 64) - if err != nil { - return nil, fmt.Errorf("Value for %s is not int or float: %s", attributeName, numberValue) - } - - // save as float - attributes[attributeName] = floatValue - } else { - attributes[attributeName] = intValue - } - } else if stringValue, ok := typedAttributeValue["S"]; ok { - attributes[attributeName] = stringValue - } else if byteSliceValue, ok := typedAttributeValue["B"]; ok { - attributes[attributeName], err = base64.StdEncoding.DecodeString(byteSliceValue) - if err != nil { - return nil, err - } - } - } - - return attributes, nil -} - -func (sc *SyncContainer) getContext() *SyncContext { - return sc.session.context -} - -func (sc *SyncContainer) getPathURI(path string) string { - return sc.uriPrefix + "/" + path -} diff --git a/vendor/github.com/v3io/v3io-go-http/synccontext.go b/vendor/github.com/v3io/v3io-go-http/synccontext.go deleted file mode 100644 index a6b890c5360..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/synccontext.go +++ /dev/null @@ -1,36 +0,0 @@ -package v3io - -import ( - "time" - - "github.com/nuclio/logger" - "github.com/valyala/fasthttp" -) - -type SyncContext struct { - logger logger.Logger - httpClient *fasthttp.HostClient - clusterURL string - Timeout time.Duration -} - -func newSyncContext(parentLogger logger.Logger, clusterURL string) (*SyncContext, error) { - newSyncContext := &SyncContext{ - logger: parentLogger.GetChild("v3io"), - httpClient: &fasthttp.HostClient{ - Addr: clusterURL, - }, - clusterURL: clusterURL, - } - - return newSyncContext, nil -} - -func (sc *SyncContext) sendRequest(request *fasthttp.Request, response *fasthttp.Response) error { - - if sc.Timeout <= 0 { - return sc.httpClient.Do(request, response) - } else { - return sc.httpClient.DoTimeout(request, response, sc.Timeout) - } -} diff --git a/vendor/github.com/v3io/v3io-go-http/syncitemscursor.go b/vendor/github.com/v3io/v3io-go-http/syncitemscursor.go deleted file mode 100644 index 2638ba42511..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/syncitemscursor.go +++ /dev/null @@ -1,143 +0,0 @@ -package v3io - -import ( - "errors" -) - -var ErrInvalidTypeConversion = errors.New("Invalid type conversion") - -type SyncItemsCursor struct { - currentItem Item - currentError error - currentResponse *Response - nextMarker string - moreItemsExist bool - itemIndex int - items []Item - input *GetItemsInput - container *SyncContainer -} - -func newSyncItemsCursor(container *SyncContainer, input *GetItemsInput) (*SyncItemsCursor, error) { - newSyncItemsCursor := &SyncItemsCursor{ - container: container, - input: input, - } - - response, err := container.GetItems(input) - if err != nil { - return nil, err - } - - newSyncItemsCursor.setResponse(response) - - return newSyncItemsCursor, nil -} - -// Err returns the last error -func (ic *SyncItemsCursor) Err() error { - return ic.currentError -} - -// Release releases a cursor and its underlying resources -func (ic *SyncItemsCursor) Release() { - if ic.currentResponse != nil { - 
ic.currentResponse.Release() - } -} - -// Next gets the next matching item. this may potentially block as this lazy loads items from the collection -func (ic *SyncItemsCursor) Next() bool { - item, err := ic.NextItem() - - if item == nil || err != nil { - return false - } - - return true -} - -// NextItem gets the next matching item. this may potentially block as this lazy loads items from the collection -func (ic *SyncItemsCursor) NextItem() (Item, error) { - - // are there any more items left in the previous response we received? - if ic.itemIndex < len(ic.items) { - ic.currentItem = ic.items[ic.itemIndex] - ic.currentError = nil - - // next time we'll give next item - ic.itemIndex++ - - return ic.currentItem, nil - } - - // are there any more items up stream? - if !ic.moreItemsExist { - ic.currentError = nil - return nil, nil - } - - // get the previous request input and modify it with the marker - ic.input.Marker = ic.nextMarker - - // invoke get items - newResponse, err := ic.container.GetItems(ic.input) - if err != nil { - return nil, err - } - - // release the previous response - ic.currentResponse.Release() - - // set the new response - read all the sub information from it - ic.setResponse(newResponse) - - // and recurse into next now that we repopulated response - return ic.NextItem() -} - -// gets all items -func (ic *SyncItemsCursor) All() ([]Item, error) { - var items []Item - - for ic.Next() { - items = append(items, ic.GetItem()) - } - - if ic.Err() != nil { - return nil, ic.Err() - } - - return items, nil -} - -func (ic *SyncItemsCursor) GetField(name string) interface{} { - return ic.currentItem[name] -} - -func (ic *SyncItemsCursor) GetFieldInt(name string) (int, error) { - return ic.currentItem.GetFieldInt(name) -} - -func (ic *SyncItemsCursor) GetFieldString(name string) (string, error) { - return ic.currentItem.GetFieldString(name) -} - -func (ic *SyncItemsCursor) GetFields() map[string]interface{} { - return ic.currentItem -} - -func (ic *SyncItemsCursor) GetItem() Item { - return ic.currentItem -} - -func (ic *SyncItemsCursor) setResponse(response *Response) { - ic.currentResponse = response - - getItemsOutput := response.Output.(*GetItemsOutput) - - ic.moreItemsExist = !getItemsOutput.Last - ic.nextMarker = getItemsOutput.NextMarker - ic.items = getItemsOutput.Items - ic.itemIndex = 0 -} diff --git a/vendor/github.com/v3io/v3io-go-http/syncsession.go b/vendor/github.com/v3io/v3io-go-http/syncsession.go deleted file mode 100644 index aad26927cf9..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/syncsession.go +++ /dev/null @@ -1,146 +0,0 @@ -package v3io - -import ( - "encoding/base64" - "encoding/xml" - "fmt" - - "github.com/nuclio/logger" - "github.com/valyala/fasthttp" -) - -type SyncSession struct { - logger logger.Logger - context *SyncContext - authenticatioHeaderKey string - authenticatioHeaderValue string -} - -func newSyncSession(parentLogger logger.Logger, - context *SyncContext, - username string, - password string, - label string, - sessionKey string) (*SyncSession, error) { - - if sessionKey != "" { - //if sessionKey not empty - return &SyncSession{ - logger: parentLogger.GetChild("session"), - context: context, - authenticatioHeaderKey: "X-v3io-session-key", - authenticatioHeaderValue: sessionKey, - }, nil - } - // generate token for basic authentication - usernameAndPassword := fmt.Sprintf("%s:%s", username, password) - encodedUsernameAndPassword := base64.StdEncoding.EncodeToString([]byte(usernameAndPassword)) - - return &SyncSession{ - logger: 
parentLogger.GetChild("session"), - context: context, - authenticatioHeaderKey: "Authorization", - authenticatioHeaderValue: "Basic " + encodedUsernameAndPassword, - }, nil -} - -func (ss *SyncSession) ListAll() (*Response, error) { - output := ListAllOutput{} - - return ss.sendRequestAndXMLUnmarshal("GET", fmt.Sprintf("http://%s/", ss.context.clusterURL), nil, nil, &output) -} - -func (ss *SyncSession) sendRequestViaContext(request *fasthttp.Request, response *fasthttp.Response) error { - - request.Header.Set(ss.authenticatioHeaderKey, ss.authenticatioHeaderValue) - - // delegate to context - return ss.context.sendRequest(request, response) -} - -func (ss *SyncSession) sendRequest( - method string, - uri string, - headers map[string]string, - body []byte, - releaseResponse bool) (*Response, error) { - - var success bool - var statusCode int - - request := fasthttp.AcquireRequest() - response := allocateResponse() - - // init request - request.SetRequestURI(uri) - request.Header.SetMethod(method) - request.SetBody(body) - - if headers != nil { - for headerName, headerValue := range headers { - request.Header.Add(headerName, headerValue) - } - } - - // execute the request - err := ss.sendRequestViaContext(request, response.response) - if err != nil { - goto cleanup - } - - statusCode = response.response.StatusCode() - - // did we get a 2xx response? - success = statusCode >= 200 && statusCode < 300 - - // make sure we got expected status - if !success { - err = NewErrorWithStatusCode(statusCode, "Failed %s with status %d", method, statusCode) - goto cleanup - } - -cleanup: - - // we're done with the request - the response must be released by the user - // unless there's an error - fasthttp.ReleaseRequest(request) - - if err != nil { - response.Release() - return nil, err - } - - // if the user doesn't need the response, release it - if releaseResponse { - response.Release() - return nil, nil - } - - return response, nil -} - -func (ss *SyncSession) sendRequestAndXMLUnmarshal( - method string, - uri string, - headers map[string]string, - body []byte, - output interface{}) (*Response, error) { - - response, err := ss.sendRequest(method, uri, headers, body, false) - if err != nil { - return nil, err - } - - // unmarshal the body into the output - err = xml.Unmarshal(response.response.Body(), output) - if err != nil { - response.Release() - - return nil, err - } - - // set output in response - response.Output = output - - return response, nil -} diff --git a/vendor/github.com/v3io/v3io-go-http/types.go b/vendor/github.com/v3io/v3io-go-http/types.go deleted file mode 100644 index 60266429399..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/types.go +++ /dev/null @@ -1,276 +0,0 @@ -package v3io - -import ( - "encoding/xml" - - "github.com/valyala/fasthttp" -) - -// -// Request / response -// - -type Request struct { - ID uint64 - - // the container on which the request was performed (if applicable) - container *Container - - // the session on which the request was performed (if applicable) - session *Session - - // holds the input (e.g. 
ListBucketInput, GetItemInput) - Input interface{} - - // a user supplied context - Context interface{} - - // the channel to which the response must be posted - responseChan chan *Response - - // pointer to container - requestResponse *RequestResponse - - // Request time - SendTimeNanoseconds int64 -} - -type Response struct { - response *fasthttp.Response - - // hold a decoded output, if any - Output interface{} - - // Equal to the ID of request - ID uint64 - - // holds the error for async responses - Error error - - // a user supplied context - Context interface{} - - // pointer to container - requestResponse *RequestResponse -} - -func (r *Response) Release() { - if r.response != nil { - fasthttp.ReleaseResponse(r.response) - } -} - -func (r *Response) Body() []byte { - return r.response.Body() -} - -func (r *Response) Request() *Request { - return &r.requestResponse.Request -} - -// holds both a request and response -type RequestResponse struct { - Request Request - Response Response -} - -type ListBucketInput struct { - Path string -} - -type Content struct { - XMLName xml.Name `xml:"Contents"` - Key string `xml:"Key"` - Size int `xml:"Size"` - LastSequenceId int `xml:"LastSequenceId"` - ETag string `xml:"ETag"` - LastModified string `xml:"LastModified"` -} - -type CommonPrefix struct { - XMLName xml.Name `xml:"CommonPrefixes"` - Prefix string `xml:"Prefix"` -} - -type ListBucketOutput struct { - XMLName xml.Name `xml:"ListBucketResult"` - Name string `xml:"Name"` - NextMarker string `xml:"NextMarker"` - MaxKeys string `xml:"MaxKeys"` - Contents []Content `xml:"Contents"` - CommonPrefixes []CommonPrefix `xml:"CommonPrefixes"` -} - -type ListAllInput struct { -} - -type ListAllOutput struct { - XMLName xml.Name `xml:"ListAllMyBucketsResult"` - Owner interface{} `xml:"Owner"` - Buckets Buckets `xml:"Buckets"` -} - -type Buckets struct { - XMLName xml.Name `xml:"Buckets"` - Bucket []Bucket `xml:"Bucket"` -} - -type Bucket struct { - XMLName xml.Name `xml:"Bucket"` - Name string `xml:"Name"` - CreationDate string `xml:"CreationDate"` - Id int `xml:"Id"` -} - -type GetObjectInput struct { - Path string -} - -type PutObjectInput struct { - Path string - Body []byte -} - -type DeleteObjectInput struct { - Path string -} - -type SetObjectInput struct { - Path string - ValidationModifiedTimeSec uint64 - ValidationModifiedTimeNsec uint64 - ValidationOperation string - ValidationMask uint64 - ValidationValue uint64 - SetOperation string - DataMask uint64 - DataValue uint64 -} - -type PutItemInput struct { - Path string - Condition string - Attributes map[string]interface{} -} - -type PutItemsInput struct { - Path string - Condition string - Items map[string]map[string]interface{} -} - -type PutItemsOutput struct { - Success bool - Errors map[string]error -} - -type UpdateItemInput struct { - Path string - Attributes map[string]interface{} - Expression *string - Condition string -} - -type GetItemInput struct { - Path string - AttributeNames []string -} - -type GetItemOutput struct { - Item Item -} - -type GetItemsInput struct { - Path string - AttributeNames []string - Filter string - Marker string - ShardingKey string - Limit int - Segment int - TotalSegments int - SortKeyRangeStart string - SortKeyRangeEnd string -} - -type GetItemsOutput struct { - Last bool - NextMarker string - Items []Item -} - -type CreateStreamInput struct { - Path string - ShardCount int - RetentionPeriodHours int -} - -type StreamRecord struct { - ShardID *int - Data []byte - ClientInfo []byte - PartitionKey string -} 
- -type PutRecordsInput struct { - Path string - Records []*StreamRecord -} - -type PutRecordResult struct { - SequenceNumber int - ShardID int `json:"ShardId"` - ErrorCode int - ErrorMessage string -} - -type PutRecordsOutput struct { - FailedRecordCount int - Records []PutRecordResult -} - -type DeleteStreamInput struct { - Path string -} - -type SeekShardInputType int - -const ( - SeekShardInputTypeTime SeekShardInputType = iota - SeekShardInputTypeSequence - SeekShardInputTypeLatest - SeekShardInputTypeEarliest -) - -type SeekShardInput struct { - Path string - Type SeekShardInputType - StartingSequenceNumber int - Timestamp int -} - -type SeekShardOutput struct { - Location string -} - -type GetRecordsInput struct { - Path string - Location string - Limit int -} - -type GetRecordsResult struct { - ArrivalTimeSec int - ArrivalTimeNSec int - SequenceNumber int - ClientInfo []byte - PartitionKey string - Data []byte -} - -type GetRecordsOutput struct { - NextLocation string - MSecBehindLatest int - RecordsBehindLatest int - Records []GetRecordsResult -} diff --git a/vendor/github.com/v3io/v3io-go-http/utils.go b/vendor/github.com/v3io/v3io-go-http/utils.go deleted file mode 100644 index da5d19e7140..00000000000 --- a/vendor/github.com/v3io/v3io-go-http/utils.go +++ /dev/null @@ -1,11 +0,0 @@ -package v3io - -import ( - "github.com/valyala/fasthttp" -) - -func allocateResponse() *Response { - return &Response{ - response: fasthttp.AcquireResponse(), - } -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a07ee49ea39..4bb86aa0fe5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -994,6 +994,20 @@ func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq)) } +// SetsockoptSockFprog attaches a classic BPF or an extended BPF program to a +// socket to filter incoming packets. See 'man 7 socket' for usage information. +func SetsockoptSockFprog(fd, level, opt int, fprog *SockFprog) error { + return setsockopt(fd, level, opt, unsafe.Pointer(fprog), unsafe.Sizeof(*fprog)) +} + +func SetsockoptCanRawFilter(fd, level, opt int, filter []CanFilter) error { + var p unsafe.Pointer + if len(filter) > 0 { + p = unsafe.Pointer(&filter[0]) + } + return setsockopt(fd, level, opt, p, uintptr(len(filter)*SizeofCanFilter)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. 
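The syscall_linux.go hunk above adds SetsockoptCanRawFilter next to the BPF helper, and the ztypes files below define the matching CanFilter struct and SizeofCanFilter constant for each architecture. A minimal sketch of how a caller might attach a CAN_RAW filter with the new helper, assuming the usual SocketCAN constants (AF_CAN, CAN_RAW, SOL_CAN_RAW, CAN_RAW_FILTER, CAN_SFF_MASK) are exported by golang.org/x/sys/unix on Linux; this is an illustration, not part of the patch:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a raw SocketCAN socket (Linux only).
	fd, err := unix.Socket(unix.AF_CAN, unix.SOCK_RAW, unix.CAN_RAW)
	if err != nil {
		log.Fatalf("socket: %v", err)
	}
	defer unix.Close(fd)

	// Only deliver frames whose 11-bit identifier is exactly 0x123.
	filters := []unix.CanFilter{
		{Id: 0x123, Mask: unix.CAN_SFF_MASK},
	}

	// Attach the filter list using the helper added in this hunk.
	if err := unix.SetsockoptCanRawFilter(fd, unix.SOL_CAN_RAW, unix.CAN_RAW_FILTER, filters); err != nil {
		log.Fatalf("setsockopt CAN_RAW_FILTER: %v", err)
	}
}

As the diff shows, the helper also accepts an empty slice, in which case it passes a nil pointer with zero length to setsockopt.
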
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index d262150cc08..3e9c18e6814 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -405,6 +405,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -434,6 +439,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index e492caacda5..14365ff6cf3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -406,6 +406,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -435,6 +440,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index ad4342156e6..80ad473c614 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -409,6 +409,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -438,6 +443,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index ef76a362d9c..20e78cc1f6d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,6 +441,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index dbf05903d42..bdeb0cb24f6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -408,6 +408,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,6 +442,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 1b7e6707934..2d3f5911290 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,6 +441,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 57379005b58..5fb57ff2a38 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,6 +441,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 0e88bf47bcd..b46b26f6ce1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -408,6 +408,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,6 +442,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 5ac91b3f7aa..e14e3c90abf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -408,6 +408,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,6 +442,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 1e59b45068c..2332e8fd126 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -408,6 +408,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -437,6 +442,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 508885f11f0..efec4f8193b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -407,6 +407,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -436,6 +441,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d315f2c3a7a..71cc23f2e13 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -406,6 +406,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -435,6 +440,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go 
b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index a1a9279c225..48805ba1952 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -410,6 +410,11 @@ type TCPInfo struct { Total_retrans uint32 } +type CanFilter struct { + Id uint32 + Mask uint32 +} + const ( SizeofSockaddrInet4 = 0x10 SizeofSockaddrInet6 = 0x1c @@ -439,6 +444,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 ) const ( diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 4f17a3331fd..9f946da6fe5 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -149,7 +149,7 @@ const ( DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d - DOMAIN_ALIAS_RID_MONITORING_USERS = 0X22e + DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 diff --git a/vendor/modules.txt b/vendor/modules.txt index 9687f6bb018..5cd3a4e218a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -201,9 +201,9 @@ github.com/klauspost/compress/zlib github.com/klauspost/cpuid # github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 github.com/kr/logfmt -# github.com/mattn/go-colorable v0.0.9 +# github.com/mattn/go-colorable v0.1.1 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.4 +# github.com/mattn/go-isatty v0.0.5 github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil @@ -219,6 +219,8 @@ github.com/modern-go/concurrent github.com/modern-go/reflect2 # github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 github.com/mwitkow/go-conntrack +# github.com/nuclio/errors v0.0.1 +github.com/nuclio/errors # github.com/nuclio/logger v0.0.1 github.com/nuclio/logger # github.com/nuclio/zap v0.0.2 @@ -233,7 +235,7 @@ github.com/opentracing-contrib/go-stdlib/nethttp github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/log github.com/opentracing/opentracing-go/ext -# github.com/pavius/zap v0.0.0-20180228181622-8d52692529b8 +# github.com/pavius/zap v1.4.2-0.20180228181622-8d52692529b8 github.com/pavius/zap github.com/pavius/zap/zapcore github.com/pavius/zap/internal/bufferpool @@ -299,9 +301,11 @@ github.com/stretchr/testify/assert # github.com/v3io/frames v0.0.0-20190328123118-1dad1ff610509e7b087d9cd390ed1b452caecf15 => github.com/v3io/frames v0.0.0-20190328123118-1dad1ff610509e7b087d9cd390ed1b452caecf15 github.com/v3io/frames github.com/v3io/frames/pb -# github.com/v3io/v3io-go-http v0.0.0-20190221115935-53e2b487c9a2 -github.com/v3io/v3io-go-http -# github.com/v3io/v3io-tsdb v0.9.0 => github.com/v3io/v3io-tsdb v0.9.0 +# github.com/v3io/v3io-go v0.0.0-20180415000000-1486c75b0e590a14580f7d9b6cef7a944a231ca7 +github.com/v3io/v3io-go/pkg/dataplane +github.com/v3io/v3io-go/pkg/errors +github.com/v3io/v3io-go/pkg/dataplane/http +# github.com/v3io/v3io-tsdb v0.9.2 github.com/v3io/v3io-tsdb/pkg/aggregate github.com/v3io/v3io-tsdb/pkg/appender github.com/v3io/v3io-tsdb/pkg/config @@ -315,7 +319,7 @@ github.com/v3io/v3io-tsdb/pkg/querier github.com/v3io/v3io-tsdb/pkg/tsdb/schema # github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/bytebufferpool -# 
github.com/valyala/fasthttp v1.0.0 +# github.com/valyala/fasthttp v1.2.0 github.com/valyala/fasthttp github.com/valyala/fasthttp/fasthttputil github.com/valyala/fasthttp/stackless @@ -374,7 +378,7 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a +# golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.0