From 94a9191b50aa0232fbdf06f6ce6a13c6992482ae Mon Sep 17 00:00:00 2001 From: Brandon Liu Date: Mon, 29 Jan 2024 10:51:17 +0800 Subject: [PATCH] =?UTF-8?q?refactor=20names=20to=20only=20export=20intenti?= =?UTF-8?q?onal=20parts=20of=20functionality=20[#20=E2=80=A6=20(#124)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor names to only export intentional parts of functionality [#20, #46] * Also correct ordering of function arguments to put errors last * add go vet to CI, fix CLI syntax --- .github/workflows/test.yml | 7 ++- caddy/pmtiles_proxy.go | 12 ++--- main.go | 24 ++++----- pmtiles/bitmap.go | 2 +- pmtiles/bucket.go | 50 +++++++++-------- pmtiles/convert.go | 106 ++++++++++++++++++------------------- pmtiles/convert_test.go | 2 +- pmtiles/directory.go | 5 ++ pmtiles/extract.go | 70 ++++++++++++------------ pmtiles/extract_test.go | 32 +++++------ pmtiles/makesync.go | 30 ++++++----- pmtiles/readerv2.go | 32 +++++------ pmtiles/readerv2_test.go | 2 +- pmtiles/region.go | 2 + pmtiles/server.go | 88 +++++++++++++++--------------- pmtiles/show.go | 10 ++-- pmtiles/tile_id.go | 4 +- pmtiles/tilejson.go | 3 +- pmtiles/tilejson_test.go | 4 +- pmtiles/upload.go | 1 + pmtiles/verify.go | 4 +- 21 files changed, 254 insertions(+), 236 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 93adf11..1a7392c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -14,7 +14,7 @@ jobs: with: go-version: '^1.18.0' - run: go test ./pmtiles - fmt: + fmt_vet_lint: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -22,3 +22,8 @@ jobs: with: go-version: '^1.18.0' - run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi + - run: go vet caddy/pmtiles_proxy.go + - run: go vet main.go + - run: go vet pmtiles/* + - name: Run Revive Action by pulling pre-built image + uses: docker://morphy/revive-action:v2 diff --git a/caddy/pmtiles_proxy.go b/caddy/pmtiles_proxy.go index e7aeabe..0c2cb6d 100644 --- a/caddy/pmtiles_proxy.go +++ b/caddy/pmtiles_proxy.go @@ -28,7 +28,7 @@ func init() { type Middleware struct { Bucket string `json:"bucket"` CacheSize int `json:"cache_size"` - PublicUrl string `json:"public_url"` + PublicURL string `json:"public_url"` logger *zap.Logger server *pmtiles.Server } @@ -45,7 +45,7 @@ func (m *Middleware) Provision(ctx caddy.Context) error { m.logger = ctx.Logger() logger := log.New(io.Discard, "", log.Ldate) prefix := "." 
// serve only the root of the bucket for now, at the root route of Caddyfile - server, err := pmtiles.NewServer(m.Bucket, prefix, logger, m.CacheSize, "", m.PublicUrl) + server, err := pmtiles.NewServer(m.Bucket, prefix, logger, m.CacheSize, "", m.PublicURL) if err != nil { return err } @@ -66,13 +66,13 @@ func (m *Middleware) Validate() error { func (m Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error { start := time.Now() - status_code, headers, body := m.server.Get(r.Context(), r.URL.Path) + statusCode, headers, body := m.server.Get(r.Context(), r.URL.Path) for k, v := range headers { w.Header().Set(k, v) } - w.WriteHeader(status_code) + w.WriteHeader(statusCode) w.Write(body) - m.logger.Info("response", zap.Int("status", status_code), zap.String("path", r.URL.Path), zap.Duration("duration", time.Since(start))) + m.logger.Info("response", zap.Int("status", statusCode), zap.String("path", r.URL.Path), zap.Duration("duration", time.Since(start))) return next.ServeHTTP(w, r) } @@ -96,7 +96,7 @@ func (m *Middleware) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { } m.CacheSize = num case "public_url": - if !d.Args(&m.PublicUrl) { + if !d.Args(&m.PublicURL) { return d.ArgErr() } } diff --git a/main.go b/main.go index e34d0af..c486c33 100644 --- a/main.go +++ b/main.go @@ -56,11 +56,11 @@ var cli struct { Bucket string `help:"Remote bucket of input archive."` Region string `help:"local GeoJSON Polygon or MultiPolygon file for area of interest." type:"existingfile"` Bbox string `help:"bbox area of interest: min_lon,min_lat,max_lon,max_lat" type:"string"` - Minzoom int8 `default:-1 help:"Minimum zoom level, inclusive."` - Maxzoom int8 `default:-1 help:"Maximum zoom level, inclusive."` - DownloadThreads int `default:4 help:"Number of download threads."` + Minzoom int8 `default:"-1" help:"Minimum zoom level, inclusive."` + Maxzoom int8 `default:"-1" help:"Maximum zoom level, inclusive."` + DownloadThreads int `default:"4" help:"Number of download threads."` DryRun bool `help:"Calculate tiles to extract, but don't download them."` - Overfetch float32 `default:0.05 help:"What ratio of extra data to download to minimize # requests; 0.2 is 20%"` + Overfetch float32 `default:"0.05" help:"What ratio of extra data to download to minimize # requests; 0.2 is 20%"` } `cmd:"" help:"Create an archive from a larger archive for a subset of zoom levels or geographic region."` Verify struct { @@ -69,8 +69,8 @@ var cli struct { Makesync struct { Input string `arg:"" type:"existingfile"` - BlockSizeKb int `default:1000 help:"The approximate block size, in kilobytes. 0 means 1 tile = 1 block."` - HashFunction string `default:fnv1a help:"The hash function."` + BlockSizeKb int `default:"1000" help:"The approximate block size, in kilobytes. 0 means 1 tile = 1 block."` + HashFunction string `default:"fnv1a" help:"The hash function."` Checksum string `help:"Store a checksum in the syncfile."` } `cmd:"" hidden:""` @@ -82,10 +82,10 @@ var cli struct { Serve struct { Path string `arg:"" help:"Local path or bucket prefix"` Interface string `default:"0.0.0.0"` - Port int `default:8080` - AdminPort int `default:-1` + Port int `default:"8080"` + AdminPort int `default:"-1"` Cors string `help:"Value of HTTP CORS header."` - CacheSize int `default:64 help:"Size of cache in Megabytes."` + CacheSize int `default:"64" help:"Size of cache in Megabytes."` Bucket string `help:"Remote bucket"` PublicURL string `help:"Public base URL of tile endpoint for TileJSON e.g. 
https://example.com/tiles/"`
	} `cmd:"" help:"Run an HTTP proxy server for Z/X/Y tiles."`
 
@@ -94,15 +94,15 @@ var cli struct {
 		OldFile         string  `type:"existingfile" help:"The old archive on disk. Providing this will check the new archive for a .sync file"`
 		NewFile         string  `arg:"The remote file."`
 		Bucket          string  `required:"" help:"Bucket of file to download."`
-		DownloadThreads int     `default:4 help:"Number of download threads."`
+		DownloadThreads int     `default:"4" help:"Number of download threads."`
 		DryRun          bool    `help:"Calculate new parts to download, but don't download them."`
-		Overfetch       float32 `default:0.05 help:"What ratio of extra data to download to minimize # requests; 0.2 is 20%"`
+		Overfetch       float32 `default:"0.05" help:"What ratio of extra data to download to minimize # requests; 0.2 is 20%"`
 	} `cmd:"" help:"Upload a local archive to remote storage."`
 
 	Upload struct {
 		Input          string `arg:"" type:"existingfile"`
 		Key            string `arg:""`
-		MaxConcurrency int    `default:2 help:"# of upload threads"`
+		MaxConcurrency int    `default:"2" help:"# of upload threads"`
 		Bucket         string `required:"" help:"Bucket to upload to."`
 	} `cmd:"" help:"Upload a local archive to remote storage."`
 
diff --git a/pmtiles/bitmap.go b/pmtiles/bitmap.go
index 2fcc7fd..278e61d 100644
--- a/pmtiles/bitmap.go
+++ b/pmtiles/bitmap.go
@@ -103,7 +103,7 @@ func generalizeAnd(r *roaring64.Bitmap) {
 	}
 }
 
-func WriteImage(interior *roaring64.Bitmap, boundary *roaring64.Bitmap, exterior *roaring64.Bitmap, filename string, zoom uint8) {
+func writeImage(interior *roaring64.Bitmap, boundary *roaring64.Bitmap, exterior *roaring64.Bitmap, filename string, zoom uint8) {
 	dim := 1 << zoom
 
 	img := image.NewNRGBA(image.Rect(0, 0, dim, dim))
diff --git a/pmtiles/bucket.go b/pmtiles/bucket.go
index 7f46dac..ca27779 100644
--- a/pmtiles/bucket.go
+++ b/pmtiles/bucket.go
@@ -13,6 +13,7 @@ import (
 	"strings"
 )
 
+// Bucket is an abstraction over a gocloud or plain HTTP bucket.
type Bucket interface { Close() error NewRangeReader(ctx context.Context, key string, offset int64, length int64) (io.ReadCloser, error) @@ -22,7 +23,7 @@ type HTTPBucket struct { baseURL string } -func (b HTTPBucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (io.ReadCloser, error) { +func (b HTTPBucket) NewRangeReader(_ context.Context, key string, offset, length int64) (io.ReadCloser, error) { reqURL := b.baseURL + "/" + key req, err := http.NewRequest("GET", reqURL, nil) @@ -77,26 +78,24 @@ func NormalizeBucketKey(bucket string, prefix string, key string) (string, strin dir = dir[:len(dir)-1] } return u.Scheme + "://" + u.Host + dir, file, nil - } else { - fileprotocol := "file://" - if string(os.PathSeparator) != "/" { - fileprotocol += "/" - } - if prefix != "" { - abs, err := filepath.Abs(prefix) - if err != nil { - return "", "", err - } - return fileprotocol + filepath.ToSlash(abs), key, nil - } - abs, err := filepath.Abs(key) + } + fileprotocol := "file://" + if string(os.PathSeparator) != "/" { + fileprotocol += "/" + } + if prefix != "" { + abs, err := filepath.Abs(prefix) if err != nil { return "", "", err } - return fileprotocol + filepath.ToSlash(filepath.Dir(abs)), filepath.Base(abs), nil + return fileprotocol + filepath.ToSlash(abs), key, nil } + abs, err := filepath.Abs(key) + if err != nil { + return "", "", err + } + return fileprotocol + filepath.ToSlash(filepath.Dir(abs)), filepath.Base(abs), nil } - return bucket, key, nil } @@ -104,15 +103,14 @@ func OpenBucket(ctx context.Context, bucketURL string, bucketPrefix string) (Buc if strings.HasPrefix(bucketURL, "http") { bucket := HTTPBucket{bucketURL} return bucket, nil - } else { - bucket, err := blob.OpenBucket(ctx, bucketURL) - if err != nil { - return nil, err - } - if bucketPrefix != "" && bucketPrefix != "/" && bucketPrefix != "." { - bucket = blob.PrefixedBucket(bucket, path.Clean(bucketPrefix)+string(os.PathSeparator)) - } - wrappedBucket := BucketAdapter{bucket} - return wrappedBucket, err } + bucket, err := blob.OpenBucket(ctx, bucketURL) + if err != nil { + return nil, err + } + if bucketPrefix != "" && bucketPrefix != "/" && bucketPrefix != "." 
{ + bucket = blob.PrefixedBucket(bucket, path.Clean(bucketPrefix)+string(os.PathSeparator)) + } + wrappedBucket := BucketAdapter{bucket} + return wrappedBucket, err } diff --git a/pmtiles/convert.go b/pmtiles/convert.go index d13e35c..24d9c6f 100644 --- a/pmtiles/convert.go +++ b/pmtiles/convert.go @@ -22,35 +22,34 @@ import ( "zombiezen.com/go/sqlite" ) -type OffsetLen struct { +type offsetLen struct { Offset uint64 Length uint32 } -type Resolver struct { +type resolver struct { deduplicate bool compress bool Entries []EntryV3 Offset uint64 - OffsetMap map[string]OffsetLen + OffsetMap map[string]offsetLen AddressedTiles uint64 // none of them can be empty compressor *gzip.Writer compressTmp *bytes.Buffer hashfunc hash.Hash } -func (r *Resolver) NumContents() uint64 { +func (r *resolver) NumContents() uint64 { if r.deduplicate { return uint64(len(r.OffsetMap)) - } else { - return r.AddressedTiles } + return r.AddressedTiles } // must be called in increasing tile_id order, uniquely -func (r *Resolver) AddTileIsNew(tileID uint64, data []byte) (bool, []byte) { +func (r *resolver) AddTileIsNew(tileID uint64, data []byte) (bool, []byte) { r.AddressedTiles++ - var found OffsetLen + var found offsetLen var ok bool var sumString string if r.deduplicate { @@ -74,44 +73,43 @@ func (r *Resolver) AddTileIsNew(tileID uint64, data []byte) (bool, []byte) { } return false, nil + } + var newData []byte + if !r.compress || (len(data) >= 2 && data[0] == 31 && data[1] == 139) { + // the tile is already compressed + newData = data } else { - var newData []byte - if !r.compress || (len(data) >= 2 && data[0] == 31 && data[1] == 139) { - // the tile is already compressed - newData = data - } else { - r.compressTmp.Reset() - r.compressor.Reset(r.compressTmp) - r.compressor.Write(data) - r.compressor.Close() - newData = r.compressTmp.Bytes() - } + r.compressTmp.Reset() + r.compressor.Reset(r.compressTmp) + r.compressor.Write(data) + r.compressor.Close() + newData = r.compressTmp.Bytes() + } - if r.deduplicate { - r.OffsetMap[sumString] = OffsetLen{r.Offset, uint32(len(newData))} - } - r.Entries = append(r.Entries, EntryV3{tileID, r.Offset, uint32(len(newData)), 1}) - r.Offset += uint64(len(newData)) - return true, newData + if r.deduplicate { + r.OffsetMap[sumString] = offsetLen{r.Offset, uint32(len(newData))} } + r.Entries = append(r.Entries, EntryV3{tileID, r.Offset, uint32(len(newData)), 1}) + r.Offset += uint64(len(newData)) + return true, newData } -func NewResolver(deduplicate bool, compress bool) *Resolver { +func newResolver(deduplicate bool, compress bool) *resolver { b := new(bytes.Buffer) compressor, _ := gzip.NewWriterLevel(b, gzip.BestCompression) - r := Resolver{deduplicate, compress, make([]EntryV3, 0), 0, make(map[string]OffsetLen), 0, compressor, b, fnv.New128a()} + r := resolver{deduplicate, compress, make([]EntryV3, 0), 0, make(map[string]offsetLen), 0, compressor, b, fnv.New128a()} return &r } +// Convert an existing archive on disk to a new PMTiles specification version 3 archive. 
func Convert(logger *log.Logger, input string, output string, deduplicate bool, tmpfile *os.File) error {
 	if strings.HasSuffix(input, ".pmtiles") {
-		return ConvertPmtilesV2(logger, input, output, deduplicate, tmpfile)
-	} else {
-		return ConvertMbtiles(logger, input, output, deduplicate, tmpfile)
+		return convertPmtilesV2(logger, input, output, deduplicate, tmpfile)
 	}
+	return convertMbtiles(logger, input, output, deduplicate, tmpfile)
 }
 
-func addDirectoryV2Entries(dir DirectoryV2, entries *[]EntryV3, f *os.File) {
+func addDirectoryV2Entries(dir directoryV2, entries *[]EntryV3, f *os.File) {
 	for zxy, rng := range dir.Entries {
 		tileID := ZxyToID(zxy.Z, zxy.X, zxy.Y)
 		*entries = append(*entries, EntryV3{tileID, rng.Offset, uint32(rng.Length), 1})
@@ -128,7 +126,7 @@ func addDirectoryV2Entries(dir DirectoryV2, entries *[]EntryV3, f *os.File) {
 		f.Seek(int64(offset), 0)
 		leafBytes := make([]byte, length)
 		f.Read(leafBytes)
-		leafDir := ParseDirectoryV2(leafBytes)
+		leafDir := parseDirectoryV2(leafBytes)
 		addDirectoryV2Entries(leafDir, entries, f)
 	}
 }
@@ -146,7 +144,7 @@ func setZoomCenterDefaults(header *HeaderV3, entries []EntryV3) {
 	}
 }
 
-func ConvertPmtilesV2(logger *log.Logger, input string, output string, deduplicate bool, tmpfile *os.File) error {
+func convertPmtilesV2(logger *log.Logger, input string, output string, deduplicate bool, tmpfile *os.File) error {
 	start := time.Now()
 	f, err := os.Open(input)
 	if err != nil {
@@ -159,7 +157,7 @@ func ConvertPmtilesV2(logger *log.Logger, input string, output string, deduplica
 		return fmt.Errorf("archive is already the latest PMTiles version (3)")
 	}
 
-	v2JsonBytes, dir := ParseHeaderV2(bytes.NewReader(buffer))
+	v2JsonBytes, dir := parseHeaderV2(bytes.NewReader(buffer))
 	var v2metadata map[string]interface{}
 	json.Unmarshal(v2JsonBytes, &v2metadata)
 
@@ -187,8 +185,8 @@ func ConvertPmtilesV2(logger *log.Logger, input string, output string, deduplica
 		return entries[i].TileID < entries[j].TileID
 	})
 
-	// re-use resolver, because even if archives are de-duplicated we may need to recompress.
-	resolver := NewResolver(deduplicate, header.TileType == Mvt)
+	// re-use the resolver, because even if archives are de-duplicated we may need to recompress.
+ resolve := newResolver(deduplicate, header.TileType == Mvt) bar := progressbar.Default(int64(len(entries))) for _, entry := range entries { @@ -207,7 +205,7 @@ func ConvertPmtilesV2(logger *log.Logger, input string, output string, deduplica } } // TODO: enforce sorted order - if isNew, newData := resolver.AddTileIsNew(entry.TileID, buf); isNew { + if isNew, newData := resolve.AddTileIsNew(entry.TileID, buf); isNew { _, err = tmpfile.Write(newData) if err != nil { return fmt.Errorf("Failed to write to tempfile, %w", err) @@ -216,7 +214,7 @@ func ConvertPmtilesV2(logger *log.Logger, input string, output string, deduplica bar.Add(1) } - err = finalize(logger, resolver, header, tmpfile, output, jsonMetadata) + err = finalize(logger, resolve, header, tmpfile, output, jsonMetadata) if err != nil { return err } @@ -225,7 +223,7 @@ func ConvertPmtilesV2(logger *log.Logger, input string, output string, deduplica return nil } -func ConvertMbtiles(logger *log.Logger, input string, output string, deduplicate bool, tmpfile *os.File) error { +func convertMbtiles(logger *log.Logger, input string, output string, deduplicate bool, tmpfile *os.File) error { start := time.Now() conn, err := sqlite.OpenConn(input, sqlite.OpenReadOnly) if err != nil { @@ -310,7 +308,7 @@ func ConvertMbtiles(logger *log.Logger, input string, output string, deduplicate } logger.Println("Pass 2: writing tiles") - resolver := NewResolver(deduplicate, header.TileType == Mvt) + resolve := newResolver(deduplicate, header.TileType == Mvt) { bar := progressbar.Default(int64(tileset.GetCardinality())) i := tileset.Iterator() @@ -341,7 +339,7 @@ func ConvertMbtiles(logger *log.Logger, input string, output string, deduplicate data := rawTileTmp.Bytes() if len(data) > 0 { - if isNew, newData := resolver.AddTileIsNew(id, data); isNew { + if isNew, newData := resolve.AddTileIsNew(id, data); isNew { _, err := tmpfile.Write(newData) if err != nil { return fmt.Errorf("Failed to write to tempfile: %s", err) @@ -354,7 +352,7 @@ func ConvertMbtiles(logger *log.Logger, input string, output string, deduplicate bar.Add(1) } } - err = finalize(logger, resolver, header, tmpfile, output, jsonMetadata) + err = finalize(logger, resolve, header, tmpfile, output, jsonMetadata) if err != nil { return err } @@ -362,14 +360,14 @@ func ConvertMbtiles(logger *log.Logger, input string, output string, deduplicate return nil } -func finalize(logger *log.Logger, resolver *Resolver, header HeaderV3, tmpfile *os.File, output string, jsonMetadata map[string]interface{}) error { - logger.Println("# of addressed tiles: ", resolver.AddressedTiles) - logger.Println("# of tile entries (after RLE): ", len(resolver.Entries)) - logger.Println("# of tile contents: ", resolver.NumContents()) +func finalize(logger *log.Logger, resolve *resolver, header HeaderV3, tmpfile *os.File, output string, jsonMetadata map[string]interface{}) error { + logger.Println("# of addressed tiles: ", resolve.AddressedTiles) + logger.Println("# of tile entries (after RLE): ", len(resolve.Entries)) + logger.Println("# of tile contents: ", resolve.NumContents()) - header.AddressedTilesCount = resolver.AddressedTiles - header.TileEntriesCount = uint64(len(resolver.Entries)) - header.TileContentsCount = resolver.NumContents() + header.AddressedTilesCount = resolve.AddressedTiles + header.TileEntriesCount = uint64(len(resolve.Entries)) + header.TileContentsCount = resolve.NumContents() // assemble the final file outfile, err := os.Create(output) @@ -377,7 +375,7 @@ func finalize(logger *log.Logger, 
resolver *Resolver, header HeaderV3, tmpfile * return fmt.Errorf("Failed to create %s, %w", output, err) } - rootBytes, leavesBytes, numLeaves := optimizeDirectories(resolver.Entries, 16384-HeaderV3LenBytes) + rootBytes, leavesBytes, numLeaves := optimizeDirectories(resolve.Entries, 16384-HeaderV3LenBytes) if numLeaves > 0 { logger.Println("Root dir bytes: ", len(rootBytes)) @@ -385,10 +383,10 @@ func finalize(logger *log.Logger, resolver *Resolver, header HeaderV3, tmpfile * logger.Println("Num leaf dirs: ", numLeaves) logger.Println("Total dir bytes: ", len(rootBytes)+len(leavesBytes)) logger.Println("Average leaf dir bytes: ", len(leavesBytes)/numLeaves) - logger.Printf("Average bytes per addressed tile: %.2f\n", float64(len(rootBytes)+len(leavesBytes))/float64(resolver.AddressedTiles)) + logger.Printf("Average bytes per addressed tile: %.2f\n", float64(len(rootBytes)+len(leavesBytes))/float64(resolve.AddressedTiles)) } else { logger.Println("Total dir bytes: ", len(rootBytes)) - logger.Printf("Average bytes per addressed tile: %.2f\n", float64(len(rootBytes))/float64(resolver.AddressedTiles)) + logger.Printf("Average bytes per addressed tile: %.2f\n", float64(len(rootBytes))/float64(resolve.AddressedTiles)) } var metadataBytes []byte @@ -404,7 +402,7 @@ func finalize(logger *log.Logger, resolver *Resolver, header HeaderV3, tmpfile * metadataBytes = b.Bytes() } - setZoomCenterDefaults(&header, resolver.Entries) + setZoomCenterDefaults(&header, resolve.Entries) header.Clustered = true header.InternalCompression = Gzip @@ -419,7 +417,7 @@ func finalize(logger *log.Logger, resolver *Resolver, header HeaderV3, tmpfile * header.LeafDirectoryOffset = header.MetadataOffset + header.MetadataLength header.LeafDirectoryLength = uint64(len(leavesBytes)) header.TileDataOffset = header.LeafDirectoryOffset + header.LeafDirectoryLength - header.TileDataLength = resolver.Offset + header.TileDataLength = resolve.Offset headerBytes := serializeHeader(header) diff --git a/pmtiles/convert_test.go b/pmtiles/convert_test.go index dbb2eee..38d581b 100644 --- a/pmtiles/convert_test.go +++ b/pmtiles/convert_test.go @@ -6,7 +6,7 @@ import ( ) func TestResolver(t *testing.T) { - resolver := NewResolver(true, true) + resolver := newResolver(true, true) resolver.AddTileIsNew(1, []byte{0x1, 0x2}) assert.Equal(t, 1, len(resolver.Entries)) resolver.AddTileIsNew(2, []byte{0x1, 0x3}) diff --git a/pmtiles/directory.go b/pmtiles/directory.go index 3a7463a..5820810 100644 --- a/pmtiles/directory.go +++ b/pmtiles/directory.go @@ -8,6 +8,7 @@ import ( "fmt" ) +// Compression is the compression algorithm applied to individual tiles (or none) type Compression uint8 const ( @@ -18,6 +19,7 @@ const ( Zstd = 4 ) +// TileType is the format of individual tile contents in the archive. type TileType uint8 const ( @@ -29,8 +31,10 @@ const ( Avif = 5 ) +// HeaderV3LenBytes is the fixed-size binary header size. const HeaderV3LenBytes = 127 +// HeaderV3 is a binary header for PMTiles specification version 3. type HeaderV3 struct { SpecVersion uint8 RootOffset uint64 @@ -104,6 +108,7 @@ func headerContentEncoding(compression Compression) (string, bool) { } } +// EntryV3 is an entry in a PMTiles spec version 3 directory. 
type EntryV3 struct { TileID uint64 Offset uint64 diff --git a/pmtiles/extract.go b/pmtiles/extract.go index 22978f4..0c14bd4 100644 --- a/pmtiles/extract.go +++ b/pmtiles/extract.go @@ -20,14 +20,13 @@ import ( "time" ) -type SrcDstRange struct { +type srcDstRange struct { SrcOffset uint64 DstOffset uint64 Length uint64 } -// given a bitmap and a set of existing entries, -// create only relevant entries +// RelevantEntries finds the intersection of a bitmap and a directory // return sorted slice of entries, and slice of all leaf entries // any runlengths > 1 will be "trimmed" to the relevance bitmap func RelevantEntries(bitmap *roaring64.Bitmap, maxzoom uint8, dir []EntryV3) ([]EntryV3, []EntryV3) { @@ -91,10 +90,10 @@ func RelevantEntries(bitmap *roaring64.Bitmap, maxzoom uint8, dir []EntryV3) ([] // * The # of addressed tiles (sum over RunLength) // * # the number of unique offsets ("tile contents") // - this might not be the last SrcDstRange new_offset + length, it's the highest offset (can be in the middle) -func ReencodeEntries(dir []EntryV3) ([]EntryV3, []SrcDstRange, uint64, uint64, uint64) { +func reencodeEntries(dir []EntryV3) ([]EntryV3, []srcDstRange, uint64, uint64, uint64) { reencoded := make([]EntryV3, 0, len(dir)) seenOffsets := make(map[uint64]uint64) - ranges := make([]SrcDstRange, 0) + ranges := make([]srcDstRange, 0) addressedTiles := uint64(0) dstOffset := uint64(0) @@ -107,10 +106,10 @@ func ReencodeEntries(dir []EntryV3) ([]EntryV3, []SrcDstRange, uint64, uint64, u if lastRange.SrcOffset+lastRange.Length == entry.Offset { ranges[len(ranges)-1].Length += uint64(entry.Length) } else { - ranges = append(ranges, SrcDstRange{entry.Offset, dstOffset, uint64(entry.Length)}) + ranges = append(ranges, srcDstRange{entry.Offset, dstOffset, uint64(entry.Length)}) } } else { - ranges = append(ranges, SrcDstRange{entry.Offset, dstOffset, uint64(entry.Length)}) + ranges = append(ranges, srcDstRange{entry.Offset, dstOffset, uint64(entry.Length)}) } reencoded = append(reencoded, EntryV3{entry.TileID, dstOffset, entry.Length, entry.RunLength}) @@ -124,24 +123,24 @@ func ReencodeEntries(dir []EntryV3) ([]EntryV3, []SrcDstRange, uint64, uint64, u } // "want the next N bytes, then discard N bytes" -type CopyDiscard struct { +type copyDiscard struct { Wanted uint64 Discard uint64 } -type OverfetchRange struct { - Rng SrcDstRange - CopyDiscards []CopyDiscard +type overfetchRange struct { + Rng srcDstRange + CopyDiscards []copyDiscard } // A single request, where only some of the bytes // in the requested range we want -type OverfetchListItem struct { - Rng SrcDstRange - CopyDiscards []CopyDiscard +type overfetchListItem struct { + Rng srcDstRange + CopyDiscards []copyDiscard BytesToNext uint64 // the "priority" - prev *OverfetchListItem - next *OverfetchListItem + prev *overfetchListItem + next *overfetchListItem index int } @@ -157,10 +156,10 @@ type OverfetchListItem struct { // input ranges are merged in order of smallest byte distance to next range // until the overfetch budget is consumed. 
// The slice is sorted by Length
-func MergeRanges(ranges []SrcDstRange, overfetch float32) (*list.List, uint64) {
+func MergeRanges(ranges []srcDstRange, overfetch float32) (*list.List, uint64) {
 	totalSize := 0
 
-	shortest := make([]*OverfetchListItem, len(ranges))
+	shortest := make([]*overfetchListItem, len(ranges))
 
 	// create the heap items
 	for i, rng := range ranges {
@@ -174,10 +173,10 @@ func MergeRanges(ranges []SrcDstRange, overfetch float32) (*list.List, uint64) {
 			}
 		}
 
-		shortest[i] = &OverfetchListItem{
+		shortest[i] = &overfetchListItem{
 			Rng:          rng,
 			BytesToNext:  bytesToNext,
-			CopyDiscards: []CopyDiscard{{uint64(rng.Length), 0}},
+			CopyDiscards: []copyDiscard{{uint64(rng.Length), 0}},
 		}
 		totalSize += int(rng.Length)
 	}
@@ -205,7 +204,7 @@ func MergeRanges(ranges []SrcDstRange, overfetch float32) (*list.List, uint64) {
 			// merge this item into item.next
 			newLength := item.Rng.Length + item.BytesToNext + item.next.Rng.Length
-			item.next.Rng = SrcDstRange{item.Rng.SrcOffset, item.Rng.DstOffset, newLength}
+			item.next.Rng = srcDstRange{item.Rng.SrcOffset, item.Rng.DstOffset, newLength}
 			item.next.prev = item.prev
 			if item.prev != nil {
 				item.prev.next = item.next
@@ -225,7 +224,7 @@ func MergeRanges(ranges []SrcDstRange, overfetch float32) (*list.List, uint64) {
 	totalBytes := uint64(0)
 	result := list.New()
 	for _, x := range shortest {
-		result.PushBack(OverfetchRange{
+		result.PushBack(overfetchRange{
 			Rng:          x.Rng,
 			CopyDiscards: x.CopyDiscards,
 		})
@@ -235,21 +234,22 @@ func MergeRanges(ranges []SrcDstRange, overfetch float32) (*list.List, uint64) {
 	return result, totalBytes
 }
 
+// Extract a smaller archive from a local or remote archive.
 // 1. Get the root directory (check that it is clustered)
 // 2. Turn the input geometry into a relevance bitmap (using min(maxzoom, headermaxzoom))
 // 3. Get all relevant level 1 directories (if any)
 // 4. Get all relevant level 2 directories (usually none)
 // 5. With the existing directory + relevance bitmap, construct
-// * a new total directory (root + leaf directories)
-// * a sorted slice of byte ranges in the old file required
+//   - a new total directory (root + leaf directories)
+//   - a sorted slice of byte ranges in the old file required
+//
 // 6. Merge requested ranges using an overfetch parametter
 // 7. write the modified header
 // 8. write the root directory.
 // 9. get and write the metadata.
 // 10. write the leaf directories (if any)
 // 11. Get all tiles, and write directly to the output.
-
-func Extract(logger *log.Logger, bucketURL string, key string, minzoom int8, maxzoom int8, regionFile string, bbox string, output string, downloadThreads int, overfetch float32, dryRun bool) error {
+func Extract(_ *log.Logger, bucketURL string, key string, minzoom int8, maxzoom int8, regionFile string, bbox string, output string, downloadThreads int, overfetch float32, dryRun bool) error {
 	// 1. fetch the header
 	start := time.Now()
 	ctx := context.Background()
@@ -365,9 +365,9 @@ func Extract(logger *log.Logger, bucketURL string, key string, minzoom int8, max
 
 	// 4.
get all relevant leaf entries - leafRanges := make([]SrcDstRange, 0) + leafRanges := make([]srcDstRange, 0) for _, leaf := range leaves { - leafRanges = append(leafRanges, SrcDstRange{header.LeafDirectoryOffset + leaf.Offset, 0, uint64(leaf.Length)}) + leafRanges = append(leafRanges, srcDstRange{header.LeafDirectoryOffset + leaf.Offset, 0, uint64(leaf.Length)}) } overfetchLeaves, _ := MergeRanges(leafRanges, overfetch) @@ -378,7 +378,7 @@ func Extract(logger *log.Logger, bucketURL string, key string, minzoom int8, max if overfetchLeaves.Len() == 0 { break } - or := overfetchLeaves.Remove(overfetchLeaves.Front()).(OverfetchRange) + or := overfetchLeaves.Remove(overfetchLeaves.Front()).(overfetchRange) chunkReader, err := bucket.NewRangeReader(ctx, key, int64(or.Rng.SrcOffset), int64(or.Rng.Length)) if err != nil { @@ -416,7 +416,7 @@ func Extract(logger *log.Logger, bucketURL string, key string, minzoom int8, max // 6. create the new header and chunk list // we now need to re-encode this entry list using cumulative offsets - reencoded, tileParts, tiledataLength, addressedTiles, tileContents := ReencodeEntries(tileEntries) + reencoded, tileParts, tiledataLength, addressedTiles, tileContents := reencodeEntries(tileEntries) overfetchRanges, totalBytes := MergeRanges(tileParts, overfetch) @@ -493,7 +493,7 @@ func Extract(logger *log.Logger, bucketURL string, key string, minzoom int8, max var mu sync.Mutex - downloadPart := func(or OverfetchRange) error { + downloadPart := func(or overfetchRange) error { tileReader, err := bucket.NewRangeReader(ctx, key, int64(sourceTileDataOffset+or.Rng.SrcOffset), int64(or.Rng.Length)) if err != nil { return err @@ -522,16 +522,16 @@ func Extract(logger *log.Logger, bucketURL string, key string, minzoom int8, max workBack := (i == 0 && downloadThreads > 1) errs.Go(func() error { done := false - var or OverfetchRange + var or overfetchRange for { mu.Lock() if overfetchRanges.Len() == 0 { done = true } else { if workBack { - or = overfetchRanges.Remove(overfetchRanges.Back()).(OverfetchRange) + or = overfetchRanges.Remove(overfetchRanges.Back()).(overfetchRange) } else { - or = overfetchRanges.Remove(overfetchRanges.Front()).(OverfetchRange) + or = overfetchRanges.Remove(overfetchRanges.Front()).(overfetchRange) } } mu.Unlock() @@ -543,8 +543,6 @@ func Extract(logger *log.Logger, bucketURL string, key string, minzoom int8, max return err } } - - return nil }) } diff --git a/pmtiles/extract_test.go b/pmtiles/extract_test.go index 9eacb36..2801041 100644 --- a/pmtiles/extract_test.go +++ b/pmtiles/extract_test.go @@ -83,7 +83,7 @@ func TestReencodeEntries(t *testing.T) { entries = append(entries, EntryV3{0, 400, 10, 1}) entries = append(entries, EntryV3{1, 500, 20, 2}) - reencoded, result, datalen, addressed, contents := ReencodeEntries(entries) + reencoded, result, datalen, addressed, contents := reencodeEntries(entries) assert.Equal(t, 2, len(result)) assert.Equal(t, result[0].SrcOffset, uint64(400)) @@ -106,7 +106,7 @@ func TestReencodeEntriesDuplicate(t *testing.T) { entries = append(entries, EntryV3{1, 500, 20, 1}) entries = append(entries, EntryV3{2, 400, 10, 1}) - reencoded, result, datalen, addressed, contents := ReencodeEntries(entries) + reencoded, result, datalen, addressed, contents := reencodeEntries(entries) assert.Equal(t, 2, len(result)) assert.Equal(t, result[0].SrcOffset, uint64(400)) @@ -129,7 +129,7 @@ func TestReencodeContiguous(t *testing.T) { entries = append(entries, EntryV3{0, 400, 10, 0}) entries = append(entries, EntryV3{1, 410, 20, 0}) 
- _, result, _, _, _ := ReencodeEntries(entries) + _, result, _, _, _ := reencodeEntries(entries) assert.Equal(t, len(result), 1) assert.Equal(t, result[0].SrcOffset, uint64(400)) @@ -137,32 +137,32 @@ func TestReencodeContiguous(t *testing.T) { } func TestMergeRanges(t *testing.T) { - ranges := make([]SrcDstRange, 0) - ranges = append(ranges, SrcDstRange{0, 0, 50}) - ranges = append(ranges, SrcDstRange{60, 60, 60}) + ranges := make([]srcDstRange, 0) + ranges = append(ranges, srcDstRange{0, 0, 50}) + ranges = append(ranges, srcDstRange{60, 60, 60}) result, totalTransferBytes := MergeRanges(ranges, 0.1) assert.Equal(t, 1, result.Len()) assert.Equal(t, uint64(120), totalTransferBytes) - front := result.Front().Value.(OverfetchRange) - assert.Equal(t, SrcDstRange{0, 0, 120}, front.Rng) + front := result.Front().Value.(overfetchRange) + assert.Equal(t, srcDstRange{0, 0, 120}, front.Rng) assert.Equal(t, 2, len(front.CopyDiscards)) - assert.Equal(t, CopyDiscard{50, 10}, front.CopyDiscards[0]) - assert.Equal(t, CopyDiscard{60, 0}, front.CopyDiscards[1]) + assert.Equal(t, copyDiscard{50, 10}, front.CopyDiscards[0]) + assert.Equal(t, copyDiscard{60, 0}, front.CopyDiscards[1]) } func TestMergeRangesMultiple(t *testing.T) { - ranges := make([]SrcDstRange, 0) - ranges = append(ranges, SrcDstRange{0, 0, 50}) - ranges = append(ranges, SrcDstRange{60, 60, 10}) - ranges = append(ranges, SrcDstRange{80, 80, 10}) + ranges := make([]srcDstRange, 0) + ranges = append(ranges, srcDstRange{0, 0, 50}) + ranges = append(ranges, srcDstRange{60, 60, 10}) + ranges = append(ranges, srcDstRange{80, 80, 10}) result, totalTransferBytes := MergeRanges(ranges, 0.3) - front := result.Front().Value.(OverfetchRange) + front := result.Front().Value.(overfetchRange) assert.Equal(t, uint64(90), totalTransferBytes) assert.Equal(t, 1, result.Len()) - assert.Equal(t, SrcDstRange{0, 0, 90}, front.Rng) + assert.Equal(t, srcDstRange{0, 0, 90}, front.Rng) assert.Equal(t, 3, len(front.CopyDiscards)) fmt.Println(result) } diff --git a/pmtiles/makesync.go b/pmtiles/makesync.go index f9e6a44..d8f44c4 100644 --- a/pmtiles/makesync.go +++ b/pmtiles/makesync.go @@ -21,25 +21,26 @@ import ( "time" ) -type Block struct { +type block struct { Index uint64 // starts at 0 Start uint64 // the start tileID Offset uint64 // the offset in the file, in bytes Length uint64 // the length, in bytes } -type Result struct { - Block Block +type result struct { + Block block Hash uint64 } -type Syncline struct { +type syncline struct { Offset uint64 Length uint64 Hash uint64 } -func Makesync(logger *log.Logger, cliVersion string, file string, blockSizeKb int, checksum string) error { +// Makesync generates a syncfile for an archive on disk. 
(experimental) +func Makesync(_ *log.Logger, cliVersion string, file string, blockSizeKb int, checksum string) error { ctx := context.Background() start := time.Now() @@ -128,14 +129,14 @@ func Makesync(logger *log.Logger, cliVersion string, file string, blockSizeKb in "writing syncfile", ) - var current Block + var current block - tasks := make(chan Block, 1000) + tasks := make(chan block, 1000) var wg sync.WaitGroup var mu sync.Mutex - synclines := make(map[uint64]Syncline) + synclines := make(map[uint64]syncline) errs, _ := errgroup.WithContext(ctx) // workers @@ -156,7 +157,7 @@ func Makesync(logger *log.Logger, cliVersion string, file string, blockSizeKb in sum64 := hasher.Sum64() mu.Lock() - synclines[block.Start] = Syncline{block.Offset, block.Length, sum64} + synclines[block.Start] = syncline{block.Offset, block.Length, sum64} mu.Unlock() hasher.Reset() @@ -182,7 +183,7 @@ func Makesync(logger *log.Logger, cliVersion string, file string, blockSizeKb in panic("Invalid clustering of archive detected - check with verify") } else { if current.Length+uint64(e.Length) > blockSizeBytes { - tasks <- Block{current.Index, current.Start, current.Offset, current.Length} + tasks <- block{current.Index, current.Start, current.Offset, current.Length} blocks++ currentIndex++ @@ -196,7 +197,7 @@ func Makesync(logger *log.Logger, cliVersion string, file string, blockSizeKb in } }) - tasks <- Block{current.Index, current.Start, current.Offset, current.Length} + tasks <- block{current.Index, current.Start, current.Offset, current.Length} blocks++ close(tasks) @@ -218,11 +219,12 @@ func Makesync(logger *log.Logger, cliVersion string, file string, blockSizeKb in return nil } -func Sync(logger *log.Logger, file string, syncfile string) error { +// Sync calculates the diff between an archive on disk and a syncfile. 
(experimental) +func Sync(_ *log.Logger, file string, syncfile string) error { start := time.Now() totalRemoteBytes := uint64(0) - byStartID := make(map[uint64]Syncline) + byStartID := make(map[uint64]syncline) sync, err := os.Open(syncfile) if err != nil { @@ -242,7 +244,7 @@ func Sync(logger *log.Logger, file string, syncfile string) error { length, _ := strconv.ParseUint(parts[2], 10, 64) totalRemoteBytes += length hash, _ := strconv.ParseUint(parts[3], 16, 64) - byStartID[startID] = Syncline{offset, length, hash} + byStartID[startID] = syncline{offset, length, hash} } // open the existing archive diff --git a/pmtiles/readerv2.go b/pmtiles/readerv2.go index c74aff9..efcf222 100644 --- a/pmtiles/readerv2.go +++ b/pmtiles/readerv2.go @@ -6,24 +6,25 @@ import ( "math" ) +// Zxy coordinates of a single tile (zoom, column, row) type Zxy struct { Z uint8 X uint32 Y uint32 } -type Range struct { +type rangeV2 struct { Offset uint64 Length uint64 } -type DirectoryV2 struct { - Entries map[Zxy]Range +type directoryV2 struct { + Entries map[Zxy]rangeV2 LeafZ uint8 - Leaves map[Zxy]Range + Leaves map[Zxy]rangeV2 } -func (d DirectoryV2) SizeBytes() int { +func (d directoryV2) SizeBytes() int { return 21*(len(d.Entries)+len(d.Leaves)) + 1 } @@ -35,14 +36,14 @@ func readUint48(b []byte) uint64 { return (uint64(binary.LittleEndian.Uint32(b[2:6])) << 16) + uint64(uint32(binary.LittleEndian.Uint16(b[0:2]))) } -func GetParentTile(tile Zxy, level uint8) Zxy { +func getParentTile(tile Zxy, level uint8) Zxy { tileDiff := tile.Z - level x := math.Floor(float64(tile.X / (1 << tileDiff))) y := math.Floor(float64(tile.Y / (1 << tileDiff))) return Zxy{Z: level, X: uint32(x), Y: uint32(y)} } -func ParseEntryV2(b []byte) (uint8, Zxy, Range) { +func parseEntryV2(b []byte) (uint8, Zxy, rangeV2) { zRaw := b[0] xRaw := b[1:4] yRaw := b[4:7] @@ -53,18 +54,17 @@ func ParseEntryV2(b []byte) (uint8, Zxy, Range) { offset := readUint48(offsetRaw) length := uint64(binary.LittleEndian.Uint32(lengthRaw)) if zRaw&0b10000000 == 0 { - return 0, Zxy{Z: uint8(zRaw), X: uint32(x), Y: uint32(y)}, Range{Offset: offset, Length: length} - } else { - leafZ := zRaw & 0b01111111 - return leafZ, Zxy{Z: leafZ, X: uint32(x), Y: uint32(y)}, Range{Offset: offset, Length: length} + return 0, Zxy{Z: uint8(zRaw), X: uint32(x), Y: uint32(y)}, rangeV2{Offset: offset, Length: length} } + leafZ := zRaw & 0b01111111 + return leafZ, Zxy{Z: leafZ, X: uint32(x), Y: uint32(y)}, rangeV2{Offset: offset, Length: length} } -func ParseDirectoryV2(dirBytes []byte) DirectoryV2 { - theDir := DirectoryV2{Entries: make(map[Zxy]Range), Leaves: make(map[Zxy]Range)} +func parseDirectoryV2(dirBytes []byte) directoryV2 { + theDir := directoryV2{Entries: make(map[Zxy]rangeV2), Leaves: make(map[Zxy]rangeV2)} var maxz uint8 for i := 0; i < len(dirBytes)/17; i++ { - leafZ, zxy, rng := ParseEntryV2(dirBytes[i*17 : i*17+17]) + leafZ, zxy, rng := parseEntryV2(dirBytes[i*17 : i*17+17]) if leafZ == 0 { theDir.Entries[zxy] = rng } else { @@ -76,7 +76,7 @@ func ParseDirectoryV2(dirBytes []byte) DirectoryV2 { return theDir } -func ParseHeaderV2(reader io.Reader) ([]byte, DirectoryV2) { +func parseHeaderV2(reader io.Reader) ([]byte, directoryV2) { magicNum := make([]byte, 2) io.ReadFull(reader, magicNum) version := make([]byte, 2) @@ -91,6 +91,6 @@ func ParseHeaderV2(reader io.Reader) ([]byte, DirectoryV2) { io.ReadFull(reader, metadataBytes) dirBytes := make([]byte, rootDirLen*17) io.ReadFull(reader, dirBytes) - theDir := ParseDirectoryV2(dirBytes) + theDir := 
parseDirectoryV2(dirBytes) return metadataBytes, theDir } diff --git a/pmtiles/readerv2_test.go b/pmtiles/readerv2_test.go index 5b802ec..671971a 100644 --- a/pmtiles/readerv2_test.go +++ b/pmtiles/readerv2_test.go @@ -21,5 +21,5 @@ func TestUint48(t *testing.T) { func TestGetParentTile(t *testing.T) { a := Zxy{Z: 8, X: 125, Y: 69} - assert.Equal(t, Zxy{Z: 7, X: 62, Y: 34}, GetParentTile(a, 7)) + assert.Equal(t, Zxy{Z: 7, X: 62, Y: 34}, getParentTile(a, 7)) } diff --git a/pmtiles/region.go b/pmtiles/region.go index 17bf7fc..68cfe4e 100644 --- a/pmtiles/region.go +++ b/pmtiles/region.go @@ -8,6 +8,7 @@ import ( "strings" ) +// BboxRegion parses a bbox string into an orb.MultiPolygon region. func BboxRegion(bbox string) (orb.MultiPolygon, error) { parts := strings.Split(bbox, ",") minLon, err := strconv.ParseFloat(parts[0], 64) @@ -29,6 +30,7 @@ func BboxRegion(bbox string) (orb.MultiPolygon, error) { return orb.MultiPolygon{{{{minLon, maxLat}, {maxLon, maxLat}, {maxLon, minLat}, {minLon, minLat}, {minLon, maxLat}}}}, nil } +// UnmarshalRegion parses JSON bytes into an orb.MultiPolygon region. func UnmarshalRegion(data []byte) (orb.MultiPolygon, error) { fc, err := geojson.UnmarshalFeatureCollection(data) diff --git a/pmtiles/server.go b/pmtiles/server.go index 80ca3d7..37cea67 100644 --- a/pmtiles/server.go +++ b/pmtiles/server.go @@ -15,33 +15,34 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -type CacheKey struct { +type cacheKey struct { name string offset uint64 // is 0 for header length uint64 // is 0 for header } -type Request struct { - key CacheKey - value chan CachedValue +type request struct { + key cacheKey + value chan cachedValue } -type CachedValue struct { +type cachedValue struct { header HeaderV3 directory []EntryV3 etag string ok bool } -type Response struct { - key CacheKey - value CachedValue +type response struct { + key cacheKey + value cachedValue size int ok bool } +// Server is an HTTP server for tiles and metadata. type Server struct { - reqs chan Request + reqs chan request bucket Bucket logger *log.Logger cacheSize int @@ -49,6 +50,7 @@ type Server struct { publicURL string } +// NewServer creates a new pmtiles HTTP server. func NewServer(bucketURL string, prefix string, logger *log.Logger, cacheSize int, cors string, publicURL string) (*Server, error) { ctx := context.Background() @@ -68,9 +70,10 @@ func NewServer(bucketURL string, prefix string, logger *log.Logger, cacheSize in return NewServerWithBucket(bucket, prefix, logger, cacheSize, cors, publicURL) } -func NewServerWithBucket(bucket Bucket, prefix string, logger *log.Logger, cacheSize int, cors string, publicURL string) (*Server, error) { +// NewServerWithBucket creates a new HTTP server for a gocloud Bucket. +func NewServerWithBucket(bucket Bucket, _ string, logger *log.Logger, cacheSize int, cors string, publicURL string) (*Server, error) { - reqs := make(chan Request, 8) + reqs := make(chan request, 8) l := &Server{ reqs: reqs, @@ -91,12 +94,13 @@ func register[K prometheus.Collector](server *Server, metric K) K { return metric } +// Start the server HTTP listener. 
func (server *Server) Start() { go func() { - cache := make(map[CacheKey]*list.Element) - inflight := make(map[CacheKey][]Request) - resps := make(chan Response, 8) + cache := make(map[cacheKey]*list.Element) + inflight := make(map[cacheKey][]request) + resps := make(chan response, 8) evictList := list.New() totalSize := 0 ctx := context.Background() @@ -114,13 +118,13 @@ func (server *Server) Start() { key := req.key if val, ok := cache[key]; ok { evictList.MoveToFront(val) - req.value <- val.Value.(*Response).value + req.value <- val.Value.(*response).value } else if _, ok := inflight[key]; ok { inflight[key] = append(inflight[key], req) } else { - inflight[key] = []Request{req} + inflight[key] = []request{req} go func() { - var result CachedValue + var result cachedValue isRoot := (key.offset == 0 && key.length == 0) offset := int64(key.offset) @@ -137,7 +141,7 @@ func (server *Server) Start() { // TODO: store away ETag if err != nil { ok = false - resps <- Response{key: key, value: result} + resps <- response{key: key, value: result} server.logger.Printf("failed to fetch %s %d-%d, %v", key.name, key.offset, key.length, err) return } @@ -145,7 +149,7 @@ func (server *Server) Start() { b, err := io.ReadAll(r) if err != nil { ok = false - resps <- Response{key: key, value: result} + resps <- response{key: key, value: result} server.logger.Printf("failed to fetch %s %d-%d, %v", key.name, key.offset, key.length, err) return } @@ -159,17 +163,17 @@ func (server *Server) Start() { // populate the root first before header rootEntries := deserializeEntries(bytes.NewBuffer(b[header.RootOffset : header.RootOffset+header.RootLength])) - result2 := CachedValue{directory: rootEntries, ok: true} + result2 := cachedValue{directory: rootEntries, ok: true} - rootKey := CacheKey{name: key.name, offset: header.RootOffset, length: header.RootLength} - resps <- Response{key: rootKey, value: result2, size: 24 * len(rootEntries), ok: true} + rootKey := cacheKey{name: key.name, offset: header.RootOffset, length: header.RootLength} + resps <- response{key: rootKey, value: result2, size: 24 * len(rootEntries), ok: true} - result = CachedValue{header: header, ok: true} - resps <- Response{key: key, value: result, size: 127, ok: true} + result = cachedValue{header: header, ok: true} + resps <- response{key: key, value: result, size: 127, ok: true} } else { directory := deserializeEntries(bytes.NewBuffer(b)) - result = CachedValue{directory: directory, ok: true} - resps <- Response{key: key, value: result, size: 24 * len(directory), ok: true} + result = cachedValue{directory: directory, ok: true} + resps <- response{key: key, value: result, size: 24 * len(directory), ok: true} } server.logger.Printf("fetched %s %d-%d", key.name, key.offset, key.length) @@ -196,7 +200,7 @@ func (server *Server) Start() { ent := evictList.Back() if ent != nil { evictList.Remove(ent) - kv := ent.Value.(*Response) + kv := ent.Value.(*response) delete(cache, kv.key) totalSize -= kv.size } @@ -208,19 +212,19 @@ func (server *Server) Start() { }() } -func (server *Server) getHeaderMetadata(ctx context.Context, name string) (error, bool, HeaderV3, []byte) { - rootReq := Request{key: CacheKey{name: name, offset: 0, length: 0}, value: make(chan CachedValue, 1)} +func (server *Server) getHeaderMetadata(ctx context.Context, name string) (bool, HeaderV3, []byte, error) { + rootReq := request{key: cacheKey{name: name, offset: 0, length: 0}, value: make(chan cachedValue, 1)} server.reqs <- rootReq rootValue := <-rootReq.value header := 
rootValue.header if !rootValue.ok { - return nil, false, HeaderV3{}, nil + return false, HeaderV3{}, nil, nil } r, err := server.bucket.NewRangeReader(ctx, name+".pmtiles", int64(header.MetadataOffset), int64(header.MetadataLength)) if err != nil { - return nil, false, HeaderV3{}, nil + return false, HeaderV3{}, nil, nil } defer r.Close() @@ -232,14 +236,14 @@ func (server *Server) getHeaderMetadata(ctx context.Context, name string) (error } else if header.InternalCompression == NoCompression { metadataBytes, err = io.ReadAll(r) } else { - return errors.New("Unknown compression"), true, HeaderV3{}, nil + return true, HeaderV3{}, nil, errors.New("unknown compression") } - return nil, true, header, metadataBytes + return true, header, metadataBytes, nil } func (server *Server) getTileJSON(ctx context.Context, httpHeaders map[string]string, name string) (int, map[string]string, []byte) { - err, found, header, metadataBytes := server.getHeaderMetadata(ctx, name) + found, header, metadataBytes, err := server.getHeaderMetadata(ctx, name) if err != nil { return 500, httpHeaders, []byte("I/O Error") @@ -256,7 +260,7 @@ func (server *Server) getTileJSON(ctx context.Context, httpHeaders map[string]st return 501, httpHeaders, []byte("PUBLIC_URL must be set for TileJSON") } - tilejsonBytes, err := CreateTilejson(header, metadataBytes, server.publicURL+"/"+name) + tilejsonBytes, err := CreateTileJSON(header, metadataBytes, server.publicURL+"/"+name) if err != nil { return 500, httpHeaders, []byte("Error generating tilejson") } @@ -267,7 +271,7 @@ func (server *Server) getTileJSON(ctx context.Context, httpHeaders map[string]st } func (server *Server) getMetadata(ctx context.Context, httpHeaders map[string]string, name string) (int, map[string]string, []byte) { - err, found, _, metadataBytes := server.getHeaderMetadata(ctx, name) + found, _, metadataBytes, err := server.getHeaderMetadata(ctx, name) if err != nil { return 500, httpHeaders, []byte("I/O Error") @@ -282,7 +286,7 @@ func (server *Server) getMetadata(ctx context.Context, httpHeaders map[string]st } func (server *Server) getTile(ctx context.Context, httpHeaders map[string]string, name string, z uint8, x uint32, y uint32, ext string) (int, map[string]string, []byte) { - rootReq := Request{key: CacheKey{name: name, offset: 0, length: 0}, value: make(chan CachedValue, 1)} + rootReq := request{key: cacheKey{name: name, offset: 0, length: 0}, value: make(chan cachedValue, 1)} server.reqs <- rootReq // https://golang.org/doc/faq#atomic_maps @@ -324,7 +328,7 @@ func (server *Server) getTile(ctx context.Context, httpHeaders map[string]string dirOffset, dirLen := header.RootOffset, header.RootLength for depth := 0; depth <= 3; depth++ { - dirReq := Request{key: CacheKey{name: name, offset: dirOffset, length: dirLen}, value: make(chan CachedValue, 1)} + dirReq := request{key: cacheKey{name: name, offset: dirOffset, length: dirLen}, value: make(chan cachedValue, 1)} server.reqs <- dirReq dirValue := <-dirReq.value directory := dirValue.directory @@ -350,12 +354,10 @@ func (server *Server) getTile(ctx context.Context, httpHeaders map[string]string httpHeaders["Content-Encoding"] = headerVal } return 200, httpHeaders, b - } else { - dirOffset = header.LeafDirectoryOffset + entry.Offset - dirLen = uint64(entry.Length) } + dirOffset = header.LeafDirectoryOffset + entry.Offset + dirLen = uint64(entry.Length) } - return 204, httpHeaders, nil } @@ -391,6 +393,8 @@ func parseMetadataPath(path string) (bool, string) { return false, "" } +// Get a response for the 
given path. +// Return status code, HTTP headers, and body. func (server *Server) Get(ctx context.Context, path string) (int, map[string]string, []byte) { httpHeaders := make(map[string]string) if len(server.cors) > 0 { diff --git a/pmtiles/show.go b/pmtiles/show.go index 8c2f861..1e2090c 100644 --- a/pmtiles/show.go +++ b/pmtiles/show.go @@ -13,7 +13,8 @@ import ( "os" ) -func Show(logger *log.Logger, bucketURL string, key string, showMetadataOnly bool, showTilejson bool, publicURL string, showTile bool, z int, x int, y int) error { +// Show prints detailed information about an archive. +func Show(_ *log.Logger, bucketURL string, key string, showMetadataOnly bool, showTilejson bool, publicURL string, showTile bool, z int, x int, y int) error { ctx := context.Background() bucketURL, key, err := NormalizeBucketKey(bucketURL, "", key) @@ -100,7 +101,7 @@ func Show(logger *log.Logger, bucketURL string, key string, showMetadataOnly boo // Stdout is being redirected. fmt.Fprintln(os.Stderr, "no --public-url specified; using placeholder tiles URL") } - tilejsonBytes, err := CreateTilejson(header, metadataBytes, publicURL) + tilejsonBytes, err := CreateTileJSON(header, metadataBytes, publicURL) if err != nil { return fmt.Errorf("Failed to create tilejson for %s, %w", key, err) } @@ -165,10 +166,9 @@ func Show(logger *log.Logger, bucketURL string, key string, showMetadataOnly boo } os.Stdout.Write(tileBytes) break - } else { - dirOffset = header.LeafDirectoryOffset + entry.Offset - dirLength = uint64(entry.Length) } + dirOffset = header.LeafDirectoryOffset + entry.Offset + dirLength = uint64(entry.Length) } else { fmt.Println("Tile not found in archive.") return nil diff --git a/pmtiles/tile_id.go b/pmtiles/tile_id.go index 66e259a..8a8fba8 100644 --- a/pmtiles/tile_id.go +++ b/pmtiles/tile_id.go @@ -27,6 +27,7 @@ func tOnLevel(z uint8, pos uint64) (uint8, uint32, uint32) { return uint8(z), uint32(tx), uint32(ty) } +// ZxyToID converts (Z,X,Y) tile coordinates to a Hilbert TileID. func ZxyToID(z uint8, x uint32, y uint32) uint64 { var acc uint64 var tz uint8 @@ -56,6 +57,7 @@ func ZxyToID(z uint8, x uint32, y uint32) uint64 { return acc + d } +// IDToZxy converts a Hilbert TileID to (Z,X,Y) tile coordinates. func IDToZxy(i uint64) (uint8, uint32, uint32) { var acc uint64 var z uint8 @@ -70,7 +72,7 @@ func IDToZxy(i uint64) (uint8, uint32, uint32) { } } -// fast parent ID calculation without converting to ZXY. +// ParentID efficiently finds a parent Hilbert TileID without converting to (Z,X,Y). func ParentID(i uint64) uint64 { var acc uint64 var lastAcc uint64 diff --git a/pmtiles/tilejson.go b/pmtiles/tilejson.go index 68ed25f..cf908e6 100644 --- a/pmtiles/tilejson.go +++ b/pmtiles/tilejson.go @@ -4,7 +4,8 @@ import ( "encoding/json" ) -func CreateTilejson(header HeaderV3, metadataBytes []byte, tileURL string) ([]byte, error) { +// CreateTileJSON returns TileJSON from an archive header+metadata and a given public tileURL. 
+func CreateTileJSON(header HeaderV3, metadataBytes []byte, tileURL string) ([]byte, error) {
 	var metadataMap map[string]interface{}
 	json.Unmarshal(metadataBytes, &metadataMap)
 
diff --git a/pmtiles/tilejson_test.go b/pmtiles/tilejson_test.go
index 108543b..496e6eb 100644
--- a/pmtiles/tilejson_test.go
+++ b/pmtiles/tilejson_test.go
@@ -31,7 +31,7 @@ func TestCreateTilejson(t *testing.T) {
 	tileURL := "https://example.com/foo"
 
 	// Call the function
-	tilejsonBytes, err := CreateTilejson(header, metadataBytes, tileURL)
+	tilejsonBytes, err := CreateTileJSON(header, metadataBytes, tileURL)
 
 	// Check for errors
 	if err != nil {
@@ -76,7 +76,7 @@ func TestCreateTilejsonOptionalFields(t *testing.T) {
 	{
 	}`)
 
-	tilejsonBytes, err := CreateTilejson(header, metadataBytes, "")
+	tilejsonBytes, err := CreateTileJSON(header, metadataBytes, "")
 
 	// Check for errors
 	if err != nil {
diff --git a/pmtiles/upload.go b/pmtiles/upload.go
index dfacea3..53703b3 100644
--- a/pmtiles/upload.go
+++ b/pmtiles/upload.go
@@ -10,6 +10,7 @@ import (
 	"os"
 )
 
+// Upload a pmtiles archive to a bucket.
 func Upload(logger *log.Logger, input string, bucket string, key string, maxConcurrency int) error {
 	ctx := context.Background()
 	b, err := blob.OpenBucket(ctx, bucket)
diff --git a/pmtiles/verify.go b/pmtiles/verify.go
index 76e28ed..96bfe64 100644
--- a/pmtiles/verify.go
+++ b/pmtiles/verify.go
@@ -11,7 +11,9 @@ import (
 	"time"
 )
 
-func Verify(logger *log.Logger, file string) error {
+// Verify that an archive's header statistics are correct,
+// and that tiles are properly ordered if clustered=true.
+func Verify(_ *log.Logger, file string) error {
 	start := time.Now()
 	ctx := context.Background()
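
For embedders, the surface that stays exported is NewServer, Server.Start, and Server.Get (now returning statusCode rather than status_code). A minimal standalone proxy in the spirit of the Caddy middleware above, as a sketch only: the bucket URL, cache size, and port are placeholders, the module import path is assumed, and the fileblob import is an assumption in case the library does not register the file:// scheme itself.

package main

import (
	"io"
	"log"
	"net/http"

	"github.com/protomaps/go-pmtiles/pmtiles" // import path assumed
	_ "gocloud.dev/blob/fileblob"             // registers the file:// bucket scheme (assumption)
)

func main() {
	logger := log.New(io.Discard, "", log.Ldate)

	// Serve only the root of the bucket, as the Caddy middleware does;
	// "file:///var/tiles" and the cache size of 64 MB are placeholder values.
	server, err := pmtiles.NewServer("file:///var/tiles", ".", logger, 64, "", "")
	if err != nil {
		log.Fatal(err)
	}
	server.Start()

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Get returns the status code, HTTP headers, and body for a Z/X/Y path.
		statusCode, headers, body := server.Get(r.Context(), r.URL.Path)
		for k, v := range headers {
			w.Header().Set(k, v)
		}
		w.WriteHeader(statusCode)
		w.Write(body)
	})
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}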
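The tile ID helpers documented in this patch (ZxyToID, IDToZxy, ParentID) also remain exported. A short sketch of the round trip, with the import path assumed as above; the expected parent value matches the getParentTile case in readerv2_test.go.

package main

import (
	"fmt"

	"github.com/protomaps/go-pmtiles/pmtiles" // import path assumed
)

func main() {
	// Round-trip a (Z,X,Y) coordinate through the Hilbert tile ID helpers.
	id := pmtiles.ZxyToID(8, 125, 69)
	z, x, y := pmtiles.IDToZxy(id)
	fmt.Println(z, x, y) // 8 125 69

	// ParentID operates on IDs directly, without converting back to (Z,X,Y);
	// the parent of (8,125,69) is (7,62,34).
	parent := pmtiles.ParentID(id)
	fmt.Println(pmtiles.IDToZxy(parent)) // 7 62 34
}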