diff --git a/cmd/neofs-ir/main.go b/cmd/neofs-ir/main.go index bc2ea713f2..26b365607f 100644 --- a/cmd/neofs-ir/main.go +++ b/cmd/neofs-ir/main.go @@ -119,7 +119,7 @@ func main() { err := srv.Shutdown() if err != nil { log.Debug("could not shutdown HTTP server", - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/cmd/neofs-node/config.go b/cmd/neofs-node/config.go index e6182a580b..4cffb64d67 100644 --- a/cmd/neofs-node/config.go +++ b/cmd/neofs-node/config.go @@ -665,7 +665,7 @@ func initBasics(c *cfg, key *keys.PrivateKey, stateStorage *state.PersistentStor fromSideChainBlock, err := stateStorage.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - c.log.Warn("can't get last processed side chain block number", zap.String("error", err.Error())) + c.log.Warn("can't get last processed side chain block number", zap.Error(err)) } cli, err := client.New(key, @@ -687,7 +687,7 @@ func initBasics(c *cfg, key *keys.PrivateKey, stateStorage *state.PersistentStor if err != nil { c.log.Info("failed to create neo RPC client", zap.Any("endpoints", addresses), - zap.String("error", err.Error()), + zap.Error(err), ) fatalOnErr(err) diff --git a/cmd/neofs-node/container.go b/cmd/neofs-node/container.go index 9933278e14..65e115438b 100644 --- a/cmd/neofs-node/container.go +++ b/cmd/neofs-node/container.go @@ -441,7 +441,7 @@ func (d *localStorageLoad) Iterate(f loadcontroller.UsedSpaceFilter, h loadcontr if err != nil { d.log.Debug("failed to calculate container size in storage engine", zap.Stringer("cid", idList[i]), - zap.String("error", err.Error()), + zap.Error(err), ) continue diff --git a/cmd/neofs-node/main.go b/cmd/neofs-node/main.go index 1d6184d647..8fe996a0ff 100644 --- a/cmd/neofs-node/main.go +++ b/cmd/neofs-node/main.go @@ -83,7 +83,7 @@ func preRunAndLog(c *cfg, name string, srv *httputil.Server) { ln, err := srv.Listen() if err != nil { c.log.Fatal(fmt.Sprintf("could not init %s service", name), - zap.String("error", err.Error()), + zap.Error(err), ) return } @@ -103,7 +103,7 @@ func preRunAndLog(c *cfg, name string, srv *httputil.Server) { err := srv.Shutdown() if err != nil { c.log.Debug(fmt.Sprintf("could not shutdown %s server", name), - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/cmd/neofs-node/netmap.go b/cmd/neofs-node/netmap.go index 3d188816b2..e03c2ecf5d 100644 --- a/cmd/neofs-node/netmap.go +++ b/cmd/neofs-node/netmap.go @@ -197,7 +197,7 @@ func initNetmapService(c *cfg) { if err != nil { c.log.Error("could not update node state on new epoch", zap.Uint64("epoch", e), - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -210,7 +210,7 @@ func initNetmapService(c *cfg) { err := makeNotaryDeposit(c) if err != nil { c.log.Error("could not make notary deposit", - zap.String("error", err.Error()), + zap.Error(err), ) } }) diff --git a/cmd/neofs-node/object.go b/cmd/neofs-node/object.go index 7b3dedbafc..48845eb791 100644 --- a/cmd/neofs-node/object.go +++ b/cmd/neofs-node/object.go @@ -62,7 +62,7 @@ func (c *cfg) MaxObjectSize() uint64 { sz, err := c.cfgNetmap.wrapper.MaxObjectSize() if err != nil { c.log.Error("could not get max object size value", - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -210,7 +210,7 @@ func initObjectService(c *cfg) { err := ls.Delete(addr) if err != nil { c.log.Warn("could not inhume mark redundant copy as garbage", - zap.String("error", err.Error()), + zap.Error(err), ) } }), @@ -494,7 +494,7 @@ func (c *reputationClientConstructor) Get(info 
coreclient.NodeInfo) (coreclient. } } else { c.log.Warn("could not get latest network map to overload the client", - zap.String("error", err.Error()), + zap.Error(err), ) } diff --git a/cmd/neofs-node/storage.go b/cmd/neofs-node/storage.go index 66b8be1002..f10a6ea57e 100644 --- a/cmd/neofs-node/storage.go +++ b/cmd/neofs-node/storage.go @@ -67,7 +67,7 @@ func initLocalStorage(c *cfg) { err := ls.Close() if err != nil { c.log.Info("storage engine closing failure", - zap.String("error", err.Error()), + zap.Error(err), ) } else { c.log.Info("all components of the storage engine closed successfully") diff --git a/cmd/neofs-node/tree.go b/cmd/neofs-node/tree.go index 60b9b343ad..9755f36cd6 100644 --- a/cmd/neofs-node/tree.go +++ b/cmd/neofs-node/tree.go @@ -98,7 +98,7 @@ func initTreeService(c *cfg) { // Ignore pilorama.ErrTreeNotFound but other errors, including shard.ErrReadOnly, should be logged. c.log.Error("container removal event received, but trees weren't removed", zap.Stringer("cid", ev.ID), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/pkg/innerring/blocktimer.go b/pkg/innerring/blocktimer.go index b0221d092d..cfc4a9c306 100644 --- a/pkg/innerring/blocktimer.go +++ b/pkg/innerring/blocktimer.go @@ -91,7 +91,7 @@ func newEpochTimer(args *epochTimerArgs) *timer.BlockTimer { if err != nil { args.l.Warn("can't stop epoch estimation", zap.Uint64("epoch", epochN), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/pkg/innerring/innerring.go b/pkg/innerring/innerring.go index 9a1bacc84f..9ee12cc349 100644 --- a/pkg/innerring/innerring.go +++ b/pkg/innerring/innerring.go @@ -196,7 +196,7 @@ func (s *Server) Start(ctx context.Context, intError chan<- error) (err error) { if err != nil { // we don't stop inner ring execution on this error s.log.Warn("can't vote for prepared validators", - zap.String("error", err.Error())) + zap.Error(err)) } // tick initial epoch @@ -284,7 +284,7 @@ func (s *Server) Stop() { for _, c := range s.closers { if err := c(); err != nil { s.log.Warn("closer error", - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -324,7 +324,7 @@ func New(ctx context.Context, log *zap.Logger, cfg *viper.Viper, errChan chan<- fromSideChainBlock, err := server.persistate.UInt32(persistateSideChainLastBlockKey) if err != nil { fromSideChainBlock = 0 - log.Warn("can't get last processed side chain block number", zap.String("error", err.Error())) + log.Warn("can't get last processed side chain block number", zap.Error(err)) } morphChain := chainParams{ @@ -588,7 +588,7 @@ func New(ctx context.Context, log *zap.Logger, cfg *viper.Viper, errChan chan<- fromMainChainBlock, err := server.persistate.UInt32(persistateMainChainLastBlockKey) if err != nil { fromMainChainBlock = 0 - log.Warn("can't get last processed main chain block number", zap.String("error", err.Error())) + log.Warn("can't get last processed main chain block number", zap.Error(err)) } mainnetChain.from = fromMainChainBlock diff --git a/pkg/innerring/processors/alphabet/process_emit.go b/pkg/innerring/processors/alphabet/process_emit.go index cb0a3b9e25..e8213ab2d5 100644 --- a/pkg/innerring/processors/alphabet/process_emit.go +++ b/pkg/innerring/processors/alphabet/process_emit.go @@ -29,7 +29,7 @@ func (ap *Processor) processEmit() { // there is no signature collecting, so we don't need extra fee err := ap.morphClient.Invoke(contract, 0, emitMethod) if err != nil { - ap.log.Warn("can't invoke alphabet emit method", zap.String("error", err.Error())) + 
ap.log.Warn("can't invoke alphabet emit method", zap.Error(err)) return } @@ -43,7 +43,7 @@ func (ap *Processor) processEmit() { networkMap, err := ap.netmapClient.NetMap() if err != nil { ap.log.Warn("can't get netmap snapshot to emit gas to storage nodes", - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -65,7 +65,7 @@ func (ap *Processor) processEmit() { key, err := keys.NewPublicKeyFromBytes(keyBytes, elliptic.P256()) if err != nil { ap.log.Warn("can't parse node public key", - zap.String("error", err.Error())) + zap.Error(err)) continue } @@ -75,7 +75,7 @@ func (ap *Processor) processEmit() { ap.log.Warn("can't transfer gas", zap.String("receiver", key.Address()), zap.Int64("amount", int64(gasPerNode)), - zap.String("error", err.Error()), + zap.Error(err), ) continue diff --git a/pkg/innerring/processors/audit/process.go b/pkg/innerring/processors/audit/process.go index ba4713f80c..304088164f 100644 --- a/pkg/innerring/processors/audit/process.go +++ b/pkg/innerring/processors/audit/process.go @@ -30,7 +30,7 @@ func (ap *Processor) processStartAudit(epoch uint64) { containers, err := ap.selectContainersToAudit(epoch) if err != nil { - log.Error("container selection failure", zap.String("error", err.Error())) + log.Error("container selection failure", zap.Error(err)) return } @@ -40,7 +40,7 @@ func (ap *Processor) processStartAudit(epoch uint64) { nm, err := ap.netmapClient.GetNetMap(0) if err != nil { ap.log.Error("can't fetch network map", - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -53,7 +53,7 @@ func (ap *Processor) processStartAudit(epoch uint64) { if err != nil { log.Error("can't get container info, ignore", zap.Stringer("cid", containers[i]), - zap.String("error", err.Error())) + zap.Error(err)) continue } @@ -63,7 +63,7 @@ func (ap *Processor) processStartAudit(epoch uint64) { if err != nil { log.Info("can't build placement for container, ignore", zap.Stringer("cid", containers[i]), - zap.String("error", err.Error())) + zap.Error(err)) continue } @@ -107,7 +107,7 @@ func (ap *Processor) processStartAudit(epoch uint64) { if err := ap.taskManager.PushTask(auditTask); err != nil { ap.log.Error("could not push audit task", - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -135,7 +135,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) [] err := clientcore.NodeInfoFromRawNetmapElement(&info, netmapcore.Node(shuffled[i])) if err != nil { - log.Warn("parse client node info", zap.String("error", err.Error())) + log.Warn("parse client node info", zap.Error(err)) continue } @@ -152,7 +152,7 @@ func (ap *Processor) findStorageGroups(cnr cid.ID, shuffled netmapcore.Nodes) [] cancel() if err != nil { - log.Warn("error in storage group search", zap.String("error", err.Error())) + log.Warn("error in storage group search", zap.Error(err)) continue } diff --git a/pkg/innerring/processors/container/process_container.go b/pkg/innerring/processors/container/process_container.go index 408505af3b..29cf4bf12b 100644 --- a/pkg/innerring/processors/container/process_container.go +++ b/pkg/innerring/processors/container/process_container.go @@ -44,7 +44,7 @@ func (cp *Processor) processContainerPut(put putEvent) { err := cp.checkPutContainer(ctx) if err != nil { cp.log.Error("put container check failed", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -108,7 +108,7 @@ func (cp *Processor) approvePutContainer(ctx *putContainerContext) { if err != nil { cp.log.Error("could not approve put container", - 
zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -124,7 +124,7 @@ func (cp *Processor) processContainerDelete(e *containerEvent.Delete) { err := cp.checkDeleteContainer(e) if err != nil { cp.log.Error("delete container check failed", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -179,7 +179,7 @@ func (cp *Processor) approveDeleteContainer(e *containerEvent.Delete) { if err != nil { cp.log.Error("could not approve delete container", - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/innerring/processors/container/process_eacl.go b/pkg/innerring/processors/container/process_eacl.go index bfca0f6cba..db7d90dc98 100644 --- a/pkg/innerring/processors/container/process_eacl.go +++ b/pkg/innerring/processors/container/process_eacl.go @@ -21,7 +21,7 @@ func (cp *Processor) processSetEACL(e container.SetEACL) { err := cp.checkSetEACL(e) if err != nil { cp.log.Error("set EACL check failed", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -92,7 +92,7 @@ func (cp *Processor) approveSetEACL(e container.SetEACL) { if err != nil { cp.log.Error("could not approve set EACL", - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/innerring/processors/governance/process_update.go b/pkg/innerring/processors/governance/process_update.go index 7e260f9783..49b6cdaf29 100644 --- a/pkg/innerring/processors/governance/process_update.go +++ b/pkg/innerring/processors/governance/process_update.go @@ -23,21 +23,21 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { mainnetAlphabet, err := gp.mainnetClient.NeoFSAlphabetList() if err != nil { gp.log.Error("can't fetch alphabet list from main net", - zap.String("error", err.Error())) + zap.Error(err)) return } sidechainAlphabet, err := gp.morphClient.Committee() if err != nil { gp.log.Error("can't fetch alphabet list from side chain", - zap.String("error", err.Error())) + zap.Error(err)) return } newAlphabet, err := newAlphabetList(sidechainAlphabet, mainnetAlphabet) if err != nil { gp.log.Error("can't merge alphabet lists from main net and side chain", - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -55,19 +55,19 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { err = gp.voter.VoteForSidechainValidator(newAlphabet, &txHash) if err != nil { gp.log.Error("can't vote for side chain committee", - zap.String("error", err.Error())) + zap.Error(err)) } // 2. Update NeoFSAlphabet role in the sidechain. innerRing, err := gp.irFetcher.InnerRingKeys() if err != nil { gp.log.Error("can't fetch inner ring list from side chain", - zap.String("error", err.Error())) + zap.Error(err)) } else { newInnerRing, err := updateInnerRing(innerRing, sidechainAlphabet, newAlphabet) if err != nil { gp.log.Error("can't create new inner ring list with new alphabet keys", - zap.String("error", err.Error())) + zap.Error(err)) } else { sort.Sort(newInnerRing) @@ -80,7 +80,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { if err != nil { gp.log.Error("can't update inner ring list with new alphabet keys", - zap.String("error", err.Error())) + zap.Error(err)) } } } @@ -89,7 +89,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { err = gp.morphClient.UpdateNotaryList(newAlphabet, txHash) if err != nil { gp.log.Error("can't update list of notary nodes in side chain", - zap.String("error", err.Error())) + zap.Error(err)) } // 4. Update NeoFS contract in the mainnet. 
@@ -98,7 +98,7 @@ func (gp *Processor) processAlphabetSync(txHash util.Uint256) { err = gp.neofsClient.AlphabetUpdate(id, newAlphabet) if err != nil { gp.log.Error("can't update list of alphabet nodes in neofs contract", - zap.String("error", err.Error())) + zap.Error(err)) } gp.log.Info("finished alphabet list update") diff --git a/pkg/innerring/processors/neofs/process_assets.go b/pkg/innerring/processors/neofs/process_assets.go index 4e324ea763..ad8dd14ecc 100644 --- a/pkg/innerring/processors/neofs/process_assets.go +++ b/pkg/innerring/processors/neofs/process_assets.go @@ -70,7 +70,7 @@ func (np *Processor) processDeposit(deposit *neofsEvent.Deposit) { err = np.morphClient.TransferGas(receiver, np.mintEmitValue) if err != nil { np.log.Error("can't transfer native gas to receiver", - zap.String("error", err.Error())) + zap.Error(err)) return } diff --git a/pkg/innerring/processors/neofs/process_bind.go b/pkg/innerring/processors/neofs/process_bind.go index 706765b734..ae9eee06ab 100644 --- a/pkg/innerring/processors/neofs/process_bind.go +++ b/pkg/innerring/processors/neofs/process_bind.go @@ -34,7 +34,7 @@ func (np *Processor) processBind(e bindCommon) { if err != nil { np.log.Error("invalid manage key event", zap.Bool("bind", c.bind), - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -78,7 +78,7 @@ func (np *Processor) approveBindCommon(e *bindCommonContext) { u160, err := util.Uint160DecodeBytesBE(scriptHash) if err != nil { np.log.Error("could not decode script hash from bytes", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -102,6 +102,6 @@ func (np *Processor) approveBindCommon(e *bindCommonContext) { if err != nil { np.log.Error(fmt.Sprintf("could not approve %s", typ), - zap.String("error", err.Error())) + zap.Error(err)) } } diff --git a/pkg/innerring/processors/netmap/process_cleanup.go b/pkg/innerring/processors/netmap/process_cleanup.go index 4101fe8032..3ddcd3299b 100644 --- a/pkg/innerring/processors/netmap/process_cleanup.go +++ b/pkg/innerring/processors/netmap/process_cleanup.go @@ -45,6 +45,6 @@ func (np *Processor) processNetmapCleanupTick(ev netmapCleanupTick) { }) if err != nil { np.log.Warn("can't iterate on netmap cleaner cache", - zap.String("error", err.Error())) + zap.Error(err)) } } diff --git a/pkg/innerring/processors/netmap/process_epoch.go b/pkg/innerring/processors/netmap/process_epoch.go index 745766c412..e6a513f328 100644 --- a/pkg/innerring/processors/netmap/process_epoch.go +++ b/pkg/innerring/processors/netmap/process_epoch.go @@ -17,7 +17,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { epochDuration, err := np.netmapClient.EpochDuration() if err != nil { np.log.Warn("can't get epoch duration", - zap.String("error", err.Error())) + zap.Error(err)) } else { np.epochState.SetEpochDuration(epochDuration) } @@ -28,19 +28,19 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { if err != nil { np.log.Warn("can't get transaction height", zap.String("hash", ev.TxHash().StringLE()), - zap.String("error", err.Error())) + zap.Error(err)) } if err := np.epochTimer.ResetEpochTimer(h); err != nil { np.log.Warn("can't reset epoch timer", - zap.String("error", err.Error())) + zap.Error(err)) } // get new netmap snapshot networkMap, err := np.netmapClient.NetMap() if err != nil { np.log.Warn("can't get netmap snapshot to perform cleanup", - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -60,7 +60,7 @@ func (np *Processor) processNewEpoch(ev netmapEvent.NewEpoch) { if err != nil { 
np.log.Warn("can't start container size estimation", zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) } } diff --git a/pkg/innerring/processors/netmap/process_peers.go b/pkg/innerring/processors/netmap/process_peers.go index b50d9c5629..00b64c16c0 100644 --- a/pkg/innerring/processors/netmap/process_peers.go +++ b/pkg/innerring/processors/netmap/process_peers.go @@ -41,7 +41,7 @@ func (np *Processor) processAddPeer(ev netmapEvent.AddPeer) { if err != nil { np.log.Warn("could not verify and update information about network map candidate", zap.String("public_key", hex.EncodeToString(nodeInfo.PublicKey())), - zap.String("error", err.Error()), + zap.Error(err), ) return diff --git a/pkg/innerring/processors/reputation/process_put.go b/pkg/innerring/processors/reputation/process_put.go index ff53781ebf..503147ad44 100644 --- a/pkg/innerring/processors/reputation/process_put.go +++ b/pkg/innerring/processors/reputation/process_put.go @@ -47,7 +47,7 @@ func (rp *Processor) processPut(e *reputationEvent.Put) { if err := rp.checkManagers(epoch, value.Manager(), id); err != nil { rp.log.Info("ignore reputation value", zap.String("reason", "wrong manager"), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -82,6 +82,6 @@ func (rp *Processor) approvePutReputation(e *reputationEvent.Put) { if err != nil { rp.log.Warn("can't send approval tx for reputation value", zap.String("peer_id", hex.EncodeToString(id.PublicKey())), - zap.String("error", err.Error())) + zap.Error(err)) } } diff --git a/pkg/innerring/processors/settlement/audit/calculate.go b/pkg/innerring/processors/settlement/audit/calculate.go index 02308533d3..b243089a42 100644 --- a/pkg/innerring/processors/settlement/audit/calculate.go +++ b/pkg/innerring/processors/settlement/audit/calculate.go @@ -70,7 +70,7 @@ func (c *Calculator) Calculate(epoch uint64) { auditFee, err := c.prm.AuditFeeFetcher.AuditFee() if err != nil { log.Warn("can't fetch audit fee from network config", - zap.String("error", err.Error())) + zap.Error(err)) auditFee = 0 } @@ -138,7 +138,7 @@ func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool { ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(ctx.auditResult.Container) if err != nil { ctx.log.Error("could not get container info", - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -151,7 +151,7 @@ func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool { ctx.cnrNodes, err = c.prm.PlacementCalculator.ContainerNodes(ctx.auditEpoch(), ctx.containerID()) if err != nil { ctx.log.Error("could not get container nodes", - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -207,7 +207,7 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool { if err != nil { ctx.log.Error("could not get SG info", zap.String("id", id.String()), - zap.String("error", err.Error()), + zap.Error(err), ) return false // we also can continue and calculate at least some part @@ -234,7 +234,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool { ownerID, err := c.prm.AccountStorage.ResolveKey(info) if err != nil { ctx.log.Error("could not resolve public key of the storage node", - zap.String("error", err.Error()), + zap.Error(err), zap.String("key", k), ) @@ -266,7 +266,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool { auditorKey, err := keys.NewPublicKeyFromBytes(ctx.auditResult.AuditorPublicKey, elliptic.P256()) if err != nil { ctx.log.Error("could not parse public key of the inner ring node", - 
zap.String("error", err.Error()), + zap.Error(err), zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorPublicKey)), ) diff --git a/pkg/innerring/processors/settlement/basic/collect.go b/pkg/innerring/processors/settlement/basic/collect.go index b6ccaadc54..22ca9c5187 100644 --- a/pkg/innerring/processors/settlement/basic/collect.go +++ b/pkg/innerring/processors/settlement/basic/collect.go @@ -20,7 +20,7 @@ func (inc *IncomeSettlementContext) Collect() { cachedRate, err := inc.rate.BasicRate() if err != nil { inc.log.Error("can't get basic income rate", - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -29,7 +29,7 @@ func (inc *IncomeSettlementContext) Collect() { if err != nil { inc.log.Error("can't fetch container size estimations", zap.Uint64("epoch", inc.epoch), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -42,7 +42,7 @@ func (inc *IncomeSettlementContext) Collect() { inc.log.Warn("can't fetch container info", zap.Uint64("epoch", inc.epoch), zap.Stringer("container_id", cnr), - zap.String("error", err.Error())) + zap.Error(err)) continue } @@ -52,7 +52,7 @@ func (inc *IncomeSettlementContext) Collect() { inc.log.Debug("can't fetch container info", zap.Uint64("epoch", inc.epoch), zap.Stringer("container_id", cnr), - zap.String("error", err.Error())) + zap.Error(err)) continue } diff --git a/pkg/innerring/processors/settlement/basic/distribute.go b/pkg/innerring/processors/settlement/basic/distribute.go index 2799325d90..8591ab5ed3 100644 --- a/pkg/innerring/processors/settlement/basic/distribute.go +++ b/pkg/innerring/processors/settlement/basic/distribute.go @@ -23,7 +23,7 @@ func (inc *IncomeSettlementContext) Distribute() { bankBalance, err := inc.balances.Balance(inc.bankOwner) if err != nil { inc.log.Error("can't fetch balance of banking account", - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -33,7 +33,7 @@ func (inc *IncomeSettlementContext) Distribute() { if err != nil { inc.log.Warn("can't transform public key to owner id", zap.String("public_key", hex.EncodeToString(key)), - zap.String("error", err.Error())) + zap.Error(err)) return } diff --git a/pkg/innerring/processors/settlement/calls.go b/pkg/innerring/processors/settlement/calls.go index dae6ae16eb..6cb5c89674 100644 --- a/pkg/innerring/processors/settlement/calls.go +++ b/pkg/innerring/processors/settlement/calls.go @@ -32,7 +32,7 @@ func (p *Processor) HandleAuditEvent(e event.Event) { err := p.pool.Submit(handler.handle) if err != nil { log.Warn("could not add handler of AuditEvent to queue", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -67,7 +67,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) { incomeCtx, err := p.basicIncome.CreateContext(epoch) if err != nil { p.log.Error("can't create income context", - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -79,7 +79,7 @@ func (p *Processor) HandleIncomeCollectionEvent(e event.Event) { }) if err != nil { p.log.Warn("could not add handler of basic income collection to queue", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -117,7 +117,7 @@ func (p *Processor) HandleIncomeDistributionEvent(e event.Event) { }) if err != nil { p.log.Warn("could not add handler of basic income distribution to queue", - zap.String("error", err.Error()), + zap.Error(err), ) return diff --git a/pkg/innerring/rpc.go b/pkg/innerring/rpc.go index 71802cd325..8af531ba42 100644 --- a/pkg/innerring/rpc.go +++ b/pkg/innerring/rpc.go @@ -106,7 +106,7 @@ func (c 
*ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne cli, err := c.getWrappedClient(info) if err != nil { c.log.Warn("can't setup remote connection", - zap.String("error", err.Error())) + zap.Error(err)) continue } @@ -122,7 +122,7 @@ func (c *ClientCache) getSG(ctx context.Context, addr oid.Address, nm *netmap.Ne if err != nil { c.log.Warn("can't get storage group object", - zap.String("error", err.Error())) + zap.Error(err)) continue } diff --git a/pkg/innerring/settlement.go b/pkg/innerring/settlement.go index ac167fed56..8671e51c29 100644 --- a/pkg/innerring/settlement.go +++ b/pkg/innerring/settlement.go @@ -232,7 +232,7 @@ func (s settlementDeps) Transfer(sender, recipient user.ID, amount *big.Int, det err := s.balanceClient.TransferX(params) if err != nil { log.Error(fmt.Sprintf("%s: could not send transfer", s.settlementCtx), - zap.String("error", err.Error()), + zap.Error(err), ) return diff --git a/pkg/innerring/state.go b/pkg/innerring/state.go index 5686c53b90..da545bb947 100644 --- a/pkg/innerring/state.go +++ b/pkg/innerring/state.go @@ -62,7 +62,7 @@ func (s *Server) IsAlphabet() bool { func (s *Server) InnerRingIndex() int { index, err := s.statusIndex.InnerRingIndex() if err != nil { - s.log.Error("can't get inner ring index", zap.String("error", err.Error())) + s.log.Error("can't get inner ring index", zap.Error(err)) return -1 } @@ -74,7 +74,7 @@ func (s *Server) InnerRingIndex() int { func (s *Server) InnerRingSize() int { size, err := s.statusIndex.InnerRingSize() if err != nil { - s.log.Error("can't get inner ring size", zap.String("error", err.Error())) + s.log.Error("can't get inner ring size", zap.Error(err)) return 0 } @@ -86,7 +86,7 @@ func (s *Server) InnerRingSize() int { func (s *Server) AlphabetIndex() int { index, err := s.statusIndex.AlphabetIndex() if err != nil { - s.log.Error("can't get alphabet index", zap.String("error", err.Error())) + s.log.Error("can't get alphabet index", zap.Error(err)) return -1 } @@ -138,7 +138,7 @@ func (s *Server) voteForSidechainValidator(validators keys.PublicKeys, trigger * s.log.Warn("can't invoke vote method in alphabet contract", zap.Int("alphabet_index", ind), zap.Uint64("epoch", epoch), - zap.String("error", err.Error())) + zap.Error(err)) } }) diff --git a/pkg/local_object_storage/blobstor/control.go b/pkg/local_object_storage/blobstor/control.go index 44f305d09e..87306be193 100644 --- a/pkg/local_object_storage/blobstor/control.go +++ b/pkg/local_object_storage/blobstor/control.go @@ -47,7 +47,7 @@ func (b *BlobStor) Close() error { for i := range b.storage { err := b.storage[i].Storage.Close() if err != nil { - b.log.Info("couldn't close storage", zap.String("error", err.Error())) + b.log.Info("couldn't close storage", zap.Error(err)) if firstErr == nil { firstErr = err } diff --git a/pkg/local_object_storage/blobstor/exists.go b/pkg/local_object_storage/blobstor/exists.go index 5c43acfaee..f52faf8187 100644 --- a/pkg/local_object_storage/blobstor/exists.go +++ b/pkg/local_object_storage/blobstor/exists.go @@ -46,7 +46,7 @@ func (b *BlobStor) Exists(prm common.ExistsPrm) (common.ExistsRes, error) { for _, err := range errors[:len(errors)-1] { b.log.Warn("error occurred during object existence checking", zap.Stringer("address", prm.Address), - zap.String("error", err.Error())) + zap.Error(err)) } return common.ExistsRes{}, errors[len(errors)-1] diff --git a/pkg/local_object_storage/engine/container.go b/pkg/local_object_storage/engine/container.go index 7fe6a63557..f1e4c51f86 100644 --- 
a/pkg/local_object_storage/engine/container.go +++ b/pkg/local_object_storage/engine/container.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" "go.uber.org/zap" @@ -30,17 +29,14 @@ func (e *StorageEngine) ContainerSize(cnr cid.ID) (uint64, error) { var size uint64 for _, sh := range e.unsortedShards() { - var csPrm shard.ContainerSizePrm - csPrm.SetContainerID(cnr) - - csRes, err := sh.Shard.ContainerSize(csPrm) + shardSize, err := sh.Shard.ContainerSize(cnr) if err != nil { e.reportShardError(sh, "can't get container size", err, zap.Stringer("container_id", cnr)) continue } - size += csRes.Size() + size += shardSize } return size, nil @@ -64,13 +60,13 @@ func (e *StorageEngine) ListContainers() ([]cid.ID, error) { uniqueIDs := make(map[cid.ID]struct{}) for _, sh := range e.unsortedShards() { - res, err := sh.Shard.ListContainers(shard.ListContainersPrm{}) + res, err := sh.Shard.ListContainers() if err != nil { e.reportShardError(sh, "can't get list of containers", err) continue } - for _, cnr := range res.Containers() { + for _, cnr := range res { if _, ok := uniqueIDs[cnr]; !ok { uniqueIDs[cnr] = struct{}{} } @@ -125,12 +121,12 @@ func (e *StorageEngine) deleteNotFoundContainers() error { wg.Go(func() error { shID := e.shards[iCopy].ID() - res, err := e.shards[iCopy].ListContainers(shard.ListContainersPrm{}) + res, err := e.shards[iCopy].ListContainers() if err != nil { return fmt.Errorf("fetching containers from '%s' shard: %w", shID, err) } - for _, cnrStored := range res.Containers() { + for _, cnrStored := range res { // in the most loaded scenarios it is a cache if _, err = e.cfg.containerSource.Get(cnrStored); errors.As(err, new(apistatus.ContainerNotFound)) { err = e.shards[iCopy].InhumeContainer(cnrStored) diff --git a/pkg/local_object_storage/engine/control.go b/pkg/local_object_storage/engine/control.go index 65a2d55480..93e8d3b362 100644 --- a/pkg/local_object_storage/engine/control.go +++ b/pkg/local_object_storage/engine/control.go @@ -26,7 +26,7 @@ func (e *StorageEngine) open() error { } e.log.Debug("could not open shard", zap.String("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) delete(e.shards, id) } @@ -47,7 +47,7 @@ func (e *StorageEngine) Init() error { } e.log.Debug("could not init shard", zap.String("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) delete(e.shards, id) } @@ -91,7 +91,7 @@ func (e *StorageEngine) close(releasePools bool) error { if err := sh.Close(); err != nil { e.log.Debug("could not close shard", zap.String("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/local_object_storage/engine/dump.go b/pkg/local_object_storage/engine/dump.go index bdb1237725..e3d00b4856 100644 --- a/pkg/local_object_storage/engine/dump.go +++ b/pkg/local_object_storage/engine/dump.go @@ -1,11 +1,15 @@ package engine -import "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" +import ( + "io" + + "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" +) // DumpShard dumps objects from the shard with provided identifier. // // Returns an error if shard is not read-only. 
-func (e *StorageEngine) DumpShard(id *shard.ID, prm shard.DumpPrm) error { +func (e *StorageEngine) DumpShard(id *shard.ID, w io.Writer, ignoreErrors bool) error { e.mtx.RLock() defer e.mtx.RUnlock() @@ -14,6 +18,6 @@ func (e *StorageEngine) DumpShard(id *shard.ID, prm shard.DumpPrm) error { return errShardNotFound } - _, err := sh.Dump(prm) + _, err := sh.Dump(w, ignoreErrors) return err } diff --git a/pkg/local_object_storage/engine/engine.go b/pkg/local_object_storage/engine/engine.go index 31a53950db..44b1ccc40c 100644 --- a/pkg/local_object_storage/engine/engine.go +++ b/pkg/local_object_storage/engine/engine.go @@ -124,7 +124,7 @@ func (e *StorageEngine) reportShardErrorBackground(id string, msg string, err er if errors.Is(err, logicerr.Error) { e.log.Warn(msg, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -142,7 +142,7 @@ func (e *StorageEngine) reportShardError( if errors.Is(err, logicerr.Error) { e.log.Warn(msg, zap.Stringer("shard_id", sh.ID()), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -161,7 +161,7 @@ func (e *StorageEngine) reportShardErrorWithFlags( e.log.Warn(msg, append([]zap.Field{ zap.Stringer("shard_id", sid), zap.Uint32("error count", errCount), - zap.String("error", err.Error()), + zap.Error(err), }, fields...)...) if e.errorsThreshold == 0 || errCount < e.errorsThreshold { diff --git a/pkg/local_object_storage/engine/error_test.go b/pkg/local_object_storage/engine/error_test.go index 371a7e5f56..7feba188d4 100644 --- a/pkg/local_object_storage/engine/error_test.go +++ b/pkg/local_object_storage/engine/error_test.go @@ -68,10 +68,8 @@ func TestErrorReporting(t *testing.T) { obj := generateObjectWithCID(cidtest.ID()) obj.SetPayload(make([]byte, errSmallSize)) - var prm shard.PutPrm - prm.SetObject(obj) e.mtx.RLock() - _, err := e.shards[id[0].String()].Shard.Put(prm) + err := e.shards[id[0].String()].Shard.Put(obj, nil, 0) e.mtx.RUnlock() require.NoError(t, err) @@ -98,10 +96,8 @@ func TestErrorReporting(t *testing.T) { obj := generateObjectWithCID(cidtest.ID()) obj.SetPayload(make([]byte, errSmallSize)) - var prm shard.PutPrm - prm.SetObject(obj) e.mtx.RLock() - _, err := e.shards[id[0].String()].Put(prm) + err := e.shards[id[0].String()].Put(obj, nil, 0) e.mtx.RUnlock() require.NoError(t, err) @@ -148,10 +144,8 @@ func TestBlobstorFailback(t *testing.T) { obj := generateObjectWithCID(cidtest.ID()) obj.SetPayload(make([]byte, size)) - var prm shard.PutPrm - prm.SetObject(obj) e.mtx.RLock() - _, err = e.shards[id[0].String()].Shard.Put(prm) + err = e.shards[id[0].String()].Shard.Put(obj, nil, 0) e.mtx.RUnlock() require.NoError(t, err) objs = append(objs, obj) diff --git a/pkg/local_object_storage/engine/evacuate.go b/pkg/local_object_storage/engine/evacuate.go index afaac70b90..d00ca4a004 100644 --- a/pkg/local_object_storage/engine/evacuate.go +++ b/pkg/local_object_storage/engine/evacuate.go @@ -78,9 +78,6 @@ func (e *StorageEngine) Evacuate(shardIDs []*shard.ID, ignoreErrors bool, faultH } } - var listPrm shard.ListWithCursorPrm - listPrm.WithCount(defaultEvacuateBatchSize) - var count int mainLoop: @@ -89,11 +86,9 @@ mainLoop: var c *meta.Cursor for { - listPrm.WithCursor(c) - // TODO (@fyrchik): #1731 this approach doesn't work in degraded modes // because ListWithCursor works only with the metabase. 
- listRes, err := sh.ListWithCursor(listPrm) + lst, cursor, err := sh.ListWithCursor(defaultEvacuateBatchSize, c) if err != nil { if errors.Is(err, meta.ErrEndOfListing) || errors.Is(err, shard.ErrDegradedMode) { continue mainLoop @@ -102,17 +97,12 @@ mainLoop: } // TODO (@fyrchik): #1731 parallelize the loop - lst := listRes.AddressList() - loop: for i := range lst { addr := lst[i].Address addrHash := hrw.WrapBytes([]byte(addr.EncodeToString())) - var getPrm shard.GetPrm - getPrm.SetAddress(addr) - - getRes, err := sh.Get(getPrm) + obj, err := sh.Get(addr, false) if err != nil { if ignoreErrors { continue @@ -125,7 +115,7 @@ mainLoop: if _, ok := shardMap[shards[j].ID().String()]; ok { continue } - putDone, exists, _ := e.putToShard(shards[j].shardWrapper, j, shards[j].pool, addr, getRes.Object(), nil, 0) + putDone, exists, _ := e.putToShard(shards[j].shardWrapper, j, shards[j].pool, addr, obj, nil, 0) if putDone || exists { if putDone { e.log.Debug("object is moved to another shard", @@ -147,14 +137,14 @@ mainLoop: return count, fmt.Errorf("%w: %s", errPutShard, lst[i]) } - err = faultHandler(addr, getRes.Object()) + err = faultHandler(addr, obj) if err != nil { return count, err } count++ } - c = listRes.Cursor() + c = cursor } } diff --git a/pkg/local_object_storage/engine/evacuate_test.go b/pkg/local_object_storage/engine/evacuate_test.go index 5763f7ee6b..6cf31b0f4a 100644 --- a/pkg/local_object_storage/engine/evacuate_test.go +++ b/pkg/local_object_storage/engine/evacuate_test.go @@ -60,7 +60,7 @@ func newEngineEvacuate(t *testing.T, shardNum int, objPerShard int) (*StorageEng res, err := e.shards[ids[len(ids)-1].String()].List() require.NoError(t, err) - if len(res.AddressList()) == objPerShard { + if len(res) == objPerShard { break } } @@ -177,7 +177,7 @@ func TestEvacuateNetwork(t *testing.T) { res, err := e.shards[ids[i].String()].List() require.NoError(t, err) - totalCount += len(res.AddressList()) + totalCount += len(res) } for i := range ids { diff --git a/pkg/local_object_storage/engine/exists.go b/pkg/local_object_storage/engine/exists.go index 34004b8107..9b0f481cc6 100644 --- a/pkg/local_object_storage/engine/exists.go +++ b/pkg/local_object_storage/engine/exists.go @@ -10,11 +10,8 @@ import ( ) func (e *StorageEngine) exists(addr oid.Address) (bool, error) { - var shPrm shard.ExistsPrm - shPrm.SetAddress(addr) - for _, sh := range e.sortedShards(addr) { - res, err := sh.Exists(shPrm) + exists, err := sh.Exists(addr, false) if err != nil { if shard.IsErrRemoved(err) { return false, apistatus.ObjectAlreadyRemoved{} @@ -35,7 +32,7 @@ func (e *StorageEngine) exists(addr oid.Address) (bool, error) { continue } - if res.Exists() { + if exists { return true, nil } } diff --git a/pkg/local_object_storage/engine/get.go b/pkg/local_object_storage/engine/get.go index ed4ba2249d..bbc2752a5a 100644 --- a/pkg/local_object_storage/engine/get.go +++ b/pkg/local_object_storage/engine/get.go @@ -36,23 +36,16 @@ func (e *StorageEngine) Get(addr oid.Address) (*objectSDK.Object, error) { var ( err error obj *objectSDK.Object - sp shard.GetPrm ) - sp.SetAddress(addr) - err = e.get(addr, func(s *shard.Shard, ignoreMetadata bool) (bool, error) { - sp.SetIgnoreMeta(ignoreMetadata) - sr, err := s.Get(sp) - if err != nil { - return sr.HasMeta(), err - } - obj = sr.Object() - return sr.HasMeta(), nil + err = e.get(addr, func(s *shard.Shard, ignoreMetadata bool) error { + obj, err = s.Get(addr, ignoreMetadata) + return err }) return obj, err } -func (e *StorageEngine) get(addr oid.Address, shardFunc 
func(s *shard.Shard, ignoreMetadata bool) (hasMetadata bool, err error)) error { +func (e *StorageEngine) get(addr oid.Address, shardFunc func(s *shard.Shard, ignoreMetadata bool) error) error { var ( hasDegraded bool shardWithMeta shardWrapper @@ -64,11 +57,11 @@ func (e *StorageEngine) get(addr oid.Address, shardFunc func(s *shard.Shard, ign noMeta := sh.GetMode().NoMetabase() hasDegraded = hasDegraded || noMeta - hasMetadata, err := shardFunc(sh.Shard, noMeta) + err := shardFunc(sh.Shard, noMeta) if err != nil { var siErr *objectSDK.SplitInfoError - if hasMetadata { + if errors.Is(err, shard.ErrMetaWithNoObject) { shardWithMeta = sh metaError = err } @@ -121,7 +114,7 @@ func (e *StorageEngine) get(addr oid.Address, shardFunc func(s *shard.Shard, ign continue } - _, err := shardFunc(sh.Shard, true) + err := shardFunc(sh.Shard, true) if shard.IsErrOutOfRange(err) { return apistatus.ObjectOutOfRange{} } @@ -151,13 +144,13 @@ func (e *StorageEngine) GetBytes(addr oid.Address) ([]byte, error) { b []byte err error ) - err = e.get(addr, func(s *shard.Shard, ignoreMetadata bool) (hasMetadata bool, err error) { + err = e.get(addr, func(s *shard.Shard, ignoreMetadata bool) error { if ignoreMetadata { b, err = s.GetBytes(addr) } else { - b, hasMetadata, err = s.GetBytesWithMetadataLookup(addr) + b, err = s.GetBytesWithMetadataLookup(addr) } - return + return err }) return b, err } diff --git a/pkg/local_object_storage/engine/head.go b/pkg/local_object_storage/engine/head.go index ae080fff09..55a367dc3a 100644 --- a/pkg/local_object_storage/engine/head.go +++ b/pkg/local_object_storage/engine/head.go @@ -33,16 +33,10 @@ func (e *StorageEngine) Head(addr oid.Address, raw bool) (*objectSDK.Object, err return nil, e.blockErr } - var ( - shPrm shard.HeadPrm - splitInfo *objectSDK.SplitInfo - ) - - shPrm.SetAddress(addr) - shPrm.SetRaw(raw) + var splitInfo *objectSDK.SplitInfo for _, sh := range e.sortedShards(addr) { - res, err := sh.Head(shPrm) + res, err := sh.Head(addr, raw) if err != nil { var siErr *objectSDK.SplitInfoError @@ -73,7 +67,7 @@ func (e *StorageEngine) Head(addr oid.Address, raw bool) (*objectSDK.Object, err } } - return res.Object(), nil + return res, nil } if splitInfo != nil { diff --git a/pkg/local_object_storage/engine/head_test.go b/pkg/local_object_storage/engine/head_test.go index 6d133d2c34..e4847a7819 100644 --- a/pkg/local_object_storage/engine/head_test.go +++ b/pkg/local_object_storage/engine/head_test.go @@ -4,7 +4,6 @@ import ( "os" "testing" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" @@ -46,18 +45,12 @@ func TestHeadRaw(t *testing.T) { e := testNewEngineWithShards(s1, s2) defer e.Close() - var putPrmLeft shard.PutPrm - putPrmLeft.SetObject(child) - - var putPrmLink shard.PutPrm - putPrmLink.SetObject(link) - // put most left object in one shard - _, err := s1.Put(putPrmLeft) + err := s1.Put(child, nil, 0) require.NoError(t, err) // put link object in another shard - _, err = s2.Put(putPrmLink) + err = s2.Put(link, nil, 0) require.NoError(t, err) // head with raw flag should return SplitInfoError diff --git a/pkg/local_object_storage/engine/inhume.go b/pkg/local_object_storage/engine/inhume.go index e758e49b2a..daac3aaadb 100644 --- a/pkg/local_object_storage/engine/inhume.go +++ b/pkg/local_object_storage/engine/inhume.go @@ -38,11 +38,6 @@ func (e *StorageEngine) Inhume(tombstone oid.Address, 
tombExpiration uint64, add } func (e *StorageEngine) inhume(addrs []oid.Address, force bool, tombstone *oid.Address, tombExpiration uint64) error { - var shPrm shard.InhumePrm - if force { - shPrm.ForceRemoval() - } - for i := range addrs { if !force { locked, err := e.IsLocked(addrs[i]) @@ -56,13 +51,7 @@ func (e *StorageEngine) inhume(addrs []oid.Address, force bool, tombstone *oid.A } } - if tombstone != nil { - shPrm.InhumeByTomb(*tombstone, tombExpiration, addrs[i]) - } else { - shPrm.MarkAsGarbage(addrs[i]) - } - - ok, err := e.inhumeAddr(addrs[i], shPrm) + ok, err := e.inhumeAddr(addrs[i], force, tombstone, tombExpiration) if err != nil { return err } @@ -100,19 +89,17 @@ func (e *StorageEngine) InhumeContainer(cID cid.ID) error { } // Returns ok if object was inhumed during this invocation or before. -func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm) (bool, error) { - var existPrm shard.ExistsPrm - var shardWithObject string - - var root bool - var children []oid.Address +func (e *StorageEngine) inhumeAddr(addr oid.Address, force bool, tombstone *oid.Address, tombExpiration uint64) (bool, error) { + var ( + children []oid.Address + err error + root bool + shardWithObject string + ) // see if the object is root for _, sh := range e.unsortedShards() { - existPrm.SetAddress(addr) - existPrm.IgnoreExpiration() - - res, err := sh.Exists(existPrm) + exists, err := sh.Exists(addr, true) if err != nil { if shard.IsErrNotFound(err) { continue @@ -176,18 +163,23 @@ func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm) (bool, break } - if res.Exists() { + if exists { shardWithObject = sh.ID().String() break } } - prm.SetTargets(append(children, addr)...) + var addrs = append(children, addr) if shardWithObject != "" { sh := e.getShard(shardWithObject) - _, err := sh.Inhume(prm) + if tombstone != nil { + err = sh.Inhume(*tombstone, tombExpiration, addrs...) + } else { + err = sh.MarkGarbage(force, addrs...) + } + if err != nil { if !errors.Is(err, logicerr.Error) { e.reportShardError(sh, "could not inhume object in shard", err) @@ -206,7 +198,11 @@ func (e *StorageEngine) inhumeAddr(addr oid.Address, prm shard.InhumePrm) (bool, // has not found the object on any shard, so mark it as inhumed on the most probable one for _, sh := range e.sortedShards(addr) { - _, err := sh.Inhume(prm) + if tombstone != nil { + err = sh.Inhume(*tombstone, tombExpiration, addrs...) + } else { + err = sh.MarkGarbage(force, addrs...) 
+ } if err != nil { var errLocked apistatus.ObjectLocked diff --git a/pkg/local_object_storage/engine/inhume_test.go b/pkg/local_object_storage/engine/inhume_test.go index 77e6d2088d..9fdd1d0aee 100644 --- a/pkg/local_object_storage/engine/inhume_test.go +++ b/pkg/local_object_storage/engine/inhume_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" @@ -61,14 +60,10 @@ func TestStorageEngine_Inhume(t *testing.T) { e := testNewEngineWithShards(s1, s2) defer e.Close() - var putChild shard.PutPrm - putChild.SetObject(child) - _, err := s1.Put(putChild) + err := s1.Put(child, nil, 0) require.NoError(t, err) - var putLink shard.PutPrm - putLink.SetObject(link) - _, err = s2.Put(putLink) + err = s2.Put(link, nil, 0) require.NoError(t, err) err = e.Inhume(tombstoneID, 0, object.AddressOf(parent)) @@ -124,23 +119,17 @@ func TestStorageEngine_Inhume(t *testing.T) { wrongShard := e.getShard(wrongShardID) - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - var getPrm shard.GetPrm - getPrm.SetAddress(addr) - - _, err := wrongShard.Put(putPrm) + err := wrongShard.Put(obj, nil, 0) require.NoError(t, err) - _, err = wrongShard.Get(getPrm) + _, err = wrongShard.Get(addr, false) require.NoError(t, err) err = e.Delete(addr) require.NoError(t, err) // object was on the wrong (according to hash sorting) shard but is removed anyway - _, err = wrongShard.Get(getPrm) + _, err = wrongShard.Get(addr, false) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) }) diff --git a/pkg/local_object_storage/engine/list.go b/pkg/local_object_storage/engine/list.go index 6cec638772..000c426777 100644 --- a/pkg/local_object_storage/engine/list.go +++ b/pkg/local_object_storage/engine/list.go @@ -67,19 +67,18 @@ func (e *StorageEngine) ListWithCursor(count uint32, cursor *Cursor) ([]objectco } count := uint32(int(count) - len(result)) - var shardPrm shard.ListWithCursorPrm - shardPrm.WithCount(count) + var shCursor *shard.Cursor if shardIDs[i] == cursor.shardID { - shardPrm.WithCursor(cursor.shardCursor) + shCursor = cursor.shardCursor } - res, err := shardInstance.ListWithCursor(shardPrm) + res, shCursor, err := shardInstance.ListWithCursor(int(count), shCursor) if err != nil { continue } - result = append(result, res.AddressList()...) - cursor.shardCursor = res.Cursor() + result = append(result, res...) 
+ cursor.shardCursor = shCursor cursor.shardID = shardIDs[i] } diff --git a/pkg/local_object_storage/engine/lock.go b/pkg/local_object_storage/engine/lock.go index 503c61d67d..f871cecd40 100644 --- a/pkg/local_object_storage/engine/lock.go +++ b/pkg/local_object_storage/engine/lock.go @@ -60,10 +60,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi for _, sh := range e.sortedShards(addrLocked) { if checkExists { - var existsPrm shard.ExistsPrm - existsPrm.SetAddress(addrLocked) - - exRes, err := sh.Exists(existsPrm) + exists, err := sh.Exists(addrLocked, false) if err != nil { var siErr *objectSDK.SplitInfoError if !errors.As(err, &siErr) { @@ -81,7 +78,7 @@ func (e *StorageEngine) lockSingle(idCnr cid.ID, locker, locked oid.ID, checkExi } root = true - } else if !exRes.Exists() { + } else if !exists { if !root { return 0 } diff --git a/pkg/local_object_storage/engine/put.go b/pkg/local_object_storage/engine/put.go index 2fb42950da..b5086f5bdc 100644 --- a/pkg/local_object_storage/engine/put.go +++ b/pkg/local_object_storage/engine/put.go @@ -85,18 +85,19 @@ func (e *StorageEngine) Put(obj *objectSDK.Object, objBin []byte, hdrLen int) er // Second return value is true iff object already exists. // Third return value is true iff object cannot be put because of max concurrent load. func (e *StorageEngine) putToShard(sh shardWrapper, ind int, pool util.WorkerPool, addr oid.Address, obj *objectSDK.Object, objBin []byte, hdrLen int) (bool, bool, bool) { - var putSuccess, alreadyExists, overloaded bool - id := sh.ID() - - exitCh := make(chan struct{}) - - if err := pool.Submit(func() { + var ( + alreadyExists bool + err error + exitCh = make(chan struct{}) + id = sh.ID() + overloaded bool + putSuccess bool + ) + + err = pool.Submit(func() { defer close(exitCh) - var existPrm shard.ExistsPrm - existPrm.SetAddress(addr) - - exists, err := sh.Exists(existPrm) + exists, err := sh.Exists(addr, false) if err != nil { e.log.Warn("object put: check object existence", zap.Stringer("addr", addr), @@ -112,17 +113,14 @@ func (e *StorageEngine) putToShard(sh shardWrapper, ind int, pool util.WorkerPoo return // this is not ErrAlreadyRemoved error so we can go to the next shard } - alreadyExists = exists.Exists() + alreadyExists = exists if alreadyExists { if ind != 0 { - var toMoveItPrm shard.ToMoveItPrm - toMoveItPrm.SetAddress(addr) - - _, err = sh.ToMoveIt(toMoveItPrm) + err = sh.ToMoveIt(addr) if err != nil { e.log.Warn("could not mark object for shard relocation", zap.Stringer("shard", id), - zap.String("error", err.Error()), + zap.Error(err), ) } } @@ -134,19 +132,13 @@ func (e *StorageEngine) putToShard(sh shardWrapper, ind int, pool util.WorkerPoo return } - var putPrm shard.PutPrm - putPrm.SetObject(obj) - if objBin != nil { - putPrm.SetObjectBinary(objBin, hdrLen) - } - - _, err = sh.Put(putPrm) + err = sh.Put(obj, objBin, hdrLen) if err != nil { if errors.Is(err, shard.ErrReadOnlyMode) || errors.Is(err, blobstor.ErrNoPlaceFound) || errors.Is(err, common.ErrReadOnly) || errors.Is(err, common.ErrNoSpace) { e.log.Warn("could not put object to shard", zap.Stringer("shard_id", id), - zap.String("error", err.Error())) + zap.Error(err)) return } @@ -155,7 +147,8 @@ func (e *StorageEngine) putToShard(sh shardWrapper, ind int, pool util.WorkerPoo } putSuccess = true - }); err != nil { + }) + if err != nil { e.log.Warn("object put: pool task submitting", zap.Stringer("shard", id), zap.Error(err)) overloaded = errors.Is(err, ants.ErrPoolOverload) close(exitCh) diff --git 
a/pkg/local_object_storage/engine/range.go b/pkg/local_object_storage/engine/range.go index 55e0a2f3ab..ca6b70c7a8 100644 --- a/pkg/local_object_storage/engine/range.go +++ b/pkg/local_object_storage/engine/range.go @@ -2,22 +2,9 @@ package engine import ( "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" - objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" ) -// RngRes groups the resulting values of GetRange operation. -type RngRes struct { - obj *objectSDK.Object -} - -// Object returns the requested object part. -// -// Instance payload contains the requested range of the original object. -func (r RngRes) Object() *objectSDK.Object { - return r.obj -} - // GetRange reads a part of an object from local storage. Zero length is // interpreted as requiring full object length independent of the offset. // @@ -41,20 +28,16 @@ func (e *StorageEngine) GetRange(addr oid.Address, offset uint64, length uint64) } var ( - err error - data []byte - shPrm shard.RngPrm + err error + data []byte ) - shPrm.SetAddress(addr) - shPrm.SetRange(offset, length) - err = e.get(addr, func(sh *shard.Shard, ignoreMetadata bool) (bool, error) { - shPrm.SetIgnoreMeta(ignoreMetadata) - res, err := sh.GetRange(shPrm) + err = e.get(addr, func(sh *shard.Shard, ignoreMetadata bool) error { + res, err := sh.GetRange(addr, offset, length, ignoreMetadata) if err == nil { - data = res.Object().Payload() + data = res.Payload() } - return res.HasMeta(), err + return err }) return data, err } diff --git a/pkg/local_object_storage/engine/restore.go b/pkg/local_object_storage/engine/restore.go index 14738eafa3..9d4f331dcb 100644 --- a/pkg/local_object_storage/engine/restore.go +++ b/pkg/local_object_storage/engine/restore.go @@ -1,11 +1,15 @@ package engine -import "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" +import ( + "io" + + "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" +) // RestoreShard restores objects from dump to the shard with provided identifier. // // Returns an error if shard is not read-only. -func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error { +func (e *StorageEngine) RestoreShard(id *shard.ID, r io.Reader, ignoreErrors bool) error { e.mtx.RLock() defer e.mtx.RUnlock() @@ -14,6 +18,6 @@ func (e *StorageEngine) RestoreShard(id *shard.ID, prm shard.RestorePrm) error { return errShardNotFound } - _, err := sh.Restore(prm) + _, _, err := sh.Restore(r, ignoreErrors) return err } diff --git a/pkg/local_object_storage/engine/select.go b/pkg/local_object_storage/engine/select.go index 3d00c99dfc..3c4572a187 100644 --- a/pkg/local_object_storage/engine/select.go +++ b/pkg/local_object_storage/engine/select.go @@ -4,7 +4,6 @@ import ( "errors" objectcore "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" "github.com/nspcc-dev/neofs-sdk-go/object" oid "github.com/nspcc-dev/neofs-sdk-go/object/id" @@ -30,12 +29,8 @@ func (e *StorageEngine) Select(cnr cid.ID, filters object.SearchFilters) ([]oid. 
addrList := make([]oid.Address, 0) uniqueMap := make(map[string]struct{}) - var shPrm shard.SelectPrm - shPrm.SetContainerID(cnr) - shPrm.SetFilters(filters) - for _, sh := range e.unsortedShards() { - res, err := sh.Select(shPrm) + res, err := sh.Select(cnr, filters) if err != nil { if errors.Is(err, objectcore.ErrInvalidSearchQuery) { return addrList, err @@ -44,7 +39,7 @@ func (e *StorageEngine) Select(cnr cid.ID, filters object.SearchFilters) ([]oid. continue } - for _, addr := range res.AddressList() { // save only unique values + for _, addr := range res { // save only unique values if _, ok := uniqueMap[addr.EncodeToString()]; !ok { uniqueMap[addr.EncodeToString()] = struct{}{} addrList = append(addrList, addr) @@ -82,7 +77,7 @@ func (e *StorageEngine) List(limit uint64) ([]oid.Address, error) { e.reportShardError(sh, "could not select objects from shard", err) continue } - for _, addr := range res.AddressList() { // save only unique values + for _, addr := range res { // save only unique values if _, ok := uniqueMap[addr.EncodeToString()]; !ok { uniqueMap[addr.EncodeToString()] = struct{}{} addrList = append(addrList, addr) diff --git a/pkg/local_object_storage/engine/writecache.go b/pkg/local_object_storage/engine/writecache.go index d7dd469b38..e4385ed89c 100644 --- a/pkg/local_object_storage/engine/writecache.go +++ b/pkg/local_object_storage/engine/writecache.go @@ -14,5 +14,5 @@ func (e *StorageEngine) FlushWriteCache(id *shard.ID) error { return errShardNotFound } - return sh.FlushWriteCache(shard.FlushWriteCachePrm{}) + return sh.FlushWriteCache(false) } diff --git a/pkg/local_object_storage/metabase/counter_test.go b/pkg/local_object_storage/metabase/counter_test.go index 88401f00ee..cf6938e19a 100644 --- a/pkg/local_object_storage/metabase/counter_test.go +++ b/pkg/local_object_storage/metabase/counter_test.go @@ -77,13 +77,10 @@ func TestCounters(t *testing.T) { inhumedObjs[i] = objectcore.AddressOf(o) } - var prm meta.InhumePrm - prm.SetTombstone(oidtest.Address(), 0) - prm.SetAddresses(inhumedObjs...) - - res, err := db.Inhume(prm) + inhumed, deleted, err := db.Inhume(oidtest.Address(), 0, false, inhumedObjs...) require.NoError(t, err) - require.Equal(t, uint64(len(inhumedObjs)), res.AvailableInhumed()) + require.Equal(t, uint64(len(inhumedObjs)), inhumed) + require.Nil(t, deleted) c, err = db.ObjectCounters() require.NoError(t, err) @@ -147,11 +144,7 @@ func TestCounters(t *testing.T) { inhumedObjs[i] = objectcore.AddressOf(o) } - var prm meta.InhumePrm - prm.SetTombstone(oidtest.Address(), 0) - prm.SetAddresses(inhumedObjs...) - - _, err = db.Inhume(prm) + _, _, err = db.Inhume(oidtest.Address(), 0, false, inhumedObjs...) 
require.NoError(t, err) c, err = db.ObjectCounters() @@ -211,13 +204,10 @@ func TestCounters_Expired(t *testing.T) { // the GC do) should decrease the logic counter despite the // expiration fact - var inhumePrm meta.InhumePrm - inhumePrm.SetGCMark() - inhumePrm.SetAddresses(oo[0]) - - inhumeRes, err := db.Inhume(inhumePrm) + inhumed, deleted, err := db.MarkGarbage(false, false, oo[0]) require.NoError(t, err) - require.Equal(t, uint64(1), inhumeRes.AvailableInhumed()) + require.Equal(t, uint64(1), inhumed) + require.Nil(t, deleted) c, err = db.ObjectCounters() require.NoError(t, err) diff --git a/pkg/local_object_storage/metabase/get_test.go b/pkg/local_object_storage/metabase/get_test.go index 600bdadcfb..b047a5d028 100644 --- a/pkg/local_object_storage/metabase/get_test.go +++ b/pkg/local_object_storage/metabase/get_test.go @@ -129,10 +129,7 @@ func TestDB_Get(t *testing.T) { obj = oidtest.Address() - var prm meta.InhumePrm - prm.SetAddresses(obj) - - _, err = db.Inhume(prm) + _, _, err = db.MarkGarbage(false, false, obj) require.NoError(t, err) _, err = metaGet(db, obj, false) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) diff --git a/pkg/local_object_storage/metabase/graveyard_test.go b/pkg/local_object_storage/metabase/graveyard_test.go index 05d4cae2c7..b82fca9bac 100644 --- a/pkg/local_object_storage/metabase/graveyard_test.go +++ b/pkg/local_object_storage/metabase/graveyard_test.go @@ -61,11 +61,7 @@ func TestDB_Iterate_OffsetNotFound(t *testing.T) { err = putBig(db, obj1) require.NoError(t, err) - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(object.AddressOf(obj1)) - inhumePrm.SetGCMark() - - _, err = db.Inhume(inhumePrm) + _, _, err = db.MarkGarbage(false, false, object.AddressOf(obj1)) require.NoError(t, err) var ( @@ -123,22 +119,14 @@ func TestDB_IterateDeletedObjects(t *testing.T) { err = putBig(db, obj4) require.NoError(t, err) - var inhumePrm meta.InhumePrm - // inhume with tombstone addrTombstone := oidtest.Address() - inhumePrm.SetAddresses(object.AddressOf(obj1), object.AddressOf(obj2)) - inhumePrm.SetTombstone(addrTombstone, 0) - - _, err = db.Inhume(inhumePrm) + _, _, err = db.Inhume(addrTombstone, 0, false, object.AddressOf(obj1), object.AddressOf(obj2)) require.NoError(t, err) - inhumePrm.SetAddresses(object.AddressOf(obj3), object.AddressOf(obj4)) - inhumePrm.SetGCMark() - // inhume with GC mark - _, err = db.Inhume(inhumePrm) + _, _, err = db.MarkGarbage(false, false, object.AddressOf(obj3), object.AddressOf(obj4)) require.NoError(t, err) var ( @@ -206,13 +194,9 @@ func TestDB_IterateOverGraveyard_Offset(t *testing.T) { // inhume with tombstone addrTombstone := oidtest.Address() - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses( - object.AddressOf(obj1), object.AddressOf(obj2), + _, _, err = db.Inhume(addrTombstone, 0, false, object.AddressOf(obj1), object.AddressOf(obj2), object.AddressOf(obj3), object.AddressOf(obj4)) - inhumePrm.SetTombstone(addrTombstone, 0) - _, err = db.Inhume(inhumePrm) require.NoError(t, err) expectedGraveyard := []oid.Address{ @@ -294,13 +278,9 @@ func TestDB_IterateOverGarbage_Offset(t *testing.T) { err = putBig(db, obj4) require.NoError(t, err) - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses( - object.AddressOf(obj1), object.AddressOf(obj2), + _, _, err = db.MarkGarbage(false, false, object.AddressOf(obj1), object.AddressOf(obj2), object.AddressOf(obj3), object.AddressOf(obj4)) - inhumePrm.SetGCMark() - _, err = db.Inhume(inhumePrm) require.NoError(t, err) expectedGarbage := []oid.Address{ @@ -406,15 
+386,10 @@ func TestDropExpiredTSMarks(t *testing.T) { droppedObjects := oidtest.Addresses(1024) tombstone := oidtest.Address() - var pInh meta.InhumePrm - pInh.SetTombstone(tombstone, epoch) - pInh.SetAddresses(droppedObjects[:len(droppedObjects)/2]...) - _, err := db.Inhume(pInh) + _, _, err := db.Inhume(tombstone, epoch, false, droppedObjects[:len(droppedObjects)/2]...) require.NoError(t, err) - pInh.SetTombstone(tombstone, epoch+1) - pInh.SetAddresses(droppedObjects[len(droppedObjects)/2:]...) - _, err = db.Inhume(pInh) + _, _, err = db.Inhume(tombstone, epoch+1, false, droppedObjects[len(droppedObjects)/2:]...) require.NoError(t, err) for _, o := range droppedObjects { diff --git a/pkg/local_object_storage/metabase/inhume.go b/pkg/local_object_storage/metabase/inhume.go index 8c4db6da3c..c68b528c89 100644 --- a/pkg/local_object_storage/metabase/inhume.go +++ b/pkg/local_object_storage/metabase/inhume.go @@ -14,72 +14,6 @@ import ( "go.etcd.io/bbolt" ) -// InhumePrm encapsulates parameters for Inhume operation. -type InhumePrm struct { - tomb *oid.Address - tombExpiration uint64 - - target []oid.Address - - lockObjectHandling bool - - forceRemoval bool -} - -// InhumeRes encapsulates results of Inhume operation. -type InhumeRes struct { - deletedLockObj []oid.Address - availableImhumed uint64 -} - -// AvailableInhumed return number of available object -// that have been inhumed. -func (i InhumeRes) AvailableInhumed() uint64 { - return i.availableImhumed -} - -// DeletedLockObjects returns deleted object of LOCK -// type. Returns always nil if WithoutLockObjectHandling -// was provided to the InhumePrm. -func (i InhumeRes) DeletedLockObjects() []oid.Address { - return i.deletedLockObj -} - -// SetAddresses sets a list of object addresses that should be inhumed. -func (p *InhumePrm) SetAddresses(addrs ...oid.Address) { - p.target = addrs -} - -// SetTombstone sets tombstone address as the reason for inhume operation -// and tombstone's expiration. -// -// addr should not be nil. -// Should not be called along with SetGCMark. -func (p *InhumePrm) SetTombstone(addr oid.Address, epoch uint64) { - p.tomb = &addr - p.tombExpiration = epoch -} - -// SetGCMark marks the object to be physically removed. -// -// Should not be called along with SetTombstone. -func (p *InhumePrm) SetGCMark() { - p.tomb = nil -} - -// SetLockObjectHandling checks if there were -// any LOCK object among the targets set via WithAddresses. -func (p *InhumePrm) SetLockObjectHandling() { - p.lockObjectHandling = true -} - -// SetForceGCMark allows removal any object. Expected to be -// called only in control service. -func (p *InhumePrm) SetForceGCMark() { - p.tomb = nil - p.forceRemoval = true -} - var errBreakBucketForEach = errors.New("bucket ForEach break") // ErrLockObjectRemoval is returned when inhume operation is being @@ -92,21 +26,36 @@ var ErrLockObjectRemoval = logicerr.New("lock object removal") // if at least one object is locked. Returns ErrLockObjectRemoval if inhuming // is being performed on lock (not locked) object. // -// NOTE: Marks any object with GC mark (despite any prohibitions on operations -// with that object) if WithForceGCMark option has been provided. -func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { +// Returns the number of available objects that were inhumed and a list of +// deleted LOCK objects (if handleLocks parameter is set). 
+func (db *DB) Inhume(tombstone oid.Address, tombExpiration uint64, handleLocks bool, addrs ...oid.Address) (uint64, []oid.Address, error) { + return db.inhume(&tombstone, tombExpiration, false, handleLocks, addrs...) +} + +// MarkGarbage marks objects to be physically removed from shard. force flag +// allows to override any restrictions imposed on object deletion (to be used +// by control service and other manual intervention cases). Otherwise similar +// to [DB.Inhume], but doesn't need a tombstone. +func (db *DB) MarkGarbage(force bool, handleLocks bool, addrs ...oid.Address) (uint64, []oid.Address, error) { + return db.inhume(nil, 0, force, handleLocks, addrs...) +} + +func (db *DB) inhume(tombstone *oid.Address, tombExpiration uint64, force bool, handleLocks bool, addrs ...oid.Address) (uint64, []oid.Address, error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() if db.mode.NoMetabase() { - return InhumeRes{}, ErrDegradedMode + return 0, nil, ErrDegradedMode } else if db.mode.ReadOnly() { - return InhumeRes{}, ErrReadOnlyMode + return 0, nil, ErrReadOnlyMode } - currEpoch := db.epochState.CurrentEpoch() - var inhumed uint64 - + var ( + currEpoch = db.epochState.CurrentEpoch() + deletedLockObjs []oid.Address + err error + inhumed uint64 + ) err = db.boltDB.Update(func(tx *bbolt.Tx) error { garbageObjectsBKT := tx.Bucket(garbageObjectsBucketName) garbageContainersBKT := tx.Bucket(garbageContainersBucketName) @@ -124,9 +73,9 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { value []byte ) - if prm.tomb != nil { + if tombstone != nil { bkt = graveyardBKT - tombKey := addressKey(*prm.tomb, make([]byte, addressKeySize+8)) + tombKey := addressKey(*tombstone, make([]byte, addressKeySize+8)) // it is forbidden to have a tomb-on-tomb in NeoFS, // so graveyard keys must not be addresses of tombstones @@ -138,19 +87,19 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { } } - value = binary.LittleEndian.AppendUint64(tombKey, prm.tombExpiration) + value = binary.LittleEndian.AppendUint64(tombKey, tombExpiration) } else { bkt = garbageObjectsBKT value = zeroValue } buf := make([]byte, addressKeySize) - for i := range prm.target { - id := prm.target[i].Object() - cnr := prm.target[i].Container() + for _, addr := range addrs { + id := addr.Object() + cnr := addr.Container() // prevent locked objects to be inhumed - if !prm.forceRemoval && objectLocked(tx, cnr, id) { + if !force && objectLocked(tx, cnr, id) { return apistatus.ObjectLocked{} } @@ -159,7 +108,7 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { // prevent lock objects to be inhumed // if `Inhume` was called not with the // `WithForceGCMark` option - if !prm.forceRemoval { + if !force { if isLockObject(tx, cnr, id) { return ErrLockObjectRemoval } @@ -167,8 +116,8 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { lockWasChecked = true } - obj, err := db.get(tx, prm.target[i], buf, false, true, currEpoch) - targetKey := addressKey(prm.target[i], buf) + obj, err := db.get(tx, addr, buf, false, true, currEpoch) + targetKey := addressKey(addr, buf) if err == nil { if inGraveyardWithKey(targetKey, graveyardBKT, garbageObjectsBKT, garbageContainersBKT) == 0 { // object is available, decrement the @@ -186,7 +135,7 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { } } - if prm.tomb != nil { + if tombstone != nil { targetIsTomb := false // iterate over graveyard and check if target address @@ -226,7 +175,7 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, 
err error) { return err } - if prm.lockObjectHandling { + if handleLocks { // do not perform lock check if // it was already called if lockWasChecked { @@ -236,7 +185,7 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { } if isLockObject(tx, cnr, id) { - res.deletedLockObj = append(res.deletedLockObj, prm.target[i]) + deletedLockObjs = append(deletedLockObjs, addr) } } } @@ -244,9 +193,7 @@ func (db *DB) Inhume(prm InhumePrm) (res InhumeRes, err error) { return db.updateCounter(tx, logical, inhumed, false) }) - res.availableImhumed = inhumed - - return + return inhumed, deletedLockObjs, err } // InhumeContainer marks every object in a container as removed. diff --git a/pkg/local_object_storage/metabase/inhume_test.go b/pkg/local_object_storage/metabase/inhume_test.go index 40eb80b970..34d508a01b 100644 --- a/pkg/local_object_storage/metabase/inhume_test.go +++ b/pkg/local_object_storage/metabase/inhume_test.go @@ -41,28 +41,21 @@ func TestInhumeTombOnTomb(t *testing.T) { var ( err error - addr1 = oidtest.Address() - addr2 = oidtest.Address() - addr3 = oidtest.Address() - inhumePrm meta.InhumePrm + addr1 = oidtest.Address() + addr2 = oidtest.Address() + addr3 = oidtest.Address() ) - inhumePrm.SetAddresses(addr1) - inhumePrm.SetTombstone(addr2, 0) - // inhume addr1 via addr2 - _, err = db.Inhume(inhumePrm) + _, _, err = db.Inhume(addr2, 0, false, addr1) require.NoError(t, err) // addr1 should become inhumed {addr1:addr2} _, err = db.Exists(addr1, false) require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved)) - inhumePrm.SetAddresses(addr3) - inhumePrm.SetTombstone(addr1, 0) - // try to inhume addr3 via addr1 - _, err = db.Inhume(inhumePrm) + _, _, err = db.Inhume(addr1, 0, false, addr3) require.NoError(t, err) // record with {addr1:addr2} should be removed from graveyard @@ -76,11 +69,8 @@ func TestInhumeTombOnTomb(t *testing.T) { _, err = db.Exists(addr3, false) require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved)) - inhumePrm.SetAddresses(addr1) - inhumePrm.SetTombstone(oidtest.Address(), 0) - // try to inhume addr1 (which is already a tombstone in graveyard) - _, err = db.Inhume(inhumePrm) + _, _, err = db.Inhume(oidtest.Address(), 0, false, addr1) require.NoError(t, err) // record with addr1 key should not appear in graveyard @@ -98,10 +88,7 @@ func TestInhumeLocked(t *testing.T) { err := db.Lock(locked.Container(), oidtest.ID(), []oid.ID{locked.Object()}) require.NoError(t, err) - var prm meta.InhumePrm - prm.SetAddresses(locked) - - _, err = db.Inhume(prm) + _, _, err = db.MarkGarbage(false, false, locked) var e apistatus.ObjectLocked require.ErrorAs(t, err, &e) @@ -153,10 +140,6 @@ func TestInhumeContainer(t *testing.T) { } func metaInhume(db *meta.DB, target, tomb oid.Address) error { - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(target) - inhumePrm.SetTombstone(tomb, 0) - - _, err := db.Inhume(inhumePrm) + _, _, err := db.Inhume(tomb, 0, false, target) return err } diff --git a/pkg/local_object_storage/metabase/iterators_test.go b/pkg/local_object_storage/metabase/iterators_test.go index 8c604aeee0..a8d634896a 100644 --- a/pkg/local_object_storage/metabase/iterators_test.go +++ b/pkg/local_object_storage/metabase/iterators_test.go @@ -73,19 +73,12 @@ func TestDB_IterateCoveredByTombstones(t *testing.T) { protectedLocked := oidtest.Address() garbage := oidtest.Address() - var prm meta.InhumePrm var err error - prm.SetAddresses(protected1, protected2, protectedLocked) - prm.SetTombstone(ts, 0) - - _, err = db.Inhume(prm) + _, _, err = 
db.Inhume(ts, 0, false, protected1, protected2, protectedLocked) require.NoError(t, err) - prm.SetAddresses(garbage) - prm.SetGCMark() - - _, err = db.Inhume(prm) + _, _, err = db.MarkGarbage(false, false, garbage) require.NoError(t, err) var handled []oid.Address diff --git a/pkg/local_object_storage/metabase/lock_test.go b/pkg/local_object_storage/metabase/lock_test.go index c02de0ca04..10e08555cb 100644 --- a/pkg/local_object_storage/metabase/lock_test.go +++ b/pkg/local_object_storage/metabase/lock_test.go @@ -58,33 +58,28 @@ func TestDB_Lock(t *testing.T) { objAddr := objectcore.AddressOf(objs[0]) lockAddr := objectcore.AddressOf(lockObj) - var inhumePrm meta.InhumePrm - inhumePrm.SetGCMark() - // check locking relation - inhumePrm.SetAddresses(objAddr) - _, err := db.Inhume(inhumePrm) + _, _, err := db.MarkGarbage(false, false, objAddr) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) - inhumePrm.SetTombstone(oidtest.Address(), 0) - _, err = db.Inhume(inhumePrm) + _, _, err = db.Inhume(oidtest.Address(), 0, false, objAddr) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) // try to remove lock object - inhumePrm.SetAddresses(lockAddr) - _, err = db.Inhume(inhumePrm) + _, _, err = db.MarkGarbage(false, false, lockAddr) + require.Error(t, err) + + _, _, err = db.Inhume(oidtest.Address(), 0, false, lockAddr) require.Error(t, err) // check that locking relation has not been // dropped - inhumePrm.SetAddresses(objAddr) - _, err = db.Inhume(inhumePrm) + _, _, err = db.MarkGarbage(false, false, objAddr) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) - inhumePrm.SetTombstone(oidtest.Address(), 0) - _, err = db.Inhume(inhumePrm) + _, _, err = db.Inhume(oidtest.Address(), 0, false, objAddr) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) }) @@ -99,25 +94,18 @@ func TestDB_Lock(t *testing.T) { require.ErrorAs(t, err, new(apistatus.ObjectLocked)) // free locked object - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(lockAddr) - inhumePrm.SetForceGCMark() - inhumePrm.SetLockObjectHandling() - - res, err := db.Inhume(inhumePrm) + inhumed, deletedLocks, err := db.MarkGarbage(true, true, lockAddr) require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 1) - require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0]) + require.Equal(t, uint64(1), inhumed) + require.Len(t, deletedLocks, 1) + require.Equal(t, objectcore.AddressOf(lockObj), deletedLocks[0]) unlocked, err := db.FreeLockedBy([]oid.Address{lockAddr}) require.NoError(t, err) require.ElementsMatch(t, objsToAddrs(objs), unlocked) - inhumePrm.SetAddresses(objAddr) - inhumePrm.SetGCMark() - // now we can inhume the object - _, err = db.Inhume(inhumePrm) + _, _, err = db.MarkGarbage(false, false, objAddr) require.NoError(t, err) }) @@ -128,46 +116,35 @@ func TestDB_Lock(t *testing.T) { objs, lockObj := putAndLockObj(t, db, objsNum) // force remove objects - - var inhumePrm meta.InhumePrm - inhumePrm.SetForceGCMark() - inhumePrm.SetAddresses(objectcore.AddressOf(lockObj)) - inhumePrm.SetLockObjectHandling() - - res, err := db.Inhume(inhumePrm) + inhumed, deletedLocks, err := db.MarkGarbage(true, true, objectcore.AddressOf(lockObj)) require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 1) - require.Equal(t, objectcore.AddressOf(lockObj), res.DeletedLockObjects()[0]) + require.Equal(t, uint64(1), inhumed) + require.Len(t, deletedLocks, 1) + require.Equal(t, objectcore.AddressOf(lockObj), deletedLocks[0]) // unlock just objects that were locked by // just removed locker - unlocked, 
err := db.FreeLockedBy([]oid.Address{res.DeletedLockObjects()[0]}) + unlocked, err := db.FreeLockedBy(deletedLocks) require.NoError(t, err) require.ElementsMatch(t, objsToAddrs(objs), unlocked) // removing objects after unlock - inhumePrm.SetGCMark() - for i := range objsNum { - inhumePrm.SetAddresses(objectcore.AddressOf(objs[i])) - - res, err = db.Inhume(inhumePrm) + inhumed, deletedLocks, err = db.MarkGarbage(false, true, objectcore.AddressOf(objs[i])) require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 0) + require.Equal(t, uint64(1), inhumed) + require.Len(t, deletedLocks, 0) } }) t.Run("skipping lock object handling", func(t *testing.T) { _, lockObj := putAndLockObj(t, db, 1) - var inhumePrm meta.InhumePrm - inhumePrm.SetForceGCMark() - inhumePrm.SetAddresses(objectcore.AddressOf(lockObj)) - - res, err := db.Inhume(inhumePrm) + inhumed, deletedLocks, err := db.MarkGarbage(true, false, objectcore.AddressOf(lockObj)) require.NoError(t, err) - require.Len(t, res.DeletedLockObjects(), 0) + require.Equal(t, uint64(1), inhumed) + require.Len(t, deletedLocks, 0) }) } diff --git a/pkg/local_object_storage/metabase/revive_test.go b/pkg/local_object_storage/metabase/revive_test.go index 49f99310a3..8e075fc4bd 100644 --- a/pkg/local_object_storage/metabase/revive_test.go +++ b/pkg/local_object_storage/metabase/revive_test.go @@ -59,11 +59,7 @@ func TestDB_ReviveObject(t *testing.T) { require.True(t, exists) // inhume with GC mark - var gcPrm meta.InhumePrm - gcPrm.SetGCMark() - gcPrm.SetAddresses(object.AddressOf(raw)) - - _, err = db.Inhume(gcPrm) + _, _, err = db.MarkGarbage(false, false, object.AddressOf(raw)) require.NoError(t, err) _, err = metaExists(db, object.AddressOf(raw)) @@ -92,10 +88,7 @@ func TestDB_ReviveObject(t *testing.T) { err := db.Lock(locked.Container(), oidtest.ID(), []oid.ID{locked.Object()}) require.NoError(t, err) - var prm meta.InhumePrm - prm.SetAddresses(locked) - - _, err = db.Inhume(prm) + _, _, err = db.MarkGarbage(false, false, locked) require.ErrorIs(t, err, new(apistatus.ObjectLocked)) diff --git a/pkg/local_object_storage/metabase/select.go b/pkg/local_object_storage/metabase/select.go index c9fc2b8b14..ca15ca1ab9 100644 --- a/pkg/local_object_storage/metabase/select.go +++ b/pkg/local_object_storage/metabase/select.go @@ -320,7 +320,7 @@ func (db *DB) selectFromFKBT( }) }) if err != nil { - db.log.Debug("error in FKBT selection", zap.String("error", err.Error())) + db.log.Debug("error in FKBT selection", zap.Error(err)) } return @@ -339,7 +339,7 @@ func (db *DB) selectFromFKBT( }) }) if err != nil { - db.log.Debug("error in FKBT selection", zap.String("error", err.Error())) + db.log.Debug("error in FKBT selection", zap.Error(err)) } } @@ -409,7 +409,7 @@ func (db *DB) selectFromList( case object.MatchStringEqual: lst, err = decodeList(bkt.Get(bucketKeyHelper(f.Header(), f.Value()))) if err != nil { - db.log.Debug("can't decode list bucket leaf", zap.String("error", err.Error())) + db.log.Debug("can't decode list bucket leaf", zap.Error(err)) return } default: @@ -424,7 +424,7 @@ func (db *DB) selectFromList( l, err := decodeList(val) if err != nil { db.log.Debug("can't decode list bucket leaf", - zap.String("error", err.Error()), + zap.Error(err), ) return err @@ -435,7 +435,7 @@ func (db *DB) selectFromList( return nil }); err != nil { db.log.Debug("can't iterate over the bucket", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -499,7 +499,7 @@ func (db *DB) selectObjectID( }) if err != nil { db.log.Debug("could not iterate 
over the buckets", - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/local_object_storage/metabase/version_test.go b/pkg/local_object_storage/metabase/version_test.go index 0b4d840492..c21911a3dc 100644 --- a/pkg/local_object_storage/metabase/version_test.go +++ b/pkg/local_object_storage/metabase/version_test.go @@ -103,18 +103,22 @@ type inhumeV2Prm struct { forceRemoval bool } -func (db *DB) inhumeV2(prm inhumeV2Prm) (res InhumeRes, err error) { +func (db *DB) inhumeV2(prm inhumeV2Prm) (uint64, []oid.Address, error) { db.modeMtx.RLock() defer db.modeMtx.RUnlock() if db.mode.NoMetabase() { - return InhumeRes{}, ErrDegradedMode + return 0, nil, ErrDegradedMode } else if db.mode.ReadOnly() { - return InhumeRes{}, ErrReadOnlyMode + return 0, nil, ErrReadOnlyMode } - currEpoch := db.epochState.CurrentEpoch() - var inhumed uint64 + var ( + currEpoch = db.epochState.CurrentEpoch() + deletedLockObjs []oid.Address + err error + inhumed uint64 + ) err = db.boltDB.Update(func(tx *bbolt.Tx) error { garbageObjectsBKT := tx.Bucket(garbageObjectsBucketName) @@ -245,7 +249,7 @@ func (db *DB) inhumeV2(prm inhumeV2Prm) (res InhumeRes, err error) { } if isLockObject(tx, cnr, id) { - res.deletedLockObj = append(res.deletedLockObj, prm.target[i]) + deletedLockObjs = append(deletedLockObjs, prm.target[i]) } } } @@ -253,9 +257,7 @@ func (db *DB) inhumeV2(prm inhumeV2Prm) (res InhumeRes, err error) { return db.updateCounter(tx, logical, inhumed, false) }) - res.availableImhumed = inhumed - - return + return inhumed, deletedLockObjs, err } const testEpoch = 123 @@ -299,7 +301,7 @@ func TestMigrate2to3(t *testing.T) { tomb := oidtest.Address() tombRaw := addressKey(tomb, make([]byte, addressKeySize)) - _, err := db.inhumeV2(inhumeV2Prm{ + _, _, err := db.inhumeV2(inhumeV2Prm{ target: testObjs, tomb: &tomb, }) diff --git a/pkg/local_object_storage/shard/container.go b/pkg/local_object_storage/shard/container.go index 64d6157479..b516fda903 100644 --- a/pkg/local_object_storage/shard/container.go +++ b/pkg/local_object_storage/shard/container.go @@ -7,38 +7,15 @@ import ( cid "github.com/nspcc-dev/neofs-sdk-go/container/id" ) -type ContainerSizePrm struct { - cnr cid.ID -} - -type ContainerSizeRes struct { - size uint64 -} - -func (p *ContainerSizePrm) SetContainerID(cnr cid.ID) { - p.cnr = cnr -} - -func (r ContainerSizeRes) Size() uint64 { - return r.size -} - -func (s *Shard) ContainerSize(prm ContainerSizePrm) (ContainerSizeRes, error) { +func (s *Shard) ContainerSize(cnr cid.ID) (uint64, error) { s.m.RLock() defer s.m.RUnlock() if s.info.Mode.NoMetabase() { - return ContainerSizeRes{}, ErrDegradedMode - } - - size, err := s.metaBase.ContainerSize(prm.cnr) - if err != nil { - return ContainerSizeRes{}, fmt.Errorf("could not get container size: %w", err) + return 0, ErrDegradedMode } - return ContainerSizeRes{ - size: size, - }, nil + return s.metaBase.ContainerSize(cnr) } // DeleteContainer deletes any information related to the container diff --git a/pkg/local_object_storage/shard/container_test.go b/pkg/local_object_storage/shard/container_test.go index ca1d0de44d..923a949a05 100644 --- a/pkg/local_object_storage/shard/container_test.go +++ b/pkg/local_object_storage/shard/container_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" "github.com/stretchr/testify/require" @@ -15,33 +14,26 @@ func 
TestShard_DeleteContainer(t *testing.T) { defer releaseShard(sh, t) cID := cidtest.ID() - var prm shard.PutPrm o1 := generateObjectWithCID(cID) - prm.SetObject(o1) - _, err := sh.Put(prm) + err := sh.Put(o1, nil, 0) require.NoError(t, err) o2 := generateObjectWithCID(cID) o2.SetType(objectSDK.TypeStorageGroup) - prm.SetObject(o2) - _, err = sh.Put(prm) + err = sh.Put(o2, nil, 0) require.NoError(t, err) o3 := generateObjectWithCID(cID) - prm.SetObject(o3) o3.SetType(objectSDK.TypeLock) - _, err = sh.Put(prm) + err = sh.Put(o3, nil, 0) require.NoError(t, err) err = sh.DeleteContainer(context.Background(), cID) require.NoError(t, err) - var selectPrm shard.SelectPrm - selectPrm.SetContainerID(cID) - - res, err := sh.Select(selectPrm) + res, err := sh.Select(cID, nil) require.NoError(t, err) - require.Empty(t, res.AddressList()) + require.Empty(t, res) } diff --git a/pkg/local_object_storage/shard/control.go b/pkg/local_object_storage/shard/control.go index 7ed49ad649..c3d19a1e1e 100644 --- a/pkg/local_object_storage/shard/control.go +++ b/pkg/local_object_storage/shard/control.go @@ -217,12 +217,7 @@ func (s *Shard) resyncObjectHandler(addr oid.Address, data []byte, descriptor [] tombMembers = append(tombMembers, a) } - var inhumePrm meta.InhumePrm - - inhumePrm.SetTombstone(tombAddr, exp) - inhumePrm.SetAddresses(tombMembers...) - - _, err = s.metaBase.Inhume(inhumePrm) + _, _, err = s.metaBase.Inhume(tombAddr, exp, false, tombMembers...) if err != nil { return fmt.Errorf("could not inhume objects: %w", err) } diff --git a/pkg/local_object_storage/shard/control_test.go b/pkg/local_object_storage/shard/control_test.go index e0f4614546..2abc42dacc 100644 --- a/pkg/local_object_storage/shard/control_test.go +++ b/pkg/local_object_storage/shard/control_test.go @@ -111,9 +111,7 @@ func TestResyncMetabaseCorrupted(t *testing.T) { obj.SetType(objectSDK.TypeRegular) obj.SetPayload([]byte{0, 1, 2, 3, 4, 5}) - var putPrm PutPrm - putPrm.SetObject(&obj) - _, err := sh.Put(putPrm) + err := sh.Put(&obj, nil, 0) require.NoError(t, err) require.NoError(t, sh.Close()) @@ -132,9 +130,7 @@ func TestResyncMetabaseCorrupted(t *testing.T) { require.NoError(t, sh.Open()) require.NoError(t, sh.Init()) - var getPrm GetPrm - getPrm.SetAddress(addr) - _, err = sh.Get(getPrm) + _, err = sh.Get(addr, false) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) require.NoError(t, sh.Close()) } @@ -232,18 +228,12 @@ func TestResyncMetabase(t *testing.T) { tombMembers = append(tombMembers, a) } - var putPrm PutPrm - for _, v := range mObjs { - putPrm.SetObject(v.obj) - - _, err := sh.Put(putPrm) + err := sh.Put(v.obj, nil, 0) require.NoError(t, err) } - putPrm.SetObject(&tombObj) - - _, err := sh.Put(putPrm) + err := sh.Put(&tombObj, nil, 0) require.NoError(t, err) // LOCK object handling @@ -254,25 +244,17 @@ func TestResyncMetabase(t *testing.T) { lockObj.SetContainerID(cnrLocked) lockObj.WriteLock(lock) - putPrm.SetObject(&lockObj) - _, err = sh.Put(putPrm) + err = sh.Put(&lockObj, nil, 0) require.NoError(t, err) lockID := lockObj.GetID() require.NoError(t, sh.Lock(cnrLocked, lockID, locked)) - var inhumePrm InhumePrm - inhumePrm.InhumeByTomb(object.AddressOf(&tombObj), 0, tombMembers...) - - _, err = sh.Inhume(inhumePrm) + err = sh.Inhume(object.AddressOf(&tombObj), 0, tombMembers...) 
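As a usage sketch of the simplified shard container API shown above (ContainerSize and Select now take the container ID and filters directly instead of Prm structures): the helper name, the printed summary and the way the shard instance is obtained are assumptions for illustration; the signatures are taken from the patch.

package example

import (
	"fmt"

	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
)

// containerSummary reports the accumulated size of a container on this shard
// and how many of its objects the metabase can select.
func containerSummary(sh *shard.Shard, cnr cid.ID) error {
	size, err := sh.ContainerSize(cnr)
	if err != nil {
		return fmt.Errorf("container size: %w", err)
	}

	// nil filters select every object of the container, as in the test above.
	addrs, err := sh.Select(cnr, nil)
	if err != nil {
		return fmt.Errorf("select: %w", err)
	}

	fmt.Printf("container %s: %d bytes, %d objects\n", cnr, size, len(addrs))
	return nil
}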
require.NoError(t, err) - var headPrm HeadPrm - checkObj := func(addr oid.Address, expObj *objectSDK.Object) { - headPrm.SetAddress(addr) - - res, err := sh.Head(headPrm) + res, err := sh.Head(addr, false) if expObj == nil { require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) @@ -280,7 +262,7 @@ func TestResyncMetabase(t *testing.T) { } require.NoError(t, err) - require.Equal(t, expObj.CutPayload(), res.Object()) + require.Equal(t, expObj.CutPayload(), res) } checkAllObjs := func(exists bool) { @@ -295,9 +277,7 @@ func TestResyncMetabase(t *testing.T) { checkTombMembers := func(exists bool) { for _, member := range tombMembers { - headPrm.SetAddress(member) - - _, err := sh.Head(headPrm) + _, err := sh.Head(member, false) if exists { require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved)) @@ -314,10 +294,7 @@ func TestResyncMetabase(t *testing.T) { for i := range locked { addr.SetObject(locked[i]) - var prm InhumePrm - prm.MarkAsGarbage(addr) - - _, err := sh.Inhume(prm) + err := sh.MarkGarbage(false, addr) require.ErrorAs(t, err, new(apistatus.ObjectLocked), "object %s should be locked", locked[i]) } diff --git a/pkg/local_object_storage/shard/delete.go b/pkg/local_object_storage/shard/delete.go index 2aa0e9832f..2505e3b104 100644 --- a/pkg/local_object_storage/shard/delete.go +++ b/pkg/local_object_storage/shard/delete.go @@ -9,73 +9,49 @@ import ( "go.uber.org/zap" ) -// DeletePrm groups the parameters of Delete operation. -type DeletePrm struct { - addr []oid.Address - skipNotFoundError bool -} - -// DeleteRes groups the resulting values of Delete operation. -type DeleteRes struct{} - -// SetAddresses is a Delete option to set the addresses of the objects to delete. -// -// Option is required. -func (p *DeletePrm) SetAddresses(addr ...oid.Address) { - p.addr = append(p.addr, addr...) -} - -// SkipNotFoundError is a Delete option to skip errors when an already deleted -// object is being deleted. -func (p *DeletePrm) SkipNotFoundError() { - p.skipNotFoundError = true -} - // Delete removes data from the shard's writeCache, metaBase and // blobStor. 
-func (s *Shard) Delete(prm DeletePrm) (DeleteRes, error) { +func (s *Shard) Delete(addrs []oid.Address) error { s.m.RLock() defer s.m.RUnlock() - return s.delete(prm) + return s.deleteObjs(addrs, false) } -func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { +func (s *Shard) deleteObjs(addrs []oid.Address, skipNotFoundError bool) error { if s.info.Mode.ReadOnly() { - return DeleteRes{}, ErrReadOnlyMode + return ErrReadOnlyMode } else if s.info.Mode.NoMetabase() { - return DeleteRes{}, ErrDegradedMode + return ErrDegradedMode } - ln := len(prm.addr) - - smalls := make(map[oid.Address][]byte, ln) + smalls := make(map[oid.Address][]byte, len(addrs)) - for i := range prm.addr { + for _, addr := range addrs { if s.hasWriteCache() { - err := s.writeCache.Delete(prm.addr[i]) + err := s.writeCache.Delete(addr) if err != nil && !IsErrNotFound(err) && !errors.Is(err, writecache.ErrReadOnly) { - s.log.Warn("can't delete object from write cache", zap.String("error", err.Error())) + s.log.Warn("can't delete object from write cache", zap.Error(err)) } } - sid, err := s.metaBase.StorageID(prm.addr[i]) + sid, err := s.metaBase.StorageID(addr) if err != nil { s.log.Debug("can't get storage ID from metabase", - zap.Stringer("object", prm.addr[i]), - zap.String("error", err.Error())) + zap.Stringer("object", addr), + zap.Error(err)) continue } if sid != nil { - smalls[prm.addr[i]] = sid + smalls[addr] = sid } } - res, err := s.metaBase.Delete(prm.addr) + res, err := s.metaBase.Delete(addrs) if err != nil { - return DeleteRes{}, err // stop on metabase error ? + return err // stop on metabase error ? } s.decObjectCounterBy(physical, res.RawRemoved) @@ -83,31 +59,31 @@ func (s *Shard) delete(prm DeletePrm) (DeleteRes, error) { var totalRemovedPayload uint64 - for i := range prm.addr { + for i := range addrs { removedPayload := res.Sizes[i] totalRemovedPayload += removedPayload - s.addToContainerSize(prm.addr[i].Container().EncodeToString(), -int64(removedPayload)) + s.addToContainerSize(addrs[i].Container().EncodeToString(), -int64(removedPayload)) } s.addToPayloadCounter(-int64(totalRemovedPayload)) - for i := range prm.addr { + for _, addr := range addrs { var delPrm common.DeletePrm - delPrm.Address = prm.addr[i] - id := smalls[prm.addr[i]] + delPrm.Address = addr + id := smalls[addr] delPrm.StorageID = id _, err = s.blobStor.Delete(delPrm) if err != nil { - if IsErrNotFound(err) && prm.skipNotFoundError { + if IsErrNotFound(err) && skipNotFoundError { continue } s.log.Debug("can't remove object from blobStor", - zap.Stringer("object_address", prm.addr[i]), - zap.String("error", err.Error())) + zap.Stringer("object_address", addr), + zap.Error(err)) } } - return DeleteRes{}, nil + return nil } diff --git a/pkg/local_object_storage/shard/delete_test.go b/pkg/local_object_storage/shard/delete_test.go index 02363f6b55..66ac6fae65 100644 --- a/pkg/local_object_storage/shard/delete_test.go +++ b/pkg/local_object_storage/shard/delete_test.go @@ -4,9 +4,9 @@ import ( "testing" "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "github.com/stretchr/testify/require" ) @@ -29,28 +29,19 @@ func testShardDelete(t *testing.T, hasWriteCache bool) { obj := generateObjectWithCID(cnr) addAttribute(obj, "foo", "bar") - var putPrm shard.PutPrm - var getPrm shard.GetPrm - t.Run("big 
object", func(t *testing.T) { addPayload(obj, 1<<20) - putPrm.SetObject(obj) - getPrm.SetAddress(object.AddressOf(obj)) - - var delPrm shard.DeletePrm - delPrm.SetAddresses(object.AddressOf(obj)) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = testGet(t, sh, object.AddressOf(obj), hasWriteCache) require.NoError(t, err) - _, err = sh.Delete(delPrm) + err = sh.Delete([]oid.Address{object.AddressOf(obj)}) require.NoError(t, err) - _, err = sh.Get(getPrm) + _, err = sh.Get(object.AddressOf(obj), false) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) }) @@ -59,22 +50,16 @@ func testShardDelete(t *testing.T, hasWriteCache bool) { addAttribute(obj, "foo", "bar") addPayload(obj, 1<<5) - putPrm.SetObject(obj) - getPrm.SetAddress(object.AddressOf(obj)) - - var delPrm shard.DeletePrm - delPrm.SetAddresses(object.AddressOf(obj)) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - _, err = sh.Get(getPrm) + _, err = sh.Get(object.AddressOf(obj), false) require.NoError(t, err) - _, err = sh.Delete(delPrm) + err = sh.Delete([]oid.Address{object.AddressOf(obj)}) require.NoError(t, err) - _, err = sh.Get(getPrm) + _, err = sh.Get(object.AddressOf(obj), false) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) }) } diff --git a/pkg/local_object_storage/shard/dump.go b/pkg/local_object_storage/shard/dump.go index f486f9fa69..f8a41d6be3 100644 --- a/pkg/local_object_storage/shard/dump.go +++ b/pkg/local_object_storage/shard/dump.go @@ -3,7 +3,6 @@ package shard import ( "encoding/binary" "io" - "os" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/common" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/util/logicerr" @@ -12,67 +11,22 @@ import ( var dumpMagic = []byte("NEOF") -// DumpPrm groups the parameters of Dump operation. -type DumpPrm struct { - path string - stream io.Writer - ignoreErrors bool -} - -// WithPath is an Dump option to set the destination path. -func (p *DumpPrm) WithPath(path string) { - p.path = path -} - -// WithStream is an Dump option to set the destination stream. -// It takes priority over `path` option. -func (p *DumpPrm) WithStream(r io.Writer) { - p.stream = r -} - -// WithIgnoreErrors is an Dump option to allow ignore all errors during iteration. -// This includes invalid peapods as well as corrupted objects. -func (p *DumpPrm) WithIgnoreErrors(ignore bool) { - p.ignoreErrors = ignore -} - -// DumpRes groups the result fields of Dump operation. -type DumpRes struct { - count int -} - -// Count return amount of object written. -func (r DumpRes) Count() int { - return r.count -} - var ErrMustBeReadOnly = logicerr.New("shard must be in read-only mode") -// Dump dumps all objects from the shard to a file or stream. +// DumpToStream dumps all objects from the shard to a given stream. // -// Returns any error encountered. -func (s *Shard) Dump(prm DumpPrm) (DumpRes, error) { +// Returns any error encountered and the number of objects written. 
+func (s *Shard) Dump(w io.Writer, ignoreErrors bool) (int, error) { s.m.RLock() defer s.m.RUnlock() if !s.info.Mode.ReadOnly() { - return DumpRes{}, ErrMustBeReadOnly - } - - w := prm.stream - if w == nil { - f, err := os.OpenFile(prm.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o640) - if err != nil { - return DumpRes{}, err - } - defer f.Close() - - w = f + return 0, ErrMustBeReadOnly } _, err := w.Write(dumpMagic) if err != nil { - return DumpRes{}, err + return 0, err } var count int @@ -93,14 +47,14 @@ func (s *Shard) Dump(prm DumpPrm) (DumpRes, error) { return nil } - err := s.writeCache.Iterate(iterHandler, prm.ignoreErrors) + err := s.writeCache.Iterate(iterHandler, ignoreErrors) if err != nil { - return DumpRes{}, err + return count, err } } var pi common.IteratePrm - pi.IgnoreErrors = prm.ignoreErrors + pi.IgnoreErrors = ignoreErrors pi.Handler = func(elem common.IterationElement) error { data := elem.ObjectData @@ -119,8 +73,8 @@ func (s *Shard) Dump(prm DumpPrm) (DumpRes, error) { } if _, err := s.blobStor.Iterate(pi); err != nil { - return DumpRes{}, err + return count, err } - return DumpRes{count: count}, nil + return count, nil } diff --git a/pkg/local_object_storage/shard/dump_test.go b/pkg/local_object_storage/shard/dump_test.go index 573b1a8097..ad6b0296ba 100644 --- a/pkg/local_object_storage/shard/dump_test.go +++ b/pkg/local_object_storage/shard/dump_test.go @@ -59,22 +59,25 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) { defer releaseShard(sh, t) out := filepath.Join(t.TempDir(), "dump") - var prm shard.DumpPrm - prm.WithPath(out) + f, err := os.Create(out) + require.NoError(t, err) t.Run("must be read-only", func(t *testing.T) { - _, err := sh.Dump(prm) + _, err := sh.Dump(f, false) + require.NoError(t, f.Close()) require.ErrorIs(t, err, shard.ErrMustBeReadOnly) }) require.NoError(t, sh.SetMode(mode.ReadOnly)) + outEmpty := out + ".empty" - var dumpPrm shard.DumpPrm - dumpPrm.WithPath(outEmpty) + f, err = os.Create(outEmpty) + require.NoError(t, err) - res, err := sh.Dump(dumpPrm) + res, err := sh.Dump(f, false) + require.NoError(t, f.Close()) require.NoError(t, err) - require.Equal(t, 0, res.Count()) + require.Equal(t, 0, res) require.NoError(t, sh.SetMode(mode.ReadWrite)) // Approximate object header size. 
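A sketch of the stream-based Dump above, where the caller now owns the destination writer (previously a path could be handed over via DumpPrm): the helper name and the file handling are assumptions; the Dump signature, the returned object count and the read-only requirement come from the patch.

package example

import (
	"fmt"
	"os"

	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
)

// dumpShardToFile opens the destination itself and streams the shard dump
// into it, returning the number of objects written.
func dumpShardToFile(sh *shard.Shard, path string) (int, error) {
	f, err := os.Create(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	// The shard must already be in read-only mode,
	// otherwise ErrMustBeReadOnly is returned.
	count, err := sh.Dump(f, false)
	if err != nil {
		return count, fmt.Errorf("dump: %w", err)
	}
	return count, nil
}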
@@ -99,41 +102,28 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) { obj := generateObjectWithPayload(cnr, data) objects[i] = obj - var prm shard.PutPrm - prm.SetObject(objects[i]) - _, err := sh.Put(prm) + err := sh.Put(objects[i], nil, 0) require.NoError(t, err) } require.NoError(t, sh.SetMode(mode.ReadOnly)) - t.Run("invalid path", func(t *testing.T) { - var dumpPrm shard.DumpPrm - dumpPrm.WithPath("\x00") - - _, err := sh.Dump(dumpPrm) - require.Error(t, err) - }) - - res, err = sh.Dump(prm) + f, err = os.Create(out) require.NoError(t, err) - require.Equal(t, objCount, res.Count()) + res, err = sh.Dump(f, false) + require.NoError(t, f.Close()) + require.NoError(t, err) + require.Equal(t, objCount, res) t.Run("restore", func(t *testing.T) { sh := newShard(t, false) defer releaseShard(sh, t) t.Run("empty dump", func(t *testing.T) { - var restorePrm shard.RestorePrm - restorePrm.WithPath(outEmpty) - res, err := sh.Restore(restorePrm) + count, failed, err := restoreFile(t, sh, outEmpty, false) require.NoError(t, err) - require.Equal(t, 0, res.Count()) - }) - - t.Run("invalid path", func(t *testing.T) { - _, err := sh.Restore(*new(shard.RestorePrm)) - require.ErrorIs(t, err, os.ErrNotExist) + require.Equal(t, 0, count) + require.Equal(t, 0, failed) }) t.Run("invalid file", func(t *testing.T) { @@ -141,11 +131,10 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) { out := out + ".wrongmagic" require.NoError(t, os.WriteFile(out, []byte{0, 0, 0, 0}, os.ModePerm)) - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) + count, failed, err := restoreFile(t, sh, out, false) require.ErrorIs(t, err, shard.ErrInvalidMagic) + require.Equal(t, 0, count) + require.Equal(t, 0, failed) }) fileData, err := os.ReadFile(out) @@ -156,62 +145,55 @@ func testDump(t *testing.T, objCount int, hasWriteCache bool) { fileData := append(fileData, 1) require.NoError(t, os.WriteFile(out, fileData, os.ModePerm)) - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) + count, failed, err := restoreFile(t, sh, out, false) require.ErrorIs(t, err, io.ErrUnexpectedEOF) + require.Equal(t, objCount, count) + require.Equal(t, 0, failed) }) t.Run("incomplete object data", func(t *testing.T) { out := out + ".wrongsize" fileData := append(fileData, 1, 0, 0, 0) require.NoError(t, os.WriteFile(out, fileData, os.ModePerm)) - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) + count, failed, err := restoreFile(t, sh, out, false) require.ErrorIs(t, err, io.EOF) + require.Equal(t, objCount, count) + require.Equal(t, 0, failed) }) t.Run("invalid object", func(t *testing.T) { out := out + ".wrongobj" fileData := append(fileData, 1, 0, 0, 0, 0xFF, 4, 0, 0, 0, 1, 2, 3, 4) require.NoError(t, os.WriteFile(out, fileData, os.ModePerm)) - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - - _, err := sh.Restore(restorePrm) + count, failed, err := restoreFile(t, sh, out, false) require.Error(t, err) + require.Equal(t, objCount, count) + require.Equal(t, 0, failed) t.Run("skip errors", func(t *testing.T) { sh := newCustomShard(t, filepath.Join(t.TempDir(), "ignore"), false, nil, nil) t.Cleanup(func() { require.NoError(t, sh.Close()) }) - var restorePrm shard.RestorePrm - restorePrm.WithPath(out) - restorePrm.WithIgnoreErrors(true) - - res, err := sh.Restore(restorePrm) + count, failed, err := restoreFile(t, sh, out, true) require.NoError(t, err) - require.Equal(t, objCount, 
res.Count()) - require.Equal(t, 2, res.FailCount()) + require.Equal(t, objCount, count) + require.Equal(t, 2, failed) }) }) }) - var prm shard.RestorePrm - prm.WithPath(out) t.Run("must allow write", func(t *testing.T) { require.NoError(t, sh.SetMode(mode.ReadOnly)) - _, err := sh.Restore(prm) + count, failed, err := restoreFile(t, sh, out, false) require.ErrorIs(t, err, shard.ErrReadOnlyMode) + require.Equal(t, 0, count) + require.Equal(t, 0, failed) }) require.NoError(t, sh.SetMode(mode.ReadWrite)) - checkRestore(t, sh, prm, objects) + checkRestore(t, sh, out, nil, objects) }) } @@ -229,9 +211,7 @@ func TestStream(t *testing.T) { obj := generateObjectWithCID(cnr) objects[i] = obj - var prm shard.PutPrm - prm.SetObject(objects[i]) - _, err := sh1.Put(prm) + err := sh1.Put(objects[i], nil, 0) require.NoError(t, err) } @@ -241,20 +221,14 @@ func TestStream(t *testing.T) { finish := make(chan struct{}) go func() { - var dumpPrm shard.DumpPrm - dumpPrm.WithStream(w) - - res, err := sh1.Dump(dumpPrm) + res, err := sh1.Dump(w, false) require.NoError(t, err) - require.Equal(t, objCount, res.Count()) + require.Equal(t, objCount, res) require.NoError(t, w.Close()) close(finish) }() - var restorePrm shard.RestorePrm - restorePrm.WithStream(r) - - checkRestore(t, sh2, restorePrm, objects) + checkRestore(t, sh2, "", r, objects) require.Eventually(t, func() bool { select { case <-finish: @@ -265,18 +239,34 @@ func TestStream(t *testing.T) { }, time.Second, time.Millisecond) } -func checkRestore(t *testing.T, sh *shard.Shard, prm shard.RestorePrm, objects []*objectSDK.Object) { - res, err := sh.Restore(prm) +func restoreFile(t *testing.T, sh *shard.Shard, path string, ignoreErrors bool) (int, int, error) { + f, err := os.Open(path) require.NoError(t, err) - require.Equal(t, len(objects), res.Count()) + count, failed, err := sh.Restore(f, ignoreErrors) + f.Close() + return count, failed, err +} - var getPrm shard.GetPrm +func checkRestore(t *testing.T, sh *shard.Shard, path string, r io.Reader, objects []*objectSDK.Object) { + var ( + count int + err error + failed int + ) + + if r == nil { + count, failed, err = restoreFile(t, sh, path, false) + } else { + count, failed, err = sh.Restore(r, false) + } + require.NoError(t, err) + require.Equal(t, len(objects), count) + require.Equal(t, 0, failed) for i := range objects { - getPrm.SetAddress(object.AddressOf(objects[i])) - res, err := sh.Get(getPrm) + res, err := sh.Get(object.AddressOf(objects[i]), false) require.NoError(t, err) - require.Equal(t, objects[i], res.Object()) + require.Equal(t, objects[i], res) } } @@ -324,9 +314,7 @@ func TestDumpIgnoreErrors(t *testing.T) { obj := generateObjectWithPayload(cidtest.ID(), make([]byte, size)) objects[i] = obj - var prm shard.PutPrm - prm.SetObject(objects[i]) - _, err := sh.Put(prm) + err := sh.Put(objects[i], nil, 0) require.NoError(t, err) } @@ -390,10 +378,10 @@ func TestDumpIgnoreErrors(t *testing.T) { } out := filepath.Join(t.TempDir(), "out.dump") - var dumpPrm shard.DumpPrm - dumpPrm.WithPath(out) - dumpPrm.WithIgnoreErrors(true) - res, err := sh.Dump(dumpPrm) + f, err := os.Create(out) + require.NoError(t, err) + res, err := sh.Dump(f, true) + require.NoError(t, f.Close()) require.NoError(t, err) - require.Equal(t, objCount, res.Count()) + require.Equal(t, objCount, res) } diff --git a/pkg/local_object_storage/shard/exists.go b/pkg/local_object_storage/shard/exists.go index 365fb7cc47..ec4c720546 100644 --- a/pkg/local_object_storage/shard/exists.go +++ b/pkg/local_object_storage/shard/exists.go @@ 
-5,64 +5,27 @@ import ( oid "github.com/nspcc-dev/neofs-sdk-go/object/id" ) -// ExistsPrm groups the parameters of Exists operation. -type ExistsPrm struct { - addr oid.Address - ignoreExpiration bool -} - -// ExistsRes groups the resulting values of Exists operation. -type ExistsRes struct { - ex bool -} - -// SetAddress is an Exists option to set object checked for existence. -func (p *ExistsPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// IgnoreExpiration returns existence status despite the expiration status. -func (p *ExistsPrm) IgnoreExpiration() { - p.ignoreExpiration = true -} - -// Exists returns the fact that the object is in the shard. -func (p ExistsRes) Exists() bool { - return p.ex -} - -// Exists checks if object is presented in shard. +// Exists checks if object is presented in shard. ignoreExpiration flag +// allows to check for expired objects. // // Returns any error encountered that does not allow to // unambiguously determine the presence of an object. // // Returns an error of type apistatus.ObjectAlreadyRemoved if object has been marked as removed. // Returns the object.ErrObjectIsExpired if the object is presented but already expired. -func (s *Shard) Exists(prm ExistsPrm) (ExistsRes, error) { - var exists bool - var err error - +func (s *Shard) Exists(addr oid.Address, ignoreExpiration bool) (bool, error) { s.m.RLock() defer s.m.RUnlock() if s.info.Mode.NoMetabase() { var p common.ExistsPrm - p.Address = prm.addr + p.Address = addr - var res common.ExistsRes - res, err = s.blobStor.Exists(p) + res, err := s.blobStor.Exists(p) if err != nil { - return ExistsRes{}, err - } - exists = res.Exists - } else { - exists, err = s.metaBase.Exists(prm.addr, prm.ignoreExpiration) - if err != nil { - return ExistsRes{}, err + return false, err } + return res.Exists, nil } - - return ExistsRes{ - ex: exists, - }, err + return s.metaBase.Exists(addr, ignoreExpiration) } diff --git a/pkg/local_object_storage/shard/gc.go b/pkg/local_object_storage/shard/gc.go index 0c514158fb..a704626889 100644 --- a/pkg/local_object_storage/shard/gc.go +++ b/pkg/local_object_storage/shard/gc.go @@ -124,7 +124,7 @@ func (gc *gc) listenEvents() { }) if err != nil { gc.log.Warn("could not submit GC job to worker pool", - zap.String("error", err.Error()), + zap.Error(err), ) v.prevGroup.Done() @@ -186,15 +186,11 @@ func (s *Shard) removeGarbage() { return } - var deletePrm DeletePrm - deletePrm.SetAddresses(gObjs...) 
- deletePrm.skipNotFoundError = true - // delete accumulated objects - _, err = s.delete(deletePrm) + err = s.deleteObjs(gObjs, true) if err != nil { s.log.Warn("could not delete the objects", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -224,7 +220,7 @@ func (s *Shard) collectExpiredObjects(e Event) { }) if err != nil || len(expired) == 0 { if err != nil { - log.Warn("iterator over expired objects failed", zap.String("error", err.Error())) + log.Warn("iterator over expired objects failed", zap.Error(err)) } return } @@ -257,7 +253,7 @@ func (s *Shard) collectExpiredLocks(e Event) { }) if err != nil || len(expired) == 0 { if err != nil { - s.log.Warn("iterator over expired locks failed", zap.String("error", err.Error())) + s.log.Warn("iterator over expired locks failed", zap.Error(err)) } return } @@ -300,7 +296,7 @@ func (s *Shard) HandleExpiredLocks(lockers []oid.Address) { unlocked, err := s.metaBase.FreeLockedBy(lockers) if err != nil { s.log.Warn("failure to unlock objects", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -315,20 +311,16 @@ func (s *Shard) HandleExpiredLocks(lockers []oid.Address) { return } - var pInhume meta.InhumePrm - pInhume.SetAddresses(append(lockers, expired...)...) - pInhume.SetForceGCMark() - - res, err := s.metaBase.Inhume(pInhume) + inhumed, _, err := s.metaBase.MarkGarbage(true, false, append(lockers, expired...)...) if err != nil { s.log.Warn("failure to mark lockers as garbage", - zap.String("error", err.Error()), + zap.Error(err), ) return } - s.decObjectCounterBy(logical, res.AvailableInhumed()) + s.decObjectCounterBy(logical, inhumed) } // HandleDeletedLocks unlocks all objects which were locked by lockers. @@ -343,7 +335,7 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) { unlocked, err := s.metaBase.FreeLockedBy(lockers) if err != nil { s.log.Warn("failure to unlock objects", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -358,20 +350,16 @@ func (s *Shard) HandleDeletedLocks(lockers []oid.Address) { return } - var pInhume meta.InhumePrm - pInhume.SetAddresses(expired...) - pInhume.SetGCMark() - - res, err := s.metaBase.Inhume(pInhume) + inhumed, _, err := s.metaBase.MarkGarbage(false, false, expired...) if err != nil { s.log.Warn("failure to mark unlocked objects as garbage", - zap.String("error", err.Error()), + zap.Error(err), ) return } - s.decObjectCounterBy(logical, res.AvailableInhumed()) + s.decObjectCounterBy(logical, inhumed) } // NotificationChannel returns channel for shard events. 
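For the exists.go change above, a hedged sketch of the flattened Exists call: the helper name and the removed-object handling policy are illustrative assumptions; the signature and the status-error behaviour follow the patch and its doc comment.

package example

import (
	"errors"

	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
)

// stillPresent checks object presence without ignoring expiration and treats
// an already-removed object as simply absent.
func stillPresent(sh *shard.Shard, addr oid.Address) (bool, error) {
	exists, err := sh.Exists(addr, false)
	if err != nil {
		// Removal is reported via a status error rather than the boolean result.
		if errors.As(err, new(apistatus.ObjectAlreadyRemoved)) {
			return false, nil
		}
		return false, err
	}
	return exists, nil
}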
diff --git a/pkg/local_object_storage/shard/gc_test.go b/pkg/local_object_storage/shard/gc_test.go index 56b590faa2..5fc2e7cc68 100644 --- a/pkg/local_object_storage/shard/gc_test.go +++ b/pkg/local_object_storage/shard/gc_test.go @@ -98,26 +98,20 @@ func TestGC_ExpiredObjectWithExpiredLock(t *testing.T) { lock.SetAttributes(expAttr) lockID := lock.GetID() - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) err = sh.Lock(cnr, lockID, []oid.ID{objID}) require.NoError(t, err) - putPrm.SetObject(lock) - _, err = sh.Put(putPrm) + err = sh.Put(lock, nil, 0) require.NoError(t, err) epoch.Value = 5 sh.NotificationChannel() <- shard.EventNewEpoch(epoch.Value) - var getPrm shard.GetPrm - getPrm.SetAddress(objectCore.AddressOf(obj)) require.Eventually(t, func() bool { - _, err = sh.Get(getPrm) + _, err = sh.Get(objectCore.AddressOf(obj), false) return shard.IsErrNotFound(err) }, 3*time.Second, 1*time.Second, "lock expiration should free object removal") } @@ -134,8 +128,6 @@ func TestGC_ContainerCleanup(t *testing.T) { oo := make([]oid.Address, 0, numOfObjs) for i := range numOfObjs { - var putPrm shard.PutPrm - obj := generateObjectWithCID(cID) addAttribute(obj, fmt.Sprintf("foo%d", i), fmt.Sprintf("bar%d", i)) if i%2 == 0 { @@ -143,43 +135,36 @@ func TestGC_ContainerCleanup(t *testing.T) { } else { addPayload(obj, 1<<20) // big } - putPrm.SetObject(obj) - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) oo = append(oo, objectCore.AddressOf(obj)) } - res, err := sh.ListContainers(shard.ListContainersPrm{}) + containers, err := sh.ListContainers() require.NoError(t, err) - require.Len(t, res.Containers(), 1) + require.Len(t, containers, 1) for _, o := range oo { - var getPrm shard.GetPrm - getPrm.SetAddress(o) - - _, err = sh.Get(getPrm) + _, err = sh.Get(o, false) require.NoError(t, err) } require.NoError(t, sh.InhumeContainer(cID)) require.Eventually(t, func() bool { - res, err = sh.ListContainers(shard.ListContainersPrm{}) + containers, err = sh.ListContainers() require.NoError(t, err) for _, o := range oo { - var getPrm shard.GetPrm - getPrm.SetAddress(o) - - _, err = sh.Get(getPrm) + _, err = sh.Get(o, false) if !errors.Is(err, apistatus.ObjectNotFound{}) { return false } } - return len(res.Containers()) == 0 + return len(containers) == 0 }, time.Second, 100*time.Millisecond) } @@ -204,9 +189,7 @@ func TestExpiration(t *testing.T) { ), shard.WithExpiredObjectsCallback( func(addresses []oid.Address) { - var p shard.InhumePrm - p.MarkAsGarbage(addresses...) - _, err := sh.Inhume(p) + err := sh.MarkGarbage(false, addresses...) 
require.NoError(t, err) }, ), @@ -241,22 +224,16 @@ func TestExpiration(t *testing.T) { obj.SetType(typ) require.NoError(t, obj.SetIDWithSignature(neofscryptotest.Signer())) - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - var getPrm shard.GetPrm - getPrm.SetAddress(objectCore.AddressOf(obj)) - - _, err = sh.Get(getPrm) + _, err = sh.Get(objectCore.AddressOf(obj), false) require.NoError(t, err) ch <- shard.EventNewEpoch(exp + 1) require.Eventually(t, func() bool { - _, err = sh.Get(getPrm) + _, err = sh.Get(objectCore.AddressOf(obj), false) return shard.IsErrNotFound(err) }, 3*time.Second, 100*time.Millisecond, "lock expiration should free object removal") }) diff --git a/pkg/local_object_storage/shard/get.go b/pkg/local_object_storage/shard/get.go index 78fef02b5d..4bb2e2a691 100644 --- a/pkg/local_object_storage/shard/get.go +++ b/pkg/local_object_storage/shard/get.go @@ -1,6 +1,7 @@ package shard import ( + "errors" "fmt" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor" @@ -13,42 +14,11 @@ import ( "go.uber.org/zap" ) -// GetPrm groups the parameters of Get operation. -type GetPrm struct { - addr oid.Address - skipMeta bool -} - -// GetRes groups the resulting values of Get operation. -type GetRes struct { - obj *objectSDK.Object - hasMeta bool -} - -// SetAddress is a Get option to set the address of the requested object. -// -// Option is required. -func (p *GetPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// SetIgnoreMeta is a Get option try to fetch object from blobstor directly, -// without accessing metabase. -func (p *GetPrm) SetIgnoreMeta(ignore bool) { - p.skipMeta = ignore -} +// ErrMetaWithNoObject is returned when shard has metadata, but no object. +var ErrMetaWithNoObject = errors.New("got meta, but no object") -// Object returns the requested object. -func (r GetRes) Object() *objectSDK.Object { - return r.obj -} - -// HasMeta returns true if info about the object was found in the metabase. -func (r GetRes) HasMeta() bool { - return r.hasMeta -} - -// Get reads an object from shard. +// Get reads an object from shard. skipMeta flag allows to fetch object from +// the blobstor directly. // // Returns any error encountered that // did not allow to completely read the object part. @@ -56,37 +26,39 @@ func (r GetRes) HasMeta() bool { // Returns an error of type apistatus.ObjectNotFound if the requested object is missing in shard. // Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard. // Returns the object.ErrObjectIsExpired if the object is presented but already expired. 
-func (s *Shard) Get(prm GetPrm) (GetRes, error) { +func (s *Shard) Get(addr oid.Address, skipMeta bool) (*objectSDK.Object, error) { s.m.RLock() defer s.m.RUnlock() - var res GetRes + var res *objectSDK.Object cb := func(stor *blobstor.BlobStor, id []byte) error { var getPrm common.GetPrm - getPrm.Address = prm.addr + getPrm.Address = addr getPrm.StorageID = id r, err := stor.Get(getPrm) if err != nil { return err } - res.obj = r.Object + res = r.Object return nil } wc := func(c writecache.Cache) error { - o, err := c.Get(prm.addr) + o, err := c.Get(addr) if err != nil { return err } - res.obj = o + res = o return nil } - skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() - var err error - res.hasMeta, err = s.fetchObjectData(prm.addr, skipMeta, cb, wc) + skipMeta = skipMeta || s.info.Mode.NoMetabase() + gotMeta, err := s.fetchObjectData(addr, skipMeta, cb, wc) + if err != nil && gotMeta { + err = fmt.Errorf("%w, %w", err, ErrMetaWithNoObject) + } return res, err } @@ -160,18 +132,17 @@ func (s *Shard) fetchObjectData(addr oid.Address, skipMeta bool, // canonical NeoFS binary format. Returns [apistatus.ObjectNotFound] if object // is missing. func (s *Shard) GetBytes(addr oid.Address) ([]byte, error) { - b, _, err := s.getBytesWithMetadataLookup(addr, true) - return b, err + return s.getBytesWithMetadataLookup(addr, true) } // GetBytesWithMetadataLookup works similar to [shard.GetBytes], but pre-checks // object presence in the underlying metabase: if object cannot be accessed from // the metabase, GetBytesWithMetadataLookup returns an error. -func (s *Shard) GetBytesWithMetadataLookup(addr oid.Address) ([]byte, bool, error) { +func (s *Shard) GetBytesWithMetadataLookup(addr oid.Address) ([]byte, error) { return s.getBytesWithMetadataLookup(addr, false) } -func (s *Shard) getBytesWithMetadataLookup(addr oid.Address, skipMeta bool) ([]byte, bool, error) { +func (s *Shard) getBytesWithMetadataLookup(addr oid.Address, skipMeta bool) ([]byte, error) { s.m.RLock() defer s.m.RUnlock() @@ -185,5 +156,8 @@ func (s *Shard) getBytesWithMetadataLookup(addr oid.Address, skipMeta bool) ([]b b, err = w.GetBytes(addr) return err }) - return b, hasMeta, err + if err != nil && hasMeta { + err = fmt.Errorf("%w, %w", err, ErrMetaWithNoObject) + } + return b, err } diff --git a/pkg/local_object_storage/shard/get_test.go b/pkg/local_object_storage/shard/get_test.go index 93c7b10b4c..2e58adc0f4 100644 --- a/pkg/local_object_storage/shard/get_test.go +++ b/pkg/local_object_storage/shard/get_test.go @@ -29,26 +29,18 @@ func testShardGet(t *testing.T, hasWriteCache bool) { sh := newShard(t, hasWriteCache) defer releaseShard(sh, t) - var putPrm shard.PutPrm - var getPrm shard.GetPrm - t.Run("small object", func(t *testing.T) { obj := generateObject() addAttribute(obj, "foo", "bar") addPayload(obj, 1<<5) addr := object.AddressOf(obj) - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - getPrm.SetAddress(addr) - - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := testGet(t, sh, addr, hasWriteCache) require.NoError(t, err) - require.Equal(t, obj, res.Object()) - require.True(t, res.HasMeta()) + require.Equal(t, obj, res) testGetBytes(t, sh, addr, obj.Marshal()) }) @@ -60,17 +52,12 @@ func testShardGet(t *testing.T, hasWriteCache bool) { addPayload(obj, 1<<20) // big obj addr := object.AddressOf(obj) - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - getPrm.SetAddress(addr) - - res, err := 
testGet(t, sh, getPrm, hasWriteCache) + res, err := testGet(t, sh, addr, hasWriteCache) require.NoError(t, err) - require.Equal(t, obj, res.Object()) - require.True(t, res.HasMeta()) + require.Equal(t, obj, res) testGetBytes(t, sh, addr, obj.Marshal()) }) @@ -91,20 +78,14 @@ func testShardGet(t *testing.T, hasWriteCache bool) { child.SetSplitID(splitID) addPayload(child, 1<<5) - putPrm.SetObject(child) - - _, err := sh.Put(putPrm) + err := sh.Put(child, nil, 0) require.NoError(t, err) - getPrm.SetAddress(object.AddressOf(child)) - - res, err := testGet(t, sh, getPrm, hasWriteCache) + res, err := testGet(t, sh, object.AddressOf(child), hasWriteCache) require.NoError(t, err) - require.True(t, binaryEqual(child, res.Object())) - - getPrm.SetAddress(object.AddressOf(parent)) + require.True(t, binaryEqual(child, res)) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = testGet(t, sh, object.AddressOf(parent), hasWriteCache) var si *objectSDK.SplitInfoError require.True(t, errors.As(err, &si)) @@ -118,12 +99,12 @@ func testShardGet(t *testing.T, hasWriteCache bool) { }) } -func testGet(t *testing.T, sh *shard.Shard, getPrm shard.GetPrm, hasWriteCache bool) (shard.GetRes, error) { - res, err := sh.Get(getPrm) +func testGet(t *testing.T, sh *shard.Shard, addr oid.Address, hasWriteCache bool) (*objectSDK.Object, error) { + res, err := sh.Get(addr, false) if hasWriteCache { require.Eventually(t, func() bool { if shard.IsErrNotFound(err) { - res, err = sh.Get(getPrm) + res, err = sh.Get(addr, false) } return !shard.IsErrNotFound(err) }, time.Second, time.Millisecond*100) @@ -136,10 +117,9 @@ func testGetBytes(t testing.TB, sh *shard.Shard, addr oid.Address, objBin []byte require.NoError(t, err) require.Equal(t, objBin, b) - b, hasMeta, err := sh.GetBytesWithMetadataLookup(addr) + b, err = sh.GetBytesWithMetadataLookup(addr) require.NoError(t, err) require.Equal(t, objBin, b) - require.True(t, hasMeta) } // binary equal is used when object contains empty lists in the structure and diff --git a/pkg/local_object_storage/shard/head.go b/pkg/local_object_storage/shard/head.go index 800e6a2e2b..f7384e63b0 100644 --- a/pkg/local_object_storage/shard/head.go +++ b/pkg/local_object_storage/shard/head.go @@ -5,65 +5,18 @@ import ( oid "github.com/nspcc-dev/neofs-sdk-go/object/id" ) -// HeadPrm groups the parameters of Head operation. -type HeadPrm struct { - addr oid.Address - raw bool -} - -// HeadRes groups the resulting values of Head operation. -type HeadRes struct { - obj *objectSDK.Object -} - -// SetAddress is a Head option to set the address of the requested object. -// -// Option is required. -func (p *HeadPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// SetRaw is a Head option to set raw flag value. If flag is unset, then Head -// returns header of virtual object, otherwise it returns SplitInfo of virtual -// object. -func (p *HeadPrm) SetRaw(raw bool) { - p.raw = raw -} - -// Object returns the requested object header. -func (r HeadRes) Object() *objectSDK.Object { - return r.obj -} - -// Head reads header of the object from the shard. +// Head reads header of the object from the shard. raw flag controls split +// object handling, if unset, then virtual object header is returned, otherwise +// SplitInfo of this object. // // Returns any error encountered. // // Returns an error of type apistatus.ObjectNotFound if object is missing in Shard. // Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard. 
// Returns the object.ErrObjectIsExpired if the object is presented but already expired. -func (s *Shard) Head(prm HeadPrm) (HeadRes, error) { - var obj *objectSDK.Object - var err error +func (s *Shard) Head(addr oid.Address, raw bool) (*objectSDK.Object, error) { if s.GetMode().NoMetabase() { - var getPrm GetPrm - getPrm.SetAddress(prm.addr) - getPrm.SetIgnoreMeta(true) - - var res GetRes - res, err = s.Get(getPrm) - if err != nil { - return HeadRes{}, err - } - obj = res.Object() - } else { - obj, err = s.metaBase.Get(prm.addr, prm.raw) - if err != nil { - return HeadRes{}, err - } + return s.Get(addr, true) } - - return HeadRes{ - obj: obj, - }, nil + return s.metaBase.Get(addr, raw) } diff --git a/pkg/local_object_storage/shard/head_test.go b/pkg/local_object_storage/shard/head_test.go index 31d9b7b567..54dad04368 100644 --- a/pkg/local_object_storage/shard/head_test.go +++ b/pkg/local_object_storage/shard/head_test.go @@ -9,6 +9,7 @@ import ( "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "github.com/stretchr/testify/require" ) @@ -26,23 +27,16 @@ func testShardHead(t *testing.T, hasWriteCache bool) { sh := newShard(t, hasWriteCache) defer releaseShard(sh, t) - var putPrm shard.PutPrm - var headPrm shard.HeadPrm - t.Run("regular object", func(t *testing.T) { obj := generateObject() addAttribute(obj, "foo", "bar") - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - headPrm.SetAddress(object.AddressOf(obj)) - - res, err := testHead(t, sh, headPrm, hasWriteCache) + res, err := testHead(t, sh, object.AddressOf(obj), false, hasWriteCache) require.NoError(t, err) - require.Equal(t, obj.CutPayload(), res.Object()) + require.Equal(t, obj.CutPayload(), res) }) t.Run("virtual object", func(t *testing.T) { @@ -58,34 +52,26 @@ func testShardHead(t *testing.T, hasWriteCache bool) { child.SetParentID(idParent) child.SetSplitID(splitID) - putPrm.SetObject(child) - - _, err := sh.Put(putPrm) + err := sh.Put(child, nil, 0) require.NoError(t, err) - headPrm.SetAddress(object.AddressOf(parent)) - headPrm.SetRaw(true) - var siErr *objectSDK.SplitInfoError - _, err = testHead(t, sh, headPrm, hasWriteCache) + _, err = testHead(t, sh, object.AddressOf(parent), true, hasWriteCache) require.True(t, errors.As(err, &siErr)) - headPrm.SetAddress(object.AddressOf(parent)) - headPrm.SetRaw(false) - - head, err := sh.Head(headPrm) + head, err := sh.Head(object.AddressOf(parent), false) require.NoError(t, err) - require.Equal(t, parent.CutPayload(), head.Object()) + require.Equal(t, parent.CutPayload(), head) }) } -func testHead(t *testing.T, sh *shard.Shard, headPrm shard.HeadPrm, hasWriteCache bool) (shard.HeadRes, error) { - res, err := sh.Head(headPrm) +func testHead(t *testing.T, sh *shard.Shard, addr oid.Address, raw bool, hasWriteCache bool) (*objectSDK.Object, error) { + res, err := sh.Head(addr, raw) if hasWriteCache { require.Eventually(t, func() bool { if shard.IsErrNotFound(err) { - res, err = sh.Head(headPrm) + res, err = sh.Head(addr, raw) } return !shard.IsErrNotFound(err) }, time.Second, time.Millisecond*100) diff --git a/pkg/local_object_storage/shard/inhume.go b/pkg/local_object_storage/shard/inhume.go index 9890e83338..a20e29c5d8 100644 --- a/pkg/local_object_storage/shard/inhume.go +++ b/pkg/local_object_storage/shard/inhume.go @@ -10,124 +10,83 @@ import ( 
"go.uber.org/zap" ) -// InhumePrm encapsulates parameters for inhume operation. -type InhumePrm struct { - target []oid.Address - tombstone *oid.Address - tombstoneExpiration uint64 - forceRemoval bool -} - -// InhumeRes encapsulates results of inhume operation. -type InhumeRes struct{} - -// InhumeByTomb sets a list of objects that should be inhumed and tombstone address -// as the reason for inhume operation. -// -// tombstone should not be nil, addr should not be empty. -// Should not be called along with MarkAsGarbage. -func (p *InhumePrm) InhumeByTomb(tombstone oid.Address, tombExpiration uint64, addrs ...oid.Address) { - if p != nil { - p.target = addrs - p.tombstone = &tombstone - p.tombstoneExpiration = tombExpiration - } -} - -// MarkAsGarbage marks object to be physically removed from shard. -// -// Should not be called along with InhumeByTomb. -func (p *InhumePrm) MarkAsGarbage(addr ...oid.Address) { - if p != nil { - p.target = addr - p.tombstone = nil - p.tombstoneExpiration = 0 - } -} - -// ForceRemoval forces object removing despite any restrictions imposed -// on deleting that object. Expected to be used only in control service. -func (p *InhumePrm) ForceRemoval() { - if p != nil { - p.tombstone = nil - p.forceRemoval = true - } -} - -// SetTargets sets targets and does not change inhuming operation (GC or Tombstone). -func (p *InhumePrm) SetTargets(addrs ...oid.Address) { - if p != nil { - p.target = addrs - } -} - // ErrLockObjectRemoval is returned when inhume operation is being // performed on lock object, and it is not a forced object removal. var ErrLockObjectRemoval = meta.ErrLockObjectRemoval -// Inhume calls metabase. Inhume method to mark object as removed. It won't be -// removed physically from blobStor and metabase until `Delete` operation. +// Inhume marks objects as removed in metabase using provided tombstone data. +// Objects won't be removed physically from blobStor and metabase until +// `Delete` operation. // // Allows inhuming non-locked objects only. Returns apistatus.ObjectLocked // if at least one object is locked. // // Returns ErrReadOnlyMode error if shard is in "read-only" mode. -func (s *Shard) Inhume(prm InhumePrm) (InhumeRes, error) { +func (s *Shard) Inhume(tombstone oid.Address, tombExpiration uint64, addrs ...oid.Address) error { + return s.inhume(&tombstone, tombExpiration, false, addrs...) +} + +// MarkGarbage marks objects to be physically removed from shard. force flag +// allows to override any restrictions imposed on object deletion (to be used +// by control service and other manual intervention cases). Otherwise similar +// to [Shard.Inhume], but doesn't need a tombstone. +func (s *Shard) MarkGarbage(force bool, addrs ...oid.Address) error { + return s.inhume(nil, 0, force, addrs...) +} + +func (s *Shard) inhume(tombstone *oid.Address, tombExpiration uint64, force bool, addrs ...oid.Address) error { s.m.RLock() if s.info.Mode.ReadOnly() { s.m.RUnlock() - return InhumeRes{}, ErrReadOnlyMode + return ErrReadOnlyMode } else if s.info.Mode.NoMetabase() { s.m.RUnlock() - return InhumeRes{}, ErrDegradedMode + return ErrDegradedMode } if s.hasWriteCache() { - for i := range prm.target { - _ = s.writeCache.Delete(prm.target[i]) + for i := range addrs { + _ = s.writeCache.Delete(addrs[i]) } } - var metaPrm meta.InhumePrm - metaPrm.SetAddresses(prm.target...) 
- metaPrm.SetLockObjectHandling() + var ( + deletedLockObjs []oid.Address + err error + inhumed uint64 + ) - if prm.tombstone != nil { - metaPrm.SetTombstone(*prm.tombstone, prm.tombstoneExpiration) + if tombstone != nil { + inhumed, deletedLockObjs, err = s.metaBase.Inhume(*tombstone, tombExpiration, true, addrs...) } else { - metaPrm.SetGCMark() + inhumed, deletedLockObjs, err = s.metaBase.MarkGarbage(force, true, addrs...) } - if prm.forceRemoval { - metaPrm.SetForceGCMark() - } - - res, err := s.metaBase.Inhume(metaPrm) if err != nil { if errors.Is(err, meta.ErrLockObjectRemoval) { s.m.RUnlock() - return InhumeRes{}, ErrLockObjectRemoval + return ErrLockObjectRemoval } s.log.Debug("could not mark object to delete in metabase", - zap.String("error", err.Error()), + zap.Error(err), ) s.m.RUnlock() - return InhumeRes{}, fmt.Errorf("metabase inhume: %w", err) + return fmt.Errorf("metabase inhume: %w", err) } s.m.RUnlock() - s.decObjectCounterBy(logical, res.AvailableInhumed()) + s.decObjectCounterBy(logical, inhumed) - if deletedLockObjs := res.DeletedLockObjects(); len(deletedLockObjs) != 0 { + if len(deletedLockObjs) != 0 { s.deletedLockCallBack(deletedLockObjs) } - return InhumeRes{}, nil + return nil } // InhumeContainer marks every object in a container as removed. diff --git a/pkg/local_object_storage/shard/inhume_test.go b/pkg/local_object_storage/shard/inhume_test.go index c72b336508..640c027db5 100644 --- a/pkg/local_object_storage/shard/inhume_test.go +++ b/pkg/local_object_storage/shard/inhume_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" "github.com/stretchr/testify/require" @@ -31,24 +30,15 @@ func testShardInhume(t *testing.T, hasWriteCache bool) { ts := generateObjectWithCID(cnr) - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - var inhPrm shard.InhumePrm - inhPrm.InhumeByTomb(object.AddressOf(ts), 0, object.AddressOf(obj)) - - var getPrm shard.GetPrm - getPrm.SetAddress(object.AddressOf(obj)) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - _, err = testGet(t, sh, getPrm, hasWriteCache) + _, err = testGet(t, sh, object.AddressOf(obj), hasWriteCache) require.NoError(t, err) - _, err = sh.Inhume(inhPrm) + err = sh.Inhume(object.AddressOf(ts), 0, object.AddressOf(obj)) require.NoError(t, err) - _, err = sh.Get(getPrm) + _, err = sh.Get(object.AddressOf(obj), false) require.ErrorAs(t, err, new(apistatus.ObjectAlreadyRemoved)) } diff --git a/pkg/local_object_storage/shard/list.go b/pkg/local_object_storage/shard/list.go index b9af95e225..20e7abeb55 100644 --- a/pkg/local_object_storage/shard/list.go +++ b/pkg/local_object_storage/shard/list.go @@ -7,6 +7,7 @@ import ( meta "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/metabase" cid "github.com/nspcc-dev/neofs-sdk-go/container/id" "github.com/nspcc-dev/neofs-sdk-go/object" + oid "github.com/nspcc-dev/neofs-sdk-go/object/id" "go.uber.org/zap" ) @@ -18,96 +19,53 @@ type Cursor = meta.Cursor // cursor. Use nil cursor object to start listing again. var ErrEndOfListing = meta.ErrEndOfListing -type ListContainersPrm struct{} - -type ListContainersRes struct { - containers []cid.ID -} - -func (r ListContainersRes) Containers() []cid.ID { - return r.containers -} - -// ListWithCursorPrm contains parameters for ListWithCursor operation. 
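// Caller-side sketch of the Inhume/MarkGarbage split above, showing the three
// call forms that replace the former InhumePrm option set (helper name and
// arguments are illustrative; tomb is the tombstone address, tombExp its
// expiration epoch).
func removeObjects(sh *shard.Shard, tomb oid.Address, tombExp uint64, addrs ...oid.Address) error {
	// Tombstone-driven removal (former InhumeByTomb).
	if err := sh.Inhume(tomb, tombExp, addrs...); err != nil {
		return err
	}

	// GC mark without a tombstone (former MarkAsGarbage); force=false keeps
	// the usual restrictions, so locked objects still fail with ObjectLocked.
	if err := sh.MarkGarbage(false, addrs...); err != nil {
		return err
	}

	// Forced removal (former MarkAsGarbage+ForceRemoval), meant for the
	// control service and similar manual intervention.
	return sh.MarkGarbage(true, addrs...)
}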
-type ListWithCursorPrm struct { - count uint32 - cursor *Cursor -} - -// ListWithCursorRes contains values returned from ListWithCursor operation. -type ListWithCursorRes struct { - addrList []objectcore.AddressWithType - cursor *Cursor -} - -// WithCount sets maximum amount of addresses that ListWithCursor should return. -func (p *ListWithCursorPrm) WithCount(count uint32) { - p.count = count -} - -// WithCursor sets cursor for ListWithCursor operation. For initial request, -// ignore this param or use nil value. For consecutive requests, use value -// from ListWithCursorRes. -func (p *ListWithCursorPrm) WithCursor(cursor *Cursor) { - p.cursor = cursor -} - -// AddressList returns addresses selected by ListWithCursor operation. -func (r ListWithCursorRes) AddressList() []objectcore.AddressWithType { - return r.addrList -} - -// Cursor returns cursor for consecutive listing requests. -func (r ListWithCursorRes) Cursor() *Cursor { - return r.cursor -} - // List returns all objects physically stored in the Shard. -func (s *Shard) List() (res SelectRes, err error) { +func (s *Shard) List() ([]oid.Address, error) { s.m.RLock() defer s.m.RUnlock() if s.info.Mode.NoMetabase() { - return SelectRes{}, ErrDegradedMode + return nil, ErrDegradedMode } lst, err := s.metaBase.Containers() if err != nil { - return res, fmt.Errorf("can't list stored containers: %w", err) + return nil, fmt.Errorf("can't list stored containers: %w", err) } filters := object.NewSearchFilters() filters.AddPhyFilter() + var res []oid.Address + for i := range lst { addrs, err := s.metaBase.Select(lst[i], filters) // consider making List in metabase if err != nil { s.log.Debug("can't select all objects", zap.Stringer("cid", lst[i]), - zap.String("error", err.Error())) + zap.Error(err)) continue } - res.addrList = append(res.addrList, addrs...) + res = append(res, addrs...) } return res, nil } -func (s *Shard) ListContainers(_ ListContainersPrm) (ListContainersRes, error) { +// ListContainers enumerates all containers known to this shard. +func (s *Shard) ListContainers() ([]cid.ID, error) { if s.GetMode().NoMetabase() { - return ListContainersRes{}, ErrDegradedMode + return nil, ErrDegradedMode } containers, err := s.metaBase.Containers() if err != nil { - return ListContainersRes{}, fmt.Errorf("could not get list of containers: %w", err) + return nil, fmt.Errorf("could not get list of containers: %w", err) } - return ListContainersRes{ - containers: containers, - }, nil + return containers, nil } // ListWithCursor lists physical objects available in shard starting from @@ -116,18 +74,15 @@ func (s *Shard) ListContainers(_ ListContainersPrm) (ListContainersRes, error) { // // Returns ErrEndOfListing if there are no more objects to return or count // parameter set to zero. 
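// Sketch of the flattened listing calls above and the cursor-based paging
// changed just below (helper name, page size and the errors import are
// illustrative assumptions).
func enumerateShard(sh *shard.Shard) error {
	containers, err := sh.ListContainers() // []cid.ID
	if err != nil {
		return err
	}
	_ = containers

	all, err := sh.List() // []oid.Address of all physically stored objects
	if err != nil {
		return err
	}
	_ = all

	// Page through objects until ErrEndOfListing; pass nil to start over.
	var cursor *shard.Cursor
	for {
		page, next, err := sh.ListWithCursor(100, cursor)
		if errors.Is(err, shard.ErrEndOfListing) {
			return nil
		}
		if err != nil {
			return err
		}
		_ = page // []objectcore.AddressWithType
		cursor = next
	}
}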
-func (s *Shard) ListWithCursor(prm ListWithCursorPrm) (ListWithCursorRes, error) { +func (s *Shard) ListWithCursor(count int, cursor *Cursor) ([]objectcore.AddressWithType, *Cursor, error) { if s.GetMode().NoMetabase() { - return ListWithCursorRes{}, ErrDegradedMode + return nil, nil, ErrDegradedMode } - addrs, cursor, err := s.metaBase.ListWithCursor(int(prm.count), prm.cursor) + addrs, cursor, err := s.metaBase.ListWithCursor(count, cursor) if err != nil { - return ListWithCursorRes{}, fmt.Errorf("could not get list of objects: %w", err) + return nil, nil, fmt.Errorf("could not get list of objects: %w", err) } - return ListWithCursorRes{ - addrList: addrs, - cursor: cursor, - }, nil + return addrs, cursor, nil } diff --git a/pkg/local_object_storage/shard/list_test.go b/pkg/local_object_storage/shard/list_test.go index 6b959d3aad..3f0062f7e9 100644 --- a/pkg/local_object_storage/shard/list_test.go +++ b/pkg/local_object_storage/shard/list_test.go @@ -32,7 +32,6 @@ func testShardList(t *testing.T, sh *shard.Shard) { const N = 5 objs := make(map[string]int) - var putPrm shard.PutPrm for range C { cnr := cidtest.ID() @@ -49,9 +48,7 @@ func testShardList(t *testing.T, sh *shard.Shard) { objs[object.AddressOf(obj).EncodeToString()] = 0 - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) } } @@ -59,7 +56,7 @@ func testShardList(t *testing.T, sh *shard.Shard) { res, err := sh.List() require.NoError(t, err) - for _, objID := range res.AddressList() { + for _, objID := range res { i, ok := objs[objID.EncodeToString()] require.True(t, ok) require.Equal(t, 0, i) diff --git a/pkg/local_object_storage/shard/lock_test.go b/pkg/local_object_storage/shard/lock_test.go index 713387061b..726ea1308d 100644 --- a/pkg/local_object_storage/shard/lock_test.go +++ b/pkg/local_object_storage/shard/lock_test.go @@ -67,10 +67,7 @@ func TestShard_Lock(t *testing.T) { // put the object - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) // lock the object @@ -78,61 +75,42 @@ func TestShard_Lock(t *testing.T) { err = sh.Lock(cnr, lockID, []oid.ID{objID}) require.NoError(t, err) - putPrm.SetObject(lock) - _, err = sh.Put(putPrm) + err = sh.Put(lock, nil, 0) require.NoError(t, err) t.Run("inhuming locked objects", func(t *testing.T) { ts := generateObjectWithCID(cnr) - var inhumePrm shard.InhumePrm - inhumePrm.InhumeByTomb(objectcore.AddressOf(ts), 0, objectcore.AddressOf(obj)) - - _, err = sh.Inhume(inhumePrm) + err = sh.Inhume(objectcore.AddressOf(ts), 0, objectcore.AddressOf(obj)) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) - inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - _, err = sh.Inhume(inhumePrm) + err = sh.MarkGarbage(false, objectcore.AddressOf(obj)) require.ErrorAs(t, err, new(apistatus.ObjectLocked)) }) t.Run("inhuming lock objects", func(t *testing.T) { ts := generateObjectWithCID(cnr) - var inhumePrm shard.InhumePrm - inhumePrm.InhumeByTomb(objectcore.AddressOf(ts), 0, objectcore.AddressOf(lock)) - - _, err = sh.Inhume(inhumePrm) + err = sh.Inhume(objectcore.AddressOf(ts), 0, objectcore.AddressOf(lock)) require.Error(t, err) - inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock)) - _, err = sh.Inhume(inhumePrm) + err = sh.MarkGarbage(false, objectcore.AddressOf(lock)) require.Error(t, err) }) t.Run("force objects inhuming", func(t *testing.T) { - var inhumePrm shard.InhumePrm - inhumePrm.MarkAsGarbage(objectcore.AddressOf(lock)) - inhumePrm.ForceRemoval() - - 
_, err = sh.Inhume(inhumePrm) + err = sh.MarkGarbage(true, objectcore.AddressOf(lock)) require.NoError(t, err) // it should be possible to remove // lock object now - inhumePrm = shard.InhumePrm{} - inhumePrm.MarkAsGarbage(objectcore.AddressOf(obj)) - - _, err = sh.Inhume(inhumePrm) + err = sh.MarkGarbage(false, objectcore.AddressOf(obj)) require.NoError(t, err) // check that object has been removed - var getPrm shard.GetPrm - getPrm.SetAddress(objectcore.AddressOf(obj)) - - _, err = sh.Get(getPrm) + _, err = sh.Get(objectcore.AddressOf(obj), false) require.ErrorAs(t, err, new(apistatus.ObjectNotFound)) }) } @@ -149,10 +127,7 @@ func TestShard_IsLocked(t *testing.T) { // put the object - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) // not locked object is not locked diff --git a/pkg/local_object_storage/shard/metrics_test.go b/pkg/local_object_storage/shard/metrics_test.go index 959a39f819..e78176154d 100644 --- a/pkg/local_object_storage/shard/metrics_test.go +++ b/pkg/local_object_storage/shard/metrics_test.go @@ -104,12 +104,8 @@ func TestCounters(t *testing.T) { } t.Run("put", func(t *testing.T) { - var prm shard.PutPrm - for i := range objNumber { - prm.SetObject(oo[i]) - - _, err := sh.Put(prm) + err := sh.Put(oo[i], nil, 0) require.NoError(t, err) } @@ -120,13 +116,10 @@ func TestCounters(t *testing.T) { }) t.Run("inhume_GC", func(t *testing.T) { - var prm shard.InhumePrm inhumedNumber := objNumber / 4 for i := range inhumedNumber { - prm.MarkAsGarbage(objectcore.AddressOf(oo[i])) - - _, err := sh.Inhume(prm) + err := sh.MarkGarbage(false, objectcore.AddressOf(oo[i])) require.NoError(t, err) } @@ -139,16 +132,14 @@ func TestCounters(t *testing.T) { }) t.Run("inhume_TS", func(t *testing.T) { - var prm shard.InhumePrm ts := objectcore.AddressOf(generateObject()) phy := mm.objectCounters[physical] logic := mm.objectCounters[logical] inhumedNumber := int(phy / 4) - prm.InhumeByTomb(ts, 0, addrFromObjs(oo[:inhumedNumber])...) - _, err := sh.Inhume(prm) + err := sh.Inhume(ts, 0, addrFromObjs(oo[:inhumedNumber])...) require.NoError(t, err) require.Equal(t, phy, mm.objectCounters[physical]) @@ -160,15 +151,12 @@ func TestCounters(t *testing.T) { }) t.Run("Delete", func(t *testing.T) { - var prm shard.DeletePrm - phy := mm.objectCounters[physical] logic := mm.objectCounters[logical] deletedNumber := int(phy / 4) - prm.SetAddresses(addrFromObjs(oo[:deletedNumber])...) - _, err := sh.Delete(prm) + err := sh.Delete(addrFromObjs(oo[:deletedNumber])) require.NoError(t, err) require.Equal(t, phy-uint64(deletedNumber), mm.objectCounters[physical]) diff --git a/pkg/local_object_storage/shard/move.go b/pkg/local_object_storage/shard/move.go index 381af30d59..c664206204 100644 --- a/pkg/local_object_storage/shard/move.go +++ b/pkg/local_object_storage/shard/move.go @@ -5,39 +5,25 @@ import ( "go.uber.org/zap" ) -// ToMoveItPrm encapsulates parameters for ToMoveIt operation. -type ToMoveItPrm struct { - addr oid.Address -} - -// ToMoveItRes encapsulates results of ToMoveIt operation. -type ToMoveItRes struct{} - -// SetAddress sets object address that should be marked to move into another -// shard. -func (p *ToMoveItPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - // ToMoveIt calls metabase.ToMoveIt method to mark object as relocatable to // another shard. 
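// Sketch of the other flattened single-purpose calls appearing in the tests
// above: ToMoveIt and Delete now take addresses directly and return only an
// error (helper name is illustrative; Delete's shape follows the counters
// test in this diff).
func relocateAndDelete(sh *shard.Shard, addr oid.Address, garbage []oid.Address) error {
	// Mark the object as relocatable to another shard; metabase failures are
	// only logged by the shard, not returned.
	if err := sh.ToMoveIt(addr); err != nil {
		return err
	}
	// Physically remove already-inhumed objects.
	return sh.Delete(garbage)
}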
-func (s *Shard) ToMoveIt(prm ToMoveItPrm) (ToMoveItRes, error) { +func (s *Shard) ToMoveIt(addr oid.Address) error { s.m.RLock() defer s.m.RUnlock() m := s.info.Mode if m.ReadOnly() { - return ToMoveItRes{}, ErrReadOnlyMode + return ErrReadOnlyMode } else if m.NoMetabase() { - return ToMoveItRes{}, ErrDegradedMode + return ErrDegradedMode } - err := s.metaBase.ToMoveIt(prm.addr) + err := s.metaBase.ToMoveIt(addr) if err != nil { s.log.Debug("could not mark object for shard relocation in metabase", - zap.String("error", err.Error()), + zap.Error(err), ) } - return ToMoveItRes{}, nil + return nil } diff --git a/pkg/local_object_storage/shard/put.go b/pkg/local_object_storage/shard/put.go index d74a404b92..d4f2a8f7e2 100644 --- a/pkg/local_object_storage/shard/put.go +++ b/pkg/local_object_storage/shard/put.go @@ -9,63 +9,35 @@ import ( "go.uber.org/zap" ) -// PutPrm groups the parameters of Put operation. -type PutPrm struct { - obj *object.Object - - binSet bool - objBin []byte - hdrLen int -} - -// PutRes groups the resulting values of Put operation. -type PutRes struct{} - -// SetObject is a Put option to set object to save. -func (p *PutPrm) SetObject(obj *object.Object) { - p.obj = obj -} - -// SetObjectBinary allows to provide the already encoded object in [Shard] -// format. Object header must be a prefix with specified length. If provided, -// the encoding step is skipped. It's the caller's responsibility to ensure that -// the data matches the object structure being processed. -func (p *PutPrm) SetObjectBinary(objBin []byte, hdrLen int) { - p.binSet = true - p.objBin = objBin - p.hdrLen = hdrLen -} - -// Put saves the object in shard. +// Put saves the object in shard. objBin and hdrLen parameters are +// optional and used to optimize out object marshaling, when used both must +// be valid. // // Returns any error encountered that // did not allow to completely save the object. // // Returns ErrReadOnlyMode error if shard is in "read-only" mode. -func (s *Shard) Put(prm PutPrm) (PutRes, error) { +func (s *Shard) Put(obj *object.Object, objBin []byte, hdrLen int) error { s.m.RLock() defer s.m.RUnlock() m := s.info.Mode if m.ReadOnly() { - return PutRes{}, ErrReadOnlyMode + return ErrReadOnlyMode } - var data []byte var err error - if prm.binSet { - data = prm.objBin - } else { - data = prm.obj.Marshal() + if objBin == nil { + objBin = obj.Marshal() // TODO: currently, we don't need to calculate prm.hdrLen in this case. // If you do this, then underlying code below for accessing the metabase could // reuse already encoded header. 
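// Caller-side sketch of the new Put signature shown in this hunk: passing
// nil/0 lets the shard marshal the object itself, while callers that already
// hold the encoded object pass it together with its header length to skip
// re-encoding (both must describe the same object). Helper names are
// illustrative.
func storeObject(sh *shard.Shard, obj *objectSDK.Object) error {
	return sh.Put(obj, nil, 0)
}

func storeEncodedObject(sh *shard.Shard, obj *objectSDK.Object, objBin []byte, hdrLen int) error {
	return sh.Put(obj, objBin, hdrLen)
}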
} var putPrm common.PutPrm // form Put parameters - putPrm.Object = prm.obj - putPrm.RawData = data - putPrm.Address = objectCore.AddressOf(prm.obj) + putPrm.Object = obj + putPrm.RawData = objBin + putPrm.Address = objectCore.AddressOf(obj) var res common.PutRes @@ -83,24 +55,24 @@ func (s *Shard) Put(prm PutPrm) (PutRes, error) { res, err = s.blobStor.Put(putPrm) if err != nil { - return PutRes{}, fmt.Errorf("could not put object to BLOB storage: %w", err) + return fmt.Errorf("could not put object to BLOB storage: %w", err) } } if !m.NoMetabase() { var binHeader []byte - if prm.binSet { - binHeader = data[:prm.hdrLen] + if hdrLen != 0 { + binHeader = objBin[:hdrLen] } - if err := s.metaBase.Put(prm.obj, res.StorageID, binHeader); err != nil { + if err := s.metaBase.Put(obj, res.StorageID, binHeader); err != nil { // may we need to handle this case in a special way // since the object has been successfully written to BlobStor - return PutRes{}, fmt.Errorf("could not put object to metabase: %w", err) + return fmt.Errorf("could not put object to metabase: %w", err) } s.incObjectCounter() - s.addToContainerSize(putPrm.Address.Container().EncodeToString(), int64(prm.obj.PayloadSize())) + s.addToContainerSize(putPrm.Address.Container().EncodeToString(), int64(obj.PayloadSize())) } - return PutRes{}, nil + return nil } diff --git a/pkg/local_object_storage/shard/put_test.go b/pkg/local_object_storage/shard/put_test.go index 5789ce1b28..72bdb7bf73 100644 --- a/pkg/local_object_storage/shard/put_test.go +++ b/pkg/local_object_storage/shard/put_test.go @@ -3,7 +3,6 @@ package shard_test import ( "testing" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" oidtest "github.com/nspcc-dev/neofs-sdk-go/object/id/test" objecttest "github.com/nspcc-dev/neofs-sdk-go/object/test" "github.com/stretchr/testify/require" @@ -31,17 +30,12 @@ func TestShard_PutBinary(t *testing.T) { sh := newShard(t, false) - var putPrm shard.PutPrm - putPrm.SetObject(&obj) - putPrm.SetObjectBinary(objBin, hdrLen) - _, err := sh.Put(putPrm) + err := sh.Put(&obj, objBin, hdrLen) require.NoError(t, err) - var getPrm shard.GetPrm - getPrm.SetAddress(addr) - res, err := sh.Get(getPrm) + res, err := sh.Get(addr, false) require.NoError(t, err) - require.Equal(t, &obj, res.Object()) + require.Equal(t, &obj, res) testGetBytes(t, sh, addr, objBin) require.NoError(t, err) @@ -49,16 +43,13 @@ func TestShard_PutBinary(t *testing.T) { // now place some garbage addr.SetObject(oidtest.ID()) obj.SetID(addr.Object()) // to avoid 'already exists' outcome - putPrm.SetObject(&obj) invalidObjBin := []byte("definitely not an object") - putPrm.SetObjectBinary(invalidObjBin, 5) - _, err = sh.Put(putPrm) + err = sh.Put(&obj, invalidObjBin, 5) require.NoError(t, err) testGetBytes(t, sh, addr, invalidObjBin) require.NoError(t, err) - getPrm.SetAddress(addr) - _, err = sh.Get(getPrm) + _, err = sh.Get(addr, false) require.Error(t, err) } diff --git a/pkg/local_object_storage/shard/range.go b/pkg/local_object_storage/shard/range.go index f45ae9432d..f760ee97cc 100644 --- a/pkg/local_object_storage/shard/range.go +++ b/pkg/local_object_storage/shard/range.go @@ -1,6 +1,8 @@ package shard import ( + "fmt" + "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/common" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/util/logicerr" @@ -10,54 +12,8 @@ import ( oid "github.com/nspcc-dev/neofs-sdk-go/object/id" ) -// RngPrm groups the parameters of GetRange 
operation. -type RngPrm struct { - ln uint64 - - off uint64 - - addr oid.Address - - skipMeta bool -} - -// RngRes groups the resulting values of GetRange operation. -type RngRes struct { - obj *object.Object - hasMeta bool -} - -// SetAddress is a Rng option to set the address of the requested object. -// -// Option is required. -func (p *RngPrm) SetAddress(addr oid.Address) { - p.addr = addr -} - -// SetRange is a GetRange option to set range of requested payload data. -func (p *RngPrm) SetRange(off uint64, ln uint64) { - p.off, p.ln = off, ln -} - -// SetIgnoreMeta is a Get option try to fetch object from blobstor directly, -// without accessing metabase. -func (p *RngPrm) SetIgnoreMeta(ignore bool) { - p.skipMeta = ignore -} - -// Object returns the requested object part. -// -// Instance payload contains the requested range of the original object. -func (r RngRes) Object() *object.Object { - return r.obj -} - -// HasMeta returns true if info about the object was found in the metabase. -func (r RngRes) HasMeta() bool { - return r.hasMeta -} - -// GetRange reads part of an object from shard. +// GetRange reads part of an object from shard. If skipMeta is specified +// data will be fetched directly from the blobstor, bypassing metabase. // // Returns any error encountered that // did not allow to completely read the object part. @@ -66,17 +22,17 @@ func (r RngRes) HasMeta() bool { // Returns an error of type apistatus.ObjectNotFound if the requested object is missing. // Returns an error of type apistatus.ObjectAlreadyRemoved if the requested object has been marked as removed in shard. // Returns the object.ErrObjectIsExpired if the object is presented but already expired. -func (s *Shard) GetRange(prm RngPrm) (RngRes, error) { +func (s *Shard) GetRange(addr oid.Address, offset uint64, length uint64, skipMeta bool) (*object.Object, error) { s.m.RLock() defer s.m.RUnlock() - var res RngRes + var obj *object.Object cb := func(stor *blobstor.BlobStor, id []byte) error { var getRngPrm common.GetRangePrm - getRngPrm.Address = prm.addr - getRngPrm.Range.SetOffset(prm.off) - getRngPrm.Range.SetLength(prm.ln) + getRngPrm.Address = addr + getRngPrm.Range.SetOffset(offset) + getRngPrm.Range.SetLength(length) getRngPrm.StorageID = id r, err := stor.GetRange(getRngPrm) @@ -84,33 +40,35 @@ func (s *Shard) GetRange(prm RngPrm) (RngRes, error) { return err } - res.obj = object.New() - res.obj.SetPayload(r.Data) + obj = object.New() + obj.SetPayload(r.Data) return nil } wc := func(c writecache.Cache) error { - o, err := c.Get(prm.addr) + o, err := c.Get(addr) if err != nil { return err } payload := o.Payload() - from := prm.off - to := from + prm.ln + from := offset + to := from + length if pLen := uint64(len(payload)); to < from || pLen < from || pLen < to { return logicerr.Wrap(apistatus.ObjectOutOfRange{}) } - res.obj = object.New() - res.obj.SetPayload(payload[from:to]) + obj = object.New() + obj.SetPayload(payload[from:to]) return nil } - skipMeta := prm.skipMeta || s.info.Mode.NoMetabase() - var err error - res.hasMeta, err = s.fetchObjectData(prm.addr, skipMeta, cb, wc) + skipMeta = skipMeta || s.info.Mode.NoMetabase() + gotMeta, err := s.fetchObjectData(addr, skipMeta, cb, wc) + if err != nil && gotMeta { + err = fmt.Errorf("%w, %w", err, ErrMetaWithNoObject) + } - return res, err + return obj, err } diff --git a/pkg/local_object_storage/shard/range_test.go b/pkg/local_object_storage/shard/range_test.go index 407a997eea..121d5e9a1a 100644 --- a/pkg/local_object_storage/shard/range_test.go +++ 
b/pkg/local_object_storage/shard/range_test.go @@ -11,7 +11,6 @@ import ( "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/peapod" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/writecache" apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status" objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" @@ -89,23 +88,16 @@ func testShardGetRange(t *testing.T, hasWriteCache bool) { addr := object.AddressOf(obj) payload := bytes.Clone(obj.Payload()) - var putPrm shard.PutPrm - putPrm.SetObject(obj) - - _, err := sh.Put(putPrm) + err := sh.Put(obj, nil, 0) require.NoError(t, err) - var rngPrm shard.RngPrm - rngPrm.SetAddress(addr) - rngPrm.SetRange(tc.rng.GetOffset(), tc.rng.GetLength()) - - res, err := sh.GetRange(rngPrm) + res, err := sh.GetRange(addr, tc.rng.GetOffset(), tc.rng.GetLength(), false) if tc.hasErr { require.ErrorAs(t, err, &apistatus.ObjectOutOfRange{}) } else { require.Equal(t, payload[tc.rng.GetOffset():tc.rng.GetOffset()+tc.rng.GetLength()], - res.Object().Payload()) + res.Payload()) } }) } diff --git a/pkg/local_object_storage/shard/reload_test.go b/pkg/local_object_storage/shard/reload_test.go index 238c1c6d9d..35ea63c841 100644 --- a/pkg/local_object_storage/shard/reload_test.go +++ b/pkg/local_object_storage/shard/reload_test.go @@ -56,17 +56,14 @@ func TestShardReload(t *testing.T) { for i := range objects { objects[i].obj = newObject(t) objects[i].addr = objectCore.AddressOf(objects[i].obj) - require.NoError(t, putObject(sh, objects[i].obj)) + require.NoError(t, sh.Put(objects[i].obj, nil, 0)) } checkHasObjects := func(t *testing.T, exists bool) { for i := range objects { - var prm ExistsPrm - prm.SetAddress(objects[i].addr) - - res, err := sh.Exists(prm) + res, err := sh.Exists(objects[i].addr, false) require.NoError(t, err) - require.Equal(t, exists, res.Exists(), "object #%d is missing", i) + require.Equal(t, exists, res, "object #%d is missing", i) } } @@ -90,7 +87,7 @@ func TestShardReload(t *testing.T) { t.Run("can put objects", func(t *testing.T) { obj := newObject(t) - require.NoError(t, putObject(sh, obj)) + require.NoError(t, sh.Put(obj, nil, 0)) objects = append(objects, objAddr{obj: obj, addr: objectCore.AddressOf(obj)}) }) @@ -108,7 +105,7 @@ func TestShardReload(t *testing.T) { // Cleanup is done, no panic. obj := newObject(t) - require.ErrorIs(t, putObject(sh, obj), ErrReadOnlyMode) + require.ErrorIs(t, sh.Put(obj, nil, 0), ErrReadOnlyMode) // Old objects are still accessible. 
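// Caller-side sketch of the GetRange change above: offset and length are
// passed directly and the returned object carries only the requested payload
// slice (helper name is illustrative).
func readPayloadRange(sh *shard.Shard, addr oid.Address, off, ln uint64) ([]byte, error) {
	part, err := sh.GetRange(addr, off, ln, false)
	if err != nil {
		// Out-of-bounds requests fail with apistatus.ObjectOutOfRange.
		return nil, err
	}
	return part.Payload(), nil
}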
checkHasObjects(t, true) @@ -118,7 +115,7 @@ func TestShardReload(t *testing.T) { require.NoError(t, sh.Reload(newOpts...)) obj = newObject(t) - require.NoError(t, putObject(sh, obj)) + require.NoError(t, sh.Put(obj, nil, 0)) objects = append(objects, objAddr{obj: obj, addr: objectCore.AddressOf(obj)}) checkHasObjects(t, true) @@ -126,14 +123,6 @@ func TestShardReload(t *testing.T) { }) } -func putObject(sh *Shard, obj *objectSDK.Object) error { - var prm PutPrm - prm.SetObject(obj) - - _, err := sh.Put(prm) - return err -} - func newObject(t testing.TB) *objectSDK.Object { x := objectSDK.New() ver := version.Current() diff --git a/pkg/local_object_storage/shard/restore.go b/pkg/local_object_storage/shard/restore.go index 349d3ae290..7ff16f1424 100644 --- a/pkg/local_object_storage/shard/restore.go +++ b/pkg/local_object_storage/shard/restore.go @@ -5,7 +5,6 @@ import ( "encoding/binary" "errors" "io" - "os" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/util/logicerr" "github.com/nspcc-dev/neofs-sdk-go/object" @@ -14,77 +13,26 @@ import ( // ErrInvalidMagic is returned when dump format is invalid. var ErrInvalidMagic = logicerr.New("invalid magic") -// RestorePrm groups the parameters of Restore operation. -type RestorePrm struct { - path string - stream io.Reader - ignoreErrors bool -} - -// WithPath is a Restore option to set the destination path. -func (p *RestorePrm) WithPath(path string) { - p.path = path -} - -// WithStream is a Restore option to set the stream to read objects from. -// It takes priority over `WithPath` option. -func (p *RestorePrm) WithStream(r io.Reader) { - p.stream = r -} - -// WithIgnoreErrors is a Restore option which allows to ignore errors encountered during restore. -// Corrupted objects will not be processed. -func (p *RestorePrm) WithIgnoreErrors(ignore bool) { - p.ignoreErrors = ignore -} - -// RestoreRes groups the result fields of Restore operation. -type RestoreRes struct { - count int - failed int -} - -// Count return amount of object written. -func (r RestoreRes) Count() int { - return r.count -} - -// FailCount return amount of object skipped. -func (r RestoreRes) FailCount() int { - return r.failed -} - -// Restore restores objects from the dump prepared by Dump. +// Restore restores objects from the dump prepared by Dump. If ignoreErrors +// is set any restore errors are ignored (corrupted objects are just skipped). // -// Returns any error encountered. -func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) { +// Returns two numbers: successful and failed restored objects, as well as any +// error encountered. +func (s *Shard) Restore(r io.Reader, ignoreErrors bool) (int, int, error) { // Disallow changing mode during restore. 
s.m.RLock() defer s.m.RUnlock() if s.info.Mode.ReadOnly() { - return RestoreRes{}, ErrReadOnlyMode - } - - r := prm.stream - if r == nil { - f, err := os.OpenFile(prm.path, os.O_RDONLY, os.ModeExclusive) - if err != nil { - return RestoreRes{}, err - } - defer f.Close() - - r = f + return 0, 0, ErrReadOnlyMode } var m [4]byte _, _ = io.ReadFull(r, m[:]) if !bytes.Equal(m[:], dumpMagic) { - return RestoreRes{}, ErrInvalidMagic + return 0, 0, ErrInvalidMagic } - var putPrm PutPrm - var count, failCount int var data []byte var size [4]byte @@ -96,7 +44,7 @@ func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) { if errors.Is(err, io.EOF) { break } - return RestoreRes{}, err + return count, failCount, err } sz := binary.LittleEndian.Uint32(size[:]) @@ -108,27 +56,26 @@ func (s *Shard) Restore(prm RestorePrm) (RestoreRes, error) { _, err = r.Read(data) if err != nil { - return RestoreRes{}, err + return count, failCount, err } obj := object.New() err = obj.Unmarshal(data) if err != nil { - if prm.ignoreErrors { + if ignoreErrors { failCount++ continue } - return RestoreRes{}, err + return count, failCount, err } - putPrm.SetObject(obj) - _, err = s.Put(putPrm) + err = s.Put(obj, nil, 0) if err != nil && !IsErrObjectExpired(err) && !IsErrRemoved(err) { - return RestoreRes{}, err + return count, failCount, err } count++ } - return RestoreRes{count: count, failed: failCount}, nil + return count, failCount, nil } diff --git a/pkg/local_object_storage/shard/select.go b/pkg/local_object_storage/shard/select.go index 6d84070786..eb752e757a 100644 --- a/pkg/local_object_storage/shard/select.go +++ b/pkg/local_object_storage/shard/select.go @@ -8,52 +8,24 @@ import ( oid "github.com/nspcc-dev/neofs-sdk-go/object/id" ) -// SelectPrm groups the parameters of Select operation. -type SelectPrm struct { - cnr cid.ID - filters object.SearchFilters -} - -// SelectRes groups the resulting values of Select operation. -type SelectRes struct { - addrList []oid.Address -} - -// SetContainerID is a Select option to set the container id to search in. -func (p *SelectPrm) SetContainerID(cnr cid.ID) { - p.cnr = cnr -} - -// SetFilters is a Select option to set the object filters. -func (p *SelectPrm) SetFilters(fs object.SearchFilters) { - p.filters = fs -} - -// AddressList returns list of addresses of the selected objects. -func (r SelectRes) AddressList() []oid.Address { - return r.addrList -} - // Select selects the objects from shard that match select parameters. // // Returns any error encountered that // did not allow to completely select the objects. // // Returns [object.ErrInvalidSearchQuery] if specified query is invalid. 
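// Caller-side sketch of the new Restore contract above: the caller opens the
// dump file itself (as the control server now does) and gets the restored and
// failed counts back directly. The path argument, helper name and the os
// import are assumptions.
func restoreFromDump(sh *shard.Shard, path string, ignoreErrors bool) (restored, failed int, err error) {
	f, err := os.Open(path)
	if err != nil {
		return 0, 0, err
	}
	defer f.Close()

	return sh.Restore(f, ignoreErrors)
}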
-func (s *Shard) Select(prm SelectPrm) (SelectRes, error) { +func (s *Shard) Select(cnr cid.ID, filters object.SearchFilters) ([]oid.Address, error) { s.m.RLock() defer s.m.RUnlock() if s.info.Mode.NoMetabase() { - return SelectRes{}, ErrDegradedMode + return nil, ErrDegradedMode } - addrs, err := s.metaBase.Select(prm.cnr, prm.filters) + addrs, err := s.metaBase.Select(cnr, filters) if err != nil { - return SelectRes{}, fmt.Errorf("could not select objects from metabase: %w", err) + return nil, fmt.Errorf("could not select objects from metabase: %w", err) } - return SelectRes{ - addrList: addrs, - }, nil + return addrs, nil } diff --git a/pkg/local_object_storage/shard/shutdown_test.go b/pkg/local_object_storage/shard/shutdown_test.go index de08b84161..8a06ca0d9b 100644 --- a/pkg/local_object_storage/shard/shutdown_test.go +++ b/pkg/local_object_storage/shard/shutdown_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/nspcc-dev/neofs-node/pkg/core/object" - "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/writecache" cidtest "github.com/nspcc-dev/neofs-sdk-go/container/id/test" objectSDK "github.com/nspcc-dev/neofs-sdk-go/object" @@ -34,11 +33,8 @@ func TestWriteCacheObjectLoss(t *testing.T) { sh := newCustomShard(t, dir, true, wcOpts, nil) - var putPrm shard.PutPrm - for i := range objects { - putPrm.SetObject(objects[i]) - _, err := sh.Put(putPrm) + err := sh.Put(objects[i], nil, 0) require.NoError(t, err) } require.NoError(t, sh.Close()) @@ -46,12 +42,8 @@ func TestWriteCacheObjectLoss(t *testing.T) { sh = newCustomShard(t, dir, true, wcOpts, nil) defer releaseShard(sh, t) - var getPrm shard.GetPrm - for i := range objects { - getPrm.SetAddress(object.AddressOf(objects[i])) - - _, err := sh.Get(getPrm) + _, err := sh.Get(object.AddressOf(objects[i]), false) require.NoError(t, err, i) } } diff --git a/pkg/local_object_storage/shard/writecache.go b/pkg/local_object_storage/shard/writecache.go index 7282f121ca..8963a31be9 100644 --- a/pkg/local_object_storage/shard/writecache.go +++ b/pkg/local_object_storage/shard/writecache.go @@ -4,22 +4,13 @@ import ( "errors" ) -// FlushWriteCachePrm represents parameters of a `FlushWriteCache` operation. -type FlushWriteCachePrm struct { - ignoreErrors bool -} - -// SetIgnoreErrors sets the flag to ignore read-errors during flush. -func (p *FlushWriteCachePrm) SetIgnoreErrors(ignore bool) { - p.ignoreErrors = ignore -} - // errWriteCacheDisabled is returned when an operation on write-cache is performed, // but write-cache is disabled. var errWriteCacheDisabled = errors.New("write-cache is disabled") -// FlushWriteCache flushes all data from the write-cache. -func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error { +// FlushWriteCache flushes all data from the write-cache. If ignoreErrors +// is set will flush all objects it can irrespective of any errors. 
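// Caller-side sketch of the flattened Select above and FlushWriteCache below
// (helper name is illustrative; objectSDK is the neofs-sdk-go object package
// already used in this diff).
func selectAndFlush(sh *shard.Shard, cnr cid.ID) ([]oid.Address, error) {
	fs := objectSDK.NewSearchFilters()
	fs.AddPhyFilter() // only physically stored objects

	addrs, err := sh.Select(cnr, fs)
	if err != nil {
		return nil, err
	}

	// Flush everything it can from the write-cache, ignoring read errors.
	return addrs, sh.FlushWriteCache(true)
}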
+func (s *Shard) FlushWriteCache(ignoreErrors bool) error { if !s.hasWriteCache() { return errWriteCacheDisabled } @@ -35,5 +26,5 @@ func (s *Shard) FlushWriteCache(p FlushWriteCachePrm) error { return ErrDegradedMode } - return s.writeCache.Flush(p.ignoreErrors) + return s.writeCache.Flush(ignoreErrors) } diff --git a/pkg/local_object_storage/writecache/flush_test.go b/pkg/local_object_storage/writecache/flush_test.go index 7c3550395e..a0df0e7ef7 100644 --- a/pkg/local_object_storage/writecache/flush_test.go +++ b/pkg/local_object_storage/writecache/flush_test.go @@ -213,10 +213,7 @@ func TestFlush(t *testing.T) { require.NoError(t, err) } - var inhumePrm meta.InhumePrm - inhumePrm.SetAddresses(objects[0].addr, objects[1].addr) - inhumePrm.SetTombstone(oidtest.Address(), 0) - _, err := mb.Inhume(inhumePrm) + _, _, err := mb.Inhume(oidtest.Address(), 0, false, objects[0].addr, objects[1].addr) require.NoError(t, err) _, err = mb.Delete([]oid.Address{objects[2].addr, objects[3].addr}) diff --git a/pkg/morph/event/listener.go b/pkg/morph/event/listener.go index 2eb3ea552f..02347b44e0 100644 --- a/pkg/morph/event/listener.go +++ b/pkg/morph/event/listener.go @@ -132,7 +132,7 @@ func (l *listener) Listen(ctx context.Context) { l.startOnce.Do(func() { if err := l.listen(ctx); err != nil { l.log.Error("could not start listen to events", - zap.String("error", err.Error()), + zap.Error(err), ) } }) @@ -148,7 +148,7 @@ func (l *listener) ListenWithError(ctx context.Context, intError chan<- error) { l.startOnce.Do(func() { if err := l.listen(ctx); err != nil { l.log.Error("could not start listen to events", - zap.String("error", err.Error()), + zap.Error(err), ) intError <- err } @@ -299,7 +299,7 @@ func (l *listener) parseAndHandleNotification(notifyEvent *state.ContainedNotifi event, err := parser(notifyEvent) if err != nil { log.Warn("could not parse notification event", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -331,11 +331,11 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { case errors.Is(err, ErrTXAlreadyHandled) || errors.Is(err, ErrUnknownEvent): case errors.Is(err, ErrMainTXExpired): l.log.Warn("skip expired main TX notary event", - zap.String("error", err.Error()), + zap.Error(err), ) default: l.log.Warn("could not prepare and validate notary event", - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -367,7 +367,7 @@ func (l *listener) parseAndHandleNotary(nr *result.NotaryRequestEvent) { event, err := parser(notaryEvent) if err != nil { log.Warn("could not parse notary event", - zap.String("error", err.Error()), + zap.Error(err), ) return diff --git a/pkg/morph/event/utils.go b/pkg/morph/event/utils.go index 3a05334180..a3e3916109 100644 --- a/pkg/morph/event/utils.go +++ b/pkg/morph/event/utils.go @@ -93,7 +93,7 @@ func WorkerPoolHandler(w util2.WorkerPool, h Handler, log *zap.Logger) Handler { if err != nil { log.Warn("could not Submit handler to worker pool", - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/services/audit/auditor/context.go b/pkg/services/audit/auditor/context.go index 138190bf0a..05fa38c32c 100644 --- a/pkg/services/audit/auditor/context.go +++ b/pkg/services/audit/auditor/context.go @@ -201,7 +201,7 @@ func (c *Context) expired() bool { select { case <-ctx.Done(): c.log.Debug("audit context is done", - zap.String("error", ctx.Err().Error()), + zap.Error(ctx.Err()), ) return true diff --git a/pkg/services/audit/auditor/pdp.go b/pkg/services/audit/auditor/pdp.go index 
656809ae16..562734b1f0 100644 --- a/pkg/services/audit/auditor/pdp.go +++ b/pkg/services/audit/auditor/pdp.go @@ -143,7 +143,7 @@ func (c *Context) collectHashes(p *gamePair) { c.log.Debug("could not get payload range hash", zap.Stringer("id", p.id), zap.String("node", netmap.StringifyPublicKey(n)), - zap.String("error", err.Error()), + zap.Error(err), ) return res } diff --git a/pkg/services/audit/auditor/pop.go b/pkg/services/audit/auditor/pop.go index a5b18c074d..c96f1929aa 100644 --- a/pkg/services/audit/auditor/pop.go +++ b/pkg/services/audit/auditor/pop.go @@ -62,7 +62,7 @@ func (c *Context) processObjectPlacement(id oid.ID, nodes []netmap.NodeInfo, rep if err != nil { c.log.Debug("could not get object header from candidate", zap.Stringer("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) continue @@ -134,7 +134,7 @@ func (c *Context) iterateSGMembersPlacementRand(f func(oid.ID, int, []netmap.Nod if err != nil { c.log.Debug("could not build placement for object", zap.Stringer("id", id), - zap.String("error", err.Error()), + zap.Error(err), ) return false diff --git a/pkg/services/audit/auditor/por.go b/pkg/services/audit/auditor/por.go index a7e6c693e3..c5034c4829 100644 --- a/pkg/services/audit/auditor/por.go +++ b/pkg/services/audit/auditor/por.go @@ -104,7 +104,7 @@ func (c *Context) checkStorageGroupPoR(sgID oid.ID, sg storagegroupSDK.StorageGr if err != nil { c.log.Debug("can't concatenate tz hash", zap.String("oid", members[i].String()), - zap.String("error", err.Error())) + zap.Error(err)) break } diff --git a/pkg/services/audit/taskmanager/listen.go b/pkg/services/audit/taskmanager/listen.go index 1f5ef3f966..d4e6a88380 100644 --- a/pkg/services/audit/taskmanager/listen.go +++ b/pkg/services/audit/taskmanager/listen.go @@ -22,7 +22,7 @@ func (m *Manager) Listen(ctx context.Context) { select { case <-ctx.Done(): m.log.Warn("stop listener by context", - zap.String("error", ctx.Err().Error()), + zap.Error(ctx.Err()), ) m.workerPool.Release() @@ -42,7 +42,7 @@ func (m *Manager) handleTask(task *audit.Task) { pdpPool, err := m.pdpPoolGenerator() if err != nil { m.log.Error("could not generate PDP worker pool", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -51,7 +51,7 @@ func (m *Manager) handleTask(task *audit.Task) { porPool, err := m.pdpPoolGenerator() if err != nil { m.log.Error("could not generate PoR worker pool", - zap.String("error", err.Error()), + zap.Error(err), ) return diff --git a/pkg/services/container/announcement/load/controller/calls.go b/pkg/services/container/announcement/load/controller/calls.go index 4cae4f53f5..b89383dc45 100644 --- a/pkg/services/container/announcement/load/controller/calls.go +++ b/pkg/services/container/announcement/load/controller/calls.go @@ -63,7 +63,7 @@ func (c *announceContext) announce() { metricsIterator, err = c.ctrl.prm.LocalMetrics.InitIterator(c.ctx) if err != nil { c.log.Debug("could not initialize iterator over locally collected metrics", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -73,7 +73,7 @@ func (c *announceContext) announce() { targetWriter, err := c.ctrl.prm.LocalAnnouncementTarget.InitWriter(c.ctx) if err != nil { c.log.Debug("could not initialize announcement accumulator", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -107,7 +107,7 @@ func (c *announceContext) announce() { ) if err != nil { c.log.Debug("iterator over locally collected metrics aborted", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -117,7 +117,7 @@ func 
(c *announceContext) announce() { err = targetWriter.Close() if err != nil { c.log.Debug("could not finish writing local announcements", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -287,7 +287,7 @@ func (c *stopContext) report() { localIterator, err = c.ctrl.prm.AnnouncementAccumulator.InitIterator(c.ctx) if err != nil { c.log.Debug("could not initialize iterator over locally accumulated announcements", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -297,7 +297,7 @@ func (c *stopContext) report() { resultWriter, err := c.ctrl.prm.ResultReceiver.InitWriter(c.ctx) if err != nil { c.log.Debug("could not initialize result target", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -310,7 +310,7 @@ func (c *stopContext) report() { ) if err != nil { c.log.Debug("iterator over local announcements aborted", - zap.String("error", err.Error()), + zap.Error(err), ) return @@ -320,7 +320,7 @@ func (c *stopContext) report() { err = resultWriter.Close() if err != nil { c.log.Debug("could not finish writing load estimations", - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/services/container/announcement/load/route/calls.go b/pkg/services/container/announcement/load/route/calls.go index f18c10e830..f67dc85d07 100644 --- a/pkg/services/container/announcement/load/route/calls.go +++ b/pkg/services/container/announcement/load/route/calls.go @@ -122,7 +122,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { provider, err := w.router.remoteProvider.InitRemote(remoteInfo) if err != nil { w.router.log.Debug("could not initialize writer provider", - zap.String("error", err.Error()), + zap.Error(err), ) continue // best effort @@ -131,7 +131,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { remoteWriter, err = provider.InitWriter(w.ctx) if err != nil { w.router.log.Debug("could not initialize writer", - zap.String("error", err.Error()), + zap.Error(err), ) continue // best effort @@ -143,7 +143,7 @@ func (w *loadWriter) Put(a container.SizeEstimation) error { err := remoteWriter.Put(a) if err != nil { w.router.log.Debug("could not put the value", - zap.String("error", err.Error()), + zap.Error(err), ) } @@ -159,7 +159,7 @@ func (w *loadWriter) Close() error { if err != nil { w.router.log.Debug("could not close remote server writer", zap.String("key", key), - zap.String("error", err.Error()), + zap.Error(err), ) } } diff --git a/pkg/services/control/server/dump.go b/pkg/services/control/server/dump.go index dba2c532e1..df069b273f 100644 --- a/pkg/services/control/server/dump.go +++ b/pkg/services/control/server/dump.go @@ -2,6 +2,8 @@ package control import ( "context" + "fmt" + "os" "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard" "github.com/nspcc-dev/neofs-node/pkg/services/control" @@ -23,11 +25,13 @@ func (s *Server) DumpShard(_ context.Context, req *control.DumpShardRequest) (*c shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID()) - var prm shard.DumpPrm - prm.WithPath(req.GetBody().GetFilepath()) - prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors()) + f, err := os.Create(req.GetBody().GetFilepath()) + if err != nil { + return nil, fmt.Errorf("can't open destination file: %w", err) + } + defer f.Close() - err = s.storage.DumpShard(shardID, prm) + err = s.storage.DumpShard(shardID, f, req.GetBody().GetIgnoreErrors()) if err != nil { return nil, status.Error(codes.Internal, err.Error()) } diff --git a/pkg/services/control/server/restore.go 
b/pkg/services/control/server/restore.go
index 06a36a9cc6..627e4fe5cf 100644
--- a/pkg/services/control/server/restore.go
+++ b/pkg/services/control/server/restore.go
@@ -2,6 +2,7 @@ package control
 
 import (
 	"context"
+	"os"
 
 	"github.com/nspcc-dev/neofs-node/pkg/local_object_storage/shard"
 	"github.com/nspcc-dev/neofs-node/pkg/services/control"
@@ -23,11 +24,13 @@ func (s *Server) RestoreShard(_ context.Context, req *control.RestoreShardReques
 
 	shardID := shard.NewIDFromBytes(req.GetBody().GetShard_ID())
 
-	var prm shard.RestorePrm
-	prm.WithPath(req.GetBody().GetFilepath())
-	prm.WithIgnoreErrors(req.GetBody().GetIgnoreErrors())
+	f, err := os.Open(req.GetBody().GetFilepath())
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	defer f.Close()
 
-	err = s.storage.RestoreShard(shardID, prm)
+	err = s.storage.RestoreShard(shardID, f, req.GetBody().GetIgnoreErrors())
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
diff --git a/pkg/services/object/acl/v2/classifier.go b/pkg/services/object/acl/v2/classifier.go
index 6205898ba7..c37d29c776 100644
--- a/pkg/services/object/acl/v2/classifier.go
+++ b/pkg/services/object/acl/v2/classifier.go
@@ -50,7 +50,7 @@ func (c senderClassifier) classify(
 	if err != nil {
 		// do not throw error, try best case matching
 		l.Debug("can't check if request from inner ring",
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	} else if isInnerRingNode {
 		return &classifyResult{
 			role: acl.RoleInnerRing,
@@ -65,7 +65,7 @@ func (c senderClassifier) classify(
 		// is not possible for previous epoch, so
 		// do not throw error, try best case matching
 		l.Debug("can't check if request from container node",
-			zap.String("error", err.Error()))
+			zap.Error(err))
 	} else if isContainerNode {
 		return &classifyResult{
 			role: acl.RoleContainer,
diff --git a/pkg/services/object/delete/delete.go b/pkg/services/object/delete/delete.go
index b50e261c69..c628b057fd 100644
--- a/pkg/services/object/delete/delete.go
+++ b/pkg/services/object/delete/delete.go
@@ -50,7 +50,7 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
 		exec.log.Debug("operation finished successfully")
 	default:
 		exec.log.Debug("operation finished with error",
-			zap.String("error", exec.err.Error()),
+			zap.Error(exec.err),
 		)
 
 		if execCnr {
diff --git a/pkg/services/object/delete/exec.go b/pkg/services/object/delete/exec.go
index a483443d81..ea23c114c3 100644
--- a/pkg/services/object/delete/exec.go
+++ b/pkg/services/object/delete/exec.go
@@ -124,7 +124,7 @@ func (exec *execCtx) saveTombstone() bool {
 		exec.err = err
 
 		exec.log.Debug("could not save the tombstone",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return false
diff --git a/pkg/services/object/delete/local.go b/pkg/services/object/delete/local.go
index 4408b0bbe5..70d62965b5 100644
--- a/pkg/services/object/delete/local.go
+++ b/pkg/services/object/delete/local.go
@@ -26,7 +26,7 @@ func (exec *execCtx) formTombstone() (ok bool) {
 		exec.err = err
 
 		exec.log.Debug("could not read tombstone lifetime config",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return false
diff --git a/pkg/services/object/get/container.go b/pkg/services/object/get/container.go
index f5cb585777..ee3b64323e 100644
--- a/pkg/services/object/get/container.go
+++ b/pkg/services/object/get/container.go
@@ -45,7 +45,7 @@ func (exec *execCtx) executeOnContainer() {
 		select {
 		case <-ctx.Done():
 			exec.log.Debug("interrupt placement iteration by context",
-				zap.String("error", ctx.Err().Error()),
+				zap.Error(ctx.Err()),
 			)
 
 			return
diff --git a/pkg/services/object/get/exec.go b/pkg/services/object/get/exec.go
index 428e13f706..44da77a6db 100644
--- a/pkg/services/object/get/exec.go
+++ b/pkg/services/object/get/exec.go
@@ -214,7 +214,7 @@ func (exec *execCtx) headChild(id oid.ID) (*objectSDK.Object, bool) {
 
 		exec.log.Debug("could not get child object header",
 			zap.Stringer("child ID", id),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return nil, false
@@ -283,7 +283,7 @@ func (exec *execCtx) writeCollectedHeader() bool {
 		exec.err = err
 
 		exec.log.Debug("could not write header",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	case err == nil:
 		exec.status = statusOK
@@ -306,7 +306,7 @@ func (exec *execCtx) writeObjectPayload(obj *objectSDK.Object) bool {
 		exec.err = err
 
 		exec.log.Debug("could not write payload chunk",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	case err == nil:
 		exec.status = statusOK
diff --git a/pkg/services/object/get/get.go b/pkg/services/object/get/get.go
index ba7a27329b..0c906ff6d5 100644
--- a/pkg/services/object/get/get.go
+++ b/pkg/services/object/get/get.go
@@ -115,7 +115,7 @@ func (exec *execCtx) analyzeStatus(execCnr bool) {
 		return
 	default:
 		exec.log.Debug("operation finished with error",
-			zap.String("error", exec.err.Error()),
+			zap.Error(exec.err),
 		)
 
 		if execCnr {
diff --git a/pkg/services/object/get/local.go b/pkg/services/object/get/local.go
index e397bc1269..598900d696 100644
--- a/pkg/services/object/get/local.go
+++ b/pkg/services/object/get/local.go
@@ -21,7 +21,7 @@ func (exec *execCtx) executeLocal() {
 		exec.err = err
 
 		exec.log.Debug("local get failed",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	case err == nil:
 		exec.status = statusOK
diff --git a/pkg/services/object/get/remote.go b/pkg/services/object/get/remote.go
index 566ec6f25e..8310f580aa 100644
--- a/pkg/services/object/get/remote.go
+++ b/pkg/services/object/get/remote.go
@@ -32,7 +32,7 @@ func (exec *execCtx) processNode(info client.NodeInfo) bool {
 		exec.err = apistatus.ErrObjectNotFound
 
 		l.Debug("remote call failed",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	case err == nil:
 		exec.status = statusOK
diff --git a/pkg/services/object/search/container.go b/pkg/services/object/search/container.go
index cead4fe789..b90de5ae17 100644
--- a/pkg/services/object/search/container.go
+++ b/pkg/services/object/search/container.go
@@ -67,7 +67,7 @@ func (exec *execCtx) executeOnContainer(ectx context.Context) {
 		select {
 		case <-ctx.Done():
 			lg.Debug("interrupt placement iteration by context",
-				zap.String("error", ctx.Err().Error()))
+				zap.Error(ctx.Err()))
 			return
 		default:
 		}
@@ -88,7 +88,7 @@ func (exec *execCtx) executeOnContainer(ectx context.Context) {
 		ids, err := c.searchObjects(ctx, exec, info)
 		if err != nil {
 			lg.Debug("remote operation failed",
-				zap.String("error", err.Error()))
+				zap.Error(err))
 
 			return
 		}
diff --git a/pkg/services/object/search/exec.go b/pkg/services/object/search/exec.go
index 2669458655..cbdd5b6973 100644
--- a/pkg/services/object/search/exec.go
+++ b/pkg/services/object/search/exec.go
@@ -68,7 +68,7 @@ func (exec *execCtx) writeIDList(ids []oid.ID) {
 		exec.err = err
 
 		exec.log.Debug("could not write object identifiers",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	case err == nil:
 		exec.status = statusOK
diff --git a/pkg/services/object/search/local.go b/pkg/services/object/search/local.go
index 1e47769212..339ebc1509 100644
--- a/pkg/services/object/search/local.go
+++ b/pkg/services/object/search/local.go
@@ -12,7 +12,7 @@ func (exec *execCtx) executeLocal() {
 		exec.err = err
 
 		exec.log.Debug("local operation failed",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return
diff --git a/pkg/services/object/search/search.go b/pkg/services/object/search/search.go
index 51742e0fb8..be28870576 100644
--- a/pkg/services/object/search/search.go
+++ b/pkg/services/object/search/search.go
@@ -51,7 +51,7 @@ func (exec *execCtx) analyzeStatus(ctx context.Context, execCnr bool) {
 	switch exec.status {
 	default:
 		exec.log.Debug("operation finished with error",
-			zap.String("error", exec.err.Error()),
+			zap.Error(exec.err),
 		)
 	case statusOK:
 		exec.log.Debug("operation finished successfully")
diff --git a/pkg/services/object/util/log.go b/pkg/services/object/util/log.go
index 36c634e053..246173acf7 100644
--- a/pkg/services/object/util/log.go
+++ b/pkg/services/object/util/log.go
@@ -10,7 +10,7 @@ func LogServiceError(l *zap.Logger, req string, node network.AddressGroup, err e
 	l.Error("object service error",
 		zap.String("node", network.StringifyGroup(node)),
 		zap.String("request", req),
-		zap.String("error", err.Error()),
+		zap.Error(err),
 	)
 }
 
@@ -18,6 +18,6 @@ func LogServiceError(l *zap.Logger, req string, node network.AddressGroup, err e
 func LogWorkerPoolError(l *zap.Logger, req string, err error) {
 	l.Error("could not push task to worker pool",
 		zap.String("request", req),
-		zap.String("error", err.Error()),
+		zap.Error(err),
 	)
 }
diff --git a/pkg/services/policer/check.go b/pkg/services/policer/check.go
index 6f9cde2e0e..8b3d849ff6 100644
--- a/pkg/services/policer/check.go
+++ b/pkg/services/policer/check.go
@@ -84,7 +84,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
 	if err != nil {
 		p.log.Error("could not get container",
 			zap.Stringer("cid", idCnr),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 		if container.IsErrNotFound(err) {
 			err = p.jobQueue.localStorage.Delete(addrWithType.Address)
@@ -92,7 +92,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
 				p.log.Error("could not inhume object with missing container",
 					zap.Stringer("cid", idCnr),
 					zap.Stringer("oid", idObj),
-					zap.String("error", err.Error()))
+					zap.Error(err))
 			}
 		}
 
@@ -105,7 +105,7 @@ func (p *Policer) processObject(ctx context.Context, addrWithType objectcore.Add
 	if err != nil {
 		p.log.Error("could not build placement vector for object",
 			zap.Stringer("cid", idCnr),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return
@@ -275,7 +275,7 @@ func (p *Policer) processNodes(ctx context.Context, plc *processPlacementContext
 		} else if err != nil {
 			p.log.Error("receive object header to check policy compliance",
 				zap.Stringer("object", plc.object.Address),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		} else {
 			shortage--
diff --git a/pkg/services/replicator/process.go b/pkg/services/replicator/process.go
index 661ba5c4c8..098b4052ed 100644
--- a/pkg/services/replicator/process.go
+++ b/pkg/services/replicator/process.go
@@ -74,7 +74,7 @@ func (p *Replicator) HandleTask(ctx context.Context, task Task, res TaskResult)
 
 		if err != nil {
 			log.Error("could not replicate object",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		} else {
 			log.Debug("object successfully replicated")
diff --git a/pkg/services/reputation/common/router/calls.go b/pkg/services/reputation/common/router/calls.go
index 2794107d0d..c8447e489c 100644
--- a/pkg/services/reputation/common/router/calls.go
+++ b/pkg/services/reputation/common/router/calls.go
@@ -92,7 +92,7 @@ func (w *trustWriter) Write(t reputation.Trust) error {
 		provider, err := w.router.remoteProvider.InitRemote(remoteInfo)
 		if err != nil {
 			w.router.log.Debug("could not initialize writer provider",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 
 			continue
@@ -102,7 +102,7 @@ func (w *trustWriter) Write(t reputation.Trust) error {
 		remoteWriter, err = provider.InitWriter(w.routeCtx.Context)
 		if err != nil {
 			w.router.log.Debug("could not initialize writer",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)

			continue
@@ -114,7 +114,7 @@ func (w *trustWriter) Write(t reputation.Trust) error {
 		err := remoteWriter.Write(t)
 		if err != nil {
 			w.router.log.Debug("could not write the value",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 	}
@@ -128,7 +128,7 @@ func (w *trustWriter) Close() error {
 		if err != nil {
 			w.router.log.Debug("could not close remote server writer",
 				zap.String("key", key),
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 	}
diff --git a/pkg/services/reputation/eigentrust/calculator/calls.go b/pkg/services/reputation/eigentrust/calculator/calls.go
index 31eb61d4a3..11aef14bd6 100644
--- a/pkg/services/reputation/eigentrust/calculator/calls.go
+++ b/pkg/services/reputation/eigentrust/calculator/calls.go
@@ -60,7 +60,7 @@ func (c *Calculator) Calculate(prm CalculatePrm) {
 	consumersIter, err := c.prm.DaughterTrustSource.InitConsumersIterator(ctx)
 	if err != nil {
 		log.Debug("consumers trust iterator's init failure",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 
 		return
@@ -80,7 +80,7 @@ func (c *Calculator) Calculate(prm CalculatePrm) {
 		})
 		if err != nil {
 			log.Debug("worker pool submit failure",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 
@@ -89,7 +89,7 @@ func (c *Calculator) Calculate(prm CalculatePrm) {
 	})
 	if err != nil {
 		log.Debug("iterate daughter's consumers failed",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
@@ -109,7 +109,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 	if err != nil {
 		c.opts.log.Debug("get initial trust failure",
 			zap.Stringer("daughter", p.id),
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -118,7 +118,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 	daughterIter, err := c.prm.DaughterTrustSource.InitDaughterIterator(p.ctx, p.id)
 	if err != nil {
 		c.opts.log.Debug("daughter trust iterator's init failure",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -140,7 +140,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 	})
 	if err != nil {
 		c.opts.log.Debug("iterate over daughter's trusts failure",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -162,7 +162,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 		finalWriter, err := c.prm.FinalResultTarget.InitIntermediateWriter(p.ctx)
 		if err != nil {
 			c.opts.log.Debug("init writer failure",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)

			return
@@ -173,7 +173,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 		err = finalWriter.WriteIntermediateTrust(intermediateTrust)
 		if err != nil {
 			c.opts.log.Debug("write final result failure",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)

			return
@@ -182,7 +182,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 		intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(p.ctx)
 		if err != nil {
 			c.opts.log.Debug("init writer failure",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)

			return
@@ -203,7 +203,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 			err := intermediateWriter.Write(trust)
 			if err != nil {
 				c.opts.log.Debug("write value failure",
-					zap.String("error", err.Error()),
+					zap.Error(err),
 				)
 			}
 
@@ -211,7 +211,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 		})
 		if err != nil {
 			c.opts.log.Debug("iterate daughter trusts failure",
-				zap.String("error", err.Error()),
+				zap.Error(err),
 			)
 		}
 
@@ -219,7 +219,7 @@ func (c *Calculator) iterateDaughter(p iterDaughterPrm) {
 	if err != nil {
 		c.opts.log.Error(
 			"could not close writer",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
@@ -229,7 +229,7 @@ func (c *Calculator) sendInitialValues(ctx Context) {
 	daughterIter, err := c.prm.DaughterTrustSource.InitAllDaughtersIterator(ctx)
 	if err != nil {
 		c.opts.log.Debug("all daughters trust iterator's init failure",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -238,7 +238,7 @@ func (c *Calculator) sendInitialValues(ctx Context) {
 	intermediateWriter, err := c.prm.IntermediateValueTarget.InitWriter(ctx)
 	if err != nil {
 		c.opts.log.Debug("init writer failure",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -252,7 +252,7 @@ func (c *Calculator) sendInitialValues(ctx Context) {
 			if err != nil {
 				c.opts.log.Debug("get initial trust failure",
 					zap.Stringer("peer", trusted),
-					zap.String("error", err.Error()),
+					zap.Error(err),
 				)
 
 				// don't stop on single failure
@@ -265,7 +265,7 @@ func (c *Calculator) sendInitialValues(ctx Context) {
 			err = intermediateWriter.Write(trust)
 			if err != nil {
 				c.opts.log.Debug("write value failure",
-					zap.String("error", err.Error()),
+					zap.Error(err),
 				)
 
 				// don't stop on single failure
@@ -276,14 +276,14 @@ func (c *Calculator) sendInitialValues(ctx Context) {
 	})
 	if err != nil {
 		c.opts.log.Debug("iterate over all daughters failure",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 
 	err = intermediateWriter.Close()
 	if err != nil {
 		c.opts.log.Debug("could not close writer",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)
 	}
 }
diff --git a/pkg/services/reputation/local/controller/calls.go b/pkg/services/reputation/local/controller/calls.go
index c488ac5281..cd08f79758 100644
--- a/pkg/services/reputation/local/controller/calls.go
+++ b/pkg/services/reputation/local/controller/calls.go
@@ -101,7 +101,7 @@ func (c *reportContext) report() {
 	iterator, err := c.ctrl.prm.LocalTrustSource.InitIterator(c.ctx)
 	if err != nil {
 		c.log.Debug("could not initialize iterator over local trust values",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -111,7 +111,7 @@ func (c *reportContext) report() {
 	targetWriter, err := c.ctrl.prm.LocalTrustTarget.InitWriter(c.ctx)
 	if err != nil {
 		c.log.Debug("could not initialize local trust target",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -130,7 +130,7 @@ func (c *reportContext) report() {
 	)
 	if err != nil && !errors.Is(err, context.Canceled) {
 		c.log.Debug("iterator over local trust failed",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return
@@ -140,7 +140,7 @@ func (c *reportContext) report() {
 	err = targetWriter.Close()
 	if err != nil {
 		c.log.Debug("could not finish writing local trust values",
-			zap.String("error", err.Error()),
+			zap.Error(err),
 		)

		return