
Commit e5d94a2

Merge pull request etcd-io#7347 from gyuho/static-check
*: add 'staticcheck' to 'test'
gyuho authored Mar 7, 2017
2 parents 79de3be + 3d75395 commit e5d94a2
Showing 20 changed files with 130 additions and 62 deletions.
1 change: 1 addition & 0 deletions .travis.yml
@@ -44,6 +44,7 @@ before_install:
- go get -v github.com/chzchzchz/goword
- go get -v honnef.co/go/tools/cmd/gosimple
- go get -v honnef.co/go/tools/cmd/unused
+ - go get -v honnef.co/go/tools/cmd/staticcheck

# disable godep restore override
install:
3 changes: 2 additions & 1 deletion auth/store_test.go
@@ -345,7 +345,7 @@ func TestRoleRevokePermission(t *testing.T) {
t.Fatal(err)
}

- r, err := as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
+ _, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
if err != nil {
t.Fatal(err)
}
@@ -359,6 +359,7 @@ func TestRoleRevokePermission(t *testing.T) {
t.Fatal(err)
}

+ var r *pb.AuthRoleGetResponse
r, err = as.RoleGet(&pb.AuthRoleGetRequest{Role: "role-test-1"})
if err != nil {
t.Fatal(err)
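Several of the test changes in this commit (here, in etcdctl's role_commands.go, in lease/lessor_test.go, and in the mvcc benchmark below) silence the same staticcheck complaint: a value is assigned to a variable but never read before it is overwritten or the function returns. A minimal sketch of that pattern — generic code, not etcd's, with the check ID assumed to be SA4006 as in current staticcheck versions:

package main

import "fmt"

func lookup(m map[string]int, k string) (int, error) {
	v, ok := m[k]
	if !ok {
		return 0, fmt.Errorf("%q not found", k)
	}
	return v, nil
}

func main() {
	m := map[string]int{"a": 1, "b": 2}

	// Before the fix this read `v, err := lookup(m, "a")`, but v was
	// overwritten below without ever being read — the dead store that
	// staticcheck reports.
	_, err := lookup(m, "a")
	if err != nil {
		fmt.Println(err)
		return
	}

	v, err := lookup(m, "b") // only this result is actually used
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v)
}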
4 changes: 2 additions & 2 deletions etcdctl/ctlv2/command/role_commands.go
@@ -117,13 +117,13 @@ func actionRoleAdd(c *cli.Context) error {
api, role := mustRoleAPIAndName(c)
ctx, cancel := contextWithTotalTimeout(c)
defer cancel()
- currentRole, err := api.GetRole(ctx, role)
+ currentRole, _ := api.GetRole(ctx, role)
if currentRole != nil {
fmt.Fprintf(os.Stderr, "Role %s already exists\n", role)
os.Exit(1)
}

- err = api.AddRole(ctx, role)
+ err := api.AddRole(ctx, role)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
2 changes: 1 addition & 1 deletion etcdctl/ctlv3/command/printer_protobuf.go
@@ -60,5 +60,5 @@ func printPB(v interface{}) {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
- fmt.Printf(string(b))
+ fmt.Print(string(b))
}
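The printer fix above addresses a real bug class, not just style: fmt.Printf treats its first argument as a format string, so any '%' in the marshaled protobuf output would be parsed as a verb. A standalone sketch of the difference, using only the standard library:

package main

import "fmt"

func main() {
	b := []byte("progress: 100% done")

	// Buggy: the data is used as a format string, so '%' is interpreted
	// as a verb and the output is mangled with %! error markers.
	fmt.Printf(string(b))
	fmt.Println()

	// Fixed: the bytes are written verbatim.
	fmt.Print(string(b))
	fmt.Println()
}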
5 changes: 3 additions & 2 deletions etcdserver/raft.go
@@ -113,7 +113,7 @@ type raftNode struct {
readStateC chan raft.ReadState

// utility
- ticker <-chan time.Time
+ ticker *time.Ticker
// contention detectors for raft heartbeat message
td *contention.TimeoutDetector
heartbeat time.Duration // for logging
@@ -143,7 +143,7 @@ func (r *raftNode) start(rh *raftReadyHandler) {

for {
select {
- case <-r.ticker:
+ case <-r.ticker.C:
r.Tick()
case rd := <-r.Ready():
if rd.SoftState != nil {
@@ -303,6 +303,7 @@ func (r *raftNode) stop() {

func (r *raftNode) onStop() {
r.Stop()
+ r.ticker.Stop()
r.transport.Stop()
if err := r.storage.Close(); err != nil {
plog.Panicf("raft close storage error: %v", err)
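The switch from time.Tick to time.NewTicker is the main behavioral change in this file: time.Tick returns a bare channel whose underlying ticker can never be stopped, so a node that shuts down leaks it — the leak staticcheck flags (SA1015 in current versions, an assumption on the exact ID). Holding a *time.Ticker lets the shutdown path (onStop above) release it. A minimal sketch of the pattern, not etcd code:

package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	time.AfterFunc(350*time.Millisecond, func() { close(done) })

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop() // unlike time.Tick, this ticker can be released

	for {
		select {
		case <-ticker.C: // the channel now lives behind .C
			fmt.Println("tick")
		case <-done:
			return
		}
	}
}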
1 change: 1 addition & 0 deletions etcdserver/raft_test.go
@@ -158,6 +158,7 @@ func TestStopRaftWhenWaitingForApplyDone(t *testing.T) {
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
}}
srv.r.start(nil)
n.readyc <- raft.Ready{}
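The tests plug the new field with a zero-valued &time.Ticker{}. Its C channel is nil, and a receive from a nil channel blocks forever, so the select loop in raftNode.start simply never sees a tick. A small demonstration of why the stub is safe, assuming nothing beyond the standard library:

package main

import (
	"fmt"
	"time"
)

func main() {
	tk := &time.Ticker{} // zero value: tk.C is a nil channel

	select {
	case <-tk.C:
		fmt.Println("unreachable: a nil channel never delivers")
	case <-time.After(50 * time.Millisecond):
		fmt.Println("timed out waiting on the stub ticker, as expected")
	}
}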
18 changes: 10 additions & 8 deletions etcdserver/server.go
@@ -220,7 +220,7 @@ type EtcdServer struct {
stats *stats.ServerStats
lstats *stats.LeaderStats

- SyncTicker <-chan time.Time
+ SyncTicker *time.Ticker
// compactor is used to auto-compact the KV.
compactor *compactor.Periodic

@@ -416,7 +416,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
r: raftNode{
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
- ticker: time.Tick(heartbeat),
+ ticker: time.NewTicker(heartbeat),
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
td: contention.NewTimeoutDetector(2 * heartbeat),
@@ -431,7 +431,7 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) {
cluster: cl,
stats: sstats,
lstats: lstats,
- SyncTicker: time.Tick(500 * time.Millisecond),
+ SyncTicker: time.NewTicker(500 * time.Millisecond),
peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
@@ -606,7 +606,7 @@ type raftReadyHandler struct {
}

func (s *EtcdServer) run() {
- snap, err := s.r.raftStorage.Snapshot()
+ sn, err := s.r.raftStorage.Snapshot()
if err != nil {
plog.Panicf("get snapshot from raft storage error: %v", err)
}
@@ -637,7 +637,7 @@ func (s *EtcdServer) run() {
}
setSyncC(nil)
} else {
- setSyncC(s.SyncTicker)
+ setSyncC(s.SyncTicker.C)
if s.compactor != nil {
s.compactor.Resume()
}
@@ -664,9 +664,9 @@ func (s *EtcdServer) run() {
// asynchronously accept apply packets, dispatch progress in-order
sched := schedule.NewFIFOScheduler()
ep := etcdProgress{
- confState: snap.Metadata.ConfState,
- snapi: snap.Metadata.Index,
- appliedi: snap.Metadata.Index,
+ confState: sn.Metadata.ConfState,
+ snapi: sn.Metadata.Index,
+ appliedi: sn.Metadata.Index,
}

defer func() {
@@ -679,6 +679,8 @@ func (s *EtcdServer) run() {
// wait for gouroutines before closing raft so wal stays open
s.wg.Wait()

+ s.SyncTicker.Stop()

// must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
// by adding a peer after raft stops the transport
s.r.stop()
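The snap → sn rename in run() is presumably because the local variable shadowed the imported snap package (etcdserver imports github.com/coreos/etcd/snap), a hazard the stricter linting in this PR makes easier to trip over — the rationale is an inference, not stated in the commit. A generic illustration of the hazard, not etcd code:

package main

import (
	"fmt"
	"sort"
)

func main() {
	xs := []int{3, 1, 2}
	sort.Ints(xs) // the sort package is reachable here

	sort := "shadowed" // a local variable now hides the package in this scope
	fmt.Println(sort, xs)
	// sort.Ints(xs) // would no longer compile: sort is a string here
}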
91 changes: 56 additions & 35 deletions etcdserver/server_test.go
@@ -173,11 +173,13 @@ func TestApplyRepeat(t *testing.T) {
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
},
- Cfg: &ServerConfig{},
- store: st,
- cluster: cl,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ Cfg: &ServerConfig{},
+ store: st,
+ cluster: cl,
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster}
s.start()
@@ -635,9 +637,11 @@ func TestDoProposal(t *testing.T) {
storage: mockstorage.NewStorageRecorder(""),
raftStorage: raft.NewMemoryStorage(),
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
},
- store: st,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ store: st,
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
srv.start()
@@ -788,16 +792,18 @@ func TestSyncTimeout(t *testing.T) {
func TestSyncTrigger(t *testing.T) {
n := newReadyNode()
st := make(chan time.Time, 1)
+ tk := &time.Ticker{C: st}
srv := &EtcdServer{
Cfg: &ServerConfig{TickMs: 1},
r: raftNode{
Node: n,
raftStorage: raft.NewMemoryStorage(),
transport: rafthttp.NewNopTransporter(),
storage: mockstorage.NewStorageRecorder(""),
+ ticker: &time.Ticker{},
},
store: mockstore.NewNop(),
- SyncTicker: st,
+ SyncTicker: tk,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
}

@@ -910,9 +916,11 @@ func TestTriggerSnap(t *testing.T) {
raftStorage: raft.NewMemoryStorage(),
storage: p,
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
},
- store: st,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ store: st,
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}

@@ -979,9 +987,11 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) {
storage: mockstorage.NewStorageRecorder(testdir),
raftStorage: rs,
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
+ ticker: &time.Ticker{},
},
- store: st,
- cluster: cl,
+ store: st,
+ cluster: cl,
+ SyncTicker: &time.Ticker{},
}
s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster}

@@ -1059,11 +1069,13 @@ func TestAddMember(t *testing.T) {
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
},
- Cfg: &ServerConfig{},
- store: st,
- cluster: cl,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ Cfg: &ServerConfig{},
+ store: st,
+ cluster: cl,
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
s.start()
m := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"foo"}}}
@@ -1099,11 +1111,13 @@ func TestRemoveMember(t *testing.T) {
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
},
- Cfg: &ServerConfig{},
- store: st,
- cluster: cl,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ Cfg: &ServerConfig{},
+ store: st,
+ cluster: cl,
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
s.start()
err := s.RemoveMember(context.TODO(), 1234)
@@ -1138,10 +1152,12 @@ func TestUpdateMember(t *testing.T) {
raftStorage: raft.NewMemoryStorage(),
storage: mockstorage.NewStorageRecorder(""),
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
},
- store: st,
- cluster: cl,
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ store: st,
+ cluster: cl,
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
s.start()
wm := membership.Member{ID: 1234, RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://127.0.0.1:1"}}}
@@ -1173,11 +1189,12 @@ func TestPublish(t *testing.T) {
readych: make(chan struct{}),
Cfg: &ServerConfig{TickMs: 1},
id: 1,
- r: raftNode{Node: n},
+ r: raftNode{Node: n, ticker: &time.Ticker{}},
attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}},
cluster: &membership.RaftCluster{},
w: w,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
srv.publish(time.Hour)

@@ -1216,13 +1233,15 @@ func TestPublishStopped(t *testing.T) {
r: raftNode{
Node: newNodeNop(),
transport: rafthttp.NewNopTransporter(),
+ ticker: &time.Ticker{},
},
- cluster: &membership.RaftCluster{},
- w: mockwait.NewNop(),
- done: make(chan struct{}),
- stopping: make(chan struct{}),
- stop: make(chan struct{}),
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ cluster: &membership.RaftCluster{},
+ w: mockwait.NewNop(),
+ done: make(chan struct{}),
+ stopping: make(chan struct{}),
+ stop: make(chan struct{}),
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
close(srv.stopping)
srv.publish(time.Hour)
@@ -1232,11 +1251,12 @@ func TestPublishRetry(t *testing.T) {
func TestPublishRetry(t *testing.T) {
n := newNodeRecorderStream()
srv := &EtcdServer{
- Cfg: &ServerConfig{TickMs: 1},
- r: raftNode{Node: n},
- w: mockwait.NewNop(),
- stopping: make(chan struct{}),
- reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ Cfg: &ServerConfig{TickMs: 1},
+ r: raftNode{Node: n, ticker: &time.Ticker{}},
+ w: mockwait.NewNop(),
+ stopping: make(chan struct{}),
+ reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
// expect multiple proposals from retrying
ch := make(chan struct{})
@@ -1270,11 +1290,12 @@ func TestUpdateVersion(t *testing.T) {
srv := &EtcdServer{
id: 1,
Cfg: &ServerConfig{TickMs: 1},
- r: raftNode{Node: n},
+ r: raftNode{Node: n, ticker: &time.Ticker{}},
attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}},
cluster: &membership.RaftCluster{},
w: w,
reqIDGen: idutil.NewGenerator(0, time.Time{}),
+ SyncTicker: &time.Ticker{},
}
srv.updateClusterVersion("2.0.0")

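TestSyncTrigger above shows the testing payoff of the *time.Ticker change: because time.Ticker is a plain struct, a test can inject its own channel as C and fire "ticks" on demand instead of waiting on real time. A stripped-down sketch of that technique, not etcd code:

package main

import (
	"fmt"
	"time"
)

func main() {
	st := make(chan time.Time, 1)
	tk := &time.Ticker{C: st} // hand-made ticker backed by a test-owned channel

	st <- time.Now() // deterministically deliver one "tick"

	ts := <-tk.C
	fmt.Println("got tick at", ts.Format(time.RFC3339))
}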
2 changes: 1 addition & 1 deletion integration/cluster.go
@@ -603,7 +603,7 @@ func (m *member) Launch() error {
if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
return fmt.Errorf("failed to initialize the etcd server: %v", err)
}
- m.s.SyncTicker = time.Tick(500 * time.Millisecond)
+ m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
m.s.Start()

m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)}
2 changes: 1 addition & 1 deletion integration/v3_lease_test.go
@@ -105,7 +105,7 @@ func TestV3LeaseGrantByID(t *testing.T) {
}

// create duplicate fixed lease
- lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
+ _, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 1, TTL: 1})
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) {
3 changes: 2 additions & 1 deletion lease/lessor_test.go
@@ -55,11 +55,12 @@ func TestLessorGrant(t *testing.T) {
t.Errorf("term = %v, want at least %v", l.Remaining(), minLeaseTTLDuration-time.Second)
}

- nl, err := le.Grant(1, 1)
+ _, err = le.Grant(1, 1)
if err == nil {
t.Errorf("allocated the same lease")
}

+ var nl *Lease
nl, err = le.Grant(2, 1)
if err != nil {
t.Errorf("could not grant lease 2 (%v)", err)
1 change: 0 additions & 1 deletion mvcc/kvstore_bench_test.go
@@ -108,7 +108,6 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) {
}
}
b.ResetTimer()
- s = NewStore(be, &lease.FakeLessor{}, &i)
}

func BenchmarkStoreRestoreRevs1(b *testing.B) {
4 changes: 4 additions & 0 deletions pkg/transport/keepalive_listener_test.go
@@ -45,6 +45,10 @@ func TestNewKeepAliveListener(t *testing.T) {
ln.Close()

ln, err = net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("unexpected Listen error: %v", err)
+ }

// tls
tmp, err := createTempFile([]byte("XXX"))
if err != nil {
(remaining 7 changed files not shown)