
Commit

fixing tests
ineiti committed Jul 1, 2020
1 parent 4cec616 commit 17174af
Showing 7 changed files with 271 additions and 199 deletions.
5 changes: 3 additions & 2 deletions byzcoin/service.go
@@ -430,12 +430,14 @@ func (s *Service) AddTransaction(req *AddTxRequest) (*AddTxResponse, error) {

// Either send the transaction to the leader, or,
// if this node is the leader, directly send it to ctxChan.
// For every new tx create a new protocol, like in skipchain
leader, err := s.getLeader(req.SkipchainID)
if err != nil {
return nil, xerrors.Errorf("Error getting the leader: %v", err)
}

// Need to create the hash before sending the transaction to ctxChan,
// in case this node is the leader.
// Otherwise there is a race when creating the hash.
ctxHash := req.Transaction.Instructions.Hash()

if s.ServerIdentity().Equal(leader) {
@@ -497,7 +499,6 @@ func (s *Service) AddTransaction(req *AddTxRequest) (*AddTxResponse, error) {
tooLong := time.After(tooLongDur)

blocksLeft := req.InclusionWait

for {
select {
case notif := <-ch:
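Aside: the hunks above only show fragments of AddTransaction. As a rough, self-contained sketch of the two ideas the new comments describe — hashing the transaction before handing it to the processing channel, and then waiting for at most InclusionWait blocks or a timeout — the pattern looks as follows. All names and types here (txNotification, waitForInclusion) are invented for illustration and are not the ByzCoin API.

package main

import (
	"errors"
	"fmt"
	"time"
)

// txNotification is a stand-in for the notifications AddTransaction receives
// on its channel whenever a new block is created.
type txNotification struct {
	hash []byte
}

// waitForInclusion mirrors the shape of the select loop in AddTransaction:
// count down the allowed number of blocks, and give up after a deadline.
func waitForInclusion(ch <-chan txNotification, txHash []byte, blocksLeft int, tooLongDur time.Duration) error {
	tooLong := time.After(tooLongDur)
	for {
		select {
		case notif := <-ch:
			if string(notif.hash) == string(txHash) {
				return nil // the transaction made it into a block
			}
			blocksLeft--
			if blocksLeft == 0 {
				return errors.New("transaction was not included in the requested number of blocks")
			}
		case <-tooLong:
			return errors.New("timed out waiting for inclusion")
		}
	}
}

func main() {
	// Hash first, dispatch second: computing the hash before the transaction
	// is handed to the processing channel avoids racing with the processor.
	txHash := []byte("dummy-hash") // real code: req.Transaction.Instructions.Hash()
	ch := make(chan txNotification, 1)
	ch <- txNotification{hash: txHash}
	fmt.Println(waitForInclusion(ch, txHash, 10, 2*time.Second))
}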
63 changes: 1 addition & 62 deletions byzcoin/service_test.go
@@ -908,7 +908,6 @@ func waitInclusion(t *testing.T, client int) {
time.Sleep(time.Second)
}

/*
// Sends too many transactions to the ledger and waits for all blocks to be
// done.
func TestService_FloodLedger(t *testing.T) {
@@ -939,7 +938,6 @@ func TestService_FloodLedger(t *testing.T) {
t.Fatalf("didn't get at least 2 blocks: index before %d, index after %v", before.Index, latest.Index)
}
}
*/

func TestService_BigTx(t *testing.T) {
// Use longer block interval for this test, as sending around these big
@@ -1602,60 +1600,6 @@ func TestService_SetConfig(t *testing.T) {
require.Equal(t, blocksize, newBlocksize)
}

/*
func TestService_SetConfigInterval(t *testing.T) {
defer log.SetShowTime(log.ShowTime())
log.SetShowTime(true)
s := newSer(t, 1, testInterval)
defer s.local.CloseAll()
// Wait for a block completion to start the interval check
// to prevent the first one to be included in the setup block
ctx, err := createOneClientTx(s.darc.GetBaseID(), dummyContract, []byte{}, s.signer)
require.NoError(t, err)
s.sendTxAndWait(t, ctx, 10)
intervals := []time.Duration{
2 * time.Second,
5 * time.Second,
10 * time.Second,
20 * time.Second,
}
if testing.Short() {
intervals = intervals[0:2]
}
counter := 2
for _, interval := range intervals {
// The next block should now be in the range of testInterval.
log.Lvl1("Setting interval to", interval)
ctx, _ := createConfigTxWithCounter(t, interval, *s.roster, defaultMaxBlockSize, s, counter)
counter++
// The wait argument here is also used in case no block is received, so
// it means: at most 10*blockInterval, or after 10 blocks, whichever comes
// first. Putting it to 1 doesn't work, because the actual blockInterval
// is bigger, due to dedis/cothority#1409
s.sendTxAndWait(t, ctx, 10)
// We send an extra transaction first because the new interval is only loaded after a delay
// caused by the pipeline feature, i.e., the new interval is only used after an existing wait-interval
// is finished and not immediately after receiving the new configuration.
dummyCtx, _ := createOneClientTxWithCounter(s.darc.GetBaseID(), dummyContract, []byte{}, s.signer, uint64(counter))
counter++
s.sendTxAndWait(t, dummyCtx, 10)
start := time.Now()
dummyCtx, _ = createOneClientTxWithCounter(s.darc.GetBaseID(), dummyContract, []byte{}, s.signer, uint64(counter))
counter++
s.sendTxAndWait(t, dummyCtx, 10)
dur := time.Since(start)
require.InDelta(t, dur, interval, float64(1*time.Second))
}
}
*/
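The deleted TestService_SetConfigInterval above boils down to one timing assertion: send a transaction, wait for its block, and check that the measured duration is within a delta of the configured interval. Purely as an illustrative sketch (package, helper, and numbers are invented, and a sleep stands in for the real send-and-wait), that pattern is:

package sketch

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// measureBlock stands in for "send a transaction and wait for its block";
// it just sleeps so the sketch stays self-contained.
func measureBlock(interval time.Duration) time.Duration {
	start := time.Now()
	time.Sleep(interval) // real code: s.sendTxAndWait(t, dummyCtx, 10)
	return time.Since(start)
}

func TestIntervalWithinDelta(t *testing.T) {
	interval := 100 * time.Millisecond
	dur := measureBlock(interval)
	// Same assertion style as the deleted test: the measured duration must be
	// within the given delta of the configured block interval.
	require.InDelta(t, float64(interval), float64(dur), float64(50*time.Millisecond))
}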

func TestService_SetConfigRosterKeepLeader(t *testing.T) {
n := 6
if testing.Short() {
@@ -2772,12 +2716,7 @@ func (s *ser) sendTx(t *testing.T, ctx ClientTransaction) {
}

func (s *ser) sendTxTo(t *testing.T, ctx ClientTransaction, idx int) {
resp, err := s.services[idx].AddTransaction(&AddTxRequest{
Version: CurrentVersion,
SkipchainID: s.genesis.SkipChainID(),
Transaction: ctx,
})
transactionOK(t, resp, err)
s.sendTxToAndWait(t, ctx, idx, 0)
}

func (s *ser) sendTxAndWait(t *testing.T, ctx ClientTransaction, wait int) {
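The sendTxTo change above removes a duplicated copy of the request-building code by treating "send without waiting" as "send and wait for zero blocks". A minimal sketch of that refactor, with invented stand-in types (the real helpers build an AddTxRequest and call AddTransaction on the chosen service):

package sketch

// tx and sender are stand-ins for ClientTransaction and the test helper `ser`.
type tx struct{ id string }

type sender struct{}

// sendTxToAndWait is the single place that builds and submits the request;
// wait == 0 means "do not wait for inclusion".
func (s *sender) sendTxToAndWait(t tx, idx, wait int) {
	// real code: build the AddTxRequest (including the wait value) and call
	// AddTransaction on services[idx].
	_ = t
	_ = idx
	_ = wait
}

// sendTxTo is now a thin wrapper, mirroring the diff: the old copy of the
// request-building code is gone.
func (s *sender) sendTxTo(t tx, idx int) {
	s.sendTxToAndWait(t, idx, 0)
}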
51 changes: 22 additions & 29 deletions byzcoin/viewchange_test.go
@@ -288,58 +288,51 @@ func TestViewChange_LostSync(t *testing.T) {
// Test to make sure the view change triggers a proof propagation when a conode
// sends requests for old blocks, meaning it is out-of-sync and, as the leader
// is offline, it will never catch up.
// - Node0 - leader - stopped after creation of block #1
// - Node3 - misses block #1, unpaused after creation of block #1
func TestViewChange_NeedCatchUp(t *testing.T) {
rw := time.Duration(3)
s := newSerN(t, 1, testInterval, 5, rw)
nodes := 4
s := newSerN(t, 1, testInterval, nodes, rw)
defer s.local.CloseAll()

for _, service := range s.services {
service.SetPropagationTimeout(2 * testInterval)
}

s.hosts[3].Pause()
s.hosts[nodes-1].Pause()

// Create a block that host 4 will miss
log.Lvl1("Send block that node 4 will miss")
tx1, err := createOneClientTx(s.darc.GetBaseID(), dummyContract, s.value, s.signer)
require.NoError(t, err)
s.sendTxTo(t, tx1, 0)
s.sendTxToAndWait(t, tx1, 0, 10)

time.Sleep(5 * time.Second)

// Kill the leader, but the view change won't happen as
// 2 nodes are down
// Kill the leader, and unpause the sleepy node
s.services[0].TestClose()
s.hosts[0].Pause()

s.hosts[3].Unpause()
// This will trigger the proof to be propagated. In this test, the catch-up
// won't be triggered as only one block is missing.
s.services[3].sendViewChangeReq(viewchange.View{
ID: s.genesis.Hash,
Gen: s.genesis.SkipChainID(),
LeaderIndex: 1,
})
s.hosts[nodes-1].Unpause()

// Trigger a viewchange
// Create a block that host 4 will miss
log.Lvl1("Trigger the viewchange")
tx1, err = createOneClientTx(s.darc.GetBaseID(), dummyContract, s.value, s.signer)
require.NoError(t, err)
s.sendTxTo(t, tx1, 3)
s.sendTxTo(t, tx1, nodes-1)

// It will need a few seconds if it catches leader index 1, and a bit
// more if it goes to leader index 2, so we give it enough time.
sb := s.genesis
for i := 0; i < 60 && sb.Index != 2; i++ {
proof, err := s.services[4].skService().GetDB().GetProof(s.genesis.Hash)
require.NoError(t, err)
sb = proof[len(proof)-1]
log.Lvl1("Wait for the block to propagate")
require.NoError(t, NewClient(s.genesis.SkipChainID(),
*s.roster).WaitPropagation(1))

// wait for the view change to happen
time.Sleep(1 * time.Second)
}
// Send the block again
log.Lvl1("Sending block again")
s.sendTxTo(t, tx1, nodes-1)

log.Lvl1("Wait for the transaction to be included")
require.NoError(t, NewClient(s.genesis.SkipChainID(),
*s.roster).WaitPropagation(2))

// Check that a view change was finally executed
leader, err := s.services[4].getLeader(s.genesis.SkipChainID())
leader, err := s.services[nodes-1].getLeader(s.genesis.SkipChainID())
require.NoError(t, err)
require.NotNil(t, leader)
require.False(t, leader.Equal(s.services[0].ServerIdentity()))
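The rewritten test above swaps the manual GetProof polling loop for the client's WaitPropagation call. As a rough sketch of what such a wait amounts to — poll every node until each reports at least the wanted block index, or time out — with all names invented for illustration (this is not the actual cothority implementation):

package sketch

import (
	"fmt"
	"time"
)

// waitPropagation polls each node's latest block index until all of them have
// reached `index`, or gives up after `timeout`.
func waitPropagation(latestIndex []func() int, index int, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done := true
		for _, latest := range latestIndex {
			if latest() < index {
				done = false
				break
			}
		}
		if done {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("not all nodes reached block index %d in time", index)
}

// Example usage: every node already reports index 2, so this returns nil.
func exampleUsage() error {
	nodes := []func() int{func() int { return 2 }, func() int { return 2 }}
	return waitPropagation(nodes, 2, time.Second)
}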