package statistics

import (
	"math"
	"testing"
	"time"

	"github.com/matrix-org/dendrite/test"
	"github.com/matrix-org/gomatrixserverlib/spec"
	"github.com/stretchr/testify/assert"
)
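
// Thresholds used by the tests below when constructing Statistics.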
const (
	FailuresUntilAssumedOffline = 3
	FailuresUntilBlacklist      = 8
)
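
// TestBackoff drives a ServerStatistics through repeated failures and
// checks that the exponential backoff durations, the assumed-offline
// threshold and the blacklisting threshold all behave as expected.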
func TestBackoff(t *testing.T) {
	stats := NewStatistics(nil, FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
	server := ServerStatistics{
		statistics: &stats,
		serverName: "test.com",
	}

	// Start by checking that counting successes works.
	server.Success(SendDirect)
	if successes := server.SuccessCount(); successes != 1 {
		t.Fatalf("Expected success count 1, got %d", successes)
	}

	// Register a failure.
	server.Failure()

	t.Logf("Backoff counter: %d", server.backoffCount.Load())

	// Now we're going to simulate backing off a few times to see
	// what happens.
	for i := uint32(1); i <= 10; i++ {
		// Register another failure for good measure. This should have no
		// side effects since a backoff is already in progress. If it does
		// then we'll fail.
		until, blacklisted := server.Failure()
		blacklist := server.Blacklisted()
		assumedOffline := server.AssumedOffline()
		duration := time.Until(until)

		// Unset the backoff, or otherwise our next call will think that
		// there's a backoff in progress and return the same result.
		server.cancel()
		server.backoffStarted.Store(false)

		// Check if we should be assumed offline by now.
		if i >= stats.FailuresUntilAssumedOffline {
			if !assumedOffline {
				t.Fatalf("Backoff %d should have resulted in assumed offline but didn't", i)
			} else {
				t.Logf("Backoff %d is assumed offline as expected", i)
			}
		} else {
			if assumedOffline {
				t.Fatalf("Backoff %d should not have resulted in assumed offline but did", i)
			} else {
				t.Logf("Backoff %d is not assumed offline as expected", i)
			}
		}

		// Check if we should be blacklisted by now.
		if i >= stats.FailuresUntilBlacklist {
			if !blacklist {
				t.Fatalf("Backoff %d should have resulted in blacklist but didn't", i)
			} else if blacklist != blacklisted {
				t.Fatalf("Blacklisted and Failure returned different blacklist values")
			} else {
				t.Logf("Backoff %d is blacklisted as expected", i)
				continue
			}
		} else {
			if blacklist {
				t.Fatalf("Backoff %d should not have resulted in blacklist but did", i)
			} else {
				t.Logf("Backoff %d is not blacklisted as expected", i)
			}
		}

		// Check if the duration is what we expect.
		t.Logf("Backoff %d is for %s", i, duration)
		roundingAllowance := 0.01
		minDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*minJitterMultiplier*1000-roundingAllowance)
		maxDuration := time.Millisecond * time.Duration(math.Exp2(float64(i))*maxJitterMultiplier*1000+roundingAllowance)
		inJitterRange := duration >= minDuration && duration <= maxDuration
		if !blacklist && !inJitterRange {
			t.Fatalf("Backoff %d should have been between %s and %s but was %s", i, minDuration, maxDuration, duration)
		}
	}
}
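
// TestRelayServersListing checks that AddRelayServers deduplicates the
// relay servers it is given and that KnownRelayServers returns the
// resulting set. Unlike TestBackoff, it is constructed with an in-memory
// federation database, since the known relay servers are stored there.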
func TestRelayServersListing(t *testing.T) {
	stats := NewStatistics(test.NewInMemoryFederationDatabase(), FailuresUntilBlacklist, FailuresUntilAssumedOffline, false)
	server := ServerStatistics{statistics: &stats}
	server.AddRelayServers([]spec.ServerName{"relay1", "relay1", "relay2"})
	relayServers := server.KnownRelayServers()
	assert.Equal(t, []spec.ServerName{"relay1", "relay2"}, relayServers)
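
	// Adding the same relay servers a second time should not introduce
	// duplicates.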
	server.AddRelayServers([]spec.ServerName{"relay1", "relay1", "relay2"})
	relayServers = server.KnownRelayServers()
	assert.Equal(t, []spec.ServerName{"relay1", "relay2"}, relayServers)
}