Improve federation sender performance, implement backoff and blacklisting, fix up invites a bit (#1007)

* Improve federation sender performance and behaviour, add backoff

* Tweaks

* Tweaks

* Tweaks

* Take copies of events before passing to destination queues

* Don't accidentally drop queued messages

* Don't take copies again

* Tidy up a bit

* Break out statistics (tracked component-wide), report success and failures from Perform actions

* Fix comment, use atomic add

* Improve logic a bit, don't block on wakeup, move idle check

* Don't retry successful invites, don't dispatch sendEvent, sendInvite etc

* Dedupe destinations, fix other bug hopefully

* Dispatch sends again

* Federation sender to ignore invites that are destined locally

* Loopback invite events

* Remodel a bit with channels

* Linter

* Only loopback invite event if we know the room

* We should tell other resident servers about the invite if we know about the room

* Correct invite signing

* Fix invite loopback

* Check HTTP response codes, push new invites to front of queue

* Review comments
Neil Alexander 2020-05-07 12:42:06 +01:00 committed by GitHub
parent 3b98535dc5
commit a16db1c408
10 changed files with 474 additions and 142 deletions

View File

@@ -188,11 +188,30 @@ func (s *OutputRoomEventConsumer) processInvite(oie api.OutputNewInviteEvent) er
return nil
}
// Ignore invites that don't have state keys - they are invalid.
if oie.Event.StateKey() == nil {
return fmt.Errorf("event %q doesn't have state key", oie.Event.EventID())
}
// Don't try to handle events that are actually destined for us.
stateKey := *oie.Event.StateKey()
_, destination, err := gomatrixserverlib.SplitID('@', stateKey)
if err != nil {
log.WithFields(log.Fields{
"event_id": oie.Event.EventID(),
"state_key": stateKey,
}).Info("failed to split destination from state key")
return nil
}
if s.cfg.Matrix.ServerName == destination {
return nil
}
// Try to extract the room invite state. The roomserver will have stashed
// this for us in invite_room_state if it didn't already exist.
strippedState := []gomatrixserverlib.InviteV2StrippedState{}
if inviteRoomState := gjson.GetBytes(oie.Event.Unsigned(), "invite_room_state"); inviteRoomState.Exists() {
if err := json.Unmarshal([]byte(inviteRoomState.Raw), &strippedState); err != nil {
if err = json.Unmarshal([]byte(inviteRoomState.Raw), &strippedState); err != nil {
log.WithError(err).Warn("failed to extract invite_room_state from event unsigned")
}
}
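
For reference, the following is a minimal standalone sketch of the gjson + json.Unmarshal pattern used above to pull invite_room_state out of an event's unsigned content. The payload and the strippedEvent struct are invented for illustration; the real code unmarshals into []gomatrixserverlib.InviteV2StrippedState.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/tidwall/gjson"
)

// strippedEvent stands in for gomatrixserverlib.InviteV2StrippedState.
type strippedEvent struct {
	Type     string `json:"type"`
	StateKey string `json:"state_key"`
}

func main() {
	// A made-up "unsigned" blob containing invite_room_state.
	unsigned := []byte(`{"invite_room_state":[{"type":"m.room.name","state_key":""}]}`)
	var stripped []strippedEvent
	if result := gjson.GetBytes(unsigned, "invite_room_state"); result.Exists() {
		// result.Raw is the raw JSON of the matched value, so it can be
		// unmarshalled directly into a typed slice.
		if err := json.Unmarshal([]byte(result.Raw), &stripped); err != nil {
			fmt.Println("failed to extract invite_room_state:", err)
			return
		}
	}
	fmt.Printf("%+v\n", stripped)
}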

View File

@@ -24,6 +24,7 @@ import (
"github.com/matrix-org/dendrite/federationsender/producers"
"github.com/matrix-org/dendrite/federationsender/queue"
"github.com/matrix-org/dendrite/federationsender/storage"
"github.com/matrix-org/dendrite/federationsender/types"
roomserverAPI "github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/gomatrixserverlib"
"github.com/sirupsen/logrus"
@@ -42,9 +43,14 @@ func SetupFederationSenderComponent(
logrus.WithError(err).Panic("failed to connect to federation sender db")
}
roomserverProducer := producers.NewRoomserverProducer(rsAPI, base.Cfg.Matrix.ServerName)
roomserverProducer := producers.NewRoomserverProducer(
rsAPI, base.Cfg.Matrix.ServerName, base.Cfg.Matrix.KeyID, base.Cfg.Matrix.PrivateKey,
)
queues := queue.NewOutgoingQueues(base.Cfg.Matrix.ServerName, federation, roomserverProducer)
statistics := &types.Statistics{}
queues := queue.NewOutgoingQueues(
base.Cfg.Matrix.ServerName, federation, roomserverProducer, statistics,
)
rsConsumer := consumers.NewOutputRoomEventConsumer(
base.Cfg, base.KafkaConsumer, queues,
@@ -63,6 +69,7 @@ func SetupFederationSenderComponent(
queryAPI := internal.NewFederationSenderInternalAPI(
federationSenderDB, base.Cfg, roomserverProducer, federation, keyRing,
statistics,
)
queryAPI.SetupHTTP(http.DefaultServeMux)

View File

@@ -9,6 +9,7 @@ import (
"github.com/matrix-org/dendrite/federationsender/api"
"github.com/matrix-org/dendrite/federationsender/producers"
"github.com/matrix-org/dendrite/federationsender/storage"
"github.com/matrix-org/dendrite/federationsender/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
)
@@ -18,6 +19,7 @@ type FederationSenderInternalAPI struct {
api.FederationSenderInternalAPI
db storage.Database
cfg *config.Dendrite
statistics *types.Statistics
producer *producers.RoomserverProducer
federation *gomatrixserverlib.FederationClient
keyRing *gomatrixserverlib.KeyRing
@@ -28,6 +30,7 @@ func NewFederationSenderInternalAPI(
producer *producers.RoomserverProducer,
federation *gomatrixserverlib.FederationClient,
keyRing *gomatrixserverlib.KeyRing,
statistics *types.Statistics,
) *FederationSenderInternalAPI {
return &FederationSenderInternalAPI{
db: db,
@@ -35,6 +38,7 @@ func NewFederationSenderInternalAPI(
producer: producer,
federation: federation,
keyRing: keyRing,
statistics: statistics,
}
}

View File

@@ -25,10 +25,12 @@ func (r *FederationSenderInternalAPI) PerformDirectoryLookup(
request.RoomAlias,
)
if err != nil {
r.statistics.ForServer(request.ServerName).Failure()
return err
}
response.RoomID = dir.RoomID
response.ServerNames = dir.Servers
r.statistics.ForServer(request.ServerName).Success()
return nil
}
@@ -61,6 +63,7 @@ func (r *FederationSenderInternalAPI) PerformJoin(
)
if err != nil {
// TODO: Check if the user was not allowed to join the room.
r.statistics.ForServer(serverName).Failure()
return fmt.Errorf("r.federation.MakeJoin: %w", err)
}
@@ -112,6 +115,7 @@ func (r *FederationSenderInternalAPI) PerformJoin(
)
if err != nil {
logrus.WithError(err).Warnf("r.federation.SendJoin failed")
r.statistics.ForServer(serverName).Failure()
continue
}
@@ -137,6 +141,7 @@ func (r *FederationSenderInternalAPI) PerformJoin(
}
// We're all good.
r.statistics.ForServer(serverName).Success()
return nil
}
@@ -170,6 +175,7 @@ func (r *FederationSenderInternalAPI) PerformLeave(
if err != nil {
// TODO: Check if the user was not allowed to leave the room.
logrus.WithError(err).Warnf("r.federation.MakeLeave failed")
r.statistics.ForServer(serverName).Failure()
continue
}
@@ -221,9 +227,11 @@ func (r *FederationSenderInternalAPI) PerformLeave(
)
if err != nil {
logrus.WithError(err).Warnf("r.federation.SendLeave failed")
r.statistics.ForServer(serverName).Failure()
continue
}
r.statistics.ForServer(serverName).Success()
return nil
}

View File

@@ -16,6 +16,7 @@ package producers
import (
"context"
"crypto/ed25519"
"github.com/matrix-org/dendrite/roomserver/api"
"github.com/matrix-org/gomatrixserverlib"
@@ -25,15 +26,20 @@ import (
type RoomserverProducer struct {
InputAPI api.RoomserverInternalAPI
serverName gomatrixserverlib.ServerName
keyID gomatrixserverlib.KeyID
privateKey ed25519.PrivateKey
}
// NewRoomserverProducer creates a new RoomserverProducer
func NewRoomserverProducer(
rsAPI api.RoomserverInternalAPI, serverName gomatrixserverlib.ServerName,
keyID gomatrixserverlib.KeyID, privateKey ed25519.PrivateKey,
) *RoomserverProducer {
return &RoomserverProducer{
InputAPI: rsAPI,
serverName: serverName,
keyID: keyID,
privateKey: privateKey,
}
}
@@ -43,7 +49,7 @@ func NewRoomserverProducer(
func (c *RoomserverProducer) SendInviteResponse(
ctx context.Context, res gomatrixserverlib.RespInviteV2, roomVersion gomatrixserverlib.RoomVersion,
) (string, error) {
ev := res.Event.Headered(roomVersion)
ev := res.Event.Sign(string(c.serverName), c.keyID, c.privateKey).Headered(roomVersion)
ire := api.InputRoomEvent{
Kind: api.KindNew,
Event: ev,

View File

@@ -18,12 +18,13 @@ import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/matrix-org/dendrite/federationsender/producers"
"github.com/matrix-org/dendrite/federationsender/types"
"github.com/matrix-org/gomatrix"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"go.uber.org/atomic"
)
@@ -33,92 +34,190 @@ import (
// ensures that only one request is in flight to a given destination
// at a time.
type destinationQueue struct {
rsProducer *producers.RoomserverProducer
client *gomatrixserverlib.FederationClient
origin gomatrixserverlib.ServerName
destination gomatrixserverlib.ServerName
running atomic.Bool
// The running mutex protects sentCounter, lastTransactionIDs and
// pendingEvents, pendingEDUs.
runningMutex sync.Mutex
sentCounter int
lastTransactionIDs []gomatrixserverlib.TransactionID
pendingEvents []*gomatrixserverlib.HeaderedEvent
pendingEDUs []*gomatrixserverlib.EDU
pendingInvites []*gomatrixserverlib.InviteV2Request
rsProducer *producers.RoomserverProducer // roomserver producer
client *gomatrixserverlib.FederationClient // federation client
origin gomatrixserverlib.ServerName // origin of requests
destination gomatrixserverlib.ServerName // destination of requests
running atomic.Bool // is the queue worker running?
statistics *types.ServerStatistics // statistics about this remote server
incomingPDUs chan *gomatrixserverlib.HeaderedEvent // PDUs to send
incomingEDUs chan *gomatrixserverlib.EDU // EDUs to send
incomingInvites chan *gomatrixserverlib.InviteV2Request // invites to send
lastTransactionIDs []gomatrixserverlib.TransactionID // last transaction ID
pendingPDUs []*gomatrixserverlib.HeaderedEvent // owned by backgroundSend
pendingEDUs []*gomatrixserverlib.EDU // owned by backgroundSend
pendingInvites []*gomatrixserverlib.InviteV2Request // owned by backgroundSend
}
// Send event adds the event to the pending queue for the destination.
// If the queue is empty then it starts a background goroutine to
// start sending events to that destination.
func (oq *destinationQueue) sendEvent(ev *gomatrixserverlib.HeaderedEvent) {
oq.runningMutex.Lock()
defer oq.runningMutex.Unlock()
oq.pendingEvents = append(oq.pendingEvents, ev)
if oq.statistics.Blacklisted() {
// If the destination is blacklisted then drop the event.
return
}
if !oq.running.Load() {
go oq.backgroundSend()
}
oq.incomingPDUs <- ev
}
// sendEDU adds the EDU event to the pending queue for the destination.
// If the queue is empty then it starts a background goroutine to
// start sending events to that destination.
func (oq *destinationQueue) sendEDU(e *gomatrixserverlib.EDU) {
oq.runningMutex.Lock()
defer oq.runningMutex.Unlock()
oq.pendingEDUs = append(oq.pendingEDUs, e)
func (oq *destinationQueue) sendEDU(ev *gomatrixserverlib.EDU) {
if oq.statistics.Blacklisted() {
// If the destination is blacklisted then drop the event.
return
}
if !oq.running.Load() {
go oq.backgroundSend()
}
oq.incomingEDUs <- ev
}
// sendInvite adds the invite event to the pending queue for the
// destination. If the queue is empty then it starts a background
// goroutine to start sending events to that destination.
func (oq *destinationQueue) sendInvite(ev *gomatrixserverlib.InviteV2Request) {
oq.runningMutex.Lock()
defer oq.runningMutex.Unlock()
oq.pendingInvites = append(oq.pendingInvites, ev)
if oq.statistics.Blacklisted() {
// If the destination is blacklisted then drop the event.
return
}
if !oq.running.Load() {
go oq.backgroundSend()
}
oq.incomingInvites <- ev
}
// backgroundSend is the worker goroutine for sending events.
// nolint:gocyclo
func (oq *destinationQueue) backgroundSend() {
oq.running.Store(true)
// Check if a worker is already running, and if it isn't, then
// mark it as started.
if !oq.running.CAS(false, true) {
return
}
defer oq.running.Store(false)
for {
transaction, invites := oq.nextTransaction(), oq.nextInvites()
if !transaction && !invites {
// If the queue is empty then stop processing for this destination.
// TODO: Remove this destination from the queue map.
// Wait either for incoming events, or until we hit an
// idle timeout.
select {
case pdu := <-oq.incomingPDUs:
// Ordering of PDUs is important so we add them to the end
// of the queue and they will all be added to transactions
// in order.
oq.pendingPDUs = append(oq.pendingPDUs, pdu)
case edu := <-oq.incomingEDUs:
// Likewise for EDUs, although we should probably not try
// too hard with some EDUs (like typing notifications) after
// a certain amount of time has passed.
// TODO: think about EDU expiry some more
oq.pendingEDUs = append(oq.pendingEDUs, edu)
case invite := <-oq.incomingInvites:
// There's no strict ordering requirement for invites like
// there is for transactions, so we put the invite onto the
// front of the queue. This means that if an invite is already
// stuck failing, it won't block our new invite from
// being sent.
oq.pendingInvites = append(
[]*gomatrixserverlib.InviteV2Request{invite},
oq.pendingInvites...,
)
case <-time.After(time.Second * 30):
// The worker is idle so stop the goroutine. It'll
// get restarted automatically the next time we
// get an event.
return
}
// TODO: handle retries.
// TODO: blacklist uncooperative servers.
// If we are backing off this server then wait for the
// backoff duration to complete first.
if backoff, duration := oq.statistics.BackoffDuration(); backoff {
<-time.After(duration)
}
// How many things do we have waiting?
numPDUs := len(oq.pendingPDUs)
numEDUs := len(oq.pendingEDUs)
numInvites := len(oq.pendingInvites)
// If we have pending PDUs or EDUs then construct a transaction.
if numPDUs > 0 || numEDUs > 0 {
// Try sending the next transaction and see what happens.
transaction, terr := oq.nextTransaction(oq.pendingPDUs, oq.pendingEDUs, oq.statistics.SuccessCount())
if terr != nil {
// We failed to send the transaction.
if giveUp := oq.statistics.Failure(); giveUp {
// It's been suggested that we should give up because
// the backoff has exceeded a maximum allowable value.
return
}
} else if transaction {
// If we successfully sent the transaction then clear out
// the pending events and EDUs.
oq.statistics.Success()
// Reallocate so that the underlying arrays can be GC'd, as
// opposed to growing forever.
for i := 0; i < numPDUs; i++ {
oq.pendingPDUs[i] = nil
}
for i := 0; i < numEDUs; i++ {
oq.pendingEDUs[i] = nil
}
oq.pendingPDUs = append(
[]*gomatrixserverlib.HeaderedEvent{},
oq.pendingPDUs[numPDUs:]...,
)
oq.pendingEDUs = append(
[]*gomatrixserverlib.EDU{},
oq.pendingEDUs[numEDUs:]...,
)
}
}
// Try sending the next invite and see what happens.
if numInvites > 0 {
sent, ierr := oq.nextInvites(oq.pendingInvites)
if ierr != nil {
// We failed to send the invites so increase the
// backoff and give it another go shortly.
if giveUp := oq.statistics.Failure(); giveUp {
// It's been suggested that we should give up because
// the backoff has exceeded a maximum allowable value.
return
}
} else if sent > 0 {
// If we successfully sent the invites then clear out
// the pending invites.
oq.statistics.Success()
// Reallocate so that the underlying array can be GC'd, as
// opposed to growing forever.
oq.pendingInvites = append(
[]*gomatrixserverlib.InviteV2Request{},
oq.pendingInvites[sent:]...,
)
}
}
}
}
// nextTransaction creates a new transaction from the pending event
// queue and sends it. Returns true if a transaction was sent or
// false otherwise.
func (oq *destinationQueue) nextTransaction() bool {
oq.runningMutex.Lock()
defer oq.runningMutex.Unlock()
if len(oq.pendingEvents) == 0 && len(oq.pendingEDUs) == 0 {
return false
}
func (oq *destinationQueue) nextTransaction(
pendingPDUs []*gomatrixserverlib.HeaderedEvent,
pendingEDUs []*gomatrixserverlib.EDU,
sentCounter uint32,
) (bool, error) {
t := gomatrixserverlib.Transaction{
PDUs: []json.RawMessage{},
EDUs: []gomatrixserverlib.EDU{},
}
now := gomatrixserverlib.AsTimestamp(time.Now())
t.TransactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, oq.sentCounter))
t.TransactionID = gomatrixserverlib.TransactionID(fmt.Sprintf("%d-%d", now, sentCounter))
t.Origin = oq.origin
t.Destination = oq.destination
t.OriginServerTS = now
@@ -129,44 +228,54 @@ func (oq *destinationQueue) nextTransaction() bool {
oq.lastTransactionIDs = []gomatrixserverlib.TransactionID{t.TransactionID}
for _, pdu := range oq.pendingEvents {
for _, pdu := range pendingPDUs {
// Append the JSON of the event, since this is a json.RawMessage type in the
// gomatrixserverlib.Transaction struct
t.PDUs = append(t.PDUs, (*pdu).JSON())
}
oq.pendingEvents = nil
oq.sentCounter += len(t.PDUs)
for _, edu := range oq.pendingEDUs {
for _, edu := range pendingEDUs {
t.EDUs = append(t.EDUs, *edu)
}
oq.pendingEDUs = nil
oq.sentCounter += len(t.EDUs)
util.GetLogger(context.TODO()).Infof("Sending transaction %q containing %d PDUs, %d EDUs", t.TransactionID, len(t.PDUs), len(t.EDUs))
logrus.WithField("server_name", oq.destination).Infof("Sending transaction %q containing %d PDUs, %d EDUs", t.TransactionID, len(t.PDUs), len(t.EDUs))
// TODO: we should check for 500-ish fails vs 400-ish here,
// since we shouldn't queue things indefinitely in response
// to a 400-ish error
_, err := oq.client.SendTransaction(context.TODO(), t)
if err != nil {
switch e := err.(type) {
case nil:
// No error was returned so the transaction looks to have
// been successfully sent.
return true, nil
case gomatrix.HTTPError:
// We received an HTTP error back. We should only treat
// this as a failure (and retry) if it was not a 4xx
// client error.
if e.Code >= 400 && e.Code <= 499 {
// We tried but the remote side has sent back a client error.
// It's no use retrying because it will happen again.
return true, nil
}
// Otherwise, report that we failed to send the transaction
// and we will retry again.
return false, err
default:
log.WithFields(log.Fields{
"destination": oq.destination,
log.ErrorKey: err,
}).Info("problem sending transaction")
return false, err
}
return true
}
// nextInvite takes pending invite events from the queue and sends
// them. Returns true if a transaction was sent or false otherwise.
func (oq *destinationQueue) nextInvites() bool {
oq.runningMutex.Lock()
defer oq.runningMutex.Unlock()
if len(oq.pendingInvites) == 0 {
return false
}
for _, inviteReq := range oq.pendingInvites {
func (oq *destinationQueue) nextInvites(
pendingInvites []*gomatrixserverlib.InviteV2Request,
) (int, error) {
done := 0
for _, inviteReq := range pendingInvites {
ev, roomVersion := inviteReq.Event(), inviteReq.RoomVersion()
log.WithFields(log.Fields{
@@ -180,13 +289,32 @@
oq.destination,
*inviteReq,
)
if err != nil {
switch e := err.(type) {
case nil:
done++
case gomatrix.HTTPError:
log.WithFields(log.Fields{
"event_id": ev.EventID(),
"state_key": ev.StateKey(),
"destination": oq.destination,
"status_code": e.Code,
}).WithError(err).Error("failed to send invite due to HTTP error")
// Check whether we should do something about the error or
// just accept it as unavoidable.
if e.Code >= 400 && e.Code <= 499 {
// We tried but the remote side has sent back a client error.
// It's no use retrying because it will happen again.
done++
continue
}
return done, err
default:
log.WithFields(log.Fields{
"event_id": ev.EventID(),
"state_key": ev.StateKey(),
"destination": oq.destination,
}).WithError(err).Error("failed to send invite")
continue
return done, err
}
if _, err = oq.rsProducer.SendInviteResponse(
@@ -199,10 +327,9 @@
"state_key": ev.StateKey(),
"destination": oq.destination,
}).WithError(err).Error("failed to return signed invite to roomserver")
return done, err
}
}
oq.pendingInvites = nil
return true
return done, nil
}
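
For reference, a minimal standalone sketch (not Dendrite code; the names are illustrative) of the worker pattern that the remodelled destinationQueue uses above: sends go over buffered channels, a compare-and-swap on an atomic bool ensures at most one worker goroutine runs per destination, and the worker exits after an idle timeout and is restarted by the next send.

package main

import (
	"fmt"
	"time"

	"go.uber.org/atomic"
)

type queue struct {
	running  atomic.Bool // is a worker goroutine currently running?
	incoming chan string // items submitted by callers
	pending  []string    // owned by the worker goroutine only
}

func (q *queue) send(item string) {
	if !q.running.Load() {
		go q.worker()
	}
	q.incoming <- item
}

func (q *queue) worker() {
	// Only one worker may run at a time; losing the CAS race is harmless.
	if !q.running.CAS(false, true) {
		return
	}
	defer q.running.Store(false)
	for {
		select {
		case item := <-q.incoming:
			q.pending = append(q.pending, item)
		case <-time.After(30 * time.Second):
			// Idle: stop the goroutine. It restarts on the next send.
			return
		}
		// A real implementation would build and send a transaction here,
		// backing off on failure. We just "send" and clear the queue.
		fmt.Printf("would send %d pending item(s)\n", len(q.pending))
		q.pending = q.pending[:0]
	}
}

func main() {
	q := &queue{incoming: make(chan string, 128)}
	q.send("event1")
	q.send("event2")
	time.Sleep(100 * time.Millisecond) // let the worker drain the channel
}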

View File

@@ -19,18 +19,20 @@ import (
"sync"
"github.com/matrix-org/dendrite/federationsender/producers"
"github.com/matrix-org/dendrite/federationsender/types"
"github.com/matrix-org/gomatrixserverlib"
"github.com/matrix-org/util"
log "github.com/sirupsen/logrus"
)
// OutgoingQueues is a collection of queues for sending transactions to other
// matrix servers
type OutgoingQueues struct {
rsProducer *producers.RoomserverProducer
origin gomatrixserverlib.ServerName
client *gomatrixserverlib.FederationClient
// The queuesMutex protects queues
queuesMutex sync.Mutex
rsProducer *producers.RoomserverProducer
origin gomatrixserverlib.ServerName
client *gomatrixserverlib.FederationClient
statistics *types.Statistics
queuesMutex sync.Mutex // protects the below
queues map[gomatrixserverlib.ServerName]*destinationQueue
}
@@ -39,15 +41,37 @@ func NewOutgoingQueues(
origin gomatrixserverlib.ServerName,
client *gomatrixserverlib.FederationClient,
rsProducer *producers.RoomserverProducer,
statistics *types.Statistics,
) *OutgoingQueues {
return &OutgoingQueues{
rsProducer: rsProducer,
origin: origin,
client: client,
statistics: statistics,
queues: map[gomatrixserverlib.ServerName]*destinationQueue{},
}
}
func (oqs *OutgoingQueues) getQueue(destination gomatrixserverlib.ServerName) *destinationQueue {
oqs.queuesMutex.Lock()
defer oqs.queuesMutex.Unlock()
oq := oqs.queues[destination]
if oq == nil {
oq = &destinationQueue{
rsProducer: oqs.rsProducer,
origin: oqs.origin,
destination: destination,
client: oqs.client,
statistics: oqs.statistics.ForServer(destination),
incomingPDUs: make(chan *gomatrixserverlib.HeaderedEvent, 128),
incomingEDUs: make(chan *gomatrixserverlib.EDU, 128),
incomingInvites: make(chan *gomatrixserverlib.InviteV2Request, 128),
}
oqs.queues[destination] = oq
}
return oq
}
// SendEvent sends an event to the destinations
func (oqs *OutgoingQueues) SendEvent(
ev *gomatrixserverlib.HeaderedEvent, origin gomatrixserverlib.ServerName,
@@ -62,27 +86,14 @@ func (oqs *OutgoingQueues) SendEvent(
}
// Remove our own server from the list of destinations.
destinations = filterDestinations(oqs.origin, destinations)
destinations = filterAndDedupeDests(oqs.origin, destinations)
log.WithFields(log.Fields{
"destinations": destinations, "event": ev.EventID(),
}).Info("Sending event")
oqs.queuesMutex.Lock()
defer oqs.queuesMutex.Unlock()
for _, destination := range destinations {
oq := oqs.queues[destination]
if oq == nil {
oq = &destinationQueue{
rsProducer: oqs.rsProducer,
origin: oqs.origin,
destination: destination,
client: oqs.client,
}
oqs.queues[destination] = oq
}
oq.sendEvent(ev)
oqs.getQueue(destination).sendEvent(ev)
}
return nil
@@ -111,23 +122,11 @@ func (oqs *OutgoingQueues) SendInvite(
}
log.WithFields(log.Fields{
"event_id": ev.EventID(),
"event_id": ev.EventID(),
"server_name": destination,
}).Info("Sending invite")
oqs.queuesMutex.Lock()
defer oqs.queuesMutex.Unlock()
oq := oqs.queues[destination]
if oq == nil {
oq = &destinationQueue{
rsProducer: oqs.rsProducer,
origin: oqs.origin,
destination: destination,
client: oqs.client,
}
oqs.queues[destination] = oq
}
oq.sendInvite(inviteReq)
oqs.getQueue(destination).sendInvite(inviteReq)
return nil
}
@@ -146,7 +145,7 @@ func (oqs *OutgoingQueues) SendEDU(
}
// Remove our own server from the list of destinations.
destinations = filterDestinations(oqs.origin, destinations)
destinations = filterAndDedupeDests(oqs.origin, destinations)
if len(destinations) > 0 {
log.WithFields(log.Fields{
@@ -154,35 +153,27 @@
}).Info("Sending EDU event")
}
oqs.queuesMutex.Lock()
defer oqs.queuesMutex.Unlock()
for _, destination := range destinations {
oq := oqs.queues[destination]
if oq == nil {
oq = &destinationQueue{
rsProducer: oqs.rsProducer,
origin: oqs.origin,
destination: destination,
client: oqs.client,
}
oqs.queues[destination] = oq
}
oq.sendEDU(e)
oqs.getQueue(destination).sendEDU(e)
}
return nil
}
// filterDestinations removes our own server from the list of destinations.
// Otherwise we could end up trying to talk to ourselves.
func filterDestinations(origin gomatrixserverlib.ServerName, destinations []gomatrixserverlib.ServerName) []gomatrixserverlib.ServerName {
var result []gomatrixserverlib.ServerName
for _, destination := range destinations {
if destination == origin {
// filterAndDedupeDests removes our own server from the list of destinations
// and deduplicates any servers in the list that may appear more than once.
func filterAndDedupeDests(origin gomatrixserverlib.ServerName, destinations []gomatrixserverlib.ServerName) (
result []gomatrixserverlib.ServerName,
) {
strs := make([]string, len(destinations))
for i, d := range destinations {
strs[i] = string(d)
}
for _, destination := range util.UniqueStrings(strs) {
if gomatrixserverlib.ServerName(destination) == origin {
continue
}
result = append(result, destination)
result = append(result, gomatrixserverlib.ServerName(destination))
}
return result
}

View File

@@ -0,0 +1,122 @@
package types
import (
"math"
"sync"
"time"
"github.com/matrix-org/gomatrixserverlib"
"go.uber.org/atomic"
)
const (
// How many times should we tolerate consecutive failures before we
// just blacklist the host altogether? Bear in mind that the backoff
// is exponential, so the max time here to attempt is 2**failures.
FailuresUntilBlacklist = 16 // 16 equates to roughly 18 hours.
)
// Statistics contains information about all of the remote federated
// hosts that we have interacted with. It is a threadsafe wrapper
// around a map of per-server statistics.
type Statistics struct {
servers map[gomatrixserverlib.ServerName]*ServerStatistics
mutex sync.RWMutex
}
// ForServer returns server statistics for the given server name. If it
// does not exist, it will create empty statistics and return those.
func (s *Statistics) ForServer(serverName gomatrixserverlib.ServerName) *ServerStatistics {
// If the map hasn't been initialised yet then do that.
if s.servers == nil {
s.mutex.Lock()
s.servers = make(map[gomatrixserverlib.ServerName]*ServerStatistics)
s.mutex.Unlock()
}
// Look up if we have statistics for this server already.
s.mutex.RLock()
server, found := s.servers[serverName]
s.mutex.RUnlock()
// If we don't, then make one.
if !found {
s.mutex.Lock()
server = &ServerStatistics{}
s.servers[serverName] = server
s.mutex.Unlock()
}
return server
}
// ServerStatistics contains information about our interactions with a
// remote federated host, e.g. how many times we were successful, how
// many times we failed etc. It also manages the backoff time and black-
// listing a remote host if it remains uncooperative.
type ServerStatistics struct {
blacklisted atomic.Bool // is the remote side dead?
backoffUntil atomic.Value // time.Time to wait until before sending requests
failCounter atomic.Uint32 // how many times have we failed?
successCounter atomic.Uint32 // how many times have we succeeded?
}
// Success updates the server statistics with a new successful
// attempt, which increases the success counter and resets the
// failure counter. If a host was blacklisted at this point then
// we will unblacklist it.
func (s *ServerStatistics) Success() {
s.successCounter.Add(1)
s.failCounter.Store(0)
s.blacklisted.Store(false)
}
// Failure marks a failure and works out when to backoff until. It
// returns true if the worker should give up altogether because of
// too many consecutive failures. At this point the host is marked
// as blacklisted.
func (s *ServerStatistics) Failure() bool {
// Increase the fail counter.
failCounter := s.failCounter.Add(1)
// Check that we haven't failed more times than is acceptable.
if failCounter >= FailuresUntilBlacklist {
// We've exceeded the maximum amount of times we're willing
// to back off, which is probably in the region of hours by
// now. Mark the host as blacklisted and tell the caller to
// give up.
s.blacklisted.Store(true)
return true
}
// We're still under the threshold so work out the exponential
// backoff based on how many times we have failed already. The
// worker goroutine will wait until this time before processing
// anything from the queue.
backoffSeconds := time.Second * time.Duration(math.Exp2(float64(failCounter)))
s.backoffUntil.Store(
time.Now().Add(backoffSeconds),
)
return false
}
// BackoffDuration returns both a bool stating whether to wait,
// and then if true, a duration to wait for.
func (s *ServerStatistics) BackoffDuration() (bool, time.Duration) {
backoff, until := false, time.Second
if b, ok := s.backoffUntil.Load().(time.Time); ok {
if b.After(time.Now()) {
backoff, until = true, time.Until(b)
}
}
return backoff, until
}
// Blacklisted returns true if the server is blacklisted and false
// otherwise.
func (s *ServerStatistics) Blacklisted() bool {
return s.blacklisted.Load()
}
// SuccessCount returns the number of successful requests. This is
// usually useful in constructing transaction IDs.
func (s *ServerStatistics) SuccessCount() uint32 {
return s.successCounter.Load()
}
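
The following is a hedged usage sketch of the Statistics API above, with a hypothetical trySend function standing in for a real federation request. The flow mirrors what the destination queue does: wait out any backoff, attempt the request, then record the outcome so that repeated failures back off exponentially and eventually blacklist the host.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/matrix-org/dendrite/federationsender/types"
)

var attempts int

// trySend is a stand-in for a federation request; it fails twice and
// then succeeds, so the example exercises the backoff path briefly.
func trySend() error {
	attempts++
	if attempts < 3 {
		return errors.New("remote server unreachable")
	}
	return nil
}

func main() {
	statistics := &types.Statistics{}
	stats := statistics.ForServer("remote.example.com")
	for !stats.Blacklisted() {
		// Respect any exponential backoff set by earlier failures.
		if backoff, duration := stats.BackoffDuration(); backoff {
			time.Sleep(duration)
		}
		if err := trySend(); err != nil {
			// Failure returns true once FailuresUntilBlacklist is reached,
			// at which point the host is blacklisted and we give up.
			if giveUp := stats.Failure(); giveUp {
				fmt.Println("giving up: host is now blacklisted")
				return
			}
			continue
		}
		stats.Success()
		break
	}
	fmt.Println("sent after", attempts, "attempt(s); success count:", stats.SuccessCount())
}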

View File

@@ -58,15 +58,22 @@ func (r *RoomserverInternalAPI) InputRoomEvents(
// We lock as processRoomEvent can only be called once at a time
r.mutex.Lock()
defer r.mutex.Unlock()
for i := range request.InputInviteEvents {
var loopback *api.InputRoomEvent
if loopback, err = processInviteEvent(ctx, r.DB, r, request.InputInviteEvents[i]); err != nil {
return err
}
// The processInviteEvent function can optionally return a
// loopback room event containing the invite, for local invites.
// If it does, we should process it with the room events below.
if loopback != nil {
request.InputRoomEvents = append(request.InputRoomEvents, *loopback)
}
}
for i := range request.InputRoomEvents {
if response.EventID, err = processRoomEvent(ctx, r.DB, r, request.InputRoomEvents[i]); err != nil {
return err
}
}
for i := range request.InputInviteEvents {
if err = processInviteEvent(ctx, r.DB, r, request.InputInviteEvents[i]); err != nil {
return err
}
}
return nil
}

View File

@@ -18,6 +18,7 @@ package internal
import (
"context"
"errors"
"fmt"
"github.com/matrix-org/dendrite/common"
@@ -132,11 +133,11 @@ func calculateAndSetState(
func processInviteEvent(
ctx context.Context,
db storage.Database,
ow OutputRoomEventWriter,
ow *RoomserverInternalAPI,
input api.InputInviteEvent,
) (err error) {
) (*api.InputRoomEvent, error) {
if input.Event.StateKey() == nil {
return fmt.Errorf("invite must be a state event")
return nil, fmt.Errorf("invite must be a state event")
}
roomID := input.Event.RoomID()
@@ -151,7 +152,7 @@
updater, err := db.MembershipUpdater(ctx, roomID, targetUserID, input.RoomVersion)
if err != nil {
return err
return nil, err
}
succeeded := false
defer func() {
@@ -189,17 +190,27 @@
// For now we will implement option 2. Since in the absence of a retry
// mechanism it will be equivalent to option 1, and we don't have a
// signalling mechanism to implement option 3.
return nil
return nil, nil
}
// Normally, with a federated invite, the federation sender would do
// the /v2/invite request (in which the remote server signs the invite)
// and then the signed event gets sent back to the roomserver as an input
// event. When the invite is local, we don't interact with the federation
// sender therefore we need to generate the loopback invite event for
// the room ourselves.
loopback, err := localInviteLoopback(ow, input)
if err != nil {
return nil, err
}
event := input.Event.Unwrap()
if len(input.InviteRoomState) > 0 {
// If we were supplied with some invite room state already (which is
// most likely if the event came in over federation) then use
// that.
if err = event.SetUnsignedField("invite_room_state", input.InviteRoomState); err != nil {
return err
return nil, err
}
} else {
// There's no invite room state, so let's have a go at building it
@@ -208,22 +219,52 @@
// the invite room state, if we don't then we just fail quietly.
if irs, ierr := buildInviteStrippedState(ctx, db, input); ierr == nil {
if err = event.SetUnsignedField("invite_room_state", irs); err != nil {
return err
return nil, err
}
}
}
outputUpdates, err := updateToInviteMembership(updater, &event, nil, input.Event.RoomVersion)
if err != nil {
return err
return nil, err
}
if err = ow.WriteOutputEvents(roomID, outputUpdates); err != nil {
return err
return nil, err
}
succeeded = true
return nil
return loopback, nil
}
func localInviteLoopback(
ow *RoomserverInternalAPI,
input api.InputInviteEvent,
) (ire *api.InputRoomEvent, err error) {
if input.Event.StateKey() == nil {
return nil, errors.New("no state key on invite event")
}
ourServerName := string(ow.Cfg.Matrix.ServerName)
_, theirServerName, err := gomatrixserverlib.SplitID('@', *input.Event.StateKey())
if err != nil {
return nil, err
}
// Check if the invite originated locally and is destined locally.
if input.Event.Origin() == ow.Cfg.Matrix.ServerName && string(theirServerName) == ourServerName {
rsEvent := input.Event.Sign(
ourServerName,
ow.Cfg.Matrix.KeyID,
ow.Cfg.Matrix.PrivateKey,
).Headered(input.RoomVersion)
ire = &api.InputRoomEvent{
Kind: api.KindNew,
Event: rsEvent,
AuthEventIDs: rsEvent.AuthEventIDs(),
SendAsServer: ourServerName,
TransactionID: nil,
}
}
return ire, nil
}
func buildInviteStrippedState(