2017-04-20 23:40:52 +01:00
|
|
|
// Copyright 2017 Vector Creations Ltd
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2017-04-07 14:32:42 +01:00
|
|
|
package sync
|
|
|
|
|
|
|
|
import (
|
|
|
|
"net/http"
|
|
|
|
"time"
|
|
|
|
|
2017-05-23 17:43:05 +01:00
|
|
|
"github.com/matrix-org/dendrite/clientapi/auth/authtypes"
|
2017-07-26 14:53:11 +01:00
|
|
|
"github.com/matrix-org/dendrite/clientapi/auth/storage/accounts"
|
2017-04-07 14:32:42 +01:00
|
|
|
"github.com/matrix-org/dendrite/clientapi/httputil"
|
2017-04-10 15:12:18 +01:00
|
|
|
"github.com/matrix-org/dendrite/clientapi/jsonerror"
|
2017-04-20 17:22:44 +01:00
|
|
|
"github.com/matrix-org/dendrite/syncapi/storage"
|
|
|
|
"github.com/matrix-org/dendrite/syncapi/types"
|
2020-01-10 12:11:44 +00:00
|
|
|
"github.com/matrix-org/gomatrix"
|
2017-07-26 14:53:11 +01:00
|
|
|
"github.com/matrix-org/gomatrixserverlib"
|
2017-04-07 14:32:42 +01:00
|
|
|
"github.com/matrix-org/util"
|
2017-10-26 11:34:54 +01:00
|
|
|
log "github.com/sirupsen/logrus"
|
2017-04-07 14:32:42 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
// RequestPool manages HTTP long-poll connections for /sync
type RequestPool struct {
	// db is the sync API's own storage, used to compute complete and
	// incremental sync responses.
	db storage.Database
	// accountDB provides per-user and per-room account data to merge
	// into /sync responses.
	accountDB *accounts.Database
	// notifier tracks the current sync position and wakes waiting
	// /sync requests when new data may be available.
	notifier *Notifier
}
|
|
|
|
|
|
|
|
// NewRequestPool makes a new RequestPool
|
2020-01-03 14:07:05 +00:00
|
|
|
func NewRequestPool(db storage.Database, n *Notifier, adb *accounts.Database) *RequestPool {
|
2017-07-26 14:53:11 +01:00
|
|
|
return &RequestPool{db, adb, n}
|
2017-04-07 14:32:42 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// OnIncomingSyncRequest is called when a client makes a /sync request. This function MUST be
// called in a dedicated goroutine for this request. This function will block the goroutine
// until a response is ready, or it times out.
func (rp *RequestPool) OnIncomingSyncRequest(req *http.Request, device *authtypes.Device) util.JSONResponse {
	var syncData *types.Response

	// Extract values from request
	logger := util.GetLogger(req.Context())
	userID := device.UserID
	syncReq, err := newSyncRequest(req, *device)
	if err != nil {
		// Malformed query parameters (e.g. bad "since" token) are the
		// client's fault: respond 400 rather than 500.
		return util.JSONResponse{
			Code: http.StatusBadRequest,
			JSON: jsonerror.Unknown(err.Error()),
		}
	}
	logger.WithFields(log.Fields{
		"userID":  userID,
		"since":   syncReq.since,
		"timeout": syncReq.timeout,
	}).Info("Incoming /sync request")

	// Snapshot the stream position before deciding whether to long-poll, so
	// we don't miss events that arrive while we compute the response.
	currPos := rp.notifier.CurrentPosition()

	// Initial sync, timeout=0 or full_state=true: compute and return a
	// response right away without waiting for new activity.
	if shouldReturnImmediately(syncReq) {
		syncData, err = rp.currentSyncForUser(*syncReq, currPos)
		if err != nil {
			return httputil.LogThenError(req, err)
		}
		return util.JSONResponse{
			Code: http.StatusOK,
			JSON: syncData,
		}
	}

	// Otherwise, we wait for the notifier to tell us if something *may* have
	// happened. We loop in case it turns out that nothing did happen.

	timer := time.NewTimer(syncReq.timeout) // case of timeout=0 is handled above
	defer timer.Stop()

	userStreamListener := rp.notifier.GetListener(*syncReq)
	defer userStreamListener.Close()

	// We need the loop in case userStreamListener wakes up even if there isn't
	// anything to send down. In this case, we'll jump out of the select but
	// don't want to send anything back until we get some actual content to
	// respond with, so we skip the return and go back to waiting for content to
	// be sent down or the request timing out.
	var hasTimedOut bool
	// Dereference is safe: shouldReturnImmediately returned false above, so
	// syncReq.since is non-nil here.
	sincePos := *syncReq.since
	for {
		select {
		// Wait for notifier to wake us up
		case <-userStreamListener.GetNotifyChannel(sincePos):
			// Advance both the response position and the wait position so the
			// next GetNotifyChannel call doesn't fire for data we already have.
			currPos = userStreamListener.GetSyncPosition()
			sincePos = currPos
		// Or for timeout to expire
		case <-timer.C:
			// We just need to ensure we get out of the select after reaching the
			// timeout, but there's nothing specific we want to do in this case
			// apart from that, so we do nothing except stating we're timing out
			// and need to respond.
			hasTimedOut = true
		// Or for the request to be cancelled
		case <-req.Context().Done():
			return httputil.LogThenError(req, req.Context().Err())
		}

		// Note that we don't time out during calculation of sync
		// response. This ensures that we don't waste the hard work
		// of calculating the sync only to get timed out before we
		// can respond

		syncData, err = rp.currentSyncForUser(*syncReq, currPos)
		if err != nil {
			return httputil.LogThenError(req, err)
		}

		// On timeout we must reply even with an empty response, per the
		// long-poll contract; otherwise keep looping until there is content.
		if !syncData.IsEmpty() || hasTimedOut {
			return util.JSONResponse{
				Code: http.StatusOK,
				JSON: syncData,
			}
		}
	}
}
|
|
|
|
|
2019-07-12 15:59:53 +01:00
|
|
|
func (rp *RequestPool) currentSyncForUser(req syncRequest, latestPos types.SyncPosition) (res *types.Response, err error) {
|
2017-04-11 11:52:26 +01:00
|
|
|
// TODO: handle ignored users
|
2017-11-22 09:51:12 +00:00
|
|
|
if req.since == nil {
|
2017-12-15 15:42:55 +00:00
|
|
|
res, err = rp.db.CompleteSync(req.ctx, req.device.UserID, req.limit)
|
2017-10-16 13:34:08 +01:00
|
|
|
} else {
|
2019-08-01 05:36:13 +01:00
|
|
|
res, err = rp.db.IncrementalSync(req.ctx, req.device, *req.since, latestPos, req.limit, req.wantFullState)
|
2017-04-11 11:52:26 +01:00
|
|
|
}
|
2017-10-16 13:34:08 +01:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-01-10 12:11:44 +00:00
|
|
|
accountDataFilter := gomatrix.DefaultFilterPart() // TODO: use filter provided in req instead
|
2019-08-08 06:10:42 +01:00
|
|
|
res, err = rp.appendAccountData(res, req.device.UserID, req, latestPos.PDUPosition, &accountDataFilter)
|
2017-10-16 13:34:08 +01:00
|
|
|
return
|
2017-04-07 14:32:42 +01:00
|
|
|
}
|
2017-07-26 14:53:11 +01:00
|
|
|
|
2017-08-02 16:21:35 +01:00
|
|
|
func (rp *RequestPool) appendAccountData(
|
2019-07-12 15:59:53 +01:00
|
|
|
data *types.Response, userID string, req syncRequest, currentPos int64,
|
2020-01-10 12:11:44 +00:00
|
|
|
accountDataFilter *gomatrix.FilterPart,
|
2017-08-02 16:21:35 +01:00
|
|
|
) (*types.Response, error) {
|
2017-09-22 11:34:54 +01:00
|
|
|
// TODO: Account data doesn't have a sync position of its own, meaning that
|
|
|
|
// account data might be sent multiple time to the client if multiple account
|
|
|
|
// data keys were set between two message. This isn't a huge issue since the
|
|
|
|
// duplicate data doesn't represent a huge quantity of data, but an optimisation
|
|
|
|
// here would be making sure each data is sent only once to the client.
|
2017-07-26 14:53:11 +01:00
|
|
|
localpart, _, err := gomatrixserverlib.SplitID('@', userID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-11-22 09:51:12 +00:00
|
|
|
if req.since == nil {
|
2017-08-02 16:21:35 +01:00
|
|
|
// If this is the initial sync, we don't need to check if a data has
|
|
|
|
// already been sent. Instead, we send the whole batch.
|
|
|
|
var global []gomatrixserverlib.ClientEvent
|
|
|
|
var rooms map[string][]gomatrixserverlib.ClientEvent
|
2017-09-18 14:15:27 +01:00
|
|
|
global, rooms, err = rp.accountDB.GetAccountData(req.ctx, localpart)
|
2017-08-02 16:21:35 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
data.AccountData.Events = global
|
|
|
|
|
|
|
|
for r, j := range data.Rooms.Join {
|
|
|
|
if len(rooms[r]) > 0 {
|
|
|
|
j.AccountData.Events = rooms[r]
|
|
|
|
data.Rooms.Join[r] = j
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sync is not initial, get all account data since the latest sync
|
2019-08-08 06:10:42 +01:00
|
|
|
dataTypes, err := rp.db.GetAccountDataInRange(req.ctx, userID, req.since.PDUPosition, currentPos, accountDataFilter)
|
2017-07-26 14:53:11 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-08-02 16:21:35 +01:00
|
|
|
if len(dataTypes) == 0 {
|
|
|
|
return data, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate over the rooms
|
|
|
|
for roomID, dataTypes := range dataTypes {
|
|
|
|
events := []gomatrixserverlib.ClientEvent{}
|
|
|
|
// Request the missing data from the database
|
|
|
|
for _, dataType := range dataTypes {
|
2019-09-30 17:25:04 +01:00
|
|
|
event, err := rp.accountDB.GetAccountDataByType(
|
2017-09-18 14:15:27 +01:00
|
|
|
req.ctx, localpart, roomID, dataType,
|
|
|
|
)
|
2017-08-02 16:21:35 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-09-30 17:25:04 +01:00
|
|
|
events = append(events, *event)
|
2017-08-02 16:21:35 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Append the data to the response
|
|
|
|
if len(roomID) > 0 {
|
|
|
|
jr := data.Rooms.Join[roomID]
|
|
|
|
jr.AccountData.Events = events
|
|
|
|
data.Rooms.Join[roomID] = jr
|
|
|
|
} else {
|
|
|
|
data.AccountData.Events = events
|
2017-07-26 14:53:11 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return data, nil
|
|
|
|
}
|
2019-08-01 05:36:13 +01:00
|
|
|
|
|
|
|
// shouldReturnImmediately returns whether the /sync request is an initial sync,
|
|
|
|
// or timeout=0, or full_state=true, in any of the cases the request should
|
|
|
|
// return immediately.
|
|
|
|
func shouldReturnImmediately(syncReq *syncRequest) bool {
|
|
|
|
return syncReq.since == nil || syncReq.timeout == 0 || syncReq.wantFullState
|
|
|
|
}
|