2020-07-20 16:55:20 +01:00
|
|
|
// Copyright 2020 The Matrix.org Foundation C.I.C.
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
|
|
package shared
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"database/sql"
|
|
|
|
"fmt"
|
2021-07-15 17:45:37 +01:00
|
|
|
"time"
|
2020-07-20 16:55:20 +01:00
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
"github.com/matrix-org/dendrite/federationapi/storage/shared/receipt"
|
2021-11-24 10:45:23 +00:00
|
|
|
"github.com/matrix-org/dendrite/federationapi/storage/tables"
|
|
|
|
"github.com/matrix-org/dendrite/federationapi/types"
|
2020-12-04 14:52:10 +00:00
|
|
|
"github.com/matrix-org/dendrite/internal/caching"
|
2020-07-20 16:55:20 +01:00
|
|
|
"github.com/matrix-org/dendrite/internal/sqlutil"
|
|
|
|
"github.com/matrix-org/gomatrixserverlib"
|
2023-04-19 15:50:33 +01:00
|
|
|
"github.com/matrix-org/gomatrixserverlib/spec"
|
2020-07-20 16:55:20 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
// Database wires together the individual federation storage tables with the
// shared SQL handle, writer and cache used by the federation API.
type Database struct {
	// DB is the underlying SQL database handle shared by every table accessor.
	DB *sql.DB
	// IsLocalServerName reports whether the given server name belongs to this
	// homeserver (used e.g. to exclude ourselves from joined-host lists).
	IsLocalServerName func(spec.ServerName) bool
	// Cache is the federation-specific cache layer.
	Cache caching.FederationCache
	// Writer serialises database writes; all mutations below go through it.
	Writer sqlutil.Writer
	// Per-table accessors, each wrapping the SQL statements for one table.
	FederationQueuePDUs tables.FederationQueuePDUs
	FederationQueueEDUs tables.FederationQueueEDUs
	FederationQueueJSON tables.FederationQueueJSON
	FederationJoinedHosts tables.FederationJoinedHosts
	FederationBlacklist tables.FederationBlacklist
	FederationAssumedOffline tables.FederationAssumedOffline
	FederationRelayServers tables.FederationRelayServers
	FederationOutboundPeeks tables.FederationOutboundPeeks
	FederationInboundPeeks tables.FederationInboundPeeks
	NotaryServerKeysJSON tables.FederationNotaryServerKeysJSON
	NotaryServerKeysMetadata tables.FederationNotaryServerKeysMetadata
	ServerSigningKeys tables.FederationServerSigningKeys
}
|
|
|
|
|
|
|
|
// UpdateRoom updates the joined hosts for a room and returns what the joined
|
|
|
|
// hosts were before the update, or nil if this was a duplicate message.
|
|
|
|
// This is called when we receive a message from kafka, so we pass in
|
|
|
|
// oldEventID and newEventID to check that we haven't missed any messages or
|
|
|
|
// this isn't a duplicate message.
|
|
|
|
func (d *Database) UpdateRoom(
|
|
|
|
ctx context.Context,
|
2022-05-17 13:23:35 +01:00
|
|
|
roomID string,
|
2020-07-20 16:55:20 +01:00
|
|
|
addHosts []types.JoinedHost,
|
|
|
|
removeHosts []string,
|
2022-05-17 13:23:35 +01:00
|
|
|
purgeRoomFirst bool,
|
2020-07-20 16:55:20 +01:00
|
|
|
) (joinedHosts []types.JoinedHost, err error) {
|
2020-08-21 10:42:08 +01:00
|
|
|
err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2022-05-17 13:23:35 +01:00
|
|
|
if purgeRoomFirst {
|
|
|
|
if err = d.FederationJoinedHosts.DeleteJoinedHostsForRoom(ctx, txn, roomID); err != nil {
|
|
|
|
return fmt.Errorf("d.FederationJoinedHosts.DeleteJoinedHosts: %w", err)
|
|
|
|
}
|
2022-10-10 16:54:04 +01:00
|
|
|
for _, add := range addHosts {
|
|
|
|
if err = d.FederationJoinedHosts.InsertJoinedHosts(ctx, txn, roomID, add.MemberEventID, add.ServerName); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
joinedHosts = append(joinedHosts, add)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if joinedHosts, err = d.FederationJoinedHosts.SelectJoinedHostsWithTx(ctx, txn, roomID); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, add := range addHosts {
|
|
|
|
if err = d.FederationJoinedHosts.InsertJoinedHosts(ctx, txn, roomID, add.MemberEventID, add.ServerName); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if err = d.FederationJoinedHosts.DeleteJoinedHosts(ctx, txn, removeHosts); err != nil {
|
2020-07-20 16:55:20 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2021-02-04 11:52:49 +00:00
|
|
|
return nil
|
2020-07-20 16:55:20 +01:00
|
|
|
})
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetJoinedHosts returns the currently joined hosts for room,
|
|
|
|
// as known to federationserver.
|
|
|
|
// Returns an error if something goes wrong.
|
|
|
|
func (d *Database) GetJoinedHosts(
|
|
|
|
ctx context.Context, roomID string,
|
|
|
|
) ([]types.JoinedHost, error) {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationJoinedHosts.SelectJoinedHosts(ctx, roomID)
|
2020-07-20 16:55:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetAllJoinedHosts returns the currently joined hosts for
|
|
|
|
// all rooms known to the federation sender.
|
|
|
|
// Returns an error if something goes wrong.
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) GetAllJoinedHosts(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
) ([]spec.ServerName, error) {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationJoinedHosts.SelectAllJoinedHosts(ctx)
|
2020-07-20 16:55:20 +01:00
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) GetJoinedHostsForRooms(
|
|
|
|
ctx context.Context,
|
|
|
|
roomIDs []string,
|
|
|
|
excludeSelf,
|
|
|
|
excludeBlacklisted bool,
|
2023-04-19 15:50:33 +01:00
|
|
|
) ([]spec.ServerName, error) {
|
2022-11-15 17:21:16 +00:00
|
|
|
servers, err := d.FederationJoinedHosts.SelectJoinedHostsForRooms(ctx, roomIDs, excludeBlacklisted)
|
2022-01-25 17:00:39 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if excludeSelf {
|
|
|
|
for i, server := range servers {
|
2022-10-26 12:59:19 +01:00
|
|
|
if d.IsLocalServerName(server) {
|
2022-11-16 15:10:33 +00:00
|
|
|
copy(servers[i:], servers[i+1:])
|
2022-11-16 09:39:19 +00:00
|
|
|
servers = servers[:len(servers)-1]
|
|
|
|
break
|
2022-01-25 17:00:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return servers, nil
|
2020-08-04 11:32:14 +01:00
|
|
|
}
|
|
|
|
|
2020-07-20 16:55:20 +01:00
|
|
|
// StoreJSON adds a JSON blob into the queue JSON table and returns
|
|
|
|
// a NID. The NID will then be used when inserting the per-destination
|
|
|
|
// metadata entries.
|
|
|
|
func (d *Database) StoreJSON(
|
|
|
|
ctx context.Context, js string,
|
2023-01-23 17:55:12 +00:00
|
|
|
) (*receipt.Receipt, error) {
|
2020-08-21 10:42:08 +01:00
|
|
|
var nid int64
|
|
|
|
var err error
|
|
|
|
_ = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
nid, err = d.FederationQueueJSON.InsertQueueJSON(ctx, txn, js)
|
2020-09-10 14:39:18 +01:00
|
|
|
return err
|
2020-08-21 10:42:08 +01:00
|
|
|
})
|
2020-07-20 16:55:20 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("d.insertQueueJSON: %w", err)
|
|
|
|
}
|
2023-01-23 17:55:12 +00:00
|
|
|
newReceipt := receipt.NewReceipt(nid)
|
|
|
|
return &newReceipt, nil
|
2020-07-20 16:55:20 +01:00
|
|
|
}
|
2020-07-22 17:01:29 +01:00
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) AddServerToBlacklist(
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) error {
|
2020-08-21 10:42:08 +01:00
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationBlacklist.InsertBlacklist(context.TODO(), txn, serverName)
|
2020-08-21 10:42:08 +01:00
|
|
|
})
|
2020-07-22 17:01:29 +01:00
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) RemoveServerFromBlacklist(
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) error {
|
2020-08-21 10:42:08 +01:00
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationBlacklist.DeleteBlacklist(context.TODO(), txn, serverName)
|
2020-08-21 10:42:08 +01:00
|
|
|
})
|
2020-07-22 17:01:29 +01:00
|
|
|
}
|
|
|
|
|
2021-05-24 11:43:24 +01:00
|
|
|
func (d *Database) RemoveAllServersFromBlacklist() error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationBlacklist.DeleteAllBlacklist(context.TODO(), txn)
|
2021-05-24 11:43:24 +01:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) IsServerBlacklisted(
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) (bool, error) {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationBlacklist.SelectBlacklist(context.TODO(), nil, serverName)
|
2020-07-22 17:01:29 +01:00
|
|
|
}
|
2021-01-22 14:55:08 +00:00
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) SetServerAssumedOffline(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
return d.FederationAssumedOffline.InsertAssumedOffline(ctx, txn, serverName)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) RemoveServerAssumedOffline(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
return d.FederationAssumedOffline.DeleteAssumedOffline(ctx, txn, serverName)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) RemoveAllServersAssumedOffline(
|
|
|
|
ctx context.Context,
|
|
|
|
) error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
return d.FederationAssumedOffline.DeleteAllAssumedOffline(ctx, txn)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) IsServerAssumedOffline(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) (bool, error) {
|
|
|
|
return d.FederationAssumedOffline.SelectAssumedOffline(ctx, nil, serverName)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) P2PAddRelayServersForServer(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
|
|
|
relayServers []spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
return d.FederationRelayServers.InsertRelayServers(ctx, txn, serverName, relayServers)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) P2PGetRelayServersForServer(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
|
|
|
) ([]spec.ServerName, error) {
|
2023-01-23 17:55:12 +00:00
|
|
|
return d.FederationRelayServers.SelectRelayServers(ctx, nil, serverName)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) P2PRemoveRelayServersForServer(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
|
|
|
relayServers []spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
return d.FederationRelayServers.DeleteRelayServers(ctx, txn, serverName, relayServers)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) P2PRemoveAllRelayServersForServer(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
) error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
return d.FederationRelayServers.DeleteAllRelayServers(ctx, txn, serverName)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) AddOutboundPeek(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
roomID string,
|
|
|
|
peekID string,
|
|
|
|
renewalInterval int64,
|
|
|
|
) error {
|
2021-01-22 14:55:08 +00:00
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationOutboundPeeks.InsertOutboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
|
2021-01-22 14:55:08 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) RenewOutboundPeek(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
roomID string,
|
|
|
|
peekID string,
|
|
|
|
renewalInterval int64,
|
|
|
|
) error {
|
2021-01-22 14:55:08 +00:00
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationOutboundPeeks.RenewOutboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
|
2021-01-22 14:55:08 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) GetOutboundPeek(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
roomID,
|
|
|
|
peekID string,
|
|
|
|
) (*types.OutboundPeek, error) {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationOutboundPeeks.SelectOutboundPeek(ctx, nil, serverName, roomID, peekID)
|
2021-01-22 14:55:08 +00:00
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) GetOutboundPeeks(
|
|
|
|
ctx context.Context,
|
|
|
|
roomID string,
|
|
|
|
) ([]types.OutboundPeek, error) {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationOutboundPeeks.SelectOutboundPeeks(ctx, nil, roomID)
|
2021-01-22 14:55:08 +00:00
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) AddInboundPeek(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
roomID string,
|
|
|
|
peekID string,
|
|
|
|
renewalInterval int64,
|
|
|
|
) error {
|
2021-01-22 14:55:08 +00:00
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationInboundPeeks.InsertInboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
|
2021-01-22 14:55:08 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) RenewInboundPeek(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
roomID string,
|
|
|
|
peekID string,
|
|
|
|
renewalInterval int64,
|
|
|
|
) error {
|
2021-01-22 14:55:08 +00:00
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationInboundPeeks.RenewInboundPeek(ctx, txn, serverName, roomID, peekID, renewalInterval)
|
2021-01-22 14:55:08 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) GetInboundPeek(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
roomID string,
|
|
|
|
peekID string,
|
|
|
|
) (*types.InboundPeek, error) {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationInboundPeeks.SelectInboundPeek(ctx, nil, serverName, roomID, peekID)
|
2021-01-22 14:55:08 +00:00
|
|
|
}
|
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) GetInboundPeeks(
|
|
|
|
ctx context.Context,
|
|
|
|
roomID string,
|
|
|
|
) ([]types.InboundPeek, error) {
|
2021-11-24 10:45:23 +00:00
|
|
|
return d.FederationInboundPeeks.SelectInboundPeeks(ctx, nil, roomID)
|
2021-01-22 14:55:08 +00:00
|
|
|
}
|
2021-07-15 17:45:37 +01:00
|
|
|
|
2023-01-23 17:55:12 +00:00
|
|
|
func (d *Database) UpdateNotaryKeys(
|
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
serverKeys gomatrixserverlib.ServerKeys,
|
|
|
|
) error {
|
2021-07-15 17:45:37 +01:00
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
validUntil := serverKeys.ValidUntilTS
|
|
|
|
// Servers MUST use the lesser of this field and 7 days into the future when determining if a key is valid.
|
|
|
|
// This is to avoid a situation where an attacker publishes a key which is valid for a significant amount of
|
|
|
|
// time without a way for the homeserver owner to revoke it.
|
|
|
|
// https://spec.matrix.org/unstable/server-server-api/#querying-keys-through-another-server
|
|
|
|
weekIntoFuture := time.Now().Add(7 * 24 * time.Hour)
|
|
|
|
if weekIntoFuture.Before(validUntil.Time()) {
|
2023-04-19 15:50:33 +01:00
|
|
|
validUntil = spec.AsTimestamp(weekIntoFuture)
|
2021-07-15 17:45:37 +01:00
|
|
|
}
|
|
|
|
notaryID, err := d.NotaryServerKeysJSON.InsertJSONResponse(ctx, txn, serverKeys, serverName, validUntil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// update the metadata for the keys
|
|
|
|
for keyID := range serverKeys.OldVerifyKeys {
|
|
|
|
_, err = d.NotaryServerKeysMetadata.UpsertKey(ctx, txn, serverName, keyID, notaryID, validUntil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for keyID := range serverKeys.VerifyKeys {
|
|
|
|
_, err = d.NotaryServerKeysMetadata.UpsertKey(ctx, txn, serverName, keyID, notaryID, validUntil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// clean up old responses
|
|
|
|
return d.NotaryServerKeysMetadata.DeleteOldJSONResponses(ctx, txn)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (d *Database) GetNotaryKeys(
|
2023-01-23 17:55:12 +00:00
|
|
|
ctx context.Context,
|
2023-04-19 15:50:33 +01:00
|
|
|
serverName spec.ServerName,
|
2023-01-23 17:55:12 +00:00
|
|
|
optKeyIDs []gomatrixserverlib.KeyID,
|
2021-07-15 17:45:37 +01:00
|
|
|
) (sks []gomatrixserverlib.ServerKeys, err error) {
|
|
|
|
err = d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
sks, err = d.NotaryServerKeysMetadata.SelectKeys(ctx, txn, serverName, optKeyIDs)
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
return sks, err
|
|
|
|
}
|
2023-01-19 20:02:32 +00:00
|
|
|
|
|
|
|
func (d *Database) PurgeRoom(ctx context.Context, roomID string) error {
|
|
|
|
return d.Writer.Do(d.DB, nil, func(txn *sql.Tx) error {
|
|
|
|
if err := d.FederationJoinedHosts.DeleteJoinedHostsForRoom(ctx, txn, roomID); err != nil {
|
|
|
|
return fmt.Errorf("failed to purge joined hosts: %w", err)
|
|
|
|
}
|
|
|
|
if err := d.FederationInboundPeeks.DeleteInboundPeeks(ctx, txn, roomID); err != nil {
|
|
|
|
return fmt.Errorf("failed to purge inbound peeks: %w", err)
|
|
|
|
}
|
|
|
|
if err := d.FederationOutboundPeeks.DeleteOutboundPeeks(ctx, txn, roomID); err != nil {
|
|
|
|
return fmt.Errorf("failed to purge outbound peeks: %w", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|