Use LimitReader to prevent DoS risk (#1843)
* Use LimitReader to prevent DoS risk
  Signed-off-by: Till Faelligen <tfaelligen@gmail.com>
* Check if bytesWritten is equal to the maxFileSize
  Add tests
  Signed-off-by: Till Faelligen <tfaelligen@gmail.com>
* Use oldschool defer to cleanup after the tests
* Let LimitReader read MaxFileSizeBytes + 1

Co-authored-by: Kegsay <kegan@matrix.org>
parent c488d3db75
commit 8b22c4270d
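The change boils down to the limit-plus-one pattern: wrap the request body in io.LimitReader capped at MaxFileSizeBytes+1, and if more than MaxFileSizeBytes bytes end up being written, the input must have exceeded the limit and the upload can be rejected rather than silently truncated. Below is a minimal, self-contained sketch of that pattern; copyWithLimit, errFileTooLarge and the 8-byte limit are illustrative only and not part of Dendrite.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// errFileTooLarge signals that the request body exceeded the configured limit.
var errFileTooLarge = errors.New("file exceeds maximum size")

// copyWithLimit copies at most maxBytes from r into dst. It reads maxBytes+1
// so that an over-sized input is detectable: if that extra byte arrives, the
// caller knows the body was larger than allowed and can reject it instead of
// storing a silently truncated file.
func copyWithLimit(dst io.Writer, r io.Reader, maxBytes int64) (int64, error) {
	lr := io.LimitReader(r, maxBytes+1)
	n, err := io.Copy(dst, lr)
	if err != nil {
		return n, err
	}
	if n > maxBytes {
		return n, errFileTooLarge
	}
	return n, nil
}

func main() {
	var sink strings.Builder
	// "test test test" is 14 bytes, so an 8-byte limit rejects it.
	if _, err := copyWithLimit(&sink, strings.NewReader("test test test"), 8); err != nil {
		fmt.Println("rejected:", err)
	}
}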
@@ -147,7 +147,8 @@ func (r *uploadRequest) doUpload(
 	// r.storeFileAndMetadata(ctx, tmpDir, ...)
 	// before you return from doUpload else we will leak a temp file. We could make this nicer with a `WithTransaction` style of
 	// nested function to guarantee either storage or cleanup.
-	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, reqReader, cfg.AbsBasePath)
+	lr := io.LimitReader(reqReader, int64(*cfg.MaxFileSizeBytes)+1)
+	hash, bytesWritten, tmpDir, err := fileutils.WriteTempFile(ctx, lr, cfg.AbsBasePath)
 	if err != nil {
 		r.Logger.WithError(err).WithFields(log.Fields{
 			"MaxFileSizeBytes": *cfg.MaxFileSizeBytes,
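The comment preserved in this hunk alludes to a WithTransaction-style wrapper that would guarantee either storage or cleanup of the temp file. A rough, stdlib-only sketch of that idea follows, using a hypothetical withTempDir helper that is not part of this change or of Dendrite.

package main

import (
	"fmt"
	"os"
)

// withTempDir creates a temporary directory, runs fn, and removes the
// directory unless fn reports that its contents were persisted elsewhere,
// so cleanup cannot be forgotten on any return path.
func withTempDir(fn func(dir string) (persisted bool, err error)) error {
	dir, err := os.MkdirTemp("", "upload-")
	if err != nil {
		return err
	}
	persisted, err := fn(dir)
	if !persisted {
		// fn failed or chose not to keep the file; drop the temp dir.
		// The removal error is deliberately ignored in this sketch.
		_ = os.RemoveAll(dir)
	}
	return err
}

func main() {
	err := withTempDir(func(dir string) (bool, error) {
		fmt.Println("writing upload into", dir)
		return false, nil // pretend the upload was rejected
	})
	fmt.Println("err:", err)
}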
mediaapi/routing/upload_test.go | 132 (new file)
@@ -0,0 +1,132 @@
+package routing
+
+import (
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/matrix-org/dendrite/mediaapi/fileutils"
+	"github.com/matrix-org/dendrite/mediaapi/storage"
+	"github.com/matrix-org/dendrite/mediaapi/types"
+	"github.com/matrix-org/dendrite/setup/config"
+	"github.com/matrix-org/util"
+	log "github.com/sirupsen/logrus"
+)
+
+func Test_uploadRequest_doUpload(t *testing.T) {
+	type fields struct {
+		MediaMetadata *types.MediaMetadata
+		Logger        *log.Entry
+	}
+	type args struct {
+		ctx                       context.Context
+		reqReader                 io.Reader
+		cfg                       *config.MediaAPI
+		db                        storage.Database
+		activeThumbnailGeneration *types.ActiveThumbnailGeneration
+	}
+
+	wd, err := os.Getwd()
+	if err != nil {
+		t.Errorf("failed to get current working directory: %v", err)
+	}
+
+	maxSize := config.FileSizeBytes(8)
+	logger := log.New().WithField("mediaapi", "test")
+	testdataPath := filepath.Join(wd, "./testdata")
+
+	cfg := &config.MediaAPI{
+		MaxFileSizeBytes:  &maxSize,
+		BasePath:          config.Path(testdataPath),
+		AbsBasePath:       config.Path(testdataPath),
+		DynamicThumbnails: false,
+	}
+
+	// create testdata folder and remove when done
+	_ = os.Mkdir(testdataPath, os.ModePerm)
+	defer fileutils.RemoveDir(types.Path(testdataPath), nil)
+
+	db, err := storage.Open(&config.DatabaseOptions{
+		ConnectionString:       "file::memory:?cache=shared",
+		MaxOpenConnections:     100,
+		MaxIdleConnections:     2,
+		ConnMaxLifetimeSeconds: -1,
+	})
+	if err != nil {
+		t.Errorf("error opening mediaapi database: %v", err)
+	}
+
+	tests := []struct {
+		name   string
+		fields fields
+		args   args
+		want   *util.JSONResponse
+	}{
+		{
+			name: "upload ok",
+			args: args{
+				ctx:       context.Background(),
+				reqReader: strings.NewReader("test"),
+				cfg:       cfg,
+				db:        db,
+			},
+			fields: fields{
+				Logger: logger,
+				MediaMetadata: &types.MediaMetadata{
+					MediaID:    "1337",
+					UploadName: "test ok",
+				},
+			},
+			want: nil,
+		},
+		{
+			name: "upload ok (exact size)",
+			args: args{
+				ctx:       context.Background(),
+				reqReader: strings.NewReader("testtest"),
+				cfg:       cfg,
+				db:        db,
+			},
+			fields: fields{
+				Logger: logger,
+				MediaMetadata: &types.MediaMetadata{
+					MediaID:    "1338",
+					UploadName: "test ok (exact size)",
+				},
+			},
+			want: nil,
+		},
+		{
+			name: "upload not ok",
+			args: args{
+				ctx:       context.Background(),
+				reqReader: strings.NewReader("test test test"),
+				cfg:       cfg,
+				db:        db,
+			},
+			fields: fields{
+				Logger: logger,
+				MediaMetadata: &types.MediaMetadata{
+					MediaID:    "1339",
+					UploadName: "test fail",
+				},
+			},
+			want: requestEntityTooLargeJSONResponse(maxSize),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			r := &uploadRequest{
+				MediaMetadata: tt.fields.MediaMetadata,
+				Logger:        tt.fields.Logger,
+			}
+			if got := r.doUpload(tt.args.ctx, tt.args.reqReader, tt.args.cfg, tt.args.db, tt.args.activeThumbnailGeneration); !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("doUpload() = %+v, want %+v", got, tt.want)
+			}
+		})
+	}
+}
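With the repository checked out and a Go toolchain installed, the new table-driven test can presumably be run on its own with something like: go test ./mediaapi/routing -run Test_uploadRequest_doUpload (exact flags or build tags may vary with how the media API database is built at this commit).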