diff --git a/cmd/gotosocial/action/testrig/testrig.go b/cmd/gotosocial/action/testrig/testrig.go
index 2259f7148..88ac369de 100644
--- a/cmd/gotosocial/action/testrig/testrig.go
+++ b/cmd/gotosocial/action/testrig/testrig.go
@@ -73,7 +73,7 @@
 	router := testrig.NewTestRouter(dbService)
 	var storageBackend *storage.Driver
 	if os.Getenv("GTS_STORAGE_BACKEND") == "s3" {
-		storageBackend = testrig.NewS3Storage()
+		storageBackend, _ = storage.NewS3Storage()
 	} else {
 		storageBackend = testrig.NewInMemoryStorage()
 	}
diff --git a/internal/storage/storage.go b/internal/storage/storage.go
index 498ea873a..099ae536c 100644
--- a/internal/storage/storage.go
+++ b/internal/storage/storage.go
@@ -26,6 +26,7 @@
 	"path"
 	"time"
 
+	"codeberg.org/gruf/go-cache/v3"
 	"codeberg.org/gruf/go-store/v2/kv"
 	"codeberg.org/gruf/go-store/v2/storage"
 	"github.com/minio/minio-go/v7"
@@ -33,6 +34,11 @@
 	"github.com/superseriousbusiness/gotosocial/internal/config"
 )
 
+const (
+	urlCacheTTL             = time.Hour * 24
+	urlCacheExpiryFrequency = time.Minute * 5
+)
+
 // ErrAlreadyExists is a ptr to underlying storage.ErrAlreadyExists,
 // to put the related errors in the same package as our storage wrapper.
 var ErrAlreadyExists = storage.ErrAlreadyExists
@@ -44,8 +50,9 @@ type Driver struct {
 	Storage storage.Storage
 
 	// S3-only parameters
-	Proxy  bool
-	Bucket string
+	Proxy          bool
+	Bucket         string
+	PresignedCache cache.Cache[string, *url.URL]
 }
 
 // URL will return a presigned GET object URL, but only if running on S3 storage with proxying disabled.
@@ -56,74 +63,90 @@ func (d *Driver) URL(ctx context.Context, key string) *url.URL {
 		return nil
 	}
 
-	// If URL request fails, fallback is to fetch the file. So ignore the error here
-	url, _ := s3.Client().PresignedGetObject(ctx, d.Bucket, key, time.Hour, url.Values{
+	if u, ok := d.PresignedCache.Get(key); ok {
+		return u
+	}
+
+	u, err := s3.Client().PresignedGetObject(ctx, d.Bucket, key, urlCacheTTL, url.Values{
 		"response-content-type": []string{mime.TypeByExtension(path.Ext(key))},
 	})
-
-	return url
+	if err != nil {
+		// If URL request fails, fallback is to fetch the file. So ignore the error here
+		return nil
+	}
+	d.PresignedCache.Set(key, u)
+	return u
 }
 
 func AutoConfig() (*Driver, error) {
-	var st storage.Storage
-
 	switch backend := config.GetStorageBackend(); backend {
 	case "s3":
-		// Load runtime configuration
-		endpoint := config.GetStorageS3Endpoint()
-		access := config.GetStorageS3AccessKey()
-		secret := config.GetStorageS3SecretKey()
-		secure := config.GetStorageS3UseSSL()
-		bucket := config.GetStorageS3BucketName()
-
-		// Open the s3 storage implementation
-		s3, err := storage.OpenS3(endpoint, bucket, &storage.S3Config{
-			CoreOpts: minio.Options{
-				Creds:  credentials.NewStaticV4(access, secret, ""),
-				Secure: secure,
-			},
-			GetOpts:      minio.GetObjectOptions{},
-			PutOpts:      minio.PutObjectOptions{},
-			PutChunkSize: 5 * 1024 * 1024, // 5MiB
-			StatOpts:     minio.StatObjectOptions{},
-			RemoveOpts:   minio.RemoveObjectOptions{},
-			ListSize:     200,
-		})
-		if err != nil {
-			return nil, fmt.Errorf("error opening s3 storage: %w", err)
-		}
-
-		// Set storage impl
-		st = s3
-
+		return NewS3Storage()
 	case "local":
-		// Load runtime configuration
-		basePath := config.GetStorageLocalBasePath()
-
-		// Open the disk storage implementation
-		disk, err := storage.OpenDisk(basePath, &storage.DiskConfig{
-			// Put the store lockfile in the storage dir itself.
-			// Normally this would not be safe, since we could end up
-			// overwriting the lockfile if we store a file called 'store.lock'.
-			// However, in this case it's OK because the keys are set by
-			// GtS and not the user, so we know we're never going to overwrite it.
-			LockFile: path.Join(basePath, "store.lock"),
-		})
-		if err != nil {
-			return nil, fmt.Errorf("error opening disk storage: %w", err)
-		}
-
-		// Set storage impl
-		st = disk
-
+		return NewFileStorage()
 	default:
		return nil, fmt.Errorf("invalid storage backend: %s", backend)
 	}
+}
+
+func NewFileStorage() (*Driver, error) {
+	// Load runtime configuration
+	basePath := config.GetStorageLocalBasePath()
+
+	// Open the disk storage implementation
+	disk, err := storage.OpenDisk(basePath, &storage.DiskConfig{
+		// Put the store lockfile in the storage dir itself.
+		// Normally this would not be safe, since we could end up
+		// overwriting the lockfile if we store a file called 'store.lock'.
+		// However, in this case it's OK because the keys are set by
+		// GtS and not the user, so we know we're never going to overwrite it.
+		LockFile: path.Join(basePath, "store.lock"),
+	})
+	if err != nil {
+		return nil, fmt.Errorf("error opening disk storage: %w", err)
+	}
 
 	return &Driver{
-		KVStore: kv.New(st),
-		Proxy:   config.GetStorageS3Proxy(),
-		Bucket:  config.GetStorageS3BucketName(),
-		Storage: st,
+		KVStore: kv.New(disk),
+		Storage: disk,
+	}, nil
+}
+
+func NewS3Storage() (*Driver, error) {
+	// Load runtime configuration
+	endpoint := config.GetStorageS3Endpoint()
+	access := config.GetStorageS3AccessKey()
+	secret := config.GetStorageS3SecretKey()
+	secure := config.GetStorageS3UseSSL()
+	bucket := config.GetStorageS3BucketName()
+
+	// Open the s3 storage implementation
+	s3, err := storage.OpenS3(endpoint, bucket, &storage.S3Config{
+		CoreOpts: minio.Options{
+			Creds:  credentials.NewStaticV4(access, secret, ""),
+			Secure: secure,
+		},
+		GetOpts:      minio.GetObjectOptions{},
+		PutOpts:      minio.PutObjectOptions{},
+		PutChunkSize: 5 * 1024 * 1024, // 5MiB
+		StatOpts:     minio.StatObjectOptions{},
+		RemoveOpts:   minio.RemoveObjectOptions{},
+		ListSize:     200,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("error opening s3 storage: %w", err)
+	}
+
+	// ttl should be lower than the expiry used by S3 to avoid serving invalid URLs
+	presignedCache := cache.New[string, *url.URL](0, 1000, urlCacheTTL-urlCacheExpiryFrequency)
+	presignedCache.Start(urlCacheExpiryFrequency)
+
+	return &Driver{
+		KVStore:        kv.New(s3),
+		Proxy:          config.GetStorageS3Proxy(),
+		Bucket:         config.GetStorageS3BucketName(),
+		Storage:        s3,
+		PresignedCache: presignedCache,
 	}, nil
 }
diff --git a/testrig/storage.go b/testrig/storage.go
index 20226089c..7d3f3be71 100644
--- a/testrig/storage.go
+++ b/testrig/storage.go
@@ -26,9 +26,6 @@
 
 	"codeberg.org/gruf/go-store/v2/kv"
 	"codeberg.org/gruf/go-store/v2/storage"
-	"github.com/minio/minio-go/v7"
-	"github.com/minio/minio-go/v7/pkg/credentials"
-	"github.com/superseriousbusiness/gotosocial/internal/config"
 	gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage"
 )
 
@@ -41,38 +38,6 @@ func NewInMemoryStorage() *gtsstorage.Driver {
 	}
 }
 
-func NewS3Storage() *gtsstorage.Driver {
-	endpoint := config.GetStorageS3Endpoint()
-	access := config.GetStorageS3AccessKey()
-	secret := config.GetStorageS3SecretKey()
-	secure := config.GetStorageS3UseSSL()
-	bucket := config.GetStorageS3BucketName()
-	proxy := config.GetStorageS3Proxy()
-
-	s3, err := storage.OpenS3(endpoint, bucket, &storage.S3Config{
-		CoreOpts: minio.Options{
-			Creds:  credentials.NewStaticV4(access, secret, ""),
-			Secure: secure,
-		},
-		GetOpts:      minio.GetObjectOptions{},
-		PutOpts:      minio.PutObjectOptions{},
-		PutChunkSize: 5 * 1024 * 1024, // 2MiB
-		StatOpts:     minio.StatObjectOptions{},
-		RemoveOpts:   minio.RemoveObjectOptions{},
-		ListSize:     200,
-	})
-	if err != nil {
-		panic(fmt.Errorf("error opening s3 storage: %w", err))
-	}
-
-	return &gtsstorage.Driver{
-		KVStore: kv.New(s3),
-		Storage: s3,
-		Proxy:   proxy,
-		Bucket:  bucket,
-	}
-}
-
 // StandardStorageSetup populates the storage with standard test entries from the given directory.
 func StandardStorageSetup(storage *gtsstorage.Driver, relativePath string) {
 	storedA := newTestStoredAttachments()
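
The snippet below is a minimal standalone sketch of the caching pattern this patch introduces in `Driver.URL`: presigned URLs are kept in a `codeberg.org/gruf/go-cache/v3` TTL cache whose entries expire slightly before the presigned URL itself would, so a stale URL is never handed out. Only the `cache.New`, `Start`, `Get`, and `Set` calls mirror the actual change; `makePresignedURL`, the example host, and the `get` closure are hypothetical stand-ins for the real `PresignedGetObject` call on the S3 client.

```go
package main

import (
	"fmt"
	"net/url"
	"time"

	"codeberg.org/gruf/go-cache/v3"
)

const (
	urlTTL    = 24 * time.Hour  // lifetime requested for the presigned URL
	sweepFreq = 5 * time.Minute // how often expired cache entries are evicted
)

// makePresignedURL is a hypothetical stand-in for s3.Client().PresignedGetObject.
func makePresignedURL(key string) *url.URL {
	return &url.URL{Scheme: "https", Host: "example-bucket.s3.example.org", Path: "/" + key}
}

func main() {
	// Cache entries live slightly shorter than the URL's own expiry,
	// so a cached URL is never served after S3 stops honouring it.
	urls := cache.New[string, *url.URL](0, 1000, urlTTL-sweepFreq)
	urls.Start(sweepFreq) // background sweep that evicts expired entries

	get := func(key string) *url.URL {
		if u, ok := urls.Get(key); ok {
			return u // cache hit: no round trip to S3
		}
		u := makePresignedURL(key) // cache miss: sign a fresh URL
		urls.Set(key, u)
		return u
	}

	fmt.Println(get("avatar/123.png")) // miss: generated and cached
	fmt.Println(get("avatar/123.png")) // hit: served from the cache
}
```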