[feature] migrate to monorepo
Some checks failed
Build Backend / Build Docker Image (push) Successful in 3m33s
Test Backend / test (push) Failing after 31s

Commit 05ddc1f783 by CDN, 2025-02-21 00:49:20 +08:00
Signed by: CDN (GPG key ID: 0C656827F9F80080)
267 changed files with 75165 additions and 0 deletions


@@ -0,0 +1,66 @@
package storage
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"tss-rocks-be/internal/config"
)
// NewStorage creates a new storage instance based on the configuration
func NewStorage(ctx context.Context, cfg *config.StorageConfig) (Storage, error) {
switch cfg.Type {
case "local":
return NewLocalStorage(cfg.Local.RootDir)
case "s3":
// Load AWS configuration
var s3Client *s3.Client
if cfg.S3.Endpoint != "" {
// Custom endpoint (e.g., MinIO)
customResolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) {
return aws.Endpoint{
URL: cfg.S3.Endpoint,
}, nil
})
awsCfg, err := awsconfig.LoadDefaultConfig(ctx,
awsconfig.WithRegion(cfg.S3.Region),
awsconfig.WithEndpointResolverWithOptions(customResolver),
awsconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
cfg.S3.AccessKeyID,
cfg.S3.SecretAccessKey,
"",
)),
)
if err != nil {
return nil, fmt.Errorf("unable to load AWS SDK config: %w", err)
}
s3Client = s3.NewFromConfig(awsCfg)
} else {
// Standard AWS S3
awsCfg, err := awsconfig.LoadDefaultConfig(ctx,
awsconfig.WithRegion(cfg.S3.Region),
awsconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(
cfg.S3.AccessKeyID,
cfg.S3.SecretAccessKey,
"",
)),
)
if err != nil {
return nil, fmt.Errorf("unable to load AWS SDK config: %w", err)
}
s3Client = s3.NewFromConfig(awsCfg)
}
return NewS3Storage(s3Client, cfg.S3.Bucket, cfg.S3.CustomURL, cfg.S3.ProxyS3), nil
default:
return nil, fmt.Errorf("unsupported storage type: %s", cfg.Type)
}
}
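
For reference, here is a minimal standalone sketch of what the "s3" branch wires up when a custom endpoint such as MinIO is configured. The endpoint, region, credentials, and bucket name below are placeholders for illustration, not values taken from this repository.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"

	"tss-rocks-be/internal/storage"
)

func main() {
	ctx := context.Background()

	// Resolve every request to the custom endpoint (placeholder MinIO address).
	resolver := aws.EndpointResolverWithOptionsFunc(
		func(service, region string, options ...interface{}) (aws.Endpoint, error) {
			return aws.Endpoint{URL: "http://localhost:9000"}, nil
		})

	awsCfg, err := awsconfig.LoadDefaultConfig(ctx,
		awsconfig.WithRegion("us-east-1"),
		awsconfig.WithEndpointResolverWithOptions(resolver),
		awsconfig.WithCredentialsProvider(
			credentials.NewStaticCredentialsProvider("minioadmin", "minioadmin", "")),
	)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}

	// Proxy downloads through the API instead of exposing the bucket URL.
	store := storage.NewS3Storage(s3.NewFromConfig(awsCfg), "media", "", true)
	_ = store
}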


@@ -0,0 +1,260 @@
package storage
import (
"bytes"
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
)
type LocalStorage struct {
rootDir string
metaDir string
}
func NewLocalStorage(rootDir string) (*LocalStorage, error) {
// Ensure the root directory exists
if err := os.MkdirAll(rootDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create root directory: %w", err)
}
// Create metadata directory
metaDir := filepath.Join(rootDir, ".meta")
if err := os.MkdirAll(metaDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create metadata directory: %w", err)
}
return &LocalStorage{
rootDir: rootDir,
metaDir: metaDir,
}, nil
}
// generateID returns a random 32-character hex identifier for a stored file.
func (s *LocalStorage) generateID() (string, error) {
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return hex.EncodeToString(buf), nil
}
func (s *LocalStorage) saveMetadata(id string, info *FileInfo) error {
metaPath := filepath.Join(s.metaDir, id+".meta")
file, err := os.Create(metaPath)
if err != nil {
return fmt.Errorf("failed to create metadata file: %w", err)
}
defer file.Close()
data := fmt.Sprintf("%s\n%s", info.Name, info.ContentType)
if _, err := file.WriteString(data); err != nil {
return fmt.Errorf("failed to write metadata: %w", err)
}
return nil
}
func (s *LocalStorage) loadMetadata(id string) (string, string, error) {
metaPath := filepath.Join(s.metaDir, id+".meta")
data, err := os.ReadFile(metaPath)
if err != nil {
if os.IsNotExist(err) {
return id, "", nil // Return ID as name if metadata doesn't exist
}
return "", "", fmt.Errorf("failed to read metadata: %w", err)
}
parts := bytes.Split(data, []byte("\n"))
name := string(parts[0])
contentType := ""
if len(parts) > 1 {
contentType = string(parts[1])
}
return name, contentType, nil
}
func (s *LocalStorage) Save(ctx context.Context, name string, contentType string, reader io.Reader) (*FileInfo, error) {
if reader == nil {
return nil, fmt.Errorf("reader cannot be nil")
}
// Generate a unique ID for the file
id, err := s.generateID()
if err != nil {
return nil, fmt.Errorf("failed to generate file ID: %w", err)
}
// Create the file path
filePath := filepath.Join(s.rootDir, id)
// Create the file
file, err := os.Create(filePath)
if err != nil {
return nil, fmt.Errorf("failed to create file: %w", err)
}
defer file.Close()
// Copy the content
size, err := io.Copy(file, reader)
if err != nil {
// Clean up the file if there's an error
os.Remove(filePath)
return nil, fmt.Errorf("failed to write file content: %w", err)
}
now := time.Now()
info := &FileInfo{
ID: id,
Name: name,
Size: size,
ContentType: contentType,
CreatedAt: now,
UpdatedAt: now,
URL: fmt.Sprintf("/api/media/file/%s", id),
}
// Save metadata
if err := s.saveMetadata(id, info); err != nil {
os.Remove(filePath)
return nil, err
}
return info, nil
}
func (s *LocalStorage) Get(ctx context.Context, id string) (io.ReadCloser, *FileInfo, error) {
filePath := filepath.Join(s.rootDir, id)
// Open the file
file, err := os.Open(filePath)
if err != nil {
if os.IsNotExist(err) {
return nil, nil, fmt.Errorf("file not found: %s", id)
}
return nil, nil, fmt.Errorf("failed to open file: %w", err)
}
// Get file info
stat, err := file.Stat()
if err != nil {
file.Close()
return nil, nil, fmt.Errorf("failed to get file info: %w", err)
}
// Load metadata
name, contentType, err := s.loadMetadata(id)
if err != nil {
file.Close()
return nil, nil, err
}
info := &FileInfo{
ID: id,
Name: name,
Size: stat.Size(),
ContentType: contentType,
CreatedAt: stat.ModTime(),
UpdatedAt: stat.ModTime(),
URL: fmt.Sprintf("/api/media/file/%s", id),
}
return file, info, nil
}
func (s *LocalStorage) Delete(ctx context.Context, id string) error {
filePath := filepath.Join(s.rootDir, id)
if err := os.Remove(filePath); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("file not found: %s", id)
}
return fmt.Errorf("failed to delete file: %w", err)
}
// Remove metadata
metaPath := filepath.Join(s.metaDir, id+".meta")
if err := os.Remove(metaPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove metadata: %w", err)
}
return nil
}
func (s *LocalStorage) List(ctx context.Context, prefix string, limit int, offset int) ([]*FileInfo, error) {
var files []*FileInfo
var count int
err := filepath.Walk(s.rootDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Skip directories and metadata directory
if info.IsDir() || path == s.metaDir {
if path == s.metaDir {
return filepath.SkipDir
}
return nil
}
// Get the file ID (basename of the path)
id := filepath.Base(path)
// Load metadata to get the original name
name, contentType, err := s.loadMetadata(id)
if err != nil {
return err
}
// Skip files that don't match the prefix
if prefix != "" && !strings.HasPrefix(name, prefix) {
return nil
}
// Skip files before offset
if count < offset {
count++
return nil
}
// Stop if we've reached the limit
if limit > 0 && len(files) >= limit {
return filepath.SkipDir
}
files = append(files, &FileInfo{
ID: id,
Name: name,
Size: info.Size(),
ContentType: contentType,
CreatedAt: info.ModTime(),
UpdatedAt: info.ModTime(),
URL: fmt.Sprintf("/api/media/file/%s", id),
})
count++
return nil
})
if err != nil {
return nil, fmt.Errorf("failed to list files: %w", err)
}
return files, nil
}
func (s *LocalStorage) Exists(ctx context.Context, id string) (bool, error) {
filePath := filepath.Join(s.rootDir, id)
_, err := os.Stat(filePath)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, fmt.Errorf("failed to check file existence: %w", err)
}
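
On disk, LocalStorage keeps each blob at <rootDir>/<hex-id> and its sidecar metadata (original name and content type, newline-separated) at <rootDir>/.meta/<id>.meta. A minimal usage sketch, using only the functions defined above; the root directory path and file contents are placeholders:

package main

import (
	"context"
	"log"
	"strings"

	"tss-rocks-be/internal/storage"
)

func main() {
	ctx := context.Background()

	store, err := storage.NewLocalStorage("./data/media") // placeholder root directory
	if err != nil {
		log.Fatalf("init local storage: %v", err)
	}

	// Save writes the blob plus a .meta sidecar and returns a generated hex ID.
	info, err := store.Save(ctx, "hello.txt", "text/plain", strings.NewReader("hello"))
	if err != nil {
		log.Fatalf("save: %v", err)
	}
	log.Printf("stored %q as %s (%d bytes) at %s", info.Name, info.ID, info.Size, info.URL)
}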


@@ -0,0 +1,154 @@
package storage
import (
"bytes"
"context"
"io"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLocalStorage(t *testing.T) {
// Create a temporary directory for testing
tempDir, err := os.MkdirTemp("", "storage_test_*")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
// Create a new LocalStorage instance
storage, err := NewLocalStorage(tempDir)
require.NoError(t, err)
ctx := context.Background()
t.Run("Save and Get", func(t *testing.T) {
content := []byte("test content")
reader := bytes.NewReader(content)
// Save the file
fileInfo, err := storage.Save(ctx, "test.txt", "text/plain", reader)
require.NoError(t, err)
assert.NotEmpty(t, fileInfo.ID)
assert.Equal(t, "test.txt", fileInfo.Name)
assert.Equal(t, int64(len(content)), fileInfo.Size)
assert.Equal(t, "text/plain", fileInfo.ContentType)
assert.False(t, fileInfo.CreatedAt.IsZero())
// Get the file
readCloser, info, err := storage.Get(ctx, fileInfo.ID)
require.NoError(t, err)
defer readCloser.Close()
data, err := io.ReadAll(readCloser)
require.NoError(t, err)
assert.Equal(t, content, data)
assert.Equal(t, fileInfo.ID, info.ID)
assert.Equal(t, fileInfo.Name, info.Name)
assert.Equal(t, fileInfo.Size, info.Size)
})
t.Run("List", func(t *testing.T) {
// Clear the directory first
dirEntries, err := os.ReadDir(tempDir)
require.NoError(t, err)
for _, entry := range dirEntries {
if entry.Name() != ".meta" {
os.Remove(filepath.Join(tempDir, entry.Name()))
}
}
// Save multiple files
testFiles := []struct {
name string
content string
}{
{"test1.txt", "content1"},
{"test2.txt", "content2"},
{"other.txt", "content3"},
}
for _, f := range testFiles {
reader := bytes.NewReader([]byte(f.content))
_, err := storage.Save(ctx, f.name, "text/plain", reader)
require.NoError(t, err)
}
// List all files
allFiles, err := storage.List(ctx, "", 10, 0)
require.NoError(t, err)
assert.Len(t, allFiles, 3)
// List files with prefix
filesWithPrefix, err := storage.List(ctx, "test", 10, 0)
require.NoError(t, err)
assert.Len(t, filesWithPrefix, 2)
for _, f := range filesWithPrefix {
assert.True(t, strings.HasPrefix(f.Name, "test"))
}
// Test pagination
pagedFiles, err := storage.List(ctx, "", 2, 1)
require.NoError(t, err)
assert.Len(t, pagedFiles, 2)
})
t.Run("Exists", func(t *testing.T) {
// Save a file
content := []byte("test content")
reader := bytes.NewReader(content)
fileInfo, err := storage.Save(ctx, "exists.txt", "text/plain", reader)
require.NoError(t, err)
// Check if file exists
exists, err := storage.Exists(ctx, fileInfo.ID)
require.NoError(t, err)
assert.True(t, exists)
// Check non-existent file
exists, err = storage.Exists(ctx, "non-existent")
require.NoError(t, err)
assert.False(t, exists)
})
t.Run("Delete", func(t *testing.T) {
// Save a file
content := []byte("test content")
reader := bytes.NewReader(content)
fileInfo, err := storage.Save(ctx, "delete.txt", "text/plain", reader)
require.NoError(t, err)
// Delete the file
err = storage.Delete(ctx, fileInfo.ID)
require.NoError(t, err)
// Verify file is deleted
exists, err := storage.Exists(ctx, fileInfo.ID)
require.NoError(t, err)
assert.False(t, exists)
// Try to delete non-existent file
err = storage.Delete(ctx, "non-existent")
assert.Error(t, err)
})
t.Run("Invalid operations", func(t *testing.T) {
// Try to get non-existent file
_, _, err := storage.Get(ctx, "non-existent")
assert.Error(t, err)
assert.Contains(t, err.Error(), "file not found")
// Try to save file with nil reader
_, err = storage.Save(ctx, "test.txt", "text/plain", nil)
assert.Error(t, err)
assert.Contains(t, err.Error(), "reader cannot be nil")
// Try to delete non-existent file
err = storage.Delete(ctx, "non-existent")
assert.Error(t, err)
assert.Contains(t, err.Error(), "file not found")
})
}


@@ -0,0 +1,232 @@
package storage
import (
"context"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
)
type S3Storage struct {
client s3Client
bucket string
customURL string
proxyS3 bool
}
// s3Client is the interface that wraps the basic S3 client operations we need
type s3Client interface {
PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error)
GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error)
DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error)
ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
}
func NewS3Storage(client s3Client, bucket string, customURL string, proxyS3 bool) *S3Storage {
return &S3Storage{
client: client,
bucket: bucket,
customURL: customURL,
proxyS3: proxyS3,
}
}
func (s *S3Storage) generateID() (string, error) {
bytes := make([]byte, 16)
if _, err := rand.Read(bytes); err != nil {
return "", err
}
return hex.EncodeToString(bytes), nil
}
func (s *S3Storage) getObjectURL(id string) string {
if s.customURL != "" {
return fmt.Sprintf("%s/%s", strings.TrimRight(s.customURL, "/"), id)
}
if s.proxyS3 {
return fmt.Sprintf("/api/media/file/%s", id)
}
return fmt.Sprintf("https://%s.s3.amazonaws.com/%s", s.bucket, id)
}
func (s *S3Storage) Save(ctx context.Context, name string, contentType string, reader io.Reader) (*FileInfo, error) {
// Generate a unique ID for the file
id, err := s.generateID()
if err != nil {
return nil, fmt.Errorf("failed to generate file ID: %w", err)
}
	// Check if the file already exists. Note that HeadObject reports a missing key
	// as *types.NotFound, while GetObject-style APIs (and the unit tests here) use
	// *types.NoSuchKey, so treat either as "absent".
	_, err = s.client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(id),
	})
	if err == nil {
		return nil, fmt.Errorf("file already exists with ID: %s", id)
	}
	var notFound *types.NotFound
	var noSuchKey *types.NoSuchKey
	if !errors.As(err, &notFound) && !errors.As(err, &noSuchKey) {
		return nil, fmt.Errorf("failed to check if file exists: %w", err)
	}
// Upload the file
_, err = s.client.PutObject(ctx, &s3.PutObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(id),
Body: reader,
ContentType: aws.String(contentType),
Metadata: map[string]string{
"x-amz-meta-original-name": name,
},
})
if err != nil {
return nil, fmt.Errorf("failed to upload file: %w", err)
}
now := time.Now()
info := &FileInfo{
ID: id,
Name: name,
Size: 0, // Size is not available until after upload
ContentType: contentType,
CreatedAt: now,
UpdatedAt: now,
URL: s.getObjectURL(id),
}
return info, nil
}
func (s *S3Storage) Get(ctx context.Context, id string) (io.ReadCloser, *FileInfo, error) {
// Get the object from S3
result, err := s.client.GetObject(ctx, &s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(id),
})
if err != nil {
return nil, nil, fmt.Errorf("failed to get file from S3: %w", err)
}
info := &FileInfo{
ID: id,
Name: result.Metadata["x-amz-meta-original-name"],
Size: aws.ToInt64(result.ContentLength),
ContentType: aws.ToString(result.ContentType),
CreatedAt: aws.ToTime(result.LastModified),
UpdatedAt: aws.ToTime(result.LastModified),
URL: s.getObjectURL(id),
}
return result.Body, info, nil
}
func (s *S3Storage) Delete(ctx context.Context, id string) error {
_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(id),
})
if err != nil {
return fmt.Errorf("failed to delete file from S3: %w", err)
}
return nil
}
func (s *S3Storage) List(ctx context.Context, prefix string, limit int, offset int) ([]*FileInfo, error) {
var files []*FileInfo
var continuationToken *string
	// Skip whole pages (up to 1000 keys each) to honor the offset; any remainder
	// of offset within a page is not skipped by this loop.
for i := 0; i < offset/1000; i++ {
output, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(s.bucket),
Prefix: aws.String(prefix),
ContinuationToken: continuationToken,
MaxKeys: aws.Int32(1000),
})
if err != nil {
return nil, fmt.Errorf("failed to list files from S3: %w", err)
}
if !aws.ToBool(output.IsTruncated) {
return files, nil
}
continuationToken = output.NextContinuationToken
}
// Get the actual objects
output, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(s.bucket),
Prefix: aws.String(prefix),
ContinuationToken: continuationToken,
MaxKeys: aws.Int32(int32(limit)),
})
if err != nil {
return nil, fmt.Errorf("failed to list files from S3: %w", err)
}
for _, obj := range output.Contents {
// Get the object metadata
head, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
Bucket: aws.String(s.bucket),
Key: obj.Key,
})
var contentType string
var originalName string
if err != nil {
var noSuchKey *types.NoSuchKey
if errors.As(err, &noSuchKey) {
// If the object doesn't exist (which shouldn't happen normally),
// we'll still include it in the list but with empty metadata
contentType = ""
originalName = aws.ToString(obj.Key)
} else {
continue
}
} else {
contentType = aws.ToString(head.ContentType)
originalName = head.Metadata["x-amz-meta-original-name"]
if originalName == "" {
originalName = aws.ToString(obj.Key)
}
}
files = append(files, &FileInfo{
ID: aws.ToString(obj.Key),
Name: originalName,
Size: aws.ToInt64(obj.Size),
ContentType: contentType,
CreatedAt: aws.ToTime(obj.LastModified),
UpdatedAt: aws.ToTime(obj.LastModified),
URL: s.getObjectURL(aws.ToString(obj.Key)),
})
}
return files, nil
}
func (s *S3Storage) Exists(ctx context.Context, id string) (bool, error) {
_, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(id),
})
	if err != nil {
		// HeadObject reports a missing key as *types.NotFound rather than
		// *types.NoSuchKey; accept either so real S3 responses and the mocked
		// errors in the tests are both treated as "does not exist".
		var notFound *types.NotFound
		var noSuchKey *types.NoSuchKey
		if errors.As(err, &notFound) || errors.As(err, &noSuchKey) {
			return false, nil
		}
		return false, fmt.Errorf("failed to check file existence in S3: %w", err)
	}
return true, nil
}
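
The URL recorded in FileInfo depends on how the storage was constructed: a non-empty customURL wins, otherwise proxyS3 routes downloads through /api/media/file/, and otherwise the default virtual-hosted-style amazonaws.com URL is used. A brief in-package illustration with placeholder bucket and ID values (getObjectURL is unexported, so this would live alongside the type, e.g. in a package-internal test):

// Hypothetical in-package sketch of the three URL modes.
custom := NewS3Storage(nil, "media", "https://cdn.example.com", false)
_ = custom.getObjectURL("abc123") // "https://cdn.example.com/abc123"

proxied := NewS3Storage(nil, "media", "", true)
_ = proxied.getObjectURL("abc123") // "/api/media/file/abc123"

direct := NewS3Storage(nil, "media", "", false)
_ = direct.getObjectURL("abc123") // "https://media.s3.amazonaws.com/abc123"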


@@ -0,0 +1,211 @@
package storage
import (
"bytes"
"context"
"io"
"testing"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
// MockS3Client is a mock implementation of the S3 client interface
type MockS3Client struct {
mock.Mock
}
func (m *MockS3Client) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) {
args := m.Called(ctx, params)
return args.Get(0).(*s3.PutObjectOutput), args.Error(1)
}
func (m *MockS3Client) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) {
args := m.Called(ctx, params)
return args.Get(0).(*s3.GetObjectOutput), args.Error(1)
}
func (m *MockS3Client) DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error) {
args := m.Called(ctx, params)
return args.Get(0).(*s3.DeleteObjectOutput), args.Error(1)
}
func (m *MockS3Client) ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) {
args := m.Called(ctx, params)
return args.Get(0).(*s3.ListObjectsV2Output), args.Error(1)
}
func (m *MockS3Client) HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) {
args := m.Called(ctx, params)
return args.Get(0).(*s3.HeadObjectOutput), args.Error(1)
}
func TestS3Storage(t *testing.T) {
ctx := context.Background()
mockClient := new(MockS3Client)
storage := NewS3Storage(mockClient, "test-bucket", "", false)
t.Run("Save", func(t *testing.T) {
mockClient.ExpectedCalls = nil
mockClient.Calls = nil
content := []byte("test content")
reader := bytes.NewReader(content)
// Mock HeadObject to return NotFound error
mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket"
})).Return(&s3.HeadObjectOutput{}, &types.NoSuchKey{
Message: aws.String("The specified key does not exist."),
})
mockClient.On("PutObject", ctx, mock.MatchedBy(func(input *s3.PutObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.ContentType) == "text/plain"
})).Return(&s3.PutObjectOutput{}, nil)
fileInfo, err := storage.Save(ctx, "test.txt", "text/plain", reader)
require.NoError(t, err)
assert.NotEmpty(t, fileInfo.ID)
assert.Equal(t, "test.txt", fileInfo.Name)
assert.Equal(t, "text/plain", fileInfo.ContentType)
mockClient.AssertExpectations(t)
})
t.Run("Get", func(t *testing.T) {
content := []byte("test content")
mockClient.On("GetObject", ctx, mock.MatchedBy(func(input *s3.GetObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.Key) == "test-id"
})).Return(&s3.GetObjectOutput{
Body: io.NopCloser(bytes.NewReader(content)),
ContentType: aws.String("text/plain"),
ContentLength: aws.Int64(int64(len(content))),
LastModified: aws.Time(time.Now()),
}, nil)
readCloser, info, err := storage.Get(ctx, "test-id")
require.NoError(t, err)
defer readCloser.Close()
data, err := io.ReadAll(readCloser)
require.NoError(t, err)
assert.Equal(t, content, data)
assert.Equal(t, "test-id", info.ID)
assert.Equal(t, int64(len(content)), info.Size)
mockClient.AssertExpectations(t)
})
t.Run("List", func(t *testing.T) {
mockClient.ExpectedCalls = nil
mockClient.Calls = nil
mockClient.On("ListObjectsV2", ctx, mock.MatchedBy(func(input *s3.ListObjectsV2Input) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.Prefix) == "test" &&
aws.ToInt32(input.MaxKeys) == 10
})).Return(&s3.ListObjectsV2Output{
Contents: []types.Object{
{
Key: aws.String("test1"),
Size: aws.Int64(100),
LastModified: aws.Time(time.Now()),
},
{
Key: aws.String("test2"),
Size: aws.Int64(200),
LastModified: aws.Time(time.Now()),
},
},
}, nil)
// Mock HeadObject for both files
mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.Key) == "test1"
})).Return(&s3.HeadObjectOutput{
ContentType: aws.String("text/plain"),
Metadata: map[string]string{
"x-amz-meta-original-name": "test1.txt",
},
}, nil).Once()
mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.Key) == "test2"
})).Return(&s3.HeadObjectOutput{
ContentType: aws.String("text/plain"),
Metadata: map[string]string{
"x-amz-meta-original-name": "test2.txt",
},
}, nil).Once()
files, err := storage.List(ctx, "test", 10, 0)
require.NoError(t, err)
assert.Len(t, files, 2)
assert.Equal(t, "test1", files[0].ID)
assert.Equal(t, int64(100), files[0].Size)
assert.Equal(t, "test1.txt", files[0].Name)
assert.Equal(t, "text/plain", files[0].ContentType)
mockClient.AssertExpectations(t)
})
t.Run("Delete", func(t *testing.T) {
mockClient.On("DeleteObject", ctx, mock.MatchedBy(func(input *s3.DeleteObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.Key) == "test-id"
})).Return(&s3.DeleteObjectOutput{}, nil)
err := storage.Delete(ctx, "test-id")
require.NoError(t, err)
mockClient.AssertExpectations(t)
})
t.Run("Exists", func(t *testing.T) {
mockClient.ExpectedCalls = nil
mockClient.Calls = nil
// Mock HeadObject for existing file
mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.Key) == "test-id"
})).Return(&s3.HeadObjectOutput{}, nil).Once()
exists, err := storage.Exists(ctx, "test-id")
require.NoError(t, err)
assert.True(t, exists)
// Mock HeadObject for non-existing file
mockClient.On("HeadObject", ctx, mock.MatchedBy(func(input *s3.HeadObjectInput) bool {
return aws.ToString(input.Bucket) == "test-bucket" &&
aws.ToString(input.Key) == "non-existent"
})).Return(&s3.HeadObjectOutput{}, &types.NoSuchKey{
Message: aws.String("The specified key does not exist."),
}).Once()
exists, err = storage.Exists(ctx, "non-existent")
require.NoError(t, err)
assert.False(t, exists)
mockClient.AssertExpectations(t)
})
t.Run("Custom URL", func(t *testing.T) {
customStorage := &S3Storage{
client: mockClient,
bucket: "test-bucket",
customURL: "https://custom.domain",
proxyS3: true,
}
assert.Contains(t, customStorage.getObjectURL("test-id"), "https://custom.domain")
})
}


@@ -0,0 +1,38 @@
package storage
//go:generate mockgen -source=storage.go -destination=mock/mock_storage.go -package=mock
import (
"context"
"io"
"time"
)
// FileInfo represents metadata about a stored file
type FileInfo struct {
ID string `json:"id"`
Name string `json:"name"`
Size int64 `json:"size"`
ContentType string `json:"content_type"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
URL string `json:"url"`
}
// Storage defines the interface for file storage operations
type Storage interface {
// Save stores a file and returns its FileInfo
Save(ctx context.Context, name string, contentType string, reader io.Reader) (*FileInfo, error)
// Get retrieves a file by its ID
Get(ctx context.Context, id string) (io.ReadCloser, *FileInfo, error)
// Delete removes a file by its ID
Delete(ctx context.Context, id string) error
// List returns files matching the optional prefix, using limit and offset for pagination
List(ctx context.Context, prefix string, limit int, offset int) ([]*FileInfo, error)
// Exists checks if a file exists
Exists(ctx context.Context, id string) (bool, error)
}
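
Handlers and services can depend on this Storage interface rather than on a concrete backend, which is what makes the local and S3 implementations interchangeable at runtime. A hypothetical consumer sketch (UploadAvatar, its package, and its parameters are illustrative, not part of this commit):

package media

import (
	"context"
	"io"

	"tss-rocks-be/internal/storage"
)

// UploadAvatar streams an upload into whichever backend was configured and
// returns the URL recorded in the resulting FileInfo.
func UploadAvatar(ctx context.Context, store storage.Storage, name, contentType string, r io.Reader) (string, error) {
	info, err := store.Save(ctx, name, contentType, r)
	if err != nil {
		return "", err
	}
	return info.URL, nil
}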