[feature] migrate to monorepo
commit 05ddc1f783
267 changed files with 75165 additions and 0 deletions
backend/internal/storage/s3.go: 232 additions (Normal file)
@@ -0,0 +1,232 @@
package storage

import (
    "context"
    "crypto/rand"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

type S3Storage struct {
    client    s3Client
    bucket    string
    customURL string
    proxyS3   bool
}

// s3Client is the interface that wraps the basic S3 client operations we need.
type s3Client interface {
    PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error)
    GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error)
    DeleteObject(ctx context.Context, params *s3.DeleteObjectInput, optFns ...func(*s3.Options)) (*s3.DeleteObjectOutput, error)
    ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error)
    HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error)
}

func NewS3Storage(client s3Client, bucket string, customURL string, proxyS3 bool) *S3Storage {
    return &S3Storage{
        client:    client,
        bucket:    bucket,
        customURL: customURL,
        proxyS3:   proxyS3,
    }
}
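
// Construction sketch (illustrative, not part of this commit): a natural
// choice for the s3Client is a *s3.Client from the AWS SDK, which already
// satisfies the interface above. The bucket name and flags below are made up.
//
//	cfg, err := config.LoadDefaultConfig(ctx) // "github.com/aws/aws-sdk-go-v2/config"
//	if err != nil {
//		return err
//	}
//	store := NewS3Storage(s3.NewFromConfig(cfg), "media-uploads", "", false)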

func (s *S3Storage) generateID() (string, error) {
    bytes := make([]byte, 16)
    if _, err := rand.Read(bytes); err != nil {
        return "", err
    }
    return hex.EncodeToString(bytes), nil
}

func (s *S3Storage) getObjectURL(id string) string {
    if s.customURL != "" {
        return fmt.Sprintf("%s/%s", strings.TrimRight(s.customURL, "/"), id)
    }
    if s.proxyS3 {
        return fmt.Sprintf("/api/media/file/%s", id)
    }
    return fmt.Sprintf("https://%s.s3.amazonaws.com/%s", s.bucket, id)
}
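
// Worked example (assumed values): for id "abc123", getObjectURL returns
// "https://cdn.example.com/abc123" when customURL is "https://cdn.example.com/",
// "/api/media/file/abc123" when proxyS3 is set, and otherwise the
// virtual-hosted-style URL "https://my-bucket.s3.amazonaws.com/abc123" for
// bucket "my-bucket". A non-empty customURL takes precedence over proxyS3.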

func (s *S3Storage) Save(ctx context.Context, name string, contentType string, reader io.Reader) (*FileInfo, error) {
    // Generate a unique ID for the file
    id, err := s.generateID()
    if err != nil {
        return nil, fmt.Errorf("failed to generate file ID: %w", err)
    }

    // Check whether an object with this ID already exists
    _, err = s.client.HeadObject(ctx, &s3.HeadObjectInput{
        Bucket: aws.String(s.bucket),
        Key:    aws.String(id),
    })
    if err == nil {
        return nil, fmt.Errorf("file already exists with ID: %s", id)
    }

    // HeadObject reports a missing key as types.NotFound (types.NoSuchKey is
    // only returned by GetObject); any other error is a real failure.
    var notFound *types.NotFound
    if !errors.As(err, &notFound) {
        return nil, fmt.Errorf("failed to check if file exists: %w", err)
    }

    // Upload the file
    _, err = s.client.PutObject(ctx, &s3.PutObjectInput{
        Bucket:      aws.String(s.bucket),
        Key:         aws.String(id),
        Body:        reader,
        ContentType: aws.String(contentType),
        Metadata: map[string]string{
            "x-amz-meta-original-name": name,
        },
    })
    if err != nil {
        return nil, fmt.Errorf("failed to upload file: %w", err)
    }

    now := time.Now()
    info := &FileInfo{
        ID:          id,
        Name:        name,
        Size:        0, // Size is not available until after upload
        ContentType: contentType,
        CreatedAt:   now,
        UpdatedAt:   now,
        URL:         s.getObjectURL(id),
    }

    return info, nil
}
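
// Caller sketch (hypothetical HTTP handler, not part of this file): Save
// accepts any io.Reader, for example an uploaded multipart file:
//
//	file, header, err := r.FormFile("file")
//	if err != nil {
//		http.Error(w, "bad upload", http.StatusBadRequest)
//		return
//	}
//	defer file.Close()
//	info, err := store.Save(r.Context(), header.Filename, header.Header.Get("Content-Type"), file)
//
// info.Size is always 0 at this point, since the length of a streamed reader
// is only known once the upload has completed.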

func (s *S3Storage) Get(ctx context.Context, id string) (io.ReadCloser, *FileInfo, error) {
    // Get the object from S3
    result, err := s.client.GetObject(ctx, &s3.GetObjectInput{
        Bucket: aws.String(s.bucket),
        Key:    aws.String(id),
    })
    if err != nil {
        return nil, nil, fmt.Errorf("failed to get file from S3: %w", err)
    }

    info := &FileInfo{
        ID:          id,
        Name:        result.Metadata["x-amz-meta-original-name"],
        Size:        aws.ToInt64(result.ContentLength),
        ContentType: aws.ToString(result.ContentType),
        CreatedAt:   aws.ToTime(result.LastModified),
        UpdatedAt:   aws.ToTime(result.LastModified),
        URL:         s.getObjectURL(id),
    }

    return result.Body, info, nil
}
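
// Caller sketch (illustrative): the returned io.ReadCloser is the S3 response
// body and must be closed by the caller, e.g. when streaming to a client:
//
//	body, info, err := store.Get(ctx, id)
//	if err != nil {
//		return err
//	}
//	defer body.Close()
//	w.Header().Set("Content-Type", info.ContentType)
//	_, err = io.Copy(w, body)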

func (s *S3Storage) Delete(ctx context.Context, id string) error {
    _, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
        Bucket: aws.String(s.bucket),
        Key:    aws.String(id),
    })
    if err != nil {
        return fmt.Errorf("failed to delete file from S3: %w", err)
    }

    return nil
}

func (s *S3Storage) List(ctx context.Context, prefix string, limit int, offset int) ([]*FileInfo, error) {
    var files []*FileInfo
    var continuationToken *string

    // Skip full pages of 1,000 keys to approximate the requested offset
    // (offsets that are not a multiple of 1,000 are rounded down).
    for i := 0; i < offset/1000; i++ {
        output, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
            Bucket:            aws.String(s.bucket),
            Prefix:            aws.String(prefix),
            ContinuationToken: continuationToken,
            MaxKeys:           aws.Int32(1000),
        })
        if err != nil {
            return nil, fmt.Errorf("failed to list files from S3: %w", err)
        }
        if !aws.ToBool(output.IsTruncated) {
            return files, nil
        }
        continuationToken = output.NextContinuationToken
    }

    // Get the actual objects
    output, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
        Bucket:            aws.String(s.bucket),
        Prefix:            aws.String(prefix),
        ContinuationToken: continuationToken,
        MaxKeys:           aws.Int32(int32(limit)),
    })
    if err != nil {
        return nil, fmt.Errorf("failed to list files from S3: %w", err)
    }

    for _, obj := range output.Contents {
        // Fetch per-object metadata (content type and original name)
        head, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
            Bucket: aws.String(s.bucket),
            Key:    obj.Key,
        })

        var contentType string
        var originalName string

        if err != nil {
            // HeadObject reports a missing key as types.NotFound.
            var notFound *types.NotFound
            if errors.As(err, &notFound) {
                // If the object disappeared between the list and the head call
                // (which shouldn't happen normally), still include it in the
                // result but with empty metadata.
                contentType = ""
                originalName = aws.ToString(obj.Key)
            } else {
                // Skip objects whose metadata cannot be read for other reasons.
                continue
            }
        } else {
            contentType = aws.ToString(head.ContentType)
            originalName = head.Metadata["x-amz-meta-original-name"]
            if originalName == "" {
                originalName = aws.ToString(obj.Key)
            }
        }

        files = append(files, &FileInfo{
            ID:          aws.ToString(obj.Key),
            Name:        originalName,
            Size:        aws.ToInt64(obj.Size),
            ContentType: contentType,
            CreatedAt:   aws.ToTime(obj.LastModified),
            UpdatedAt:   aws.ToTime(obj.LastModified),
            URL:         s.getObjectURL(aws.ToString(obj.Key)),
        })
    }

    return files, nil
}
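
// Caller sketch (illustrative): list up to 50 objects under a prefix. Because
// the offset is applied in whole pages of 1,000 keys, offsets below 1,000
// have no effect on the result:
//
//	page, err := store.List(ctx, "", 50, 0)
//	if err != nil {
//		return err
//	}
//	for _, f := range page {
//		fmt.Println(f.ID, f.Name, f.Size)
//	}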

func (s *S3Storage) Exists(ctx context.Context, id string) (bool, error) {
    _, err := s.client.HeadObject(ctx, &s3.HeadObjectInput{
        Bucket: aws.String(s.bucket),
        Key:    aws.String(id),
    })
    if err != nil {
        // HeadObject reports a missing key as types.NotFound rather than
        // types.NoSuchKey; treat that as "does not exist" and anything else
        // as a real error.
        var notFound *types.NotFound
        if errors.As(err, &notFound) {
            return false, nil
        }
        return false, fmt.Errorf("failed to check file existence in S3: %w", err)
    }
    return true, nil
}
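
// Test sketch (hypothetical, not part of this commit): because S3Storage
// depends on the narrow s3Client interface rather than *s3.Client directly,
// a partial fake can embed the interface and override a single method:
//
//	type fakeS3 struct{ s3Client } // un-overridden methods panic if called
//
//	func (fakeS3) HeadObject(ctx context.Context, in *s3.HeadObjectInput, _ ...func(*s3.Options)) (*s3.HeadObjectOutput, error) {
//		return nil, &types.NotFound{}
//	}
//
//	// NewS3Storage(fakeS3{}, "test-bucket", "", false).Exists(ctx, "missing")
//	// then returns (false, nil) without any network access.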