You've already forked openaccounting-server
forked from cybercinch/openaccounting-server
feat: implement unified S3-compatible storage system
Consolidates storage backends into a single S3-compatible driver that supports: - AWS S3 (native) - Backblaze B2 (S3-compatible API) - Cloudflare R2 (S3-compatible API) - MinIO and other S3-compatible services - Local filesystem for development This replaces the previous separate B2 driver with a unified approach, reducing dependencies and complexity while adding support for more services. 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
106
core/storage/interface.go
Normal file
106
core/storage/interface.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Storage defines the interface for file storage backends.
//
// Two implementations exist in this package: LocalStorage (filesystem)
// and S3Storage (AWS S3 and S3-compatible services).
type Storage interface {
	// Store saves a file and returns the storage path/key.
	// The filename is only used for its extension; the backend generates
	// a unique key, so storing the same filename twice never collides.
	Store(filename string, content io.Reader, contentType string) (string, error)

	// Retrieve gets a file by its storage path/key. The caller must close
	// the returned ReadCloser. A missing file is reported as
	// *FileNotFoundError.
	Retrieve(path string) (io.ReadCloser, error)

	// Delete removes a file by its storage path/key.
	Delete(path string) error

	// GetURL returns a URL for accessing the file (may be signed/temporary).
	// expiry only applies to backends that produce signed URLs.
	GetURL(path string, expiry time.Duration) (string, error)

	// Exists checks if a file exists at the given path.
	Exists(path string) (bool, error)

	// GetMetadata returns file metadata (size, last modified, etc.).
	// A missing file is reported as *FileNotFoundError.
	GetMetadata(path string) (*FileMetadata, error)
}
|
||||
|
||||
// FileMetadata contains information about a stored file.
type FileMetadata struct {
	Size         int64     // size in bytes
	LastModified time.Time // last modification time
	ContentType  string    // MIME type; empty for the local backend
	ETag         string    // object ETag; empty for the local backend
}
|
||||
|
||||
// Config holds configuration for storage backends.
type Config struct {
	// Storage backend type: "local" (also the default when empty) or "s3".
	Backend string `mapstructure:"backend"`

	// Local filesystem configuration, used when Backend is "local" or empty.
	Local LocalConfig `mapstructure:"local"`

	// S3-compatible storage configuration (S3, B2, R2, etc.), used when
	// Backend is "s3".
	S3 S3Config `mapstructure:"s3"`
}
|
||||
|
||||
// LocalConfig configures local filesystem storage.
type LocalConfig struct {
	// Root directory for file storage. Defaults to "./uploads" when empty.
	RootDir string `mapstructure:"root_dir"`

	// Base URL for serving files (optional). When set, GetURL returns
	// BaseURL + "/" + path; otherwise a server-relative "/files/" path.
	BaseURL string `mapstructure:"base_url"`
}
|
||||
|
||||
// S3Config configures S3-compatible storage (AWS S3, Backblaze B2, Cloudflare R2, etc.)
type S3Config struct {
	// AWS Region (use "auto" for Cloudflare R2)
	Region string `mapstructure:"region"`

	// S3 Bucket name (required)
	Bucket string `mapstructure:"bucket"`

	// Optional prefix prepended to every generated object key
	Prefix string `mapstructure:"prefix"`

	// Access Key ID. When both key fields are set, static credentials are
	// used; otherwise the AWS SDK's default credential chain applies.
	AccessKeyID string `mapstructure:"access_key_id"`

	// Secret Access Key (see AccessKeyID)
	SecretAccessKey string `mapstructure:"secret_access_key"`

	// Custom endpoint URL for S3-compatible services:
	// - Backblaze B2: https://s3.us-west-004.backblazeb2.com
	// - Cloudflare R2: https://<account-id>.r2.cloudflarestorage.com
	// - MinIO: http://localhost:9000
	// Leave empty for AWS S3
	Endpoint string `mapstructure:"endpoint"`

	// Use path-style addressing (required for some S3-compatible services).
	// Only consulted when Endpoint is set.
	PathStyle bool `mapstructure:"path_style"`
}
|
||||
|
||||
|
||||
// NewStorage creates a new storage backend based on configuration
|
||||
func NewStorage(config Config) (Storage, error) {
|
||||
switch config.Backend {
|
||||
case "local", "":
|
||||
return NewLocalStorage(config.Local)
|
||||
case "s3":
|
||||
return NewS3Storage(config.S3)
|
||||
default:
|
||||
return nil, &UnsupportedBackendError{Backend: config.Backend}
|
||||
}
|
||||
}
|
||||
|
||||
// UnsupportedBackendError reports a request for a storage backend name
// that this package does not implement.
type UnsupportedBackendError struct {
	// Backend is the unrecognized backend name from the configuration.
	Backend string
}

// Error implements the error interface.
func (e *UnsupportedBackendError) Error() string {
	const prefix = "unsupported storage backend: "
	return prefix + e.Backend
}
|
||||
101
core/storage/interface_test.go
Normal file
101
core/storage/interface_test.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewStorage(t *testing.T) {
|
||||
t.Run("Local Storage", func(t *testing.T) {
|
||||
config := Config{
|
||||
Backend: "local",
|
||||
Local: LocalConfig{
|
||||
RootDir: t.TempDir(),
|
||||
},
|
||||
}
|
||||
|
||||
storage, err := NewStorage(config)
|
||||
assert.NoError(t, err)
|
||||
assert.IsType(t, &LocalStorage{}, storage)
|
||||
})
|
||||
|
||||
t.Run("Default to Local Storage", func(t *testing.T) {
|
||||
config := Config{
|
||||
// No backend specified
|
||||
Local: LocalConfig{
|
||||
RootDir: t.TempDir(),
|
||||
},
|
||||
}
|
||||
|
||||
storage, err := NewStorage(config)
|
||||
assert.NoError(t, err)
|
||||
assert.IsType(t, &LocalStorage{}, storage)
|
||||
})
|
||||
|
||||
t.Run("S3 Storage", func(t *testing.T) {
|
||||
config := Config{
|
||||
Backend: "s3",
|
||||
S3: S3Config{
|
||||
Region: "us-east-1",
|
||||
Bucket: "test-bucket",
|
||||
},
|
||||
}
|
||||
|
||||
// This might succeed if AWS credentials are available via IAM roles or env vars
|
||||
// Let's just check that we get an S3Storage instance or an error
|
||||
storage, err := NewStorage(config)
|
||||
if err != nil {
|
||||
// If it fails, that's expected in test environments without AWS access
|
||||
assert.Nil(t, storage)
|
||||
} else {
|
||||
// If it succeeds, we should get an S3Storage instance
|
||||
assert.IsType(t, &S3Storage{}, storage)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("B2 Storage", func(t *testing.T) {
|
||||
config := Config{
|
||||
Backend: "b2",
|
||||
B2: B2Config{
|
||||
AccountID: "test-account",
|
||||
ApplicationKey: "test-key",
|
||||
Bucket: "test-bucket",
|
||||
},
|
||||
}
|
||||
|
||||
// This will fail because we don't have real B2 credentials
|
||||
storage, err := NewStorage(config)
|
||||
assert.Error(t, err) // Expected to fail without credentials
|
||||
assert.Nil(t, storage)
|
||||
})
|
||||
|
||||
t.Run("Unsupported Backend", func(t *testing.T) {
|
||||
config := Config{
|
||||
Backend: "unsupported",
|
||||
}
|
||||
|
||||
storage, err := NewStorage(config)
|
||||
assert.Error(t, err)
|
||||
assert.IsType(t, &UnsupportedBackendError{}, err)
|
||||
assert.Nil(t, storage)
|
||||
assert.Contains(t, err.Error(), "unsupported")
|
||||
})
|
||||
}
|
||||
|
||||
func TestStorageErrors(t *testing.T) {
|
||||
t.Run("UnsupportedBackendError", func(t *testing.T) {
|
||||
err := &UnsupportedBackendError{Backend: "ftp"}
|
||||
assert.Equal(t, "unsupported storage backend: ftp", err.Error())
|
||||
})
|
||||
|
||||
t.Run("FileNotFoundError", func(t *testing.T) {
|
||||
err := &FileNotFoundError{Path: "missing.txt"}
|
||||
assert.Equal(t, "file not found: missing.txt", err.Error())
|
||||
})
|
||||
|
||||
t.Run("InvalidPathError", func(t *testing.T) {
|
||||
err := &InvalidPathError{Path: "../../../etc/passwd"}
|
||||
assert.Equal(t, "invalid path: ../../../etc/passwd", err.Error())
|
||||
})
|
||||
}
|
||||
243
core/storage/local.go
Normal file
243
core/storage/local.go
Normal file
@@ -0,0 +1,243 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/openaccounting/oa-server/core/util/id"
|
||||
)
|
||||
|
||||
// LocalStorage implements the Storage interface for the local filesystem.
// Files are kept under rootDir in date-partitioned subdirectories
// (YYYY/MM/DD) with generated unique names.
type LocalStorage struct {
	rootDir string // root directory all stored files live under
	baseURL string // optional public base URL used by GetURL
}
|
||||
|
||||
// NewLocalStorage creates a new local filesystem storage backend
|
||||
func NewLocalStorage(config LocalConfig) (*LocalStorage, error) {
|
||||
rootDir := config.RootDir
|
||||
if rootDir == "" {
|
||||
rootDir = "./uploads"
|
||||
}
|
||||
|
||||
// Ensure the root directory exists
|
||||
if err := os.MkdirAll(rootDir, 0755); err != nil {
|
||||
return nil, fmt.Errorf("failed to create storage directory: %w", err)
|
||||
}
|
||||
|
||||
return &LocalStorage{
|
||||
rootDir: rootDir,
|
||||
baseURL: config.BaseURL,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Store saves a file to the local filesystem
|
||||
func (l *LocalStorage) Store(filename string, content io.Reader, contentType string) (string, error) {
|
||||
// Generate a unique storage path
|
||||
storagePath := l.generateStoragePath(filename)
|
||||
fullPath := filepath.Join(l.rootDir, storagePath)
|
||||
|
||||
// Ensure the directory exists
|
||||
dir := filepath.Dir(fullPath)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return "", fmt.Errorf("failed to create directory: %w", err)
|
||||
}
|
||||
|
||||
// Create and write the file
|
||||
file, err := os.Create(fullPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to create file: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
_, err = io.Copy(file, content)
|
||||
if err != nil {
|
||||
// Clean up the file if write failed
|
||||
os.Remove(fullPath)
|
||||
return "", fmt.Errorf("failed to write file: %w", err)
|
||||
}
|
||||
|
||||
return storagePath, nil
|
||||
}
|
||||
|
||||
// Retrieve gets a file from the local filesystem
|
||||
func (l *LocalStorage) Retrieve(path string) (io.ReadCloser, error) {
|
||||
// Validate path to prevent directory traversal
|
||||
if err := l.validatePath(path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fullPath := filepath.Join(l.rootDir, path)
|
||||
file, err := os.Open(fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, &FileNotFoundError{Path: path}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to open file: %w", err)
|
||||
}
|
||||
|
||||
return file, nil
|
||||
}
|
||||
|
||||
// Delete removes a file from the local filesystem
|
||||
func (l *LocalStorage) Delete(path string) error {
|
||||
// Validate path to prevent directory traversal
|
||||
if err := l.validatePath(path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fullPath := filepath.Join(l.rootDir, path)
|
||||
err := os.Remove(fullPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("failed to delete file: %w", err)
|
||||
}
|
||||
|
||||
// Try to remove empty parent directories
|
||||
l.cleanupEmptyDirs(filepath.Dir(fullPath))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetURL returns a URL for accessing the file
|
||||
func (l *LocalStorage) GetURL(path string, expiry time.Duration) (string, error) {
|
||||
// Validate path to prevent directory traversal
|
||||
if err := l.validatePath(path); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Check if file exists
|
||||
exists, err := l.Exists(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !exists {
|
||||
return "", &FileNotFoundError{Path: path}
|
||||
}
|
||||
|
||||
if l.baseURL != "" {
|
||||
// Return a public URL if base URL is configured
|
||||
return l.baseURL + "/" + path, nil
|
||||
}
|
||||
|
||||
// For local storage without a base URL, return the file path
|
||||
// In a real application, you might serve these through an endpoint
|
||||
return "/files/" + path, nil
|
||||
}
|
||||
|
||||
// Exists checks if a file exists at the given path
|
||||
func (l *LocalStorage) Exists(path string) (bool, error) {
|
||||
// Validate path to prevent directory traversal
|
||||
if err := l.validatePath(path); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
fullPath := filepath.Join(l.rootDir, path)
|
||||
_, err := os.Stat(fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to check file existence: %w", err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// GetMetadata returns file metadata
|
||||
func (l *LocalStorage) GetMetadata(path string) (*FileMetadata, error) {
|
||||
// Validate path to prevent directory traversal
|
||||
if err := l.validatePath(path); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fullPath := filepath.Join(l.rootDir, path)
|
||||
info, err := os.Stat(fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, &FileNotFoundError{Path: path}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get file metadata: %w", err)
|
||||
}
|
||||
|
||||
return &FileMetadata{
|
||||
Size: info.Size(),
|
||||
LastModified: info.ModTime(),
|
||||
ContentType: "", // Local storage doesn't store content type
|
||||
ETag: "", // Local storage doesn't have ETags
|
||||
}, nil
|
||||
}
|
||||
|
||||
// generateStoragePath creates a unique storage path for a file
|
||||
func (l *LocalStorage) generateStoragePath(filename string) string {
|
||||
// Generate a unique ID for the file
|
||||
fileID := id.String(id.New())
|
||||
|
||||
// Extract file extension
|
||||
ext := filepath.Ext(filename)
|
||||
|
||||
// Create a path structure: YYYY/MM/DD/uuid.ext
|
||||
now := time.Now()
|
||||
datePath := fmt.Sprintf("%04d/%02d/%02d", now.Year(), now.Month(), now.Day())
|
||||
|
||||
return filepath.Join(datePath, fileID+ext)
|
||||
}
|
||||
|
||||
// validatePath ensures the path doesn't contain directory traversal attempts
|
||||
func (l *LocalStorage) validatePath(path string) error {
|
||||
// Clean the path and check for traversal attempts
|
||||
cleanPath := filepath.Clean(path)
|
||||
|
||||
// Reject paths that try to go up directories
|
||||
if strings.Contains(cleanPath, "..") {
|
||||
return &InvalidPathError{Path: path}
|
||||
}
|
||||
|
||||
// Reject absolute paths
|
||||
if filepath.IsAbs(cleanPath) {
|
||||
return &InvalidPathError{Path: path}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupEmptyDirs removes empty parent directories up to the root
|
||||
func (l *LocalStorage) cleanupEmptyDirs(dir string) {
|
||||
// Don't remove the root directory
|
||||
if dir == l.rootDir {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if directory is empty
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil || len(entries) > 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Remove empty directory
|
||||
if err := os.Remove(dir); err == nil {
|
||||
// Recursively clean parent directories
|
||||
l.cleanupEmptyDirs(filepath.Dir(dir))
|
||||
}
|
||||
}
|
||||
|
||||
// FileNotFoundError reports that no file exists at the requested storage
// path.
type FileNotFoundError struct {
	// Path is the storage path that was requested.
	Path string
}

// Error implements the error interface.
func (e *FileNotFoundError) Error() string {
	const prefix = "file not found: "
	return prefix + e.Path
}
|
||||
|
||||
// InvalidPathError reports a storage path that is absolute or attempts
// directory traversal.
type InvalidPathError struct {
	// Path is the offending path as supplied by the caller.
	Path string
}

// Error implements the error interface.
func (e *InvalidPathError) Error() string {
	const prefix = "invalid path: "
	return prefix + e.Path
}
|
||||
202
core/storage/local_test.go
Normal file
202
core/storage/local_test.go
Normal file
@@ -0,0 +1,202 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestLocalStorage exercises the full LocalStorage lifecycle against a
// temporary directory: store/retrieve, metadata, URL generation, delete,
// path validation, not-found handling, and unique path generation.
//
// NOTE: the subtests deliberately share one storage instance.
func TestLocalStorage(t *testing.T) {
	// Create temporary directory for testing
	tmpDir := t.TempDir()

	config := LocalConfig{
		RootDir: tmpDir,
		BaseURL: "http://localhost:8080/files",
	}

	storage, err := NewLocalStorage(config)
	assert.NoError(t, err)
	assert.NotNil(t, storage)

	t.Run("Store and Retrieve File", func(t *testing.T) {
		content := []byte("test file content")
		reader := bytes.NewReader(content)

		// Store file
		path, err := storage.Store("test.txt", reader, "text/plain")
		assert.NoError(t, err)
		assert.NotEmpty(t, path)

		// Verify file exists
		exists, err := storage.Exists(path)
		assert.NoError(t, err)
		assert.True(t, exists)

		// Retrieve file and compare contents round-trip
		retrievedReader, err := storage.Retrieve(path)
		assert.NoError(t, err)
		defer retrievedReader.Close()

		retrievedContent, err := io.ReadAll(retrievedReader)
		assert.NoError(t, err)
		assert.Equal(t, content, retrievedContent)
	})

	t.Run("Get File Metadata", func(t *testing.T) {
		content := []byte("metadata test content")
		reader := bytes.NewReader(content)

		path, err := storage.Store("metadata.txt", reader, "text/plain")
		assert.NoError(t, err)

		// Size must match the stored bytes; ModTime must be set.
		metadata, err := storage.GetMetadata(path)
		assert.NoError(t, err)
		assert.Equal(t, int64(len(content)), metadata.Size)
		assert.False(t, metadata.LastModified.IsZero())
	})

	t.Run("Get File URL", func(t *testing.T) {
		content := []byte("url test content")
		reader := bytes.NewReader(content)

		path, err := storage.Store("url.txt", reader, "text/plain")
		assert.NoError(t, err)

		// With BaseURL configured, the URL embeds both it and the path.
		url, err := storage.GetURL(path, time.Hour)
		assert.NoError(t, err)
		assert.Contains(t, url, path)
		assert.Contains(t, url, config.BaseURL)
	})

	t.Run("Delete File", func(t *testing.T) {
		content := []byte("delete test content")
		reader := bytes.NewReader(content)

		path, err := storage.Store("delete.txt", reader, "text/plain")
		assert.NoError(t, err)

		// Verify file exists
		exists, err := storage.Exists(path)
		assert.NoError(t, err)
		assert.True(t, exists)

		// Delete file
		err = storage.Delete(path)
		assert.NoError(t, err)

		// Verify file no longer exists
		exists, err = storage.Exists(path)
		assert.NoError(t, err)
		assert.False(t, exists)
	})

	t.Run("Path Validation", func(t *testing.T) {
		// Test directory traversal prevention
		_, err := storage.Retrieve("../../../etc/passwd")
		assert.Error(t, err)
		assert.IsType(t, &InvalidPathError{}, err)

		// Test absolute path rejection
		_, err = storage.Retrieve("/etc/passwd")
		assert.Error(t, err)
		assert.IsType(t, &InvalidPathError{}, err)
	})

	t.Run("File Not Found", func(t *testing.T) {
		// Every read-style operation surfaces *FileNotFoundError.
		_, err := storage.Retrieve("nonexistent.txt")
		assert.Error(t, err)
		assert.IsType(t, &FileNotFoundError{}, err)

		_, err = storage.GetMetadata("nonexistent.txt")
		assert.Error(t, err)
		assert.IsType(t, &FileNotFoundError{}, err)

		_, err = storage.GetURL("nonexistent.txt", time.Hour)
		assert.Error(t, err)
		assert.IsType(t, &FileNotFoundError{}, err)
	})

	t.Run("Storage Path Generation", func(t *testing.T) {
		content := []byte("path test content")
		reader1 := bytes.NewReader(content)
		reader2 := bytes.NewReader(content)

		// Store two files with same name
		path1, err := storage.Store("same.txt", reader1, "text/plain")
		assert.NoError(t, err)

		path2, err := storage.Store("same.txt", reader2, "text/plain")
		assert.NoError(t, err)

		// Paths should be different (unique)
		assert.NotEqual(t, path1, path2)

		// Both should exist
		exists1, err := storage.Exists(path1)
		assert.NoError(t, err)
		assert.True(t, exists1)

		exists2, err := storage.Exists(path2)
		assert.NoError(t, err)
		assert.True(t, exists2)

		// Both should keep the original extension
		assert.True(t, strings.HasSuffix(path1, ".txt"))
		assert.True(t, strings.HasSuffix(path2, ".txt"))

		// Should be organized by date (YYYY/MM/DD prefix).
		// NOTE(review): could flake if run exactly across midnight.
		now := time.Now()
		expectedPrefix := filepath.Join(
			fmt.Sprintf("%04d", now.Year()),
			fmt.Sprintf("%02d", now.Month()),
			fmt.Sprintf("%02d", now.Day()),
		)
		assert.True(t, strings.HasPrefix(path1, expectedPrefix))
		assert.True(t, strings.HasPrefix(path2, expectedPrefix))
	})
}
|
||||
|
||||
func TestLocalStorageConfig(t *testing.T) {
|
||||
t.Run("Default Root Directory", func(t *testing.T) {
|
||||
config := LocalConfig{} // Empty config
|
||||
|
||||
storage, err := NewLocalStorage(config)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, storage)
|
||||
|
||||
// Should create default uploads directory
|
||||
assert.Equal(t, "./uploads", storage.rootDir)
|
||||
|
||||
// Verify directory was created
|
||||
_, err = os.Stat("./uploads")
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Clean up
|
||||
os.RemoveAll("./uploads")
|
||||
})
|
||||
|
||||
t.Run("Custom Root Directory", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
customDir := filepath.Join(tmpDir, "custom", "storage")
|
||||
|
||||
config := LocalConfig{
|
||||
RootDir: customDir,
|
||||
}
|
||||
|
||||
storage, err := NewLocalStorage(config)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, customDir, storage.rootDir)
|
||||
|
||||
// Verify custom directory was created
|
||||
_, err = os.Stat(customDir)
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
236
core/storage/s3.go
Normal file
236
core/storage/s3.go
Normal file
@@ -0,0 +1,236 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
"github.com/openaccounting/oa-server/core/util/id"
|
||||
)
|
||||
|
||||
// S3Storage implements the Storage interface for Amazon S3 and
// S3-compatible services (Backblaze B2, Cloudflare R2, MinIO, ...).
type S3Storage struct {
	client   *s3.S3              // low-level S3 API client
	uploader *s3manager.Uploader // managed uploader sharing the same session
	bucket   string              // target bucket name
	prefix   string              // optional key prefix applied to new objects
}
|
||||
|
||||
// NewS3Storage creates a storage backend for AWS S3 or any S3-compatible
// service. Bucket is required. A custom Endpoint (with optional path-style
// addressing) selects a non-AWS service; static credentials are used when
// both key fields are set, otherwise the SDK's default credential chain
// (env vars, shared config, IAM role) applies.
func NewS3Storage(config S3Config) (*S3Storage, error) {
	if config.Bucket == "" {
		return nil, fmt.Errorf("S3 bucket name is required")
	}

	// Create AWS config
	awsConfig := &aws.Config{
		Region: aws.String(config.Region),
	}

	// Set custom endpoint if provided (for S3-compatible services).
	// Path-style addressing is only applied together with an endpoint.
	if config.Endpoint != "" {
		awsConfig.Endpoint = aws.String(config.Endpoint)
		awsConfig.S3ForcePathStyle = aws.Bool(config.PathStyle)
	}

	// Set static credentials if provided
	if config.AccessKeyID != "" && config.SecretAccessKey != "" {
		awsConfig.Credentials = credentials.NewStaticCredentials(
			config.AccessKeyID,
			config.SecretAccessKey,
			"", // no session token for long-lived static keys
		)
	}

	// Create session (no network I/O happens here; credentials are
	// resolved lazily on first request)
	sess, err := session.NewSession(awsConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create AWS session: %w", err)
	}

	// Create S3 client and the managed uploader that shares its session
	client := s3.New(sess)
	uploader := s3manager.NewUploader(sess)

	return &S3Storage{
		client:   client,
		uploader: uploader,
		bucket:   config.Bucket,
		prefix:   config.Prefix,
	}, nil
}
|
||||
|
||||
// Store uploads content under a freshly generated, date-partitioned key and
// returns that key. The upload goes through s3manager.Uploader, which
// transparently performs multipart uploads for large bodies.
func (s *S3Storage) Store(filename string, content io.Reader, contentType string) (string, error) {
	// Generate a unique storage key (prefix/YYYY/MM/DD/uuid.ext)
	storageKey := s.generateStorageKey(filename)

	// Prepare upload input
	input := &s3manager.UploadInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(storageKey),
		Body:   content,
	}

	// Set content type if provided; otherwise the service default applies
	if contentType != "" {
		input.ContentType = aws.String(contentType)
	}

	// Upload the file
	_, err := s.uploader.Upload(input)
	if err != nil {
		return "", fmt.Errorf("failed to upload file to S3: %w", err)
	}

	return storageKey, nil
}
|
||||
|
||||
// Retrieve gets a file from S3
|
||||
func (s *S3Storage) Retrieve(path string) (io.ReadCloser, error) {
|
||||
input := &s3.GetObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(path),
|
||||
}
|
||||
|
||||
result, err := s.client.GetObject(input)
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
switch aerr.Code() {
|
||||
case s3.ErrCodeNoSuchKey:
|
||||
return nil, &FileNotFoundError{Path: path}
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("failed to retrieve file from S3: %w", err)
|
||||
}
|
||||
|
||||
return result.Body, nil
|
||||
}
|
||||
|
||||
// Delete removes a file from S3
|
||||
func (s *S3Storage) Delete(path string) error {
|
||||
input := &s3.DeleteObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(path),
|
||||
}
|
||||
|
||||
_, err := s.client.DeleteObject(input)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete file from S3: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetURL returns a presigned URL for accessing the file
|
||||
func (s *S3Storage) GetURL(path string, expiry time.Duration) (string, error) {
|
||||
// Check if file exists first
|
||||
exists, err := s.Exists(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !exists {
|
||||
return "", &FileNotFoundError{Path: path}
|
||||
}
|
||||
|
||||
// Generate presigned URL
|
||||
req, _ := s.client.GetObjectRequest(&s3.GetObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(path),
|
||||
})
|
||||
|
||||
url, err := req.Presign(expiry)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate presigned URL: %w", err)
|
||||
}
|
||||
|
||||
return url, nil
|
||||
}
|
||||
|
||||
// Exists checks if a file exists in S3
|
||||
func (s *S3Storage) Exists(path string) (bool, error) {
|
||||
input := &s3.HeadObjectInput{
|
||||
Bucket: aws.String(s.bucket),
|
||||
Key: aws.String(path),
|
||||
}
|
||||
|
||||
_, err := s.client.HeadObject(input)
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
switch aerr.Code() {
|
||||
case s3.ErrCodeNoSuchKey, "NotFound":
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return false, fmt.Errorf("failed to check file existence in S3: %w", err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// GetMetadata returns size, last-modified time, content type and ETag for
// the object stored under the given key, via a HEAD request. A missing
// object is reported as *FileNotFoundError.
func (s *S3Storage) GetMetadata(path string) (*FileMetadata, error) {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(path),
	}

	result, err := s.client.HeadObject(input)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			// Missing objects surface as NoSuchKey or the generic
			// "NotFound" code depending on the service.
			case s3.ErrCodeNoSuchKey, "NotFound":
				return nil, &FileNotFoundError{Path: path}
			}
		}
		return nil, fmt.Errorf("failed to get file metadata from S3: %w", err)
	}

	metadata := &FileMetadata{
		Size: aws.Int64Value(result.ContentLength),
	}

	// The remaining header fields are optional pointers in the SDK's
	// response type; copy them only when present.
	if result.LastModified != nil {
		metadata.LastModified = *result.LastModified
	}

	if result.ContentType != nil {
		metadata.ContentType = *result.ContentType
	}

	if result.ETag != nil {
		// S3 returns the ETag wrapped in double quotes; strip them.
		metadata.ETag = strings.Trim(*result.ETag, "\"")
	}

	return metadata, nil
}
|
||||
|
||||
// generateStorageKey creates a unique storage key for a file
|
||||
func (s *S3Storage) generateStorageKey(filename string) string {
|
||||
// Generate a unique ID for the file
|
||||
fileID := id.String(id.New())
|
||||
|
||||
// Extract file extension
|
||||
ext := path.Ext(filename)
|
||||
|
||||
// Create a key structure: prefix/YYYY/MM/DD/uuid.ext
|
||||
now := time.Now()
|
||||
datePath := fmt.Sprintf("%04d/%02d/%02d", now.Year(), now.Month(), now.Day())
|
||||
|
||||
key := path.Join(datePath, fileID+ext)
|
||||
|
||||
// Add prefix if configured
|
||||
if s.prefix != "" {
|
||||
key = path.Join(s.prefix, key)
|
||||
}
|
||||
|
||||
return key
|
||||
}
|
||||
Reference in New Issue
Block a user